text stringlengths 38 1.54M |
|---|
# For each test case: read N words, then for every word count how many of the
# N words contain it as a substring; print the per-word counts and the maximum.
# A zero word-count terminates the program.
while True:
    registro = []
    caso = int(input())
    if caso == 0:
        break
    for _ in range(caso):
        registro.append(input())
    regPalMudada = []
    for palavra in registro:
        # A word "hits" an entry when it occurs inside it at least once.
        # Empty words never count: the original tested this via
        # len(j) > len(j.replace(palavra, '')), and replacing '' changes nothing.
        palMudada = sum(1 for entrada in registro if palavra and palavra in entrada)
        regPalMudada.append(palMudada)
    print(regPalMudada)
    # caso > 0 guarantees regPalMudada is non-empty, so max() is safe.
    print(max(regPalMudada))
|
from collections import OrderedDict
import glob
import imp
import json
import os
import shutil
from houdiniResourceManager.core import node_manager
imp.reload(node_manager)
node_type_data = None
file_name_data = None
class JSON_Loading_Error(Exception):
    '''Raised when a required JSON configuration file cannot be found.'''
    pass
def init_file_name_data():
    '''
    Loads the global ``file_name_data`` from the JSON file
    "fileName_template.json" in the package's config directory.
    (The previous docstring incorrectly referenced "node_type_data.json".)

    Raises:
        JSON_Loading_Error: if the JSON file does not exist.
    '''
    global file_name_data
    # Package root is one directory above this module's directory.
    this_dir = os.path.dirname((os.path.dirname(__file__)))
    json_file_path = os.path.normpath(os.path.join(this_dir, "config", "fileName_template.json"))
    if json_file_path and os.path.exists(json_file_path):
        with open(json_file_path, 'r') as file:
            # OrderedDict preserves the key order of the template file.
            file_name_data = json.load(file, object_pairs_hook=OrderedDict)
    else:
        raise JSON_Loading_Error("JSON file '" + os.path.normpath(json_file_path) + "' does not exist!")
def init_node_type_data():
    '''
    Loads the global ``node_type_data`` from the JSON file
    "node_type_data.json" in the package's config directory.

    Raises:
        JSON_Loading_Error: if the JSON file does not exist.
    '''
    global node_type_data
    # Package root is one directory above this module's directory.
    this_dir = os.path.dirname((os.path.dirname(__file__)))
    json_file_path = os.path.normpath(os.path.join(this_dir, "config", "node_type_data.json"))
    if json_file_path and os.path.exists(json_file_path):
        with open(json_file_path, 'r') as file:
            # OrderedDict preserves the key order of the config file.
            node_type_data = json.load(file, object_pairs_hook=OrderedDict)
    else:
        raise JSON_Loading_Error("JSON file '" + os.path.normpath(json_file_path) + "' does not exist!")
def collect(module_names, from_selected=False):
    '''
    Gather all nodes whose type matches any of the given names.

    Note: ``from_selected`` is currently unused; it is kept for interface
    compatibility with existing callers.
    '''
    collected = []
    for type_name in module_names:
        found = node_manager.get_nodes_by_type(type_name)
        if found:
            collected.extend(found)
    return collected
def get_file_path(node):
    '''
    Return the normalized file path evaluated from the node's file-name
    parameter, or None when ``node_type_data`` has not been initialized.
    '''
    global node_type_data
    if node_type_data:
        # The parameter holding the file path is looked up per node type.
        file_name_parm = node_type_data[node.type().name()]['file_name_param']
        return (os.path.normpath(node.parm(file_name_parm).eval()))
    return None
def get_files(node):
    '''
    Return every file on disk matching the node's (possibly sequenced) path.

    Each sequence tag configured for the node type is replaced by "*" and
    the result is globbed. Returns None when ``node_type_data`` has not
    been initialized.
    '''
    global node_type_data
    if not node_type_data:
        return None
    pattern = get_file_path(node)
    for tag in node_type_data[node.type().name()]['sequance_tags']:
        pattern = pattern.replace(tag, "*")
    return glob.glob(pattern)
def get_files_count(node):
    '''Return the number of files currently on disk for the node's path.'''
    return len(get_files(node))
def get_files_size(node):
    '''Return the combined on-disk size of the node's files, in mebibytes.'''
    files = get_files(node)
    # os.path.getsize returns bytes; convert to MiB.
    total_bytes = sum(os.path.getsize(file_) for file_ in files)
    return total_bytes / 1024.0 / 1024.0
def get_node_path(node):
    '''Return the node's full path inside the scene graph.'''
    return (node.path())
def get_node_specific_sequencers(node):
    '''
    Return the sequence tags configured for this node's type, or None when
    ``node_type_data`` has not been initialized.
    '''
    global node_type_data
    if node_type_data:
        return (node_type_data[node.type().name()]['sequance_tags'])
    return None
def modify_file_path(file_path, cut_data = None, new_dir = None, new_fileName = None, prefix=None, replace_data=None, suffix=None ):
    '''
    Build a modified version of ``file_path`` without touching the disk.

    When ``new_fileName`` is given it replaces the base name entirely and
    ``cut_data``/``replace_data`` are ignored; otherwise those two edits are
    applied to the original base name. ``prefix``, ``suffix`` and ``new_dir``
    apply in every case. The extension is always preserved.

    cut_data:     {"from": i, "to": j} removes characters [i, j) of the name.
    replace_data: {"from": old, "to": new} substring replacement.
    '''
    base, extension = os.path.splitext(file_path)
    directory, name = os.path.split(base)
    if new_fileName:
        name = new_fileName
    else:
        if cut_data:
            name = name[:cut_data["from"]] + name[cut_data["to"]:]
        if replace_data:
            name = name.replace(replace_data["from"], replace_data["to"])
    if prefix:
        name = prefix + name
    if suffix:
        name = name + suffix
    if new_dir:
        directory = new_dir
    return os.path.join(directory, name) + extension
def modify_node(node, cut_data = None, new_dir=None, new_fileName = None, prefix=None, replace_data=None, suffix=None, affect_files=True , copy_files=True):
    '''
    Rewrite a node's file parameter and optionally copy/rename its files on
    disk to match.

    Returns a list of error strings; empty on success. The node parameter is
    only updated when no errors occurred.
    '''
    file_path = get_file_path(node)
    errors = []
    new_path = modify_file_path(file_path, cut_data = cut_data, new_dir=new_dir, new_fileName=new_fileName, prefix=prefix, replace_data=replace_data, suffix=suffix )
    sane_sequancers = []
    if affect_files:
        sequance_tags = get_node_specific_sequencers(node)
        # A sequence tag present in the old path must survive the rename,
        # otherwise per-frame files could not be mapped to new names.
        for sequance_tag in sequance_tags:
            if sequance_tag in file_path:
                if sequance_tag in new_path :
                    sane_sequancers.append(sequance_tag)
                else:
                    errors.append("Sequancer '" + sequance_tag + "' missing after renaming, can't continue furter")
        if not errors:
            files = get_files(node)
            sequancer_replacement_datas = []
            for sequance_tag in sane_sequancers:
                sequance_dict = {}
                sequance_dict['sequance_tag'] = sequance_tag
                # Splitting the templated path around the tag yields the
                # literal pieces that surround the frame token.
                sequance_dict['split_items'] = file_path.split(sequance_tag)
                sequancer_replacement_datas.append(sequance_dict)
            for file_path_ in files:
                new_file_path_ = file_path_
                for sequancer_replacement_data in sequancer_replacement_datas:
                    # Strip the literal pieces from the concrete file path to
                    # recover the frame token's actual value (e.g. "0042").
                    sequance = file_path_
                    for item in sequancer_replacement_data['split_items']:
                        sequance = sequance.replace(item, "")
                    # NOTE(review): new_file_path_ is rebuilt from new_path on
                    # every iteration, so with more than one sane sequencer
                    # only the last tag's substitution survives — verify this
                    # is intended.
                    new_file_path_ = new_path.replace(sequancer_replacement_data['sequance_tag'],sequance)
                if copy_files:
                    shutil.copyfile(file_path_, new_file_path_)
                else:
                    os.rename(file_path_, new_file_path_)
    if not errors:
        set_file_path(node, new_path)
    return errors
def set_file_path(node, new_path):
    '''
    Write ``new_path`` into the node's file-name parameter.

    Returns True on success, False when ``node_type_data`` has not been
    initialized.
    '''
    global node_type_data
    if not node_type_data:
        return False
    parm_name = node_type_data[node.type().name()]['file_name_param']
    node.parm(parm_name).set(new_path)
    return True
#!/usr/bin/env python
import os
import sys
import tempfile
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as mp
import evaluate
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import get_scheduler
from tqdm.auto import tqdm
def setup(rank, world_size, fn=None, backend="gloo"): # 'tcp'
    """Initialize the default process group, then optionally run fn(rank, world_size).

    Uses a fixed localhost rendezvous address/port, so only one such job can
    run on a machine at a time.
    """
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"
    dist.init_process_group(backend, rank=rank, world_size=world_size)
    if fn is not None:
        fn(rank, world_size)
def cleanup():
    """Tear down the default process group created by setup()."""
    dist.destroy_process_group()
def run_old(rank, size):
    """Print the world size and prepare buffers for a (disabled) gather demo.

    Requires the default process group to be initialized first.
    """
    print(dist.get_world_size())
    tensor = torch.ones(1)
    # Renamed from `list`, which shadowed the builtin.
    gather_list = [torch.zeros(1) for _ in range(size)]
    # dist.gather(tensor, dst=0, gather_list=gather_list, group=0)
    # print('Rank ', rank, ' has data ', sum(gather_list)[0])
def run(rank, size):
    """All-reduce demo: each rank contributes ones(1); after SUM every rank
    holds the group size. Hard-codes a 4-rank group [0, 1, 2, 3].
    """
    group = dist.new_group([0, 1, 2, 3])
    tensor = torch.ones(1)
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM, group=group)
    print("Rank ", rank, " has data ", tensor[0])
def run_blocking(rank, size):
    """Blocking point-to-point demo: rank 0 sends 1.0 to rank 1; every other
    rank receives from rank 0.

    NOTE(review): only rank 1 is ever sent to, so with size > 2 ranks >= 2
    would block in recv() forever — intended for a 2-rank world.
    """
    tensor = torch.zeros(1)
    if rank == 0:
        tensor += 1
        # send() blocks until the destination has received the tensor.
        dist.send(tensor=tensor, dst=1)
    else:
        dist.recv(tensor=tensor, src=0)
    print("Rank ", rank, " has data ", tensor[0])
def run_nonblocking(rank, size):
    """Non-blocking point-to-point demo using isend/irecv.

    Both sides must wait on the returned request before touching the tensor;
    printing happens only after req.wait() completes.
    """
    tensor = torch.zeros(1)
    req = None
    if rank == 0:
        tensor += 1
        req = dist.isend(tensor=tensor, dst=1)
        print("Rank 0 started sending")
    else:
        req = dist.irecv(tensor=tensor, src=0)
        print("Rank 1 started receiving")
    req.wait()
    print("Rank ", rank, " has data ", tensor[0])
def run_model():
    """Fine-tune bert-base-cased on a 1k-sample IMDB subset, then evaluate accuracy.

    Single-process reference training loop (no DDP); uses a GPU when available.
    """
    dataset = load_dataset("imdb")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    def tokenize_function(examples):
        # Pad/truncate every review to the model's maximum length.
        return tokenizer(examples["text"], padding="max_length", truncation=True)
    tokenized = dataset.map(tokenize_function, batched=True)
    tokenized = tokenized.remove_columns(["text"])
    tokenized = tokenized.rename_column("label", "labels")
    tokenized.set_format("torch")
    # Small fixed subsets keep the demo fast and reproducible.
    train_ds = tokenized["train"].shuffle(seed=42).select(range(1000))
    eval_ds = tokenized["test"].shuffle(seed=42).select(range(1000))
    train_dataloader = DataLoader(train_ds, shuffle=True, batch_size=8)
    eval_dataloader = DataLoader(eval_ds, batch_size=8)
    # NOTE(review): IMDB is a binary sentiment task; num_labels=5 looks wrong
    # (training still runs, but three of the five logits never match a label).
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)
    optimizer = optim.AdamW(model.parameters(), lr=5e-5)
    num_epochs = 3
    num_training_steps = num_epochs * len(train_dataloader)
    lr_scheduler = get_scheduler(
        name="linear",
        optimizer=optimizer,
        num_warmup_steps=0,
        num_training_steps=num_training_steps,
    )
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model.to(device)
    progress_bar = tqdm(range(num_training_steps))
    model.train()
    for epoch in range(num_epochs):
        for batch in train_dataloader:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = outputs.loss
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            progress_bar.update(1)
    # Evaluation: accuracy over the held-out subset.
    metric = evaluate.load("accuracy")
    model.eval()
    for batch in eval_dataloader:
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        logits = outputs.logits
        predictions = torch.argmax(logits, dim=-1)
        metric.add_batch(predictions=predictions, references=batch["labels"])
    # NOTE(review): the computed metric is discarded — presumably it should be
    # printed or returned.
    metric.compute()
def gather(tensor, rank, list=None, root=0, group=None):
    """Gather `tensor` from every rank onto `root`.

    On the root rank, `list` must be a pre-allocated list of tensors (one
    per rank) that receives the gathered values; other ranks only send.
    (`list` shadows the builtin but is kept for interface compatibility.)
    """
    if group is None:
        group = dist.group.WORLD
    # torch.distributed exposes a single collective `gather`;
    # gather_recv/gather_send are not part of the public API.
    if rank == root:
        assert list is not None
        dist.gather(tensor, gather_list=list, dst=root, group=group)
    else:
        dist.gather(tensor, dst=root, group=group)
if __name__ == "__main__":
    # Spawn one process per rank; each process joins the group via setup()
    # and then runs the all_reduce demo `run` (passed as setup's fn arg).
    size = 4
    processes = []
    mp.set_start_method("spawn")
    for rank in range(size):
        p = mp.Process(target=setup, args=(rank, size, run))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
|
from flask import Flask
from prometheus_client import start_http_server,Summary,Counter,Gauge
app = Flask(__name__)
# Prometheus metrics: total request count and timestamp of the last request.
TOTAL_REQ = Counter('hello_worlds_total','Hello Worlds requested.')
LAST_TIME = Gauge('hello_world_last_time_seconds','The last time a Hello World was served.')
@app.route("/")
def hello():
    """Serve the greeting and update both Prometheus metrics."""
    LAST_TIME.set_to_current_time()
    TOTAL_REQ.inc()
    return "Hello World!"
if __name__ == "__main__":
    # Metrics are exposed on port 8000; the Flask app serves on its default port.
    start_http_server(8000)
    app.run()
|
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import os
import tables
from bisect import bisect_left
from sklearn.linear_model import Ridge
from sklearn.decomposition import PCA
from sklearn.neural_network import MLPRegressor
import cv2
from tqdm import tqdm
import Regression_Utils
import Match_Mousecam_Frames_To_Widefield_Frames
def match_mousecam_to_widefield_frames(base_directory):
    """For every widefield frame, find the mousecam frame closest in time and
    save the mapping to Stimuli_Onsets/widfield_to_mousecam_frame_dict.npy.

    NOTE(review): this function calls `invert_dictionary` and `take_closest`,
    neither of which is defined or imported in this module — confirm they are
    available at runtime (the imported sibling module
    Match_Mousecam_Frames_To_Widefield_Frames defines the same routine and is
    what decompose_face_motion actually uses).
    """
    # Load Frame Times (pickled dicts stored inside .npy files)
    widefield_frame_times = np.load(os.path.join(base_directory, "Stimuli_Onsets", "Frame_Times.npy"), allow_pickle=True)[()]
    mousecam_frame_times = np.load(os.path.join(base_directory, "Stimuli_Onsets", "Mousecam_Frame_Times.npy"), allow_pickle=True)[()]
    # After inversion: keys are widefield frame indexes, values are times.
    widefield_frame_times = invert_dictionary(widefield_frame_times)
    widefield_frame_time_keys = list(widefield_frame_times.keys())
    mousecam_frame_times_keys = list(mousecam_frame_times.keys())
    # Sorted times are required for the closest-time lookup below.
    mousecam_frame_times_keys.sort()
    # Get Number of Frames
    number_of_widefield_frames = len(widefield_frame_time_keys)
    # Dictionary - Keys are Widefield Frame Indexes, Values are Closest Mousecam Frame Indexes
    widfield_to_mousecam_frame_dict = {}
    for widefield_frame in range(number_of_widefield_frames):
        frame_time = widefield_frame_times[widefield_frame]
        closest_mousecam_time = take_closest(mousecam_frame_times_keys, frame_time)
        closest_mousecam_frame = mousecam_frame_times[closest_mousecam_time]
        widfield_to_mousecam_frame_dict[widefield_frame] = closest_mousecam_frame
    # Save the mapping next to the other stimulus-onset artefacts.
    save_directoy = os.path.join(base_directory, "Stimuli_Onsets", "widfield_to_mousecam_frame_dict.npy")
    np.save(save_directoy, widfield_to_mousecam_frame_dict)
def get_video_name(base_directory):
    """Return the first file in base_directory whose name contains "_cam_1".

    Returns None (implicitly) when no such file exists.
    """
    for entry in os.listdir(base_directory):
        if "_cam_1" in entry:
            return entry
def get_face_data(video_file, face_pixels):
    """Read every frame of the video and sample the listed face pixels.

    Args:
        video_file: path to the mousecam video.
        face_pixels: iterable of (row, col) pixel coordinates.

    Returns:
        (face_data, frameHeight, frameWidth) where face_data has shape
        (n_frames, n_face_pixels).
    """
    # Open Video File
    cap = cv2.VideoCapture(video_file)
    frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Extract Selected Frames
    face_data = []
    print("Extracting Face Video Data")
    for frame_index in tqdm(range(frameCount)):
        # NOTE(review): `ret` is never checked — a short or corrupt video
        # would make `frame` None and crash on the slice below.
        ret, frame = cap.read()
        # Keep a single channel (greyscale camera stored as 3 equal channels
        # — presumably; verify against the recordings).
        frame = frame[:, :, 0]
        face_frame = []
        for pixel in face_pixels:
            face_frame.append(frame[pixel[0], pixel[1]])
        face_data.append(face_frame)
        # NOTE(review): redundant — the for-loop already advances frame_index.
        frame_index += 1
    cap.release()
    face_data = np.array(face_data)
    return face_data, frameHeight, frameWidth
def get_face_eigenspectrum(face_data, n_components=150):
    """Fit a PCA to face_data, plot its cumulative eigenspectrum, and return
    the per-component explained variance ratios.

    Blocks on plt.show() until the figure window is closed.
    """
    model = PCA(n_components=n_components)
    model.fit(face_data)
    explained_variance_ratio = model.explained_variance_ratio_
    # Cumulative variance explained by the first k components, for each k.
    # (The original slice [0:x] excluded the current component — off by one —
    # and dropped the final component entirely.)
    cumulative_variance_explained_list = np.cumsum(explained_variance_ratio)
    print("Explained Variance Ratio", explained_variance_ratio)
    plt.plot(cumulative_variance_explained_list)
    plt.show()
    return explained_variance_ratio
def decompose_face_data(face_data, n_components=150):
    """Project face_data onto its first n_components principal components.

    Returns:
        (transformed_data, components) from the fitted PCA model.
    """
    pca = PCA(n_components=n_components)
    projected = pca.fit_transform(face_data)
    return projected, pca.components_
def view_face_motion_components(base_directory, components, face_pixels, image_height, image_width):
    """Render each PCA component back into face-image space and save a grid
    of component images to Mousecam_analysis/Face_Motion_Components.png.
    """
    number_of_face_pixels = np.shape(face_pixels)[0]
    # Bounding box of the face region, used to crop each component image.
    face_y_min = np.min(face_pixels[:, 0])
    face_y_max = np.max(face_pixels[:, 0])
    face_x_min = np.min(face_pixels[:, 1])
    face_x_max = np.max(face_pixels[:, 1])
    # NOTE(review): colourmap is fetched but never used — imshow uses 'bwr'.
    colourmap = Regression_Utils.get_musall_cmap()
    figure_1 = plt.figure(figsize=(15, 10))
    count = 1
    for component in components:
        # Scatter the component weights back onto a full-size image canvas.
        template = np.zeros((image_height, image_width))
        for face_pixel_index in range(number_of_face_pixels):
            pixel_data = component[face_pixel_index]
            pixel_position = face_pixels[face_pixel_index]
            template[pixel_position[0], pixel_position[1]] = pixel_data
        template = template[face_y_min:face_y_max, face_x_min:face_x_max]
        # Symmetric colour limits centred on zero.
        template_magnitude = np.max(np.abs(template))
        axis = figure_1.add_subplot(10, 15, count)
        #axis.set_title(count)
        axis.axis('off')
        axis.imshow(template, vmax=template_magnitude, vmin=-template_magnitude, cmap='bwr')
        count += 1
    plt.savefig(os.path.join(base_directory, "Mousecam_analysis", "Face_Motion_Components.png"))
    plt.close()
def match_face_motion_to_widefield_frames(base_directory, transformed_data):
    """Reindex mousecam-derived motion components onto widefield frames using
    the saved widefield->mousecam frame mapping.

    Returns:
        Array of shape (n_widefield_frames, n_components).
    """
    # Load Widefield Frame Dict (widefield frame index -> mousecam frame index)
    widefield_frame_dict = np.load(os.path.join(base_directory, "Stimuli_Onsets", "widfield_to_mousecam_frame_dict.npy"), allow_pickle=True)[()]
    # Debug output: first 1000 mappings.
    print("Widefield Frame Dict Keys", list(widefield_frame_dict.keys())[0:1000])
    print("Widefield Frame Dict Values", list(widefield_frame_dict.values())[0:1000])
    # Visualise This
    """
    ai_data = Regression_Utils.load_ai_recorder_file(base_directory)
    stimuli_dictionary = Regression_Utils.create_stimuli_dictionary()
    blue_led_trace = ai_data[stimuli_dictionary["LED 1"]]
    mousecam_trace = ai_data[stimuli_dictionary["Mousecam"]]
    mousecam_frame_times = np.load(os.path.join(base_directory, "Stimuli_Onsets", "Mousecam_Frame_Times.npy"), allow_pickle=True)[()]
    mousecam_frame_times = list(mousecam_frame_times.keys())
    print("mousecam frame times", mousecam_frame_times[0:100])
    plt.plot(blue_led_trace, c='b')
    plt.plot(mousecam_trace, c='m')
    plt.scatter(mousecam_frame_times, np.ones(len(mousecam_frame_times)))
    plt.show()
    """
    print("Transformed Data Shape", np.shape(transformed_data))
    widefield_frame_matched_motion = []
    for widefield_frame in widefield_frame_dict.keys():
        mousecam_frame = widefield_frame_dict[widefield_frame]
        print("mousecam frame", mousecam_frame)
        widefield_frame_matched_motion.append(transformed_data[mousecam_frame])
    widefield_frame_matched_motion = np.array(widefield_frame_matched_motion)
    return widefield_frame_matched_motion
def decompose_face_motion(base_directory):
    """Full pipeline: map mousecam frames to widefield frames, extract
    whisker-pixel intensities from the video, difference them over time,
    PCA-decompose the motion, reindex onto widefield frames, and save all
    outputs under Mousecam_analysis.
    """
    # Match Mousecam To Widefield frames
    Match_Mousecam_Frames_To_Widefield_Frames.match_mousecam_to_widefield_frames(base_directory)
    # Load the whisker-region pixel coordinates; transposed to (n_pixels, 2).
    face_pixels = np.load(os.path.join(base_directory, "Mousecam_analysis", "Whisker_Pixels.npy"))
    face_pixels = np.transpose(face_pixels)
    print("Face Pixels", np.shape(face_pixels))
    # Get Video Name
    video_name = get_video_name(base_directory)
    # Get Face Data
    face_data, image_height, image_width = get_face_data(os.path.join(base_directory, video_name), face_pixels)
    print("Face Data", "Shape", np.shape(face_data), "Size", face_data.nbytes)
    # Motion = frame-to-frame difference of pixel intensities.
    face_data = np.diff(face_data, axis=0)
    print("Face Motion Data", np.shape(face_data))
    # Perform Decomposition
    transformed_data, components = decompose_face_data(face_data)
    # Match Mousecam Motion To Widefield Frames
    widefield_frame_matched_motion = match_face_motion_to_widefield_frames(base_directory, transformed_data)
    # Save all three artefacts for downstream regression analyses.
    np.save(os.path.join(base_directory, "Mousecam_analysis", "Face_Motion_Transformed_Data.npy"), transformed_data)
    np.save(os.path.join(base_directory, "Mousecam_analysis", "Face_Motion_Components.npy"), components)
    np.save(os.path.join(base_directory, "Mousecam_analysis", "Widefield_Matched_Face_Motion.npy"), widefield_frame_matched_motion)
    # View Face Motion Components
    view_face_motion_components(base_directory, components, face_pixels, image_height, image_width)
# Recording sessions to process; each entry is a session's base directory.
session_list = [r"//media/matthew/Expansion/Control_Data/NRXN78.1D/2020_12_07_Switching_Imaging"]
for base_directory in session_list:
    #match_mousecam_to_widefield_frames(base_directory)
    decompose_face_motion(base_directory)
"""
solver_strategy.py module
"""
from queue import Queue, LifoQueue
from abc import ABC, abstractmethod
class SearchStrategy(ABC):
    """Abstract base for frontier-container search strategies."""

    @abstractmethod
    def __str__(self):
        """Human-readable strategy name."""

    @staticmethod
    @abstractmethod
    def get_strategy():
        """Return the frontier container used by this strategy."""
class BreadthFirstSearchStrategy(SearchStrategy):
    """Concrete strategy: FIFO frontier, i.e. breadth-first search."""

    def __str__(self):
        return self.__class__.__name__

    @staticmethod
    def get_strategy():
        """Return a FIFO queue so nodes expand in discovery order."""
        return Queue()
class DepthFirstSearchStrategy(SearchStrategy):
    """Concrete strategy: LIFO frontier, i.e. depth-first search."""

    def __str__(self):
        return self.__class__.__name__

    @staticmethod
    def get_strategy():
        """Return a LIFO queue so the newest node expands first."""
        return LifoQueue()
class SearchStrategyFactory:
    """Creates frontier containers by delegating to a strategy class."""

    def __init__(self, strategy_type):
        # strategy_type: any object exposing a get_strategy() callable.
        self.strategy_type = strategy_type

    def __str__(self):
        return self.__class__.__name__

    def get_strategy(self):
        """Return a fresh frontier container from the configured strategy."""
        strategy = self.strategy_type
        return strategy.get_strategy()
|
# Generated by Django 3.2.4 on 2021-06-22 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rework the user's coordinate fields: drop ``lang``, add a nullable
    ``long`` float, and make ``lat`` nullable.

    NOTE(review): presumably lat/long are latitude/longitude and the dropped
    ``lang`` was a typo for ``long`` — data in the old column is discarded;
    verify against the app's models.
    """

    dependencies = [
        ('authentication', '0001_initial'),
    ]

    operations = [
        # Drop the old (misnamed) field; its data is not migrated.
        migrations.RemoveField(
            model_name='user',
            name='lang',
        ),
        migrations.AddField(
            model_name='user',
            name='long',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='lat',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
|
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class DataImputer(BaseEstimator, TransformerMixin):
    """Sklearn-compatible transformer that drops unused weather columns and
    fills gaps via linear interpolation plus backward fill.
    """

    def __init__(self):
        # Kept for interface compatibility; fit() stores nothing.
        self.X = None

    def fit(self, x, y=None):
        """No-op fit — this transformer is stateless."""
        return self

    def transform(self, x, y=None):
        """Drop known-unused columns (those present), interpolate remaining
        gaps, and convert the index to datetimes.

        Note: the drop mutates the caller's DataFrame in place, matching the
        original behavior. errors='ignore' replaces the old try/except
        KeyError, which skipped the drop entirely when ANY column was absent;
        now the columns that do exist are always dropped.
        """
        x.drop(['Unnamed: 9', 'visibility', 'humidity', 'humidex', 'windchill', 'wind', 'pressure'],
               axis=1,
               inplace=True,
               errors='ignore')
        # .bfill() replaces the deprecated fillna(method='bfill').
        x = x.interpolate(method='linear').bfill()
        x.index = pd.to_datetime(x.index)
        return x
|
from matplotlib import pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
import h5py
# --- File path -------------------------------------------------------------
directory = r'D:\Data\Calibration'
measurement = 'Calibration_IQ_NOAMP-30dBm_40mV_2.csv'
path = directory + '\\' + measurement

# Parse the file ONCE and slice the four columns (rows 1..5000): measured
# I/Q and the reference I/Q. (The original re-parsed the whole CSV with four
# separate genfromtxt calls.)
data = np.genfromtxt(path)[1:5001, :]
I = data[:, 0]
Q = data[:, 1]
I_ref = data[:, 2]
Q_ref = data[:, 3]

# Rotate the measured IQ by the reference phase to remove phase drift.
phase = np.arctan2(Q, I)
phase_ref = np.arctan2(Q_ref, I_ref)
phase = phase - phase_ref
amp = np.sqrt(I**2 + Q**2)
I = amp*np.cos(phase)
Q = amp*np.sin(phase)

plt.figure(1)
plt.plot(I, Q, '.')
plt.axis('equal')

# Per-quadrature statistics.
I_mean = np.mean(I)
I_variance = np.var(I)
I_sigma = np.sqrt(I_variance)
Q_mean = np.mean(Q)
Q_variance = np.var(Q)
Q_sigma = np.sqrt(Q_variance)

# Histogram plots disabled (mlab.normpdf was removed in modern matplotlib).
# plt.figure(2)
# n, bins, patches = plt.hist(I, 50, normed=1, facecolor='green', alpha=0.75)
# x = np.linspace(min(I), max(I), 100)
# plt.plot(x, mlab.normpdf(x, I_mean, I_sigma))
# plt.figure(3)
# n, bins, patches = plt.hist(Q, 50, normed=1, facecolor='green', alpha=0.75)
# x = np.linspace(min(Q), max(Q), 100)
# plt.plot(x, mlab.normpdf(x, Q_mean, Q_sigma))

# Amplitude SNR: mean vector length over combined standard deviation.
SNR = np.sqrt(I_mean**2 + Q_mean**2)/np.sqrt(I_sigma**2 + Q_sigma**2)
print(SNR)
plt.show()
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Potsdam dataset."""
import os
from typing import Callable, Optional
import matplotlib.pyplot as plt
import numpy as np
import rasterio
import torch
from matplotlib.figure import Figure
from PIL import Image
from torch import Tensor
from .geo import NonGeoDataset
from .utils import (
check_integrity,
draw_semantic_segmentation_masks,
extract_archive,
rgb_to_mask,
)
class Potsdam2D(NonGeoDataset):
    """Potsdam 2D Semantic Segmentation dataset.

    The `Potsdam <https://www.isprs.org/education/benchmarks/UrbanSemLab/2d-sem-label-potsdam.aspx>`__
    dataset is a dataset for urban semantic segmentation used in the 2D Semantic Labeling
    Contest - Potsdam. This dataset uses the "4_Ortho_RGBIR.zip" and "5_Labels_all.zip"
    files to create the train/test sets used in the challenge. The dataset can be
    requested at the challenge homepage. Note, the server contains additional data
    for 3D Semantic Labeling which are currently not supported.

    Dataset format:

    * images are 4-channel geotiffs
    * masks are 3-channel geotiffs with unique RGB values representing the class

    Dataset classes:

    0. Clutter/background
    1. Impervious surfaces
    2. Building
    3. Low Vegetation
    4. Tree
    5. Car

    If you use this dataset in your research, please cite the following paper:

    * https://doi.org/10.5194/isprsannals-I-3-293-2012

    .. versionadded:: 0.2
    """  # noqa: E501

    # Archives expected under `root`.
    filenames = ["4_Ortho_RGBIR.zip", "5_Labels_all.zip"]
    # NOTE(review): the second checksum is only 22 hex characters — a full MD5
    # digest is 32 — so it looks truncated; verify against the released archive.
    md5s = ["c4a8f7d8c7196dd4eba4addd0aae10c1", "cf7403c1a97c0d279414db"]
    image_root = "4_Ortho_RGBIR"
    # Tile ids per split, as defined by the challenge.
    splits = {
        "train": [
            "top_potsdam_2_10",
            "top_potsdam_2_11",
            "top_potsdam_2_12",
            "top_potsdam_3_10",
            "top_potsdam_3_11",
            "top_potsdam_3_12",
            "top_potsdam_4_10",
            "top_potsdam_4_11",
            "top_potsdam_4_12",
            "top_potsdam_5_10",
            "top_potsdam_5_11",
            "top_potsdam_5_12",
            "top_potsdam_6_10",
            "top_potsdam_6_11",
            "top_potsdam_6_12",
            "top_potsdam_6_7",
            "top_potsdam_6_8",
            "top_potsdam_6_9",
            "top_potsdam_7_10",
            "top_potsdam_7_11",
            "top_potsdam_7_12",
            "top_potsdam_7_7",
            "top_potsdam_7_8",
            "top_potsdam_7_9",
        ],
        "test": [
            "top_potsdam_5_15",
            "top_potsdam_6_15",
            "top_potsdam_6_13",
            "top_potsdam_3_13",
            "top_potsdam_4_14",
            "top_potsdam_6_14",
            "top_potsdam_5_14",
            "top_potsdam_2_13",
            "top_potsdam_4_15",
            "top_potsdam_2_14",
            "top_potsdam_5_13",
            "top_potsdam_4_13",
            "top_potsdam_3_14",
            "top_potsdam_7_13",
        ],
    }
    classes = [
        "Clutter/background",
        "Impervious surfaces",
        "Building",
        "Low Vegetation",
        "Tree",
        "Car",
    ]
    # RGB value of each class in the label rasters (same order as `classes`).
    colormap = [
        (255, 0, 0),
        (255, 255, 255),
        (0, 0, 255),
        (0, 255, 255),
        (0, 255, 0),
        (255, 255, 0),
    ]

    def __init__(
        self,
        root: str = "data",
        split: str = "train",
        transforms: Optional[Callable[[dict[str, Tensor]], dict[str, Tensor]]] = None,
        checksum: bool = False,
    ) -> None:
        """Initialize a new Potsdam dataset instance.

        Args:
            root: root directory where dataset can be found
            split: one of "train" or "test"
            transforms: a function/transform that takes input sample and its target as
                entry and returns a transformed version
            checksum: if True, check the MD5 of the downloaded files (may be slow)
        """
        assert split in self.splits
        self.root = root
        self.split = split
        self.transforms = transforms
        self.checksum = checksum
        self._verify()
        # Pair each tile id with its image and mask files; tiles whose files
        # are missing on disk are silently skipped.
        self.files = []
        for name in self.splits[split]:
            image = os.path.join(root, self.image_root, name) + "_RGBIR.tif"
            mask = os.path.join(root, name) + "_label.tif"
            if os.path.exists(image) and os.path.exists(mask):
                self.files.append(dict(image=image, mask=mask))

    def __getitem__(self, index: int) -> dict[str, Tensor]:
        """Return an index within the dataset.

        Args:
            index: index to return

        Returns:
            data and label at that index
        """
        image = self._load_image(index)
        mask = self._load_target(index)
        sample = {"image": image, "mask": mask}
        if self.transforms is not None:
            sample = self.transforms(sample)
        return sample

    def __len__(self) -> int:
        """Return the number of data points in the dataset.

        Returns:
            length of the dataset
        """
        return len(self.files)

    def _load_image(self, index: int) -> Tensor:
        """Load a single image.

        Args:
            index: index to return

        Returns:
            the image as a float tensor in rasterio's (channels, H, W) layout
        """
        path = self.files[index]["image"]
        with rasterio.open(path) as f:
            array = f.read()
            tensor = torch.from_numpy(array).float()
            return tensor

    def _load_target(self, index: int) -> Tensor:
        """Load the target mask for a single image.

        Args:
            index: index to return

        Returns:
            the target mask as a (H, W) long tensor of class indices
        """
        path = self.files[index]["mask"]
        with Image.open(path) as img:
            array: "np.typing.NDArray[np.uint8]" = np.array(img.convert("RGB"))
            # Map each RGB colour to its class index via `colormap`.
            array = rgb_to_mask(array, self.colormap)
            tensor = torch.from_numpy(array)
            # Convert from HxWxC to CxHxW
            tensor = tensor.to(torch.long)
            return tensor

    def _verify(self) -> None:
        """Verify the integrity of the dataset.

        Raises:
            RuntimeError: if checksum fails or the dataset is not downloaded
        """
        # Check if the files already exist
        if os.path.exists(os.path.join(self.root, self.image_root)):
            return
        # Check if .zip files already exists (if so extract)
        exists = []
        for filename, md5 in zip(self.filenames, self.md5s):
            filepath = os.path.join(self.root, filename)
            if os.path.isfile(filepath):
                if self.checksum and not check_integrity(filepath, md5):
                    raise RuntimeError("Dataset found, but corrupted.")
                exists.append(True)
                extract_archive(filepath)
            else:
                exists.append(False)
        if all(exists):
            return
        # The dataset must be requested/downloaded manually — there is no
        # automatic download path.
        raise RuntimeError(
            "Dataset not found in `root` directory, either specify a different"
            + " `root` directory or manually download the dataset to this directory."
        )

    def plot(
        self,
        sample: dict[str, Tensor],
        show_titles: bool = True,
        suptitle: Optional[str] = None,
        alpha: float = 0.5,
    ) -> Figure:
        """Plot a sample from the dataset.

        Args:
            sample: a sample returned by :meth:`__getitem__`
            show_titles: flag indicating whether to show titles above each panel
            suptitle: optional string to use as a suptitle
            alpha: opacity with which to render predictions on top of the imagery

        Returns:
            a matplotlib Figure with the rendered sample
        """
        # One panel for ground truth; a second when a prediction is supplied.
        ncols = 1
        image1 = draw_semantic_segmentation_masks(
            sample["image"][:3], sample["mask"], alpha=alpha, colors=self.colormap
        )
        if "prediction" in sample:
            ncols += 1
            image2 = draw_semantic_segmentation_masks(
                sample["image"][:3],
                sample["prediction"],
                alpha=alpha,
                colors=self.colormap,
            )
        fig, axs = plt.subplots(ncols=ncols, figsize=(ncols * 10, 10))
        if ncols > 1:
            (ax0, ax1) = axs
        else:
            ax0 = axs
        ax0.imshow(image1)
        ax0.axis("off")
        if ncols > 1:
            ax1.imshow(image2)
            ax1.axis("off")
        if show_titles:
            ax0.set_title("Ground Truth")
            if ncols > 1:
                ax1.set_title("Predictions")
        if suptitle is not None:
            plt.suptitle(suptitle)
        return fig
|
# -*- coding: utf-8 -*-
import re
class WeiboContenData:
    """Parses one CSV line of Weibo content data.

    The line is split on commas that fall outside double-quoted segments,
    so quoted fields may themselves contain commas. Fields are stored on
    ``self.fields`` and echoed to stdout.
    """

    # Split on a comma only when an even number of quotes follows it, i.e.
    # the comma is not inside a quoted field. The group is non-capturing so
    # re.split does not inject group matches into the result.
    _FIELD_SPLIT = re.compile(r',(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)')

    def __init__(self, line=""):
        # The original called re.split without the string to split and then
        # printed the *builtin* `list`; actually split the line and keep it.
        self.fields = self._FIELD_SPLIT.split(line)
        print(len(self.fields))
        for field in self.fields:
            print(field)
if __name__ == '__main__':
    # Example: one raw Weibo CSV record (ids, author, topic, text with quoted
    # commas, link, source, engagement counts, millisecond timestamp).
    weibo =WeiboContenData('3942456455792392,5516920356,5516920356,欢乐喜剧人,,我发起了一个投票 【#欢乐喜剧人#第二季第二轮排位赛,你最喜欢___?】#欢乐喜剧人#第二季第二轮排位赛,你最喜欢___?,DhOu9oiRG,微博 weibo.com,,,,,6354,5838,1416,1455438329000')
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch as tc
from torch import nn
import pandas as pd
from torchtext import data
import torchtext
import time
import argparse
from torch import autograd
from torch.autograd import Variable
from tkinter import _flatten
import numpy as np
import pandas as pd
from seqeval.metrics import precision_score, recall_score, f1_score
# In[2]:
# Command-line hyperparameters for the BiLSTM-CRF tagger.
# (Help strings are user-facing runtime text and left as-is.)
parser = argparse.ArgumentParser()
parser.add_argument('--lr',type=float, default = 0.01, help='学习率')  # learning rate
parser.add_argument('--save_path',type=str, default='./Model/model_.pth',help='模型保存位置')  # model save path
parser.add_argument('--char_lstm_embed_size',type=int, default= 25 , help='字符集lstm嵌入dim')  # char-LSTM embedding dim
parser.add_argument('--char_lstm_hidden_size',type=int, default= 25 , help='字符集sltm隐藏层dim')  # char-LSTM hidden dim
parser.add_argument('--word_embed_size',type=int, default = 200, help='word嵌入dim')  # word embedding dim
parser.add_argument('--input_embed_size',type=int, default = 250, help='lstm_input_嵌入dim')  # decoder-LSTM input dim
parser.add_argument('--hidden_size',type=int , default = 250, help='decoder_lstm隐藏层dim')  # decoder-LSTM hidden dim
parser.add_argument('--add_dropout',type= int , default = 1, help='input_embed是否dropout')  # apply dropout to input embeds?
parser.add_argument('--device',type=str , default ='cuda:2', help='train device')
args = parser.parse_args()
print(f'lr = {args.lr}')
print(f'save_path = {args.save_path}')
print(f'add_dropout = {args.add_dropout}')
def argmax(vec):
    """Return the column index of the maximum of a (1, n) row vector."""
    return tc.max(vec, dim=1)[1].item()
# Numerically stable path-score aggregation.
def log_sum_exp(vec):
    """Compute log(sum(exp(vec))) for a (1, n) row vector without overflow.

    Shifts by the maximum entry before exponentiating (the log-sum-exp trick).
    """
    max_score = vec.max()
    return max_score + tc.log(tc.exp(vec - max_score).sum())
# LSTM initialization helper.
def init_lstm(input_lstm):
    """Initialize an nn.LSTM's parameters in place.

    Weights get uniform(-b, b) with b = sqrt(6 / (fan_out/4 + fan_in)),
    a Glorot-style bound accounting for the 4 stacked gates; biases are
    zeroed except the forget-gate slice, which is set to 1 to ease gradient
    flow early in training. Handles all layers and both directions.
    """

    def _init_weights(layer_suffix):
        # One-line purpose: draw both weight matrices of a layer uniformly.
        for name in ('weight_ih_l' + layer_suffix, 'weight_hh_l' + layer_suffix):
            weight = getattr(input_lstm, name)  # getattr replaces the original eval()
            bound = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
            # uniform_ is the in-place initializer; nn.init.uniform is deprecated.
            nn.init.uniform_(weight, -bound, bound)

    def _init_biases(layer_suffix):
        # One-line purpose: zero a layer's biases, forget gate set to 1.
        for name in ('bias_ih_l' + layer_suffix, 'bias_hh_l' + layer_suffix):
            bias = getattr(input_lstm, name)
            bias.data.zero_()
            # PyTorch gate order is (i, f, g, o); slice 1 is the forget gate.
            bias.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1

    for ind in range(input_lstm.num_layers):
        _init_weights(str(ind))
        if input_lstm.bidirectional:
            _init_weights(str(ind) + '_reverse')
    if input_lstm.bias:
        for ind in range(input_lstm.num_layers):
            _init_biases(str(ind))
            if input_lstm.bidirectional:
                _init_biases(str(ind) + '_reverse')
# In[3]:
#定义BILSTM-CRF模型
class BiLSTM_CRF(nn.Module):
def __init__(self, tag_to_idx, vocab, char_vocab, args):
    """Build embeddings, char/word BiLSTMs, the tag projection, and the CRF
    transition matrix.

    Args:
        tag_to_idx: mapping tag -> index; must contain START and STOP
            (module-level constants defined elsewhere in this file —
            TODO confirm they exist before this class is instantiated).
        vocab: word vocabulary (needs .itos).
        char_vocab: character vocabulary (needs .itos).
        args: parsed hyperparameters (embedding/hidden sizes, dropout flag,
            device string).
    """
    super(BiLSTM_CRF, self).__init__()
    # Character-level embedding table.
    self.embedding1 = nn.Embedding(len(char_vocab.itos), args.char_lstm_embed_size)
    # Word-level embedding table.
    self.embedding2 = nn.Embedding(len(vocab.itos), args.word_embed_size)
    self.vocab = vocab
    self.hidden_size = args.hidden_size
    self.tag_to_idx = tag_to_idx
    self.char_lstm_hidden_size = args.char_lstm_hidden_size
    self.target_size = len(tag_to_idx)
    # Dropout applied around the decoder LSTM (and optionally its input).
    self.drop_out = nn.Dropout(0.5)
    self.drop_or_not = args.add_dropout
    self.device = args.device
    # Two BiLSTMs: lstm1 encodes characters into word features; lstm2
    # decodes the combined word+char input sequence.
    self.lstm1 = nn.LSTM(args.char_lstm_embed_size, args.char_lstm_hidden_size, num_layers=1, bidirectional = True)
    init_lstm(self.lstm1)
    self.lstm2 = nn.LSTM(args.input_embed_size, args.hidden_size, num_layers=1, bidirectional = True)
    # Fully-connected projection from BiLSTM output to tag scores.
    self.decoder = nn.Linear(2 * args.hidden_size, self.target_size)
    # CRF transition matrix: transtitions[i, j] scores the move j -> i.
    self.transtitions = nn.Parameter(tc.rand(self.target_size,self.target_size))
    # Forbid transitions into START and out of STOP.
    self.transtitions.data[tag_to_idx[START],:] = -10000
    self.transtitions.data[:, tag_to_idx[STOP]] = -10000
# Emission scores from the decoder BiLSTM: one score per tag per token.
def _get_lstm_features_decoder(self, sentence, char_, char_len):
    """Return per-token tag (emission) scores from the char+word BiLSTMs.

    Args:
        sentence: (seq_len, batch) word indices — batch is 1 here.
        char_: (batch, seq_len, max_wordlen) character indices.
        char_len: (1, seq_len) true character length of each word.

    Returns:
        (batch, seq_len, target_size) emission scores.
    """
    # Character embeddings.
    char_len = char_len.squeeze(dim = 0)  # shape: [seq_len]
    char_ = char_.squeeze(dim = 0).permute(1,0)
    char_embeddings = self.embedding1(char_)  # (max_wordlen, seq_len, embed); seq_len acts as batch
    outputs, _ = self.lstm1(char_embeddings)  # (max_wordlen, seq_len, 2*char_hidden)
    embeds = self.embedding2(sentence)  # (seq_len, batch, embed_size)
    char_embeddings_process = Variable(tc.FloatTensor(tc.zeros((outputs.size(1),outputs.size(2))))).to(self.device)
    # char_embeddings_process: [seq_len, 2 * char_lstm_hidden_size]
    # For each word, concatenate the backward state at position 0 with the
    # forward state at the word's last real character — a full-word summary
    # from each direction.
    for i, index in enumerate(char_len):
        char_embeddings_process[i] = tc.cat((outputs[0][i,self.char_lstm_hidden_size:], outputs[index.cpu().item()-1][i,:self.char_lstm_hidden_size]))
    # Concatenate word embeddings with the char-derived features.
    embeds = tc.cat((embeds.squeeze(dim= 1),char_embeddings_process), dim =1)
    embeds = embeds.unsqueeze(1)
    if self.drop_or_not:
        embeds = self.drop_out(embeds)
    outputs, _ = self.lstm2(embeds)
    outputs = outputs.permute(1,0,2)  # (batch, seq_len, 2*hidden)
    outputs = self.drop_out(outputs)
    outputs = self.decoder(outputs)
    return outputs  # (batch, seq_len, target_size)
# 获取label_tags对应的概率分值
def _get_gold_score(self, feats, tags):
# # feats.shape: [seq_leb, target_size]
# tags.shape: (batch, seq_len)
temp = tc.LongTensor(range(feats.size()[0]))
tags = tags.squeeze(0)
add_start_tags = tc.cat([tc.LongTensor([self.tag_to_idx[START]]).to(self.device), tags])
add_stop_tags = tc.cat([tags, tc.LongTensor([self.tag_to_idx[STOP]]).to(self.device)])
gold_score = tc.sum(self.transtitions[add_stop_tags, add_start_tags]) + tc.sum(feats[temp,tags])
return gold_score
# 计算所有路径的概率分值
def _forward_alg(self, feats):
# feats.shape: [seq_leb, target_size]
init_alphas = tc.Tensor(1, self.target_size).fill_(-10000.)
init_alphas[0][self.tag_to_idx[START]] = 0.
forward_var = autograd.Variable(init_alphas)
forward_var = forward_var.to(self.device)
for feat in feats:
emit_score = feat.view(-1,1)
tag_var = forward_var + self.transtitions + emit_score
max_tag_var, _ = tc.max(tag_var, dim = 1)
tag_var = tag_var - max_tag_var.view(-1,1)
forward_var = max_tag_var + tc.log(tc.sum(tc.exp(tag_var), dim=1)).view(1,-1)
terminal_var = (forward_var + self.transtitions[self.tag_to_idx[STOP]]).view(1,-1)
alpha = log_sum_exp(terminal_var)
return alpha
# 获取字符列表
def _get_char_list(self, sentence):
str_list = [list(self.vocab.itos[index]) for index in sentence]
return tc.tensor(str_list)
# 计算误差值
def _net_log_likelihood(self, sentence, tags, char_, char_len):
# 输入,sentence(seq_len,batch)和真实标签(seq_len,batch),char_(seq_len, batch)
sentence = Variable(sentence)
tags = Variable(tags)
feats2 = self._get_lstm_features_decoder(sentence, char_, char_len)
tags = tags.permute(1,0)
feats2 = feats2.squeeze(0)
forward_score = self._forward_alg(feats2)
gold_score = self._get_gold_score(feats2, tags)
return forward_score - gold_score
# 预测真实标签
def _viterbi_decode(self, feats):
# feats.shape:(seq_len,target_size)
backpointers = [] # 记录路径
init_vvars = tc.full((1, self.target_size), -10000.).to(self.device)
init_vvars[0][self.tag_to_idx[START]] = 0.
forward_var = Variable(init_vvars)
forward_var = forward_var.to(self.device)
# 为何要使用list切换
# 为何要使用list切换
# 为何要使用list切换
# 为何要使用list切换
for feat in feats:
next_tag_var = forward_var.view(1,-1).expand(self.target_size, self.target_size) + self.transtitions
viterbivars_t, bptrs_t = tc.max(next_tag_var, dim=1)
forward_var = viterbivars_t + feat
backpointers.append(bptrs_t.tolist())
terminal_var = forward_var + self.transtitions[self.tag_to_idx[STOP]]
terminal_var.data[self.tag_to_idx[STOP]] = -10000.
terminal_var.data[self.tag_to_idx[START]] = -10000.
best_id = argmax(terminal_var.unsqueeze(dim=0))
best_path = [best_id]
for better_id in reversed(backpointers):
best_id = better_id[best_id]
best_path.append(best_id)
start = best_path.pop()
assert start == self.tag_to_idx[START]
best_path.reverse()
return best_path
def forward(self,sentence, char_, char_len):
# 输入,sentence(seq_len, batch)
feats = self._get_lstm_features_decoder(sentence, char_, char_len)
tag_seq = self._viterbi_decode(feats.squeeze(0))
return tag_seq
# In[4]:
# Tag label <-> index mappings for CoNLL2003 NER, plus the CRF sentinel
# tags STOP/START (must match in both directions).
tag_to_idx = {'B-ORG': 0,'O': 1,'B-MISC': 2,'B-PER':3,'I-PER':4,'B-LOC': 5,'I-ORG': 6,'I-MISC': 7,'I-LOC': 8,'STOP':9,'START':10}
idx_to_tag = ['B-ORG','O','B-MISC','B-PER', 'I-PER', 'B-LOC', 'I-ORG', 'I-MISC', 'I-LOC', 'STOP', 'START']
# Build the per-split CSV files
def process_file(base_path, tag_to_idx, Type):
    """Convert the seq.in/seq.out pair of split `Type` into ./Dataset/<Type>.csv.

    Each CSV row holds the raw sentence (Seq and Char_ columns) and its tags
    encoded as space-separated integer ids (Tag column).
    """
    in_path = base_path + Type + '/seq.in'
    out_path = base_path + Type + '/seq.out'
    with open(in_path, 'r') as f_in, open(out_path, 'r') as f_out:
        sentences = [line.strip() for line in f_in]
        tag_rows = [[str(tag_to_idx[t]) for t in line.strip().split()] for line in f_out]
        print(tag_rows[0])
        joined_tags = [' '.join(row) for row in tag_rows]
        print(len(sentences))
        frame = pd.DataFrame({'Seq': sentences, 'Tag': joined_tags, 'Char_': sentences})
        frame.to_csv('./Dataset/' + Type + '.csv', index = False)
def get_csv_file(tag_to_idx):
    """Generate train/test/valid CSVs from the CoNLL2003 NER directory."""
    base_path = './Dataset/CoNLL2003_NER/'
    for split in ('train', 'test', 'valid'):
        process_file(base_path, tag_to_idx, Type = split)
def pad_char_list(char_list):
    """Right-pad every char-id list to the longest list's length with pad id 1.

    :param char_list: list of lists of character ids (one inner list per word)
    :return: new list with every inner list padded to equal length

    BUG FIX: returns [] for empty input instead of raising ValueError from
    max() on an empty sequence.
    """
    if not char_list:
        return []
    max_len = max(len(item) for item in char_list)
    return [item + [1] * (max_len - len(item)) for item in char_list]
# Build the sentence data iterators
def get_data_iter():
    """Build torchtext fields, vocabularies and train/val/test iterators.

    Returns (word_field, char_vocab_field, train_iter, test_iter, val_iter).
    NOTE(review): depends on module-level `data` (torchtext.data), `pd`,
    `torchtext` and `_flatten`; reads the CSVs produced by process_file().
    """
    # Tokenizer used only to build the character vocabulary.
    def char_vocab_tokenizer(sentence):
        c_lists = [[c for c in word] for word in sentence.strip().split()]
        return list(_flatten(c_lists))
    def tag_tokenizer(x):
        # Tags are stored in the CSV as space-separated integer ids.
        rel = [int(tag) for tag in x.split()]
        return rel
    def _get_dataset(csv_data,char_to_idx, seq, tag, char_, char_len):
        # Build torchtext Examples carrying per-word char ids and lengths.
        examples = []
        fileds = [('Seq',seq),('Tag',tag),('Char_',char_),('Char_len',char_len)]
        for seq, tag in zip(csv_data['Seq'], csv_data['Tag']):
            char_list = [[char_to_idx[c] for c in word] for word in seq.strip().split()]
            char_len_list = [len(word) for word in seq.strip().split()]
            examples.append(data.Example.fromlist([seq, tag, pad_char_list(char_list),char_len_list], fileds))
        return examples, fileds
    seq = data.Field(sequential= True, use_vocab= True, lower= True)
    tag = data.Field(sequential= True, lower= False, use_vocab= False, tokenize= tag_tokenizer)
    char_ = data.Field(sequential=True, use_vocab = False, batch_first= True)
    char_len = data.Field(sequential=True, use_vocab=False,batch_first=True)
    # Field used solely to build the character-set dictionary.
    char_vocab = data.Field(sequential=True, use_vocab = True, tokenize = char_vocab_tokenizer)
    get_charvocab_fields=[('Seq',char_vocab),('None',None),('None',None)]
    train = data.TabularDataset.splits(path='./Dataset', train='train.csv',format='csv',skip_header=True,fields=get_charvocab_fields)[0]
    char_vocab.build_vocab(train)  # character vocabulary
    # Build the three Datasets from CSV.
    train_data = pd.read_csv('./Dataset/train.csv')
    val_data = pd.read_csv('./Dataset/valid.csv')
    test_data = pd.read_csv('./Dataset/test.csv')
    train_dataset = data.Dataset(*_get_dataset(train_data,char_vocab.vocab.stoi, seq, tag,char_, char_len))
    val_dataset = data.Dataset(*_get_dataset(val_data,char_vocab.vocab.stoi, seq, tag, char_, char_len))
    test_dataset = data.Dataset(*_get_dataset(test_data,char_vocab.vocab.stoi, seq, tag, char_, char_len))
    # Word vocabulary, initialised with pretrained GloVe vectors.
    seq.build_vocab(train_dataset, vectors = torchtext.vocab.Vectors(name ='./Dataset/glove.6B.200d.txt'))
    # Data iterators (batch size 1 throughout; the model assumes this).
    train_iter = data.BucketIterator(train_dataset, batch_size=1,shuffle=True ,sort_key = lambda x:len(x.Seq), device = tc.device('cpu'))
    val_iter, test_iter = data.BucketIterator.splits((val_dataset,test_dataset),batch_sizes=(1,1), shuffle=False,repeat=False,sort=False,device=tc.device('cpu'))
    return seq, char_vocab, train_iter, test_iter, val_iter
# In[5]:
def test_(net, data_iter, device, idx_to_tag):
    """Evaluate `net` on `data_iter`.

    Returns (avg_loss, precision, recall, f1).
    NOTE(review): precision_score/recall_score/f1_score come from a
    module-level import (presumably seqeval) — confirm against the header.
    """
    loss_sum, acc_sum, n = 0.0, 0.0, 0
    seq_pred, seq_true = [],[]
    net.eval()  # evaluation mode (disables dropout)
    for batch_data in data_iter:
        sentence = (batch_data.Seq).to(device)
        tags = (batch_data.Tag).to(device)
        char_ = (batch_data.Char_).to(device)
        char_len = (batch_data.Char_len).to(device)
        loss = net._net_log_likelihood(sentence, tags, char_, char_len)
        tag_seq = net(sentence, char_, char_len)  # Viterbi-decoded prediction
        loss_sum += loss.cpu().item()
        n += sentence.shape[1]  # batch size is 1, so this counts sentences
        # Collect predicted and gold tag-id sequences for the metrics.
        true_seq = (tags.squeeze(1)).tolist()
        seq_pred.append(tag_seq)
        seq_true.append(true_seq)
        if n % 200 == 0:
            print(f'test__ n = {n}')
    net.train()  # restore training mode
    # Map tag ids back to label strings for the metric functions.
    seq_pred = [[idx_to_tag[idx] for idx in seq_idx]for seq_idx in seq_pred]
    seq_true = [[idx_to_tag[idx] for idx in seq_idx]for seq_idx in seq_true]
    return loss_sum / n , precision_score(seq_true, seq_pred), recall_score(seq_true, seq_pred), f1_score(seq_true, seq_pred)
def train(net, num_epochs, train_iter, val_iter, test_iter ,optimizer, device, idx_to_tag):
    """Train `net` for `num_epochs`, evaluating on val/test each epoch and
    checkpointing whenever the test F1 beats the best seen so far.

    NOTE(review): uses the module-level global `args` for the save path.
    """
    print(f'training on :{device}')
    # Best test F1 seen so far; only checkpoints that beat it are saved.
    min_num = 0.884
    for epoch in range(num_epochs):
        loss_sum, n, start,temp_time = 0.0, 0, time.time(), time.time()
        for batch_data in train_iter:
            sentence = (batch_data.Seq).to(device)
            tags = (batch_data.Tag).to(device)
            char_ = (batch_data.Char_).to(device)
            char_len = (batch_data.Char_len).to(device)
            loss = net._net_log_likelihood(sentence, tags, char_, char_len)
            optimizer.zero_grad()
            loss.backward()
            # Gradient clipping to stabilise CRF training.
            nn.utils.clip_grad_norm_(filter(lambda p:p.requires_grad, net.parameters()),5.0)
            optimizer.step()
            # BUG FIX: accumulate the detached Python float. The original
            # `loss_sum += loss` kept every iteration's autograd graph alive,
            # growing memory without bound over the epoch.
            loss_sum += loss.item()
            n += 1
            if n % 500 == 0:
                # (fixed: dropped the stray f-prefix on this %-formatted string)
                print('n = %d , train loss : %.3f time: %d' % (n, loss_sum / n, time.time()-temp_time))
                temp_time = time.time()
        loss , P, R, f1 = test_(net, val_iter, device, idx_to_tag)
        loss_test , P_test, R_test, f1_test = test_(net, test_iter, device, idx_to_tag)
        if f1_test >= min_num:
            min_num = f1_test
            print('Save model...')
            tc.save(net.state_dict() ,args.save_path)
        # (fixed: the first field is the epoch, not the batch counter n)
        print('---->epoch = %d, Train loss : %.3f, val_loss: %.3f f1-score %.3f , Take time: %.3f'%(epoch, loss_sum / n, loss, f1, time.time()-start))
        print('-->Test: loss: %.3f pression: %.3f recall: %.3f F1: %.3f'%(loss_test , P_test, R_test, f1_test))
# In[6]:
# Build the data iterators and fields.
seq, char_, train_iter, test_iter, val_iter = get_data_iter()
# Sentinel tags used by the CRF layer (read inside BiLSTM_CRF.__init__).
START ='START'
STOP = 'STOP'
device = tc.device('cuda:2')
print(f'device = {device}')
# NOTE(review): `args` is a module-level global defined earlier in the file.
net = BiLSTM_CRF(tag_to_idx, seq.vocab, char_.vocab, args)
# Initialise word embeddings from the pretrained vectors and freeze them.
net.embedding2.weight.data.copy_(seq.vocab.vectors)
net.embedding2.weight.requires_grad = False
optim = tc.optim.SGD(filter(lambda p:p.requires_grad, net.parameters()), lr= args.lr, momentum = 0.9)
# Resume from a previous checkpoint; raises if args.save_path does not exist.
net.load_state_dict(tc.load(args.save_path))
net = net.to(device)
print(f'net.device is {net.device}')
# In[7]:
num_epochs = 50
train(net, num_epochs, train_iter, val_iter, test_iter, optim, device, idx_to_tag)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
import os, pylint, re, sys, unittest
from pylint import epylint
ME = os.path.abspath(__file__)
# Make CAmkES importable
sys.path.append(os.path.join(os.path.dirname(ME), '../../..'))
from camkes.internal.tests.utils import CAmkESTest
class TestLint(CAmkESTest):
    # Test methods are attached dynamically below (one per .py source file).
    pass
def lint(self, path):
    """Run pylint (errors only) on `path`; fail the test if anything is reported.

    Bound onto TestLint dynamically below, hence the `self` parameter.
    """
    stdout, stderr = epylint.py_run('%s --errors-only' % path, return_std=True)
    err = []
    for line in [x.strip() for x in stdout] + [x.strip() for x in stderr]:
        # This informational line is not an error; ignore it.
        if line == 'No config file found, using default configuration':
            continue
        if line:
            err.append(line)
    if len(err) > 0:
        self.fail('\n'.join(err))
# Generate one lint test per Python file in the parent directory.
srcdir = os.path.join(os.path.dirname(ME), '..')
regex = re.compile(r'.*\.py$')
sub = re.compile(r'[^\w]')  # sanitise file names into identifier characters
for src in os.listdir(srcdir):
    if regex.match(src) is None:
        continue
    path = os.path.abspath(os.path.join(srcdir, src))
    name = 'test_%s' % sub.sub('_', src)
    # `path=path` binds the current value, avoiding the late-binding-closure trap.
    setattr(TestLint, name, lambda self, path=path: lint(self, path))
if __name__ == '__main__':
    unittest.main()
|
# Generated by Django 3.1.1 on 2020-10-09 23:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.1: adds the `balance` field to `user`.
    dependencies = [
        ('checkout', '0003_auto_20201009_2053'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='balance',
            # default=0 was a one-off value for existing rows only
            # (preserve_default=False drops it from the model afterwards).
            field=models.FloatField(default=0),
            preserve_default=False,
        ),
    ]
|
# -*- coding: utf-8 -*-
# ***************************************************
# * File : house_always_win.py
# * Author : Zhefeng Wang
# * Email : wangzhefengr@163.com
# * Date : 2023-07-30
# * Version : 0.1.073019
# * Description : description
# * Link : link
# * Requirement : 相关模块版本需求(例如: numpy >= 2.1.0)
# ***************************************************
# python libraries
import os
import sys
ROOT = os.getcwd()
# Make the project root importable when running as a script.
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))
import random
import numpy as np
import matplotlib.pyplot as plt
# global variable
# Module name without the .py suffix.
# NOTE(review): splitting on '/' breaks on Windows paths — consider os.path.basename.
LOGGING_LABEL = __file__.split('/')[-1][:-3]
def play(total_money, bet_money, total_plays):
    """Simulate `total_plays` even/odd bets starting from `total_money`.

    Args:
        total_money: starting bankroll
        bet_money: amount wagered on each bet
        total_plays: number of bets to place

    Returns:
        The final bankroll after all bets (also plots the bankroll curve).

    BUG FIXES vs. the original:
    - range(1, total_plays) played total_plays - 1 bets; now plays exactly
      total_plays bets.
    - xlabel/ylabel were swapped (x is the bet number, y is the money).
    - money[-1] raised IndexError when no bets were played.
    """
    # The player bets on Even or Odd at random.
    choice = "Even" if np.random.randint(low = 0, high = 2, size = 1) == 0 else "Odd"
    if choice == "Even":  # even bet
        def pick_note():
            """Draw a chip 1-100; win if it is even and not 10."""
            note = random.randint(1, 100)
            if note % 2 != 0 or note == 10:
                return False
            elif note % 2 == 0:
                return True
    elif choice == "Odd":  # odd bet
        def pick_note():
            """Draw a chip 1-100; win if it is odd and not 11."""
            note = random.randint(1, 100)
            if note % 2 == 0 or note == 11:
                return False
            elif note % 2 == 1:
                return True
    # Bet numbers and bankroll after each bet.
    num_of_plays = []
    money = []
    for play_no in range(1, total_plays + 1):
        # Win adds the stake, loss subtracts it; bookkeeping is identical.
        if pick_note():
            total_money = total_money + bet_money
        else:
            total_money = total_money - bet_money
        num_of_plays.append(play_no)
        money.append(total_money)
    # Visualise the bankroll trajectory.
    plt.plot(num_of_plays, money)
    plt.xlabel("Number of bets")
    plt.ylabel("Player Money in $")
    # Final bankroll (the unchanged stake when zero bets were played).
    final_fund = money[-1] if money else total_money
    return final_fund
def multi_play(num, total_money, bet_money, total_plays):
    """Run `num` independent simulations of play() and report the outcomes.

    BUG FIX: the starting-amount message previously hard-coded "$10,000"
    even though the caller controls `total_money`; it now reports the actual
    parameter value.
    """
    # Final bankroll of each simulation.
    final_funds = []
    for i in range(num):
        ending_fund = play(total_money, bet_money, total_plays)
        final_funds.append(ending_fund)
    print(f"总共模拟了 {num} 次 play.")
    print(f"每次模拟的 play 开始手里的金额为:${total_money}.")
    print(f"每次模拟的 play 最后手里的金额为:${final_funds}.")
    print(f"经过 {num} 次模拟,play 一次剩余的平均金额为:${sum(final_funds) / len(final_funds)}")
    plt.show()
# main function for a quick test run
def main():
    # final_funds = play(total_money=10000, bet_money=100, total_plays=50)
    # print(final_funds)
    multi_play(num = 1000, total_money=10000, bet_money=100, total_plays=100)
if __name__ == "__main__":
    main()
|
# server.py
import socket
from random import randint
import pickle
from bitarray import bitarray
import hashlib
# import Crypto
# from Crypto.PublicKey import RSA
# from Crypto import Random
# import ast
# create a socket object
import json
# Hard-coded demo "public key" used to XOR-unmask the first client packet.
publkey = "001100010011000100110000001100110111"
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # NOTE(review): created but never used below
serversocket3 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # NOTE(review): created but never used below
port = 9999
# BUG FIX: bind to '' (all interfaces). The original host string ' ' (a
# single space) is not a resolvable hostname, so bind() raised
# socket.gaierror before the server could start.
serversocket.bind(('', port))
# queue up to 5 pending connections
serversocket.listen(5)
while True:
    # Accept the registering client; it sends an option byte, then a masked
    # password packet and a retrieval packet.
    clientsocket3,addr = serversocket.accept()
    print("Got a connection from %s" % str(addr))
    option = clientsocket3.recv(1024)
    print("Option is %s" %option.decode('ascii'));
    if int(option.decode('ascii'))==1:
        firstpacket = clientsocket3.recv(1024)
        print("Packet 1 : %s" %firstpacket.decode('ascii'))
        secondpacket = clientsocket3.recv(1024)
        # Unmask packet 1 by XOR-ing with the shared "public key".
        rstring = int(firstpacket.decode('ascii')) ^ int(publkey,2)
        password = str(rstring)
        print("Packet 1 Decoded: %s" %password)
        clientsocket3.close()
        # Accept the two share-holder clients that store the password pieces.
        clientsocket,addr = serversocket.accept()
        print("Got a connection from %s" % str(addr))
        clientsocket2,addr2 = serversocket.accept()
        print("Got a connection from %s" % str(addr2))
        # Packet layout (assumed from the slicing below — confirm with the
        # client code): digits 0-3 OTP, 4-8 login id, remainder password.
        OTP = int(password[0:4])
        login_id = password[4:9]
        password_n = int(password[9:])
        # Prime modulus for the additive secret-sharing arithmetic.
        chosen_prime = 5915587277
        hash_object = hashlib.md5(login_id.encode())
        login = hash_object.hexdigest()
        # Split the password into two shares: p1 = pw + r, p2 = pw + 2r
        # (mod prime), so pw can later be recovered as 2*p1 - p2 (mod prime).
        random_r = randint(0,chosen_prime)
        pass1_n = (password_n+random_r)%chosen_prime
        pass1 = str(pass1_n)
        dict1 = {login:pass1}
        resp1 = pickle.dumps(dict1)
        pass2_n = (password_n+2*random_r)%chosen_prime
        pass2 = str(pass2_n)
        dict2 = {login:pass2 }
        resp2 = pickle.dumps(dict2)
        # Ship one share to each holder.
        clientsocket.send(resp1)
        clientsocket2.send(resp2)
        # --- retrieval phase: packet 2 carries OTP + login + hashed password ---
        print("Packet 2 : %s" %secondpacket.decode('ascii'))
        retreive_query = str(secondpacket.decode('ascii'))
        OTP_retreive = int(retreive_query[0:4])
        login_id_retreive = retreive_query[4:9]
        password_char_retreive = retreive_query[9:41]
        hash_object_retreive = hashlib.md5(login_id_retreive.encode())
        login_query = hash_object_retreive.hexdigest()
        # Ask both holders for the share stored under this login hash.
        clientsocket.send(login_query.encode('ascii'))
        clientsocket2.send(login_query.encode('ascii'))
        passw1 = clientsocket.recv(1024)
        print("The Piece from Client 1 is %s" %passw1.decode('ascii'))
        retreived_p1 = int(passw1.decode('ascii'))
        passw2 = clientsocket2.recv(1024)
        print("The Piece from Client 2 is %s" %passw2.decode('ascii'))
        retreived_p2 = int(passw2.decode('ascii'))
        # Recombine the shares: pw = 2*p1 - p2 (mod prime).
        retreived_pass = (2*retreived_p1-retreived_p2)%chosen_prime
        print("The retreived password is %s" %retreived_pass)
        # Recompute md5(password*10000 + OTP) and compare against the value
        # the client supplied (both sides compared as ASCII-code strings).
        packet = retreived_pass*10000+OTP
        packet_str = str(packet)
        hash_object = hashlib.md5(packet_str.encode())
        regenerated_packet = hash_object.hexdigest()
        print("hashed pass with OTP is %s"%str(regenerated_packet))
        password_ascii_retreive = ''.join(str(ord(c)) for c in password_char_retreive)
        regenerated_ascii_packet = ''.join(str(ord(c)) for c in regenerated_packet)
        if ((int(password_ascii_retreive) - int(regenerated_ascii_packet))==0) :
            print("access granted");
        else :
            print("password entered is wrong");
        # NOTE(review): clientsocket3 is never closed when option != 1,
        # leaking one connection per such request.
        clientsocket.close()
        clientsocket2.close()
|
from tealight.robot import (move,
turn,
look,
touch,
smell,
left_side,
right_side)
# Add your code here
def tri():
    """Recovery step when no fruit is adjacent: advance once, then choose a
    turn direction.

    Returns -1 (turn left) if fruit is to the left, 1 (turn right) if fruit
    is to the right, else 2. NOTE(review): what turn(2) means is unclear —
    confirm against the tealight robot API. (Python 2 source: print statement.)
    """
    move()
    a =str(touch())
    l=str(left_side())
    r=str(right_side())
    if l=='fruit':
        return -1
    elif r=='fruit':
        return 1
    else:
        print 'oh no im stuck'
        return(2)
# Greedily follow fruit for up to 400 steps: straight ahead first, then
# right, then left; fall back to tri() when no fruit is adjacent.
for i in range (0,400):
    a =str(touch())
    l=str(left_side())
    r=str(right_side())
    if a =='fruit':
        move()
    elif r=='fruit':
        turn (1)
        move()
    elif l=='fruit':
        turn(-1)
        move()
    else:
        turn(tri())
        move()
|
from rest_framework import viewsets
from info.models import Posting, PointOfInterest
from info.forms import CommentForm
from info.serializers import PostingSerializer
from django.views.generic import View
from django.shortcuts import render
from django.core.urlresolvers import reverse
from base.views import cek_session
from member.models import Elder, CareGiver
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.http import HttpResponseRedirect, HttpResponse
# Create your views here.
class infos(viewsets.ReadOnlyModelViewSet):
    """Read-only REST endpoint exposing all Posting records."""
    queryset = Posting.objects.all()
    serializer_class = PostingSerializer
class InfoAll(View):
    """Listing of postings in the 'info' category (login required)."""
    @classmethod
    def as_view(cls, **initkwargs):
        # Wrap the view so anonymous users are sent to the login page.
        view = super(InfoAll, cls).as_view(**initkwargs)
        return login_required(view, redirect_field_name=None)
    def get(self, request, page=1):
        cek_session(request)
        elder=None
        # Resolve the currently selected elder from the session, if any.
        if request.session.get('active_elder') is not None and request.session['active_elder']!=0:
            elder=Elder.objects.get(pk=request.session.get('active_elder'))
        elders=Elder.get_cared_elder(user=CareGiver.objects.get(user=request.user))
        info=Posting.objects.filter(category__iexact='info').order_by('-id')
        # Pagination is currently disabled; `page` is passed through only.
        #p=Paginator(info,10)
        #info=p.page(page)
        return render(request, 'post.html', {'elders':elders, 'active_elder':elder, 'tag':'info', 'title':'Info', 'info':info, 'current':page})
    def post(self, request, page=1):
        # POST behaves exactly like GET for this listing view.
        return self.get(request, page)
class TipsAll(View):
    """Listing of postings in the 'tips' category (login required)."""
    @classmethod
    def as_view(cls, **initkwargs):
        # Wrap the view so anonymous users are sent to the login page.
        view = super(TipsAll, cls).as_view(**initkwargs)
        return login_required(view, redirect_field_name=None)
    def get(self, request, page=1):
        cek_session(request)
        elder=None
        # Resolve the currently selected elder from the session, if any.
        if request.session.get('active_elder') is not None and request.session['active_elder']!=0:
            elder=Elder.objects.get(pk=request.session.get('active_elder'))
        elders=Elder.get_cared_elder(user=CareGiver.objects.get(user=request.user))
        info=Posting.objects.filter(category__iexact='tips').order_by('-id')
        # Pagination is currently disabled; `page` is passed through only.
        #p=Paginator(info,10)
        #info=p.page(page)
        return render(request, 'post.html', {'elders':elders, 'active_elder':elder, 'tag':'tips', 'title':'Tips dan Trik', 'info':info, 'current':page})
    def post(self, request, page=1):
        # POST behaves exactly like GET for this listing view.
        return self.get(request, page)
class PostDetail(View):
    """Detail view for a single posting, with comment submission on POST.

    NOTE(review): the `type` parameter shadows the builtin but is part of
    the URLconf interface, so it is left unchanged.
    """
    @classmethod
    def as_view(cls, **initkwargs):
        view = super(PostDetail, cls).as_view(**initkwargs)
        return login_required(view, redirect_field_name=None)
    def get(self, request, type, id):
        cek_session(request)
        elder=None
        if request.session.get('active_elder') is not None and request.session['active_elder']!=0:
            elder=Elder.objects.get(pk=request.session.get('active_elder'))
        elders=Elder.get_cared_elder(user=CareGiver.objects.get(user=request.user))
        info=Posting.objects.filter(category=type, id=id)
        if info:
            return render(request, 'post_view.html', {'elders':elders, 'tag':type, 'active_elder':elder, 'info':info[0]})
        # Unknown posting: bounce back to the category listing.
        return HttpResponseRedirect(reverse(type))
    def post(self, request, type, id):
        # Handle a comment submission on this posting.
        cek_session(request)
        elder=None
        if request.session.get('active_elder') is not None and request.session['active_elder']!=0:
            elder=Elder.objects.get(pk=request.session.get('active_elder'))
        elders=Elder.get_cared_elder(user=CareGiver.objects.get(user=request.user))
        info=Posting.objects.filter(category=type, id=id)
        if info:
            form = CommentForm(request.POST)
            if form.is_valid():
                comment = form.save(commit=False)
                comment.owner=request.user
                comment.posting=info[0]
                comment.save()
                return render(request, 'post_view.html', {'elders':elders, 'tag':type, 'active_elder':elder, 'success':'Komentar berhasil ditambahkan', 'info':info[0]})
            # Invalid form: redisplay with the validation errors.
            return render(request, 'post_view.html', {'elders':elders, 'tag':type, 'active_elder':elder, 'error':form.errors, 'info':info[0]})
        return HttpResponseRedirect(reverse(type))
class POIList(View):
    """List all points of interest ordered by category (login required)."""
    @classmethod
    def as_view(cls, **initkwargs):
        view = super(POIList, cls).as_view(**initkwargs)
        return login_required(view, redirect_field_name=None)
    def get(self, request):
        cek_session(request)
        elder=None
        # Resolve the currently selected elder from the session, if any.
        if request.session.get('active_elder') is not None and request.session['active_elder']!=0:
            elder=Elder.objects.get(pk=request.session.get('active_elder'))
        elders=Elder.get_cared_elder(user=CareGiver.objects.get(user=request.user))
        location=PointOfInterest.objects.order_by('category')
        return render(request, 'location.html', {'elders':elders, 'active_elder':elder, 'location':location})
    def post(self, request):
        # POST is treated identically to GET.
        return self.get(request)
import base64
from bsn_sdk_py.client.config import Config
from bsn_sdk_py.trans.transaction_header import get_notrust_trans_data, created_peer_proposal_signedproposal
from bsn_sdk_py.common.myecdsa256 import ecdsa_sign, hash256_sign
from bsn_sdk_py.until.bsn_logger import log_debug,log_info
class NotTrustTransRequest():
    """
    Assemble transaction data under Public-Key-Upload mode: build a Fabric
    proposal, sign it with the user's locally stored ECDSA private key, and
    return the base64-encoded signed proposal.
    """
    def __init__(self, chainCode, funcName, userName, args:list=None, transientData: dict=None):
        self.name = userName
        self.chainCode = chainCode
        self.funcName = funcName
        self.args = args
        self.transientData = transientData
    def set_config(self, config:Config):
        # Must be called before any method that touches key material.
        self.config = config
    def _get_not_trust_private_key(self):
        """Read the user's private-key PEM bytes from the MSP keystore.

        NOTE(review): the path uses Windows-style separators, so this will
        not resolve on POSIX systems — consider os.path.join.
        """
        name = self.GetCertName()
        not_trust_tran_private_path = self.config.mspDir + r'\keystore\\' + name + '_private.pem'
        log_info(("user private key path", not_trust_tran_private_path))
        with open(not_trust_tran_private_path, "rb") as f:
            key_data = f.read()
        return key_data
    def GetCertName(self):
        # Certificate naming convention: <user>@<app_code>.
        return self.name + "@" + self.config.app_code
    def notrust_trans_data(self):
        """Build, sign and base64-encode the transaction proposal."""
        name = self.GetCertName()
        not_trust_tran_public_path = self.config.mspDir + r'\keystore\\' + name + '_cert.pem'
        peer_proposal_proposal = get_notrust_trans_data(
            channelID=self.config.app_info["channelId"],
            mspid=self.config.app_info["mspId"],
            chainCode=self.chainCode,
            cert_pub_path=not_trust_tran_public_path,
            transientData=self.transientData,
            args=self.args,
            funcName=self.funcName)
        proposal_proposal_bytes = peer_proposal_proposal.SerializeToString()
        # Sign the serialized proposal with the user's private key.
        base64_sign = ecdsa_sign(proposal_proposal_bytes, self._get_not_trust_private_key())
        signedproposal = created_peer_proposal_signedproposal(peer_proposal_proposal, base64_sign)
        return str(base64.b64encode(signedproposal.SerializeToString()), 'utf-8')
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import unittest
from tests.utils import run_test
# copy database
# check if in folder _build
class HwtBuildReport_directive_TC(unittest.TestCase):
    """Smoke test for the hwt buildreport directive."""
    def test_buildreport_simple(self):
        run_test("test_buildreport_simple")
if __name__ == "__main__":
    unittest.main()
|
import wx
import controller
import wx.lib.masked as masked
from datetime import datetime
import wx.propgrid as wxpg
from model import Skeleton
class RecordDialog(wx.Dialog):
    """
    Dialog for adding and editing a skeleton record.
    """
    def __init__(self, session, row=None, title="Add", addRecord=True):
        """
        Constructor.

        :param session: database session handed through to the controller
        :param row: existing row to edit (None when adding a new record)
        :param title: "Add" or "Edit"; also used in window/button labels
        :param addRecord: True to add a new record, False to edit `row`
        """
        super().__init__(None, title="%s Record" % title)
        self.addRecord = addRecord
        self.selected_row = row
        self.session = session
        self.result = 0          # set to 1 after a successful add
        self.skeleton_id = None
        self.skeleton_dict = {}
        # Pre-fill the fields from the selected row when editing.
        if row:
            site = self.selected_row.site
            location = self.selected_row.location
            skeleton = self.selected_row.skeleton
            observer = self.selected_row.observer
            obs_date = self.selected_row.obs_date
        else:
            site = location = skeleton = observer = ""
            obs_date = datetime.today().strftime('%Y-%m-%d')
        # --- layout ---
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        size = (100, -1)
        font = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
        # Site
        site_lbl = wx.StaticText(self, label="Site:", size=size)
        site_lbl.SetFont(font)
        self.site_txt = wx.TextCtrl(
            self, value=site, style=wx.TE_PROCESS_ENTER, size=(300, -1))
        self.site_txt.SetMaxLength(50)
        self.site_txt.Bind(wx.EVT_TEXT_ENTER, self.onEnter)
        main_sizer.Add(self.row_builder(
            [site_lbl, self.site_txt]), 0, wx.ALL)
        # Location
        location_lbl = wx.StaticText(self, label="Location:", size=size)
        location_lbl.SetFont(font)
        self.location_txt = wx.TextCtrl(
            self, value=location, style=wx.TE_PROCESS_ENTER, size=(300, -1))
        self.location_txt.SetMaxLength(50)
        self.location_txt.Bind(wx.EVT_TEXT_ENTER, self.onEnter)
        main_sizer.Add(self.row_builder(
            [location_lbl, self.location_txt]), 0, wx.ALL)
        # Skeleton
        skeleton_lbl = wx.StaticText(self, label="Skeleton:", size=size)
        skeleton_lbl.SetFont(font)
        self.skeleton_txt = wx.TextCtrl(
            self, value=skeleton, style=wx.TE_PROCESS_ENTER, size=(300, -1))
        self.skeleton_txt.SetMaxLength(50)
        self.skeleton_txt.Bind(wx.EVT_TEXT_ENTER, self.onEnter)
        main_sizer.Add(self.row_builder(
            [skeleton_lbl, self.skeleton_txt]), 0, wx.ALL)
        # Observer
        observer_lbl = wx.StaticText(self, label="Observer:", size=size)
        observer_lbl.SetFont(font)
        self.observer_txt = wx.TextCtrl(
            self, value=observer, style=wx.TE_PROCESS_ENTER, size=(300, -1))
        self.observer_txt.SetMaxLength(50)
        self.observer_txt.Bind(wx.EVT_TEXT_ENTER, self.onEnter)
        main_sizer.Add(self.row_builder(
            [observer_lbl, self.observer_txt]), 0, wx.ALL)
        # Observation date (masked to YYYY-MM-DD)
        obs_date_lbl = wx.StaticText(
            self, label="Date:", size=size)
        obs_date_lbl.SetFont(font)
        self.obs_date_txt = masked.TextCtrl(self, -1, "",
                                            mask="####-##-##",
                                            defaultValue=obs_date,
                                            validRequired=False,
                                            size=(200, -1),
                                            style=wx.TE_PROCESS_ENTER)
        self.obs_date_txt.SetMaxLength(10)
        self.obs_date_txt.Bind(wx.EVT_TEXT_ENTER, self.onEnter)
        main_sizer.Add(self.row_builder(
            [obs_date_lbl, self.obs_date_txt]), 0, wx.ALL)
        # buttons
        ok_btn = wx.Button(self, label="%s skeleton" % title)
        ok_btn.Bind(wx.EVT_BUTTON, self.on_record)
        btn_sizer.Add(ok_btn, 0, wx.ALL, 5)
        cancel_btn = wx.Button(self, wx.ID_CANCEL, "Cancel")
        cancel_btn.Bind(wx.EVT_BUTTON, self.on_close)
        btn_sizer.Add(cancel_btn, 0, wx.ALL, 5)
        main_sizer.Add(btn_sizer, 0, wx.CENTER)
        self.SetSizerAndFit(main_sizer)
    def onEnter(self, event):
        """ go to next ctrl """
        event.EventObject.Navigate()
    def get_data(self):
        """
        Gets the data from the widgets in the dialog.
        Returns a dict of field values, or None (after showing an error
        message) when the required Site/Skeleton fields are empty.
        NOTE(review): show_message is a module-level helper defined elsewhere.
        """
        tmp_skeleton_dict = {}
        site = self.site_txt.GetValue()
        location = self.location_txt.GetValue()
        skeleton = self.skeleton_txt.GetValue()
        observer = self.observer_txt.GetValue()
        obs_date = self.obs_date_txt.GetValue()
        if site == "" or skeleton == "":
            show_message("Site and Skeleton are required!", "Error")
            return None
        tmp_skeleton_dict["site"] = site
        tmp_skeleton_dict["location"] = location
        tmp_skeleton_dict["skeleton"] = skeleton
        tmp_skeleton_dict["observer"] = observer
        tmp_skeleton_dict["obs_date"] = obs_date
        return tmp_skeleton_dict
    def on_add(self):
        """
        Add the record to the database.
        """
        self.skeleton_dict = self.get_data()
        if self.skeleton_dict is None:
            return
        self.skeleton_id = controller.add_record(
            self.session, self.skeleton_dict)
        self.result = 1
        self.Close()
    def on_close(self, event):
        """
        Close the dialog.
        """
        self.EndModal(wx.ID_CANCEL)
    def on_edit(self):
        """
        Edit a record in the database.
        """
        self.skeleton_dict = self.get_data()
        # BUG FIX: get_data() returns None when required fields are missing.
        # on_add() already guarded this; on_edit() previously fell through and
        # crashed inside controller.edit_record with a None dict.
        if self.skeleton_dict is None:
            return
        controller.edit_record(
            self.session, self.selected_row.skeleton_id, self.skeleton_dict)
        # Mirror the saved values back onto the in-memory row.
        self.selected_row.site = self.skeleton_dict['site']
        self.selected_row.location = self.skeleton_dict['location']
        self.selected_row.skeleton = self.skeleton_dict['skeleton']
        self.selected_row.observer = self.skeleton_dict['observer']
        self.selected_row.obs_date = self.skeleton_dict['obs_date']
        self.Close()
    def on_record(self, event):
        """
        Add or edit a record depending on the dialog mode.
        """
        if self.addRecord:
            self.on_add()
        else:
            self.on_edit()
    def row_builder(self, widgets):
        """
        Helper function for building a label + control row.
        """
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        lbl, txt = widgets
        sizer.Add(lbl, 0, wx.ALL, 5)
        sizer.Add(txt, 1, wx.ALL, 5)
        return sizer
class IntProperty2(wxpg.PGProperty):
    """\
    This is a simple re-implementation of wxIntProperty.
    """
    def __init__(self, label, name=wxpg.PG_LABEL, value=-1):
        wxpg.PGProperty.__init__(self, label, name)
        self.SetValue(value)
    def GetClassName(self):
        """\
        Return the property class name used by the propgrid machinery.
        (Not 100% necessary; may be automated in future wx versions.)
        """
        return "IntProperty2"
    def DoGetEditorClass(self):
        # Edit the value with a plain text control.
        return wxpg.PropertyGridInterface.GetEditorByName("TextCtrl")
    def ValueToString(self, value, flags):
        return str(value)
    def StringToValue(self, s, flags):
        """Convert `s` to an int.

        Returns (True, newValue) when the value parsed and changed;
        (False, None) when unchanged or not a number.

        BUG FIX: the original could fall off the end of the function
        (implicitly returning None) on the parsed-but-unchanged path;
        the terminal return now covers both that path and the error path,
        matching IntToValue.
        """
        try:
            v = int(s)
            if self.GetValue() != v:
                return (True, v)
        except (ValueError, TypeError):
            if flags & wxpg.PG_REPORT_ERROR:
                wx.MessageBox("Cannot convert '%s' into a number." % s, "Error")
        return (False, None)
    def IntToValue(self, v, flags):
        """Return (True, v) when the value changed, else (False, None)."""
        if (self.GetValue() != v):
            return (True, v)
        return (False, None)
    def ValidateValue(self, value, validationInfo):
        """Accept only values in the range [-1, 1000] (and reject None)."""
        # Just test this function to make sure validationInfo and
        # wxPGVFBFlags work properly.
        oldvfb__ = validationInfo.GetFailureBehavior()
        # Mark the cell if validation failed
        # validationInfo.SetFailureBehavior(wxpg.PG_VFB_MARK_CELL)
        # BUG FIX: `is None` instead of `== None` for the identity check.
        if value is None or value < -1 or value > 1000:
            return False
        return True
class PreservationDialog(wx.Dialog):
"""
dialog for edit state of preservation
"""
def my_enum_prep(self, name, value=-1):
return wxpg.EnumProperty(name, name,
['-1 -Not determined',
'0 - None',
'1 - up to 25%',
'2 - up to 50%',
'3 - up to 75%',
'4 - up to 100%'],
[-1, 0, 1, 2, 3, 4],
value)
def __init__(self, session, row, title="State of preservation"):
"""
Constructor
"""
super().__init__(None, title="{}: {}".format(title, row.skeleton))
self.selected_row = row
self.session = session
self.result = 0
self.skeleton_id = row.skeleton_id
skeleton_dict = {}
rekord = controller.find_skeleton(self.session, self.skeleton_id)
data = rekord.__dict__
data['vertebrae_remarks'] = rekord.vertebrae_remarks if rekord.vertebrae_remarks != None else ''
for k, v in data.items():
if v is None:
data[k] = -1
# GUI project
self.panel = panel = wx.Panel(self, wx.ID_ANY)
topsizer = wx.BoxSizer(wx.VERTICAL)
self.pg = pg = wxpg.PropertyGrid(panel, style=wxpg.PG_SPLITTER_AUTO_CENTER)
# SetPropertyValidator
# Bg Colour for 0 column
bgcCell = wx.Colour(219, 233, 255)
# Add properties
# Skull
pg.Append(wxpg.PropertyCategory("Skull inventory"))
pg.Append(self.my_enum_prep("Frontal", value=data['frontal']))
pg.SetPropertyCell("Frontal", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Sphenoid", value=data['sphenoid']))
pg.SetPropertyCell("Sphenoid", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Mandible", value=data['mandible']))
pg.SetPropertyCell("Mandible", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Parietal left", value=data['parietal_l']))
pg.SetPropertyCell("Parietal left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Parietal right", value=data['parietal_r']))
pg.SetPropertyCell("Parietal right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Nasal left", value=data['nasal_l']))
pg.SetPropertyCell("Nasal left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Nasal right", value=data['nasal_r']))
pg.SetPropertyCell("Nasal right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Palatine left", value=data['palatine_l']))
pg.SetPropertyCell("Palatine left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Palatine right", value=data['palatine_r']))
pg.SetPropertyCell("Palatine right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Occipital", value=data['occipital']))
pg.SetPropertyCell("Occipital", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Maxilla left", value=data['maxilla_l']))
pg.SetPropertyCell("Maxilla left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Maxilla right", value=data['maxilla_r']))
pg.SetPropertyCell("Maxilla right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Lacrimal left", value=data['lacrimal_l']))
pg.SetPropertyCell("Lacrimal left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Lacrimal right", value=data['lacrimal_r']))
pg.SetPropertyCell("Lacrimal right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Temporal left", value=data['temporal_l']))
pg.SetPropertyCell("Temporal left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Temporal right", value=data['temporal_r']))
pg.SetPropertyCell("Temporal right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Zygomatic left", value=data['zygomatic_l']))
pg.SetPropertyCell("Zygomatic left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Zygomatic right", value=data['zygomatic_r']))
pg.SetPropertyCell("Zygomatic right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Orbit left", value=data['orbit_l']))
pg.SetPropertyCell("Orbit left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Orbit right", value=data['orbit_r']))
pg.SetPropertyCell("Orbit right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ethmoid", value=data['ethmoid']))
pg.SetPropertyCell("Ethmoid", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Thyroid", value=data['thyroid']))
pg.SetPropertyCell("Thyroid", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Hyoid", value=data['hyoid']))
pg.SetPropertyCell("Hyoid", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Calotte", value=data['calotte']))
pg.SetPropertyCell("Calotte", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
# Post-cranial skeleton
pg.Append(wxpg.PropertyCategory("Post-cranial skeleton inventory"))
pg.Append(self.my_enum_prep("Ilium left", value=data['ilium_l']))
pg.SetPropertyCell("Ilium left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ilium right", value=data['ilium_r']))
pg.SetPropertyCell("Ilium right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Scapula left", value=data['scapula_l']))
pg.SetPropertyCell("Scapula left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Scapula right", value=data['scapula_r']))
pg.SetPropertyCell("Scapula right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Manubrium", value=data['manubrium']))
pg.SetPropertyCell("Manubrium", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ischium left", value=data['ischium_l']))
pg.SetPropertyCell("Ischium left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ischium right", value=data['ischium_r']))
pg.SetPropertyCell("Ischium right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Patella left", value=data['patella_l']))
pg.SetPropertyCell("Patella left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Patella right", value=data['patella_r']))
pg.SetPropertyCell("Patella right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("C sterni", value=data['c_sterni']))
pg.SetPropertyCell("C sterni", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Pubic left", value=data['pubic_l']))
pg.SetPropertyCell("Pubic left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Pubic right", value=data['pubic_r']))
pg.SetPropertyCell("Pubic right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("X process", value=data['x_process']))
pg.SetPropertyCell("X process", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Sacrum", value=data['sacrum']))
pg.SetPropertyCell("Sacrum", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Coccyx", value=data['coccyx']))
pg.SetPropertyCell("Coccyx", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
# Long bones
pg.Append(wxpg.PropertyCategory("Long bones"))
pg.Append(self.my_enum_prep("Clavicle left D js", value=data['clavicle_l_djs']))
pg.SetPropertyCell("Clavicle left D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Clavicle left D 1/3", value=data['clavicle_l_d13']))
pg.SetPropertyCell("Clavicle left D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Clavicle left M 1/3", value=data['clavicle_l_m13']))
pg.SetPropertyCell("Clavicle left M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Clavicle left P 1/3", value=data['clavicle_l_p13']))
pg.SetPropertyCell("Clavicle left P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Clavicle left P js", value=data['clavicle_l_pjs']))
pg.SetPropertyCell("Clavicle left P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Clavicle right D js", value=data['clavicle_r_djs']))
pg.SetPropertyCell("Clavicle right D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Clavicle right D 1/3", value=data['clavicle_r_d13']))
pg.SetPropertyCell("Clavicle right D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Clavicle right M 1/3", value=data['clavicle_r_m13']))
pg.SetPropertyCell("Clavicle right M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Clavicle right P 1/3", value=data['clavicle_r_p13']))
pg.SetPropertyCell("Clavicle right P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Clavicle right P js", value=data['clavicle_r_pjs']))
pg.SetPropertyCell("Clavicle right P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus left D js", value=data['humerus_l_djs']))
pg.SetPropertyCell("Humerus left D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus left D 1/3", value=data['humerus_l_d13']))
pg.SetPropertyCell("Humerus left D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus left M 1/3", value=data['humerus_l_m13']))
pg.SetPropertyCell("Humerus left M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus left P 1/3", value=data['humerus_l_p13']))
pg.SetPropertyCell("Humerus left P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus left P js", value=data['humerus_l_pjs']))
pg.SetPropertyCell("Humerus left P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus right D js", value=data['humerus_r_djs']))
pg.SetPropertyCell("Humerus right D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus right D 1/3", value=data['humerus_r_d13']))
pg.SetPropertyCell("Humerus right D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus right M 1/3", value=data['humerus_r_m13']))
pg.SetPropertyCell("Humerus right M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus right P 1/3", value=data['humerus_r_p13']))
pg.SetPropertyCell("Humerus right P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Humerus right P js", value=data['humerus_r_pjs']))
pg.SetPropertyCell("Humerus right P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius left D js", value=data['radius_l_djs']))
pg.SetPropertyCell("Radius left D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius left D 1/3", value=data['radius_l_d13']))
pg.SetPropertyCell("Radius left D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius left M 1/3", value=data['radius_l_m13']))
pg.SetPropertyCell("Radius left M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius left P 1/3", value=data['radius_l_p13']))
pg.SetPropertyCell("Radius left P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius left P js", value=data['radius_l_pjs']))
pg.SetPropertyCell("Radius left P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius right D js", value=data['radius_r_djs']))
pg.SetPropertyCell("Radius right D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius right D 1/3", value=data['radius_r_d13']))
pg.SetPropertyCell("Radius right D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius right M 1/3", value=data['radius_r_m13']))
pg.SetPropertyCell("Radius right M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius right P 1/3", value=data['radius_r_p13']))
pg.SetPropertyCell("Radius right P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Radius right P js", value=data['radius_r_pjs']))
pg.SetPropertyCell("Radius right P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna left D js", value=data['ulna_l_djs']))
pg.SetPropertyCell("Ulna left D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna left D 1/3", value=data['ulna_l_d13']))
pg.SetPropertyCell("Ulna left D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna left M 1/3", value=data['ulna_l_m13']))
pg.SetPropertyCell("Ulna left M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna left P 1/3", value=data['ulna_l_p13']))
pg.SetPropertyCell("Ulna left P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna left P js", value=data['ulna_l_pjs']))
pg.SetPropertyCell("Ulna left P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna right D js", value=data['ulna_r_djs']))
pg.SetPropertyCell("Ulna right D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna right D 1/3", value=data['ulna_r_d13']))
pg.SetPropertyCell("Ulna right D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna right M 1/3", value=data['ulna_r_m13']))
pg.SetPropertyCell("Ulna right M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna right P 1/3", value=data['ulna_r_p13']))
pg.SetPropertyCell("Ulna right P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Ulna right P js", value=data['ulna_r_pjs']))
pg.SetPropertyCell("Ulna right P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur left D js", value=data['femur_l_djs']))
pg.SetPropertyCell("Femur left D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur left D 1/3", value=data['femur_l_d13']))
pg.SetPropertyCell("Femur left D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur left M 1/3", value=data['femur_l_m13']))
pg.SetPropertyCell("Femur left M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur left P 1/3", value=data['femur_l_p13']))
pg.SetPropertyCell("Femur left P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur left P js", value=data['femur_l_pjs']))
pg.SetPropertyCell("Femur left P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur right D js", value=data['femur_r_djs']))
pg.SetPropertyCell("Femur right D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur right D 1/3", value=data['femur_r_d13']))
pg.SetPropertyCell("Femur right D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur right M 1/3", value=data['femur_r_m13']))
pg.SetPropertyCell("Femur right M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur right P 1/3", value=data['femur_r_p13']))
pg.SetPropertyCell("Femur right P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Femur right P js", value=data['femur_r_pjs']))
pg.SetPropertyCell("Femur right P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia left D js", value=data['tibia_l_djs']))
pg.SetPropertyCell("Tibia left D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia left D 1/3", value=data['tibia_l_d13']))
pg.SetPropertyCell("Tibia left D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia left M 1/3", value=data['tibia_l_m13']))
pg.SetPropertyCell("Tibia left M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia left P 1/3", value=data['tibia_l_p13']))
pg.SetPropertyCell("Tibia left P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia left P js", value=data['tibia_l_pjs']))
pg.SetPropertyCell("Tibia left P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia right D js", value=data['tibia_r_djs']))
pg.SetPropertyCell("Tibia right D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia right D 1/3", value=data['tibia_r_d13']))
pg.SetPropertyCell("Tibia right D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia right M 1/3", value=data['tibia_r_m13']))
pg.SetPropertyCell("Tibia right M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia right P 1/3", value=data['tibia_r_p13']))
pg.SetPropertyCell("Tibia right P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Tibia right P js", value=data['tibia_r_pjs']))
pg.SetPropertyCell("Tibia right P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula left D js", value=data['fibula_l_djs']))
pg.SetPropertyCell("Fibula left D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula left D 1/3", value=data['fibula_l_d13']))
pg.SetPropertyCell("Fibula left D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula left M 1/3", value=data['fibula_l_m13']))
pg.SetPropertyCell("Fibula left M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula left P 1/3", value=data['fibula_l_p13']))
pg.SetPropertyCell("Fibula left P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula left P js", value=data['fibula_l_pjs']))
pg.SetPropertyCell("Fibula left P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula right D js", value=data['fibula_r_djs']))
pg.SetPropertyCell("Fibula right D js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula right D 1/3", value=data['fibula_r_d13']))
pg.SetPropertyCell("Fibula right D 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula right M 1/3", value=data['fibula_r_m13']))
pg.SetPropertyCell("Fibula right M 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula right P 1/3", value=data['fibula_r_p13']))
pg.SetPropertyCell("Fibula right P 1/3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Fibula right P js", value=data['fibula_r_pjs']))
pg.SetPropertyCell("Fibula right P js", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals left 1st", value=data['metacarpals_l_1']))
pg.SetPropertyCell("Metacarpals left 1st", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals left 2nd", value=data['metacarpals_l_2']))
pg.SetPropertyCell("Metacarpals left 2nd", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals left 3rd", value=data['metacarpals_l_3']))
pg.SetPropertyCell("Metacarpals left 3rd", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals left 4th", value=data['metacarpals_l_4']))
pg.SetPropertyCell("Metacarpals left 4th", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals left 5th", value=data['metacarpals_l_5']))
pg.SetPropertyCell("Metacarpals left 5th", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals right 1st", value=data['metacarpals_r_1']))
pg.SetPropertyCell("Metacarpals right 1st", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals right 2nd", value=data['metacarpals_r_2']))
pg.SetPropertyCell("Metacarpals right 2nd", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals right 3rd", value=data['metacarpals_r_3']))
pg.SetPropertyCell("Metacarpals right 3rd", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals right 4th", value=data['metacarpals_r_4']))
pg.SetPropertyCell("Metacarpals right 4th", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metacarpals right 5th", value=data['metacarpals_r_5']))
pg.SetPropertyCell("Metacarpals right 5th", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals left 1st", value=data['metatarsals_l_1']))
pg.SetPropertyCell("Metatarsals left 1st", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals left 2nd", value=data['metatarsals_l_2']))
pg.SetPropertyCell("Metatarsals left 2nd", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals left 3rd", value=data['metatarsals_l_3']))
pg.SetPropertyCell("Metatarsals left 3rd", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals left 4th", value=data['metatarsals_l_4']))
pg.SetPropertyCell("Metatarsals left 4th", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals left 5th", value=data['metatarsals_l_5']))
pg.SetPropertyCell("Metatarsals left 5th", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals right 1st", value=data['metatarsals_r_1']))
pg.SetPropertyCell("Metatarsals right 1st", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals right 2nd", value=data['metatarsals_r_2']))
pg.SetPropertyCell("Metatarsals right 2nd", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals right 3rd", value=data['metatarsals_r_3']))
pg.SetPropertyCell("Metatarsals right 3rd", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals right 4th", value=data['metatarsals_r_4']))
pg.SetPropertyCell("Metatarsals right 4th", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Metatarsals right 5th", value=data['metatarsals_r_5']))
pg.SetPropertyCell("Metatarsals right 5th", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
# Vertebrae
pg.Append(wxpg.PropertyCategory("Vertebrae"))
pg.Append(wxpg.IntProperty("Vertebrae C 1", value=data['vertebrae_c_1']))
pg.SetPropertyCell("Vertebrae C 1", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae C 2", value=data['vertebrae_c_2']))
pg.SetPropertyCell("Vertebrae C 2", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae C 3", value=data['vertebrae_c_3']))
pg.SetPropertyCell("Vertebrae C 3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae C 4", value=data['vertebrae_c_4']))
pg.SetPropertyCell("Vertebrae C 4", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae C 5", value=data['vertebrae_c_5']))
pg.SetPropertyCell("Vertebrae C 5", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae T 1", value=data['vertebrae_t_1']))
pg.SetPropertyCell("Vertebrae T 1", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae T 2", value=data['vertebrae_t_2']))
pg.SetPropertyCell("Vertebrae T 2", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae T 3", value=data['vertebrae_t_3']))
pg.SetPropertyCell("Vertebrae T 3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae T 4", value=data['vertebrae_t_4']))
pg.SetPropertyCell("Vertebrae T 4", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae T 5", value=data['vertebrae_t_5']))
pg.SetPropertyCell("Vertebrae T 5", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae L 1", value=data['vertebrae_l_1']))
pg.SetPropertyCell("Vertebrae L 1", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae L 2", value=data['vertebrae_l_2']))
pg.SetPropertyCell("Vertebrae L 2", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae L 3", value=data['vertebrae_l_3']))
pg.SetPropertyCell("Vertebrae L 3", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae L 4", value=data['vertebrae_l_4']))
pg.SetPropertyCell("Vertebrae L 4", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Vertebrae L 5", value=data['vertebrae_l_5']))
pg.SetPropertyCell("Vertebrae L 5", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.StringProperty("Vertebrae remarks", value=data['vertebrae_remarks']))
pg.SetPropertyCell("Vertebrae remarks", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
# Ribs
pg.Append(wxpg.PropertyCategory("Ribs"))
pg.Append(wxpg.IntProperty("Ribs left whole", value=data['ribs_l_whole']))
pg.SetPropertyCell("Ribs left whole", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs left S end", value=data['ribs_l_send']))
pg.SetPropertyCell("Ribs left S end", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs left V end", value=data['ribs_l_vend']))
pg.SetPropertyCell("Ribs left V end", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs left Frag.", value=data['ribs_l_frag']))
pg.SetPropertyCell("Ribs left Frag.", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs right whole", value=data['ribs_r_whole']))
pg.SetPropertyCell("Ribs right whole", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs right S end", value=data['ribs_r_send']))
pg.SetPropertyCell("Ribs right S end", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs right V end", value=data['ribs_r_vend']))
pg.SetPropertyCell("Ribs right V end", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs right Frag.", value=data['ribs_r_frag']))
pg.SetPropertyCell("Ribs right Frag.", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs unknown whole", value=data['ribs_u_whole']))
pg.SetPropertyCell("Ribs unknown whole", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs unknown S end", value=data['ribs_u_send']))
pg.SetPropertyCell("Ribs unknown S end", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs unknown V end", value=data['ribs_u_vend']))
pg.SetPropertyCell("Ribs unknown V end", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Ribs unknown Frag.", value=data['ribs_u_frag']))
pg.SetPropertyCell("Ribs unknown Frag.", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
# Phalanges
pg.Append(wxpg.PropertyCategory("Phalanges"))
pg.Append(wxpg.IntProperty("Phalanges hand proximal", value=data['phalanges_hand_p']))
pg.SetPropertyCell("Phalanges hand proximal", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Phalanges hand medial", value=data['phalanges_hand_m']))
pg.SetPropertyCell("Phalanges hand medial", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Phalanges hand distal", value=data['phalanges_hand_d']))
pg.SetPropertyCell("Phalanges hand distal", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Phalanges foot proximal", value=data['phalanges_foot_p']))
pg.SetPropertyCell("Phalanges foot proximal", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Phalanges foot medial", value=data['phalanges_foot_m']))
pg.SetPropertyCell("Phalanges foot medial", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Phalanges foot distal", value=data['phalanges_foot_d']))
pg.SetPropertyCell("Phalanges foot distal", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
# Carpals - tarsals
pg.Append(wxpg.PropertyCategory("Carpals - tarsals"))
pg.Append(self.my_enum_prep("Scaphoid left", value=data['scaphoid_l']))
pg.SetPropertyCell("Scaphoid left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Scaphoid right", value=data['scaphoid_r']))
pg.SetPropertyCell("Scaphoid right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Lunate left", value=data['lunate_l']))
pg.SetPropertyCell("Lunate left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Lunate right", value=data['lunate_r']))
pg.SetPropertyCell("Lunate right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Triquetral left", value=data['triquetral_l']))
pg.SetPropertyCell("Triquetral left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Triquetral right", value=data['triquetral_r']))
pg.SetPropertyCell("Triquetral right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Pisiform left", value=data['pisiform_l']))
pg.SetPropertyCell("Pisiform left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Pisiform right", value=data['pisiform_r']))
pg.SetPropertyCell("Pisiform right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Trapezium left", value=data['trapezium_l']))
pg.SetPropertyCell("Trapezium left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Trapezium right", value=data['trapezium_r']))
pg.SetPropertyCell("Trapezium right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Trapezoid left", value=data['trapezoid_l']))
pg.SetPropertyCell("Trapezoid left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Trapezoid right", value=data['trapezoid_r']))
pg.SetPropertyCell("Trapezoid right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Capitate left", value=data['capitate_l']))
pg.SetPropertyCell("Capitate left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Capitate right", value=data['capitate_r']))
pg.SetPropertyCell("Capitate right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Hamate left", value=data['hamate_l']))
pg.SetPropertyCell("Hamate left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Hamate right", value=data['hamate_r']))
pg.SetPropertyCell("Hamate right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Sesamoids hand", value=data['sesamoids_hand']))
pg.SetPropertyCell("Sesamoids hand", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Talus left", value=data['talus_l']))
pg.SetPropertyCell("Talus left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Talus right", value=data['talus_r']))
pg.SetPropertyCell("Talus right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Calcaneus left", value=data['calcaneus_l']))
pg.SetPropertyCell("Calcaneus left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Calcaneus right", value=data['calcaneus_r']))
pg.SetPropertyCell("Calcaneus right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("1st Cun left", value=data['cun_1_l']))
pg.SetPropertyCell("1st Cun left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("1st Cun right", value=data['cun_1_r']))
pg.SetPropertyCell("1st Cun right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("2nd Cun left", value=data['cun_2_l']))
pg.SetPropertyCell("2nd Cun left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("2nd Cun right", value=data['cun_2_r']))
pg.SetPropertyCell("2nd Cun right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("3rd Cun left", value=data['cun_3_l']))
pg.SetPropertyCell("3rd Cun left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("3rd Cun right", value=data['cun_3_r']))
pg.SetPropertyCell("3rd Cun right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Navicular left", value=data['navicular_l']))
pg.SetPropertyCell("Navicular left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Navicular right", value=data['navicular_r']))
pg.SetPropertyCell("Navicular right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Cuboid left", value=data['cuboid_l']))
pg.SetPropertyCell("Cuboid left", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(self.my_enum_prep("Cuboid right", value=data['cuboid_r']))
pg.SetPropertyCell("Cuboid right", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
pg.Append(wxpg.IntProperty("Sesamoids foot", value=data['sesamoids_foot']))
pg.SetPropertyCell("Sesamoids foot", 0, text=wx.propgrid.PG_LABEL, bgCol=bgcCell)
topsizer.Add(pg, 1, wx.EXPAND)
rowsizer = wx.BoxSizer(wx.HORIZONTAL)
btn_ok = wx.Button(panel, -1, "&Save")
btn_ok.Bind(wx.EVT_BUTTON, self.on_save_preservation)
rowsizer.Add(btn_ok, 1)
btn_cancel = wx.Button(panel, -1, "&Cancel")
btn_cancel.Bind(wx.EVT_BUTTON, self.on_cancel_preservation)
rowsizer.Add(btn_cancel, 1)
topsizer.Add(rowsizer, 0, wx.EXPAND)
panel.SetSizer(topsizer)
topsizer.SetSizeHints(panel)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(panel, 1, wx.EXPAND)
self.SetSizer(sizer)
self.SetAutoLayout(True)
self.SetSize((400, 600))
pg.AddActionTrigger(wx.propgrid.PG_ACTION_NEXT_PROPERTY,
wx.WXK_RETURN)
pg.DedicateKey(wx.WXK_RETURN)
self.pg.SelectProperty('Frontal', True)
self.pg.SetFocus()
def on_save_preservation(self, event):
    """Read the edited preservation grid, persist it for the selected skeleton, and close."""
    preservation = self.get_preservation_data()
    self.skeleton_dict = preservation
    controller.edit_preservation(
        self.session, self.selected_row.skeleton_id, preservation)
    self.Close()
def on_cancel_preservation(self, event):
    # Dismiss the preservation dialog without saving any changes.
    self.EndModal(wx.ID_CANCEL)
def get_preservation_data(self):
    """Collect the preservation grid values into a model-ready dict.

    Reads every property from the wx PropertyGrid (``self.pg``) and maps
    each grid label to its skeleton-model field name.  Returns a dict with
    one entry per skeletal element (195 keys), inserted in the same order
    as the original hand-written assignments so dict iteration order is
    unchanged.
    """
    d = self.pg.GetPropertyValues(inc_attributes=True)
    data = {}
    # Skull, torso and pelvis elements: one grid label per model field.
    simple = (
        ('frontal', 'Frontal'),
        ('parietal_l', 'Parietal left'),
        ('parietal_r', 'Parietal right'),
        ('occipital', 'Occipital'),
        ('temporal_l', 'Temporal left'),
        ('temporal_r', 'Temporal right'),
        ('sphenoid', 'Sphenoid'),
        ('nasal_l', 'Nasal left'),
        ('nasal_r', 'Nasal right'),
        ('maxilla_l', 'Maxilla left'),
        ('maxilla_r', 'Maxilla right'),
        ('zygomatic_l', 'Zygomatic left'),
        ('zygomatic_r', 'Zygomatic right'),
        ('mandible', 'Mandible'),
        ('palatine_l', 'Palatine left'),
        ('palatine_r', 'Palatine right'),
        ('lacrimal_l', 'Lacrimal left'),
        ('lacrimal_r', 'Lacrimal right'),
        ('orbit_l', 'Orbit left'),
        ('orbit_r', 'Orbit right'),
        ('ethmoid', 'Ethmoid'),
        ('thyroid', 'Thyroid'),
        ('hyoid', 'Hyoid'),
        ('calotte', 'Calotte'),
        ('ilium_l', 'Ilium left'),
        ('ilium_r', 'Ilium right'),
        ('scapula_l', 'Scapula left'),
        ('scapula_r', 'Scapula right'),
        ('manubrium', 'Manubrium'),
        ('ischium_l', 'Ischium left'),
        ('ischium_r', 'Ischium right'),
        ('patella_l', 'Patella left'),
        ('patella_r', 'Patella right'),
        ('c_sterni', 'C sterni'),
        ('pubic_l', 'Pubic left'),
        ('pubic_r', 'Pubic right'),
        ('x_process', 'X process'),
        ('sacrum', 'Sacrum'),
        ('coccyx', 'Coccyx'),
    )
    for key, label in simple:
        data[key] = d[label]
    # Long bones: five segments per side -- distal joint surface, distal /
    # middle / proximal thirds, proximal joint surface.
    segments = (('djs', 'D js'), ('d13', 'D 1/3'), ('m13', 'M 1/3'),
                ('p13', 'P 1/3'), ('pjs', 'P js'))
    for bone in ('clavicle', 'humerus', 'radius', 'ulna',
                 'femur', 'tibia', 'fibula'):
        for side, s in (('left', 'l'), ('right', 'r')):
            for seg_key, seg_label in segments:
                data['%s_%s_%s' % (bone, s, seg_key)] = \
                    d['%s %s %s' % (bone.capitalize(), side, seg_label)]
    # Metacarpals and metatarsals: rays 1-5 per side.
    ordinals = ('1st', '2nd', '3rd', '4th', '5th')
    for bone in ('metacarpals', 'metatarsals'):
        for side, s in (('left', 'l'), ('right', 'r')):
            for n, ordinal in enumerate(ordinals, 1):
                data['%s_%s_%d' % (bone, s, n)] = \
                    d['%s %s %s' % (bone.capitalize(), side, ordinal)]
    # Vertebrae: cervical, thoracic and lumbar 1-5, plus free-text remarks.
    for region in ('c', 't', 'l'):
        for n in range(1, 6):
            data['vertebrae_%s_%d' % (region, n)] = \
                d['Vertebrae %s %d' % (region.upper(), n)]
    data['vertebrae_remarks'] = d['Vertebrae remarks']
    # Ribs: whole / sternal end / vertebral end / fragments, per side
    # (including ribs of unknown side).
    rib_parts = (('whole', 'whole'), ('send', 'S end'),
                 ('vend', 'V end'), ('frag', 'Frag.'))
    for side, s in (('left', 'l'), ('right', 'r'), ('unknown', 'u')):
        for part_key, part_label in rib_parts:
            data['ribs_%s_%s' % (s, part_key)] = \
                d['Ribs %s %s' % (side, part_label)]
    # Phalanges: proximal / medial / distal rows for hand and foot.
    for limb in ('hand', 'foot'):
        for row_key, row_label in (('p', 'proximal'), ('m', 'medial'),
                                   ('d', 'distal')):
            data['phalanges_%s_%s' % (limb, row_key)] = \
                d['Phalanges %s %s' % (limb, row_label)]
    # Carpals (paired left/right).
    for bone in ('scaphoid', 'lunate', 'triquetral', 'pisiform',
                 'trapezium', 'trapezoid', 'capitate', 'hamate'):
        data[bone + '_l'] = d[bone.capitalize() + ' left']
        data[bone + '_r'] = d[bone.capitalize() + ' right']
    data['sesamoids_hand'] = d['Sesamoids hand']
    # Tarsals (paired left/right).
    for bone in ('talus', 'calcaneus'):
        data[bone + '_l'] = d[bone.capitalize() + ' left']
        data[bone + '_r'] = d[bone.capitalize() + ' right']
    # Cuneiforms 1-3.
    for n, ordinal in enumerate(('1st', '2nd', '3rd'), 1):
        data['cun_%d_l' % n] = d['%s Cun left' % ordinal]
        data['cun_%d_r' % n] = d['%s Cun right' % ordinal]
    for bone in ('navicular', 'cuboid'):
        data[bone + '_l'] = d[bone.capitalize() + ' left']
        data[bone + '_r'] = d[bone.capitalize() + ' right']
    data['sesamoids_foot'] = d['Sesamoids foot']
    return data
def show_message(message, caption, flag=wx.ICON_ERROR):
    """Display a modal message dialog with the given caption and style flag."""
    dialog = wx.MessageDialog(None, message=message,
                              caption=caption, style=flag)
    dialog.ShowModal()
    dialog.Destroy()
def ask_message(message, caption):
    """
    Ask a yes/no question; return True when the user answers Yes.
    """
    msg = wx.MessageDialog(None, message=message, caption=caption,
                           style=wx.YES_NO | wx.NO_DEFAULT)
    answer = msg.ShowModal()
    # Destroy the native dialog explicitly (as show_message does);
    # the original returned without destroying it, leaking the handle.
    msg.Destroy()
    return answer == wx.ID_YES
|
from bs4 import BeautifulSoup
import requests
import urllib3
def get_homework_info(Username, Password):
    """Log in to qytsystem.qytang.com and summarize the homework table.

    Returns a list ``[dict_courses_num, dict_courses_level]`` where the
    first dict counts homework rows per course and the second counts rows
    per grade value.
    """
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    url = 'https://qytsystem.qytang.com/accounts/login/'
    username = str(Username)
    password = str(Password)
    # NOTE: header names fixed -- the original used 'Accept - Encoding' /
    # 'Accept - Language' with embedded spaces, which are invalid HTTP
    # header field names.
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer': 'https://qytsystem.qytang.com/accounts/login/',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }
    # Keep one session so the login cookie persists across requests.
    client = requests.session()
    # Fetch the login page to obtain the CSRF token.
    qytang_home = client.get(url, verify=False)
    qytang_soup = BeautifulSoup(qytang_home.text, 'lxml')
    csrftoken = qytang_soup.find('input', attrs={'type': "hidden", "name": "csrfmiddlewaretoken"}).get('value')
    # POST username, password and the CSRF token to the login page.
    login_data = {'username': username, 'password': password, "csrfmiddlewaretoken": csrftoken}
    client.post(url, headers=header, data=login_data, verify=False)
    r = client.get('https://qytsystem.qytang.com/python_enhance/python_enhance_homework')
    home_work_soup = BeautifulSoup(r.text, 'lxml')
    dict_courses_num = {}
    dict_courses_level = {}
    rows = home_work_soup.find('table', id='table-for-student').find('tbody').find_all('tr')
    for row in rows:
        cells = row.find_all('td')
        # Column 1 holds the course name, column 7 the grade.
        course = cells[1].text
        level = cells[7].text
        dict_courses_num[course] = dict_courses_num.get(course, 0) + 1
        dict_courses_level[level] = dict_courses_level.get(level, 0) + 1
    return [dict_courses_num, dict_courses_level]
|
#!/usr/bin/python
import httplib2
import os
import sys
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
import datetime
import sys
from sendgrid import *
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
# OAuth client-secrets file for the argparser/run_flow code path.
# NOTE(review): two different secrets files are referenced in this module
# (client_secrets_1.json here, client_secret.json below) -- confirm which
# one is actually deployed.
CLIENT_SECRETS_FILE = "client_secrets_1.json"
# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user"s account.
YOUTUBE_READ_WRITE_SCOPE = "https://www.googleapis.com/auth/youtube"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
                                   CLIENT_SECRETS_FILE))
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = "https://www.googleapis.com/auth/youtube"
# Secrets file and app name used by get_credentials() below.
CLIENT_SECRET_FILE = "client_secret.json"
APPLICATION_NAME = "Gmail API Python Quickstart"
def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    home_dir = os.path.expanduser("~")
    credential_dir = os.path.join(home_dir, ".credentials")
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   "youtube-python-quickstart.json")
    # print() call instead of the Python-2-only print statement, matching
    # the print() usage a few lines below.
    print(credential_path)
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        # No (valid) cached credentials: run the interactive OAuth2 flow
        # and persist the result for next time.
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        credentials = tools.run_flow(flow, store)
        print("Storing credentials to " + credential_path)
    return credentials
# This method calls the API"s youtube.subscriptions.insert method to add a
# subscription to the specified channel.
def get_subscription(youtube):
    """Return the authenticated user's channel subscriptions (up to 50)."""
    request = youtube.subscriptions().list(part="snippet", mine=True,
                                           maxResults=50)
    return request.execute()
def get_videos(youtube, channelId):
    """Return up to 50 videos of the given channel, ordered by rating."""
    request = youtube.search().list(part="snippet", channelId=channelId,
                                    maxResults=50, order="rating")
    return request.execute()
def main():
    """Fetch the user's subscriptions and the top-rated videos per channel."""
    subscriptions = {}
    videos = {}
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build("youtube", "v3", http=http)
    # Map channel title -> channel id for every subscription.
    for item in get_subscription(service)["items"]:
        snippet = item["snippet"]
        subscriptions[snippet["title"]] = snippet["resourceId"]["channelId"]
    print(subscriptions)
    for title in subscriptions.keys():
        # Call the API once per channel; the original called get_videos
        # twice (once to print, once to store), doubling quota usage.
        videos[title] = get_videos(service, subscriptions[title])
        print(videos[title])
    print(videos)

if __name__ == "__main__":
    main()
# if __name__ == "__main__":
# argparser.add_argument("--channel-id", help="ID of the channel to subscribe to.",
# default="UCtVd0c0tGXuTSbU5d8cSBUg")
# args = argparser.parse_args()
# youtube = get_authenticated_service(args)
# try:
# channel_title = add_subscription()
# print channel_title
# except HttpError, e:
# print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
# else:
# print "A subscription to "%s" was added." % channel_title |
#!/usr/local/bin/python
import numpy as np
import datetime as dt
import argparse
import pdb
import matplotlib as mpl
mpl.use("Agg")
from mpl_toolkits.basemap import Basemap, cm
import matplotlib.pyplot as plt
############################# set parameters #############################
# Grid limits -- not used in this part of the script; presumably consumed
# elsewhere -- TODO confirm.
MAXY = 200
MAXC = 500
cellsize = 0.0625
# Simulation period (daily VIC output, calendar days inclusive).
start_date = dt.datetime(year=1920, month=1, day=1)
end_date = dt.datetime(year=2014, month=9, day=30)
duration = end_date - start_date
nday = duration.days + 1
nyear = (end_date.year - start_date.year) + 1
start_year = start_date.year
end_year = end_date.year
# Number of years the per-cell trend is scaled by when plotting.
nyear_plot = 55
parser = argparse.ArgumentParser()
parser.add_argument("--latlonlist", help="Latlon list")
parser.add_argument("--arealist", help="area list [lat; lon; area]")
parser.add_argument("--ind", help="input VIC output files directory")
parser.add_argument("--outf", help="output file")
parser.add_argument("--outmap", help="output map file")
args = parser.parse_args()
# One row per grid cell; arealist rows are expected to match latlonlist rows.
latlonlist = np.loadtxt(args.latlonlist)
arealist = np.loadtxt(args.arealist)
nfile = np.shape(latlonlist)[0]
############################# load data and calculate #######################
tot_area = 0
runoff_CT_ave = np.zeros(nyear)  # area-weighted mean runoff timing (CT) per water year
trend_CT = np.empty(nfile)       # linear CT trend [day/year] per grid cell
for i in range(nfile):
    print('Grid cell %d' % (i + 1))
    if arealist[i, 0] != latlonlist[i, 0] or arealist[i, 1] != latlonlist[i, 1]:
        # Abort with a nonzero exit status; the original printed the error
        # and called exit(), which exits with status 0.
        raise SystemExit("Error: area list does not match with latlon list!")
    area = arealist[i, 2]
    tot_area = tot_area + area
    # load daily flux data for this grid cell
    filename = 'fluxes_%.5f_%.5f' % (latlonlist[i, 0], latlonlist[i, 1])
    data = np.loadtxt('%s/%s' % (args.ind, filename))  # year; month; day; prec; evap; runoff; baseflow; airT; sm1; sm2; sm3; swe
    # Accumulate the runoff-weighted day-of-water-year sum (CT numerator)
    # and the total runoff per water year.
    runoff_CT_year = np.zeros(nyear)
    runoff_year = np.zeros(nyear)
    for t in range(nday):
        date = start_date + dt.timedelta(days=t)
        year = date.year
        month = date.month
        year_ind = year - start_year  # year index; starts from 0
        flow = data[t, 5] + data[t, 6]  # runoff + baseflow
        if month >= 10:
            # Oct-Dec belongs to the NEXT water year; day count starts
            # from Oct 1.  (The original used Nov 1 here, which gave
            # negative offsets for October days and contradicted the
            # Jan-Sep branch below.)
            ti = (date - dt.datetime(year=year, month=10, day=1)).days + 1
            runoff_CT_year[year_ind + 1] += flow * ti
            runoff_year[year_ind + 1] += flow
        else:
            # Jan-Sep belongs to this water year (which began Oct 1 of the
            # previous calendar year).
            ti = (date - dt.datetime(year=year - 1, month=10, day=1)).days + 1
            runoff_CT_year[year_ind] += flow * ti
            runoff_year[year_ind] += flow
    for y in range(nyear):
        runoff_CT_year[y] = runoff_CT_year[y] / runoff_year[y]  # centroid timing [day]
        runoff_CT_ave[y] = runoff_CT_ave[y] + runoff_CT_year[y] * area
    # calculate linear trend of CT for this grid cell (only consider 1948-2002)
    x = np.arange(1948, 2003)
    A = np.array([x, np.ones(np.shape(x)[0])])
    y = runoff_CT_year[28:83]
    w = np.linalg.lstsq(A.T, y)[0]
    trend_CT[i] = w[0]  # day/year
runoff_CT_ave = runoff_CT_ave / tot_area
############################## plot trend map ##################################
# Scatter-plot the per-cell CT trend (scaled to nyear_plot years) on a
# western-US map extent (lat 32-44, lon -126..-112).
fig = plt.figure(figsize=(8,8))
ax = plt.axes([0, 0.08, 1, 0.75])
m = Basemap(projection='mill', llcrnrlat=32, urcrnrlat=44,\
            llcrnrlon=-126, urcrnrlon=-112, resolution='l')
m.drawcoastlines()
m.drawparallels(np.arange(-90., 91., 5.), labels=[True,True,False,False], fontsize=16)
m.drawmeridians(np.arange(-180., 181., 5.), labels=[False,False,True,True], fontsize=16)
m.drawmapboundary(fill_color='0.85')
m.fillcontinents(zorder=0, color='0.75')
m.drawcountries()
m.drawstates()
# Project cell lat/lon into map coordinates (note column order: lon, lat).
x, y = m(latlonlist[:,1], latlonlist[:,0])
data = trend_CT * nyear_plot  # CT change over nyear_plot years [day]
cs = plt.scatter(x[0:nfile], y[0:nfile], s=10, c=data, cmap='RdBu', vmax=10, vmin=-10, marker='s', linewidth=0)
cbar = plt.colorbar(cs)
cbar.set_label('CT change (day)')
plt.text(0.5, 1.1, "Runoff CT change over 1948-2002", \
         horizontalalignment='center', \
         fontsize=16, transform = ax.transAxes)
fig.savefig(args.outmap, format='png')
############################### print out results ################################
#f = open(args.outf, 'w')
#for y in range(1, nyear-1): # only print out 1921-2013
# f.write("%4d %.4f\n" %(start_year+y, runoff_CT_ave[y]))
#f.close()
|
"""Replay analysis - playground
"""
import bambi.tools.matlab
import bambi.tools.activity_loading
import bambi.analysis.maximum_likelihood
import matplotlib.pyplot as plt
# --- Load one recorded session (hard-coded to c40m3, day 1) ----------------
events_filename = r'D:\data_for_analyzing_real_time_event_detector\c40m3_day1\events.mat'
frame_log_filename = r'D:\data_for_analyzing_real_time_event_detector\c40m3_day1\frameLog.csv'
movment_filename = r'D:\data_for_analyzing_real_time_event_detector\c40m3_day1\behavioral.mat'
events_mat = bambi.tools.matlab.load_events_file(events_filename)
frames_indices = bambi.tools.matlab.load_frame_log_file(frame_log_filename)
movment_data = bambi.tools.matlab.load_mvmt_file(movment_filename)
# Split the continuous event trace into per-trial segments.
events_trace = bambi.tools.activity_loading.order_events_into_trials(events_mat, frames_indices)
# Train maximum likelihood decoder on all trials but one and test on the remaining
# trial
linear_trials_indices =[2,3,5,6]
[train_bins1, train_events] = bambi.tools.activity_loading.create_training_data(movment_data, events_trace, linear_trials_indices)
# Re-bin positions; 24 and 3 are presumably the bin count and bin width --
# TODO confirm against wide_binning's signature.
train_bins = bambi.tools.activity_loading.wide_binning(train_bins1,24, 3)
p_neuron_bin = bambi.analysis.maximum_likelihood.create_delayed_p_r_s(train_bins, train_events, 20)
# Held-out trial 4 is used for testing.
[test_bins1, test_events] = bambi.tools.activity_loading.create_training_data(movment_data, events_trace, [4])
test_bins = bambi.tools.activity_loading.wide_binning(test_bins1,24, 3)
estimated_bins, estimated_prob = bambi.tools.activity_loading.decode_entire_trial (test_events, p_neuron_bin, 20)
# Plot true (red) vs decoded (blue) bins on the left, probabilities on the right.
fig, axx = plt.subplots(1,2)
axx[0].plot(test_bins,'r')
axx[0].plot(estimated_bins,'b')
axx[1].plot(estimated_prob)
fig.show()
raw_input('press enter')
|
# Carpool capacity exercise: how many cars, drivers and passengers fit together.
cars = 100                # number of cars available
space_in_a_car = 4.0      # average seats per car (deliberately a float)
drivers = 30              # number of drivers
passengers = 90           # number of passengers to transport
cars_not_driven = cars - drivers          # cars left parked today
cars_driven = drivers                     # one car per driver
carpool_capacity = cars_driven * space_in_a_car   # total seats on the road
average_passengers_per_car = passengers / cars_driven

print(f"There are {cars} cars available.")
print(f"There are only {drivers} drivers available.")
print(f"There will be {cars_not_driven} empty cars today.")
print(f"We can transport {carpool_capacity} people today.")
print(f"We have {passengers} to carpool today.")
print(f"We need to put about {average_passengers_per_car} in each car.")

# Study Drill:
# If car_pool_capacity were never initialised, Python would not know the
# variable's value when it is first used and would raise a NameError.
# 1
space_in_a_car = 4        # using 4 instead of 4.0 ...
carpool_capacity = cars_driven * space_in_a_car
# ... makes carpool_capacity the int 120 instead of the float 120.0.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module, Sequential, Conv2d, ReLU,AdaptiveMaxPool2d, AdaptiveAvgPool2d, \
NLLLoss, BCELoss, CrossEntropyLoss, AvgPool2d, MaxPool2d, Parameter, Linear, Sigmoid, Softmax, Dropout, Embedding
from torch.nn import functional as F
from torch.autograd import Variable
#from model.coordatt import CoordAtt
class BGModel(nn.Module):
    """Boundary guidance module: fuses an edge branch with a feature branch.

    ``e1_stride``/``e2_stride`` control how much the edge input ``y`` is
    downsampled (two 5x5 convs) before being multiplied with the feature
    map ``x``, so the strides must make the two spatial sizes match.
    """

    def __init__(self, channel, e1_stride, e2_stride):
        super(BGModel, self).__init__()
        self.relu = nn.ReLU()
        self.conv0 = nn.Conv2d(channel, channel, 5, e1_stride, 2)
        self.gamma = nn.Conv2d(channel, channel, 5, e2_stride, 2)
        self.conv1 = nn.Conv2d(channel, channel, 5, 1, 2)
        self.conv2 = nn.Conv2d(channel, channel, 3, 1, 1)
        self.conv3 = nn.Conv2d(channel*2, channel, 3, 1, 1)
        self.conv4 = nn.Conv2d(2*channel, channel, 3, 1, 1)
        self.edge_pre = nn.Conv2d(channel, 1, 1)  # 1-channel edge side-output head
        self.fea_pre = nn.Conv2d(channel, 1, 1)   # 1-channel feature side-output head
        # Initialize all conv weights from N(0, 0.01) with zero bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(std=0.01)
                m.bias.data.fill_(0)

    def forward(self, y, x):  # x = feature, y = edge
        # x * sigmoid(x) gating; torch.sigmoid replaces the deprecated F.sigmoid.
        x = x * torch.sigmoid(x)
        x = self.conv1(x)
        y = y * torch.sigmoid(y)
        y = self.relu(self.conv0(y))
        y = self.relu(self.gamma(y))
        edge = self.relu(self.conv2(x * y))
        # Side predictions; not part of the returned value -- possibly
        # intended for deep supervision by a caller -- TODO confirm.
        e_pre = self.edge_pre(edge)
        fea = self.relu(self.conv3(torch.cat((x, y), 1)))
        f_pre = self.fea_pre(fea)
        x = self.conv4(torch.cat((edge, fea), 1))
        return x
class GGM(nn.Module):  # get global feature
    """Global guidance module: aggregates context pooled at several scales."""

    def __init__(self, in_channels):
        super(GGM, self).__init__()
        # branch0: plain 3x3 conv at full resolution.
        self.branch0 = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, 3, 1, 1),
            nn.ReLU()
        )
        # branch1-3: context pooled to 3x3, 5x5 and 1x1 (global), then 1x1 conv.
        self.branch1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((3, 3)),
            nn.Conv2d(in_channels, in_channels, 1, 1),
            nn.ReLU()
        )
        self.branch2 = nn.Sequential(
            nn.AdaptiveAvgPool2d((5, 5)),
            nn.Conv2d(in_channels, in_channels, 1, 1),
            nn.ReLU()
        )
        self.branch3 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(in_channels, in_channels, 1, 1),
            nn.ReLU()
        )
        self.con = nn.Conv2d(in_channels * 4, in_channels, 3, 1, 1)
        # Initialize all conv weights from N(0, 0.01) with zero bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(std=0.01)
                m.bias.data.fill_(0)

    def forward(self, x):
        x0 = self.branch0(x)
        # F.interpolate replaces the deprecated F.upsample; identical
        # arguments, identical bilinear resampling.
        x1 = F.interpolate(self.branch1(x), x0.size()[2:], mode='bilinear', align_corners=True)
        x2 = F.interpolate(self.branch2(x), x0.size()[2:], mode='bilinear', align_corners=True)
        x3 = F.interpolate(self.branch3(x), x0.size()[2:], mode='bilinear', align_corners=True)
        x = self.con(torch.cat((x0, x1, x2, x3), 1))
        return x
class CAM_Module(nn.Module):
    """ Channel attention module: reweights channels by channel-to-channel
    affinity, with a learnable residual scale that starts at zero."""

    def __init__(self, in_dim):
        super(CAM_Module, self).__init__()
        self.chanel_in = in_dim
        self.gamma = Parameter(torch.zeros(1))  # residual scale, 0 at init
        self.softmax = Softmax(dim=-1)

    def forward(self, x):
        """
        inputs :
            x : input feature maps( B X C X H X W)
        returns :
            out : attention value + input feature
            attention: B X C X C
        """
        batch, channels, height, width = x.size()
        flat = x.view(batch, channels, -1)                    # B x C x (H*W)
        affinity = torch.bmm(flat, flat.permute(0, 2, 1))     # B x C x C
        # Subtract each row's max before softmax for numerical stability.
        stabilized = torch.max(affinity, -1, keepdim=True)[0].expand_as(affinity) - affinity
        weights = self.softmax(stabilized)
        attended = torch.bmm(weights, flat)
        attended = attended.view(batch, channels, height, width)
        return self.gamma * attended + x
|
# -*- coding: utf-8 -*-
"""
806. Number of Lines To Write String
Created on Fri May 11 09:25:42 2018
@author: FT
"""
class Solution:
    def numberOfLines(self, widths, S):
        """LeetCode 806: count lines needed to write S at 100 units per line.

        :type widths: List[int]  # widths[0] is the width of 'a', etc.
        :type S: str
        :rtype: List[int]        # [number of lines, width used on the last line]
        """
        lines = 1
        line_width = 0
        for ch in S:
            w = widths[ord(ch) - ord('a')]
            if line_width + w > 100:
                # Character does not fit: start a new line with it.
                lines += 1
                line_width = w
            else:
                line_width += w
        # NOTE: the original also had an unreachable `if SumWidths > 100`
        # branch after the loop; line_width can never exceed 100 because
        # every width is at most 10 and overflow starts a new line.
        return [lines, line_width]
|
from django.urls import path
from paginator_app import views
# URL routes for paginator_app: the demo views at /hello/ and /getdata/.
urlpatterns = [
    path('hello/', views.hello, name='hello'),
    path('getdata/', views.getdata, name='getdata')
]
#! /usr/bin/python3
import pdb
import time
import iota.harness.api as api
import iota.test.apulu.config.api as config_api
import apollo.config.agent.api as agent_api
import iota.test.utils.traffic as traffic_utils
import iota.test.apulu.utils.flow as flow_utils
import apollo.config.utils as utils
from iota.harness.infra.glopts import GlobalOptions
from apollo.config.store import EzAccessStore
import iota.test.apulu.utils.pdsctl as pdsctl
class PolicerUpdateSpec:
    """Vnic update spec carrying exactly one policer: Tx for egress
    direction, Rx otherwise."""

    def __init__(self, policer):
        is_egress = (policer.direction == 'egress')
        self.TxPolicer = policer if is_egress else None
        self.RxPolicer = None if is_egress else policer
def UpdatePolicer(tc, workload):
    """Install a matching policer on the workload's vnic.

    Returns the token budget (rate * duration + burst) for the installed
    policer, or 0 when the workload is not Naples or no policer matches.
    """
    if not workload.IsNaples():
        return 0
    policer_client = EzAccessStore.GetConfigClient(agent_api.ObjectTypes.POLICER)
    policer = policer_client.GetMatchingPolicerObject(
        workload.node_name, tc.iterators.direction, tc.iterators.policertype)
    if not policer:
        return 0
    workload.vnic.Update(PolicerUpdateSpec(policer))
    tokens = (policer.rate * tc.duration) + policer.burst
    # Reset vnic counters so Verify only sees this run's traffic.
    vnic_id = workload.vnic.UUID.String()
    pdsctl.ExecutePdsctlCommand(
        workload.node_name, f"clear vnic statistics -i {vnic_id}",
        None, yaml=False)
    return tokens
def SetupPolicer(tc):
    """Install policers on one workload pair and record the expected token
    budget in ``tc.expected_tokens``.  Returns False when no matching
    policer exists on either side."""
    # first, reduce to exactly one pair
    del tc.workload_pairs[1:]
    w1, w2 = tc.workload_pairs[0]
    # install policer on workload pair
    tokens1 = UpdatePolicer(tc, w1)
    tokens2 = UpdatePolicer(tc, w2)
    if tokens1 == 0 and tokens2 == 0:
        # BUG FIX: the second string was missing its f prefix, so the
        # literal "{tc.iterators.policertype}" was logged.
        api.Logger.error(f"Skipping Testcase due to no {tc.iterators.direction}"
                         f" {tc.iterators.policertype} policer rules.")
        return False
    # find min of the tokens. tokens 0 indicates no policer installed
    if tokens1 == 0:
        tc.expected_tokens = tokens2
    elif tokens2 == 0:
        tc.expected_tokens = tokens1
    else:
        tc.expected_tokens = min(tokens1, tokens2)
    # handle duplex
    #tc.expected_tokens = tc.expected_tokens / 2
    return True
def Setup(tc):
    """Pick remote pingable workload pairs and install policers on them."""
    tc.num_streams = getattr(tc.args, "num_streams", 1)
    tc.duration = getattr(tc.args, "duration", 10)
    pairs = config_api.GetPingableWorkloadPairs(
        wl_pair_type=config_api.WORKLOAD_PAIR_TYPE_REMOTE_ONLY)
    tc.workload_pairs = pairs
    if not pairs:
        api.Logger.error("Skipping Testcase due to no workload pairs.")
        return api.types.status.FAILURE
    if not SetupPolicer(tc):
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run iperf across every workload pair for the configured duration."""
    tc.num_pairs = len(tc.workload_pairs)
    for w1, w2 in tc.workload_pairs:
        api.Logger.info("iperf between %s and %s" % (w1.ip_address, w2.ip_address))
    tc.cmd_cookies, tc.resp = traffic_utils.iperfWorkloads(
        tc.workload_pairs, tc.iterators.ipaf, tc.iterators.protocol,
        tc.iterators.pktsize, num_of_streams=tc.num_streams,
        time=tc.duration, sleep_time=10)
    return api.types.status.SUCCESS
def Verify(tc):
    """Check that the receiver saw traffic within the policed token budget."""
    if api.IsDryrun():
        return api.types.status.SUCCESS
    # Wide margin: at least 10% of the budget must arrive, and no more
    # than 105% of it may arrive.
    min_tokens = int(tc.expected_tokens * 0.1)
    max_tokens = int(tc.expected_tokens * 1.05)
    w1, w2 = tc.workload_pairs[0]
    w2.vnic.Read()
    # pps policers count packets; bps policers count bytes.
    if tc.iterators.policertype == 'pps':
        actual_tokens = w2.vnic.Stats.RxPackets
    else:
        actual_tokens = w2.vnic.Stats.RxBytes
    if actual_tokens < min_tokens:
        # Typo fix: "Recieved" -> "Received"; dropped stray semicolons.
        api.Logger.error(f"Received rate lower than expected: {actual_tokens} < {min_tokens}")
        return api.types.status.FAILURE
    if actual_tokens > max_tokens:
        api.Logger.error(f"Received rate higher than expected: {actual_tokens} > {max_tokens}")
        return api.types.status.FAILURE
    api.Logger.info(f"Passed: {min_tokens} < {actual_tokens} < {max_tokens}")
    return api.types.status.SUCCESS
def Teardown(tc):
    """Roll back every policer vnic update and clear the flow table."""
    for pair in tc.workload_pairs:
        for wl in pair:
            if wl.IsNaples():
                wl.vnic.RollbackUpdate()
    return flow_utils.clearFlowTable(tc.workload_pairs)
|
def vecteur_coord():
    """Read points A and B from stdin and print the coordinates of vector AB."""
    x_a = int(input("Coordonnées x de A: "))
    y_a = int(input("Coordonnées y de A: "))
    x_b = int(input("Coordonnées x de B: "))
    y_b = int(input("Coordonnées y de B: "))
    # Vector AB = B - A, component-wise.
    dx, dy = x_b - x_a, y_b - y_a
    print("")
    print("coordonnées du vecteur AB: ")
    print(str(dx) + "x")
    print(str(dy) + "y")

vecteur_coord()
|
import facebook
import webapp2
import os
import jinja2
import urllib2
import models
import app_config
import json
import datetime
import logging
import quopri
import random
import math
import string
import base64
from google.appengine.ext import db
from webapp2_extras import sessions
from google.appengine.api import mail
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import blobstore
from google.appengine.api import images
# from google.appengine.ext import ndb
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import search
# Facebook OAuth application credentials, loaded from the app_config module.
FACEBOOK_APP_ID = app_config.FACEBOOK_APP_ID
FACEBOOK_APP_SECRET = app_config.FACEBOOK_APP_SECRET
# base handler that always checks to make sure the user is signed in and caches
# user information
# TODO: handle session expire - ask user to login again
class BaseHandler(webapp2.RequestHandler):
    """Provides access to the active Facebook user in self.current_user
    The property is lazy-loaded on first access, using the cookie saved
    by the Facebook JavaScript SDK to determine the user ID of the active
    user. See http://developers.facebook.com/docs/authentication/ for
    more information.
    """
    @property
    def current_user(self):
        # Fast path: the user dict is already cached in the session.
        if self.session.get("user"):
            # model.User is logged in
            return self.session.get("user")
        else:
            # Either used just logged in or just saw the first page
            # We'll see here
            cookie = facebook.get_user_from_cookie(self.request.cookies,
                                                   FACEBOOK_APP_ID,
                                                   FACEBOOK_APP_SECRET)
            if cookie:
                # Okay so user logged in.
                # Now, check to see if existing user
                logging.info(cookie["uid"])
                user = models.User.get_by_key_name(cookie["uid"])
                graph = facebook.GraphAPI(cookie["access_token"])
                if not user:
                    # Not an existing user so get user info
                    profile = graph.get_object("me")
                    # Every new user gets empty Settings and WordCloud entities.
                    settings = models.Settings()
                    settings.put()
                    word_cloud = models.WordCloud()
                    word_cloud.put()
                    user = models.User(
                        key_name=str(profile["id"]),
                        id=str(profile["id"]),
                        name=profile["name"],
                        display_name=profile["name"],
                        profile_url=profile["link"],
                        access_token=cookie["access_token"],
                        settings=settings,
                        word_cloud=word_cloud
                    )
                    user.put()
                elif user.access_token != cookie["access_token"]:
                    # Existing user with a refreshed token: persist the new one.
                    user.access_token = cookie["access_token"]
                    user.put()
                # User is now logged in; cache a lightweight dict in the session.
                self.session["user"] = {
                    'name':user.name,
                    'profile_url':user.profile_url,
                    'id':user.id,
                    'access_token':user.access_token,
                    # 'public_user':user.public_user,
                    'public_user':user.user_type,
                    'friends_list':graph.get_connections("me", "friends")
                }
                return self.session.get("user")
            return None

    def dispatch(self):
        """
        This snippet of code is taken from the webapp2 framework documentation.
        See more at
        http://webapp-improved.appspot.com/api/webapp2_extras/sessions.html
        """
        self.session_store = sessions.get_store(request=self.request)
        try:
            webapp2.RequestHandler.dispatch(self)
        finally:
            # Always persist session mutations, even if the handler raised.
            self.session_store.save_sessions(self.response)

    @webapp2.cached_property
    def session(self):
        """
        This snippet of code is taken from the webapp2 framework documentation.
        See more at
        http://webapp-improved.appspot.com/api/webapp2_extras/sessions.html
        """
        return self.session_store.get_session()

    # creates a notification any time the current user cheers, comments, or
    # mentions
    # TODO: don't create notifications for current user
    def notify(self,event_type,to_user,event_id):
        """Store a Notification from the current user to *to_user*.

        Returns True when a notification was created (sender and recipient
        differ), False when the user acted on their own content.
        """
        from_user_id = str(self.current_user['id'])
        from_user = models.User.get_by_key_name(from_user_id)
        if from_user != to_user:
            notification = models.Notification(
                from_user=from_user,
                to_user=to_user,
                event_type=event_type,
                event_id=str(event_id),
            )
            notification.put()
        return from_user != to_user
# handler for home pages
class HomeHandler(BaseHandler):
    """Serves the home page.

    Logged-in users see a main page matching their experiment arm
    (user_type: 0 = placebo/"memory", 1 = private, 2 = public) once they
    have completed the enrollment survey; otherwise they are sent to it.
    Anonymous visitors get the landing page, or the intro page on a first
    visit with no cookies.
    """
    # check if user if user is logged in and public/private
    # serve landing page, public home page, or private home page
    def get(self):
        current_user = self.current_user
        if current_user:
            survey_no = -1
            survey_alert = False
            user_id = str(self.current_user['id'])
            user = models.User.get_by_key_name(user_id)
            # check if user already fillied in survey
            has_init_survey = False
            if user.survey_id == None or user.survey_id == "":
                logging.info(self.request.cookies)
                # The survey may have been taken before login; its id is then
                # carried in a cookie.
                if "survey_id" in self.request.cookies:
                    survey_id = self.request.cookies['survey_id']
                else:
                    survey_id = None
                if survey_id != None and survey_id != "":
                    user.survey_id = survey_id
                    survey = models.Survey.get_by_key_name(survey_id)
                    # CESD_20 is the last survey item; its presence marks the
                    # survey as completed.
                    if survey != None and survey.CESD_20 != None:
                        user.email = survey.email
                        user.put()
                        has_init_survey = True
                    else:
                        has_init_survey = False
                else:
                    has_init_survey = False
            else:
                has_init_survey = True
            if (has_init_survey):
                user_type = self.request.cookies.get('user_type')
                logging.info("user_type in cookies:")
                logging.info(user_type)
                logging.info("user.user_type=" + str(user.user_type))
                # Assign an experiment arm (0..2) if none yet: prefer the
                # cookie value, otherwise pick one uniformly at random.
                if user.user_type == -1:
                    if (user_type != "" and user_type != None):
                        user.user_type = int(user_type)
                    else:
                        user.user_type = int(math.floor(random.random()*3))
                    user.put()
                # NOTE(review): user.user_type is an int while user_type is the
                # cookie string (or None), so this comparison is always True
                # and the cookie is re-set on every request — confirm intended.
                if user.user_type != user_type:
                    self.response.set_cookie('user_type', str(user.user_type), max_age=360)
                #Check if any survey due:
                # Follow-up surveys are due roughly 1 week, 1 month, 3 months
                # and 6 months after enrollment; survey_alert turns on near
                # each window's deadline.
                date_since_enroll = (datetime.datetime.now() - user.created).days
                logging.info("date_since_enroll=" + str(date_since_enroll))
                if(date_since_enroll < 14 and date_since_enroll >= 7 and user.survey_1_id is None):
                    survey_no = 1
                    if(date_since_enroll >= 12):
                        survey_alert = True
                elif(date_since_enroll < 50 and date_since_enroll >= 30 and user.survey_2_id is None):
                    survey_no = 2
                    if(date_since_enroll >= 43):
                        survey_alert = True
                elif(date_since_enroll < 110 and date_since_enroll >= 90 and user.survey_3_id is None):
                    survey_no = 3
                    if(date_since_enroll >= 103):
                        survey_alert = True
                elif(date_since_enroll >= 180 and user.survey_4_id is None):
                    survey_no = 4
                    if(date_since_enroll >= 192):
                        survey_alert = True
                # Pick the template for the user's experiment arm.
                if user.user_type == 2:
                    template = jinja_environment.get_template('public_main.html')
                elif user.user_type == 1:
                    template = jinja_environment.get_template('private_main.html')
                elif user.user_type == 0:
                    template = jinja_environment.get_template('memory_main.html')
                template_values = {
                    'facebook_app_id':FACEBOOK_APP_ID,
                    'current_user':current_user,
                    'survey_no':survey_no,
                    'survey_alert':survey_alert
                }
            else:
                # No completed enrollment survey yet: send the user there first.
                survey_no = 0
                template = jinja_environment.get_template('survey.html')
                template_values = {'resubmit':'True', 'survey_no':survey_no}
            logging.info("survey_no=" + str(survey_no))
            self.response.out.write(template.render(template_values))
        else:
            # Anonymous visitor.
            if ('user_type' not in self.request.cookies and 'survey_id' not in self.request.cookies):
                # First visit with no cookies at all: show the intro.
                self.redirect('/intro')
            else:
                if ('user_type' not in self.request.cookies):
                    user_type = int(math.floor(random.random()*3))
                    self.response.set_cookie('user_type', str(user_type), max_age=360)
                else:
                    user_type = int(self.request.cookies['user_type'])
                logging.info("In Home Handler, not current user, user_type=" + str(user_type))
                template = jinja_environment.get_template('landing.html')
                template_values = {
                    'public_user': user_type,
                }
                logging.info(template_values)
                self.response.out.write(template.render(template_values))
# intro page for first time users
class IntroHandler(BaseHandler):
    # First-visit intro page; known visitors are bounced straight to the home page.
    def get(self):
        current_user = self.current_user
        logging.info("IntroHandler")
        already_known = current_user or "survey_id" in self.request.cookies
        if already_known:
            self.redirect('/')
            return
        template = jinja_environment.get_template('intro.html')
        self.response.out.write(template.render({}))
# update the public/private field after the user has passed through the intro
# screen.
class LandingHandler(BaseHandler):
    # Draw a random experiment arm (0-2) and show the landing page; a logged-in
    # user who has no arm yet gets the drawn one persisted as well.
    def get(self):
        user_type = int(math.floor(random.random()*3))
        current_user = self.current_user
        if current_user:
            user = models.User.get_by_key_name(str(current_user['id']))
            if user.user_type == -1:
                user.user_type = user_type
                user.put()
        logging.info("public_user=" + str(user_type))
        template = jinja_environment.get_template('landing.html')
        # NOTE(review): max_age is in seconds, so this cookie lives ~6 minutes;
        # confirm whether days were intended (other handlers use 360).
        self.response.set_cookie('user_type', str(user_type), max_age=365)
        self.response.out.write(template.render({'public_user': user_type}))
class SurveyHandler(BaseHandler):
    """Serves the survey pages and stores survey answers.

    get: survey_no 0 renders the enrollment survey, anything else renders
    the follow-up survey.

    post: dispatches on the 'type' form field:
      '0' - create a Survey keyed by a fresh id, storing the email
      '1' - demographic answers (age, gender)
      '2' - the 20 IPIP personality items
      '3' - the 23 PERMA items, or (when survey_no != 0) create a new
            follow-up Survey record and return its id
      '4' - the 20 CES-D items; completed follow-up surveys are also linked
            to the current user's survey_<n>_id slot
    """

    def get(self):
        template_values = {'resubmit':'False'}
        survey_no = int(self.request.get('survey_no'))
        if(survey_no == 0):
            logging.info("survey page 0")
            template = jinja_environment.get_template('survey.html')
        else:
            logging.info("survey page " + str(survey_no))
            template = jinja_environment.get_template('survey_followup.html')
        self.response.out.write(template.render(template_values))

    def _new_survey_id(self):
        """Build a unique survey key: a timestamp plus 6 random alphanumerics."""
        # range() instead of xrange() keeps this Python-2/3 compatible.
        suffix = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(6)])
        return datetime.datetime.now().strftime("%Y%m%d%H%M%S") + suffix

    def _store_items(self, survey, prefix, count):
        """Copy request fields '<prefix>-i' onto survey.<prefix>_i for i in 1..count."""
        for i in range(1, count + 1):
            setattr(survey, '%s_%d' % (prefix, i),
                    self.request.get('%s-%d' % (prefix, i)))

    def post(self):
        survey_type = self.request.get('type')
        logging.info("survey_type=" + survey_type)
        # submit email (enrollment)
        if (survey_type == '0'):
            email = self.request.get('email')
            if (email is not None and email != ''):
                survey_no = 0
                survey_id = self._new_survey_id()
                logging.info("survey_id:" + survey_id)
                survey = models.Survey(
                    key_name=survey_id,
                    email=email,
                    survey_no=survey_no
                )
                survey.put()
                result = {"survey_id":survey_id, "survey_no":survey_no}
                self.response.headers['Content-Type'] = 'application/json'
                self.response.out.write(json.dumps(result))
        # submit demographic
        elif (survey_type == '1'):
            logging.info("survey_type=1")
            survey_id = self.request.get('survey_id')
            survey = models.Survey.get_by_key_name(survey_id)
            logging.info("survey_id=" + survey_id)
            survey.age = self.request.get('survey-age')
            survey.gender = self.request.get('survey-gender')
            logging.info("age=" + survey.age)
            logging.info("gender=" + survey.gender)
            survey.put()
        # submit ipip
        elif (survey_type == '2'):
            survey = models.Survey.get_by_key_name(self.request.get('survey_id'))
            self._store_items(survey, 'IPIP', 20)
            survey.put()
        # submit perma (or open a follow-up survey)
        elif (survey_type == '3'):
            survey_no = int(self.request.get('survey_no'))
            # BUG FIX: was 'survey_no is not 0', an identity (not equality)
            # test that only works by accident of CPython's small-int cache.
            if(survey_no != 0):
                # Follow-up survey: create a fresh record and return its id.
                survey_id = self._new_survey_id()
                logging.info("survey_id:" + survey_id)
                survey = models.Survey(
                    key_name=survey_id,
                    survey_no=survey_no
                )
                survey.put()
                result = {"survey_id":survey_id, "survey_no":survey_no}
                self.response.headers['Content-Type'] = 'application/json'
                self.response.out.write(json.dumps(result))
            else:
                survey = models.Survey.get_by_key_name(self.request.get('survey_id'))
                self._store_items(survey, 'PERMA', 23)
                survey.put()
        #submit cesd
        elif (survey_type == '4'):
            survey_id = self.request.get('survey_id')
            survey = models.Survey.get_by_key_name(survey_id)
            self._store_items(survey, 'CESD', 20)
            survey.put()
            survey_no = int(self.request.get('survey_no'))
            if(survey_no != 0):
                # Link the completed follow-up survey to the user record.
                user = models.User.get_by_key_name(str(self.current_user['id']))
                if (survey_no == 1):
                    user.survey_1_id = survey_id
                elif (survey_no == 2):
                    user.survey_2_id = survey_id
                elif (survey_no == 3):
                    user.survey_3_id = survey_id
                elif (survey_no == 4):
                    user.survey_4_id = survey_id
                user.put()
class FileUploadHandler(blobstore_handlers.BlobstoreUploadHandler, BaseHandler):
    # Hand the client a fresh blobstore upload URL as JSON.
    def get(self):
        upload_url = blobstore.create_upload_url('/post')
        logging.info(upload_url)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({'upload_url': upload_url}))
class PostHandler(blobstore_handlers.BlobstoreUploadHandler, BaseHandler):
    """Serves paged good-thing feeds and accepts new posts (text + image)."""
    def post(self):
        """Feed dispatcher.

        A non-empty 'view' parameter selects which posts to return as JSON
        ('search', 'me', 'all', 'tag', 'profile', or a profile user id in
        'view' itself); an empty 'view' means the client is saving a post.
        """
        user_id = str(self.current_user['id'])
        view = self.request.get('view')
        cursor_str = self.request.get('cursor')
        # Resume datastore pagination from the client-supplied websafe cursor.
        if(cursor_str != ""):
            good_things_cursor = Cursor.from_websafe_string(cursor_str.encode('utf-8'))
        else:
            good_things_cursor = None
        # if the client isn't saving a post
        # A fresh blobstore upload URL rides along with every response so the
        # client can attach an image to its next post.
        upload_url = blobstore.create_upload_url('/post')
        logging.info("in the post, view=" + view)
        if view != '':
            all_good_things = models.GoodThing.all().order('-created').filter('deleted =',False)
            user = models.User.get_by_key_name(user_id)
            if view == 'search':
                # NOTE(review): this branch sets good_things but never
                # 'result', so the len(result) check below would raise a
                # NameError for view == 'search' — confirm against callers.
                good_things = all_good_things.filter('user =',user).filter('memory =',False).fetch(limit=10, start_cursor=good_things_cursor)
            else:
                if (user.user_type != 2):
                    # Non-public arms only ever see their own posts.
                    if(user.user_type == 0):
                        #placebo user
                        good_things = all_good_things.filter('user =',user).filter('memory =',True).fetch(limit=10, start_cursor=good_things_cursor)
                        logging.info("placebo user")
                    else:
                        #private user
                        good_things = all_good_things.filter('user =',user).filter('memory =',False).fetch(limit=10, start_cursor=good_things_cursor)
                        logging.info("private user")
                    good_things_cursor = all_good_things.cursor()
                    result = [x.template(user_id,good_things_cursor, upload_url) for x in good_things]#[::-1]
                    logging.info(result)
                else:
                    #public user
                    # return just the current user's posts
                    if view == 'me':
                        user = models.User.get_by_key_name(user_id)
                        good_things = all_good_things.filter('user =',user).filter('memory =',False).fetch(limit=10, start_cursor=good_things_cursor)
                        good_things_cursor = all_good_things.cursor()
                        result = [x.template(user_id,good_things_cursor,upload_url) for x in good_things]#[::-1]
                        logging.info("view == me")
                    # return all public posts and current user's private posts
                    elif view == 'all':
                        user = models.User.get_by_key_name(user_id)
                        result = []
                        # Keep fetching pages until 10 visible posts (public,
                        # or the viewer's own) have been collected.
                        # NOTE(review): if fewer than 10 matching posts exist,
                        # this loop looks unbounded — confirm the exhausted
                        # cursor eventually breaks it.
                        while(len(result) < 10):
                            if (isinstance(good_things_cursor, str)):
                                good_things_cursor = Cursor.from_websafe_string(good_things_cursor.encode('utf-8'))
                            good_things = all_good_things.filter('memory =',False).fetch(limit=10, start_cursor=good_things_cursor)
                            good_things_cursor = all_good_things.cursor()
                            result += [x.template(user_id,good_things_cursor, upload_url) for x in good_things if (x.public or x.user.id == user.id)]#[::-1]
                        logging.info("view == all")
                    elif view == 'tag':
                        # Posts in which the current user is mentioned, via
                        # the "tagged_posts" full-text search index.
                        user = models.User.get_by_key_name(user_id)
                        mword = user.name
                        result = []
                        logging.info(mword)
                        index = search.Index(name="tagged_posts")
                        results = index.search(search.Query(
                            query_string = "mentions:" + mword,
                            options = search.QueryOptions(limit=1000)
                        ))
                        mention_list = []
                        for aDocument in results:
                            # The search document id is the GoodThing's id.
                            goodthing_id = long(aDocument.doc_id)
                            mention = models.GoodThing.get_by_id(goodthing_id)
                            mention_list.append(mention)
                        mention_list = sorted(mention_list, key = lambda goodthing:goodthing.created, reverse=True)
                        result += [good_thing.template(user_id, upload_url=upload_url) for good_thing in mention_list if good_thing is not None]
                        logging.info("view == tagged")
                    elif view == 'profile':
                        profile_user_id = str(self.request.get('userid'))
                        if (profile_user_id == user_id):
                            # Own profile: private posts included.
                            user = models.User.get_by_key_name(user_id)
                            good_things = all_good_things.filter('memory =',False).filter('user =',user).fetch(limit=10, start_cursor=good_things_cursor)
                            good_things_cursor = all_good_things.cursor()
                            result = [x.template(user_id,good_things_cursor) for x in good_things]#[::-1]
                            logging.info("view == profile_me")
                        else:
                            # Someone else's profile: public posts only.
                            user = models.User.get_by_key_name(profile_user_id)
                            good_things = all_good_things.filter('memory =',False).filter('user =',user).filter('public =',True).fetch(limit=10, start_cursor=good_things_cursor)
                            good_things_cursor = all_good_things.cursor()
                            result = [x.template(user_id,good_things_cursor) for x in good_things]#[::-1]
                            logging.info("view == profile_others")
                    else:
                        # 'view' itself carries a user id: that user's public posts.
                        profile_user_id = str(self.request.get('view'))
                        profile_user = models.User.get_by_key_name(profile_user_id)
                        good_things = all_good_things.filter('user =',profile_user).filter('public =',True).filter('memory =',False).fetch(limit=10, start_cursor=good_things_cursor)
                        good_things_cursor = all_good_things.cursor()
                        result = [x.template(user_id,good_things_cursor) for x in good_things]#[::-1]
                        logging.info("view == else")
            if (len(result) == 0):
                # Empty page: still hand back the upload URL and cursor.
                result = [{'upload_url': upload_url, 'cursor': good_things_cursor}]
        # save a post. separate this into the post() method
        else:
            result = [self.save_post().template(user_id, good_things_cursor, upload_url)]
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(result))

    # save a post to the datastore and return that post. this should be turned
    # into the post() method
    def save_post(self):
        """Create a GoodThing from the request and return it.

        Decodes base64/quoted-printable payloads, stores an uploaded image's
        blob key, records mentions (notifying mentioned public users), and
        optionally cross-posts to the author's Facebook wall.
        """
        logging.info("save_post")
        good_thing_text = self.request.get('good_thing')
        logging.info(good_thing_text)
        # The client may send the text base64-encoded; fall back to the raw
        # value when decoding fails.
        try:
            good_thing_text = base64.b64decode(good_thing_text).decode('utf-8')
        except:
            pass
        # Strip quoted-printable soft line breaks.
        good_thing_text = good_thing_text.replace('=\r\n','')
        logging.info(good_thing_text)
        reason = self.request.get('reason')
        try:
            reason = base64.b64decode(reason).decode('utf-8')
        except:
            pass
        user_id = str(self.current_user['id'])
        user = models.User.get_by_key_name(user_id)
        #check if is_memory
        # Placebo-arm (type 0) users write "memories" instead of good things.
        if user.user_type == 0:
            is_memory = True
        else:
            is_memory = False
        logging.info("is_memory=" + str(is_memory))
        # Record the creation time in the client's local timezone.
        tz_offset = self.request.get('tzoffset')
        local_time = datetime.datetime.now() - datetime.timedelta(hours=int(tz_offset))
        upload = self.get_uploads()
        if len(upload) != 0:
            img_key = upload[0].key()
        else:
            img_key = None
        # Only public-arm (type 2) users may share publicly or to Facebook.
        if user.user_type == 2:
            if self.request.get('wall') == 'on':
                wall = True
            else:
                wall = False
            if self.request.get('public') == 'on':
                public = True
            else:
                public = False
        else:
            public = False
            wall = False
        mentions = []
        good_thing = models.GoodThing(
            good_thing=good_thing_text,
            reason=reason,
            created_origin=local_time,
            user=user,
            public=public,
            wall=wall,
            memory=is_memory,
            blob_key = img_key,
        )
        good_thing.put()
        # handle mentions here
        msg_tags=[]
        photo_tags = []
        if self.request.get('mentions') != '':
            # 'mentions' arrives as quoted-printable-encoded JSON.
            mention_list = json.loads(quopri.decodestring(str(self.request.get('mentions'))))
            logging.info(mention_list)
            for to_user_id in mention_list:
                if 'app_id' in to_user_id:
                    # Mentioned person is an app user; notify when both sides
                    # are public users, it's not a self-mention, and the post
                    # is public.
                    to_user = models.User.get_by_key_name(str(to_user_id['app_id']))
                    fb_app_id = to_user_id['app_id']
                    event_id = good_thing.key().id()
                    # handle mention notification
                    if (user.user_type == 2 and to_user.user_type == 2 and user.id != to_user.id and good_thing.public):
                        self.notify(event_type='mention',
                                    to_user=to_user,
                                    event_id=event_id)
                else:
                    to_user = None
                mention = models.Mention(
                    parent=good_thing,
                    to_user=to_user,
                    good_thing=good_thing,
                    to_fb_user_id = to_user_id['id'],
                    to_user_name = to_user_id['name']
                )
                mention.put()
                logging.info("mention to_user_id:" + str(to_user_id['name']))
                msg_tags.append(to_user_id['id'].encode('utf-8'))
                photo_tags.append({'tag_uid':to_user_id['id'].encode('utf-8'), 'tag_text':to_user_id['name'].encode('utf-8')})
        # handle posting to fb
        if wall:
            graph = facebook.GraphAPI(self.current_user['access_token'])
            if img_key:
                img = images.get_serving_url(img_key, size=400)
                graph.put_photo('',message=good_thing.good_thing, url=img)
            else:
                graph.put_object('me','feed',message=good_thing.good_thing, place='message', tags=msg_tags)
        return good_thing
# API for saving and serving cheers
class CheerHandler(BaseHandler):
    """API for toggling a cheer (like) on a GoodThing and returning the count."""
    def post(self):
        user_id = str(self.current_user['id'])
        user = models.User.get_by_key_name(user_id)
        # int() instead of Python-2-only long(); Python 2 promotes large ints
        # automatically, so behavior is unchanged there.
        good_thing_id = int(self.request.get('good_thing'))
        good_thing = models.GoodThing.get_by_id(good_thing_id)
        cheer = good_thing.cheer_set.filter('user =',user).get()
        # if the user has not cheered this post, create a new cheer
        if not cheer:
            cheer = models.Cheer(
                user=user,
                good_thing=good_thing,
            )
            cheer.put()
            cheered = True
            self.notify(event_type='cheer',
                        to_user=good_thing.user,
                        event_id=good_thing_id)
        # if the user has already cheered this post, delete the cheer
        else:
            cheer.delete()
            cheered = False
        self.response.headers['Content-Type'] = 'application/json'
        result = {
            'cheers':good_thing.num_cheers(),
            'cheered':cheered
        }
        self.response.out.write(json.dumps(result))
class APostHandler(BaseHandler):
    # Serves the single-post page (/posts/<postid>) and its JSON payload.
    def get(self,postid):
        viewer_id = str(self.current_user['id'])
        viewer = models.User.get_by_key_name(viewer_id)
        template_values = {
            'facebook_app_id':FACEBOOK_APP_ID,
            'user_id':viewer_id,
            'user_name':viewer.name,
        }
        template = jinja_environment.get_template('apost.html')
        self.response.out.write(template.render(template_values))

    def post(self,postid):
        viewer_id = str(self.current_user['id'])
        a_post = models.GoodThing.get_by_id(long(self.request.get('postid')))
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps([a_post.template(viewer_id)]))
# API for saving and serving comments. should be separated like good thing handler
class CommentHandler(BaseHandler):
    # API for saving and serving comments on a GoodThing.
    def post(self):
        comment_text = self.request.get('comment_text')
        good_thing = models.GoodThing.get_by_id(long(self.request.get('good_thing')))
        user_id = str(self.current_user['id'])
        if comment_text != '':
            # Non-empty text: persist a new comment and echo it back.
            author = models.User.get_by_key_name(user_id)
            saved = self.save_comment(comment_text=comment_text,
                                      user=author,
                                      good_thing=good_thing)
            result = [saved.template(user_id)]
        else:
            # Empty text: list every live comment on the post, oldest first.
            live = good_thing.comment_set.order('created').filter('deleted =',False)
            result = [c.template(user_id) for c in live.fetch(limit=None)]
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(result))

    def save_comment(self, comment_text, user, good_thing):
        # Persist the comment, then notify the post owner when appropriate.
        comment = models.Comment(
            comment_text=comment_text,
            user=user,
            good_thing=good_thing,
        )
        comment.put()
        # Only public (type 2) users generate notifications, never to self.
        if (user.user_type == 2 and user.id != good_thing.user.id):
            self.notify(event_type='comment',
                        to_user=good_thing.user,
                        event_id=good_thing.key().id())
        return comment
# API for deleting a good thing or a comment
class DeleteHandler(BaseHandler):
    """API for soft-deleting a good thing or a comment.

    Marks the entity as deleted rather than removing it from the datastore.
    Responds with JSON; when a comment is deleted, the post's updated comment
    count is included so the client can refresh its counter.
    """
    def post(self):
        # int() instead of Python-2-only long(); equivalent on Python 2.
        obj_id = int(self.request.get('id'))
        # BUG FIX: the old code built the result from 'comment' even when a
        # good thing was deleted, raising NameError in that branch.
        result = {}
        if self.request.get('type') == 'good_thing':
            good_thing = models.GoodThing.get_by_id(obj_id)
            good_thing.deleted = True
            good_thing.put()
        elif self.request.get('type') == 'comment':
            comment = models.Comment.get_by_id(obj_id)
            comment.deleted = True
            comment.put()
            result = {'num_comments':comment.good_thing.num_comments()}
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(result))
# log the current user out and redirect to the landing page
class LogoutHandler(BaseHandler):
    # Clear the cached session user, then send the visitor back to the home page.
    def get(self):
        if self.current_user is not None:
            self.session['user'] = None
        self.redirect('/')
# API for updating a user's settings
class SettingsHandler(BaseHandler):
    """API for reading and updating the current user's settings."""
    # update the current user's settings
    def post(self):
        """Update display name, email, reminder days and sharing defaults;
        respond with the saved settings as JSON."""
        user_id = str(self.current_user['id'])
        user = models.User.get_by_key_name(user_id)
        settings = user.settings
        reminder_days = self.request.get('reminder_days')
        email = self.request.get('email')
        display_name = self.request.get('display_name')
        user.display_name = display_name
        # BUG FIX: the old code compared the raw request string to 1
        # ('reminder_days >= 1'), which is always true on Python 2 and a
        # TypeError on Python 3. Parse first, then validate; -1 means "off".
        try:
            reminder_days = int(reminder_days)
        except (TypeError, ValueError):
            reminder_days = -1
        settings.reminder_days = reminder_days if reminder_days >= 1 else -1
        settings.default_fb = (self.request.get('default_fb') == 'on')
        settings.default_public = (self.request.get('default_public') == 'on')
        if email is not None and email != '':
            user.email = email
        # BUG FIX: user.put() used to run only inside the email branch, so a
        # display-name change without an email was silently dropped.
        user.put()
        settings.put()
        result = settings.template(user.name, user.display_name)
        result['email'] = str(user.email)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(result))

    # get the current user's settings
    def get(self):
        """Return the current user's settings as JSON."""
        user_id = str(self.current_user['id'])
        user = models.User.get_by_key_name(user_id)
        result = user.settings.template(user.name, user.display_name)
        result['email'] = user.email
        logging.info(result)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(result))
# serve the privacy page
class PrivacyHandler(webapp2.RequestHandler):
    # Render the static privacy-policy page.
    def get(self):
        page = jinja_environment.get_template('privacy.html')
        self.response.out.write(page.render({}))
# API for getting a user's stats (word cloud, good things today). can be used
# any public user, not just current user
class StatHandler(BaseHandler):
    """API for a user's stats: posting counts, daily progress and word clouds.

    Works for any user id passed in 'user_id', not just the current user;
    other users' profile stats only count public posts.
    """
    def post(self):
        view = self.request.get('view')
        # Default a missing tz offset to 0 instead of raising on int('').
        tz_offset = int(self.request.get('tzoffset') or 0)
        # "Today" in the client's local timezone.
        today = (datetime.datetime.now() - datetime.timedelta(hours = tz_offset)).date()
        if (view != '' and view == 'profile'):
            user_profile_id = self.request.get('user_id')
            user_id = str(self.current_user['id'])
            if(user_profile_id != user_id):
                # Someone else's profile: count only public, live posts.
                user = models.User.get_by_key_name(user_profile_id)
                posts = user.goodthing_set.filter('deleted =',False).filter('public =', True).count()
                posts_today = user.goodthing_set.filter('created_origin >=', today).filter('deleted =',False).filter('public =', True).count()
                user.word_cloud.update_word_dict('public')
            else:
                # Own profile: count everything that isn't deleted.
                user = models.User.get_by_key_name(user_id)
                posts = user.goodthing_set.filter('deleted =',False).count()
                posts_today = user.goodthing_set.filter('created_origin >=', today).filter('deleted =',False).count()
                user.word_cloud.update_word_dict('private')
        else:
            if self.request.get('user_id') == '':
                user_id = str(self.current_user['id'])
            else:
                user_id = self.request.get('user_id')
            user = models.User.get_by_key_name(user_id)
            posts = user.goodthing_set.filter('deleted =',False).count()
            posts_today = user.goodthing_set.filter('created_origin >=', today).filter('deleted =',False).count()
            user.word_cloud.update_word_dict('private')
        # BUG FIX: the old code divided by the raw days-since-enrollment,
        # which is 0 for a user created today (ZeroDivisionError). Clamp to 1.
        total_days = max((datetime.datetime.now() - user.created).days, 1)
        average_posts = '%.2f' %(float(posts) / float(total_days))
        # Daily goal is 3 posts; cap the progress bar at 100%.
        progress = int((float(posts_today)/3)*100)
        if progress > 100:
            progress = 100
        progress = str(progress) + '%'
        word_cloud = user.word_cloud.get_sorted_word_dict()
        reason_cloud = user.word_cloud.get_sorted_reason_dict()
        friend_cloud = user.word_cloud.get_sorted_friend_dict()
        result = {
            'posts_today':posts_today,
            'progress':progress,
            'posts':posts,
            'average_posts':str(average_posts),
            'word_cloud':word_cloud,
            'reason_cloud':reason_cloud,
            'friend_cloud':friend_cloud
        }
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(result))
# API for searching for good things matching for good_thing words or reasons
class SearchHandler(BaseHandler):
    """API for searching a user's posts by good-thing word, reason or mention.

    GET params: user_id (defaults to the current user) plus at most one of
    goodthing_word / reason_word / friend_word. Responds with a JSON list of
    rendered post templates, newest first.
    """

    def _search_posts(self, index_name, field, word, current_user, upload_url):
        # Query the per-user full-text index for 'field:word' and render the
        # matching GoodThing entities, newest first. The three search kinds
        # previously duplicated this logic inline.
        index = search.Index(name=index_name)
        results = index.search(search.Query(
            query_string = field + ":" + word,
            options = search.QueryOptions(limit=1000)
        ))
        matches = []
        for aDocument in results:
            # The search document id is the GoodThing datastore id.
            goodthing_id = long(aDocument.doc_id)
            matches.append(models.GoodThing.get_by_id(goodthing_id))
        matches = sorted(matches, key = lambda goodthing:goodthing.created, reverse=True)
        return [x.template(current_user, upload_url=upload_url) for x in matches]

    def get(self):
        goodthing_user = self.request.get('user_id')
        current_user = str(self.current_user['id'])
        if(goodthing_user is None or goodthing_user == ""):
            goodthing_user = current_user
        logging.info("goodthing_user=" + str(goodthing_user))
        logging.info("current_user=" + str(self.current_user['id']))
        goodthing_word = self.request.get('goodthing_word')
        reason_word = self.request.get('reason_word')
        mention_name = self.request.get('friend_word')
        upload_url = blobstore.create_upload_url('/post')
        # Fix: 'result' used to be unbound (NameError -> HTTP 500) when no
        # search parameter was supplied; default to an empty result list.
        result = []
        if(goodthing_word):
            result = self._search_posts(goodthing_user, "good_thing", str(goodthing_word), current_user, upload_url)
        elif(reason_word):
            result = self._search_posts(goodthing_user, "reason", str(reason_word), current_user, upload_url)
        elif(mention_name):
            logging.info(mention_name)
            # Mention names may be non-ascii; pass them through unchanged
            # (the original branch did not str()-coerce them either).
            result = self._search_posts(goodthing_user, "mentions", mention_name, current_user, upload_url)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(result))
# API for getting all of the current user's unread notifications
# after this API has been called once all notifications are marked as read
class NotificationHandler(BaseHandler):
    """Serve the current user's unread notifications as JSON and mark each
    one as read so it is returned at most once."""

    def get(self):
        current_id = str(self.current_user['id'])
        current = models.User.get_by_key_name(current_id)
        unread = models.Notification.all().filter('to_user =', current).filter('read =', False)
        payload = []
        for item in unread:
            payload.append(item.template())
            # Flip the flag and persist so the next poll skips this entry.
            item.read = True
            item.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(payload))
# serve the profile page for a public user
class ProfileHandler(BaseHandler):
    """Serve the profile page for a public user, or the viewer's own page.

    The profile to show is taken from the 'userid' query parameter (the URL
    path argument is unused, kept for route compatibility).
    """
    def get(self, userid):
        user_id = str(self.request.get('userid'))
        current_user_id = str(self.current_user['id'])
        user = models.User.get_by_key_name(user_id)
        if (user.user_type == 2):
            logging.info("public profile")
            template = jinja_environment.get_template('profile.html')
        elif (user_id == current_user_id):
            logging.info("entering private user's profile")
            template = jinja_environment.get_template('private_profile.html')
        else:
            # Fix: previously fell through with 'template' unbound and raised
            # NameError (HTTP 500). Another user's private profile is simply
            # not viewable.
            self.error(403)
            return
        # Both branches rendered identical values; build them once.
        template_values = {
            'facebook_app_id':FACEBOOK_APP_ID,
            'user_id':user_id,
            'user_name':user.name,
            'current_user_id': current_user_id
        }
        self.response.out.write(template.render(template_values))
# Shared Jinja2 environment for all handlers. Templates live in ./templates;
# autoescaping is enabled so user-supplied values are HTML-escaped on render.
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),'templates')),
    autoescape=True,
    extensions=['jinja2.ext.autoescape'],
)
# send email reminder
class ReminderHandler(BaseHandler):
    """Cron endpoint for e-mail reminders.

    The post-reminder, survey-reminder and search-index-cleanup logic that
    used to live here is currently disabled; this handler only logs its
    invocation. The disabled implementation (per-user reminder e-mails based
    on settings.reminder_days, survey e-mails at fixed days-since-enroll
    milestones, and bulk deletion of a search index) was removed as dead
    commented-out code — recover it from version control if it is ever
    re-enabled.
    """
    def get(self):
        logging.info("In ReminderHandler")
        # NOTE(review): the query result is unused while the reminder body is
        # disabled; kept so behavior (including the datastore access) is
        # unchanged.
        users = models.User.all()
# cron job to create tagged index
class TaggedIndexHandler(BaseHandler):
    """Cron handler: (re)index every public, non-deleted post that mentions
    friends into the shared 'tagged_posts' full-text search index."""

    def get(self):
        all_good_things = models.GoodThing.all().order('-created').filter('public =',True).filter('deleted =',False).fetch(limit=None)
        logging.info("In TaggedIndex Handler. all_good_things = " + str(len(all_good_things)))
        # Fix: the target index never changes, so build the Index handle once
        # instead of once per post (it was re-created inside the loop).
        index = search.Index(name='tagged_posts')
        for good_thing in all_good_things:
            if(good_thing.num_mentions() > 0):
                mentioned_name = good_thing.get_mentions()
                names = [mention['name'].encode('utf-8') for mention in mentioned_name]
                # str(list).strip('[]') flattens the names into one searchable
                # comma-separated string.
                mention_str = str(names).strip('[]')
                good_thing_Document = search.Document(
                    doc_id=str(good_thing.key().id()),
                    fields=[search.TextField(name='mentions', value=mention_str)],
                    language = 'en')
                try:
                    index.put(good_thing_Document)
                except search.Error:
                    logging.exception('Failed to put document in tagged_posts')
# change user type
class AdminHandler(BaseHandler):
    """Admin page, plus the endpoint that records which app variant
    (public / private / placebo) the current user was assigned."""

    def get(self):
        """Render the admin template for the signed-in user."""
        tmpl = jinja_environment.get_template('admin.html')
        self.response.out.write(tmpl.render({
            'facebook_app_id':FACEBOOK_APP_ID,
            'current_user':self.current_user,
        }))

    # update the public/private field after the user has passed through the
    # intro screen.
    def post(self):
        user_id = str(self.current_user['id'])
        user = models.User.get_by_key_name(user_id)
        requested = self.request.get('user_type')
        logging.info("user_type=" + str(requested))
        # Map the requested variant name onto its numeric user_type code;
        # unknown values leave the field untouched (as before).
        type_codes = {'public': 2, 'private': 1, 'placebo': 0}
        if requested in type_codes:
            user.user_type = type_codes[requested]
        user.put()
|
import numpy as np
import os
import glob
import argparse
def get_obs_pred(data, observed_frame_num, predicting_frame_num, pos=True):
    """Cut `data` into (observation, prediction) windows and return the first.

    Windows start every `observed_frame_num` steps; each window holds
    `observed_frame_num` observed samples followed by `predicting_frame_num`
    samples to predict. Samples are (x, y) pairs when pos is True, scalars
    otherwise. Both returned arrays carry a leading batch axis of size 1.
    """
    width = 2 if pos else 1
    windows_obs = []
    windows_pred = []
    n_windows = 0
    if len(data) >= observed_frame_num + predicting_frame_num:
        n_windows = (len(data) - (observed_frame_num + predicting_frame_num)) // observed_frame_num + 1
        for k in range(n_windows):
            start = k * observed_frame_num
            obs_win = [data[start + i] for i in range(observed_frame_num)]
            pred_win = [data[start + observed_frame_num + j] for j in range(predicting_frame_num)]
            windows_obs.append(np.reshape(obs_win, [observed_frame_num, width]))
            windows_pred.append(np.reshape(pred_win, [predicting_frame_num, width]))
    obs = np.reshape(windows_obs, [n_windows, observed_frame_num, width])
    pred = np.reshape(windows_pred, [n_windows, predicting_frame_num, width])
    # Only the first window is returned (matching the original contract).
    return np.expand_dims(obs[0], axis=0), np.expand_dims(pred[0], axis=0)
def read_imas(file, pos1, pos2):
    """Collapse each pedestrian's five per-row IMAS predictions into one
    weighted score per row (weights 5..1), skipping the header row.

    `pos1` and `pos2` are accepted for interface compatibility but unused.
    Returns (scores_ped1, scores_ped2) as lists of floats.
    """
    data = np.genfromtxt(file, delimiter=',')
    weights = [5, 4, 3, 2, 1]
    cols_ped1 = [2, 3, 4, 5, 6]
    cols_ped2 = [8, 9, 10, 11, 12]
    scores_ped1 = []
    scores_ped2 = []
    # Row 0 is the CSV header; iterate the remaining rows.
    for row in data[1:]:
        scores_ped1.append(np.dot(row[cols_ped1], weights))
        scores_ped2.append(np.dot(row[cols_ped2], weights))
    return scores_ped1, scores_ped2
def read_positions(file):
    """Extract the (x, y) position columns of both pedestrians from a CSV,
    skipping the header row.

    Returns (pos1, pos2) as float arrays of shape (n_rows - 1, 2).
    """
    data = np.genfromtxt(file, delimiter=',')
    first = []
    second = []
    # Columns 2/3 hold pedestrian 1's x/y, columns 5/6 pedestrian 2's.
    for row in data[1:]:
        first.append([row[2], row[3]])
        second.append([row[5], row[6]])
    return (np.reshape(first, [len(first), 2]),
            np.reshape(second, [len(second), 2]))
def get_data(path):
    """Walk `path`, pair each dbn_prediction.csv with its sibling
    pedestrian_positions.csv, window the series, and save the stacked
    observation/prediction arrays under data/.

    Raises:
        ValueError: if no dbn_prediction.csv is found under `path`
            (previously this fell through to np.save with unbound locals
            and raised a confusing NameError).
    """
    observed_frame_num = 40
    predicting_frame_num = 30
    files = []
    imas_parts = []
    pos_parts = []
    pred_parts = []
    for r, d, f in os.walk(path):
        for file in f:
            if file == "dbn_prediction.csv":
                imas_file = os.path.join(r, file)
                positions_file = os.path.join(r, "pedestrian_positions.csv")
                pos1, pos2 = read_positions(positions_file)
                imas1, imas2 = read_imas(imas_file, pos1, pos2)
                obs_imas1, _ = get_obs_pred(imas1, observed_frame_num, predicting_frame_num, pos=False)
                obs_imas2, _ = get_obs_pred(imas2, observed_frame_num, predicting_frame_num, pos=False)
                obs_pos1, pred_pos1 = get_obs_pred(pos1, observed_frame_num, predicting_frame_num, pos=True)
                obs_pos2, pred_pos2 = get_obs_pred(pos2, observed_frame_num, predicting_frame_num, pos=True)
                # One source-directory entry per sequence (two pedestrians).
                files.extend([r] * (obs_pos1.shape[0] * 2))
                # Accumulate and concatenate once at the end instead of the
                # old first-iteration special case.
                imas_parts += [obs_imas1, obs_imas2]
                pos_parts += [obs_pos1, obs_pos2]
                pred_parts += [pred_pos1, pred_pos2]
    if not imas_parts:
        raise ValueError("No dbn_prediction.csv files found under: %s" % path)
    imas_obs = np.concatenate(imas_parts, axis=0)
    pos_obs = np.concatenate(pos_parts, axis=0)
    pred_obs = np.concatenate(pred_parts, axis=0)
    np.save('data/imas_obs_40.npy', imas_obs)
    np.save('data/pos_obs_40.npy', pos_obs)
    np.save('data/pos_pred_30.npy', pred_obs)
    np.save('data/files_obs40_pred30.npy', files)
if __name__ == "__main__":
    # Guard the CLI entry point so importing this module for its helper
    # functions no longer parses sys.argv or kicks off dataset generation.
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", help="Path to dataset")
    args = parser.parse_args()
    get_data(args.data)
|
from utils_dir import my_utils
import cv2
import os
import time
import argparse
import numpy as np
import random
def calculate_rgb_mean(img_paths):
    """Average the per-channel mean of each image over all readable images.

    Each image contributes its own per-channel mean (channels are BGR, as
    loaded by cv2.imread), so the result is an unweighted average across
    images regardless of their resolutions.

    Args:
        img_paths: iterable of image file paths.

    Returns:
        np.ndarray of shape (3,) with the averaged channel means.
    """
    start_time = time.time()
    num_err_files = 0
    per_image_means = []
    print("\nStart process {} images ...".format(len(img_paths)))
    for i, img_path in enumerate(img_paths):
        if (i + 1) % 100 == 0:
            print("Read {}/{} images ...".format(i + 1, len(img_paths)))
        try:
            img = cv2.imread(img_path, cv2.IMREAD_COLOR)
            # cv2.imread returns None for unreadable files; .shape then
            # raises AttributeError, counting the file as an error below.
            img_shape = img.shape
            channel_sum = np.sum(img, axis=0).sum(axis=0)
            per_image_means.append(channel_sum / (img_shape[0] * img_shape[1]))
        except Exception:
            # Fix: was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt.
            num_err_files += 1
    rgb_mean = np.mean(np.array(per_image_means), axis=0)
    exec_time = time.time() - start_time
    print("Num error files : ", num_err_files)
    print("Time : {:.2f} s".format(exec_time))
    print("RGB mean : ", rgb_mean)
    return rgb_mean
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--folder", help="Folder contain images to calculate rgb mean", required=True)
# parser.add_argument("--ext", default="jpg", help="Extension of image file, example jpg")
parser.add_argument("--type", default="list", choices=["img", "list"], help="type = img (folder contain img file), "
"type = list (folder contain file img list")
args = parser.parse_args()
if args.type == "img":
img_paths = my_utils.get_all_file_paths(args.folder)
# img_paths = [img_path for img_path in img_paths if img_path.endswith(args.ext)]
else:
img_list_path = os.path.join(args.folder, "img_list.txt")
img_list = my_utils.load_list(img_list_path)
img_list = [line.split(" ")[0] for line in img_list]
img_paths = [os.path.join(args.folder, "images", line) for line in img_list]
show_paths = random.sample(img_paths, 10)
print(show_paths)
rgb_mean = calculate_rgb_mean(img_paths)
|
from snovault.project.access_key import SnovaultProjectAccessKey
class FourfrontProjectAccessKey(SnovaultProjectAccessKey):
    """Fourfront-specific access-key policy overrides."""
    def access_key_has_expiration_date(self):
        # Fourfront access keys carry no expiration date (overrides the
        # snovault default behavior for this hook).
        return False
|
import tensorflow as tf
from keras.layers import Dense, Flatten, Lambda, Activation, MaxPooling2D
from keras.layers.convolutional import Convolution2D
from keras.models import Sequential
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import numpy as np
import cv2
import csv
tf.python.control_flow_ops = tf
# For each frame do the following transform:
# 1. Crop the top and bottom section of the image, since top part are usually environments not related to the navigation (Eg. trees, hills, posts) and bottom part is mostly occupied by the hood of the car.
# 2. Gaussion smoothing of kernel 3x3
# 3. Resize the result to a 64 by 64 square image.
# 4. Convert the image from RGB space to YUV space
def image_conversion(img):
    """Crop, smooth, resize to 64x64, and convert a BGR frame to YUV."""
    # Keep rows 50..139: drop sky/trees above and the car hood below.
    img = img[50:140,:,:]
    img = cv2.GaussianBlur(img, (3,3), 0)
    img = cv2.resize(img,(64, 64), interpolation = cv2.INTER_AREA)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    return img
def data_generator(images, angles, batch_size):
    """Endless batch generator of (image_batch, angle_batch) for fit_generator.

    Yields batches of `batch_size` preprocessed 64x64 YUV images with their
    steering angles. Frames with |angle| > 0.3 also contribute a horizontally
    flipped copy with the negated angle, to balance the turn distribution.
    The (images, angles) pairing is reshuffled after every emitted batch.
    """
    x,predict = ([],[])
    images, angles = shuffle(images, angles)
    while True:
        for i in range(len(angles)):
            img = cv2.imread(images[i])
            angle = angles[i]
            img = image_conversion(img)
            x.append(img)
            predict.append(angle)
            if len(x) == batch_size:
                yield (np.array(x), np.array(predict))
                x, predict = ([],[])
                images, angles = shuffle(images, angles)
            # if has large steering angle, append the mirror as training data as well
            if abs(angle) > 0.3:
                x.append(cv2.flip(img, 1))
                predict.append(angle*-1)
                if len(x) == batch_size:
                    yield (np.array(x), np.array(predict))
                    x, predict = ([],[])
                    images, angles = shuffle(images, angles)
# model is Keras model
def addCNNAndMaxPoolingLayers(model, filter_size, kernel_size):
    """Append a strided convolution (ReLU) + 2x2 max-pool block to `model`."""
    model.add(Convolution2D(filter_size, kernel_size, kernel_size, border_mode='same', subsample=(2, 2)))
    model.add(Activation('relu'))
    # 2x2 pool window slid with stride 1.
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
# model is Keras model
def addFullyConnectedLayers(model, output):
    """Append a Dense layer of `output` units followed by ReLU to `model`."""
    model.add(Dense(output))
    model.add(Activation('relu'))
### Make reading data ###
# loading csv file into memory
# Read the simulator driving log and build (image path, steering angle)
# training pairs from the center, left and right cameras of each frame.
data_dir = '../P3_training_data/'
with open(data_dir + '/driving_log.csv', newline='') as f:
    csv_data = list(csv.reader(f, skipinitialspace=True,
                    delimiter=',', quoting=csv.QUOTE_NONE))
images = []
angles = []
#csv data: center,left,right,steering,throttle,brake,speed
for row in csv_data[1:]:
    center = row[0]
    left = row[1]
    right = row[2]
    angle = float(row[3])
    speed = float(row[6])
    if speed < 0.5:
        # if the car is almost still, training such data won't make too much sense
        continue
    # For each row, generate left, center and right training data. Since the steering angle is based on the center camera,
    # Needs to add offset for the visual from left and right cameras
    images.append(data_dir + center)
    angles.append(angle)
    # Below +/-0.23 should be a constant generated from camera calibration. For now seems this assumed value works well
    images.append(data_dir + left)
    angles.append(angle + 0.23)
    images.append(data_dir + right)
    angles.append(angle - 0.23)
images = np.array(images)
angles = np.array(angles)
# split into train/test sets. Use 5% as testing data
images_train, images_test, angles_train, angles_test = train_test_split(images, angles, test_size=0.05, random_state=42)
print('Training size:', images_train.shape[0])
print('Testing size:', images_test.shape[0])
### Make the Keras model ###
# A mimic of https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
model = Sequential()
# Normalize pixel values to [-1, 1].
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(64, 64, 3)))
# Convolutional and maxpooling layers
addCNNAndMaxPoolingLayers(model, 24, 5)
addCNNAndMaxPoolingLayers(model, 36, 5)
addCNNAndMaxPoolingLayers(model, 48, 5)
addCNNAndMaxPoolingLayers(model, 64, 3)
addCNNAndMaxPoolingLayers(model, 64, 3)
model.add(Flatten())
# Fully connected layers
addFullyConnectedLayers(model, 1164)
addFullyConnectedLayers(model, 100)
addFullyConnectedLayers(model, 50)
addFullyConnectedLayers(model, 10)
# Single linear output: the predicted steering angle (regression).
model.add(Dense(1))
model.compile(optimizer=Adam(0.001), loss="mse")
model.summary()
generator = data_generator(images_train, angles_train, 64)
# Fix: the validation generator previously reused the TRAINING data
# (images_train/angles_train), so val_loss could never reveal overfitting.
# Validate on the held-out split instead.
validation_generator = data_generator(images_test, angles_test, 64)
history = model.fit_generator(generator,
                              validation_data = validation_generator,
                              nb_val_samples=6000,
                              samples_per_epoch=24000,
                              nb_epoch=10,
                              verbose=1)
print('Eventual Loss: ', model.evaluate_generator(data_generator(images_test, angles_test, 64), 64))
# Save model data
model.save('./model.h5')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Dec 07 14:27 2018
@author: phongdk
"""
"""
TODO:
1. get all unique users from various sources
2. Compute score base on its properties like gender, age, hardware, or so
"""
import os
import gc
import time
import numpy as np
import pandas as pd
import sys
sys.path.append('src/property')
sys.path.append('src/administrativeArea')
import warnings
warnings.filterwarnings("ignore")
# from score import map_score
from airline import Airline
from hotel import Hotel
from luxury import Luxury
from tour import Tour
from shopping import Shopping
from resort import Resort
from gender import Gender
from age import Age
# from os_name import OS_name
from device import Device
from address import Address
# from vietnamADM import AdministrativeArea_KDTree
import dask.dataframe as dd
from dask.distributed import Client, LocalCluster
VIETNAM_REGIONS = "external_data/location/Vietnam regions.xlsx"
def load_data(path, filename, nrows=None):
    """Read a CSV export into a DataFrame, keeping user_id as a string key.

    Note: the export may repeat a user_id across different days; callers are
    expected to deduplicate/aggregate as needed.
    """
    full_path = os.path.join(path, filename)
    print("Load data from file : {}".format(filename))
    return pd.read_csv(full_path, dtype={'user_id': str}, nrows=nrows)
def process_url_property(path, filename, property):
    """Score each user on one URL-category property (airline, shopping, ...).

    A user counts as an "active customer" for the property when, on at least
    one day, their page-view count reached the property's threshold.

    Returns:
        Series '<name>_score' indexed by user_id.
    """
    name = property.get_name()
    threshold = property.get_threshold()
    print('------------------Process {} -----------------'.format(name.upper()))
    raw = load_data(path, filename)
    print('Filter with THRESHOLD : {}'.format(threshold))
    per_day = pd.DataFrame()
    per_day['user_id'] = raw['user_id']
    print("number unique users : {}".format(len(per_day['user_id'].unique())))
    # 1 when that day's count reaches the threshold, else 0.
    per_day['flag'] = raw['count'].apply(lambda c: int(c >= threshold))
    # A user is active iff any single day was above threshold, i.e. the sum
    # of their daily flags is positive.
    per_user = per_day.groupby('user_id').sum()
    per_user[name] = per_user['flag'].apply(lambda total: int(total > 0))
    print('Unique number of users in days is considered "active customer" : {}'.format(len(per_user[per_user['flag'] > 0])))
    print('Unique number of users in days is considered "non-active customer" : {}'.format(len(per_user[per_user['flag'] == 0])))
    score_col = name + "_score"
    per_user[score_col] = per_user[name].apply(lambda active: property.get_score(active))
    return per_user[score_col]
def process_demography(path, filename, list_properties, nrows=None):
    """Score users on demographic properties (gender, age).

    Returns:
        DataFrame indexed by user_id with one '<name>_score' column per
        property in list_properties.
    """
    print("---------------------------PROCESS DEMOGRAPHY------------------")
    demo = load_data(path, filename, nrows=nrows)
    for col in ['gender', 'age']:
        # Small categorical codes; int8 keeps memory down.
        demo[col] = demo[col].astype(np.int8)
    score_columns = []
    for prop in list_properties:
        col_name = f"{prop.get_name()}_score"
        score_columns.append(col_name)
        demo[col_name] = demo[prop.get_name()].apply(lambda v: prop.get_score(v))
    demo.set_index('user_id', inplace=True)
    return demo[score_columns]
def process_hardware(path, filename, property, nrows=None):
    """Score each user's hardware profile (os, device class, cpu, ram, screen).

    Row-wise scoring is parallelized with dask over N_JOBS partitions.

    Returns:
        Series '<name>_score' indexed by user_id.
    """
    print("---------------------------PROCESS HARDWARE------------------")  # convert to DASK for parallel processing
    df = load_data(path, filename, nrows=nrows)
    # Fix: these columns were cast to np.int8, which silently wraps any
    # realistic value (e.g. sys_ram_mb=8192, screen_height=1080), corrupting
    # the inputs to property.get_score. int32 holds the real ranges; -1
    # still marks "missing".
    for col in ['sys_ram_mb', 'screen_height', 'screen_width']:
        df[col] = df[col].fillna(-1).astype(np.int32)
    for col in ['cpu']:
        df[col] = df[col].fillna(-1)
    print('Get score device')
    print(df.shape)
    df_dask = dd.from_pandas(df, npartitions=N_JOBS)
    score_device = df_dask[['os_name', 'hw_class', 'cpu', 'sys_ram_mb', 'screen_height',
                            'screen_width']].apply(lambda x: property.get_score(x), axis=1).compute()
    col_name = f"{property.get_name()}_score"
    df[col_name] = score_device
    df.set_index('user_id', inplace=True)
    return df[col_name]
def get_code2address(filename="external_data/location/Vietnam regions.xlsx"):
CITY = ["hà nội", "hồ chí minh", "đà nẵng", "hải phòng", "cần thơ"]
df = pd.read_excel(filename, sheet_name='Districts')
df.columns = ["district_id", "district_name", "province_id", "province_name"]
df.set_index("district_id", inplace=True)
df["district_name"] = df["district_name"].apply(lambda x: x.lower().strip())
df["province_name"] = df["province_name"].apply(lambda x: x.lower().strip())
df["address"] = df.apply(lambda x: x["province_name"] if x["province_name"] in CITY else x["district_name"], axis=1)
new_dict = df["address"].to_dict()
return new_dict
def process_location(path, filename, property, nrows=None):
    """Score users on where they live and how many provinces they appear in.

    Returns:
        DataFrame indexed by user_id with columns
        ['address_score', 'travel_score'].
    """
    print("---------------------------PROCESS LOCATION------------------")
    code2address = get_code2address(filename=VIETNAM_REGIONS)
    address_map_score = property.get_address_map_score()
    travel_map_score = property.get_travel_map_score()
    df = load_data(path, filename, nrows=nrows)
    # NOTE(review): assumes characters 1-3 of the location code identify the
    # province — confirm against the exporter's code format.
    df["province"] = df["location"].apply(lambda x: str(x)[1:4])  # get name province
    df = df.drop_duplicates(subset=["user_id", "province"], keep="first")
    # compute address_score for each address using a predefined dict
    df["address"] = df["location"].map(code2address).map(address_map_score).fillna(address_map_score["others"])
    score = df.groupby('user_id').agg({'address': 'mean', 'province': 'size'})  # count how many provinces for each uid
    score.columns = ['address_score', 'num_travel']
    # compute travel_score for each uid using a predefined dict
    score['travel_score'] = score['num_travel'].map(travel_map_score).fillna(travel_map_score["others"])
    return score[["address_score", "travel_score"]]
def get_unique_user_id(list_df):
    """Return the sorted union of user ids (frame indexes) across frames."""
    all_ids = []
    for frame in list_df:
        all_ids += list(frame.index)
    print("Number of unique users (with duplicates) : {}".format(len(all_ids)))
    unique_users = sorted(set(all_ids))
    print("Number of unique users : {}".format(len(unique_users)))
    return unique_users
def merge_data(list_df):
    """Outer-join all frames on their index; missing cells become 0."""
    print("-----------------------Merge data-----------------------------")
    merged = None
    for frame in list_df:
        if merged is None:
            merged = frame[:]
        else:
            merged = pd.merge(merged, frame, left_index=True, right_index=True, how='outer')
    merged.fillna(0, inplace=True)
    return merged
def process():
    """Run the full scoring pipeline and write the merged per-user scores.

    Uses module-level globals set in __main__ (PATH, filename_*,
    class_property_based_url, N_JOBS): scores hardware, demography, location
    and each URL category, outer-joins everything on user_id, and dumps the
    result as a gzipped CSV.
    """
    nrows = None
    start = time.time()
    df_hardware = process_hardware(PATH, filename_hardware, Device('device'), nrows=nrows)
    print(df_hardware.head())
    print("Time ------------------------------------------- {:.2f} (s)".format(time.time() - start))
    start = time.time()
    df_demography = process_demography(PATH, filename_demography, [Gender('gender'), Age('age')], nrows=nrows)
    print(df_demography.head())
    print("Time ------------------------------------------- {:.2f} (s)".format(time.time() - start))
    start = time.time()
    df_location = process_location(PATH, filename_location, Address('address'), nrows=nrows)
    print(df_location.head())
    print("Time ------------------------------------------- {:.2f} (s)".format(time.time() - start))
    # start = time.time()
    list_df = [df_demography, df_hardware, df_location]
    for (filename, class_property) in zip(filename_based_url, class_property_based_url):
        list_df.append(process_url_property(PATH, filename, class_property))
    # properties = [Gender('gender'), Age('age')] + class_property_based_url + [Device('device')]
    # print(len(list_df), len(properties))
    start_time = time.time()
    df_total = merge_data(list_df)
    print("Time ------------------------------------------- {:.2f} (s)".format(time.time() - start_time))
    print(df_total.head())
    print(df_total.shape)
    # NOTE(review): output path is hard-coded to a personal home directory —
    # consider making it configurable.
    df_total.to_csv('/home/phongdk/tmp/score.gz', compression='gzip', index=True)
    # # print("--- %s seconds ---" % (time.time() - start_time))
    # # print("Memory usage of properties dataframe is :", df_total.memory_usage().sum() / 1024 ** 2, " MB")
    #
    # # properties = [Gender('gender'), Age('age'), OS_name('os_name')] + class_property_based_url
    # compute_score(df_total, properties)
    # print("--- %s seconds ---" % (time.time() - start_time))
if __name__ == '__main__':
    # Date window of the input exports; every filename below embeds it.
    from_date = '2019-04-30'
    end_date = '2019-05-13'
    PATH = '/home/phongdk/data_user_income_targeting/2019-05-13'
    # have to put cluster, client inside main function
    cluster = LocalCluster(ip="0.0.0.0")
    client = Client(cluster)
    print(client)
    # Number of dask partitions used by process_hardware.
    N_JOBS = 32
    # NOTE(review): PATH is where the inputs are *read* from, yet it is
    # created if missing — load_data would still fail on an empty directory;
    # verify intent.
    if not os.path.exists(PATH):
        os.makedirs(PATH)
    filename_demography = "demography_from_{}_to_{}.csv.gz".format(from_date, end_date)
    filename_hardware = "hardware_from_{}_to_{}.csv.gz".format(from_date, end_date)
    filename_location = "location_from_{}_to_{}.csv.gz".format(from_date, end_date)
    filename_airline = "airline_from_{}_to_{}.csv.gz".format(from_date, end_date)
    filename_luxury = "luxury_from_{}_to_{}.csv.gz".format(from_date, end_date)
    filename_booking_resort = "booking_resort_from_{}_to_{}.csv.gz".format(from_date, end_date)
    filename_booking_hotel = "booking_hotel_from_{}_to_{}.csv.gz".format(from_date, end_date)
    filename_tour = "tour_from_{}_to_{}.csv.gz".format(from_date, end_date)
    filename_shopping = "shopping_from_{}_to_{}.csv.gz".format(from_date, end_date)
    # URL-category inputs and their matching scorer objects, index-aligned.
    filename_based_url = [filename_airline, filename_luxury, filename_booking_resort, filename_booking_hotel,
                          filename_tour, filename_shopping]
    class_property_based_url = [Airline('airline'), Luxury('luxury'), Resort('resort'), Hotel('hotel'),
                                Tour('tour'), Shopping('shopping')]
    assert len(filename_based_url) == len(class_property_based_url)
    process()
|
# Load libraries
import pandas as pd  # Data analysis library
# pandas.tools was removed in pandas 0.20; scatter_matrix now lives in pandas.plotting
from pandas.plotting import scatter_matrix  # Graphics helper
import matplotlib.pyplot as plt  # Graphics library
from sklearn import model_selection  # Train/test split and cross-validation utilities
from sklearn.metrics import classification_report  # Text report of the main classification metrics
from sklearn.metrics import confusion_matrix  # Confusion matrix to evaluate classification accuracy
from sklearn.metrics import accuracy_score  # Subset accuracy for (multilabel) classification
from sklearn.linear_model import LogisticRegression  # LogisticRegression model
from sklearn.tree import DecisionTreeClassifier  # DecisionTreeClassifier model
from sklearn.neighbors import KNeighborsClassifier  # KNeighborsClassifier model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis  # LinearDiscriminantAnalysis model
from sklearn.naive_bayes import GaussianNB  # GaussianNB model
from sklearn.svm import SVC  # Support-vector classifier
import io
import requests

# Load dataset
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']  # column names
dataset = pd.read_csv("IRIS.csv", names=names)  # associate data with our column names

# Split-out validation dataset
array = dataset.values  # raw values as a numpy array
X = array[:, 0:4]  # feature columns
Y = array[:, 4]    # class label column
validation_size = 0.80  # fraction held out for validation (the remaining 20% trains)
seed = 7  # fixed random seed so splits/folds are reproducible
scoring = 'accuracy'  # cross-validation metric
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)

# Spot Check Algorithms
models = []  # (label, estimator) pairs to compare
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))

# evaluate each model in turn
results = []
names = []  # NOTE: reuses (shadows) the column-name list above
for name, model in models:
    # shuffle=True is required when random_state is given (sklearn >= 0.24 raises otherwise)
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
    cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)

knn = KNeighborsClassifier()  # KNeighborsClassifier was the one with best accuracy_score
knn.fit(X_train, Y_train)  # fit the data to train knn
predictions = knn.predict(X_validation)  # get the predictions from the training
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
|
# Lists are written with []
# empty
empty_list = []
empty_list2 = list()
print(empty_list == empty_list2) # True
# A list can hold any mix of types, including other lists
odd = [1, 3, 7]
e = [1, 2, ['Life', 'is']]
# Indexing and slicing work almost exactly like strings
print(e[2][1])
print(e[1:])
# List concatenation
print([1,2,3] + ['a','b'])
# List repetition
print([1,2,3] * 2)
# List length
print(len([1,2,3,]))
# List manipulation
a = [1, 2, 3, 4]
# Membership test
print(2 in a)
a[1] = 'a'
# del is a Python statement, not a list method
del a[2]
print(a)
# del with a slice removes n elements at once
del a[1:]
print(a)
# List methods
a = [1,2,3,4]
a.append(5) # append one element at the end (push)
a.sort() # sorts the list itself, in place
b = sorted(a) # returns a sorted copy of the list
a.sort(reverse=True) # sort in descending order
a.reverse() # reverses in place (after the descending sort this restores ascending order)
a.index(3) # find index by value: first index of 3; like str, raises if absent
a.insert(0, 'a') # insert into the list at a given index
a.remove('a') # remove by value: finds and deletes the first 'a'
# a.remove('b') # removing a missing value also raises an error
print(a)
a.pop() # stack pop; passing -1 does the same
a.pop(0) # an index can be given; 0 effectively makes it a dequeue
print(a)
# Count how many times a value occurs in the list
print(a.count(2))
# extend: merge another list in place
e1 = [1,2,3]
e1.extend(['a', 'b'])
e2 = [1,2,3] + ['a', 'b']
print(f'e1 is {e1}')
print(f'e2 is {e2}')
print(e1 == e2) # True
# Copying a list
# copy() is documented as shallow; list() and slicing are shallow copies too,
# so nested objects are still shared.
a = [1,2,3]
b = a.copy()
c = list(a)
d = a[:]
""" file: utilities.py (bomber)
author: Jess Robertson
CSIRO Minerals Resources Flagship
date: June 2015
description: utility functions for bomber
"""
from __future__ import print_function, division
from .converters import grid_to_geotiff
import requests
import subprocess
def download(uri, options, filename, fmt='geotiff'):
    """ Fetch compressed grid data, decompress it, and optionally convert it
    to GeoTIFF. Returns the local filename on success, None on failure.
    """
    response = requests.get(uri.format(**options))
    if not response.ok:
        print('Download failed')
        return None

    # Persist the compressed payload, then decompress it in place
    zipfilename = filename + '.Z'
    with open(zipfilename, 'wb') as sink:
        sink.write(response.content)
    subprocess.call(['uncompress', zipfilename])

    # Convert data to geotiff
    if fmt.lower() == 'geotiff':
        filename = grid_to_geotiff(filename)
    print('Downloaded data to {0}'.format(filename))
    return filename
def option_checker(**arguments):
    """ Validate keyword options against their allowed values.

    Each keyword maps to a ``(value, expected)`` pair, where ``expected``
    is the collection of allowed values for that argument. Raises
    ValueError for the first argument whose value is not allowed.
    (Fixes the docstring typo "balues".)
    """
    err_str = ('Argument {0}={1} is unknown. Allowed '
               'values for {0} are {2}.')
    for arg, (value, expected) in arguments.items():
        if value not in expected:
            err = err_str.format(arg, value, expected)
            raise ValueError(err)
|
# coding: utf-8
# Reference: http://learnpythonthehardway.org/python3/ex4.html
# Enter source code, add, and commit
# Enter a comment for each line and commit
# Try the Study Drills and commit if necessary
# print 'abc' -> print('abc')
# print 'abc', 123 -> print('abc %s' % (123))
# print 'abc', abc, 'def' -> print('abc %s def' % (abc))
# variables (identifiers are Korean: cars, seats-per-car)
자동차 = 100
차_안_공간 = 4.0
# declarations (drivers, passengers)
운전사 = 30
승객 = 90
# operations
운행_안하는_차 = 자동차 - 운전사  # cars not driven = cars - drivers
운행하는_차 = 운전사  # cars driven (one per driver)
총_정원 = 운행하는_차 * 차_안_공간  # total capacity of the cars driven
차당_평균_승객 = 승객 / 운행하는_차  # average passengers per car
# print lines
print("자동차", 자동차, "대가 있습니다.")
print("운전자는", 운전사, "명 뿐입니다.")
print("오늘은 빈 차가", 운행_안하는_차, "대일 것입니다.")
print("오늘은", 총_정원, "명을 태울 수 있습니다.")
print("함께 탈 사람은", 승객, "명 있습니다.")
print("차마다", 차당_평균_승객, "명 정도씩 타야 합니다.")
|
# Generated by Django 3.0.3 on 2020-04-06 20:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required 'subtitle' field to Profile. default=1 is a one-off
    # value used only to back-fill existing rows; preserve_default=False
    # drops it from the final field definition.

    dependencies = [
        ('accounts', '0006_auto_20200406_2222'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='subtitle',
            field=models.CharField(default=1, max_length=60, verbose_name=' نبذه عنك'),
            preserve_default=False,
        ),
    ]
|
from sqlalchemy import Column, BigInteger, DateTime, LargeBinary, Integer, Float
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm import relationship
from db import Base, table_args
class ElecData(object):
    """Factory for per-station electrical waveform tables.

    Declares one SQLAlchemy model per (station_id, inner_id) pair, with the
    class/table name "elec_data_<station>_<inner>", and memoizes the
    generated classes so each table is declared exactly once.
    """

    _mapper = {}  # class name -> generated model class
    base_class_name = "elec_data"

    @classmethod
    def model(cls, station_id: int, inner_id: int, base: DeclarativeMeta = Base):
        """Return (creating on first use) the model for this station/sensor."""
        class_name = "{0}_{1}_{2}".format(cls.base_class_name, station_id, inner_id)
        if class_name not in cls._mapper:
            attrs = dict(
                __module__=__name__,
                __name__=class_name,
                __tablename__=class_name,
                id=Column(BigInteger, primary_key=True),
                time=Column(DateTime, index=True),
                ucur=Column(LargeBinary, nullable=False),
                vcur=Column(LargeBinary, nullable=False),
                wcur=Column(LargeBinary, nullable=False),
                uvolt=Column(LargeBinary, nullable=False),
                vvolt=Column(LargeBinary, nullable=False),
                wvolt=Column(LargeBinary, nullable=False),
                __table_args__=table_args,
            )
            cls._mapper[class_name] = type(class_name, (base,), attrs)
        return cls._mapper[class_name]
class VibData(object):
    """Factory for per-station vibration tables.

    Same memoized dynamic-model pattern as ElecData, with the class/table
    name "vib_data_<station>_<inner>".
    """

    _mapper = {}  # class name -> generated model class
    base_class_name = "vib_data"

    @classmethod
    def model(cls, station_id: int, inner_id: int, base: DeclarativeMeta = Base):
        """Return (creating on first use) the model for this station/sensor."""
        class_name = "{0}_{1}_{2}".format(cls.base_class_name, station_id, inner_id)
        if class_name not in cls._mapper:
            attrs = dict(
                __module__=__name__,
                __name__=class_name,
                __tablename__=class_name,
                id=Column(Integer, primary_key=True),
                time=Column(DateTime, index=True),
                rms=Column(Float, default=0),
                ima=Column(LargeBinary, nullable=False),
                __table_args__=table_args,
            )
            cls._mapper[class_name] = type(class_name, (base,), attrs)
        return cls._mapper[class_name]
|
import pygame
from game import gamelogic
from game import SettingGame
from PIL import Image, ImageDraw, ImageOps
RED = (255, 0, 0)
COLOR_PADDLE = (255, 0, 0)
COLOR_BOARD_LINE = (0, 255, 0)
COLOR_BACKGROUND = (100, 0, 100)
COLOR_BALL = (222, 222, 222)
class Painter:
    """Renders the game: background, board border, the two paddle sprites
    (pre-rendered from PIL images), the ball, and the score line."""

    def __init__(self, SCREEN: pygame.display, game: gamelogic.Game, game_setting: SettingGame.SettingGame):
        self.SCREEN = SCREEN
        self.GAME = game
        self.WIDTH = game_setting.WIDTH
        self.HEIGHT = game_setting.HEIGHT
        # NOTE(review): center_X is derived from HEIGHT and center_Y from
        # WIDTH — possibly swapped; confirm intended meaning.
        self.center_X = game_setting.HEIGHT // 2
        self.center_Y = game_setting.WIDTH // 2
        self.font = pygame.font.SysFont(None, 40)
        self.LEFT_TOP_CORNER = game_setting.LEFT_TOP_CORNER
        self.LEFT_BOT_CORNER = game_setting.LEFT_BOT_CORNER
        self.RIGHT_TOP_CORNER = game_setting.RIGHT_TOP_CORNER
        self.RIGHT_BOT_CORNER = game_setting.RIGHT_BOT_CORNER
        self.paddle_size_x = game_setting.paddle_size
        self.paddle_size_y = 100
        self.paddle_r = game_setting.paddle_r_size
        self.ball_size = game_setting.ball_size
        # Pre-render one paddle sprite per player (player 2's is rotated 180°).
        self.paddle_image_1 = self.create_paddle_py_image(self.paddle_r, 1)
        self.paddle_image_2 = self.create_paddle_py_image(self.paddle_r, 2)
        # NOTE(review): create_paddle_py_image just stored the actual sprite
        # height in paddle_image_1_size_y; this overwrites it with a fixed
        # 100 — confirm which value is intended.
        self.paddle_image_1_size_y = 100
        self.paddle_image_2_size_y = 100
        self.paddle_image_size = 100

    def draw_rect(self, pos_x, pos_y):
        # NOTE(review): pos_x/pos_y are ignored; the rectangle is always
        # drawn at (100, 100).
        pygame.draw.rect(self.SCREEN, (255, 0, 0,), (100, 100, self.paddle_size_x, self.paddle_size_y))

    def create_paddle_py_image(self, paddle_r, player_nr):
        """Build a paddle sprite: draw a filled disc, crop a thin slice of
        its right edge (middle third vertically), scale it down, and convert
        it to a pygame surface.

        NOTE(review): the paddle_r parameter is ignored — self.paddle_r is
        used throughout; confirm whether the parameter should be honored.
        """
        paddle_image = Image.new("RGBA", (self.paddle_r, self.paddle_r))
        paddle_draw = ImageDraw.Draw(paddle_image)
        paddle_draw.pieslice((0, 0, self.paddle_r, self.paddle_r), 0, 360, fill=COLOR_PADDLE)
        # Crop box: keep the rightmost 5% horizontally, middle third vertically.
        cut = (
            self.paddle_r - self.paddle_r * 0.1 / 2, self.paddle_r // 3, self.paddle_r,
            self.paddle_r - self.paddle_r // 3)
        paddle_image = paddle_image.crop(cut)
        paddle_image = self.resize_image(0.3, paddle_image)
        mode = paddle_image.mode
        size = paddle_image.size
        # Record the final sprite dimensions for blit positioning.
        self.paddle_size_x = size[0]
        self.paddle_image_1_size_y = size[1]
        if player_nr == 2:
            # Player 2's paddle faces the opposite side.
            paddle_image = paddle_image.rotate(180)
        data = paddle_image.tobytes()
        paddle_image = pygame.image.fromstring(data, size, mode)
        return paddle_image

    def draw_paddle_1(self, pos):
        # Anchored so the sprite's right edge sits at pos, vertically centered.
        self.SCREEN.blit(self.paddle_image_1, (pos[0] - self.paddle_size_x, pos[1] - self.paddle_image_1_size_y // 2))
        # pygame.draw.rect(self.SCREEN, (255, 222, 222), (pos[0], pos[1], 5, 5))

    def draw_paddle_2(self, pos):
        # Anchored with the sprite's left edge at pos, vertically centered.
        self.SCREEN.blit(self.paddle_image_2, (pos[0], pos[1] - self.paddle_image_1_size_y // 2))
        # pygame.draw.rect(self.SCREEN, (255, 222, 222), (pos[0], pos[1] - 10, 5, 5))
        # pygame.draw.rect(self.SCREEN, (255, 222, 222), (pos[0], pos[1] - 30, 5, 5))
        # pygame.draw.rect(self.SCREEN, (255, 222, 222), (pos[0], pos[1] - 20, 5, 5))
        # pygame.draw.rect(self.SCREEN, (255, 222, 222), (pos[0], pos[1] - 40, 5, 5))
        # pygame.draw.rect(self.SCREEN, (255, 222, 222), (pos[0], pos[1] - 50, 5, 5))

    def resize_image(self, factory, image: Image):
        """Scale *image* by the factor *factory* (e.g. 0.3 -> 30% size)."""
        size = image.size
        new_size = (int(size[0] * factory), int(size[1] * factory))
        image = image.resize(new_size)
        return image

    def draw_background(self):
        self.SCREEN.fill(COLOR_BACKGROUND)

    def draw_board_line(self):
        # Outline the playing field between the four configured corners.
        pygame.draw.line(self.SCREEN, COLOR_BOARD_LINE, self.LEFT_TOP_CORNER, self.RIGHT_TOP_CORNER)
        pygame.draw.line(self.SCREEN, COLOR_BOARD_LINE, self.LEFT_BOT_CORNER, self.RIGHT_BOT_CORNER)
        pygame.draw.line(self.SCREEN, COLOR_BOARD_LINE, self.LEFT_TOP_CORNER, self.LEFT_BOT_CORNER)
        pygame.draw.line(self.SCREEN, COLOR_BOARD_LINE, self.RIGHT_TOP_CORNER, self.RIGHT_BOT_CORNER)

    def draw_ball(self, pos):
        pygame.draw.circle(self.SCREEN, COLOR_BALL, (pos[0], pos[1]),
                           self.ball_size // 2)

    def draw_all(self):
        """Redraw one full frame, back to front."""
        self.draw_background()
        self.draw_paddle_1(self.GAME.paddle_1_pos())
        self.draw_paddle_2(self.GAME.paddle_2_pos())
        self.draw_board_line()
        self.draw_ball(self.GAME.ball_pos())
        self.draw_score()

    def draw_score(self):
        # NOTE(review): img1 holds player 2's score but is blitted leftmost,
        # and img3 (player 1) rightmost — confirm the intended display order.
        img3 = self.font.render(str(self.GAME.player_1_score), True, (0, 0, 255))
        img2 = self.font.render(":", True, (0, 0, 255))
        img1 = self.font.render(str(self.GAME.player_2_score), True, (0, 0, 255))
        self.SCREEN.blit(img1, (500 + 80, 10))
        self.SCREEN.blit(img2, (525 + 80, 10))
        self.SCREEN.blit(img3, (540 + 80, 10))
|
import os

# Root of the project; everything below is resolved relative to it.
PROJECT_PATH = os.getenv("PROJECT_PATH")
if PROJECT_PATH is None:
    # Fail fast with a clear message instead of the opaque TypeError that
    # os.path.join(None, ...) would raise below.
    raise RuntimeError("PROJECT_PATH environment variable is not set")

# Host of the backing server (not joined below, so it may be absent).
SERVER_HOST = os.getenv("SERVER_HOST")

CONFIG_PATH = os.path.join(PROJECT_PATH, "config")
DATA_PATH = os.path.join(PROJECT_PATH, "data")
ENVS_PATH = os.path.join(PROJECT_PATH, "envs")
LOGS_PATH = os.path.join(PROJECT_PATH, "logs")

config_file = os.path.join(CONFIG_PATH, "config.yaml")
text_file = os.path.join(DATA_PATH, "quote.txt")
|
from queue import PriorityQueue
import threading
import random
import time
class MTQ:
    """Lock-guarded wrapper around a queue (e.g. queue.PriorityQueue).

    Fixes vs. the original: pop() called ``self.q.pop()``, which does not
    exist on queue objects (AttributeError) — it now uses ``get()`` — and
    both methods hold the lock via ``with`` so it is released even if the
    queue operation raises.
    """

    def __init__(self, q, lock):
        self.q = q          # underlying queue
        self.lock = lock    # guards all queue access

    def push(self, val):
        """Add *val* to the queue under the lock."""
        with self.lock:
            self.q.put(val)
        print("ADD", val)

    def pop(self):
        """Remove and report the next item; no-op if the queue is empty."""
        with self.lock:
            # empty() is only reliable while we hold the lock
            if not self.q.empty():
                data = self.q.get()
                print("REMOVE", data)
def p(Q):
    """Producer helper: push one random (priority, value) pair, then pause.

    Fix: random.randint requires explicit bounds — the original called
    random.randint() with no arguments, which raises TypeError.
    """
    Q.push((random.randint(1, 100), random.randint(1, 100)))
    time.sleep(2)
def c(Q):
    """Consumer helper: pop a single item from the shared queue wrapper."""
    Q.pop()
# Demo: odd-numbered threads produce random items, even-numbered ones consume.
lock = threading.Lock()
q = MTQ(PriorityQueue(), lock)  # fixed: the original constructed "MPQ", an undefined name
t = []
for i in range(1, 12):
    if (i % 2):
        # Producer: push a (priority, value) pair. The original targeted
        # q.add, which is not an MTQ method, with misplaced Thread args.
        t.append(threading.Thread(target=q.push, args=((random.randint(1, 100), i),)))
    else:
        # Consumer: pop one item (q.get was not an MTQ method either).
        t.append(threading.Thread(target=q.pop))
for th in t:
    th.start()
for th in t:
    th.join()
from log_into_wiki import *
import mwparserfromhell

# Maximum number of pages to create; -1 disables the limit.
limit = -1
site = login('bot', 'fortnite-esports')
summary = 'Automatically create player pages for Power Rankings'
# Query players who have PR points and a roster link but no existing page.
result = site.api('cargoquery',
    tables = 'TournamentResults=TR,TournamentResults__RosterLinks=RL,_pageData=PD',
    join_on = 'TR._ID=RL._rowID,RL._value=PD._pageName',
    where = 'PD._pageName IS NULL AND RL._value IS NOT NULL AND TR.PRPoints > "0"',
    fields = 'RL._value=name',
    group_by = 'RL._value',
    limit = 'max'
)
# Base each new page on the wiki's player template, with the <noinclude>
# markers stripped out.
default_text = site.pages['Help:Player Template'].text()
default_text = default_text.replace('<noinclude>','').replace('</noinclude>','').strip()
wikitext = mwparserfromhell.parse(default_text)
this_template = None
for template in wikitext.filter_templates():
    if template.name.matches('Infobox Player'):
        this_template = template
        this_template.add('pronly','Yes')
        break
def get_residency(name):
    """Look up a player's region via a Cargo query.

    Returns the region only when exactly one distinct tournament region is
    found for the player; otherwise returns '' (ambiguous or unknown).
    """
    print(name)
    response = site.api('cargoquery',
                        tables='Tournaments=T,TournamentResults=TR,TournamentResults__RosterLinks=RL',
                        join_on='T._pageName=TR.OverviewPage,TR._ID=RL._rowID',
                        where='RL._value="%s"' % name,
                        fields='T.Region',
                        group_by='T.Region'
                        )
    rows = response['cargoquery']
    if len(rows) != 1:
        return ''
    return rows[0]['title']['Region']
# Create a page for each missing player, stopping at `limit` (-1 = no limit).
lmt = 0
for item in result['cargoquery']:
    if lmt == limit:
        break
    lmt = lmt + 1
    name = item['title']['name']
    if site.pages[name].text() != '':
        print('Page %s already exists, skipping' % name)
        continue
    print('Processing page %s...' % name)
    # The same parsed template object is reused; per-player fields are
    # overwritten on each pass before serializing.
    this_template.add('residency', get_residency(name))
    this_template.add('id', name)
    text = str(wikitext)
    site.pages[name].save(text, summary=summary)
|
#!/usr/bin/env python
import torch as th
def get_model(in_size, out_size, sizes=None):
    """Build a sigmoid-activated MLP: in_size -> sizes[...] -> out_size.

    Fixes vs. the original: returns a ``torch.nn.Sequential`` instead of a
    bare list of layers (a list has no ``.parameters()``, which broke
    ``get_optimizer``), and avoids the mutable default argument.

    Args:
        in_size: input feature dimension.
        out_size: output dimension.
        sizes: hidden-layer widths (default [64, 64]).
    """
    if sizes is None:
        sizes = [64, 64]
    layers = [th.nn.Linear(in_size, sizes[0])]
    for i in range(1, len(sizes)):
        layers.append(th.nn.Sigmoid())
        layers.append(th.nn.Linear(sizes[i - 1], sizes[i]))
    layers.append(th.nn.Linear(sizes[-1], out_size))
    return th.nn.Sequential(*layers)
def get_optimizer(model, lr=0.01):
    """Create an SGD optimizer (momentum 0.95) over the model's parameters."""
    params = model.parameters()
    return th.optim.SGD(params, momentum=0.95, lr=lr)
def get_loss(regression=True):
    """Return the training criterion (SmoothL1Loss).

    NOTE(review): the ``regression`` flag is currently ignored — the same
    loss is returned either way; confirm whether a classification loss was
    intended for ``regression=False``.
    """
    return th.nn.SmoothL1Loss()
|
# Demo for the dog module: teach a Dog tricks and query what it knows.
import dog

sugar = dog.Dog('Sugar', 'Border Collie')
print(sugar.tricks)  # tricks known before any teaching
sugar.teach('frisbee')
print(sugar.tricks)  # after teaching 'frisbee'
sugar.knows('frisbee')
sugar.teach('fetch')
sugar.knows('fetch')
sugar.knows('arithmetic')  # a trick that was never taught
# species is reachable both via the class and via an instance
print(dog.Dog.species)
print(sugar.species)
from core.decorators import instance
from core.registry import Registry
from tools.logger import Logger
from __init__ import get_attrs
import time
@instance()
class EventManager:
    """Registers event types and handlers (discovered via @event decorators),
    persists their configuration in the ``event_config`` table, and dispatches
    fired events — including periodically scheduled "timer" events — to every
    enabled handler.
    """

    def __init__(self):
        self.handlers = {}        # handler_name -> bound callable
        self.logger = Logger("event_manager")
        self.event_types = []     # registered base event types
        self.last_timer_event = 0

    def inject(self, registry):
        """Fetch collaborators (db, util) from the registry."""
        self.db = registry.get_instance("db")
        self.util = registry.get_instance("util")

    def pre_start(self):
        # The built-in "timer" type backs check_for_timer_events().
        self.register_event_type("timer")

    def start(self):
        """Scan all registered instances for @event-decorated methods and
        register each one as a handler."""
        # process decorators
        for _, inst in Registry.get_all_instances().items():
            for name, method in get_attrs(inst).items():
                if hasattr(method, "event"):
                    event_type, description = getattr(method, "event")
                    handler = getattr(inst, name)
                    module = self.util.get_module_name(handler)
                    self.register(handler, event_type, description, module)

    def register_event_type(self, event_type):
        """Register a new base event type; duplicates are logged and ignored."""
        event_type = event_type.lower()
        if event_type in self.event_types:
            self.logger.error("Could not register event type '%s': event type already registered" % event_type)
            return
        self.logger.debug("Registering event type '%s'" % event_type)
        self.event_types.append(event_type)

    def is_event_type(self, event_base_type):
        """True if *event_base_type* has been registered."""
        return event_base_type in self.event_types

    def register(self, handler, event_type, description, module):
        """Persist a handler's config (insert new, or re-verify existing) and
        cache the callable for dispatch."""
        event_base_type, event_sub_type = self.get_event_type_parts(event_type)
        module = module.lower()
        handler_name = self.util.get_handler_name(handler).lower()
        if event_base_type not in self.event_types:
            self.logger.error("Could not register handler '%s' for event type '%s': event type does not exist" % (
                handler_name, event_type))
            return
        if not description:
            self.logger.warning("No description for event_type '%s' and handler '%s'" % (event_type, handler_name))
        row = self.db.find('event_config', {'event_type': event_base_type, 'handler': handler_name})
        if row is None:
            # add new event commands
            self.db.insert('event_config', {
                'event_type': event_base_type,
                'event_sub_type': event_sub_type,
                'handler': handler_name,
                'description': description,
                'module': module,
                'verified': 1,
                'enabled': 1,
                'next_run': int(time.time())
            })
        else:
            # mark command as verified
            self.db.update('event_config', {
                'event_type': event_base_type,
                'handler': handler_name
            }, {
                'verified': 1,
                'module': module,
                'description': description,
                'event_sub_type': event_sub_type,
            })
        # load command handler
        self.handlers[handler_name] = handler

    def fire_event(self, event_type, event_data=None):
        """Invoke every enabled handler configured for *event_type*.

        Handler exceptions are logged and do not stop dispatch.
        """
        event_base_type, event_sub_type = self.get_event_type_parts(event_type)
        if event_base_type not in self.event_types:
            self.logger.error("Could not fire event type '%s': event type does not exist" % event_type)
            return
        data = self.db.find_all('event_config', {
            'event_type': event_base_type,
            'event_sub_type': event_sub_type,
            'enabled': 1
        })
        for row in data:
            handler = self.handlers.get(row['handler'], None)
            if not handler:
                # BUGFIX: rows are dicts — the original logged row.handler,
                # which raised AttributeError on this error path.
                self.logger.error(
                    "Could not find handler callback for event type '%s' and handler '%s'" % (event_type, row['handler']))
                # NOTE(review): this aborts dispatch to the remaining
                # handlers; 'continue' may be intended — confirm.
                return
            try:
                handler(event_type, event_data)
            except Exception as e:
                self.logger.error("error processing event '%s'" % event_type, e)

    def get_event_type_parts(self, event_type):
        """Split "base:sub" into (base, sub); sub is '' when absent."""
        parts = event_type.lower().split(":", 1)
        if len(parts) == 2:
            return parts[0], parts[1]
        else:
            return parts[0], ""

    def get_event_type_key(self, event_base_type, event_sub_type):
        """Inverse of get_event_type_parts: join base and sub with ':'."""
        return event_base_type + ":" + event_sub_type

    def check_for_timer_events(self, timestamp):
        """Fire scheduled timer events and advance their next_run markers.

        NOTE(review): '$gte' selects rows whose next_run is at or AFTER
        *timestamp*; '$lte' (events now due) looks intended — confirm
        against the db layer's operator semantics.
        """
        data = self.db.find('event_config',
                            {
                                'enabled': 1,
                                'event_type': 'timer',
                                'next_run': {
                                    '$gte': timestamp
                                }
                            })
        if data is not None:
            for row in data:
                event_type_key = self.get_event_type_key(row['event_type'], row['event_sub_type'])
                # timer event run times should be consistent, so we base the next run time off the last run time,
                # instead of the current timestamp
                next_run = row['next_run'] + int(row['event_sub_type'])
                # prevents timer events from getting too far behind, or having a large "catch-up" after
                # the bot has been offline for a time
                if next_run < timestamp:
                    next_run = timestamp + int(row['event_sub_type'])
                self.db.update('event_config',
                               {
                                   'event_type': 'timer',
                                   'handler': row['handler']
                               },
                               {
                                   'next_run': next_run
                               })
                self.fire_event(event_type_key)
|
'''
Write a Python program to read an entire text file.
'''
# Use a context manager so the file handle is closed even if read() raises
# (the original left the handle open on error).
with open('text.txt', 'r') as fhand:
    print(fhand.read())
# -*- coding: utf-8 -*-
"""
File: shapes.py
Date:21/09/2016
Author: prashanthisudha kosgi
Course: CSE 7014
Instructor:Nguyen Thai
Description:A simple python program to introduce the turtle module.
"""
import turtle
# --- screen setup ---
turtle.setup(800, 300)  # window size
screen = turtle.Screen()  # instantiate the Screen class
screen.bgcolor("white")
screen.title("Draw Circles")

# --- drawing pen ---
ballpen = turtle.Turtle()  # instantiate the Turtle class
ballpen.pensize(4)


def _draw_labeled_shape(pen, color, x, steps, label, label_x):
    """Draw one filled shape at (x, 0) and write its label at (label_x, -50).

    steps=None draws a true circle; otherwise circle() approximates a
    regular polygon with that many sides.
    """
    pen.color(color)
    pen.begin_fill()
    pen.penup()
    pen.goto(x, 0)
    pen.pendown()
    if steps is None:
        pen.circle(40)
    else:
        pen.circle(40, steps=steps)
    pen.end_fill()
    pen.penup()
    pen.goto(label_x, -50)
    pen.write(label, font=("Times", 18, "bold"))


# Shapes in their original drawing order: (color, x, sides, label, label_x)
for _color, _x, _steps, _label, _label_x in (
        ("red", -300, 3, "Triangle", -350),
        ("purple", 150, None, "Circle", 130),
        ("Blue", -200, 4, "square", -240),
        ("Light green", -80, 5, "Pentagon", -120),
        ("Yellow", 30, 6, "Hexagon", 10),
):
    _draw_labeled_shape(ballpen, _color, _x, _steps, _label, _label_x)

# Title (the pen is still up after the last label, so this goto draws nothing)
ballpen.color("Light Green")
ballpen.goto(-200, 100)
ballpen.write("Cool Colorful Shapes", font=("Times", 18, "bold"))

# Hide the drawing pen and finish
ballpen.hideturtle()
turtle.done()
|
"""chat URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from .views import Register,Login,Check_Session,Create_room,Join_room,Delete_session,Check_Session_Room,Room_Size
# Route table: each URL fragment maps to one of the class-based views
# imported above.
urlpatterns = [
    path('register', Register.as_view()),
    path('login',Login.as_view()),
    path('check_session',Check_Session.as_view()),
    path('create-room',Create_room.as_view()),
    path('join-room',Join_room.as_view()),
    path('delete_session',Delete_session.as_view()),
    path('check-room-session',Check_Session_Room.as_view()),
    path('increase-room-size',Room_Size.as_view())
]
|
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import requests
PORT = 10000
GOOGLE_SERVICE_URL_HEADER = 'goog_service_url'
HOST_HEADER = 'host'
class GoogleCertificateProxy(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Forwarding proxy (Python 2): relays GET requests to the service URL
    named in the 'goog_service_url' request header and mirrors the response
    (status, headers, body) back to the caller."""

    # The handler for all 'GET' request.
    def do_GET(self):
        request_headers = self.headers.dict
        # The target URL is carried in a custom header and removed before
        # forwarding; None if absent (requests.get would then fail).
        google_service_url = request_headers.pop(GOOGLE_SERVICE_URL_HEADER, None)
        # host header is set to http://localhost:PORT by the request
        # and will confuse the outbound http request if not removed.
        request_headers.pop(HOST_HEADER, None)
        google_service_response = requests.get(google_service_url, headers=request_headers)
        # Set status.
        self.send_response(google_service_response.status_code)
        # Set headers.
        for key in google_service_response.headers:
            # SimpleHTTPRequestHandler provides the 'Content-Length' header.
            # An 'Illegal chunked encoding' errors will come up
            # if 'Transfer-Encoding: chuncked' is further added.
            if 'Transfer-Encoding' not in key:
                self.send_header(key, google_service_response.headers[key])
        self.end_headers()
        # Set body of the response to the proxy caller.
        self.wfile.write(google_service_response.text)
# Fork-per-request TCP server bound to all interfaces on PORT; runs until killed.
httpd = SocketServer.ForkingTCPServer(('', PORT), GoogleCertificateProxy)
print ("Now serving at " + str(PORT))
httpd.serve_forever()
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-18 06:38
from __future__ import unicode_literals
from django.db import migrations, models
def _uploaded_file_model(model_name, upload_to):
    """CreateModel operation for the shared shape: auto PK, unique name,
    unique uploaded file."""
    return migrations.CreateModel(
        name=model_name,
        fields=[
            ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ('name', models.CharField(max_length=50, unique=True)),
            ('file', models.FileField(unique=True, upload_to=upload_to)),
        ],
    )


class Migration(migrations.Migration):
    """Create the initial document models — five identical name/file models
    plus SliderImage, which stores only an image."""

    dependencies = [
        ('core', '0003_auto_20171118_0536'),
    ]

    operations = [
        _uploaded_file_model('Circular', 'circular/'),
        _uploaded_file_model('Note', 'note/'),
        _uploaded_file_model('Poster', 'poster/'),
        _uploaded_file_model('PracticeSession', 'practice_session/'),
        _uploaded_file_model('QuestionPaper', 'question_paper/'),
        migrations.CreateModel(
            name='SliderImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(unique=True, upload_to='slider_image/')),
            ],
        ),
    ]
|
import pytchat

# Watch a YouTube live chat and flag messages that look like questions.
chat = pytchat.create(video_id="11 character live video ID")
# Keywords checked by substring ("how" also matches inside other words);
# a message containing several keywords is printed once per match.
wordlist = ("?", "what", "when", "where", "which", "who", "why", "how")
while chat.is_alive():
    for c in chat.get().sync_items():
        for i in wordlist:
            if i in c.message:
                print(f"QUESTION ASKED FROM - [{c.author.name}]")
                print(f"{c.datetime} : {c.message}")
# MapSeq.py
# Eric Johannsen Apr 2011
#
#
# Given a PSSM for a pattern, will output a flat file that can be pasted into WebLogo to make a log
# Only works if each position in the matrix adds up to the same number (i.e., A + C + G + T = constant)
import sys
import string
import math
def order(profile):
    """Compute the information-content "order" of a 4 x L count matrix.

    For each column, every base contributes (0.5 + p*log2(p)), where p is
    that base's frequency within the column; zero-count bases contribute
    0.5. Returns (order_score, h) where h is the negated sum of the
    p*log2(p) terms across the whole matrix.

    Fixes vs. the original: uses range() (xrange is Python 2 only; range()
    behaves the same there) and no longer shadows the builtin ord().
    """
    total = 0.0
    h = 0.0
    for c in range(len(profile[0])):
        # Total count in this column across the four bases.
        colsum = 0
        for r in range(4):
            colsum += profile[r][c]
        for r in range(4):
            if (profile[r][c] != 0):
                odds = float(profile[r][c]) / float(colsum)
                value = odds * math.log(odds, 2)
                h += value
            else:
                value = 0.0
            total += (0.5 + value)
    h *= -1
    return total, h
def entropy(profile):
    """Return the negated sum of v*log2(v) over all non-zero matrix cells.

    NOTE(review): the cells are raw counts, not probabilities, so this is
    only a true entropy when each value is a probability — confirm intent.

    Fix vs. the original: range() instead of the Python 2-only xrange().
    """
    h = 0.0
    for r in range(len(profile)):
        for c in range(len(profile[0])):
            if (profile[r][c] != 0):
                h += profile[r][c] * math.log(profile[r][c], 2)
    h = h * -1.0
    return h
##########
# main(): read a PSSM, report its order/entropy, and expand it into one
# synthetic sequence per count so the output can be pasted into WebLogo.
# (Python 2 script: print statements, xrange, string.maketrans.)
##########
if (len(sys.argv) < 2):
    print "Usage: python", sys.argv[0], "<matrix-filename>"
    exit()
else:
    print "\n\nmatrix filename = ", sys.argv[1]
    mp = open(sys.argv[1])
###############
# read inputs #
###############
# matrix[0..3] hold the A/C/G/T counts for each pattern position.
matrix = [[] for row in xrange(4)]
tfname = mp.readline().replace("\n", "").replace(">", "")
text = mp.readline().replace("\n","")
while text:
    line = text.split(" ")
    # Column 0 of each input line is a label; counts start at column 1.
    for row in xrange(4):
        matrix[row].append(int(line[row+1]))
    text = mp.readline().replace("\n","")
patternlen = len(matrix[0])
mh = order(matrix)
print "Matrix order = ", '%.1f' % (mh[0])
print "Matrix entropy = ", '%.1f' % (mh[1])
################
# open outputs #
################
outname = tfname + ".logo"
op = open(outname, 'w')
# Number of sequences = total counts in the first column (assumes every
# column sums to the same constant, per the header comment).
seqnum = 0
for r in xrange(4):
    seqnum += matrix[r][0]
# Greedily emit one base per column per sequence, decrementing the counts.
for seqs in xrange(seqnum):
    text = ""
    for c in xrange(patternlen):
        for r in xrange(4):
            if matrix[r][c] > 0:
                text += str(r)
                matrix[r][c] -= 1
                break
    # Row indices 0-3 map onto the bases A, C, G, T.
    decode = text.translate(string.maketrans("0123", "ACGT"))
    print >>op, decode
exit()
|
import numpy as np
from sklearn.linear_model import LinearRegression
# Toy dataset that is exactly linear: y = 2x + 2 for x in {1, 2, 3}.
X = np.array([[3],[2],[1]])
Y = np.array([8,6,4])
alpha = 0.1  # learning rate used by the descent routines below
weights = np.ones((1, 2))  # NOTE(review): unused — both routines build their own weights
def add_bias(X):
    """Prepend a column of ones (the intercept term) to the feature matrix."""
    ones_col = np.ones((X.shape[0], 1))
    return np.hstack((ones_col, X))
def batch_gradient_descent(X, Y, num_iter=1000, alpha=0.1):
    """Fit linear-regression weights by full-batch gradient descent.

    A bias column of ones is prepended, so the returned vector is
    [intercept, slope, ...].

    Fixes vs. the original: range() instead of the Python 2-only xrange()
    (a NameError in this Python 3 file), the learning rate is a parameter
    (default matches the old module-level ``alpha``), the bias column is
    added inline so the routine is self-contained, and the weights are
    printed once at the end rather than every iteration.
    """
    Xb = np.hstack((np.ones((X.shape[0], 1)), X))
    weights = np.zeros(Xb.shape[1])
    for _ in range(num_iter):
        y_hat = np.dot(weights, Xb.T)
        errors = Y - y_hat
        gradient = np.dot(errors, Xb)
        weights = weights + alpha * gradient
    print(weights)
    return weights
def stochastic_gradient_descent(X, Y, num_iter=1000, alpha=0.1):
    """Fit linear-regression weights with per-sample (stochastic) updates.

    A bias column of ones is prepended, so the returned vector is
    [intercept, slope, ...]. Samples are visited in order for ``num_iter``
    epochs.

    Fixes vs. the original: range() instead of the Python 2-only xrange(),
    the learning rate is a parameter (default matches the old module-level
    ``alpha``), the bias column is added inline, and the weights are
    printed once at the end rather than every epoch.
    """
    Xb = np.hstack((np.ones((X.shape[0], 1)), X))
    weights = np.zeros(Xb.shape[1])
    for _ in range(num_iter):
        for idx, row in enumerate(Xb):
            y_hat = np.dot(weights, row.T)
            error = Y[idx] - y_hat
            gradient = error * row
            weights = weights + alpha * gradient
    print(weights)
    return weights
print("Stochastic gradient descent",stochastic_gradient_descent(X,Y))
print("Batch gradient descent", batch_gradient_descent(X,Y))
model = LinearRegression()
model.fit(X,Y)
print(model.predict(np.array([[5]])))
|
# @Author: sachan
# @Date: 2019-03-09T22:29:01+05:30
# @Last modified by: sachan
# @Last modified time: 2019-03-10T00:17:45+05:30
import torch
import torch.nn.functional as F
class gap_model1(torch.nn.Module):
    """Bilinear scoring model with a softmax over candidates.

    Scores every row of x1 against x2 via x1 @ W @ x2.T + b, then applies a
    softmax across the candidate rows (dim=0).
    """

    def __init__(self, embedding_size):
        super(gap_model1, self).__init__()
        self.embedding_size = embedding_size
        # Fixed: wrap W and b in nn.Parameter. Bare requires_grad tensors
        # are invisible to .parameters(), optimizers, .to()/.cuda() and
        # state_dict(), so the original model could not be trained or saved
        # through the usual Module machinery.
        self.W = torch.nn.Parameter(torch.randn(embedding_size, embedding_size))
        self.b = torch.nn.Parameter(torch.randn(1))

    def forward(self, x1, x2):
        """x1: (N, embedding_size) candidates; x2: (1, embedding_size) query.
        Returns an (N, 1) softmax distribution over the candidates."""
        bilinear_score = torch.mm(x1, torch.mm(self.W, x2.t())) + self.b
        softmax_score = F.softmax(bilinear_score, dim=0)
        return softmax_score
if __name__ == '__main__':
    # Smoke test: score 1000 candidate embeddings against a single query.
    x1 = torch.randn(1000,764)
    x2 = torch.randn(1,764)
    model = gap_model1(764)
    pred = model.forward(x1, x2)
    print(torch.argmax(pred))  # index of the highest-scoring candidate
    print(sum(pred))  # softmax over dim=0, so this sums to ~1
    print(pred.shape)
|
from getpass import getpass
correct_pin = "1234"
attempt_n = 1
total_attempts = 3
while attempt_n <= total_attempts:
supplied_pin = getpass("Enter your PIN: ")
if supplied_pin == correct_pin:
print('Pin accepted')
break
elif attempt_n < total_attempts:
print('Pin incorrect, this is attempt no. ', attempt_n)
else:
print('You have no more attempts remaining')
attempt_n += 1
|
# Collapse runs of repeated characters: "aabbc" -> "abc".
line = input()
text = ""
new_letter = ""
for letter in line:
    if letter == new_letter:
        continue  # skip repeats of the previous character
    text += letter
    new_letter = letter
print(text)
import subprocess
import os, logging
import datetime
def log_event(msg, level = "i"):
    """Log *msg* with the current datetime prepended.

    level: "w" -> warning, "e" -> error, anything else -> info
    (case-insensitive).

    Fix: logging.warn is a deprecated alias — use logging.warning.
    """
    timestamp = "%s " % datetime.datetime.now()
    if level.lower() == "w":
        logging.warning(timestamp + msg)
    elif level.lower() == "e":
        logging.error(timestamp + msg)
    else:
        logging.info(timestamp + msg)
messages = []
def add_message(message, error=False):
    """Timestamp *message*, append it to the global message list, and log it.

    Error messages get an "Error:" prefix in the list and are logged at
    error level.
    """
    global messages
    stamped = "{}: {}".format(datetime.datetime.now().strftime("[%Y/%m/%d %H:%M:%S]"), message)
    if not error:
        messages.append(stamped)
        log_event(stamped)
    else:
        messages.append("Error:" + stamped)
        log_event(stamped, "e")
def create_thumbnail(path):
    """Capture a frame of the video at *path* with VLC and store it as a PNG
    under clientManager/static/clientManager/thumbs.

    Returns the static URL of the thumbnail. Raises if *path* is missing.

    Fixes vs. the original: print() calls work on both Python 2 and 3
    (the bare print statements were Python 2 only), and the stale-thumbnail
    check now uses os.path.join — ``folder + name`` had no path separator,
    so it could never match an existing file.
    """
    if not os.path.isfile(path):
        print(path)
        print(os.getcwd())
        raise Exception("There is no such file")
    name = os.path.split(path)[1]
    name = name.split('.')[0]
    folder = os.path.abspath(os.path.join(os.getcwd(), "clientManager", "static", "clientManager","thumbs"))
    # Remove any stale thumbnail before regenerating.
    stale = os.path.join(folder, name + ".png")
    if os.path.isfile(stale):
        os.remove(stale)
    if not os.path.exists(folder):
        os.mkdir(folder)
    # Grab one scene frame between seconds 4 and 5 of the video.
    subprocess.call(['vlc', path, '--rate=1', '--video-filter=scene', '--vout=dummy', '--start-time=4', '--stop-time=5', '--scene-format=png', '--scene-ratio=60', '--scene-prefix={}'.format(name), '--scene-path={}'.format(folder), 'vlc://quit'
    ])
    # VLC names the capture <name>00001.png; rename it to <name>.png.
    old_path = os.path.join(folder, name + "00001.png")
    new_path = os.path.join(folder, name + ".png")
    if os.path.isfile(new_path):
        os.remove(new_path)
    os.rename(old_path, new_path)
    return "/static/clientManager/thumbs/" + name + ".png"
|
#!/usr/bin/env python3
'''
转换5分钟K线的原始数据
默认先查找当天数据库的所有股票,然后按每日便利每只股票
'''
import os, sys, datetime
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
sys.path.append(PROJECT_ROOT)
from DataTransform.Transform5M import process_single_shot
if __name__ == "__main__":
    if len(sys.argv) != 3:
        # BUG FIX: the old usage text said "start_date end_date", but the
        # script actually takes a stock code and a single date.
        print("{0} code date".format(sys.argv[0]))
        # Wrong usage is an error: exit non-zero (old code exited 0).
        exit(1)
    code = str(sys.argv[1])
    date = datetime.datetime.strptime(str(sys.argv[2]), "%Y-%m-%d").date()
    process_single_shot(code, date)
#!/usr/bin/python
#encoding=utf-8
'''
Created on 2016/12/23
@author:Ljx
'''
'''
提取种子站点的一级目录和二级目录页面上的中文,提取关键字Top50:
1.爬虫(bs4)
1.1连接数据库,读取种子站点
1.2通过正则匹配把首页上所有的http链接都爬下来
1.3筛选链接
筛选规则:保留本域名的链接,去除含数字的链接
2.提取关键字
2.1读取链接页面上的内容
2.2通过正则匹配选取中文
2.2导入LDA模型提取关键字Top50
2.3把关键字插入到数据库中
'''
#import bs4
#from bs4 import BeautifulSoup
import jieba
import jieba.analyse
import urllib
import urllib2
import re
import MySQLdb
#检测编码模块
import chardet
from chardet.universaldetector import UniversalDetector
from socket import error as SocketError
#errno系统符号
import errno
#用于在内存缓冲区中读写数据,类似于对文件的操作方法
import StringIO
import traceback
import time
#系统模块
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Database connection settings (MySQL)
db_host = 'localhost'
db_username = 'root'
db_password = ''
db_database_name = 'freebuf_secpulse'
db_table_name = 'grabsite'
# Number of top keywords (topN) for jieba's keyword extraction
topK = 50
# Function: open a database connection
def getMysqlConn():
    """Return a new MySQLdb connection using the module-level settings."""
    return MySQLdb.connect(
        host=db_host,
        user=db_username,
        passwd=db_password,
        db=db_database_name,
        charset="utf8",
    )
# Function: build the seed-site query
def getSelectMysql():
    """Return the SQL statement that lists every seed site domain."""
    return "select siteDomain from " + db_table_name
'''
#函数:插入到数据库
def insert_url():
insert_sql = "insert into urls(siteDomain,url) "+"values(%s,%s)"
return insert_sql
#函数:插入到数据库
def insert_tip():
insert_sql = "insert into tips(siteDomain,tips) "+"values(%s,%s)"
return insert_sql
'''
# Function: crawl all of the urls on a page
def spider_url(url):
    """Fetch `url` and return every http(s) link found in its HTML.

    The page is normalised to utf-8 (non-utf-8 pages are assumed gbk and
    transcoded). Returns an empty list when the request fails.
    """
    headers = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6' }
    try:
        req = urllib2.Request(url, headers = headers)
        html_1 = urllib2.urlopen(req).read()
    except Exception:
        # BUG FIX: the old code did `except: pass` and then crashed below
        # with NameError on the undefined `html_1`; bail out with no links.
        traceback.print_exc()
        return []
    # Detect the page encoding and normalise everything to utf-8
    encoding_dict = chardet.detect(html_1)
    web_encoding = encoding_dict['encoding']
    if web_encoding == 'utf-8' or web_encoding == 'UTF-8':
        html = html_1
    else:
        html = html_1.decode('gbk','ignore').encode('utf-8')
    # Match absolute http(s) links up to the closing quote
    re_http = re.compile(u"https?://[a-zA-Z0-9_-]+\..+?\"")
    res = re.findall(re_http, html)
    return res
# Function: filter the crawled urls
def filter_url(get_url, domain):
    """Keep only links that live on `domain`, dropping numbered article
    pages and static assets (images, js, css, swf)."""
    # Reject numbered .html pages and static-resource links
    skip_pattern = re.compile(u"(\d+\.html\")|(\d+\.html#.+?\")|(\.gif\")|(\.png\")|(\.jpg\")|(\.jpng\")|(\.js\")|(\.css\")|(\.swf\")")
    kept = []
    for link in get_url:
        if not link.startswith(domain):
            continue
        if skip_pattern.search(link):
            continue
        kept.append(link)
    return kept
# Function: fetch a page's content
def get_content(url):
    """Download `url`, detect its encoding, and return the page's Chinese
    characters joined by single spaces.

    Returns 0 when the HTTP request fails. NOTE(review): when the encoding
    cannot be detected the function falls through and implicitly returns
    None, which slips past callers that only check `!= 0` — confirm.
    """
    # Fetch the page content
    try:
        # Request headers (pretend to be a desktop Firefox)
        headers = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6' }
        # Build a Request object; urlopen returns a file-like response and
        # .read() yields the raw HTML bytes.
        req = urllib2.Request(url, headers = headers)
        #response = urlli2.urlopen('url')
        #html = response.read()
        html = urllib2.urlopen(req).read()
    except:
        #print "pass error"
        return 0
    # Detect the page encoding incrementally (suits large documents)
    #coding = chardet.detect(str1)
    # Create a detector object
    detector = UniversalDetector()
    # Read the HTML back line by line from an in-memory buffer
    buf = StringIO.StringIO(html)
    for line in buf.readlines():
        # Feed chunks until the detector reaches its confidence threshold
        #print line
        detector.feed(line)
        if detector.done:
            break
    # Close the detector (finalises the result) and the buffer
    detector.close()
    buf.close()
    # Detection result: a dict with 'encoding' and 'confidence'
    coding = detector.result
    # Extract the Chinese portion when an encoding was found
    if coding['encoding']:
        #content = unicode(str1,coding['encoding'])
        content = html.decode(coding['encoding'],'ignore')
        # Regex matching runs of CJK unified ideographs
        re_words = re.compile(u"[\u4e00-\u9fa5]+")
        # findall returns the matched runs as a list
        res = re.findall(re_words, content)
        # Join the runs into one space-separated string
        str_convert = ' '.join(res)
        #print str_convert
        return str_convert
# Function: extract keywords
def get_tips(content):
    """Extract the top `topK` keywords from `content` with jieba TF-IDF
    and return them as one comma-separated string (also printed)."""
    keywords = jieba.analyse.extract_tags(content, topK)
    joined = ",".join(keywords)
    print(joined)
    return joined
# Main: crawl the first three seed sites, store their links, and extract
# keywords from the aggregated Chinese text.
if __name__ == "__main__":
    # Connect to the database
    conn = getMysqlConn()
    cur = conn.cursor()
    # Query the seed sites
    select_sql = getSelectMysql()
    cur.execute(select_sql)
    urls = cur.fetchall()
    for domain in urls[:3]:
        try:
            # Each row is a 1-tuple; unwrap the domain string
            domain = domain[0]
            print("====================")
            print(domain)
            # Crawl every link on the seed page, then keep same-domain ones
            get_url = spider_url(domain)
            select_url = filter_url(get_url, domain)
            content_all = []
            for url in select_url:
                # Store the filtered url together with its seed site
                sql = "insert into freebuf_secpulse.urls(siteDomain,url) values('%s','%s')" % (domain, url)
                try:
                    print(sql)
                    cur.execute(sql)
                    conn.commit()
                except Exception:
                    traceback.print_exc()
                    conn.rollback()
                # Collect the page's Chinese text. BUG FIX: get_content may
                # also return None (undetected encoding); the old `!= 0`
                # check let None through and broke the join below.
                content = get_content(url)
                if content:
                    content_all.append(content)
            content_url = " ".join(content_all)
            tips = get_tips(content_url)
            # BUG FIX: the old code rebound the name `time` to a string,
            # which broke time.strftime on every later loop iteration.
            createTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            # Store the keywords for this seed site
            sql = "insert into freebuf_secpulse.tips(siteDomain,tips,createTime) values('%s','%s','%s')" % (domain, tips, createTime)
            try:
                print(sql)
                cur.execute(sql)
                conn.commit()
            except Exception:
                traceback.print_exc()
                conn.rollback()
        except Exception:
            traceback.print_exc()
    # Close the cursor and connection
    cur.close()
    conn.close()
|
# from skopt.space import Categorical, Integer, Real
from skopt.space import Integer
from src.hyper_opt.run import run_hyper_opt
from src.rf.forest import RandomForestRegressor
# Hyper-parameter search space: only the forest size is tuned; the other
# dimensions are kept for reference but currently disabled.
space = (
    Integer(1, 100, name="n_estimators"),
    # Categorical([None, 1, 10, 100, 1000], name="max_depth"),
    # Categorical([None, 1, 10, 100, 1000], name="max_leaf_nodes"),
    # Categorical(["mse", "mae"], name="criterion"),
    # Integer(1, 100, name="min_samples_split"),
)
def build_model_and_run(params, X_train, y_train, X_test):
    """Fit a random forest with the sampled `params` and return its
    (prediction, variance) pair for X_test."""
    forest = RandomForestRegressor(**params, random_state=0)
    forest.fit(X_train, y_train)
    prediction, variance = forest.predict(X_test)
    # y_var[y_var <= 0] = 1e-6
    return prediction, variance
# Launch the hyper-parameter optimisation over `space`, logging under
# rf/hyper_opt and fitting the "resistivity" label.
run_hyper_opt(
    build_model_and_run,
    space=space,
    log_dir_model="rf/hyper_opt",
    labels="resistivity",
    # use_robust_mse=True,
)
|
# -*- coding: utf-8 -*-
# # # #
# NER_BERT_CRF.py
# @author Zhibin.LU
# @created Fri Feb 15 2019 22:47:19 GMT-0500 (EST)
# @last-modified Sun Mar 31 2019 12:17:08 GMT-0400 (EDT)
# @website: https://louis-udm.github.io
# @description: Bert pytorch pretrainde model with or without CRF for NER
# The NER_BERT_CRF.py include 2 model:
# - model 1:
# - This is just a pretrained BertForTokenClassification, For a comparision with my BERT-CRF model
# - model 2:
# - A pretrained BERT with CRF model.
# - data set
# - [CoNLL-2003](https://github.com/FuYanzhe2/Name-Entity-Recognition/tree/master/BERT-BiLSTM-CRF-NER/NERdata)
# # # #
# %%
import sys
import os
import time
import importlib
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.autograd as autograd
import torch.optim as optim
from torch.utils.data.distributed import DistributedSampler
from torch.utils import data
from torch.utils.data import SequentialSampler
from tqdm import tqdm, trange
import collections
from pytorch_pretrained_bert.modeling import BertModel, BertForTokenClassification, BertLayerNorm
import pickle
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from pytorch_pretrained_bert.tokenization import BertTokenizer
from data_utils import NERDataProcessor, NerDataset
from BERT_biLSTM_CRF import BERT_biLSTM_CRF
from BERT_CRF import BERT_CRF
import metric_utils
import argparse
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
def get_optimizer(model, hp, total_train_steps):
    """Build a BertAdam optimizer with four parameter groups.

    Groups:
      1. fine-tuned BERT weights (weight-decayed, default LR),
      2. fine-tuned bias/LayerNorm params (no decay),
      3. newly added CRF/classifier weights (own LR + own decay),
      4. newly added classifier bias (own LR, no decay).
    """
    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    # Substrings marking params that should never be weight-decayed
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    # Exact names of the parameters added on top of pretrained BERT
    new_param = ['transitions', 'hidden2label.weight', 'hidden2label.bias']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay) \
            and not any(nd in n for nd in new_param)], 'weight_decay': hp.weight_decay_finetune},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay) \
            and not any(nd in n for nd in new_param)], 'weight_decay': 0.0},
        {'params': [p for n, p in param_optimizer if n in ('transitions','hidden2label.weight')] \
            , 'lr':hp.lr0_crf_fc, 'weight_decay': hp.weight_decay_crf_fc},
        {'params': [p for n, p in param_optimizer if n == 'hidden2label.bias'] \
            , 'lr':hp.lr0_crf_fc, 'weight_decay': 0.0}
    ]
    return BertAdam(optimizer_grouped_parameters, lr=hp.learning_rate0,
                    warmup=hp.warmup_proportion, t_total=total_train_steps)
    # optimizer = optim.Adam(model.parameters(), lr=learning_rate0)
if __name__=="__main__":
    # ---- environment / device ----
    print('Python version ', sys.version)
    print('PyTorch version ', torch.__version__)
    # set_work_dir()
    # print('Current dir:', os.getcwd())
    cuda_yes = torch.cuda.is_available()
    # cuda_yes = False
    print('Cuda is available?', cuda_yes)
    device = torch.device("cuda:0" if cuda_yes else "cpu")
    print('Device:', device)
    # ---- hyper-parameters from the command line ----
    parser = argparse.ArgumentParser()
    parser.add_argument("--bert_model_scale", type=str, default="bert-base-cased-pt-br")
    parser.add_argument("--vocab", type=str, default="vocab.txt")
    parser.add_argument("--model", type=str, default="token") #token, crf, bilstm_crf
    parser.add_argument("--bert_output", type=str, default="last") #last , sum
    parser.add_argument("--data_dir", type=str, default="./data/")
    parser.add_argument("--n_epochs", type=int, default=10)
    parser.add_argument("--max_seq_length", type=int, default=128)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=8)
    parser.add_argument("--output_dir", type=str, default="./output/")
    parser.add_argument("--learning_rate0", type=float, default=1e-5)
    parser.add_argument("--lr0_crf_fc", type=float, default=8e-5)
    parser.add_argument("--weight_decay_finetune", type=float, default=1e-5)
    parser.add_argument("--weight_decay_crf_fc", type=float, default=5e-6)
    parser.add_argument("--gradient_accumulation_steps", type=float, default=1)
    # "Proportion of training to perform linear learning rate warmup for. "
    # "E.g., 0.1 = 10% of training."
    parser.add_argument("--warmup_proportion", type=float, default=0.1)
    parser.add_argument("--finetuning", dest="finetuning", action="store_true")
    parser.add_argument("--no_finetuning", dest="finetuning", action="store_false")
    parser.add_argument("--load_checkpoint", dest="load_checkpoint", action="store_true")
    parser.add_argument("--no_load_checkpoint", dest="load_checkpoint", action="store_false")
    parser.add_argument("--do_lower_case", dest="do_lower_case", action="store_true")
    parser.add_argument("--logdir", type=str, default="checkpoints/01")
    hp = parser.parse_args()
    # Fix all RNG seeds for reproducibility
    np.random.seed(44)
    torch.manual_seed(44)
    if cuda_yes:
        torch.cuda.manual_seed_all(44)
    # ---- data loading ----
    print('Loading data...')
    # Load pre-trained model tokenizer (vocabulary)
    nerDataProcessor = NERDataProcessor()
    label_list = nerDataProcessor.get_labels()
    label_map = nerDataProcessor.get_label_map()
    # id -> label name, used when reporting predictions
    inv_label_map = {v: k for k, v in label_map.items()}
    train_examples = nerDataProcessor.get_train_examples(hp.data_dir)
    dev_examples = nerDataProcessor.get_dev_examples(hp.data_dir)
    test_examples = nerDataProcessor.get_test_examples(hp.data_dir)
    total_train_steps = int(len(train_examples) / hp.batch_size / hp.gradient_accumulation_steps * hp.n_epochs)
    print("***** Running training *****")
    print(" Num examples = %d"% len(train_examples))
    print(" Batch size = %d"% hp.batch_size)
    print(" Num steps = %d"% total_train_steps)
    tokenizer = BertTokenizer.from_pretrained(hp.vocab, do_lower_case=hp.do_lower_case)
    train_dataset = NerDataset(train_examples,tokenizer,label_map,hp.max_seq_length)
    dev_dataset = NerDataset(dev_examples,tokenizer,label_map,hp.max_seq_length)
    test_dataset = NerDataset(test_examples,tokenizer,label_map,hp.max_seq_length)
    train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                   batch_size=hp.batch_size,
                                                   shuffle=True,
                                                   num_workers=4,
                                                   collate_fn=NerDataset.pad)
    dev_dataloader = torch.utils.data.DataLoader(dataset=dev_dataset,
                                                 batch_size=hp.batch_size,
                                                 shuffle=False,
                                                 num_workers=4,
                                                 collate_fn=NerDataset.pad)
    test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                  batch_size=hp.batch_size,
                                                  shuffle=False,
                                                  num_workers=4,
                                                  collate_fn=NerDataset.pad)
    start_label_id = nerDataProcessor.get_start_label_id()
    stop_label_id = nerDataProcessor.get_stop_label_id()
    # ---- model construction (one of: bilstm_crf, crf, token) ----
    # NOTE(review): `model` is only bound for these three --model values;
    # any other value would raise NameError below — confirm intended.
    print('Loading model...')
    bert_model = BertModel.from_pretrained(hp.bert_model_scale)
    if hp.model == 'bilstm_crf':
        model = BERT_biLSTM_CRF(bert_model, start_label_id, stop_label_id, len(label_list),
                                hp.max_seq_length, hp.batch_size, device, hp.bert_output,
                                hp.finetuning)
    elif hp.model == 'crf':
        model = BERT_CRF(bert_model, start_label_id, stop_label_id, len(label_list),
                         hp.max_seq_length, hp.batch_size, device, hp.bert_output,
                         hp.finetuning)
    elif hp.model =='token':
        model = BertForTokenClassification.from_pretrained(
            hp.bert_model_scale, num_labels=len(label_list))
    # ---- optional checkpoint restore ----
    if hp.load_checkpoint and os.path.exists(hp.output_dir+'/checkpoint.pt'):
        checkpoint = torch.load(hp.output_dir+'/checkpoint.pt', map_location='cpu')
        start_epoch = checkpoint['epoch']+1
        valid_acc_prev = checkpoint['valid_acc']
        valid_f1_prev = checkpoint['valid_f1']
        if hp.model =='token':
            model = BertForTokenClassification.from_pretrained(hp.bert_model_scale,
                                                               state_dict=checkpoint['model_state'],
                                                               num_labels=len(label_list))
        else:
            # Restore only the weights present in the current architecture
            pretrained_dict=checkpoint['model_state']
            net_state_dict = model.state_dict()
            pretrained_dict_selected = {k: v for k, v in pretrained_dict.items() if k in net_state_dict}
            net_state_dict.update(pretrained_dict_selected)
            model.load_state_dict(net_state_dict)
        print('Loaded the pretrain model, epoch:', checkpoint['epoch'],'valid acc:',
              checkpoint['valid_acc'], 'valid f1:', checkpoint['valid_f1'])
    else:
        start_epoch = 0
        valid_acc_prev = 0
        valid_f1_prev = 0
    model.to(device)
    optimizer = get_optimizer(model, hp, total_train_steps)
    ############################ train procedure ######################################
    print('Trainning...')
    global_step_th = int(len(train_examples) / hp.batch_size / hp.gradient_accumulation_steps * start_epoch)
    for epoch in tqdm(range(start_epoch, hp.n_epochs)):
        tr_loss = 0
        train_start = time.time()
        model.train()
        optimizer.zero_grad()
        for step, batch in enumerate(train_dataloader):
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, predict_mask, label_ids = batch
            if hp.model =='token':
                # Plain token classifier: cross-entropy loss from the model
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if hp.gradient_accumulation_steps > 1:
                    loss = loss / hp.gradient_accumulation_steps
                loss.backward()
                tr_loss += loss.item()
            else:
                # CRF variants: minimise the negative log likelihood
                neg_log_likelihood = model.neg_log_likelihood(input_ids, segment_ids,
                                                              input_mask, label_ids)
                if hp.gradient_accumulation_steps > 1:
                    neg_log_likelihood = neg_log_likelihood / hp.gradient_accumulation_steps
                neg_log_likelihood.backward()
                tr_loss += neg_log_likelihood.item()
            # Step the optimizer only every gradient_accumulation_steps batches
            if (step + 1) % hp.gradient_accumulation_steps == 0:
                # modify learning rate with special warm up BERT uses
                lr_this_step = hp.learning_rate0 * warmup_linear(global_step_th/total_train_steps, hp.warmup_proportion)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_this_step
                optimizer.step()
                optimizer.zero_grad()
                global_step_th += 1
                if hp.model =='token':
                    print("Epoch:{}-{}/{}, CrossEntropyLoss: {} ".format(epoch, step, len(train_dataloader), loss.item()))
                else:
                    print("Epoch:{}-{}/{}, Negative loglikelihood: {} ".format(epoch, step, len(train_dataloader), neg_log_likelihood.item()))
        print("Epoch:{} completed, Total training's Loss: {}, Spend: {}m".format(epoch, tr_loss, (time.time() - train_start)/60.0))
        valid_acc, valid_f1 = metric_utils.evaluate(model, dev_dataloader, epoch,
                                                    'Valid_set', inv_label_map, device)
        # Save a checkpoint
        if valid_f1 > valid_f1_prev:
            torch.save({'epoch': epoch, 'model_state': model.state_dict(), 'valid_acc': valid_acc,
                        'valid_f1': valid_f1, 'max_seq_length': hp.max_seq_length, 'lower_case': hp.do_lower_case},
                       os.path.join(hp.output_dir, 'checkpoint.pt'))
            valid_f1_prev = valid_f1
    print('Finished train!')
    ################################################################################################
    # ---- final evaluation: last epoch, then the best saved checkpoint ----
    print('Last epoch in test set:')
    metric_utils.evaluate(model, test_dataloader, hp.n_epochs-1,
                          'Test_set', inv_label_map, device)
    print('Best epoch in test set:')
    '''
    Test_set prediction using the best epoch of model
    '''
    checkpoint = torch.load(os.path.join(hp.output_dir, 'checkpoint.pt'), map_location='cpu')
    epoch = checkpoint['epoch']
    valid_acc_prev = checkpoint['valid_acc']
    valid_f1_prev = checkpoint['valid_f1']
    pretrained_dict=checkpoint['model_state']
    net_state_dict = model.state_dict()
    pretrained_dict_selected = {k: v for k, v in pretrained_dict.items() if k in net_state_dict}
    net_state_dict.update(pretrained_dict_selected)
    model.load_state_dict(net_state_dict)
    # model = BertForTokenClassification.from_pretrained(
    #     bert_model_scale, state_dict=checkpoint['model_state'], num_labels=len(label_list))
    print('Loaded the pretrain model, epoch:',checkpoint['epoch'],'valid acc:',
          checkpoint['valid_acc'], 'valid f1:', checkpoint['valid_f1'])
    model.to(device)
    metric_utils.evaluate(model, test_dataloader, epoch, 'Test_set', inv_label_map, device)
    # Re-run test-set evaluation sequentially, writing per-example results
    eval_sampler = SequentialSampler(test_dataset)
    demon_dataloader = data.DataLoader(dataset=test_dataset,
                                       sampler=eval_sampler,
                                       batch_size=hp.eval_batch_size,
                                       shuffle=False,
                                       num_workers=4,
                                       collate_fn=NerDataset.pad)
    metric_utils.evaluate(model, demon_dataloader, hp.n_epochs-1,
                          'Test_set', inv_label_map, device,
                          output_file=os.path.join(hp.output_dir, 'eval_results.txt'))
|
import os
import time
print("MadLibs Final Project")
print("By Indigo Suh")
print()
answer = input("Ready to play? Type yes or no: ")
# BUG FIX: the old condition listed "yes." twice and missed other obvious
# spellings; normalise case/whitespace and compare against the variants.
if answer.strip().lower() in ("yes", "yes.", "yes!"):
    import random
    # Pick one of the two madlib modules at random; importing runs it.
    madlib = random.randint(1, 2)
    if madlib == 1:
        import madlib1
    elif madlib == 2:
        import madlib2
else:
    print("Okay! Have a nice day.")
    exit()
|
import random
class Laboratory(object):
    """Simulates substances annihilating with their "anti" counterparts.

    Two substances react when one's name is "anti" + the other's name.
    """

    def __init__(self, shelf1, shelf2):
        self.shelf1 = shelf1
        self.shelf2 = shelf2

    def can_react(self, substance1, substance2):
        """Return True when the two substances are an anti-pair."""
        condition1 = (substance1 == "anti" + substance2)
        condition2 = (substance2 == "anti" + substance1)
        return condition1 or condition2

    def update_shelves(self, shelf1, shelf2, substance1, substance2_index):
        """Return copies of both shelves with the reacting pair removed.

        BUG FIX: the old code sliced `self.shelf2` (the shelf captured at
        construction) instead of the `shelf2` argument, so once the shelves
        evolved the wrong substances survived.
        """
        index1 = shelf1.index(substance1)
        shelf1 = shelf1[:index1] + shelf1[index1+1:]
        shelf2 = shelf2[:substance2_index] + shelf2[substance2_index+1:]
        return shelf1, shelf2

    def do_a_reaction(self, shelf1, shelf2):
        """Perform at most one reaction and return the resulting shelves.

        Scans shelf1 in order; the first substance with any partner on
        shelf2 reacts with a randomly chosen partner. Unchanged shelves
        are returned when nothing can react.
        """
        for substance1 in shelf1:
            possible_targets = [i for i, target in enumerate(shelf2) if
                                self.can_react(substance1, target)]
            if not possible_targets:
                continue
            substance2_index = random.choice(possible_targets)
            return self.update_shelves(shelf1, shelf2, substance1,
                                       substance2_index)
        return shelf1, shelf2

    def run_full_experiment(self, shelf1, shelf2):
        """React until nothing changes; return (shelf1, shelf2, reactions)."""
        count = 0
        ended = False
        while not ended:
            shelf1_new, shelf2_new = self.do_a_reaction(shelf1, shelf2)
            if shelf1_new != shelf1:
                count += 1
            ended = (shelf1_new == shelf1) and (shelf2_new == shelf2)
            shelf1, shelf2 = shelf1_new, shelf2_new
        return shelf1, shelf2, count
|
def sum():
x = int(input())
for q in range(x):
s = input()
s = list(s)
lengden = len(s)
s.reverse()
for i in range(lengden):
if i % 2 == 1:
n = int(s[i])
n *= 2
if n > 9:
n = str(n)
n = list(n)
n = int(n[0]) + int(n[1])
s[i] = n
total = 0
for e in s:
total += int(e)
if (total % 10 == 0):
print("PASS")
else:
print("FAIL")
sum() |
#!/usr/bin/env python
'''
This script will attempt to open your webbrowser,
perform OAuth 2.0 authentication and print your access token.
To install dependencies from PyPI:
$ pip install oauth2client
Then run this script:
$ python get_oauth2_token.py
This is a combination of snippets from:
https://developers.google.com/api-client-library/python/guide/aaa_oauth
https://gist.github.com/burnash/6771295
'''
import os, sys
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run_flow
from oauth2client.file import Storage
def return_token():
    """Convenience wrapper around get_oauth2_token()."""
    return get_oauth2_token()
def disable_stout():
    """Silence stdout by swapping in a handle to os.devnull.

    Returns (original_stdout, devnull_file) so enable_stout can restore.
    """
    original = sys.stdout
    devnull = open(os.devnull, 'w')
    sys.stdout = devnull
    return (original, devnull)
def enable_stout(o_stdout, o_file):
    """Close the devnull handle and restore the original stdout."""
    o_file.close()
    sys.stdout = o_stdout
def get_oauth2_token():
    """Run the OAuth 2.0 browser flow and return the access token string.

    Fill in the placeholder credentials from the Google API Console before
    use; credentials are cached in 'creds.data'.
    """
    CLIENT_ID = '<Client ID from Google API Console>'
    CLIENT_SECRET = '<Client secret from Google API Console>'
    SCOPE = '<OAuth 2.0 scope>'
    REDIRECT_URI = '<Redirect URI from Google API Console>'
    # Hide the flow's console chatter while the browser handshake runs
    o_stdout, o_file = disable_stout()
    flow = OAuth2WebServerFlow(client_id=CLIENT_ID,
                               client_secret=CLIENT_SECRET,
                               scope=SCOPE,
                               redirect_uri=REDIRECT_URI)
    credentials = run_flow(flow, Storage('creds.data'))
    enable_stout(o_stdout, o_file)
    return "access_token: %s" % credentials.access_token
if __name__ == '__main__':
    # BUG FIX: the module docstring promises to print the access token, but
    # the old code called return_token() and discarded the result.
    print(return_token())
|
#访问限制
#!/usr/bin/bash
class Student(object):
    """A student whose state is kept in name-mangled private attributes and
    exposed only through accessor methods."""

    def __init__(self, name, score):
        # The double-underscore prefix triggers name mangling
        # (_Student__name), hiding the attributes from casual outside access.
        self.__name = name
        self.__score = score

    def print_score(self):
        print('%s: %s' % (self.__name, self.__score))

    def set_score(self, score):
        """Update the score after validating it lies in 0..100."""
        if not (0 <= score <= 100):
            raise ValueError('bad score')
        self.__score = score

    def get_name(self):
        return self.__name

    def get_score(self):
        return self.__score
'''
如果要让内部属性不被外部访问,可以把属性的名称前加上两个下划线__,在Python中,
实例的变量名如果以__开头,就变成了一个私有变量(private),只有内部可以访问,
外部不能访问,所以,我们把Student类改一改:
'''
"""最后注意下面的这种错误写法:"""
# Demonstration: assigning bart.__name from outside does NOT touch the
# private attribute.
bart = Student('Bart Simpson', 59)
print(bart.get_name())
bart.__name = 'New Name' # creates a NEW __name attribute on the instance!
print(bart.__name)
'''
表面上看,外部代码“成功”地设置了__name变量,但实际上这个__name变量和class内部的__name变量
不是一个变量!内部的__name变量已经被Python解释器自动改成了_Student__name,而外部代码给bart
新增了一个__name变量。不信试试:
'''
# The accessor still reads the mangled _Student__name, unchanged above.
print(bart.get_name()) # get_name()内部返回self.__name
|
from src.map_module.worldmap import WorldMap
from src.creature_module.player import Player
class GameState:
    """Container for a running game's mutable state."""

    def __init__(self, player: Player, wmap: WorldMap):
        # player character data
        self.player: Player = player
        # world map data
        self.world_map: WorldMap = wmap
        # creatures actively taking actions/moving
        self.active_creatures = []
|
a=dict()
b=dict()
word=input()
for x in word:
a[x]=a.get(x,0)+1
word2=input()
for x in word2:
b[x]=b.get(x,0)+1
for i in a.keys():
if i in b.keys():
if a[i]!=b[i]:
print("NO")
break
else:
print("NO")
break
else:
print("YES")
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool, Float64, Int32
from styx_msgs.msg import Lane, TrafficLightArray
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped, PoseStamped
# from tl_detector import TLDetector
import math
import numpy as np
from twist_controller import Controller
from pid import PID
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, its enabled all the time, in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
    def __init__(self):
        """Initialise the drive-by-wire node: load vehicle parameters,
        subscribe to pose/velocity/dbw topics, create the command
        publishers, then enter the control loop."""
        rospy.init_node('dbw_node')
        # ---- vehicle parameters (do not alter in the launch files) ----
        self.vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        brake_deadband = rospy.get_param('~brake_deadband', .1)
        decel_limit = rospy.get_param('~decel_limit', -5)
        accel_limit = rospy.get_param('~accel_limit', 1.)
        self.wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        self.wheel_base = rospy.get_param('~wheel_base', 2.8498)
        self.steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
        # TODO: Subscribe to all the topics you need to
        self.maximum_velocity = self.kmph2mps(rospy.get_param('~velocity')) # change km/h to m/s and subtract 1 to make sure it is always lower
        # self.maximum_velocity = 20
        # ---- controller state ----
        self.cte = 0
        self.cte_bool = False
        self.prev_sample_time = None
        self.current_velocity = 0
        self.current_angular_velocity = 0
        #self.current_velocity_sub = rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_function)
        self.linear_velocity = 0
        self.angular_velocity = 0
        self.steer_direction = 0
        # Track geometry caches used by pose_cb to derive steering/CTE
        self.base_waypoints = None
        self.prev_position = None
        self.prev_msg = np.array([-1 , -1])
        self.prev_midpoint = None
        self.prev_prev_midpoint = None
        self.two_closest_points = None
        # Traffic-light / drive-mode state (-1: free driving)
        self.prev_light_msg = -1
        self.light_msg = -1
        self.drive_model = -1
        self.prev_cte = 0
        self.cte_sign = 1
        # PID gains; these zeros are placeholders (pose_cb supplies the
        # working CTE gains per step)
        kp = 0.0 # or try these values:
        ki = 0.0 # kp=0.3, ki=0.0, kd=0.57
        kd = 0.0
        self.c_position = None
        self.pid_controller_cte = PID(kp, ki, kd)
        self.pid_controller_angle = PID(kp, ki, kd)
        # ---- subscriptions ----
        self.base_waypoints_sub = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        self.current_velocity_sub = rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_function)
        # self.cte_sub = rospy.Subscriber('/cross_track_error',Float64, self.cte_function)
        #self.twist_cmd_sub = rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cmd_function)
        self.dbw_enabled_bool = False
        self.dbw_enabled_sub = rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_function)
        self.current_pose_sub = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb_function)
        self.traffic_waypoint = rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        # obtain min_speed for the yaw controller by adding the deceleration times time to the current velocity
        self.min_speed = 0 #max(0, decel_limit*time + self.current_velocity(needs to be finished))
        # TODO: Create `Controller` object
        # The Controller object returns the throttle and brake.
        self.controller = Controller(self.wheel_base, self.steer_ratio, self.min_speed, max_lat_accel, max_steer_angle, self.vehicle_mass, self.wheel_radius)
        # ---- command publishers ----
        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
                                         SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
                                            ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
                                         BrakeCmd, queue_size=1)
        #place the TLDETECTOR in the dbw for analysis
        # self.tl_detector = TLDetector()
        self.loop_rate = 10
        # Blocks here, driving the control loop until shutdown
        self.loop()
        # rospy.spin()
def loop(self):
rate = rospy.Rate(self.loop_rate) # 1Hz
while not rospy.is_shutdown():
# self.traffic_cb(self.tl_detector.actual_image_test(rospy.wait_for_message('/vehicle/traffic_lights', TrafficLightArray)))
self.pose_cb(self.c_position)
rate.sleep()
def kmph2mps(self, velocity_kmph):
return (velocity_kmph * 1000.) / (60. * 60.)
def waypoints_cb(self, waypoints):
# TODO: Implement
# rospy.loginfo("Oncoming Waypoints are loading")
self.base_waypoints = []
for waypoint in waypoints.waypoints:
# add to the waypoints list
self.base_waypoints.append([waypoint.pose.pose.position.x, waypoint.pose.pose.position.y])
self.base_waypoints = np.array(self.base_waypoints)
# rospy.loginfo("The number of oncoming waypoints are: " + str(self.base_waypoints.shape))
    def pose_cb_function(self, msg):
        # Cache the latest pose message; loop() consumes it asynchronously.
        self.c_position = msg
def pose_cb(self, msg):
if msg is None:
return
if self.base_waypoints is None:
return
rospy.loginfo("Position is updated: " + str(msg.pose.position.x) + "," + str(msg.pose.position.y))
msg = np.array([msg.pose.position.x, msg.pose.position.y])
#Find the closest two waypoints given the position.
self.steer = 0
if self.prev_sample_time is None:
self.sample_time = 0.2
self.prev_sample_time = rospy.get_time()
else:
time = rospy.get_time()
self.sample_time = time - self.prev_sample_time
# rospy.loginfo("Delta Time: " + str(self.sample_time))
self.prev_sample_time = time
if self.base_waypoints is not None:
if np.all(msg==self.prev_msg):
steer_value,pid_step_cte = 0,0
else:
#the distances from the current position for all waypoints
wp_distances = ((self.base_waypoints-msg)**2).sum(axis=1)
#find and append the closest, fourth, and eighth point
circle_points = self.base_waypoints[np.argmin(wp_distances)]
# rospy.loginfo("circle_points: " + str(circle_points))
circle_points = np.vstack((circle_points, self.base_waypoints[(np.argmin(wp_distances)+5)%len(wp_distances)]))
# rospy.loginfo("circle_points: " + str(circle_points.shape))
circle_points = np.vstack((circle_points, self.base_waypoints[(np.argmin(wp_distances)+10)%len(wp_distances)]))
# rospy.loginfo("circle_points: " + str(circle_points.shape))
#use the three points to find the radius of the circle
eval_matrix = np.vstack((-2*circle_points[:,0],-2*circle_points[:,1],(circle_points**2).sum(axis=1))).T
# rospy.loginfo("eval_matrix: " + str(eval_matrix.shape))
#subtract the last entry of the eval matrix from the others and keep the first two rows
eval_matrix = np.subtract(eval_matrix,eval_matrix[2])[0:2]
try:
x = np.linalg.solve(eval_matrix[:,0:2],eval_matrix[:,2])
# rospy.loginfo("X obtained: " + str(x))
radius = (((msg-x)**2).sum()*1.0)**(1.0/2)
# rospy.loginfo("Radius: " + str(radius))
#convert the angle into degrees then divide by the steering ratio to get the steer value
angle = np.arcsin(self.wheel_base/radius) #* (180.0/np.pi)
steer_value = angle * self.steer_ratio
except:
steer_value = 0
#the sign of the steer value depends on the slope from the first to the last point
two_closest_points = self.base_waypoints[np.sort(wp_distances.argsort()[:2])].copy()
# make sure the waypoints are in the correct order by comparing their distance from the previous midpoint
# keep the last two midpoints
if self.prev_midpoint is None:
self.two_closest_points = two_closest_points.copy()
self.prev_midpoint = np.divide(np.add(two_closest_points[0],two_closest_points[1]),2.0).copy()
self.prev_prev_midpoint = self.prev_midpoint.copy()
elif np.all(self.prev_midpoint==np.divide(np.add(two_closest_points[0],two_closest_points[1]),2.0)):
two_closest_points = self.two_closest_points.copy()
else:
#if the midpoints are not equal, sort by proximity to the previous midpoint
# rospy.loginfo("Closest points may change: " + str(two_closest_points[0][0]) + "," + str(two_closest_points[0][1]))
# rospy.loginfo("Closest points may change: " + str(two_closest_points[1][0]) + "," + str(two_closest_points[1][1]))
self.two_closest_points = two_closest_points[((two_closest_points-self.prev_midpoint)**2).sum(axis=1).argsort()].copy()
two_closest_points = self.two_closest_points.copy()
self.prev_prev_midpoint = self.prev_midpoint.copy()
self.prev_midpoint = np.divide(np.add(two_closest_points[0],two_closest_points[1]),2.0).copy()
# rospy.loginfo("Closest points: " + str(two_closest_points[0][0]) + "," + str(two_closest_points[0][1]))
# rospy.loginfo("Closest points: " + str(two_closest_points[1][0]) + "," + str(two_closest_points[1][1]))
# get the direction for the steer value
if (np.cross(two_closest_points[0]-self.prev_prev_midpoint,circle_points[2]-self.prev_prev_midpoint)<0):
steer_value *= -1
if np.all(self.prev_prev_midpoint == self.prev_midpoint):
steer_value *= -1
self.cte = abs(np.linalg.norm(np.cross(two_closest_points[0]-two_closest_points[1], two_closest_points[1]-msg))/np.linalg.norm(two_closest_points[0]-two_closest_points[1]))
# rospy.loginfo("The CTE: " + str(self.cte))
#the cross product will determine the direction. if the cross product is positive, the the car is to the left, cte is negative
# rospy.loginfo("two_closest_points[0]-self.prev_prev_midpoint: " + str(two_closest_points[0]-self.prev_prev_midpoint))
# rospy.loginfo("msg-self.prev_prev_midpoint: " + str(msg-self.prev_prev_midpoint))
# rospy.loginfo("self.prev_prev_midpoint: " + str(self.prev_prev_midpoint))
# rospy.loginfo("np.cross: " + str(np.cross(two_closest_points[0]-self.prev_prev_midpoint,msg-self.prev_prev_midpoint)))
if (np.cross(two_closest_points[0]-self.prev_prev_midpoint,msg-self.prev_prev_midpoint)>0):
self.cte *= -1
if np.all(self.prev_prev_midpoint == self.prev_midpoint):
self.cte *= -1
# if ((course_midpoint-msg)**2).sum() < ((course_midpoint-self.prev_midpoint)**2).sum():
# rospy.loginfo("The CTE: " + str(self.cte))
kp_cte = 0.25#0.1 - .05*self.current_velocity/self.maximum_velocity###07 best is 0.31, .41
ki_cte = 0.0#16#.08 # 1.015
kd_cte = 0.5#0.25 + .20*self.current_velocity/self.maximum_velocity#5#.35 # 0.5
pid_step_cte = max(min(self.pid_controller_cte.step(self.cte, self.sample_time, kp_cte, ki_cte, kd_cte), 8), -8)
self.prev_msg = msg
# rospy.loginfo("The steer value: " + str(steer_value))
# rospy.loginfo("The PID CTE: " + str(pid_step_cte))
# rospy.loginfo("The STR: " + str(steer_value+pid_step_angle+pid_step_cte))
# the drive model will determine the throttle and brake
if self.drive_model==-2:
throttle, brake = 0, 0
self.drive_model = self.prev_light_msg
elif self.drive_model == -1:
if self.current_velocity >= self.maximum_velocity:
throttle, brake = 0, 0
else:
#accelerate at 8m/s**2. I noticed that at a constant throttle of 0.1, a velocity close to 12mph (5.36m/s) was reached.
#Using this as a constant proportion, accelerating 8m/s would require and extra .15 added to the throttle.
#its current throttle can be estimated by proportioning (.1/5.36) it to the current velocity.
throttle, brake = min(.42,self.current_velocity*.1/5.36 + .15), 0
elif self.drive_model >= 0:
# rospy.loginfo("The self.drive_model: " + str(self.drive_model))
# rospy.loginfo("The self.drive_model type : " + str(type(self.drive_model)))
#brake at a deceleration rate of current_velocity**2/(2*distance)
wp_2_pos = ((msg-self.base_waypoints[int(self.drive_model)])**2).sum() - self.current_velocity*1.0/self.loop_rate
brake_rate = self.current_velocity**2/(2*wp_2_pos)
throttle, brake = 0, self.vehicle_mass*brake_rate*self.wheel_radius
if wp_2_pos<3:
throttle, brake = 0, self.vehicle_mass*15**2
# throttle, brake = self.controller.control(self.min_speed, self.linear_velocity, self.angular_velocity,
# self.current_velocity, self.current_angular_velocity)
if self.dbw_enabled_bool:
# rospy.loginfo("The steer: " + str(steer_value+pid_step_cte))
self.publish(throttle=throttle, brake=brake, steer=steer_value+pid_step_cte)
rospy.loginfo("The controls published: " + str(brake))
def traffic_cb(self, msg):
    """Select the drive model from a traffic-light message.

    A message of -2 means "unknown light" and falls back to the previous
    message.  A transition between green (-1) and red/yellow (>= 0) sets
    drive_model to -2 (transitional state); otherwise the message itself
    becomes the drive model.
    """
    try:
        self.light_msg = msg.data
    except:
        # message may arrive as a bare value instead of a std_msgs wrapper
        self.light_msg = msg
    rospy.loginfo("The msg: " + str(self.light_msg))
    if self.light_msg == -2:
        # Unknown traffic light: reuse the previous message.
        self.light_msg = self.prev_light_msg
    if self.light_msg == -1:
        # Green light (or far from a red light).
        self.drive_model = -2 if self.prev_light_msg >= 0 else self.light_msg
    elif self.light_msg >= 0:
        # Red or yellow light.
        self.drive_model = -2 if self.prev_light_msg == -1 else self.light_msg
    self.prev_light_msg = self.light_msg
    rospy.loginfo("The self.drive_model: " + str(self.drive_model))
def dbw_enabled_function(self, msg):
    """Cache the drive-by-wire status message and its boolean payload."""
    self.dbw_enabled = msg
    self.dbw_enabled_bool = msg.data
def current_velocity_function(self, msg):
    """Update cached speed magnitudes from a TwistStamped-like message.

    Stores the Euclidean norms of the linear and angular velocity vectors
    in self.current_velocity and self.current_angular_velocity.
    """
    linear = msg.twist.linear
    angular = msg.twist.angular
    # magnitude of the linear velocity vector
    self.current_velocity = (linear.x ** 2 + linear.y ** 2 + linear.z ** 2 * 1.0) ** 0.5
    # magnitude of the angular velocity vector
    self.current_angular_velocity = (angular.x ** 2 + angular.y ** 2 + angular.z ** 2 * 1.0) ** 0.5
def publish(self, throttle, brake, steer):
    """Publish one throttle, one steering, and one brake command.

    Throttle is a percentage command, brake a torque command; each message
    is enabled and sent on its own publisher.
    """
    throttle_cmd = ThrottleCmd()
    throttle_cmd.enable = True
    throttle_cmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
    throttle_cmd.pedal_cmd = throttle
    self.throttle_pub.publish(throttle_cmd)

    steering_cmd = SteeringCmd()
    steering_cmd.enable = True
    steering_cmd.steering_wheel_angle_cmd = steer
    self.steer_pub.publish(steering_cmd)

    brake_cmd = BrakeCmd()
    brake_cmd.enable = True
    brake_cmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
    brake_cmd.pedal_cmd = brake
    self.brake_pub.publish(brake_cmd)
# Entry point: construct the node.
# NOTE(review): the constructor is not visible here -- presumably it
# registers subscribers and spins; confirm before relying on it.
if __name__ == '__main__':
    DBWNode()
|
'''
Created on 26 mei 2017
@author: Robin Knoet
'''
import configparser
import os
class unitConverter(object):
    """Message-part command that converts a value between two units.

    Convertible unit pairs are read from ``modules/convert/config.ini`` and
    stored as an undirected graph; a conversion route between two units is
    found with a recursive shortest-path search.
    """

    __MODULECOMMAND = "convert"
    __isPublicOnError = False
    __isPublicOnHelp = False
    __helpMessage = """
    Convert message part command
    Expected format:
    convert <unit> <unit> <value>
    """

    @property
    def moduleCommand(self):
        """Command word(s) this module answers to."""
        return [self.__MODULECOMMAND]

    @property
    def isPublicOnError(self):
        return self.__isPublicOnError

    @property
    def isPublicHelp(self):
        return self.__isPublicOnHelp

    @property
    def helpMessage(self):
        return self.__helpMessage

    def addToGraph(self, unit_pair, graph=None):
        """Insert one unit pair into *graph* as an undirected edge.

        Args:
            unit_pair: dict with a "units" tuple of two unit names.
            graph: adjacency dict to extend; a fresh dict is created when
                omitted.  (BUG fix: the original used a mutable default
                argument, so repeated calls without *graph* shared state.)

        Returns:
            The updated graph.
        """
        if graph is None:
            graph = {}
        units = unit_pair.get("units")
        if units[0] not in graph:  # new node
            graph[units[0]] = [units[1]]
        elif units[1] not in graph.get(units[0]):  # new arc (vertex in a directed graph)
            graph.get(units[0]).append(units[1])
        else:
            # BUG fix: the original concatenated the dict itself to the
            # message, which raised TypeError instead of warning.
            print("There seems to be a double unitPair: " + str(unit_pair))
        if units[1] not in graph:  # new node
            graph[units[1]] = [units[0]]
        elif units[0] not in graph.get(units[1]):  # new arc (vertex in a directed graph)
            graph.get(units[1]).append(units[0])
        else:
            print("There seems to be a double unitPair: " + str(unit_pair))
        return graph

    def createGraph(self, unit_pairs):
        """Build the full undirected unit graph from a list of unit pairs."""
        graph = {}
        for d in unit_pairs:
            graph = self.addToGraph(d, graph)
        return graph

    def __init__(self, params):
        """Load the distance-unit configuration and build the unit graph.

        Args:
            params: sequence whose second element is the module base directory.
        """
        config = configparser.RawConfigParser()
        # BUG fix: readfp(open(...)) leaked the file handle and used the
        # deprecated readfp API; use a context manager with read_file.
        config_path = os.path.join(params[1], "modules", "convert", "config.ini")
        with open(config_path) as config_file:
            config.read_file(config_file)
        unitList = config.get("Vertices", "distanceVertices").strip().split("\n")
        self.distanceUnitPairs = []
        for s in unitList:
            # Each line has the form "<unitA> <unitB> <factorA>:<factorB>".
            spaceSplit = s.split()
            factorSplit = spaceSplit[2].split(":")
            self.distanceUnitPairs.append({"units": (spaceSplit[0], spaceSplit[1]),
                                           "values": (float(factorSplit[0]), float(factorSplit[1]))})
        self.distanceGraph = self.createGraph(self.distanceUnitPairs)

    def findShortestPath(self, graph, start, end, path=None):
        """Find the shortest path from *start* to *end* in *graph*.

        Based on https://www.python.org/doc/essays/graphs/

        Returns:
            A list of nodes from start to end, or None when no path exists.
        """
        # (path=None instead of a mutable default; behaviour is unchanged.)
        path = (path or []) + [start]
        if start == end:
            return path
        if start not in graph:
            return None
        shortest = None
        for node in graph[start]:
            if node not in path:
                newpath = self.findShortestPath(graph, node, end, path)
                if newpath and (not shortest or len(newpath) < len(shortest)):
                    shortest = newpath
        return shortest

    def command(self, m_dictionary):
        """Handle a convert command (not implemented yet)."""
        pass

    def help(self, m_dictionary):
        """Handle a help request (not implemented yet)."""
        pass
# Generated by Django 2.2.9 on 2020-01-24 17:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration adding the SelfReportedCashFlowModel table."""

    dependencies = [
        ('homechallenge', '0008_auto_20200124_1132'),
    ]

    operations = [
        migrations.CreateModel(
            name='SelfReportedCashFlowModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Self-reported financial figures, stored as floats.
                ('AnnualRevenue', models.FloatField()),
                ('MonthlyAverageBankBalance', models.FloatField()),
                ('MonthlyAverageCreditCardVolume', models.FloatField()),
                # Optional link to the owning business; deleting the business
                # cascades to its cash-flow reports.
                ('business', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='homechallenge.BusinessModel')),
            ],
        ),
    ]
|
class Node:
    """A BDD node: a variable plus positive/negative cofactor edges."""
    def __init__(self, var, posCof, negCof):
        self.var = var        # variable label ('1' for the terminal node)
        self.posCof = posCof  # Formula for var = 1 (None on the terminal)
        self.negCof = negCof  # Formula for var = 0 (None on the terminal)


class Formula:
    """An edge into the diagram: a node plus a complement bit."""
    def __init__(self, node, compBit):
        self.node = node
        self.compBit = compBit  # True means the referenced function is negated


class ROBDD:
    """Reduced Ordered BDD with complemented edges and a unique node table."""

    def __init__(self, varSeq):
        """Build the diagram for variable ordering *varSeq*.

        Seeds self.formulas with the constants 'T'/'F' (aliases '1'/'0') and
        one positive plus one negated literal per variable.
        """
        self.varSeq = varSeq + ['1']  # '1' is the terminal pseudo-variable
        self.table = dict()           # unique table: key tuple -> Node
        self.formulas = dict()        # formula name -> Formula
        self.addLeafNode()
        self.formulas['T'] = Formula(self.leafNode, False)
        self.formulas['F'] = Formula(self.leafNode, True)
        self.formulas['1'] = self.formulas['T']
        self.formulas['0'] = self.formulas['F']
        for var in varSeq:
            self.formulas[var] = Formula(Node(var, self.formulas['T'], self.formulas['F']), False)
            self.formulas['~' + var] = Formula(Node(var, self.formulas['T'], self.formulas['F']), True)

    def addLeafNode(self):
        """Create the terminal node '1' and register it in the unique table."""
        self.leafNode = Node('1', None, None)
        entry = ('1', -1, -1, -1, -1)
        self.table[entry] = self.leafNode

    def addNode(self, var, posCof, negCof):
        """Insert (or find) the node (var, posCof, negCof) in the unique table.

        Returns:
            (node, cb): *cb* must be XOR-ed into the caller's complement bit,
            because the positive edge is normalised to be non-complemented.
        """
        # Reduction rule: equal cofactors make the node redundant.
        if self.cmpFormula(posCof, negCof):
            return posCof.node, posCof.compBit
        pc = self.cpyFormula(posCof)
        nc = self.cpyFormula(negCof)
        # Guarantee that the posCof edge is a regular (non-complemented) edge.
        cb = pc.compBit
        if cb:
            pc.compBit = not (pc.compBit)
            nc.compBit = not (nc.compBit)
        # Check for a structurally identical node.
        node = self.findNode(var, pc, nc)
        if node is not None:
            return node, cb
        # Add a new node.
        entry = (var, id(pc.node), pc.compBit, id(nc.node), nc.compBit)
        node = Node(var, pc, nc)
        self.table[entry] = node
        return node, cb

    def findNode(self, var, posCof, negCof):
        """Look the node up in the unique table; returns None when absent."""
        if var == '1':
            return self.leafNode
        pc = posCof
        nc = negCof
        entry = (var, id(pc.node), pc.compBit, id(nc.node), nc.compBit)
        node = self.table.get(entry)
        return node

    def addFormula(self, Xstr, var, posCof, negCof, compBit):
        """Register the formula *Xstr* built from the given node and store it."""
        node, cb = self.addNode(var, posCof, negCof)
        X = Formula(node, compBit ^ cb)
        self.formulas[Xstr] = X
        return X

    def addIteFormula(self, Xstr, Fstr, Gstr, Hstr):
        """Compute Xstr = ite(F, G, H) over named formulas and store it."""
        F = self.formulas[Fstr]
        G = self.formulas[Gstr]
        H = self.formulas[Hstr]
        X = self.applyIte(F, G, H, 0)
        self.formulas[Xstr] = X
        return X

    def applyIte(self, F, G, H, varIndex):
        """Recursively apply ite(F, G, H); returns a Formula."""
        # Terminal cases.
        if self.cmpFormula(F, self.formulas['T']):
            return G
        elif self.cmpFormula(F, self.formulas['F']):
            return H
        elif self.cmpFormula(G, self.formulas['T']) and self.cmpFormula(H, self.formulas['F']):
            return F
        elif self.cmpFormula(G, self.formulas['F']) and self.cmpFormula(H, self.formulas['T']):
            return Formula(F.node, not F.compBit)
        # F.node's variable index may be greater than varIndex
        # (several levels can be skipped at once).
        var = self.varSeq[varIndex]
        if F.node.var == var:
            Fpc = F.node.posCof
            Fnc = F.node.negCof
            if F.compBit == True:
                # Push the complement bit down onto the cofactors.
                Fpc = self.cpyFormula(Fpc)
                Fnc = self.cpyFormula(Fnc)
                Fpc.compBit = not Fpc.compBit
                Fnc.compBit = not Fnc.compBit
        else:
            Fpc = F
            Fnc = F
        if G.node.var == var:
            Gpc = G.node.posCof
            Gnc = G.node.negCof
            if G.compBit == True:
                Gpc = self.cpyFormula(Gpc)
                Gnc = self.cpyFormula(Gnc)
                Gpc.compBit = not Gpc.compBit
                Gnc.compBit = not Gnc.compBit
        else:
            Gpc = G
            Gnc = G
        if H.node.var == var:
            Hpc = H.node.posCof
            Hnc = H.node.negCof
            if H.compBit == True:
                Hpc = self.cpyFormula(Hpc)
                Hnc = self.cpyFormula(Hnc)
                Hpc.compBit = not Hpc.compBit
                Hnc.compBit = not Hnc.compBit
        else:
            Hpc = H
            Hnc = H
        # Apply ite recursively on both cofactors.
        posCof = self.applyIte(Fpc, Gpc, Hpc, varIndex + 1)
        negCof = self.applyIte(Fnc, Gnc, Hnc, varIndex + 1)
        # Add the new node.
        node, cb = self.addNode(var, posCof, negCof)
        return Formula(node, cb)

    def addCofFormula(self, Fstr, tgVar, sign):
        """Cofactor formula *Fstr* w.r.t. *tgVar* ('+' or '-') and store it
        under the key Fstr + sign + tgVar."""
        tgVarIndex = self.varSeq.index(tgVar)
        F = self.formulas[Fstr]
        X = self.applyCof(F, tgVarIndex, sign)
        self.formulas[Fstr + sign + tgVar] = X
        return X

    def applyCof(self, F, tgVarIndex, sign):
        """Recursively apply the cofactor; returns a Formula."""
        # Terminal case: the target variable cannot appear below this node.
        if tgVarIndex < self.varSeq.index(F.node.var):
            return F
        pc = self.cpyFormula(F.node.posCof)
        nc = self.cpyFormula(F.node.negCof)
        if F.compBit == True:
            pc.compBit = not pc.compBit
            nc.compBit = not nc.compBit
        if tgVarIndex == self.varSeq.index(F.node.var):
            if sign == '+':
                return pc
            else:
                return nc
        # Apply cofactor recursively.
        # BUG fix: the original called self.applyCof(pc) without tgVarIndex
        # and sign, which raised TypeError on every recursive step.
        posCof = self.applyCof(pc, tgVarIndex, sign)
        negCof = self.applyCof(nc, tgVarIndex, sign)
        # Add the new node.
        node, cb = self.addNode(F.node.var, posCof, negCof)
        return Formula(node, cb)

    def cmpFormula(self, F1, F2):
        """Return True when two formulas reference the same node with the
        same complement bit (functional equality in a reduced diagram)."""
        return F1.node == F2.node and F1.compBit == F2.compBit

    def cpyFormula(self, F):
        """Return a shallow copy of a formula (shared node, own compBit)."""
        return Formula(F.node, F.compBit)

    def showFormula(self, Xstr):
        """Print formula *Xstr* as a sum of products (one product per
        satisfying path), or as a constant when it is the terminal."""
        X = self.formulas[Xstr]
        if X.node.var == '1':
            print(Xstr, '=', not X.compBit)
        else:
            print(Xstr, '=', end = ' ')
            self.showRec(X, False, list())
            print(0)

    def showRec(self, X, compBit, varList):
        """Walk all paths; print the literal product of each path that ends
        on the terminal with an even number of complemented edges."""
        if X.node == self.leafNode:
            if X.compBit ^ compBit:
                return
            else:
                for var in varList:
                    print(var, end = '')
                print(' + ', end = '')
        else:
            self.showRec(X.node.posCof, compBit ^ X.compBit, varList + [X.node.var])
            self.showRec(X.node.negCof, compBit ^ X.compBit, varList + ['~' + X.node.var])
|
import nonebot
from nonebot.adapters.cqhttp import Bot as CQHTTPBot
# Initialise NoneBot; an empty command start means commands need no prefix.
nonebot.init(command_start=[""])
# ASGI application object, referenced by the "bot:app" target below.
app = nonebot.get_asgi()
driver = nonebot.get_driver()
# Attach the CQHTTP (OneBot) protocol adapter.
driver.register_adapter('cqhttp', CQHTTPBot)
# Load built-in plugins, project plugins, and the APScheduler plugin.
nonebot.load_builtin_plugins()
nonebot.load_plugins('src/plugins')
nonebot.load_plugin("nonebot_plugin_apscheduler")
# Run the bot when executed directly (module is assumed to be named "bot").
if __name__ == "__main__":
    nonebot.run(app="bot:app")
|
#-*- coding: UTF-8 -*-
import os
import json
import codecs
import logging
import ray
import subprocess
import master
import senteval
from collections import defaultdict
def prepare(params, samples):
    """Collect the vocabulary of *samples* plus sentence markers and build
    the embedding table on the shared master model."""
    vocab = defaultdict(lambda: 0)
    for sentence in samples:
        for token in sentence:
            vocab[token] = 1
    for marker in ('<s>', '</s>'):
        vocab[marker] = 1
    params.master.build_emb(vocab)
def batcher(params, batch):
    """Join each tokenised sentence (with a closing </s> marker) into one
    string and return the master model's normalised encodings."""
    joined = [' '.join(tokens + ['</s>']) for tokens in batch]
    return params.master.encode(joined, use_norm=True)
@ray.remote
def call_eval(task, call_id):
    """Evaluate the SentEval tasks in *task* inside a Ray worker.

    Each worker logs to its own file (log.eval.<call_id>) so parallel
    workers do not interleave output.  Returns SentEval's result dict.
    """
    # Attach a per-worker file handler to the root logger.
    fileHandler = logging.FileHandler(os.path.abspath('.')+'/log.eval.'+str(call_id), mode='w', encoding='UTF-8')
    formatter = logging.Formatter('%(asctime)s %(filename)s[%(lineno)d] %(levelname)s %(message)s', '%Y-%m-%d %H:%M:%S')
    fileHandler.setFormatter(formatter)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(fileHandler)
    # Build and prepare the sentence-encoder model from the shared config.
    m = master.Master('conf.json')
    m.creat_graph()
    # Pin the worker to the first GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    m.prepare()
    params_senteval = {'task_path':m.conf['path']['tasks'], 'usepytorch':False, 'kfold':10}
    params_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
                                     'tenacity': 5, 'epoch_size': 4}
    params_senteval['master'] = m
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    result = se.eval(task)
    # NOTE(review): presumably a TensorFlow session -- confirm.
    m.sess.close()
    return result
if __name__ == '__main__':
    # NOTE(review): Python 2 script (xrange).  `conf` is loaded here but not
    # used below -- confirm whether it is still needed.
    with codecs.open('conf.json', 'r', 'utf-8') as fin:
        conf = json.load(fin)
    ray.init(num_gpus=1, redirect_output=True)
    # Task groups evaluated in parallel, one Ray worker per group.
    tasks = [['MR'], ['SUBJ'], ['MPQA'], ['CR', 'SST2', 'TREC', 'MRPC', 'SICKRelatedness', 'SICKEntailment', 'STS14']]
    outs = ray.get( [call_eval.remote(tasks[i], i) for i in xrange(len(tasks))] )
    # Merge the per-worker result dicts into one flat dict.
    results = dict()
    for result in outs:
        results.update(result)
    # One-line summary of all task scores.
    resultstr = 'MR:%.2f CR:%.2f SUBJ:%.2f MPQA:%.2f SST2:%.2f TREC:%.1f MRPC:%.2f/%.2f SICK-E:%.2f SCIK-R:%.3f STS14:%.2f/%.2f' % (
        results['MR']['acc'], results['CR']['acc'], results['SUBJ']['acc'], results['MPQA']['acc'], results['SST2']['acc'],
        results['TREC']['acc'], results['MRPC']['acc'], results['MRPC']['f1'], results['SICKEntailment']['acc'],
        results['SICKRelatedness']['pearson'], results['STS14']['all']['pearson']['wmean'], results['STS14']['all']['spearman']['wmean'])
    # Concatenate the per-worker logs, remove them, and append the summary.
    cmd = ''
    for i in xrange(len(tasks)):
        cmd += "cat %s/log.eval.%d >>log.eval;" % (os.path.abspath('.'), i)
    cmd += r"rm %s/log.eval.*; echo %s>>log.eval;" % (os.path.abspath('.'), resultstr)
    subprocess.check_call(cmd, shell=True)
|
import numpy as np
from binomial_node import BinomialNode
def binomial_pricing(spot, strike, dividend_yield, volatility, time_mature, desired_length, interest_rate):
    """
    Generates a binomial price tree for an American-Style call option
    :param spot: current price of stock in question
    :param strike: given strike/exercise price for options contract
    :param dividend_yield: how much given stock pays out in dividends as a percent
    :param volatility: standard deviation of stock's returns
    :param time_mature: time until option matures, given in days
    :param desired_length: desired number of levels for price tree
    :param interest_rate: annualized risk free interest rate
    :return: calculated value of the option as a float
    """
    len_step = time_mature / desired_length
    increase_factor = np.e ** (volatility * np.sqrt(len_step / 365.0))
    decrease_factor = 1.0 / increase_factor
    # NOTE(review): strike - spot is a PUT payoff, while the docstring says
    # "call option"; it is also fixed at the root spot rather than computed
    # per node, which is unusual for American exercise -- confirm intent.
    exercise_value = strike - spot
    def option_probability():
        """
        Function used to calculate the probability of an up/down move. Meant to simulate geometric
        Brownian motion
        """
        # NOTE(review): the drift term uses len_step while the up/down factors
        # use len_step / 365.0 -- confirm the time units are consistent.
        numerator = np.e ** ((interest_rate - dividend_yield) * len_step) - decrease_factor
        denominator = increase_factor - decrease_factor
        return numerator / denominator
    opt_prob = option_probability()
    def generate_tree(node, spot, time):
        """
        Generates binomial price tree two levels at a time
        On the final level, assigns a value to each node
        :param node: reference to BinomialNode
        :param spot: current price at the level
        :param time: current level the tree is on (time period of level is decided earlier)
        """
        node.left_node = BinomialNode(spot * decrease_factor)
        node.right_node = BinomialNode(spot * increase_factor)
        time += 1
        # NOTE(review): time is incremented here AND time + 1 is passed to the
        # recursive calls, so levels advance by two per recursion -- confirm
        # this matches desired_length semantics.
        if time + 1 == desired_length:
            # Final two levels: assign terminal (call-style) payoffs.
            node.left_node.left_node = BinomialNode(spot * decrease_factor * decrease_factor)
            node.left_node.left_node.value = np.max([node.left_node.left_node.root - strike, 0])
            node.left_node.right_node = BinomialNode(spot * decrease_factor * increase_factor)
            node.left_node.right_node.value = np.max([node.left_node.right_node.root - strike, 0])
            # The up-down and down-up paths recombine into one shared node.
            node.right_node.left_node = node.left_node.right_node
            node.right_node.right_node = BinomialNode(spot * increase_factor * increase_factor)
            node.right_node.right_node.value = np.max([node.right_node.right_node.root - strike, 0])
        else:
            node.left_node.left_node = generate_tree(
                BinomialNode(spot * decrease_factor * decrease_factor),
                spot * decrease_factor * decrease_factor, time + 1)
            node.left_node.right_node = generate_tree(
                BinomialNode(spot * decrease_factor * increase_factor),
                spot * decrease_factor * increase_factor, time + 1)
            # Recombining tree: share the middle subtree.
            node.right_node.left_node = node.left_node.right_node
            node.right_node.right_node = generate_tree(
                BinomialNode(spot * increase_factor * increase_factor),
                spot * increase_factor * increase_factor, time + 1)
        return node
    def value_node(node):
        """
        Given a binary node, sets the value of that node based on future nodes (must start one level
        behind final level)
        :param node: node being valued
        """
        if node.left_node.value is None:
            node.left_node = value_node(node.left_node)
        if node.right_node.value is None:
            node.right_node = value_node(node.right_node)
        # Discounted risk-neutral expectation of the two child values.
        binomial_value = (np.e ** (-interest_rate * len_step)) * (
            opt_prob * node.right_node.value + (1 - opt_prob) * node.left_node.value)
        # American exercise: take the better of holding and exercising.
        node.value = np.max([binomial_value, exercise_value])
        return node
    def price_tree(node):
        """
        Prices the entire binary tree working backwards before returning the updated price tree
        :param node: top node of entire binomial price tree
        """
        return value_node(node)
    node = generate_tree(BinomialNode(spot), spot, 0)
    node = price_tree(node)
    return node.value
|
# This program creates an object of the pet class
# and asks the user to enter the name, type, and age of pet.
# The program retrieves the pet's name, type, and age and
# displays the data.
class Pet:
    """A pet record holding a name, an animal type, and an age.

    All attributes are name-mangled private; use the accessors below.
    """

    def __init__(self, name, animal_type, age):
        self.__name = name                # the pet's name
        self.__animal_type = animal_type  # e.g. "dog", "cat"
        self.__age = age                  # age in years

    # --- mutators ---------------------------------------------------
    def set_name(self, name):
        """Replace the stored name."""
        self.__name = name

    def set_type(self, animal_type):
        """Replace the stored animal type."""
        self.__animal_type = animal_type

    def set_age(self, age):
        """Replace the stored age."""
        self.__age = age

    # --- accessors --------------------------------------------------
    def get_name(self):
        """Return the pet's name."""
        return self.__name

    def get_animal_type(self):
        """Return the animal type."""
        return self.__animal_type

    def get_age(self):
        """Return the pet's age."""
        return self.__age
# The main function
def main():
    """Prompt the user for a pet's details, then echo the stored record."""
    pet_name = input('What is the name of the pet? ')
    pet_type = input('What type of animal is it? ')
    pet_age = int(input('How old is the pet? '))
    pet = Pet(pet_name, pet_type, pet_age)
    # Display the collected data back to the user.
    print('This information will be added to our records.')
    print('Here is the data you entered: ')
    print('------------------------------')
    print('Pet Name:', pet.get_name())
    print('Animal Type:', pet.get_animal_type())
    print('Age:', pet.get_age())
# Program entry point: run the interactive prompt when the module executes.
main()
|
class Solution(object):
    """Count the decodings of a digit string where '1'..'26' map to 'A'..'Z'."""

    def fn(self, v):
        """Return True when digit string *v* (length 1 or 2) is a valid code.

        A valid single code is '1'..'9'; a valid pair is '10'..'26'
        (no leading zero).
        """
        if len(v) == 1:
            return v != '0'
        if len(v) == 2:
            return v[0] != '0' and 1 <= int(v) <= 26
        # empty string or longer slices are never a single code
        return False

    def numDecodings(self, s):
        """
        :type s: str
        :rtype: int

        BUG fix: the original recurrence (v[idx] = v[idx-1] + 1 / - 1) does
        not count decodings (it returned 0 for "102" and 4 for "1111").
        The correct recurrence is Fibonacci-like:
            ways(i) = ways(i-1) if s[i] decodes alone
                    + ways(i-2) if s[i-1:i+1] decodes as a pair.
        """
        if s == "":
            return 0
        # ways for the prefixes ending two and one positions back
        prev2 = 1
        prev1 = 1 if self.fn(s[0]) else 0
        for idx in range(1, len(s)):
            current = 0
            if self.fn(s[idx]):
                current += prev1
            if self.fn(s[idx - 1:idx + 1]):
                current += prev2
            if current == 0:
                # no valid decoding can extend past this position
                return 0
            prev2, prev1 = prev1, current
        return prev1
# Ad-hoc smoke test (Python 2 print statement): print the decode count of "102".
sol =Solution()
print sol.numDecodings("102")
|
'''
User enters an arithmetic progression, with one item omitted, and the length of the input, the
program tells the omitted item
'''
import sys
import numpy as np
def solution1():
    """Read n and n terms of an arithmetic progression missing one interior
    term; print the missing term.

    NOTE(review): Python 2 only (raw_input / print statement); `list` shadows
    the builtin.
    """
    n = int(raw_input())
    list= raw_input().split()
    for i in range(0,n):
        list[i] = int(list[i])
    # n entered terms span n gaps of the complete (n+1)-term progression,
    # so the common difference is the total span divided by n.
    # NOTE(review): integer division -- assumes an integer progression.
    difference = list[len(list)-1] - list[0]
    progression = difference / n
    # Print the expected value at the gap that breaks the progression.
    # NOTE(review): does not break after the first hit and cannot detect a
    # missing first/last element -- confirm the intended input constraints.
    for i in range(0,len(list)-1):
        if (list[i+1] != list[i] + progression):
            print str(list[i] + progression)
def main():
    """Program entry point: delegate to the single implemented solution."""
    solution1()

if __name__ == '__main__':
    main()
|
import ROOT
import argparse
from ROOT import TLorentzVector, TH1D
import numpy as np
import Sample
from helpers_old import progress, makeDirIfNeeded, showBranch
import objectSelection as objSel
from efficiency import efficiency
from ROC import ROC
import eventSelectionTest
# Command-line configuration for the tau fake-rate measurement (Python 2 script).
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--sampleName', action='store', default='DYJetsToLL_M-50')
argParser.add_argument('--subJob', action='store', default=0)
argParser.add_argument('--method', action='store', default='Bluj')
argParser.add_argument('--inputFile', action='store', default='inputFilesv3')
argParser.add_argument('--isTest', action='store', default=False)
args = argParser.parse_args()

# Look up the requested sample in the configuration file and open its tree.
sampleList = Sample.createSampleList('/user/lwezenbe/private/PhD/Code/TauStudy/Efficiency/Data/'+ args.inputFile +'.conf')
sample = Sample.getSampleFromList(sampleList, args.sampleName)
print 'Initializing'
Chain = sample.initTree(needhCount=False)

# Tau-ID algorithms under study, each with its list of working points.
tau_id_algos = [('oldMVA2015', ['VLoose', 'Loose', 'Medium', 'Tight', 'VTight']),
                ('newMVA2015', ['VLoose', 'Loose', 'Medium', 'Tight', 'VTight']),
                ('oldMVA2017v2', ['VLoose', 'Loose', 'Medium', 'Tight', 'VTight']),
                ('newMVA2017v2', ['VLoose', 'Loose', 'Medium', 'Tight', 'VTight']),
                ('cut_based', ['VVLoose', 'VLoose', 'Loose', 'Medium', 'Tight']),
                ('deeptau', ['VVVLoose', 'VVLoose', 'VLoose', 'Loose', 'Medium', 'Tight', 'VTight', 'VVTight'])] #If you add more ID's, don't forget to change it in the getTauIDs() function in objectSelection as well
#################################################################################################
#####################################METHOD FUNCTIONS############################################
#depending on the method variable, the efficiency is calculated in one of the ways defined below#
#############Temporarily here because I didnt know what to do with it yet########################
#################################################################################################
def CalcANFakeRate(Chain, sample, args):
    """Fill jet-to-tau mis-ID numerators/denominators using jet matching.

    Denominator: every jet above threshold matched to a reco tau that is not
    a generator-level genuine tau.  Numerator: those also passing decay-mode
    finding and a given working point of each ID algorithm.  Results are
    accumulated in the module-level roc / ptHist / etaHist collections.
    """
    global roc
    global ptHist
    global etaHist
    for jet in xrange(Chain._nJets):
        if Chain._jetPt[jet] < 20 or abs(Chain._jetEta[jet]) > 2.3: continue
        jetVec = objSel.getFourVec(Chain._jetPt[jet], Chain._jetEta[jet], Chain._jetPhi[jet], Chain._jetE[jet])
        # Find matching reco tau.
        matchindex = objSel.tauMatchIndexIso(Chain, jetVec)
        # BUG fix: the original tested 'matchindex != 1', letting an unmatched
        # index of -1 fall through into negative-index array reads; -1 is the
        # "no match" sentinel used by the reference implementation below.
        if matchindex == -1: continue
        # Skip generator-level genuine taus (gen status 5); only fakes count.
        if Chain._tauGenStatus[matchindex] == 5: continue
        for i in xrange(len(tau_id_algos)):
            # BUG fix: 'CHain' typo raised NameError on the first event.
            roc[i].fill_misid_denominator(Chain._weight)
            ptHist[i].fill_denominator(Chain._jetPt[jet], Chain._weight)
            etaHist[i].fill_denominator(Chain._jetEta[jet], Chain._weight)
        DMfinding = objSel.getDMfinding(Chain, matchindex)
        discriminators = objSel.getTauIDs(Chain, matchindex)
        for i, discr in enumerate(discriminators):
            if not DMfinding[i]: continue
            for j, WP in enumerate(discr):
                if WP:
                    # BUG fix: 'passedTau' was never defined; fill the ROC
                    # numerator as CalcBlujFakeRate does.
                    roc[i].fill_misid_numerator(j, Chain._weight)
                    ptHist[i].fill_numerator(Chain._lPt[matchindex], j, Chain._weight)
                    etaHist[i].fill_numerator(Chain._lEta[matchindex], j, Chain._weight)
def CalcBlujFakeRate(Chain, sample, args):
    """Fill jet-to-tau mis-ID numerators/denominators using gen matching.

    Denominator: every good baseline reco tau whose generator status is 6
    (presumably "jet misidentified as tau" -- confirm the convention).
    Numerator: those also passing decay-mode finding and a given working
    point of each ID algorithm.  Results are accumulated in the
    module-level roc / ptHist / etaHist collections.
    """
    global roc
    global ptHist
    global etaHist
    for lepton in xrange(Chain._nLight, Chain._nL):
        if not objSel.isGoodBaseTauIso(Chain, lepton): continue
        if Chain._tauGenStatus[lepton] != 6: continue
        for i in xrange(len(tau_id_algos)):
            roc[i].fill_misid_denominator(Chain._weight)
            ptHist[i].fill_denominator(Chain._lPt[lepton], Chain._weight)
            etaHist[i].fill_denominator(Chain._lEta[lepton], Chain._weight)
        DMfinding = objSel.getDMfinding(Chain, lepton)
        discriminators = objSel.getTauIDs(Chain, lepton)
        for i, discr in enumerate(discriminators):
            if not DMfinding[i]: continue
            for j, WP in enumerate(discr):
                if WP:
                    roc[i].fill_misid_numerator(j, Chain._weight)
                    ptHist[i].fill_numerator(Chain._lPt[lepton], j, Chain._weight)
                    etaHist[i].fill_numerator(Chain._lEta[lepton], j, Chain._weight)
    # Alternative jet-matching implementation, kept for reference:
    #    for jet in xrange(Chain._nJets):
    #
    #        if Chain._jetPt[jet] < 20 or abs(Chain._jetEta[jet]) > 2.3: continue
    #
    #        jetVec = objSel.getFourVec(Chain._jetPt[jet], Chain._jetEta[jet], Chain._jetPhi[jet], Chain._jetE[jet])
    #
    #        #Find matching reco tau
    #        matchindex = objSel.tauMatchIndexIso(Chain, jetVec, needFake=True)
    #        if matchindex == -1: continue
    #
    #        for i in xrange(len(tau_id_algos)):
    #            roc[i].fill_misid_denominator(Chain._weight)
    #            ptHist[i].fill_denominator(Chain._jetPt[jet], Chain._weight)
    #            etaHist[i].fill_denominator(Chain._jetEta[jet], Chain._weight)
    #
    #        DMfinding = objSel.getDMfinding(Chain, matchindex)
    #        discriminators = objSel.getTauIDs(Chain, matchindex)
    #
    #        #if DMfinding[0] and
    #        for i, discr in enumerate(discriminators):
    #            if not DMfinding[i]: continue
    #            for j, WP in enumerate(discr):
    #                if WP:
    #                    roc[i].fill_misid_numerator(j, Chain._weight)
    #                    ptHist[i].fill_numerator(Chain._jetPt[jet], j, Chain._weight)
    #                    etaHist[i].fill_numerator(Chain._jetEta[jet], j, Chain._weight)
###########
#Main body#
###########
#Whatever needs to be saved at the end
# Binning for the fake-rate efficiency histograms.
pt_bins = np.linspace(20, 120, 11)
eta_bins = np.linspace(-2.4, 2.4, 25)

# Output directory layout: <base>/<sample>/<method>, created on demand.
basefolder = '/storage_mnt/storage/user/lwezenbe/private/PhD/Results/TauStudy/Efficiency/Histos/Iso/'
makeDirIfNeeded(basefolder)
makeDirIfNeeded(basefolder+sample.output)
makeDirIfNeeded(basefolder+sample.output+'/'+args.method)
basefolder = basefolder + sample.output + '/' + args.method

# One ROC curve plus pt/eta efficiency histograms per ID algorithm;
# filled by the Calc*FakeRate functions via module-level globals.
roc = []
ptHist = []
etaHist = []
for tau_id in tau_id_algos:
    roc.append(ROC('roc_fakerate_'+tau_id[0], tau_id[1]))
    ptHist.append(efficiency('pt_fakerate_'+tau_id[0], pt_bins, tau_id[1]))
    etaHist.append(efficiency('eta_fakerate_'+tau_id[0], eta_bins, tau_id[1]))

# Debug histograms -- NOTE(review): nothing in this file fills them before
# they are written out below; confirm they are still needed.
TEST_NL = []
TEST_NL.append(ROOT.TH1D('test_nl_all', 'test_nl_all',25, -2.4, 2.4))
TEST_NL.append(ROOT.TH1D('test_nl_isfalse', 'test_nl_isfalse',25, -2.4, 2.4))
TEST_NL.append(ROOT.TH1D('test_nl_isnotfalse', 'test_nl_notisfalse',25, -2.4, 2.4))

#Get range of events in subjob
if args.isTest:
    eventRange = xrange(1000000)
else:
    eventRange = sample.getEventRange(int(args.subJob))
#begin event loop
# Dispatch each event to the fake-rate function selected by --method.
for entry in eventRange:
    progress(entry, len(eventRange))
    Chain.GetEntry(entry)
    if args.method == 'AN': CalcANFakeRate(Chain, sample, args)
    elif args.method == 'Bluj': CalcBlujFakeRate(Chain, sample, args)
    else:
        # BUG fix: 'time' was never imported at file level, so this error
        # path itself crashed with a NameError; import it locally.
        import time
        print('NOOO, GOD! NO, GOD, PLEASE, NO! NO! NO! ')
        time.sleep(2.5)
        print('NOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO!')
        time.sleep(1)
        print('Please enter proper method argument')
        exit()
# Write the debug histograms to their own ROOT file.
output_file = ROOT.TFile('test_nl.root', 'recreate')
output_file.cd()
for i in TEST_NL:
    i.Write()
output_file.Close()

#save
# Persist per-subjob ROC and efficiency results unless this is a test run.
if not args.isTest:
#if True:
    for i in xrange(len(tau_id_algos)):
        roc[i].write(basefolder, str(args.subJob))
        ptHist[i].write(basefolder, str(args.subJob))
        etaHist[i].write(basefolder, str(args.subJob))
|
import serial
import time
import sys
# Configure the serial port; argv[1] is the device path (e.g. /dev/ttyUSB0).
# NOTE(review): Python 2 only (print statements, "except Exception, e").
ser = serial.Serial()
ser.port = sys.argv[1]
ser.baudrate = 115200
ser.timeout = None;  # blocking reads
ser.bytesize = serial.EIGHTBITS #number of bits per bytes
ser.parity = serial.PARITY_NONE #set parity check: no parity
ser.stopbits = serial.STOPBITS_ONE #number of stop bits
try:
    ser.open()
except Exception, e:
    print "error open serial port: " + str(e)
    sys.exit(-1)
# argv[2] is the log file, opened in append mode.
log = open(sys.argv[2], 'a')
count = 0;
if ser.isOpen():
    while True:
        try:
            ser.flushInput() #flush input buffer, discarding all its contents
            ser.flushOutput() #flush output buffer, aborting current output
            #and discard all that is in buffer
            numOfLines = 0
            # Echo every timestamped line to stdout and the log file.
            while True:
                response = ser.readline().strip()
                if response:
                    print '%s: %s' % (time.ctime(), response.decode())
                    log.write('%s: %s\n' % (time.ctime(), response.decode()))
                    #print '%f: %s' % (time.time(),time.strftime("%T"), response.decode())
                    count += 1
                    #if count % 2 == 0:
                    #print "getthis..."
                    #ser.write("getthis\n")
            # NOTE(review): unreachable -- the inner loop never breaks, so the
            # port is only "closed" when an exception escapes it; confirm.
            ser.close()
        except Exception, e1:
            print "Error: " + str(e1)
else:
    print "cannot open serial port "
log.close()
|
#!/usr/bin/env python
# Programmer: Chris Bunch (chris@appscale.com)
""" s3_storage.py provides a single class, S3Storage, that callers can use to
interact with Amazon's Simple Storage Service (S3). """
# Third-party libraries
import boto.s3.connection
import boto.s3.key
# S3Storage-specific imports
from magik.base_storage import BaseStorage
from magik.custom_exceptions import BadConfigurationException
class S3Storage(BaseStorage):
""" S3Storage provides callers with an interface to Amazon S3. """
def __init__(self, parameters):
    """Create an S3Storage that talks to Amazon S3 with the given credentials.

    Args:
        parameters: A dict that holds the AWS_ACCESS_KEY and AWS_SECRET_KEY
            needed to authenticate with S3.

    Raises:
        BadConfigurationException: If AWS_ACCESS_KEY or AWS_SECRET_KEY is
            missing from parameters.
    """
    # Validate and store the credentials first; the connection needs them.
    self.setup_s3_credentials(parameters)
    self.connection = self.create_s3_connection()
    # TODO(cgb): Consider validating the user's credentials here, and throw
    # a BadConfigurationException if they aren't valid.
def create_s3_connection(self):
    """Open and return a boto connection to Amazon S3.

    Returns:
        A boto.s3.Connection authenticated with this object's credentials.
    """
    access_key = self.aws_access_key
    secret_key = self.aws_secret_key
    return boto.s3.connection.S3Connection(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key)
def setup_s3_credentials(self, parameters):
    """Validate and store the AWS credentials needed to talk to S3.

    Args:
        parameters: A dict that should contain AWS_ACCESS_KEY and
            AWS_SECRET_KEY.

    Raises:
        BadConfigurationException: If either credential is absent.
    """
    for credential in ('AWS_ACCESS_KEY', 'AWS_SECRET_KEY'):
        if credential not in parameters:
            raise BadConfigurationException(
                "{0} needs to be specified".format(credential))
    self.aws_access_key = parameters['AWS_ACCESS_KEY']
    self.aws_secret_key = parameters['AWS_SECRET_KEY']
def does_bucket_exist(self, bucket_name):
""" Queries Amazon S3 to see if the specified bucket exists or not.
Args:
bucket_name: A str containing the name of the bucket we wish to query for
existence.
Returns:
True if the bucket does exist, and False otherwise.
"""
bucket = self.connection.lookup(bucket_name)
if bucket:
return True
else:
return False
def create_bucket(self, bucket_name):
""" Creates the named bucket in Amazon S3.
Args:
bucket_name: A str containing the name of the bucket we wish to create.
"""
self.connection.create_bucket(bucket_name)
def upload_file(self, source, bucket_name, key_name):
""" Uploads a file from the local filesystem to Amazon S3.
Args:
source: A str containing the name of the file on the local filesystem that
should be uploaded to Amazon S3.
bucket_name: A str containing the name of the bucket that the file should
be placed in.
key_name: A str containing the name of the key that the file should be
placed in.
"""
bucket = self.connection.lookup(bucket_name)
key = boto.s3.key.Key(bucket)
key.key = key_name
key.set_contents_from_filename(source)
def does_key_exist(self, bucket_name, key_name):
""" Queries Amazon S3 to see if the named file exists.
Args:
bucket_name: A str containing the name of the bucket that the file exists
in.
key_name: A str containing the name of the key that identifies the file.
Returns:
True if a file does exist in the named bucket with the provided key name,
and False otherwise.
"""
bucket = self.connection.lookup(bucket_name)
key = boto.s3.key.Key(bucket)
key.key = key_name
return key.exists()
def download_file(self, destination, bucket_name, key_name):
""" Downloads a file to the local filesystem from Amazon S3.
Args:
destination: A str containing the name of the file on the local filesystem
that we should download our file to.
bucket_name: A str containing the name of the bucket that the file should
be downloaded from.
key_name: A str containing the name of the key that the file should be
downloaded from.
"""
bucket = self.connection.lookup(bucket_name)
key = boto.s3.key.Key(bucket)
key.key = key_name
key.get_contents_to_filename(destination)
def delete_file(self, bucket_name, key_name):
""" Deletes a file stored in Amazon S3.
Args:
bucket_name: A str containing the name of the bucket that the file should
be downloaded from.
key_name: A str containing the name of the key that the file should be
downloaded from.
"""
bucket = self.connection.lookup(bucket_name)
key = boto.s3.key.Key(bucket)
key.key = key_name
key.delete()
|
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import dataent, json
from dataent.utils import cint, quoted
from dataent.website.render import resolve_path
from dataent.model.document import get_controller, Document
from dataent import _
no_cache = 1
no_sitemap = 1
def get_context(context, **dict_params):
	"""Returns context for a list standard list page.

	Will also update `get_list_context` from the doctype module file.
	Mutates `context` in place and reads the current request's form_dict.
	"""
	# Merge caller-supplied params into the request form_dict so that
	# `get(**dataent.local.form_dict)` below sees them.
	dataent.local.form_dict.update(dict_params)
	doctype = dataent.local.form_dict.doctype
	context.parents = [{"route":"me", "title":_("My Account")}]
	context.meta = dataent.get_meta(doctype)
	# doctype-specific list context (row template, filters, hooks, ...)
	context.update(get_list_context(context, doctype) or {})
	context.doctype = doctype
	context.txt = dataent.local.form_dict.txt
	# Render the first page of rows into the context.
	context.update(get(**dataent.local.form_dict))
@dataent.whitelist(allow_guest=True)
def get(doctype, txt=None, limit_start=0, limit=20, pathname=None, **kwargs):
	"""Returns processed HTML page for a standard listing.

	Renders each row of the listing via the doctype's row template and
	returns a dict with the rendered rows, the raw data as JSON, and
	pagination info (`show_more`, `next_start`).
	"""
	limit_start = cint(limit_start)
	# Fetch one extra row so we can tell whether a further page exists.
	raw_result = get_list_data(doctype, txt, limit_start, limit=limit + 1, **kwargs)
	show_more = len(raw_result) > limit
	if show_more:
		raw_result = raw_result[:-1]
	meta = dataent.get_meta(doctype)
	# get_list_data() stored the list context in dataent.flags for us.
	list_context = dataent.flags.list_context
	if not raw_result: return {"result": []}
	if txt:
		list_context.default_subtitle = _('Filtered by "{0}"').format(txt)
	result = []
	row_template = list_context.row_template or "templates/includes/list/row_template.html"
	# At most the first four in-list-view fields are shown per row.
	list_view_fields = [df for df in meta.fields if df.in_list_view][:4]
	for doc in raw_result:
		doc.doctype = doctype
		new_context = dataent._dict(doc=doc, meta=meta,
			list_view_fields=list_view_fields)
		# Without a custom get_list, load the full Document so the template
		# can access every field.
		if not list_context.get_list and not isinstance(new_context.doc, Document):
			new_context.doc = dataent.get_doc(doc.doctype, doc.name)
			new_context.update(new_context.doc.as_dict())
		if not dataent.flags.in_test:
			pathname = pathname or dataent.local.request.path
			new_context["pathname"] = pathname.strip("/ ")
		new_context.update(list_context)
		set_route(new_context)
		rendered_row = dataent.render_template(row_template, new_context, is_path=True)
		result.append(rendered_row)
	# Imported lazily; json_handler serializes dates/Documents for dumps.
	from dataent.utils.response import json_handler
	return {
		"raw_result": json.dumps(raw_result, default=json_handler),
		"result": result,
		"show_more": show_more,
		"next_start": limit_start + limit,
	}
@dataent.whitelist(allow_guest=True)
def get_list_data(doctype, txt=None, limit_start=0, limit=20, **kwargs):
	"""Returns the raw rows for a standard listing (no HTML rendering).

	Also stashes the resolved list context in `dataent.flags.list_context`
	so that `get()` can reuse it after calling this function.
	"""
	limit_start = cint(limit_start)
	# A `search` form value doubles as the text filter.
	if not txt and dataent.form_dict.search:
		txt = dataent.form_dict.search
		del dataent.form_dict['search']
	controller = get_controller(doctype)
	meta = dataent.get_meta(doctype)
	filters = prepare_filters(doctype, controller, kwargs)
	list_context = get_list_context(dataent._dict(), doctype)
	# Title field preference: controller.website setting, then meta, then name.
	list_context.title_field = getattr(controller, 'website',
		{}).get('page_title_field', meta.title_field or 'name')
	if list_context.filters:
		filters.update(list_context.filters)
	# The doctype may provide its own get_list implementation.
	_get_list = list_context.get_list or get_list
	kwargs = dict(doctype=doctype, txt=txt, filters=filters,
		limit_start=limit_start, limit_page_length=limit,
		order_by = list_context.order_by or 'modified desc')
	# allow guest if flag is set
	if not list_context.get_list and (list_context.allow_guest or meta.allow_guest_to_view):
		kwargs['ignore_permissions'] = True
	raw_result = _get_list(**kwargs)
	# list context to be used if called as rendered list
	dataent.flags.list_context = list_context
	return raw_result
def set_route(context):
	'''Set link for the list item.

	Preference order: web-form URL (pre-filled by document name), the
	document's own `route` field, then "<pathname>/<name>" as a fallback.
	'''
	doc = context.doc
	if context.web_form_name:
		context.route = "{0}?name={1}".format(context.pathname, quoted(doc.name))
	elif doc and getattr(doc, 'route', None):
		context.route = doc.route
	else:
		base = context.pathname or quoted(doc.doctype)
		context.route = "{0}/{1}".format(base, quoted(doc.name))
def prepare_filters(doctype, controller, kwargs):
	"""Build the filter dict for the listing query.

	Combines caller kwargs, the controller's `condition_field`, and any
	extra form values resolved from the path, then drops keys that are not
	actual fields of the doctype.
	"""
	filters = dataent._dict(kwargs)
	meta = dataent.get_meta(doctype)
	# e.g. published=1 for doctypes with a website condition field.
	if hasattr(controller, 'website') and controller.website.get('condition_field'):
		filters[controller.website['condition_field']] = 1
	if filters.pathname:
		# resolve additional filters from path
		resolve_path(filters.pathname)
		for key, val in dataent.local.form_dict.items():
			if key not in filters and key != 'flags':
				filters[key] = val
	# filter the filters to include valid fields only
	for fieldname, val in list(filters.items()):
		if not meta.has_field(fieldname):
			del filters[fieldname]
	return filters
def get_list_context(context, doctype):
	"""Resolve the list context for a doctype.

	Sources, in order: the doctype module's `get_list_context` hook, the
	row template from the doctype's templates folder, and web-form
	settings when the request comes from a web form.
	"""
	from dataent.modules import load_doctype_module
	from dataent.website.doctype.web_form.web_form import get_web_form_list
	list_context = context or dataent._dict()
	meta = dataent.get_meta(doctype)
	if not meta.custom:
		# custom doctypes don't have modules
		module = load_doctype_module(doctype)
		if hasattr(module, "get_list_context"):
			# The hook may mutate list_context and/or return a replacement.
			out = dataent._dict(module.get_list_context(list_context) or {})
			if out:
				list_context = out
	# get path from '/templates/' folder of the doctype
	if not list_context.row_template:
		list_context.row_template = meta.get_row_template()
	# is web form, show the default web form filters
	# which is only the owner
	if dataent.form_dict.web_form_name:
		list_context.web_form_name = dataent.form_dict.web_form_name
		if not list_context.get("get_list"):
			list_context.get_list = get_web_form_list
		if not dataent.flags.web_form:
			# update list context from web_form
			dataent.flags.web_form = dataent.get_doc('Web Form', dataent.form_dict.web_form_name)
			if dataent.flags.web_form.is_standard:
				dataent.flags.web_form.update_list_context(list_context)
	return list_context
def get_list(doctype, txt, filters, limit_start, limit_page_length=20, ignore_permissions=False,
	fields=None, order_by=None):
	"""Default data source for listings: a permission-checked dataent.get_list.

	When `txt` is given, text search runs over the doctype's search fields
	(as OR filters), or over `name` when no search fields are configured.
	"""
	meta = dataent.get_meta(doctype)
	if not filters:
		filters = []
	if not fields:
		fields = "distinct *"
	or_filters = []
	if txt:
		if meta.search_fields:
			for f in meta.get_search_fields():
				# Only text-like fields (and name) can take a LIKE filter.
				if f == 'name' or meta.get_field(f).fieldtype in ('Data', 'Text', 'Small Text', 'Text Editor'):
					or_filters.append([doctype, f, "like", "%" + txt + "%"])
		else:
			# `filters` may be dict-shaped or list-shaped; extend accordingly.
			if isinstance(filters, dict):
				filters["name"] = ("like", "%" + txt + "%")
			else:
				filters.append([doctype, "name", "like", "%" + txt + "%"])
	return dataent.get_list(doctype, fields = fields,
		filters=filters, or_filters=or_filters, limit_start=limit_start,
		limit_page_length = limit_page_length, ignore_permissions=ignore_permissions,
		order_by=order_by)
|
# Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
import io
from pullenti.unisharp.Utils import Utils
from pullenti.morph.DerivateWord import DerivateWord
from pullenti.morph.MorphClass import MorphClass
from pullenti.morph.DerivateGroup import DerivateGroup
from pullenti.morph.internal.MorphSerializeHelper import MorphSerializeHelper
from pullenti.morph.MorphLang import MorphLang
from pullenti.morph.MorphCase import MorphCase
from pullenti.morph.internal.ByteArrayWrapper import ByteArrayWrapper
from pullenti.morph.internal.ExplanTreeNode import ExplanTreeNode
class DeserializeHelper:
    """Deserializes a DerivateDictionary from its compressed binary form.

    Generated code (UniSharping port of the Pullenti C# project); the read
    order of ints/shorts/strings must exactly mirror the serializer's write
    order, so do not reorder statements.
    """

    @staticmethod
    def deserializeDD(str0_ : io.IOBase, dic : 'DerivateDictionary', lazy_load : bool) -> 'ByteArrayWrapper':
        """Inflate the gzip stream and read all derivate groups plus the root tree.

        With lazy_load, each group only records its byte offset (_lazy_pos)
        and is decoded on first access; otherwise groups are decoded eagerly.
        Returns the ByteArrayWrapper holding the inflated data.
        """
        wr = None
        with io.BytesIO() as tmp:
            MorphSerializeHelper.deflateGzip(str0_, tmp)
            wr = ByteArrayWrapper(bytearray(tmp.getvalue()))
        # Group count, then one offset-prefixed group record each.
        cou = wr.deserializeInt()
        while cou > 0:
            p1 = wr.deserializeInt()
            ew = DerivateGroup()
            if (lazy_load):
                # Remember where this group's payload starts, then skip it.
                ew._lazy_pos = wr.position
                wr.seek(p1)
            else:
                DeserializeHelper.deserializeDerivateGroup(wr, ew)
            dic._m_all_groups.append(ew)
            cou -= 1
        dic._m_root = ExplanTreeNode()
        DeserializeHelper.deserializeTreeNode(wr, dic, dic._m_root, lazy_load)
        return wr
    @staticmethod
    def deserializeDerivateGroup(str0_ : 'ByteArrayWrapper', dg : 'DerivateGroup') -> None:
        """Decode one DerivateGroup record at the wrapper's current position.

        Layout: attr bitmask (short), prefix (string), word list, then the
        prefix->case transition map.
        """
        attr = str0_.deserializeShort()
        # Bit flags: 1=dummy, 2=not_generate, 4/8 encode transitivity.
        if (((attr & 1)) != 0):
            dg.is_dummy = True
        if (((attr & 2)) != 0):
            dg.not_generate = True
        if (((attr & 4)) != 0):
            dg.m_transitive = 0
        if (((attr & 8)) != 0):
            dg.m_transitive = 1
        dg.prefix = str0_.deserializeString()
        cou = str0_.deserializeShort()
        while cou > 0:
            w = DerivateWord(dg)
            w.spelling = str0_.deserializeString()
            w.class0_ = MorphClass()
            w.class0_.value = (str0_.deserializeShort())
            w.lang = MorphLang._new5(str0_.deserializeShort())
            w.attrs.value = (str0_.deserializeShort())
            dg.words.append(w)
            cou -= 1
        cou = str0_.deserializeShort()
        while cou > 0:
            pref = Utils.ifNotNull(str0_.deserializeString(), "")
            cas = MorphCase()
            cas.value = (str0_.deserializeShort())
            if (dg.nexts is None):
                dg.nexts = dict()
            dg.nexts[pref] = cas
            cou -= 1
    @staticmethod
    def deserializeTreeNode(str0_ : 'ByteArrayWrapper', dic : 'DerivateDictionary', tn : 'ExplanTreeNode', lazy_load : bool) -> None:
        """Decode one ExplanTreeNode: its group references, then child nodes.

        Group ids are 1-based indexes into dic._m_all_groups; a single group
        is stored directly on tn.groups, multiple as a list.
        """
        cou = str0_.deserializeShort()
        li = (list() if cou > 1 else None)
        while cou > 0:
            id0_ = str0_.deserializeInt()
            if (id0_ > 0 and id0_ <= len(dic._m_all_groups)):
                gr = dic._m_all_groups[id0_ - 1]
                if (gr._lazy_pos > 0):
                    # Lazily-loaded group: decode it now, then restore position.
                    p0 = str0_.position
                    str0_.seek(gr._lazy_pos)
                    DeserializeHelper.deserializeDerivateGroup(str0_, gr)
                    gr._lazy_pos = 0
                    str0_.seek(p0)
                if (li is not None):
                    li.append(gr)
                else:
                    tn.groups = (gr)
            cou -= 1
        if (li is not None):
            tn.groups = (li)
        cou = str0_.deserializeShort()
        if (cou == 0):
            return
        while cou > 0:
            # Child entries: key (short) and offset (int) per node.
            ke = str0_.deserializeShort()
            p1 = str0_.deserializeInt()
            tn1 = ExplanTreeNode()
            if (tn.nodes is None):
                tn.nodes = dict()
            # NOTE(review): tn1 is used below even when the key already
            # exists (duplicate keys are decoded but not stored) — confirm
            # this matches the serializer, which should not emit duplicates.
            if (not ke in tn.nodes):
                tn.nodes[ke] = tn1
            if (lazy_load):
                tn1.lazy_pos = str0_.position
                str0_.seek(p1)
            else:
                DeserializeHelper.deserializeTreeNode(str0_, dic, tn1, False)
            cou -= 1
import re, uuid
# uuid.getnode() yields the 48-bit hardware address as an int; render it as
# twelve hex digits and insert a colon between every byte.
mac_hex = format(uuid.getnode(), "012x")
octets = [mac_hex[i:i + 2] for i in range(0, 12, 2)]
print ("The MAC address is : ", end="")
print (':'.join(octets))
|
import threading
_localdata = threading.local()
class Middleware:
    """
    Put the user into current thread local data
    """

    def __init__(self, get_response):
        # One-time configuration and initialization: keep the next handler
        # in the middleware chain.
        self.get_response = get_response

    def __call__(self, request):
        # Record the request's user for this thread, then delegate.
        _localdata.loginuser = request.user
        return self.get_response(request)
def get():
    """Return the user stored by Middleware for the current thread.

    Returns:
        The login user, or None if no request has been processed on this
        thread yet (the thread-local attribute is unset).
    """
    try:
        return _localdata.loginuser
    # Only the missing-attribute case is expected; the original bare
    # `except:` would also have hidden real bugs (e.g. NameError).
    except AttributeError:
        return None
|
# In[ ]:
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from osgeo import osr, gdal
import tensorflow as tf
from Funciones import *
from Entrenamiento import *
from Imagenes import *
# Import librarys
# Interactive console menu for training a vegetation classifier on satellite
# imagery and producing predictions/images.  All helper functions come from
# the Funciones/Entrenamiento/Imagenes star-imports above.
Mostrar_bienvenida()
# NOTE(review): `global` at module level is a no-op — these statements have
# no effect; the names are module globals anyway.
global Diccionario
global Xtrain
global Ytrain
global Ciudad
global Xreal
global imagen
global Lista
global Porcentaje
global nombre
global num
opcion = 1
while opcion != 0:
    opcion = opcion_menu()
    if opcion == 1:
        # Load CSV training data, split train/test, and report accuracy.
        print("Has elegido Cargar datos de entrenamiento para la Red neuronal.")
        Datos, Objetivo = PreparararDataFromCSV()
        Xtrain, Ytrain, Xtest, Ytest = PrepararDatosentrenamiento(Datos, Objetivo)
        Resultado = Obtener_Prediccion(Xtrain, Ytrain, Xtest)
        Obtener_Verificador(Resultado, Ytest)
    elif opcion == 2:
        # Pick a predefined city and build its feature dictionary.
        print("Has elegido Selecionar Ciudad.")
        Ciudad = Obtener_Ciudad()
        m1, m2, n1, n2 = Obtener_Dimension(Ciudad)
        Xreal, imagen, num = ObtenerDiccionario(m1, m2, n1, n2, Ciudad)
        nombre = Obtener_nombre(Ciudad)
    elif opcion == 3:
        # Load custom imagery (NIR/red/green/blue/real bands) from URLs.
        print("Has elegido Insertar Imagenes.")
        nir, red, green, blue, real = Insertar_Urls()
        Ciudad = [ nir, red, green, blue, real]
        m1, m2, n1, n2 = Obtener_area()
        Xreal, imagen, num = ObtenerDiccionario(m1, m2, n1, n2, Ciudad)
        nombre = str(input("Ingrese el nombre de la ciudad : " ))
    elif opcion == 4:
        # Classify the selected area and report vegetation percentage.
        # NOTE(review): requires options 1 and 2/3 to have run first —
        # otherwise Xtrain/Xreal are undefined (NameError).
        print("Has elegido Obtener prediccion.")
        Prediccion = Obtener_Prediccion(Xtrain, Ytrain, Xreal)
        Resultado, Porcentaje = Obtener_Clasificador(Prediccion)
        le = len(Resultado)
        Porcentaje = Obtener_Porcentaje(Porcentaje, le, num)
        print("Porcentaje de Vegetacion en la zona:", +Porcentaje, "%")
        Lista = Resultado
    elif opcion == 5:
        # Write before/after images (real vs predicted classification).
        print("Has elegido Imagenes Antes y Despues.")
        m1, m2, n1, n2 = Obtener_Dimension(Ciudad)
        matrix = Lista
        Crear_Imangen_Predecida(matrix, m1, m2, n1, n2, nombre, Porcentaje)
        Crear_Imangen_Real(imagen, m1, m2, n1, n2, nombre)
        print("Imagenes creada con exito")
    elif opcion == 6:
        # Estimate the number of parks from the classified matrix.
        print("Has elegido calcular la cantidad de parques en la ciudad.")
        m1, m2, n1, n2 = Obtener_Dimension(Ciudad)
        matrix = Lista
        parques = Calcular_parques(matrix, m1, m2, n1, n2)
        print("En la ciudad de "+nombre+ " hay aproximadamente: "+str(parques)+ " parques")
        #print(Xreal)
    elif opcion == 0:
        print("Has decidido salir.")
print("Gracias por usar Clasificador Artificial. ¡Hasta pronto!")
from project import db
from project.com.vo.AreaVO import AreaVO
from project.com.vo.BloodGroupVO import BloodGroupVO
from project.com.vo.CityVO import CityVO
from project.com.vo.LoginVO import LoginVO
from project.com.vo.BloodBankVO import BloodBankVO
class EmergencyRequestVO(db.Model):
    """SQLAlchemy model for an emergency blood request.

    Links a request to its city, area, blood group, blood bank and the
    login that raised it via foreign keys.
    """
    __tablename__ = 'emergencyrequestmaster'
    emergencyRequestId = db.Column('emergencyRequestId', db.Integer, primary_key=True, autoincrement=True)
    # NOTE(review): quantity/date/time/status are stored as strings —
    # presumably formatted by the caller; confirm expected formats.
    emergencyRequestQuantity = db.Column('emergencyRequestQuantity', db.String(100), nullable=False)
    emergencyRequestDate = db.Column('emergencyRequestDate', db.String(100), nullable=False)
    emergencyRequestTime = db.Column('emergencyRequestTime', db.String(100), nullable=False)
    emergencyRequestStatus = db.Column('emergencyRequestStatus', db.String(100), nullable=False)
    emergencyRequestPersonRequire = db.Column('emergencyRequestPersonRequire', db.Integer, nullable=False)
    # Foreign keys into the related master tables.
    emergencyRequest_CityId = db.Column('emergencyRequest_CityId', db.Integer, db.ForeignKey(CityVO.cityId))
    emergencyRequest_AreaId = db.Column('emergencyRequest_AreaId', db.Integer, db.ForeignKey(AreaVO.areaId))
    emergencyRequest_BloodGroupId = db.Column('emergencyRequest_BloodGroupId', db.Integer, db.ForeignKey(BloodGroupVO.bloodGroupId))
    emergencyRequest_BloodBankId = db.Column('emergencyRequest_BloodBankId', db.Integer, db.ForeignKey(BloodBankVO.bloodBankId))
    emergencyRequest_LoginId = db.Column('emergencyRequest_LoginId', db.Integer, db.ForeignKey(LoginVO.loginId))
    def as_dict(self):
        """Return the row as a plain dict (column name -> value)."""
        return {
            'emergencyRequestId': self.emergencyRequestId,
            'emergencyRequestQuantity': self.emergencyRequestQuantity,
            'emergencyRequestDate': self.emergencyRequestDate,
            'emergencyRequestTime': self.emergencyRequestTime,
            'emergencyRequestStatus': self.emergencyRequestStatus,
            'emergencyRequestPersonRequire': self.emergencyRequestPersonRequire,
            'emergencyRequest_CityId': self.emergencyRequest_CityId,
            'emergencyRequest_AreaId': self.emergencyRequest_AreaId,
            'emergencyRequest_BloodGroupId': self.emergencyRequest_BloodGroupId,
            'emergencyRequest_BloodBankId': self.emergencyRequest_BloodBankId,
            'emergencyRequest_LoginId': self.emergencyRequest_LoginId
        }
db.create_all()
|
from Tkinter import *
from PIL import Image, ImageTk
import re
import os
import zmq
import urllib2
import json
import graphanalysis as g
import datetime
# Alert numbers index into ALERT_ARRAY to pick the dialog message shown by
# Application.showAlertDialogue().
ALERT_INVALID_INPUT = 0
ALERT_NO_NETWORK_CONNECTION = 1
ALERT_NO_SERVER_CONNECTION = 2
ALERT_FAILED_SENTIMENT_ANALYZER = 3
ALERT_FAILED_GET_TWEETS = 4
ALERT_FAILED_FINANCE_INFO = 5
ALERT_DATE_RANGE_ERROR = 6
# Message text per alert number (order must match the constants above).
ALERT_ARRAY = ["Invalid Input", "No Network Connection", "Server Problems", \
    "Sentiment Analyzer Failure", "Aaron's Fault...", "Floundering Financials", "Invalid Date Range"]
# Company display name -> stock ticker symbol used in server requests.
TICKER_SYMBOL_DICT = {'Google':'GOOG', 'Ibm':'IBM', 'Sony':'SNE', 'Microsoft':'MSFT', 'Dell':'DELL', 'Amazon':'AMZN'}
class Application(Frame):
    """Main Tkinter window for "Twahoo Finance" (Python 2).

    Lets the user pick companies and date ranges, fetches tweet-sentiment
    and stock data from a ZeroMQ server at `address`, and shows per-company
    result rows (nested ListView class) with graph buttons.
    """
    def __init__(self, parent, address):
        """Build the main-menu widgets and show them in `parent`."""
        Frame.__init__(self, parent, background="white")
        #---------Class Variables---------------
        self.parent = parent
        self.companies = []
        self.address = address
        self.previousSelectedCompany = ""
        # Parallel lists: entry i describes the i-th row of the list boxes.
        self.companiesAdded = []
        self.startDatesAdded = []
        self.endDatesAdded = []
        self.listViewList = []
        self.tweetInfoDict = {}
        self.stockInfoDict = {}
        self.parent.title("Twahoo Finance")
        self.createMainMenuObjects()
        self.displayMainMenu()
    def addCompany(self):
        """Validate the selected date range and append the company row."""
        company = self.listVariableCompany.get()
        startDate = self.listVariableStartDate.get()
        endDate = self.listVariableEndDate.get()
        # Dates are "YYYY-MM-DD"-style fixed-width strings; slice the parts.
        startYear = int(startDate[0:4])
        endYear = int(endDate[0:4])
        startMonth = int(startDate[5:7])
        endMonth = int(endDate[5:7])
        startDay = int(startDate[8:10])
        endDay = int(endDate[8:10])
        if(company != "" and company not in self.companiesAdded):
            # NOTE(review): this field-by-field comparison rejects valid
            # ranges like 2013-01-31..2013-02-01 (endDay < startDay) —
            # comparing full dates would be correct; confirm intent.
            if(endYear < startYear):
                self.showAlertDialogue(ALERT_DATE_RANGE_ERROR)
            elif(endMonth < startMonth):
                self.showAlertDialogue(ALERT_DATE_RANGE_ERROR)
            elif(endDay < startDay):
                self.showAlertDialogue(ALERT_DATE_RANGE_ERROR)
            else:
                self.companyListBox.insert(END, company)
                self.companiesAdded.append(company)
                self.startDateListBox.insert(END, startDate)
                self.startDatesAdded.append(startDate)
                self.endDateListBox.insert(END, endDate)
                self.endDatesAdded.append(endDate)
    def showAlertDialogue(self, alertNum):
        """Pop up a modal-style window with the message for `alertNum`."""
        alert = Toplevel()
        alert.title("Something Went Wrong!")
        alertMessage = Message(alert, text=ALERT_ARRAY[alertNum])
        alertMessage.pack()
        dismiss = Button(alert, text="Dismiss", command=alert.destroy)
        dismiss.pack()
    def hideMainMenu(self):
        """Remove all main-menu widgets from the grid (kept for redisplay)."""
        self.refreshButton.grid_forget()
        self.companyListBox.grid_forget()
        self.startDateListBox.grid_forget()
        self.endDateListBox.grid_forget()
        self.companyLabel.grid_forget()
        self.startDateLabel.grid_forget()
        self.endDateLabel.grid_forget()
        self.addButton.grid_forget()
        self.retrieveDataButton.grid_forget()
        self.refreshButton.grid_forget()
        self.companyDrop.grid_forget()
        self.startDateDrop.grid_forget()
        self.endDateDrop.grid_forget()
        self.deleteButton.grid_forget()
    def displayMainMenu(self):
        """Lay out the main-menu widgets on the parent's grid."""
        self.companyLabel.grid(row=0, column=0, columnspan=1, rowspan=1, padx=5, pady=(100,0), sticky=S)
        self.startDateLabel.grid(row=0, column=2, columnspan=1, rowspan=1, padx=5, pady=(100,0), sticky=S)
        self.endDateLabel.grid(row=0, column=4, columnspan=1, rowspan=1, padx=5, pady=(100,0), sticky=S)
        self.companyListBox.grid(row=1, column=0, columnspan=1, rowspan=2, padx=10, sticky=E+W+S+N)
        self.startDateListBox.grid(row=1, column=2, columnspan=1, rowspan=2, padx=10, sticky=E+W+S+N)
        self.endDateListBox.grid(row=1, column=4, columnspan=1, rowspan=2, padx=10, sticky=E+S+W+N)
        self.companyDrop.grid(row=4, column=0, columnspan=1, rowspan=1, padx=10, sticky=E+S+N+W)
        self.startDateDrop.grid(row=4, column=2, columnspan=1, rowspan=1, padx=10, sticky=E+S+N+W)
        self.endDateDrop.grid(row=4, column=4, columnspan=1, rowspan=1, padx=10, sticky=E+S+N+W)
        self.addButton.grid(row=4, column=5, columnspan=1, rowspan=1, sticky=W+S+N)
        self.retrieveDataButton.grid(row=7, column=4, columnspan=1, rowspan=1, padx=15, pady=25, sticky=E+S+N+W)
        self.refreshButton.grid(row=7, column=0, columnspan=1, rowspan=1, padx=15, pady=25, sticky=E+S+N+W)
        self.deleteButton.grid(row=1, column=5, columnspan=1, rowspan=1, sticky=W+N+E)
    def createMainMenuObjects(self):
        """Create (but do not place) every widget used by the main menu."""
        self.companyLabel = Label(self.parent, text="Company:")
        self.startDateLabel = Label(self.parent, text="Start Date:")
        self.endDateLabel = Label(self.parent, text="End Date:")
        self.companyListBox = Listbox(self.parent)
        self.startDateListBox = Listbox(self.parent)
        self.endDateListBox = Listbox(self.parent)
        # Company choices come from the server at startup.
        companies = self.refreshCompanyList()
        self.listCompanies = companies
        self.listVariableCompany = StringVar()
        self.listVariableCompany.set(self.listCompanies[0])
        self.companyDrop = OptionMenu(self.parent, self.listVariableCompany, *self.listCompanies)
        startDates = self.refreshDateList()
        self.listStartDates = startDates
        self.listVariableStartDate = StringVar()
        self.listVariableStartDate.set(self.listStartDates[0])
        self.startDateDrop = OptionMenu(self.parent, self.listVariableStartDate, *self.listStartDates)
        # End dates reuse the same choices as start dates.
        self.listEndDates = startDates
        self.listVariableEndDate = StringVar()
        self.listVariableEndDate.set(self.listEndDates[0])
        self.endDateDrop = OptionMenu(self.parent, self.listVariableEndDate, *self.listEndDates)
        self.addButton = Button(self.parent, text="+", command=self.addCompany)
        self.retrieveDataButton = Button(self.parent, text="Get My Data", command=self.retrieveData)
        self.refreshButton = Button(self.parent, text="Refresh Company List", command=self.refreshCompanyList)
        self.deleteButton = Button(self.parent, text="-", command=self.deleteSelectedCompany)
    def createCompanyInfoObjects(self, companyInfo):
        """Build one ListView row per company in the server's response."""
        self.listViewList = []
        offset = 0
        for c in companyInfo:
            index = self.companiesAdded.index(c)
            sd = self.startDatesAdded[index]
            ed = self.endDatesAdded[index]
            sdt = datetime.datetime(int(sd[0:4]), int(sd[5:7]), int(sd[8:10]))
            edt = datetime.datetime(int(ed[0:4]), int(ed[5:7]), int(ed[8:10]))
            diff = edt - sdt
            timeDelta = diff.days
            corrCoeff = self.calculateCorrelation(c, sdt, timeDelta)
            # companyInfo[c] is presumably [numTweets, posTweets, negTweets]
            # (matches ListView's parameters) — TODO confirm server schema.
            newListView = self.ListView(self, c, companyInfo[c][0], companyInfo[c][1], companyInfo[c][2], rowOffset=offset, startDate=sdt, timeDelta=timeDelta, corrCoeff=corrCoeff)
            self.listViewList.append(newListView)
            offset = offset + 1
        self.companyInfoBackButton = Button(self.parent, text="Back", command=self.companyInfoBack)
    def displayCompanyInfo(self):
        """Swap the main menu for the per-company result rows."""
        self.hideMainMenu()
        backButtonRow = 1
        for lv in self.listViewList:
            lv.display()
            backButtonRow = backButtonRow+2
        self.companyInfoBackButton.grid(row=backButtonRow, column=0, columnspan=1, rowspan=1, padx=(25,0), pady=(15,0), sticky=W+N)
    def hideCompanyInfo(self):
        """Remove the result rows and the back button from the grid."""
        for lv in self.listViewList:
            lv.forget()
        self.companyInfoBackButton.grid_forget()
    def deleteSelectedCompany(self):
        """Delete the row selected in any of the three list boxes."""
        cSelection = self.companyListBox.curselection()
        sSelection = self.startDateListBox.curselection()
        eSelection = self.endDateListBox.curselection()
        selection = ""
        if(cSelection):
            selection = cSelection
        elif(sSelection):
            selection = sSelection
        elif(eSelection):
            selection = eSelection
        if(selection):
            # Keep the three list boxes and backing lists in sync.
            self.companyListBox.delete(selection)
            self.startDateListBox.delete(selection)
            self.endDateListBox.delete(selection)
            del self.companiesAdded[int(selection[0])]
            del self.startDatesAdded[int(selection[0])]
            del self.endDatesAdded[int(selection[0])]
        else:
            return 0
    def retrieveData(self):
        """Request tweet data for all added companies and show the results."""
        if(len(self.companiesAdded) > 0):
            tempData = []
            messageDict = {'type':'gui_tweet_pull', 'companies':self.companiesAdded, 'start_dates':self.startDatesAdded, 'end_dates':self.endDatesAdded}
            message = json.dumps(messageDict)
            socket = self.createSocket()
            socket.send(message)
            # Blocking request/reply round-trip (zmq REQ socket).
            message = socket.recv()
            rcvd = json.loads(message)
            self.createCompanyInfoObjects(rcvd)
            self.displayCompanyInfo()
        else:
            return 0
    def refreshCompanyList(self):
        """Fetch the list of known companies from the server."""
        dict = {'type': 'gui_get_companies'}
        list = self.refreshListFromDB(dict)
        return list
    def refreshDateList(self):
        """Fetch the available dates for the currently selected company."""
        company = self.listVariableCompany.get().lower()
        dict = {'type': 'gui_get_dates', 'company': company}
        list = self.refreshListFromDB(dict)
        return list
    def refreshListFromDB(self, messageDict):
        """Send `messageDict` to the server; return the reply, title-cased."""
        tempData = []
        message = json.dumps(messageDict)
        socket = self.createSocket()
        socket.send(message)
        message = socket.recv()
        rcvd = json.loads(message)
        for r in rcvd:
            tempData.append(r.title())
        return tempData
    def onCompanySelect(self):
        """Poll (every 250 ms) for a company change; refresh the date menus."""
        currentCompany = self.listVariableCompany.get()
        if(self.previousSelectedCompany != currentCompany):
            newDates = self.refreshDateList()
            sMenu = self.startDateDrop['menu']
            sMenu.delete(0, END)
            eMenu = self.endDateDrop['menu']
            eMenu.delete(0, END)
            for nd in newDates:
                # Default args bind nd now — avoids the late-binding-closure trap.
                sMenu.add_command(label=nd, command=lambda v=self.listVariableStartDate, l=nd: v.set(l))
                eMenu.add_command(label=nd, command=lambda v=self.listVariableEndDate, l=nd: v.set(l))
            self.listVariableStartDate.set(newDates[0])
            self.listVariableEndDate.set(newDates[0])
            self.previousSelectedCompany = currentCompany
        # Re-arm the poll.
        self.parent.after(250, self.onCompanySelect)
    def showStockGraph(self, company, startDate, timeDelta):
        """Fetch the company's stock series and plot it."""
        ## retreive stock dataset
        dataset = {'type' : 'stock_pull', 'symbol' : TICKER_SYMBOL_DICT[company], 'clientname' : 'graphanalysis_test'}
        message = json.dumps(dataset)
        socket = self.createSocket()
        socket.send(message)
        message = socket.recv()
        rcvd = json.loads(message)
        stk = g.graphanalysis(rcvd, 'stock')
        stk.interpolate(10)
        stk.run_plot(timeDelta, stk.get_date_loc(startDate))
        return 0
    def showTweetGraph(self, company, startDate, timeDelta):
        """Fetch the company's average-sentiment series and plot it."""
        dataset = {'type' : 'avgSentiment_pull', 'symbol' : TICKER_SYMBOL_DICT[company], 'dateRange' : 'doesntmatter', 'clientname' : 'graphanalysis_test'}
        message = json.dumps(dataset)
        socket = self.createSocket()
        socket.send(message)
        message = socket.recv()
        rcvd = json.loads(message)
        twt = g.graphanalysis(rcvd, 'tweet')
        twt.interpolate(10)
        twt.run_plot(timeDelta, twt.get_date_loc(startDate))
        #twt.run_plot()
        return 0
    def calculateCorrelation(self, company, startDate, timeDelta):
        """Return the stock/sentiment correlation coefficient for `company`."""
        ## retreive stock dataset
        dataset = {'type' : 'stock_pull', 'symbol' : TICKER_SYMBOL_DICT[company], 'clientname' : 'graphanalysis_test'}
        message = json.dumps(dataset)
        socket = self.createSocket()
        socket.send(message)
        message = socket.recv()
        rcvd = json.loads(message)
        stk = g.graphanalysis(rcvd, 'stock')
        stk.interpolate(10)
        dataset = {'type' : 'avgSentiment_pull', 'symbol' : TICKER_SYMBOL_DICT[company], 'dateRange' : 'doesntmatter', 'clientname' : 'graphanalysis_test'}
        message = json.dumps(dataset)
        socket = self.createSocket()
        socket.send(message)
        message = socket.recv()
        rcvd = json.loads(message)
        twt = g.graphanalysis(rcvd, 'tweet')
        corrCoeff = stk.correlation(twt)
        print corrCoeff
        # correlation() presumably returns a 2x2 matrix; take the off-diagonal.
        return corrCoeff[0][1]
    def companyInfoBack(self):
        """Return from the results view to the main menu."""
        self.hideCompanyInfo()
        self.displayMainMenu()
    def createSocket(self):
        """Open a new zmq REQ socket to the server on port 5555."""
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect("tcp://%s:5555" % (self.address))
        return socket
    class ListView:
        """One result row: tweet counts, correlation, and graph buttons."""
        def __init__(self, app, company, numTweets, posTweets, negTweets, corrCoeff=0, timeDelta=0, rowOffset=0, startDate=0):
            self.app = app
            self.parent = app.parent
            self.company = company
            self.numTweets = numTweets
            self.posTweets = posTweets
            self.negTweets = negTweets
            self.rowOffset = rowOffset
            self.startDate = startDate
            self.corrCoeff = corrCoeff
            self.timeDelta = timeDelta
            self.createDisplayObjects()
        def createDisplayObjects(self):
            """Create (but do not place) this row's labels and buttons."""
            self.backgroundLabel = Label(self.parent, text="")
            self.companyLabel = Label(self.parent, text=self.company)
            self.tweetsLabel = Label(self.parent, text="Tweets: %s" % (self.numTweets))
            self.posTweetsLabel = Label(self.parent, text="Pos: %s" % (self.posTweets))
            self.negTweetsLabel = Label(self.parent, text="Neg: %s" % (self.negTweets))
            self.stockGraphButton = Button(self.parent, text="Stock Graph", command=self.showStockGraph)
            self.tweetGraphButton = Button(self.parent, text="Tweet Graphs", command=self.showTweetGraph)
            self.correlationLabel = Label(self.parent, text="Correlation Coefficient: %s" % (self.corrCoeff))
            #self.correlationGraphButton = Button(self.parent, text="Correlation Graph", command=self.showCorrelationGraph)
        def display(self):
            """Place this row's widgets; each row occupies two grid rows."""
            row = 2*self.rowOffset
            # NOTE(review): "stick=" (not "sticky=") — Tk may accept the
            # abbreviation, but spell it out for clarity; confirm.
            self.backgroundLabel.grid(row=row, column=0, columnspan=5, rowspan=2, padx=(25,0), pady=(15,0), stick=N+S+W+E)
            self.companyLabel.grid(row=row, column=0, columnspan=1, rowspan=1, padx=(25,0), pady=(15,0), sticky=W+N)
            self.tweetsLabel.grid(row=row+1, column=1, columnspan=1, rowspan=1, padx=5, sticky=E+N)
            self.posTweetsLabel.grid(row=row+1, column=2, columnspan=1, rowspan=1, padx=5, sticky=W+N)
            self.negTweetsLabel.grid(row=row+1, column=3, columnspan=1, rowspan=1, padx=5, sticky=W+N)
            self.stockGraphButton.grid(row=row, column=3, columnspan=1, rowspan=1, padx=5, pady=(15,0), sticky=W+N)
            self.tweetGraphButton.grid(row=row, column=2, columnspan=1, rowspan=1, padx=5, pady=(15,0), sticky=W+N)
            self.correlationLabel.grid(row=row, column=1, columnspan=1, rowspan=1, padx=5, pady=(15,0), sticky=W+N)
            #self.correlationGraphButton.grid(row=row, column=2, columnspan=1, rowspan=1, padx=5, pady=(15,0), stick=W+N)
        def forget(self):
            """Remove this row's widgets from the grid."""
            self.backgroundLabel.grid_forget()
            self.companyLabel.grid_forget()
            self.tweetsLabel.grid_forget()
            self.posTweetsLabel.grid_forget()
            self.negTweetsLabel.grid_forget()
            self.stockGraphButton.grid_forget()
            self.tweetGraphButton.grid_forget()
            #self.correlationGraphButton.grid_forget()
            self.correlationLabel.grid_forget()
        def showStockGraph(self):
            # Delegate to the Application with this row's parameters.
            self.app.showStockGraph(self.company, self.startDate, self.timeDelta)
        def showTweetGraph(self):
            self.app.showTweetGraph(self.company, self.startDate, self.timeDelta)
        #def showCorrelationGraph(self):
        #	self.app.showCorrelationGraph(self.company, self.startDate)
def main():
if(len(sys.argv) < 2):
mainAddress = "localhost"
elif(len(sys.argv) > 2):
print "Usage: GoldmineGUI.py [ADDR]"
else:
mainAddress = sys.argv[1]
print "Address: %s" % (mainAddress)
root = Tk()
image = Image.open("res/TWAHOO_Finance_Background.jpg")
background = ImageTk.PhotoImage(image=image)
backgroundLabel = Label(root, image=background)
backgroundLabel.place(x=0, y=0)
width = background.width()
height = background.height()
root.geometry('%dx%d+0+0' % (width, height))
root.resizable(0,0)
app = Application(root, mainAddress)
root.after(250, app.onCompanySelect)
root.mainloop()
sys.exit(0)
if __name__=='__main__':
main()
|
#! /usr/bin/env python
import random
import matplotlib.pyplot as plt
def pick_char(l):
    """Return a uniformly random element of the non-empty sequence *l*."""
    # random.choice is the idiomatic equivalent of
    # l[random.randint(0, len(l) - 1)].
    return random.choice(l)
def init():
    """Return the working alphabet: the letters 'a'..'z' plus one space."""
    return [chr(code) for code in range(ord('a'), ord('z') + 1)] + [' ']
def tuples2lists(l_tuples):
    """Transpose a list of equal-length tuples into a list of lists.

    e.g. [(1, 'a'), (2, 'b')] -> [[1, 2], ['a', 'b']].

    Returns [] for empty input (the original indexed l_tuples[0] and
    raised IndexError).
    """
    return [list(column) for column in zip(*l_tuples)]
def fill(num):
    """Build a random text of *num* characters drawn from init()'s alphabet."""
    alphabet = init()
    return ''.join(pick_char(alphabet) for _ in range(num))
def stats(text):
    """Count word frequencies in space-separated *text*.

    Returns a list of (count, word) tuples sorted ascending by count,
    ties broken alphabetically by word. Replaces the Python-2-only
    dict.has_key with dict.get so the code also runs on Python 3.
    """
    dictionary = {}
    for word in text.split(' '):
        dictionary[word] = dictionary.get(word, 0) + 1
    return sorted((count, word) for word, count in dictionary.items())
if __name__ == "__main__":
    # Generate a large random text, rank the word frequencies, and plot
    # them on a log-log scale (Zipf-style frequency plot).
    text = fill(100000000)
    results = stats(text)
    l_lists = tuples2lists(results)
    frequency = l_lists[0]
    # Bug fix for Python 3: the original used a Python-2 print statement.
    print(frequency)
    frequency.reverse()
    plt.loglog(frequency)
    plt.show()
|
#solution:
def print_rangoli(size):
    """Print an alphabet rangoli of the given *size* to stdout."""
    letters = 'abcdefghijklmnopqrstuvwxyz'
    width = 4 * size - 3
    for row in range(size - 1, -size, -1):
        left = letters[size - 1:abs(row):-1]
        right = letters[abs(row):size]
        print('-'.join(left + right).center(width, '-'))
if __name__ == '__main__':
    # Read the rangoli size from stdin and draw it.
    size = int(input())
    print_rangoli(size)
|
import logging
import requests
import time
import ujson
import urllib
class RequestError(Exception):
    """Raised when an HTTP request fails; carries full request context.

    Bug fix: the original formatted ``self.message`` into the message
    string *before* calling ``super().__init__``, at which point the
    attribute does not exist yet (AttributeError). The wrapped *error*
    is formatted instead.
    """
    def __init__(self, method, url, params, data, status_code, response_text, error=None):
        self.status_code = status_code
        message = '[%s %s%s%s] %s: "%s" %s' % (
            method,
            url,
            '' if not params else '?' + urllib.urlencode(params.items()),
            '' if not data else ' ' + ujson.dumps(data),
            status_code,
            response_text,
            '' if error is None else error)
        super(RequestError, self).__init__(message, error)
class RetryError(Exception):
    """Raised by validate_response implementations to signal that the
    request should be retried (caught in BaseClient._make_request)."""
    pass
class Timer(object):
    """Context manager measuring wall-clock time in milliseconds.

    After the with-block exits, ``duration`` holds the elapsed time (ms);
    it is None until then.
    """

    def __init__(self):
        self._begin = None
        self._end = None
        self.duration = None

    def __enter__(self):
        self._begin = time.time()
        return self

    def __exit__(self, *_):
        finished = time.time()
        self._end = finished
        self.duration = 1000 * (finished - self._begin)
class BaseClient(object):
    """Base HTTP client: a shared requests.Session plus client-wide common
    data/headers/params merged into every request, JSON encoding of dict
    bodies, per-attempt timing logs, and a retry loop driven by RetryError
    raised from validate_response."""
    ROOT_API_URL = None  # subclasses set the base URL; prefixed to every request path
    def __init__(self):
        self._session = requests.session()
        # Values merged into every request's data/headers/params (see _combine).
        self._common_data = {}
        self._common_headers = {}
        self._common_params = {}
        self._logger = logging.getLogger(__name__)
    def validate_response(self, response):
        # Subclass hook: inspect the response, raise RetryError to retry,
        # or raise anything else to fail the request with RequestError.
        return response
    @staticmethod
    def _combine(instance, common):
        # Merge the client-wide 'common' dict into the per-request 'instance'.
        # NOTE(review): instance.update(common) gives COMMON values priority
        # over per-request ones and mutates the caller's dict in place —
        # confirm that precedence and mutation are intended.
        if not common:
            return instance
        if not instance:
            instance = {}
        instance.update(common)
        return instance
    def _make_request(self, method, url, params=None, data=None, headers=None, validate=True):
        """Issue an HTTP request against ROOT_API_URL + url.

        Dict bodies are JSON-encoded (Content-Type set accordingly). The
        request is retried whenever validate_response raises RetryError;
        any other failure is wrapped in RequestError.
        """
        full_url = self.ROOT_API_URL + url
        data = self._combine(data, self._common_data)
        headers = self._combine(headers, self._common_headers)
        params = self._combine(params, self._common_params)
        if isinstance(data, dict):
            headers = headers or {}
            headers['Content-Type'] = 'application/json'
            data = ujson.dumps(data)
        while True:
            with Timer() as timer:
                response = self._session.request(
                    method,
                    full_url,
                    params=params,
                    data=data,
                    headers=headers,
                )
            # Log method/url/duration/status for every attempt.
            # NOTE(review): urllib.urlencode is Python 2 only; Python 3 needs
            # urllib.parse.urlencode.
            self._logger.info('[{method} {url}{params}][ms={ms}][status={status}]'.format(
                method=method,
                url=full_url,
                params='' if not params else '?' + urllib.urlencode(params.items()),
                ms=timer.duration,
                status=response.status_code,
            ))
            try:
                response.raise_for_status()
                if validate is False:
                    return response
                return self.validate_response(response)
            except RetryError as re:
                # validate_response asked for another attempt; loop again.
                self._logger.warn('retrying request: %s' % re.message)
                continue
            except Exception as e:
                # Wrap everything else with the full request context.
                raise RequestError(
                    method, full_url, params, data,
                    response.status_code, response.text, e)
|
from rest_framework import serializers
from django.contrib.auth.models import User, Group
from tips.models import Tip
class TipSerializer(serializers.ModelSerializer):
    """Serializes Tip instances for the REST API."""
    # NOTE(review): serializers.RelatedField is abstract in modern DRF and
    # raises NotImplementedError on representation — StringRelatedField may
    # have been intended; confirm against the DRF version in use.
    author = serializers.RelatedField(read_only=True)
    # Read-only username of the owning user.
    user = serializers.ReadOnlyField(source='user.username')
    class Meta:
        model = Tip
        fields = ('tip', 'code', 'link', 'author', 'approved', 'share_link', 'user')
class UserSerializera(serializers.HyperlinkedModelSerializer):
    """Serializes User accounts for the REST API.

    NOTE(review): the trailing 'a' in the class name looks like a typo, but
    renaming would break importers, so it is left as-is.
    """
    class Meta:
        model = User
        # NOTE(review): exposing 'password' (even hashed) in API responses is
        # a security risk — consider write_only=True or dropping the field.
        fields = ('username', 'email', 'date_joined', 'password')
|
from platypus import NSGAII, Problem, Integer
from Library.DataClass import *
from Library.RiskCalc import *
from Library.RabbitMQProducer import *
def calc_total_value(universe: Universe, weights: [float]):
    """Return the weighted market value of the universe's assets.

    Multiplies each of the first ``universe.count`` assets' last_price by
    the matching entry of *weights* and sums the products.
    """
    return sum(universe.universe_set[i].last_price * weights[i]
               for i in range(universe.count))
class OptPortfolio(Problem):
    """Platypus optimization problem: choose a 0/1 selection flag per asset
    maximizing the fitness from gen_fitness_value, subject to exactly 10
    assets being selected."""
    def __init__(self, universe: Universe, buying_power: float):
        # universe.count decision variables, 1 objective, 1 constraint.
        super(OptPortfolio, self).__init__(universe.count,1,1)
        self.universe = universe
        self.universe_historical_data = gen_universe_hist_data(universe.universe_set, "Adj. Close")
        self.buying_power = buying_power
        # Each decision variable is a 0/1 selection flag for one asset.
        self.typeInt = Integer(0,1)
        self.types[:] = self.typeInt
        self.constraints[:] = "==0"
        self.directions[:] = Problem.MAXIMIZE
    def evaluate(self, solution):
        # Objective: fitness of the selected subset against historical data.
        solution.objectives[:] = gen_fitness_value(solution.variables, self.universe_historical_data)
        # Constraint: number of selected assets minus 10 must equal 0.
        solution.constraints[0] = sum(solution.variables) - 10
        #solution.constraints[1] = calc_total_value(self.universe, solution.variables) - self.buying_power
def generate_portfolio(universe: Universe, buying_power: float, user_id: int):
    """Run NSGA-II over *universe* and build a Portfolio from the first
    feasible solution, allocating a fixed 10% to each selected asset.

    NOTE(review): feasible_solutions[0] raises IndexError when the solver
    finds no feasible solution — confirm callers tolerate that.
    """
    problem = OptPortfolio(universe,buying_power)
    algorithm = NSGAII(problem)
    algorithm.run(1000)
    feasible_solutions = [s for s in algorithm.result if s.feasible]
    # Decode the Platypus binary encoding back into 0/1 selection flags.
    sol = [problem.typeInt.decode(i) for i in feasible_solutions[0].variables]
    print(sol)
    alloc = {}
    assets = []
    for i, asset in enumerate(universe.universe_set):
        if (sol[i] == 1):
            alloc[asset.ticker] = 0.1  # fixed 10% allocation per selected asset
            assets.append(asset)
    print(alloc)
    portf = Portfolio(user_id=user_id,buying_power=buying_power,assets=assets,asset_alloc=alloc)
    return portf
### RabbitMQ request to generate portfolios and push to Database
def Assets_to_UDMHoldings(portf: Portfolio ):
    """Convert a Portfolio's assets into UDMHolding records.

    Each asset's share quantity is its dollar allocation
    (buying_power * allocation fraction) divided by the last price,
    truncated to whole shares.
    """
    holdings = []
    for asset in portf.assets:
        dollars = portf.buying_power * portf.asset_alloc[asset.ticker]
        shares = math.trunc(dollars / asset.last_price)
        holdings.append(UDMHolding(None, None, asset.name, asset.ticker, '', shares))
    return holdings
def Portfolio_to_UDMPortfolio(portf: Portfolio):
    """Convert an internal Portfolio into a UDMPortfolio record with a
    stop value derived from a (currently hard-coded) VaR figure.

    NOTE(review): with VaR negative, stopValue = buying_power * (1 - VaR)
    comes out ABOVE the buying power — confirm the sign convention.
    """
    VaR = -.03 # will be set by var function
    stopValue = portf.buying_power - portf.buying_power * VaR
    holdings = Assets_to_UDMHoldings(portf)
    UDMportf = UDMPortfolio(np.NaN, np.NaN, False, datetime.datetime.today(), portf.buying_power, stopValue, None, holdings)
    return UDMportf
def GeneratePortfs():
    """Build a (currently hard-coded sample) portfolio, convert it to the
    UDM format, and publish an insert request to RabbitMQ.

    The genetic-algorithm pipeline below is commented out; the literal
    Portfolio stands in for its output during development.
    """
    # universe = ReadUniverse()
    # filtered_universe = filter_universe(universe)
    # portf = generate_portfolio(filtered_universe, 10000, 1)
    # sample output from genetic algo
    portf = Portfolio(user_id=1, buying_power=10000, assets=[Asset(ticker='AAP', name='Advance Auto Parts', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/AAP.csv',
                            last_price=115.01),
                      Asset(ticker='CXO', name='Concho Resources', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/CXO.csv',
                            last_price=157.0),
                      Asset(ticker='COP', name='ConocoPhillips', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/COP.csv',
                            last_price=59.14),
                      Asset(ticker='CSRA', name='CSRA Inc.', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/CSRA.csv',
                            last_price=41.33),
                      Asset(ticker='HES', name='Hess Corporation', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/HES.csv',
                            last_price=49.87),
                      Asset(ticker='INTC', name='Intel Corp.', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/INTC.csv',
                            last_price=51.19),
                      Asset(ticker='NFLX', name='Netflix Inc.', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/NFLX.csv',
                            last_price=300.69),
                      Asset(ticker='PCG', name='PG&E Corp.', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/PCG.csv',
                            last_price=43.94),
                      Asset(ticker='SBAC', name='SBA Communications', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/SBAC.csv',
                            last_price=170.1),
                      Asset(ticker='TDG', name='TransDigm Group', asset_type='stock',
                            price_history_file='C:\\Users\\Francisco\\Documents\\AlgoTradingCode\\portfolio_gen\\Stock_Data/TDG.csv',
                            last_price=305.14)],
                      asset_alloc={'AAP': 0.1, 'CXO': 0.1, 'COP': 0.1, 'CSRA': 0.1, 'HES': 0.1, 'INTC': 0.1, 'NFLX': 0.1,
                                   'PCG': 0.1, 'SBAC': 0.1, 'TDG': 0.1})
    UDMportf = Portfolio_to_UDMPortfolio(portf)
    # Publish the insert request to the UserDB queue.
    rabbitmq = rabbitMqProducer('UserDB-PortfGen', "localhost", "UserDB-PortfGen", "")
    request_msg = UDMRequest(None, RequestType.Portfolio, Operation.Insert, None, UDMportf, None, None)
    rabbitmq.publish(request_msg.to_json())
    return
|
import sys
from Controllers.ControllerMain import ControllerMain
class MainView:
    """Minimal console view: constructs the main controller, hands it the
    raw command-line arguments, and prints whatever data it pushes back."""
    def __init__(self, argv):
        # The controller receives this view so it can call show_data()
        # when results are ready.
        self.controller = ControllerMain(self)
        self.controller.set_sys_argv(argv)
    def show_data(self, data):
        """Print *data* to stdout (called by the controller)."""
        print(data)
if __name__ == '__main__':
    # Script entry point: the view's constructor drives everything.
    view = MainView(sys.argv)
"""add body_html column to the posts table

Revision ID: e98384408eae
Revises: 2e3b3ca8a665
Create Date: 2020-11-12 16:00:06.798935
"""
# revision identifiers, used by Alembic.
revision = 'e98384408eae'
down_revision = '2e3b3ca8a665'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable Text column 'body_html' to the 'posts' table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('posts', sa.Column('body_html', sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the 'body_html' column from the 'posts' table (reverts upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('posts', 'body_html')
    # ### end Alembic commands ###
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.