index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
22,900 | 88c79f429ed5722358c3c042b9c9ecabfbc54e10 | from django.test import TestCase
from ..models import Recipe, Tag
# Create your tests here.
class RecipeTestCase(TestCase):
    """Exercises Recipe creation and tag association."""

    def setUp(self):
        """Create one recipe shared by every test method."""
        self.recipe_data = {
            'name': 'recipe-test',
            'description': 'description',
            'image': 'needs-to-be-data',
            'difficulty': 'E',
            'serves': 1,
            'time_prep': 2,
            'time_cook': 3,
            'time_other': 4
        }
        Recipe.objects.create(**self.recipe_data)

    def test_create(self):
        """A freshly created recipe round-trips its fields and has no relations."""
        recipe = Recipe.objects.get(name=self.recipe_data['name'])
        self.assertEqual(recipe.name, self.recipe_data['name'])
        self.assertEqual(recipe.description, self.recipe_data['description'])
        # .count() issues COUNT(*) instead of loading every row via len(all()).
        self.assertEqual(recipe.tags.count(), 0)
        self.assertEqual(recipe.steps.count(), 0)

    def test_add_tag(self):
        """add_tag() attaches a tag that is visible through the m2m manager."""
        recipe = Recipe.objects.get(name=self.recipe_data['name'])
        # create() already returns the instance; no need to re-fetch it.
        tag = Tag.objects.create(name='test')
        recipe.add_tag(tag)
        # The m2m manager queries the DB on access, so no re-fetch is needed.
        self.assertEqual(recipe.tags.count(), 1)
|
22,901 | 1a1930ac0f4ac1385291c2708a707c14564949ac | import os
import sys
print "Choose directory to commit genocide on the files."
Dir = raw_input("> ")
def deleteFiles(path):
    # Recursively delete every file under `path`, then remove the now-empty
    # sub-directories.  Equivalent to shutil.rmtree(path) except that the
    # top-level directory itself is left in place.
    # NOTE(review): Python 2 only (print statement syntax); symlinks to
    # directories would be recursed into -- confirm that is intended.
    currentDir = os.listdir(path)
    for i in currentDir:
        # Rebind the loop variable to the full path of the entry.
        i = path + "/" + i
        if os.path.isfile(i):
            print "deleting",i
            os.remove(i)
        else:
            print "found folder:",i
            deleteFiles(i)
            # Safe to rmdir: the recursive call emptied the folder.
            os.rmdir(i)
deleteFiles(Dir)
|
22,902 | 8123a50036ddf9017801849e2dddc18641ec8296 | #### DataSource script
### Import the wl module (weblogic module files made from wlst: writeIniFile('wl.py')
# 2 different ways of importing
# without prefix prefered
# but need to create the cmo
#from wl import *
#
#def cd_wl(bean):
# return super.cd(bean)
#
#def cd(bean):
# cmo=cd_wl(bean)
# return cmo
#
# or by using the prefix
# import wl
# --- WLST/Jython script: destroy a JDBC datasource on a WebLogic server ---
# NOTE(review): relies on `propertyfile` and the WLST builtins (connect, edit,
# startEdit, cd, getMBean, cmo, activate) being provided by the WLST launcher
# environment before this script runs -- confirm.
from java.io import FileInputStream
from java.util import Properties
print (' Creating a Datasource')
print (' Loading test file: '+ propertyfile)
propInputStream = FileInputStream(propertyfile)
configProps = Properties()
configProps.load(propInputStream)
# Connection and datasource settings read from the property file.
adminURL=configProps.get("admin.url")
adminUserName=configProps.get("admin.userName")
adminPassword=configProps.get("admin.password")
dsName=configProps.get("datasource.name")
print ('Connecting to '+adminURL+' ...')
connect(adminUserName, adminPassword, adminURL)
edit()
startEdit()
cd('/')
# Navigate to the resource so `cmo` points at the JDBCSystemResource bean.
cd('/JDBCSystemResources/'+dsName)
print("Destroying: "+dsName)
getMBean("/JDBCSystemResources/").destroyJDBCSystemResource(cmo)
activate()
22,903 | f4843087f773e50241422d4f880ee7e33f5de35c | from contextlib import contextmanager
from datetime import datetime
from os import environ
import feedgenerator
import selenium
from selenium import webdriver
# This is lame.
if "Apple" not in environ.get("TERM_PROGRAM", ""):
from pyvirtualdisplay import Display
else:
@contextmanager
def Display():
yield
PATREON_URL = "https://www.patreon.com/{}"
def get_first_child(element, tag="div"):
    """Return the first descendant of *element* with the given tag name."""
    children = element.find_elements_by_tag_name(tag)
    return children[0]
def patreon_posts(user):
    """Scrape a Patreon creator page and return an RSS feed as a UTF-8 string.

    Drives Firefox (inside a virtual display when not on macOS Terminal) to
    render the page, then extracts each post card's link, date, title and
    description into a feedgenerator RSS feed.
    """
    patreon_user_url = PATREON_URL.format(user)
    with Display():
        # Start Firefox and it will run inside the virtual display.
        driver = webdriver.Firefox()
        # Make sure we always clean up at the end.
        try:
            driver.get(patreon_user_url)
            element = driver.find_element_by_tag_name("h1")
            feed_title = element.text
            # Feed description: the h1 text followed by its sibling span.
            feed_description = (
                feed_title
                + " "
                + driver.find_element_by_xpath("//h1/following-sibling::span").text
            )
            feed = feedgenerator.Rss201rev2Feed(
                title=feed_title, link=patreon_user_url, description=feed_description
            )
            posts = driver.find_elements_by_css_selector('div[data-tag="post-card"]')
            for post in posts:
                # Fix: the original selector was missing its closing "]".
                element = post.find_element_by_css_selector(
                    'a[data-tag="post-published-at"]'
                )
                link = element.get_attribute("href")
                date = datetime.strptime(element.text, "%b %d, %Y AT %I:%M %p")
                title = post.find_element_by_css_selector(
                    'span[data-tag="post-title"]'
                ).text
                try:
                    container = post.find_element_by_css_selector(
                        'div[data-tag="post-content-collapse"]'
                    )
                    # The post text lives four <div> levels below the container.
                    description_el = get_first_child(
                        get_first_child(get_first_child(get_first_child(container)))
                    )
                    description = description_el.get_attribute("innerHTML")
                except selenium.common.exceptions.NoSuchElementException:
                    # No description.
                    description = ""
                # TODO Handle media.
                feed.add_item(
                    title=title,
                    link=link,
                    description=description,
                    author_name=feed_title,
                    author_link=patreon_user_url,
                    pubdate=date,
                )
        finally:
            driver.quit()
    return feed.writeString("utf-8")
|
22,904 | 2c2ce171ec66ef6a61db7f00f47a19a6e877be89 | #
# import os
# from openpyxl import Workbook
#
# wb = Workbook()
# dest_filename = '音频列表.xlsx'
# ws1 = wb.active
# ws1.title = "音频列表"
# fileNameList = [];
# # name = ''
# def file_name(file_dir):
# for root, dirs,files in os.walk(file_dir):
# print(root)
# print(dirs)
# print(files)
# for name in files:
# print(name)
#
# def file_name1(file_dir):
# L=[]
# for root, dirs, files in os.walk(file_dir):
# for file in files:
# if os.path.splitext(file)[1] == '.wav':
# L.append(os.path.splitext(file)[0])
# return L
# def main():
# fileDir = "/Users/shanfangliang/Desktop/工作文档/跑道检查频率"
# fileNameList = file_name1(fileDir)
# print(fileNameList)
# print(fileNameList.__len__())
# for file in fileNameList:
# col_A = 'A%s' % (fileNameList.index(file))
#
# ws1[col_A] = file
#
# wb.save(filename=dest_filename)
#
#
#
# if __name__ == '__main__':
# main() |
22,905 | 9cb36bb8d6ad17b8025d46b6820943f3f0330b1e | #!/usr/bin/python3
# Copyright (C) 2023 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a modified version of the setuptools "easy install" entrypoint
# https://github.com/pypa/setuptools/blob/main/setuptools/command/easy_install.py#L2058
import os
import re
import sys
scripts = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'Scripts')
if os.path.isdir(os.path.join(scripts, 'webkitpy')):
sys.path.insert(0, scripts)
import webkitpy
from webkitpy.autoinstalled import buildbot
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
    """Load the (group, name) entry point of the distribution named in *spec*.

    *spec* may carry a version pin ("dist==1.0"); only the name part is used.
    Raises StopIteration when no matching entry point exists.
    """
    dist_name = spec.partition('==')[0]
    is_match = lambda ep: ep.group == group and ep.name == name
    return next(filter(is_match, distribution(dist_name).entry_points)).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
|
22,906 | c17fe03abbf8d05683f8608a6b0839f71a2b9e00 | # -*- coding: utf-8 -*
import tensorflow as tf
import random
import numpy as np
import pickle
from collections import deque
import itertools
from FMemory import Memory
from FAGENT import AGENT
def set_n_step(container, n, Config):
    """Extend each transition in *container* with its n-step return info.

    Each transition is a list [state, action, reward, next_state, done, p];
    this appends [n_step_reward, n_next_state, n_done, actual_n] to each one
    in place and returns them as a list.
    """
    t_list = list(container)
    # Accumulated discounted reward of the first (n - 1) transitions.
    n_step_reward = sum([t[2] * Config.GAMMA ** i
                         for i, t in enumerate(t_list[0:min(len(t_list), n) - 1])])
    for begin in range(len(t_list)):
        # Fix: use the `n` argument consistently; the original mixed `n` with
        # Config.trajectory_n, which only coincide for the current callers.
        end = min(len(t_list) - 1, begin + n - 1)
        n_step_reward += t_list[end][2] * Config.GAMMA ** (end - begin)
        # extend [n_reward, n_next_s, n_done, actual_n]
        t_list[begin].extend([n_step_reward, t_list[end][3], t_list[end][4], end - begin + 1])
        # Slide the window: drop this step's reward and undo one discount.
        n_step_reward = (n_step_reward - t_list[begin][2]) / Config.GAMMA
    return t_list
def run_DQfD(env, Config):
    """Train a DQfD agent on *env*, seeding replay memory with demo data.

    Loads demonstration transitions from Config.DEMO_DATA_PATH, restores a
    saved model when one exists (else initializes randomly), then runs
    episodes until 100 post-warmup scores are collected.  Returns the list
    of episode scores.  NOTE(review): relies on TF1 sessions and a gym-style
    env; `score == 499` special-casing presumably targets CartPole-v1's
    500-step cap -- confirm.
    """
    sess=tf.InteractiveSession()
    with open(Config.DEMO_DATA_PATH, 'rb') as f:
        demo_transitions = pickle.load(f)
        demo_transitions = deque(itertools.islice(demo_transitions, 0, Config.DEMO_BUFFER_SIZE))
        print("demo_transitions len: ", len(demo_transitions))
    with tf.variable_scope('AGENT'):
        agent = AGENT(env, Config,sess)
    agent.add_data_to_memory(demo_transitions, agent.demo_memory)
    #print("demo_memory", agent.get_data_from_fullmemory(agent.demo_memory))
    agent.copy_AFULL_to_B(agent.demo_memory, agent.replay_memory)
    try:
        # If a checkpoint exists, resume from it with a small epsilon.
        print("agent model existed")
        agent.restore_model()
        agent_model_improve_flag = False
        agent.epsilon = 0.01
    except:
        print("there is no model,we are going to initialize it randomly")
        agent.sess.run(tf.global_variables_initializer())
        print("agent.epsilon:{}".format(agent.epsilon))
        agent.save_model()
        agent_model_improve_flag = True
    scores, e, replay_full_episode = [], 0, None
    n_dqfd = 0
    while True:
        if agent_model_improve_flag:
            agent.restore_model()
            agent_model_improve_flag = False
        e += 1
        done, score, n_step_reward, state = False, 0, None, env.reset()
        # Sliding window of the last trajectory_n transitions.
        t_q = deque(maxlen=Config.trajectory_n)
        n_dqfd += 1
        while done is False:
            action = agent.egreedy_action(state)
            next_state, reward, done, _ = env.step(action)
            score += reward
            # Penalize terminating early (before the presumed 500-step cap).
            reward = reward if not done or score == 499 else -100
            reward_to_sub = 0. if len(t_q) < t_q.maxlen else t_q[0][2]  # record the earliest reward for the sub
            t_q.append([state, action, reward, next_state, done, 0.0])
            if len(t_q) == t_q.maxlen:
                if n_step_reward is None:  # only compute once when t_q first filled
                    n_step_reward = sum([t[2] * Config.GAMMA ** i for i, t in enumerate(t_q)])
                else:
                    n_step_reward = (n_step_reward - reward_to_sub) / Config.GAMMA
                    n_step_reward += reward * Config.GAMMA ** (Config.trajectory_n - 1)
                t_q[0].extend([n_step_reward, next_state, done, t_q.maxlen])  # actual_n is max_len here
                agent.perceive(t_q[0])  # perceive when a transition is completed
                if agent.replay_memory.full():
                    agent.train_Q_network(update=False)  # train along with generation
                    replay_full_episode = replay_full_episode or e
            state = next_state
            env.render(state)
            if done:
                # handle transitions left in t_q
                t_q.popleft()  # first transition's n-step is already set
                transitions = set_n_step(t_q, Config.trajectory_n, Config)
                for t in transitions:
                    agent.perceive(t)
                    if agent.replay_memory.full():
                        agent.train_Q_network(update=False)
                        replay_full_episode = replay_full_episode or e
                if agent.replay_memory.full():
                    scores.append(score)
                    agent.sess.run(agent.update_target_net)
                if replay_full_episode is not None:
                    print("episode: {} trained-episode: {} score: {} memory length: {} epsilon: {}"
                          .format(e, e - replay_full_episode, score, len(agent.replay_memory), agent.epsilon))
                # Checkpoint once exploration has fully annealed.
                if agent.epsilon == agent.config.FINAL_EPSILON:
                    agent.save_model()
                    agent_model_improve_flag = True
        if len(scores) > 100:
            break
    return scores
|
22,907 | 248a8852c7c1b6bc57da769a892f3aa63f79eb51 | import numpy as np
import logging
import os
import time
from datetime import timedelta
from torch.nn import CosineSimilarity
import torch
class Loss(object):
    """Running average of a scalar loss."""

    def __init__(self):
        self.num_steps = 0.0   # number of update() calls so far
        self.total_loss = 0.0  # accumulated loss value

    def update(self, loss):
        """Fold one loss value (a torch scalar tensor) into the average."""
        self.num_steps += 1.0
        self.total_loss += loss.item()

    def __call__(self):
        """Mean loss so far; 0.0 before any update."""
        if not self.num_steps:
            return self.num_steps
        return self.total_loss / self.num_steps

    def reset(self):
        """Clear the running statistics."""
        self.total_loss = 0.0
        self.num_steps = 0.0
class AccuracyRec(object):
    """Running token-level accuracy that ignores padding positions."""

    def __init__(self, pad_ind=1):
        self.correct = 0.0
        self.total = 0.0
        self.pad_ind = pad_ind  # target value treated as padding

    def update(self, outputs, targets):
        """Accumulate accuracy from score tensors *outputs* and int *targets*."""
        outputs = outputs.detach().cpu().numpy()
        targets = targets.detach().cpu().numpy()
        keep = np.where(targets != self.pad_ind)
        predictions = outputs[keep].argmax(-1)
        gold = targets[keep]
        self.correct += (predictions == gold).sum().item()
        self.total += len(gold)

    def __call__(self):
        """Accuracy in percent; 0.0 before any update."""
        if not self.total:
            return self.total
        return self.correct / self.total * 100.0

    def reset(self):
        self.correct = 0.0
        self.total = 0.0
class AccuracyCls(object):
    """Running classification accuracy over score batches."""

    def __init__(self):
        self.correct = 0.0
        self.total = 0.0

    def update(self, outputs, targets):
        """Accumulate from *outputs* (class scores along dim 1) and *targets*."""
        predicted = torch.max(outputs.data, 1)[1]
        self.total += targets.size(0)
        self.correct += (predicted == targets).sum().item()

    def __call__(self):
        """Accuracy in percent; 0.0 before any update."""
        if not self.total:
            return self.total
        return self.correct / self.total * 100.0

    def reset(self):
        self.total = 0.0
        self.correct = 0.0
def preict_labels(preds):
    """Return the argmax class index of *preds* along the last axis.

    (The name's typo is kept for existing callers.)
    """
    detached = preds.detach()
    return detached.argmax(-1)
def count_parameters(model):
    """Number of trainable (requires_grad) scalar parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def num_tokens(batch, pad_ind=1):
    """Count the entries of *batch* that are not the padding index."""
    arr = batch.detach().cpu().numpy()
    return int((arr != pad_ind).sum())
def pprint_params(paramsObj):
    """Log every public (non-underscore) attribute of *paramsObj* at INFO."""
    logging.info('Params for experiment:')
    public_attrs = [a for a in dir(paramsObj) if not a.startswith('_')]
    for attr in public_attrs:
        logging.info("%s = %r" % (attr, getattr(paramsObj, attr)))
class EarlyStopping:
    """Early stops the training if validation accuracy doesn't improve after a given patience."""

    def __init__(self, patience=3):
        self.patience = patience
        self.counter = 0            # consecutive non-improving calls
        self.best_score = None      # best validation accuracy seen so far
        self.early_stop = False     # set True once patience is exhausted
        # Fix: np.Inf was removed in NumPy 2.0; np.inf works everywhere.
        self.val_acc_min = np.inf
        self.is_current_ist_best = False

    def is_new_best_score(self):
        """True when the most recent call improved on the best score."""
        return not (self.counter)

    def __call__(self, val_acc):
        """Record one validation accuracy; may set self.early_stop."""
        score = val_acc
        if self.best_score is None:
            self.best_score = score
        elif score <= self.best_score:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.counter = 0
class LogFormatter():
    """Formats records as 'LEVEL - localtime - elapsed - message', indenting
    continuation lines of multi-line messages to align under the prefix."""

    def __init__(self):
        self.start_time = time.time()  # reference point for elapsed time

    def format(self, record):
        elapsed = round(record.created - self.start_time)
        prefix = "%s - %s - %s" % (
            record.levelname,
            time.strftime('%x %X'),
            timedelta(seconds=elapsed),
        )
        indent = ' ' * (len(prefix) + 3)
        body = record.getMessage().replace('\n', '\n' + indent)
        return "%s - %s" % (prefix, body)
def create_logger(log_dir, dump=False):
    """Configure and return the root logger.

    Creates *log_dir* if needed; when *dump* is True, also writes records to
    <log_dir>/net_launcher_log.log.  Always attaches a console handler and
    replaces any existing handlers on the root logger.  Adds a `reset_time`
    callable to the logger for restarting the elapsed-time clock.
    """
    log_dir = str(log_dir)
    filepath = os.path.join(str(log_dir), 'net_launcher_log.log')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # # Safety check
    # if os.path.exists(filepath) and opt.checkpoint == "":
    #     logging.warning("Experiment already exists!")
    # Create logger
    log_formatter = LogFormatter()
    if dump:
        # create file handler and set level to info
        file_handler = logging.FileHandler(filepath, "a")
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(log_formatter)
    # create console handler and set level to info
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(log_formatter)
    # create logger and set level to info; clearing handlers makes this
    # call idempotent across repeated invocations.
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.INFO)
    logger.propagate = False
    if dump:
        logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    # reset logger elapsed time
    def reset_time():
        log_formatter.start_time = time.time()
    logger.reset_time = reset_time
    logger.info('Created main log at ' + str(filepath))
    return logger
def preds_embedding_cosine_similarity(preds, embedding):
    """Cosine similarity between predicted vectors and every vocab embedding.

    NOTE(review): mutates *preds* in place via unsqueeze_ -- callers should
    not reuse the tensor afterwards.  Assumes preds is (batch, seq, dim) and
    embedding.lut is an nn.Embedding-like module -- TODO confirm at call sites.
    """
    vocab_size = embedding.lut.num_embeddings
    preds.unsqueeze_(-1)
    # Broadcast preds against the full embedding table: (b, s, dim, vocab).
    preds = preds.expand(-1, -1, -1, vocab_size)
    embeddings = embedding.lut.weight.transpose(0,1).unsqueeze(0).unsqueeze(0)
    embeddings = embeddings.expand(preds.shape[0], preds.shape[1], -1, -1)
    # Similarity over the embedding dimension (dim=2).
    cosine_sim = CosineSimilarity(dim=2)
    return cosine_sim(preds, embeddings)
22,908 | b9a2ef726ce9d41e2b862d14778ba872ad76246b | #!{PYTHON}
# example syntax: retrieve_s2_priors.py workshop-test.yaml /data/m5/priors
from multiply_prior_engine import PriorEngine
import datetime
import logging
import os
import sys
import yaml
# Progress logger: emits "<pct>-<pct>" ranges consumed by the calling framework.
script_progress_logger = logging.getLogger('ScriptProgress')
script_progress_logger.setLevel(logging.INFO)
script_progress_formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
script_progress_logging_handler = logging.StreamHandler()
script_progress_logging_handler.setLevel(logging.INFO)
script_progress_logging_handler.setFormatter(script_progress_formatter)
script_progress_logger.addHandler(script_progress_logging_handler)
# setup parameters (see the example syntax in the header comment)
configuration_file = sys.argv[1]
start = sys.argv[2]
end = sys.argv[3]
output_root_dir = sys.argv[4]
# read request file for parameters
# NOTE(review): yaml.load without an explicit Loader is deprecated and unsafe
# on untrusted input -- yaml.safe_load(f) is the usual replacement.
with open(configuration_file) as f:
    parameters = yaml.load(f)
# Collect the priors required by the Sentinel-2 forward model (if any).
required_priors = []
for model in parameters['Inference']['forward_models']:
    if model['data_type'] == 'Sentinel-2':
        required_priors = model['required_priors']
start_time = datetime.datetime.strptime(start, '%Y-%m-%d')
end_time = datetime.datetime.strptime(end, '%Y-%m-%d')
# execute the Prior engine once per day in [start, end], logging progress
time = start_time
num_days = (end_time - start_time).days + 1
i = 0
while time <= end_time:
    print(time)
    PE = PriorEngine(config=configuration_file, datestr=time.strftime('%Y-%m-%d'), variables=required_priors)
    script_progress_logger.info(f'{int((i/num_days) * 100)}-{int(((i+1)/num_days) * 100)}')
    priors = PE.get_priors()
    time = time + datetime.timedelta(days=1)
    i += 1
# create output_dir (if not already exist)
if not os.path.exists(output_root_dir):
    os.makedirs(output_root_dir)
# put the files for the 'vegetation priors' into the proper directory
if 'General' in parameters['Prior']:
    directory = parameters['Prior']['output_directory']
    os.system("cp " + directory + "/*.vrt " + output_root_dir + "/")
# put the files for the 'soil moisture' into the proper directory
# if 'sm' in parameters['Prior']:
#     ptype = parameters['Prior']['sm']
#     if 'climatology' in ptype:
#         soil_moisture_dir = parameters['Prior']['sm']['climatology']['climatology_dir']
#         soil_moisture_dir = '/data/auxiliary/priors/Climatology/SoilMoisture'
#     else:
#         soil_moisture_dir = parameters['Prior']['General']['directory_data']
#     os.system("mv " + soil_moisture_dir + "/*.vrt " + output_root_dir + "/")
script_progress_logger.info('100-100')
|
22,909 | 7566e86a800b86a231c43e1b4468d13da8065d30 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = '1.0.0'
__author__= 'Robert Caranog'
import cv2
import numpy as np
def apply_invert(frame):
    """Return the color-inverted (negative) version of *frame*."""
    inverted = cv2.bitwise_not(frame)
    return inverted
def apply_sepia(frame, intensity=0.5):
    """Blend a sepia-toned overlay into *frame* at the given *intensity*."""
    sepia_bgra = (20, 66, 112, 1)  # blue, green, red, alpha
    frame = apply_alpha_convert(frame)
    height, width = frame.shape[0], frame.shape[1]
    overlay = np.full((height, width, 4), sepia_bgra, dtype='uint8')
    blended = cv2.addWeighted(overlay, intensity, frame, 1.0, 0)
    return cv2.cvtColor(blended, cv2.COLOR_BGRA2BGR)
def apply_reddish(frame, intensity=0.5):
    """Blend a red overlay into *frame* at the given *intensity*."""
    red_bgra = (0, 0, 204, 1)  # blue, green, red, alpha
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    height, width = frame.shape[0], frame.shape[1]
    red_overlay = np.full((height, width, 4), red_bgra, dtype='uint8')
    blended = cv2.addWeighted(red_overlay, intensity, frame, 1.0, 0)
    return cv2.cvtColor(blended, cv2.COLOR_BGRA2BGR)
def apply_alpha_convert(frame):
    """Ensure *frame* has an alpha channel (BGRA), converting from BGR if needed.

    Fix: the original probed frame.shape[3], which raises IndexError for every
    H x W x C image (shape has only 3 entries), so the conversion ran even on
    frames that already had an alpha channel.  Check the channel count instead.
    """
    if frame.ndim != 3 or frame.shape[2] != 4:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    return frame
def apply_portrait_mode(frame):
    """Blur the background (bright regions keep the blur, per the mask)."""
    frame = apply_alpha_convert(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)
    mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGRA)
    blurred = cv2.GaussianBlur(frame, (21, 21), 0)
    composite = apply_blend(frame, blurred, mask)
    return cv2.cvtColor(composite, cv2.COLOR_BGRA2BGR)
def apply_blend(frame_1, frame_2, mask):
    """Alpha-blend frame_2 over frame_1 using *mask* (0-255) as per-pixel weights."""
    alpha = mask / 255.0
    return cv2.convertScaleAbs(frame_1 * (1 - alpha) + frame_2 * alpha)
# Live webcam demo: show the raw frame next to each filter until 'q' or Esc.
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    invert = apply_invert(frame)
    sepia = apply_sepia(frame)
    reddish = apply_reddish(frame)
    portrait = apply_portrait_mode(frame)
    cv2.imshow('frame', frame)
    cv2.imshow('invert', invert)
    cv2.imshow('sepia', sepia)
    cv2.imshow('red', reddish)
    cv2.imshow('portrait', portrait)
    k = cv2.waitKey(1)
    # 27 is the Esc key code.
    if k == ord('q') or k ==27:
        cap.release()
        cv2.destroyAllWindows()
        break
# Redundant with the in-loop cleanup, but harmless.
cap.release()
cv2.destroyAllWindows()
22,910 | f456aee42f011198115acd9d6aab72ff7a314837 | import pyglet
import pybox
from pyglet.window import key, mouse
# ----------------------------
# General
# ----------------------------
@pybox.game.load
def load(win):
    """pybox load hook: stash the window handle for later use (see key_press)."""
    global window
    window = win
@pybox.game.update
def update(dt):
    """Per-frame update hook; no game state to advance yet."""
    pass
@pybox.game.draw
def draw():
    """Per-frame draw hook; nothing to render yet."""
    pass
# ----------------------------
# Window
# ----------------------------
# @pybox.game.focus
# def focus():
# print('focused')
# @pybox.game.blur
# def blur():
# print('blur')
# @pybox.game.hide
# def hide():
# print('hidden')
# @pybox.game.show
# def show():
# print('shown')
# @pybox.game.move
# def move(x, y):
# print('moved', x, y)
# ----------------------------
# Keyboard
# ----------------------------
@pybox.game.key_press
def key_press(symbol, modifiers):
    """Close the window (stored by load()) when Escape is pressed."""
    if symbol == key.ESCAPE:
        window.close()
# @pybox.game.key_release
# def key_press(symbol, modifiers):
# pass
# @pybox.game.key_down
# def key_down(keys):
# pass
# @pybox.game.text
# def text(text):
# print(text)
# ----------------------------
# Mouse
# ----------------------------
# @pybox.game.mouse_drag
# def mouse_drag(x, y, dx, dy, buttons, modifiers):
# print(x, y, dx, dy, buttons)
# @pybox.game.mouse_motion
# def mouse_motion(x, y, dx, dy):
# print(x, y, dx, dy)
# @pybox.game.mouse_press
# def mouse_press(x, y, button, modifiers):
# print(x, y, button)
# @pybox.game.mouse_release
# def mouse_release(x, y, button, modifiers):
# print(x, y, button)
# @pybox.game.mouse_scroll
# def on_mouse_scroll(x, y, scroll_x, scroll_y):
# print(x, y, scroll_x, scroll_y)
# ----------------------------
# Joystick
# ----------------------------
# joysticks = pyglet.input.get_joysticks()
# joystick = joysticks[0]
# joystick.open()
# @joystick.event
# def on_joybutton_press(joystick, button):
# print('release', joystick, button)
# @joystick.event
# def on_joybutton_release(joystick, button):
# print('release', joystick, button)
# @joystick.event
# def on_joyaxis_motion(joystick, axis, value):
# print('axis motion', joystick, axis, value)
# @joystick.event
# def on_joyhat_motion(joystick, hat_x, hat_y):
# print('hat motion', joystick, hat_x, hat_y)
if __name__ == "__main__":
pybox.game.run() |
22,911 | a1f6f330f5a4dc7265c88bad6988dc473c7878d3 | UNITS = {
"length": {
"meters": 1.0,
"feet": 0.3048
},
"time": {
"days": 1.0,
"hours": 1/24.0,
"minutes": 1/(24.0*60.0)
},
"discharge": {
"m3/day": 1.0,
"gal/min": 5.451,
"ft3/day": 0.02832
}
}
def unit_conversion_factor(unit_type, unit_from, unit_to):
# Standard units are meters, days, and m3/day
to_std = CONVERSION_FACTORS[unit_type][unit_from]
from_std = 1.0/CONVERSION_FACTORS[unit_type][unit_to]
return to_std*from_std
def to_std_units_factor(unit_type, unit_from):
return UNITS[unit_type][unit_from]
def from_std_units_factor(unit_type, unit_from):
return 1.0/UNITS[unit_type][unit_from]
|
22,912 | dbfee0f3982bcece64fc356a84dad11cf82c68ef | from keras.callbacks import EarlyStopping, TensorBoard
import os
def stopper(patience: int, monitor: str) -> EarlyStopping:
    """Build a Keras EarlyStopping callback watching *monitor* (e.g. 'val_loss')."""
    stop = EarlyStopping(monitor=monitor, min_delta=0, patience=patience,
                         verbose=2, mode='auto')
    return stop
def tensorboard() -> TensorBoard:
    """Build a TensorBoard callback that logs to ./logs once per epoch."""
    # TODO: configurate tensorboard
    log_dir = os.path.relpath('logs')
    board = TensorBoard(log_dir=log_dir, histogram_freq=0, batch_size=32, write_graph=True, write_grads=False,
                        write_images=False, update_freq='epoch')
    return board
|
22,913 | 8c032c2e978a913c855621a2a85216580125e0da | import requests
import json
import re
import bs4
import os
url="http://pagelet.mafengwo.cn/note/pagelet/recommendNoteApi?callback=jQuery18103353494952086171_1586840488510¶ms=%7B%22type%22%3A%220%22%7D&_=1586840489457"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36'
}
def loads_jsonp(_jsonp):
    """Extract and parse the JSON object embedded in a JSONP string.

    Raises ValueError('Invalid Input') when no {...} payload is present or
    the payload is not valid JSON.  (The original bare except also masked
    unrelated errors; this version only converts the expected failures.)
    """
    match = re.match(r".*?({.*}).*", _jsonp, re.S)
    if match is None:
        raise ValueError('Invalid Input')
    try:
        return json.loads(match.group(1))
    except json.JSONDecodeError as exc:
        raise ValueError('Invalid Input') from exc
# Fetch the JSONP payload, unwrap the embedded HTML fragment, and write one
# text file per recommended travel note.
a=requests.get(url,headers=headers)
b=loads_jsonp(a.text)
c=b['data']
d=c['html']
# Fix: removed a stray `public` token here that raised NameError at runtime.
e=bs4.BeautifulSoup(d,'html.parser')
f=e.findAll(attrs={'class':'tn-item clearfix'})
# Fix: create the output dir explicitly instead of mkdir + bare except,
# which silently swallowed unrelated errors (e.g. permissions).
os.makedirs("./note", exist_ok=True)
for i in range(0,len(f)):
    filename="./note/note"+str(i+1)+".txt"
    with open(filename,'w+',encoding='utf-8') as wf:
        image=f[i].find(attrs={'class':'tn-image'})
        wf.write("图片地址1:"+ image.a.img['data-src'])
        wf.write("\n图片地址2:"+ image.a.img['data-rt-src'])
        wf.write("\n文章地址:"+image.a['href'])
        wrapper=f[i].find(attrs={'class':'tn-wrapper'})
        wf.write("\n文章标题:"+ wrapper.dl.dt.a.text)
        wf.write("\n文章摘要:"+ wrapper.dl.dd.a.text)
        extra=wrapper.find(attrs={'class':'tn-extra'})
        ding=extra.find(attrs={'class':'tn-ding'})
        wf.write("\nding数:"+ding.em.text)
        place=extra.find(attrs={'class':'tn-place'})
        wf.write("\n地点:"+place.text)
        user=extra.find(attrs={'class':'tn-user'})
        userName=user.a.text
        wf.write("\n用户名:"+userName.strip())
        userAvatar=user.img['src']
        wf.write("\n用户头像地址:"+userAvatar)
        nums=extra.find(attrs={'class':'tn-nums'})
        wf.write("\nnums:"+nums.text)
22,914 | 6fc5359209fceb97c5bcda7b3470e848f2114452 | from tkinter import *
from tkinter import ttk
from sorting_functions import count_distance
def howtogo_window(frame ,userlocate, cantlocate):
    """Draw a campus map with an arrow from the user to the canteen.

    frame       -- tkinter container to draw into
    userlocate  -- (x, y) pixel position of the user on the map image
    cantlocate  -- (x, y) pixel position of the canteen
    A status bar at the bottom shows the computed distance.
    """
    distance = count_distance(userlocate, cantlocate)
    ##frame##
    bottomframe = Frame(frame)
    bottomframe.pack(side=BOTTOM)
    ##canvas##
    canvas = Canvas(frame)
    canvas.pack(fill=BOTH, expand=1)  # Stretch canvas to root window size.
    ##image + line + circle##
    background_image= PhotoImage(file=r"Canteen-Recommender-App/Main/NTUcampus.png")
    image = canvas.create_image((0,0), anchor='nw', image=background_image)
    line = canvas.create_line( userlocate[0] ,userlocate[1], cantlocate[0],cantlocate[1], fill="red",width=3,arrow=LAST)
    frame.image = image
    # Small circles marking the two endpoints.
    circleuser = canvas.create_oval(userlocate[0]-5, userlocate[1]-5, userlocate[0] + 5, userlocate[1] + 5,fill="#000fff000", outline='black', width=3)
    circlecant= canvas.create_oval(cantlocate[0]-5, cantlocate[1]-5, cantlocate[0] + 5, cantlocate[1] + 5,fill="#000fff000",outline='black',width=3)
    background_image.image = background_image  # keeps a reference so tkinter doesn't GC the image
    ##status bar##
    status = Label(frame,bd=1,relief=SUNKEN,anchor=W)
    status["text"] = "The distance between you and the canteen is: " + str(distance)
    status.pack(side = BOTTOM,fill="x")
if __name__ == "__main__":
root = Tk()
# howtogo_window(frame ,userlocate, cantlocate)
howtogo_window(root, (472, 242), (632, 291))
root.mainloop() |
22,915 | f012538039b4b88b044c119079b67c710e2f2fcf | import csv
base="/Users/shengdongliu/Trading_Strategy/output_data/"
def get_gold(n=100):
    """Return the first *n* GOLD closing prices, oldest first.

    Fix: csvr.next() is Python-2-only; next(csvr) works on both.  Also uses
    the module-level `base` path prefix instead of repeating the absolute path.
    """
    with open(base + 'metal/GOLD.csv') as inf:
        csvr = csv.reader(inf)
        next(csvr)  # skip the header row
        prices = [float(row[6]) for row in csvr]
    prices = prices[:n]
    prices.reverse()
    return prices
def get_silver(n=100):
    """Return the first *n* SLV closing prices, oldest first.

    Fix: csvr.next() is Python-2-only; next(csvr) works on both.  Also uses
    the module-level `base` path prefix instead of repeating the absolute path.
    """
    with open(base + 'metal/SLV.csv') as inf:
        csvr = csv.reader(inf)
        next(csvr)  # skip the header row
        prices2 = [float(row[6]) for row in csvr]
    prices2 = prices2[:n]
    prices2.reverse()
    return prices2
def get_pt(n=100):
    """Return the first *n* PPLT (platinum) closing prices, oldest first.

    Fix: csvr.next() is Python-2-only; next(csvr) works on both.  Also uses
    the module-level `base` path prefix instead of repeating the absolute path.
    """
    with open(base + 'metal/PPLT.csv') as inf:
        csvr = csv.reader(inf)
        next(csvr)  # skip the header row
        prices3 = [float(row[6]) for row in csvr]
    prices3 = prices3[:n]
    prices3.reverse()
    return prices3
def get_date(n=100):
    """Return the first *n* GOLD row dates, oldest first.

    Fix: csvr.next() is Python-2-only; next(csvr) works on both.  Also uses
    the module-level `base` path prefix instead of repeating the absolute path.
    """
    with open(base + 'metal/GOLD.csv') as inf:
        csvr = csv.reader(inf)
        next(csvr)  # skip the header row
        date = [row[0] for row in csvr]
    date = date[:n]
    date.reverse()
    return date
22,916 | 98c4313ef927335661b007be06151279950efe82 | import re
import os
import urllib.request
def getHtml(url):
    """Fetch *url* and return the response body decoded as UTF-8."""
    response = urllib.request.urlopen(url)  # open the url
    body = response.read()                  # read the raw page bytes
    return body.decode('utf-8')
def getImg(html):
    """Download every .jpg referenced in *html* and return the rebuilt URLs.

    Images are saved as ./image/0.jpg, ./image/1.jpg, ...
    Fix: the ./image directory is now created first -- urlretrieve fails
    when the target directory does not exist.
    """
    imgre = re.compile(r'src=".+?\.jpg"')
    imglist = imgre.findall(html)
    # Hoisted out of the loop: compile the path-extraction pattern once.
    srcre = re.compile(r'com(\/.+\.jpg)')
    imgurllist = []
    for imgurl in imglist:
        imgsrc = srcre.findall(imgurl)
        imgurllist.append('http://img.mukewang.com' + imgsrc[0])
    imgdir = os.path.join(os.path.abspath('.'), 'image')
    os.makedirs(imgdir, exist_ok=True)
    for x, imgurl in enumerate(imgurllist):
        urllib.request.urlretrieve(imgurl, os.path.join(imgdir, '%s.jpg' % x))
    return imgurllist
html = getHtml("http://www.imooc.com/course/list")
imgurllist = getImg(html)
print(imgurllist) |
22,917 | 595479409e611c52168ed47dad3e6cd1b7ca4e62 | #!/usr/bin/env python3
# coding: utf-8
# ### Import modules#
import csv
import datetime
import ipaddress
import logging
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
class render_graphs():
    """Reads the IP-allocation CSV and renders utilisation graphs as PNGs.

    NOTE(review): the logging handler setup below runs at class-definition
    time (class-level statements), so importing this module has side effects
    (creates data_render.log) -- confirm that is intended.
    """
    #### Globals
    images_directory="./startflask/static/images/"
    # 65534 = usable host addresses in a /16 network.
    total_addresses = 65534
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    # create a file handler
    handler = logging.FileHandler('data_render.log')
    handler.setLevel(logging.INFO)
    # create a logging format
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(handler)
    logger.info('Starting graph rendering.')

    def read_data_gen_graphs(self):
        """Load ../data/new_data.csv and write three PNG charts.

        Produces IP_pct_pie, IP_Addition and IP_Accumulation under
        ./startflask/static/images/.  NOTE(review): bare `df` expression
        statements below are notebook leftovers and are no-ops here.
        """
        # ### Read in the csv and add headers to the columns
        df = pd.read_csv('../data/new_data.csv',
                         names = ['IP_Address', 'Subnet_mask', 'In_use?', 'unix_timestamp'])
        # ### Combine IP_Address and Subnet_mask columns to create IP_Network column
        df['IP_Network'] = df['IP_Address'] + '/' + df['Subnet_mask'].map(str)
        df
        # ### Convert IP_Address column to ipaddress.ip_address object and IP_Network column to ipaddress.ip_network object
        df['IP_Address'] = df['IP_Address'].apply(ipaddress.ip_address)
        df['IP_Network'] = df['IP_Network'].apply(ipaddress.ip_network)
        df
        # ### Create dictionary for ratio of 24 to 22 subnet masks
        dic = {22:4, 24:1}
        # # Add new column to hold ratio values (number of /24s per network)
        df['mask_conversion'] = df['Subnet_mask'].map(dic)
        # ### Display new dataframe to verify new column addition
        df
        # ### Create new column for number of addresses per subnet
        df['num_addresses'] = (df['mask_conversion']*256)
        df
        # ### Output current dataframe
        self.logger.info(df)
        # ### Summing num_addresses column to get count of total IP addresses
        IP_count = df['num_addresses'].sum()
        count_24s = df['mask_conversion'].sum()
        self.logger.info("number of /24s:" + str(count_24s))
        # ### total addresses = number of addresses in a /16 network
        # ### Caluculated percent of a /16 network
        total_24s_per_16 = 256
        pct_16_used = (count_24s/total_24s_per_16)*100
        self.logger.info("percent used: {}".format(pct_16_used))
        # ### Pie chart of /24 usage per /16 network
        labels = 'IPs added', 'unused network'
        fracs = [IP_count, (self.total_addresses - IP_count)]
        explode = (0.05, 0)
        grid = GridSpec(1,1)
        plt.subplot(grid[0,0], aspect=1)
        plt.pie(fracs, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True)
        plt.title("Percentage of /16 network utilized")
        plt.savefig("./startflask/static/images/IP_pct_pie", dpi=100)
        self.logger.info("wrote file IP_pct_pie")
        # ### Convert unix_timestamp to datetime and display new dataframe
        df['date'] = pd.to_datetime(df['unix_timestamp'], unit='s')
        df
        # ### Creating new dataframe for analyzing ip address additions over time
        ip_per_10sec = df[['date', 'mask_conversion']]
        # ### Setting index to date
        ip_per_10sec = ip_per_10sec.set_index(['date'])
        ip_per_10sec
        # ### Grouping by Date and summing the number of addresses per date group
        ip_per_10sec = ip_per_10sec.groupby(['date']).sum()
        # ### Number of IP addresses created every 10 seconds
        ip_per_10sec
        # ### Plot of number of IP addresses added to network at each point in time
        ###get_ipython().run_line_magic('matplotlib', 'inline')
        ip_per_10sec.plot()
        plt.ylabel("Number or IP Addresses")
        plt.xlabel("time")
        plt.title("IP Addresses added to network over time")
        plt.savefig("./startflask/static/images/IP_Addition", dpi=100)
        self.logger.info("wrote file IP_Addition")
        # ### Adding column to hold cumulative summation of IP addresses over time
        ip_per_10sec['ip_cumul'] = ip_per_10sec['mask_conversion'].cumsum()
        ip_per_10sec
        # ### Unnecessarily creating new dataframe to plot accumulation of ip addresses on network over time
        ip = ip_per_10sec.reset_index()
        ips_over_time = ip[['date', 'ip_cumul']]
        ips_over_time = ips_over_time.set_index(['date'])
        ips_over_time
        # ### Plot of accumulation of IP adddresses on network over time
        ips_over_time.plot()
        plt.ylabel("Number or IP Addresses")
        plt.xlabel("time")
        plt.title("Accumulation of IP Addresses on network over time")
        plt.savefig( os.path.join(self.images_directory, "IP_Accumulation"), dpi=100)
        self.logger.info("wrote file IP_Accumulation")
if __name__ == '__main__':
render=render_graphs()
render.read_data_gen_graphs()
|
22,918 | 883c2352395bcfb82ef7e5ff6bc6441d88ee213c | #Question 1:
# ---- Question 1: do the two cutlet units have the same mean diameter? ----
import pandas as pd
import scipy
import numpy as np
from scipy import stats
# NOTE(review): hard-coded absolute Windows path — runs only on the author's machine.
data=pd.read_csv("E:\\Assignments\\Assignment week 8\\Hypothesis\\Assignments\\cutlets.csv")
data1=data.iloc[0:35,]  # keep the first 35 rows
# NOTE(review): this mean() result is discarded (no effect outside a REPL) and it
# runs before the columns are renamed below — confirm it was meant to be printed.
data1.mean()
data1.columns = "cutlet1", "cutlet2"
#Normality test , we assume that H0: the data is normal ; H1: our data is not normal
# NOTE(review): the Shapiro/Levene/t-test return values below are all discarded;
# in script mode nothing is printed — the conclusions in the comments were read
# from an interactive session.
stats.shapiro(data1.cutlet1)
stats.shapiro(data1.cutlet2)
# As both the Normality test are saying that p>0.05 so we cannot reject H0
#hence we proceed with variance test
scipy.stats.levene(data1.cutlet1, data1.cutlet2)
# p-value = 0.417616 > 0.05 so p high null fly => Equal variances
# 2 Sample T test
scipy.stats.ttest_ind(data1.cutlet1, data1.cutlet2)
#Hence the T-test values is 0.4722394724599501>0.05. According to the conditon p high null fly
#We can conclude that both the cutlets are of same size.
#************************************************************************************************************************************
#Question 2:
# ---- One-way ANOVA: do the four laboratories differ in mean turnaround time? ----
import pandas as pd
import scipy
import numpy as np
from scipy import stats
# NOTE(review): hard-coded absolute Windows path — runs only on the author's machine.
q2=pd.read_csv("E:\\Assignments\\Assignment week 8\\Hypothesis\\Assignments\\lab_tat_updated.csv")
q2.columns  # NOTE(review): bare expression, discarded in script mode
#Normality test , we assume that H0: the data is normal ; H1: our data is not normal
stats.shapiro(q2.Laboratory_1)
stats.shapiro(q2.Laboratory_2)
stats.shapiro(q2.Laboratory_3)
stats.shapiro(q2.Laboratory_4)
# As both the Normality test are saying that p>0.05 so we cannot reject H0
#hence we proceed with variance test
scipy.stats.levene(q2.Laboratory_1, q2.Laboratory_2,q2.Laboratory_3,q2.Laboratory_4)
# p-value = 0.417616 > 0.05 so p high null fly => Equal variances
# One way anova test
q2.columns
F, p = stats.f_oneway(q2.Laboratory_1, q2.Laboratory_2 , q2.Laboratory_3,q2.Laboratory_4)
p  # NOTE(review): bare expression — only visible in a REPL; use print(p) in a script
# As the p value is 2.1453e-58 <0.05 , So p low H0 go, So we are rejecting H0 and we can conclude that yes there is difference in the
#average Turn Around Time (TAT) of reports of the laboratories on their preferred list
#******************************************************************************************************************************************
#question3
# ---- Chi-square test: is the male/female buyer ratio similar across regions? ----
import pandas as pd
import scipy
import numpy as np
from scipy import stats
BuyerRatio = pd.read_csv("E:\\Assignments\\Assignment week 8\\Hypothesis\\Assignments\\BuyerRatio.csv")
#count=pd.crosstab(BuyerRatio[""],BuyerRatio[""])
# Reshape wide region columns into long format: one row per (index, region, value).
BuyerRatios = pd.melt(BuyerRatio.reset_index(),id_vars=['index'], value_vars=['East','West','North','South'],var_name=['regions'])
#BuyerRatios.columns=['MaleFemale','regions','values']
# NOTE(review): `BuyerRatios.index` is the DataFrame's RangeIndex attribute, NOT
# the 'index' column created by reset_index() — likely intended
# BuyerRatios['index']; verify against the original analysis.
count=pd.crosstab(BuyerRatios.index,BuyerRatios.value)
#countrename=pd.crosstab(BuyerRatios.MaleFemale,BuyerRatios.values)
Chisquares_results=scipy.stats.chi2_contingency(count)
# Row of labels plus the observed statistic and p-value for display.
Chi_square=[['','Test Statistic','p-value'],['Sample Data',Chisquares_results[0],Chisquares_results[1]]]
Chi_square  # NOTE(review): bare expression, discarded in script mode
# As the chi_square value is greater than 0.05 According to our H0 we can say that male-female buyer rations are similar across regions
#***********************************************************************************************************************************
#Question4
# ---- Chi-square test: does the defect rate vary by processing centre? ----
import pandas as pd
import scipy
import numpy as np
from scipy import stats
q4=pd.read_csv("E:/Assignments/Assignment week 8/Hypothesis/Assignments/CustomerOrderform.csv")
q4=q4.iloc[0:300,]  # keep the first 300 rows
from statsmodels.stats.proportion import proportions_ztest
# Per-centre counts of 'Error Free' vs 'Defective' forms.
tab1 = q4.India.value_counts()
tab1
tab2 = q4.Malta.value_counts()
tab2
tab3 = q4.Indonesia.value_counts()
tab3
tab4 = q4.Phillippines.value_counts()
tab4
# One row per centre, columns are the 'Error Free'/'Defective' counts.
q4_1=pd.DataFrame([tab1,tab2,tab3,tab4])
# NOTE(review): this crosstab tabulates the count values themselves rather than
# using q4_1 directly as the contingency table — confirm
# chi2_contingency(q4_1) was not the intent.
q4data=pd.crosstab(q4_1["Error Free"],q4_1["Defective"])
q4data
chi_results=scipy.stats.chi2_contingency(q4data)
chi_final=[['Test statistic','p-value'] ,[chi_results[0],chi_results[1]]]
chi_final  # NOTE(review): bare expression, discarded in script mode
# Hence our value is >0.05 occording our H0 hypothesis i.e The defects varies by centre is true..
#**************************************************************************************************************************
#Question 5
# ---- Two-proportion z-test: male visitor share, weekdays vs. weekends ----
import pandas as pd
import scipy
import numpy as np
from scipy import stats
from statsmodels.stats.proportion import proportions_ztest
# NOTE(review): hard-coded absolute Windows path — runs only on the author's machine.
q5=pd.read_csv("E:/Assignments/Assignment week 8/Hypothesis/Assignments/Fantaloons.csv")
q5.columns  # NOTE(review): bare expression, discarded in script mode
table=pd.crosstab(q5['Weekdays'],q5['Weekend'])
table
c=np.array([233,167])  # successes (male counts) per group
d=np.array([520,280])  # total observations per group
# BUG FIX: the original unpacked the test statistic into a variable named
# `stats`, silently shadowing the `scipy.stats` module imported above and
# breaking any later use of it. Use a dedicated name for the z statistic.
zstat, pval = proportions_ztest(c,d, alternative = 'two-sided')
print(pval)
zstat, pval = proportions_ztest(c,d, alternative = 'larger')
print(pval)
# yes there is evidence at 5 % significance level to support this hypothesis
22,919 | 05785556ca9a446eb93c061837b9d557ef92e13b | print('Abdullah Farooq')
print('18B-104-CS-A')
print('Lab-05, 24-11-2018')
print('Program 6')
def CubeValues():
    """Print the first and last six cubes of 1..30 and return the full list.

    Returns:
        list[int]: cubes of the integers 1 through 30. (Returned for
        testability/reuse; existing callers that ignored the previous
        ``None`` return are unaffected.)
    """
    # Build the cubes with a comprehension instead of a manual append loop.
    cubes = [i ** 3 for i in range(1, 31)]
    print(cubes[:6])   # first six cubes
    print(cubes[-6:])  # last six cubes
    return cubes
CubeValues()
22,920 | 9cb13cde0c026208ae99ebe183faffd26974dbb7 | l1,l2=input().split()
# BUG FIX: the original read `l2 = int(12)`, which discarded the count just
# read from stdin (l1, l2 = input().split() above) and always printed 12
# times; convert the user-supplied value instead.
l2 = int(l2)
for _ in range(l2):
    print(l1)
22,921 | d21a9e7b767f1f36b2962bbe585a57ca1ed8d72f | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import csv
import os
import json
METHODS = ['ANN Single', 'EWC', 'iCaRL', 'GEM', 'SNN']
def average_accuracy(class_num, file_path):
result_map_txt = open(file_path, 'r')
lines = result_map_txt.readlines()
average_accuracy_array = np.zeros(shape=[class_num])
learning_step = []
for id in range(class_num):
learning_step.append('Step%s' % (id + 1))
for r, line in enumerate(lines):
line = line.strip()
cr_list = line.split(',')[:r + 1]
cr_array = np.array(cr_list).astype(np.float32)
average_accuracy_array[r] = cr_array.mean()
accuracy_var = cr_array.var() / 3
return average_accuracy_array
def draw_average_accuracy_of_model(task_num, method, root_dir, param_list, is_val=False):
    """Plot and save average-accuracy curves for one method across parameters.

    Scans ``root_dir`` for one result CSV per entry in ``param_list`` (matched
    either by an ``epoch<p>`` substring or by the 4th ``_``-separated filename
    token equalling ``p``), computes the per-step average accuracy via
    ``average_accuracy``, writes the collected matrix to
    ``<root_dir>/<method><dataset>_average_accuracy.csv``, and saves/shows the
    plot under the same stem.

    Args:
        task_num: 10 (MNIST), 20 (EMNIST20) or 100 (CIFAR100); anything else raises.
        method: method name used in the title/filename (spaces stripped).
        root_dir: directory containing the per-parameter result CSVs.
        param_list: parameter values (e.g. epoch counts) to look up.
        is_val: unused — kept for call-site compatibility.
    """
    # model_list = []
    if task_num == 10:
        dataset = 'MNIST'
        interleave = 1
    elif task_num == 20:
        dataset = 'EMNIST20'
        interleave = 1
    elif task_num == 100:
        dataset = 'CIFAR100'
        interleave = 10
    else:
        raise Exception('unsupport task number!')
    save_path = '%s/%s_%s_average_accuracy'%(root_dir, method.replace(' ', ''), dataset)
    acc_array = np.zeros(shape=[task_num, (len(param_list))])
    # var_array = np.zeros(shape=[task_num, (len(model_list))])
    ex_idx = 0
    for p in param_list:
        dst_file = None
        for file in os.listdir(root_dir):
            if '.csv' in file:
                if 'epoch%d'%p in file:
                    dst_file = file
                elif file.split('_')[3] == str(p):
                    dst_file = file
        if dst_file:
            txt = '%s/%s' % (root_dir, dst_file)
            print(txt)
            acc_array[:,ex_idx] = average_accuracy(task_num, txt)
            ex_idx += 1
    # Drop columns for parameters whose file was not found.
    acc_array = acc_array[:, :ex_idx]
    # var_array = var_array[:, :ex_idx]
    with open('%s.csv' % save_path, 'w') as f:
        writer = csv.writer(f)
        result_a = acc_array.tolist()
        for line in result_a:
            str_line = [str(x) for x in line]
            writer.writerow(str_line)
    accuracy_array = acc_array.transpose()
    # variance_array = var_array.transpose()
    x = range(1,(task_num+1))
    line_list = []
    plt.figure(figsize=(10, 5))
    for r in range(accuracy_array.shape[0]):
        label = 'epoch ' + str(param_list[r])
        line, = plt.plot(x, accuracy_array[r], label=label)
        # plt.fill_between(x, accuracy_array[r]-variance_array[r], accuracy_array[r]+variance_array[r])
        line_list.append(line)
    plt.legend(handles=line_list, loc=3)
    # plt.xticks(x, fontsize=font_size)
    plt.ylim([0, 1])
    plt.xlim([1, task_num-1])
    l = np.arange(0, task_num+1 , interleave)
    l = l[1:]
    if task_num==100:
        t2 = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        plt.xticks(t2)
    else:
        plt.xticks(l)
    plt.ylabel('Average accuracy')
    plt.xlabel('Number of tasks')
    plt.title('%s %s average accuracy' % (method, dataset))
    print(save_path)
    plt.savefig(save_path,)
    plt.show()
def draw_average_accuracy_of_model_4FVs(task_num, method, root_dir, is_val=False):
    """Plot average accuracy for four feature-vector result files, dropout vs. not.

    Collects up to 4 'Dropout' and 4 'WithoutDropout' result CSVs from
    ``root_dir``, computes per-step average accuracy with ``average_accuracy``,
    saves the per-FV arrays and the per-model means to CSV, then draws/saves two
    figures (one per dropout setting).

    Args:
        task_num: 10 (MNIST), 20 (EMNIST20) or 100 (CIFAR100); anything else raises.
        method: method name used in titles/filenames (spaces stripped).
        root_dir: directory of result CSVs; its name must contain 'MLP<structure>'.
        is_val: unused — kept for call-site compatibility.
    """
    # model_list = [
    if task_num == 10:
        dataset = 'MNIST'
        interleave = 1
    elif task_num == 20:
        dataset = 'EMNIST20'
        interleave = 1
    elif task_num == 100:
        dataset = 'CIFAR100'
        interleave = 10
    else:
        raise Exception('unsupport task number!')
    # Network structure string parsed out of the directory name, e.g. '20-67-20'.
    net_structure = root_dir[root_dir.find('MLP')+3: -1]
    save_path = '%s/%s_%s_average_accuracy'%(root_dir, method.replace(' ', ''), dataset)
    # acc_array = np.zeros(shape=[4, task_num])
    # var_array = np.zeros(shape=[task_num, (len(model_list))])
    ex_idx = 0
    acc_array_dropout = np.zeros(shape=[4, task_num])
    acc_array_withoutdropout = np.zeros(shape=[4, task_num])
    avg_array_dropout = np.zeros(shape=[4, 1])
    avg_array_withoutdropout = np.zeros(shape=[4, 1])
    dst_file_dropout_list = []
    dst_file_withoutdropout_list = []
    # Split result CSVs by dropout setting ('WithoutDropout' must be tested first
    # because 'Dropout' is a substring of it).
    for file in os.listdir(root_dir):
        if '.csv' in file and not '.npz' in file:
            if 'WithoutDropout' in file:
                dst_file_withoutdropout_list.append(file)
            elif 'Dropout' in file:
                dst_file_dropout_list.append(file)
            else:
                pass
    for index, file in enumerate(dst_file_dropout_list):
        txt = '%s/%s' % (root_dir, file)
        print(txt)
        acc_array_dropout[index, :] = average_accuracy(task_num, txt)
    for index, file in enumerate(dst_file_withoutdropout_list):
        txt = '%s/%s' % (root_dir, file)
        print(txt)
        acc_array_withoutdropout[index, :] = average_accuracy(task_num, txt)
    # acc_array = acc_array[:, :, :ex_idx]
    # var_array = var_array[:, :ex_idx]
    ############## Saving Data #################
    # One CSV per feature vector; the FV tag (e.g. 'FV1') is cut out of the filename.
    for idx, file in enumerate(dst_file_dropout_list):
        current_FV_name = file[file.find('FV'): file.find('FV') + 3]
        np.savetxt('%s_%s_dropout.csv' %(save_path, current_FV_name), np.array(acc_array_dropout[idx, :]))
    for idx, file in enumerate(dst_file_withoutdropout_list):
        current_FV_name = file[file.find('FV'): file.find('FV') + 3]
        np.savetxt('%s_%s_withoutdropout.csv' % (save_path, current_FV_name), np.array(acc_array_withoutdropout[idx, :]))
    # with open('%s_%s_withoutdropout.csv' % (save_path, current_FV_name), 'w') as f:
    #     # acc = acc_array[idx, :, :]
    #     # for item in acc:
    #     #     f.write(item[0])
    #     #     f.write('\n')
    #     # f.close()
    #     writer = csv.writer(f)
    #     result_a = acc_array_dropout[idx, :].tolist()
    #     for line in result_a:
    #         try:
    #             str_line = [str(x) for x in line]
    #         except:
    #             str_line = line
    #         writer.writerow(str(str_line))
    ############################################
    # Per-FV mean accuracy over all steps (dropout runs only).
    for i in range(avg_array_dropout.shape[0]):
        avg_array_dropout[i] = np.mean(acc_array_dropout[i, :])
    model_avg_save_path = 'result/model_avgacc_avg'
    if not os.path.exists(model_avg_save_path):
        os.mkdir(model_avg_save_path)
    with open('%s/%s.csv' % (model_avg_save_path, net_structure), 'w') as f:
        writer = csv.writer(f)
        result_a = avg_array_dropout.tolist()
        for line in result_a:
            str_line = [str(x) for x in line]
            writer.writerow(str_line)
    ############################
    accuracy_array = acc_array_dropout.transpose()
    # variance_array = var_array.transpose()
    x = range(1,(task_num+1))
    line_list = []
    ######################## figure 1: dropout runs ########################
    plt.figure(figsize=(10, 5))
    for idx, file in enumerate(dst_file_dropout_list):
        current_FV_name = file[file.find('FV'): file.find('FV') + 3]
        line, = plt.plot(x, acc_array_dropout[idx, :], label=current_FV_name)
        # plt.fill_between(x, accuracy_array[r]-variance_array[r], accuracy_array[r]+variance_array[r])
        line_list.append(line)
        # NOTE(review): bare `plt.hold` is a no-op attribute access (and raises
        # AttributeError on matplotlib >= 3.0) — safe to delete.
        plt.hold
    plt.legend(handles=line_list, loc=3)
    # plt.xticks(x, fontsize=font_size)
    plt.ylim([0, 1])
    plt.xlim([1, task_num-1])
    l = np.arange(0, task_num+1 , interleave)
    l = l[1:]
    if task_num==100:
        t2 = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        plt.xticks(t2)
    else:
        plt.xticks(l)
    plt.ylabel('Average accuracy')
    plt.xlabel('Number of tasks')
    plt.title('%s %s average accuracy--(%s)--net_struc: %s' % (method, dataset, 'Using Dropout', net_structure))
    print(save_path)
    plt.savefig(save_path + '_dropout', )
    plt.show()
    plt.close()
    ##################### figure 2: without-dropout runs ####################
    plt.figure(figsize=(10, 5))
    for idx, file in enumerate(dst_file_withoutdropout_list):
        current_FV_name = file[file.find('FV'): file.find('FV') + 3]
        line, = plt.plot(x, acc_array_withoutdropout[idx, :], label=current_FV_name)
        # plt.fill_between(x, accuracy_array[r]-variance_array[r], accuracy_array[r]+variance_array[r])
        line_list.append(line)
        plt.hold
    # plt.legend(handles=line_list, loc=3)
    # plt.xticks(x, fontsize=font_size)
    plt.ylim([0, 1])
    plt.xlim([1, task_num-1])
    plt.legend()
    l = np.arange(0, task_num+1 , interleave)
    l = l[1:]
    if task_num==100:
        t2 = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        plt.xticks(t2)
    else:
        plt.xticks(l)
    plt.ylabel('Average accuracy')
    plt.xlabel('Number of tasks')
    plt.title('%s %s average accuracy--(%s)--net_struc: %s' % (method, dataset, 'Without Dropout', net_structure))
    print(save_path)
    plt.savefig(save_path+'_withoutdropout')
    plt.show()
    ###############################
def average_accuracy_from_txt(class_num, file_path):
    """Read per-step average accuracies from a space-separated text file.

    Each line is expected to end with the accuracy as its last
    space-separated token (e.g. ``"step1 0.75"``); line ``r`` fills slot
    ``r`` of the result.

    Args:
        class_num: number of learning steps (lines expected in the file).
        file_path: path to the text file of accuracies.

    Returns:
        np.ndarray of shape [class_num] with one accuracy per step.
    """
    average_accuracy_array = np.zeros(shape=[class_num])
    # Context manager ensures the handle is closed (the original leaked it).
    with open(file_path, 'r') as result_map_txt:
        lines = result_map_txt.readlines()
    for r, line in enumerate(lines):
        # float() tolerates the trailing newline on the last token.
        average_accuracy_array[r] = float(line.split(' ')[-1])
    return average_accuracy_array
def draw_average_accuracy_of_snn(task_num, method, root_dir):
    """Plot SNN continual-learning average accuracy for each FV subdirectory.

    Each dot-free subdirectory of ``root_dir`` is treated as one feature-vector
    run; its ``ANN_final_result/FINAL_SNN_CL_result*`` file is parsed with
    ``average_accuracy_from_txt``. The collected matrix is written to
    ``<root_dir>/<method>_<dataset>_average_accuracy.csv`` and plotted.

    Args:
        task_num: 10 (MNIST), 20 (EMNIST20) or 100 (CIFAR100); anything else raises.
        method: method name used in the title/filename (spaces stripped).
        root_dir: directory whose subdirectories hold per-FV SNN results.
    """
    model_list = []  # NOTE(review): unused — confirm before removing
    if task_num == 10:
        dataset = 'MNIST'
        interleave = 1
    elif task_num == 20:
        dataset = 'EMNIST20'
        interleave = 1
    elif task_num == 100:
        dataset = 'CIFAR100'
        interleave = 10
    else:
        raise Exception('unsupport task number!')
    dir_list = os.listdir(root_dir)
    dataset_num = 4  # at most 4 feature-vector runs are collected
    acc_array = np.zeros(shape=[task_num, dataset_num])
    ex_idx = 0
    fv_name_list = []
    for cur_dir in dir_list:
        if '.' in cur_dir:
            continue  # skip files; only FV subdirectories are of interest
        dst_file = None
        for file in os.listdir(os.path.join(root_dir, cur_dir, 'ANN_final_result')):
            if ('FINAL_SNN_CL_result') in file:
                dst_file = file
        if dst_file:
            txt = os.path.join(root_dir, cur_dir, 'ANN_final_result', dst_file)
            print(txt)
            acc_array[:,ex_idx] = average_accuracy_from_txt(task_num, txt)
            fv_name = cur_dir
            fv_name_list.append(fv_name)
            ex_idx += 1
    save_path = '%s/%s_%s_average_accuracy'%(root_dir, method.replace(' ', ''), dataset)
    acc_array = acc_array[:,:ex_idx]  # drop unused columns
    with open('%s.csv'%save_path, 'w', newline='') as f:
        writer = csv.writer(f)
        result_a = acc_array.tolist()
        for line in result_a:
            str_line = [str(x) for x in line]
            writer.writerow(str_line)
    accuracy_array = acc_array.transpose()
    x = range(1,(task_num+1))
    line_list = []
    plt.figure(figsize=(10, 5))
    for r in range(accuracy_array.shape[0]):
        line, = plt.plot(x, accuracy_array[r], label=fv_name_list[r])
        line_list.append(line)
    plt.legend(handles=line_list, loc=3)
    plt.ylim([0, 1])
    plt.xlim([1, task_num-1])
    l = np.arange(0, task_num+1 , interleave)
    l = l[1:]
    plt.xticks(ticks=l)
    plt.ylabel('Average accuracy')
    plt.xlabel('Number of tasks')
    plt.title('%s %s average accuracy' % (method, dataset))
    plt.savefig(save_path)
    plt.show()
def plotting_diff_model_avgacc(dir, task_num, fv_index, hidden_num=100):
    """Compare MLP width/depth variants against SNN baselines for one FV.

    Produces two figures: (1) 3-layer MLPs of varying hidden width, and
    (2) deeper MLPs of varying depth (plus the ``hidden_num``-wide 1-hidden-layer
    model), each overlaid with the SNN-GC and SNN-CA3 no-memory baselines loaded
    via ``Load_SNN_Result``. Figures are saved under ``result/``.

    Args:
        dir: directory containing per-model 'MLP<structure>' result folders.
        task_num: 10 (MNIST), 20 (EMNIST20) or 100 (CIFAR100).
        fv_index: feature-vector index selecting which 'FV<i>' CSVs to plot.
        hidden_num: hidden width of the single-hidden-layer reference model.
    """
    plt.figure(figsize=(12, 8))
    # color_list = ['g', 'c', 'm', 'y', 'k', 'darkviolet', 'midnightblue', 'peru', 'deepskyblue', 'darkorchid', 'brown', 'deeppink', 'black', 'coral',
    #               'chartreuse', 'yellow', 'darkorange', 'indigo']
    color_list = ['black', 'gray', 'lightcoral', 'red', 'orangered', 'saddlebrown', 'peru', 'darkorange', 'gold', 'olive',
                  'yellowgreen', 'lawngreen', 'palegreen', 'cyan', 'dodgerblue', 'slategray', 'midnightblue', 'indigo', 'deeppink', 'crimson']
    t = range(1, (task_num + 1))
    plot_num = 0
    # NOTE(review): no else-branch — an unsupported task_num leaves dataset /
    # SNN_STRUCTURE unbound and fails later with NameError.
    if task_num == 10:
        dataset = 'MNIST'
        orig_hidden_dim = 50
        SNN_STRUCTURE='15-50-10'
    elif task_num == 20:
        dataset = 'EMNIST20'
        orig_hidden_dim = 67
        SNN_STRUCTURE = '20-67-20'
    elif task_num == 100:
        dataset = 'CIFAR100'
        orig_hidden_dim = 167
        SNN_STRUCTURE = '50-167-100'
    SNN_result_dir = 'SNN_result/'
    SNN_RESULT = Load_SNN_Result(SNN_result_dir, task_num, fv_index=fv_index, neuron_model='GC')
    SNN_CA3_RESULT = Load_SNN_Result(SNN_result_dir, task_num, fv_index=fv_index, neuron_model='CA3')
    ###################### plotting MLP width change width result ##########################
    for each_model_dir in os.listdir(dir):
        if 'MLP' in each_model_dir:
            current_model_dir = dir + each_model_dir + '/'
            current_model_structure = each_model_dir[each_model_dir.find('MLP'):]
            # 3 tokens => exactly one hidden layer (width-variation family).
            if len(current_model_structure.split('-'))==3 and int(current_model_structure.split('-')[-1])==task_num:
                for file in os.listdir(current_model_dir):
                    if '.csv' in file and 'FV%s'%fv_index in file and 'withoutdropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = current_model_structure + ',withoutdropout'
                        # if plot_num>=10:
                        #     if int(current_model_structure.split('-')[1])==orig_hidden_dim:
                        #         plt.plot(t, current_model_result, color=color_list[plot_num-10], label=current_label, marker='o')
                        #     else:
                        #         plt.plot(t, current_model_result, color=color_list[plot_num-10], label=current_label)
                        # else:
                        # Mark the original hidden width with circle markers.
                        if int(current_model_structure.split('-')[1])==orig_hidden_dim:
                            plt.plot(t, current_model_result, color=color_list[plot_num], label=current_label, marker='o')
                        else:
                            plt.plot(t, current_model_result, color=color_list[plot_num], label=current_label)
                        # NOTE(review): bare `plt.hold` is a no-op (removed in matplotlib >= 3.0).
                        plt.hold
                        plot_num+=1
                    elif '.csv' in file and 'FV%s'%fv_index in file and 'dropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = current_model_structure + ',dropout(0.5)'
                        # if plot_num>=10:
                        #     if int(current_model_structure.split('-')[1])==orig_hidden_dim:
                        #         plt.plot(t, current_model_result, color=color_list[plot_num-10], label=current_label)#, linestyle='-.')
                        #     else:
                        #         plt.plot(t, current_model_result, color=color_list[plot_num-10], label=current_label)
                        # else:
                        if int(current_model_structure.split('-')[1])==orig_hidden_dim:
                            plt.plot(t, current_model_result, color=color_list[plot_num], label=current_label)#, linestyle='-.')
                        else:
                            plt.plot(t, current_model_result, color=color_list[plot_num], label=current_label)
                        plt.hold
                        plot_num += 1
                    else:
                        pass
    plt.plot(t, SNN_RESULT, color=color_list[int(plot_num / 2)], marker='*', label='SNN-GC-(%s)WithoutUsingMemory' % SNN_STRUCTURE)
    plot_num+=2
    plt.hold
    plt.plot(t, SNN_CA3_RESULT, color=color_list[int(plot_num / 2)], marker='*',
             label='SNN-CA3-(%s)WithoutUsingMemory' % SNN_STRUCTURE)
    plot_num+=1
    plt.ylabel('Average accuracy')
    plt.xlabel('Number of tasks')
    plt.ylim([0, 1.1])
    if task_num==100:
        t2 = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        plt.xticks(t2)
    else:
        plt.xticks(t)
    plt.title('%s--Change MLP hidden layer (width) in iCaRL, FV%s, average accuracy'%(dataset, fv_index))
    plt.legend()
    plt.savefig('result/%s_change_model_width_FV%d_result.png'%(dataset, fv_index))
    plt.show()
    #############################################################################
    ###################### plotting MLP width change Depth result ##########################
    plt.figure(figsize=(12, 8))
    color_num_list = []
    temp_dropout_num = 0
    temp_nodropout_num = 0
    for idx, each_model_dir in enumerate(os.listdir(dir)):
        if 'MLP' in each_model_dir:
            current_model_dir = dir + each_model_dir + '/'
            current_model_structure = each_model_dir[each_model_dir.find('MLP'):]
            # More than 3 tokens => multiple hidden layers (depth-variation family).
            if not len(current_model_structure.split('-'))==3 and int(current_model_structure.split('-')[-1])==task_num:
                input_dim, output_dim = current_model_structure.split('-')[0], current_model_structure.split('-')[-1]
                hidden_dim = current_model_structure.split('-')[1]
                current_mlp_hiddenlayer_number = len(current_model_structure.split('-')) - 2
                for file in os.listdir(current_model_dir):
                    if '.csv' in file and 'FV%s'%fv_index in file and 'withoutdropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = '%s-%s[%sHiddenLayers]-%s'%(input_dim, hidden_dim, current_mlp_hiddenlayer_number, output_dim) + ',withoutdropout'
                        # NOTE(review): pairs dropout/non-dropout colors via color_num_list;
                        # relies on the dropout file being visited first for each model.
                        plt.plot(t, current_model_result, color=color_list[int(color_num_list[temp_nodropout_num])*4], label=current_label)
                        plt.hold
                        temp_nodropout_num+=1
                    elif '.csv' in file and 'FV%s'%fv_index in file and 'dropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = '%s-%s[%sHiddenLayers]-%s'%(input_dim, hidden_dim, current_mlp_hiddenlayer_number, output_dim) + ',dropout(0.5)'
                        color_num_list.append(temp_dropout_num)
                        plt.plot(t, current_model_result, color=color_list[int(color_num_list[temp_nodropout_num])*4], label=current_label, linestyle=':')
                        plt.hold
                        temp_dropout_num += 1
                    else:
                        pass
            ######### plotting 1 layers result ##################
            elif int(current_model_structure.split('-')[1])==hidden_num and int(current_model_structure.split('-')[-1])==task_num:
                input_dim, output_dim = current_model_structure.split('-')[0], current_model_structure.split('-')[-1]
                hidden_dim = current_model_structure.split('-')[1]
                current_mlp_hiddenlayer_number = len(current_model_structure.split('-')) - 2
                for file in os.listdir(current_model_dir):
                    if '.csv' in file and 'FV%s'%fv_index in file and 'withoutdropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = '%s-%s[%sHiddenLayers]-%s'%(input_dim, hidden_dim, current_mlp_hiddenlayer_number, output_dim) + ',withoutdropout'
                        plt.plot(t, current_model_result, color=color_list[-7], label=current_label)
                        plt.hold
                        plot_num+=1
                    elif '.csv' in file and 'FV%s'%fv_index in file and 'dropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = '%s-%s[%sHiddenLayers]-%s'%(input_dim, hidden_dim, current_mlp_hiddenlayer_number, output_dim) + ',dropout(0.5)'
                        plt.plot(t, current_model_result, color=color_list[-7], label=current_label, linestyle=':')
                        plt.hold
                        plot_num += 1
                    else:
                        pass
    plt.plot(t, SNN_RESULT, color=color_list[-2], marker='*', label='SNN-GC-(%s)WithoutUsingMemory'%SNN_STRUCTURE)
    plt.hold
    plot_num += 2
    plt.plot(t, SNN_CA3_RESULT, color=color_list[-1], marker='*',
             label='SNN-CA3-(%s)WithoutUsingMemory' % SNN_STRUCTURE)
    plot_num += 1
    plt.ylabel('Average accuracy')
    plt.xlabel('Number of tasks')
    plt.ylim([0,1.1])
    if task_num==100:
        t2 = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        plt.xticks(t2)
    else:
        plt.xticks(t)
    plt.title('%s--Change MLP hidden layer (Depth) in iCaRL, FV%s, average accuracy'%(dataset, fv_index))
    plt.legend()
    plt.savefig('result/%s_change_model_depth_FV%d_result.png'%(dataset, fv_index))
    plt.show()
def Load_SNN_Result(dir, task_num, fv_index, neuron_model='GC'):
    """Load per-step SNN accuracies for one dataset / FV / neuron model.

    Walks ``dir`` for the matching dataset directory (skipping '.rar'
    archives), descends into the ``neuron_model`` and matching FV
    subdirectories, and parses the last space-separated token of each line of
    the 'FINAL_SNN_CL*' file inside 'SNN_final_result'. A copy of the result
    is cached under 'result/SNN_Result/'.

    Args:
        dir: root directory of SNN results (trailing '/' expected).
        task_num: 10 (MNIST10), 20 (EMNIST20) or 100 (CIFAR100).
        fv_index: feature-vector index; matched by substring in dir names.
        neuron_model: 'GC' or 'CA3' subdirectory to read.

    Returns:
        np.ndarray of shape [task_num]; zeros where no file was found.
    """
    result = np.zeros(shape=[task_num])
    saving_path = 'result/SNN_Result/'
    if not os.path.exists(saving_path):
        os.mkdir(saving_path)
    # NOTE(review): no else-branch — an unsupported task_num leaves `dataset`
    # unbound and the loop below raises NameError.
    if task_num == 10:
        dataset = 'MNIST10'
        orig_hidden_dim = 50
    elif task_num == 20:
        dataset = 'EMNIST20'
        orig_hidden_dim = 67
    elif task_num == 100:
        dataset = 'CIFAR100'
        orig_hidden_dim = 100
    for each_dataset_dir in os.listdir(dir):
        if each_dataset_dir.split('-')[0]==dataset and not '.rar' in each_dataset_dir:
            for neuron_model_dir in os.listdir(dir + each_dataset_dir):
                if neuron_model_dir==neuron_model:
                    ####
                    # NOTE(review): the loop variable is re-bound to a deeper path here;
                    # works only because at most one neuron_model_dir matches.
                    each_dataset_dir = each_dataset_dir + '/' + neuron_model_dir
                    for each_fv_dir in os.listdir(dir + each_dataset_dir):
                        if str(fv_index) in each_fv_dir:
                            for sub_dir in os.listdir(dir + each_dataset_dir + '/' + each_fv_dir):
                                if sub_dir=='SNN_final_result':
                                    for file in os.listdir(dir + each_dataset_dir + '/' + each_fv_dir + '/' + sub_dir):
                                        if 'FINAL_SNN_CL' in file:
                                            path = dir + each_dataset_dir + '/' + each_fv_dir + '/' + sub_dir + '/'
                                            f = open(path + file)
                                            temp = f.readlines()
                                            f.close()
                                            # Accuracy is the last space-separated token per line.
                                            for i, item in enumerate(temp):
                                                result[i] = float(item.split(' ')[-1])
    np.savetxt(saving_path + '%s_FV%s_result.csv' % (dataset, fv_index), result)
    return result
def Load_SNN_UsingMemoryResult(dir, task_num, fv_index, neuron_model='GC', Memory=1):
    """Load per-step SNN accuracies for a run using a given memory budget.

    Like ``Load_SNN_Result`` but additionally requires the dataset directory
    name to contain 'MEMORY(<Memory>)', and scans every subdirectory (not only
    'SNN_final_result'). Results are cached under 'result/SNN_Result/'.

    Args:
        dir: root directory of SNN results (trailing '/' expected).
        task_num: 10 (MNIST10), 20 (EMNIST20) or 100 (CIFAR100).
        fv_index: feature-vector index; matched by substring in dir names.
        neuron_model: 'GC' or 'CA3' subdirectory to read.
        Memory: memory budget tag expected in the directory name.

    Returns:
        np.ndarray of shape [task_num]; zeros where no file was found.
    """
    result = np.zeros(shape=[task_num])
    saving_path = 'result/SNN_Result/'
    if not os.path.exists(saving_path):
        os.mkdir(saving_path)
    # NOTE(review): no else-branch — an unsupported task_num leaves `dataset`
    # unbound and the loop below raises NameError.
    if task_num == 10:
        dataset = 'MNIST10'
        orig_hidden_dim = 50
    elif task_num == 20:
        dataset = 'EMNIST20'
        orig_hidden_dim = 67
    elif task_num == 100:
        dataset = 'CIFAR100'
        orig_hidden_dim = 100
    for each_dataset_dir in os.listdir(dir):
        if each_dataset_dir.split('-')[0]==dataset and not '.rar' in each_dataset_dir and 'MEMORY({})'.format(Memory) in each_dataset_dir:
            for neuron_model_dir in os.listdir(dir + each_dataset_dir):
                if neuron_model_dir==neuron_model:
                    ####
                    # NOTE(review): loop variable re-bound to a deeper path (see Load_SNN_Result).
                    each_dataset_dir = each_dataset_dir + '/' + neuron_model_dir
                    for each_fv_dir in os.listdir(dir + each_dataset_dir):
                        if str(fv_index) in each_fv_dir:
                            for sub_dir in os.listdir(dir + each_dataset_dir + '/' + each_fv_dir):
                                for file in os.listdir(dir + each_dataset_dir + '/' + each_fv_dir + '/' + sub_dir):
                                    if 'FINAL_SNN_CL' in file:
                                        path = dir + each_dataset_dir + '/' + each_fv_dir + '/' + sub_dir + '/'
                                        f = open(path + file)
                                        temp = f.readlines()
                                        f.close()
                                        # Accuracy is the last space-separated token per line.
                                        for i, item in enumerate(temp):
                                            result[i] = float(item.split(' ')[-1])
    np.savetxt(saving_path + '%s_FV%s_Using(%s)Memory_result.csv' % (dataset, fv_index, Memory), result)
    return result
def Load_SNN_fewshot_Result(dir, task_num, fv_index, neuron_model='GC', traintest = 'TRAIN10TEST50'):
    """Load per-step SNN accuracies for a few-shot train/test split.

    Like ``Load_SNN_Result`` but with an extra directory level selecting the
    train/test configuration (e.g. 'TRAIN10TEST50'). Results are cached under
    'result/SNN_fewshot_Result/'.

    Args:
        dir: root directory of SNN few-shot results (trailing '/' expected).
        task_num: 10 (MNIST10), 20 (EMNIST20) or 100 (CIFAR100).
        fv_index: feature-vector index; matched by substring in dir names.
        neuron_model: 'GC' or 'CA3' subdirectory to read.
        traintest: name of the train/test split subdirectory to descend into.

    Returns:
        np.ndarray of shape [task_num]; zeros where no file was found.
    """
    result = np.zeros(shape=[task_num])
    saving_path = 'result/SNN_fewshot_Result/'
    if not os.path.exists(saving_path):
        os.mkdir(saving_path)
    # NOTE(review): no else-branch — an unsupported task_num leaves `dataset`
    # unbound and the loop below raises NameError.
    if task_num == 10:
        dataset = 'MNIST10'
        orig_hidden_dim = 50
    elif task_num == 20:
        dataset = 'EMNIST20'
        orig_hidden_dim = 67
    elif task_num == 100:
        dataset = 'CIFAR100'
        orig_hidden_dim = 100
    for each_dataset_dir in os.listdir(dir):
        if each_dataset_dir.split('-')[0]==dataset and not '.rar' in each_dataset_dir:
            for neuron_model_dir in os.listdir(dir + each_dataset_dir):
                if neuron_model_dir==neuron_model:
                    ####
                    # NOTE(review): loop variable re-bound to a deeper path (see Load_SNN_Result).
                    each_dataset_dir = each_dataset_dir + '/' + neuron_model_dir
                    for train_test in os.listdir(dir + each_dataset_dir):
                        if train_test==traintest:
                            each_dataset_dir = each_dataset_dir + '/' + train_test
                            for each_fv_dir in os.listdir(dir + each_dataset_dir):
                                if str(fv_index) in each_fv_dir:
                                    for sub_dir in os.listdir(dir + each_dataset_dir + '/' + each_fv_dir):
                                        for file in os.listdir(dir + each_dataset_dir + '/' + each_fv_dir + '/' + sub_dir):
                                            if 'FINAL_SNN_CL' in file:
                                                path = dir + each_dataset_dir + '/' + each_fv_dir + '/' + sub_dir + '/'
                                                f = open(path + file)
                                                temp = f.readlines()
                                                f.close()
                                                # Accuracy is the last space-separated token per line.
                                                for i, item in enumerate(temp):
                                                    result[i] = float(item.split(' ')[-1])
    np.savetxt(saving_path + '%s_FV%s_result.csv' % (dataset, fv_index), result)
    return result
def draw_bias_difference(dir, fv_index, task_num=20):
    """Plot average-accuracy curves for 'nobias' vs 'withbias' EMNIST20 runs.

    Each subdirectory of ``dir`` whose name contains 'nobias' or 'withbias'
    is scanned for CSVs matching the given FV index whose name contains
    'average'; every match is plotted (dropout state is read from the
    filename).

    Args:
        dir: root directory containing the bias-variant result folders
            (Windows-style paths; backslash separator is appended).
        fv_index: feature-vector index used to select 'FV<i>' CSVs.
        task_num: number of tasks on the x-axis (default 20).
    """
    plt.figure(figsize=(10, 5))
    t = range(1, (task_num + 1))
    for bias_dir in os.listdir(dir):
        if 'nobias' in bias_dir:
            path = dir + bias_dir + '\\'
            bias_name = 'nobias'
        elif 'withbias' in bias_dir:
            path = dir + bias_dir + '\\'
            bias_name = 'withbias'
        else:
            # BUG FIX: directories matching neither pattern previously fell
            # through and re-used the previous iteration's `path`/`bias_name`
            # (or raised NameError on the first iteration) — skip them.
            continue
        for file in os.listdir(path):
            if '.csv' in file and 'FV{}'.format(fv_index) in file and 'average' in file:
                if 'withoutdropout' in file:
                    drop_name = 'withoutdropout'
                else:
                    drop_name = 'dropout'
                data = np.loadtxt(path + file)
                # (removed no-op `plt.hold` — an AttributeError on matplotlib >= 3.0)
                plt.plot(t, data, label='EMNIST20_20-67-20_FV{}_{}_{}'.format(fv_index, bias_name, drop_name))
    plt.xticks(t)
    plt.ylim([0, 1.1])
    plt.legend()
    plt.show()
def draw_patience_difference(dir, fv_index, task_num=20):
    """Plot without-dropout average accuracy for 'withbias' runs by patience.

    Only subdirectories of ``dir`` containing 'withbias' are plotted; the
    early-stopping patience is parsed from the last '_'-separated token of the
    directory name (first character dropped). 'nobias' directories are
    recognised but skipped.

    Args:
        dir: root directory containing the bias-variant result folders.
        fv_index: feature-vector index used to select 'FV<i>' CSVs.
        task_num: number of tasks on the x-axis (default 20).
    """
    plt.figure(figsize=(10, 5))
    t = range(1, (task_num + 1))
    for bias_dir in os.listdir(dir):
        # NOTE(review): a directory matching neither branch leaves `path` /
        # `bias_name` stale from a previous iteration (or unbound on the
        # first) — consider an explicit `continue` as in draw_bias_difference.
        if 'nobias' in bias_dir:
            path = dir + bias_dir + '\\'
            bias_name = 'nobias'
            pass
        elif 'withbias' in bias_dir:
            path = dir + bias_dir + '\\'
            bias_name = 'withbias'
            # Patience encoded as the last '_' token, e.g. '..._p5' -> 5.
            patience = int(bias_dir.split('_')[-1][1:])
            for file in os.listdir(path):
                if '.csv' in file and 'FV{}'.format(fv_index) in file and 'average' in file:
                    if 'withoutdropout' in file:
                        drop_name = 'withoutdropout'
                        file_path = path + file
                        data = np.loadtxt(file_path)
                        plt.plot(t, data, label='EMNIST20_20-67-20_FV{}_{}_{},patience:{}'.format(fv_index, bias_name, drop_name, patience))
                        # NOTE(review): bare `plt.hold` is a no-op (removed in matplotlib >= 3.0).
                        plt.hold
                    else:
                        drop_name = 'dropout'  # dropout files are intentionally not plotted
    plt.xticks(t)
    plt.ylim([0, 1.1])
    plt.legend()
    plt.show()
def draw_diff_samples_acc_result(dir, fv_index, dataset_nums=3, task_num=20, drop='withoutdropout'):
    """Compare iCaRL MLP accuracy across train/test sample sizes, with SNN baselines.

    Scans 'train*' subdirectories of ``dir`` for per-dataset 'average' CSVs of
    the given FV index, plots those whose name contains ``drop``, then overlays
    SNN-GC few-shot and full-data baselines loaded via
    ``Load_SNN_fewshot_Result`` / ``Load_SNN_Result``. The figure is saved
    under ``<dir>result\\``.

    Args:
        dir: root directory containing 'train<X>test<Y>' result folders.
        fv_index: feature-vector index used to select 'FV<i>' CSVs.
        dataset_nums: number of datasets — only affects the color palette.
        task_num: 10 (MNIST), 20 (EMNIST20) or 100 (CIFAR100).
        drop: substring selecting which files to plot ('withoutdropout'/'dropout').
    """
    # NOTE(review): no else-branch — an unsupported task_num leaves `dataset` unbound.
    if task_num == 10:
        dataset = 'MNIST'
        orig_hidden_dim = 50
    elif task_num == 20:
        dataset = 'EMNIST20'
        orig_hidden_dim = 67
    elif task_num == 100:
        dataset = 'CIFAR100'
        orig_hidden_dim = 100
    t = range(1, (task_num + 1))
    if task_num==100:
        color_list = ['blue', 'r', 'g']
    else:
        if dataset_nums==4:
            color_list = ['pink', 'r', 'g', 'blue', 'yellow', 'orange', 'cyan']
        else:
            color_list = ['r', 'g', 'blue', 'pink', 'yellow', 'orange', 'cyan']
    nodrop_data_index = 0
    drop_data_index = 0
    plt.figure(figsize=(10, 5))
    for dir1 in os.listdir(dir):
        if 'train' in dir1:
            name = dir1.split('-')[-1]
            # Unequal train/test counts mark a few-shot configuration.
            temp_list = name.split('train')[-1].split('test')
            if not temp_list[0]==temp_list[1]:
                use_name = '(fewshotlearning)'
            else:
                use_name = ''
            path = dir + dir1 + '\\'
            for dataset_dir in os.listdir(path):
                if dataset_dir.split('_')[0] == dataset:
                    # NOTE(review): net_struc stays unbound if no directory matches —
                    # the title/savefig below would then raise NameError.
                    net_struc = dataset_dir.split('MLP')[-1]
                    path = path + dataset_dir + '\\'
                    for each_file in os.listdir(path):
                        if '.csv' in each_file and 'average' in each_file and 'FV{}'.format(fv_index) in each_file and 'withoutdropout' in each_file:
                            if drop in each_file:
                                data = np.loadtxt(path + each_file)
                                plt.plot(t, data, label='{}_MLP_{}_FV{}_{}_{}{}'.format(dataset, net_struc, fv_index, 'nodropout', name, use_name), color=color_list[nodrop_data_index])
                                # NOTE(review): bare `plt.hold` is a no-op (removed in matplotlib >= 3.0).
                                plt.hold
                                nodrop_data_index+=1
                        elif '.csv' in each_file and 'average' in each_file and 'FV{}'.format(fv_index) in each_file and 'dropout' in each_file:
                            if drop in each_file:
                                data = np.loadtxt(path + each_file)
                                plt.plot(t, data,
                                         label='{}_MLP_{}_FV{}_{}_{}{}'.format(dataset, net_struc, fv_index, 'dropout',
                                                                               name, use_name), color=color_list[drop_data_index], linestyle=':')
                                plt.hold
                                drop_data_index += 1
    # add SNN result
    SNN_fewshot_result_dir = 'SNN_fewshot_result/'
    if not task_num==100:
        SNN_train10test50_RESULT = Load_SNN_fewshot_Result(SNN_fewshot_result_dir, task_num, fv_index=fv_index, neuron_model='GC', traintest='TRAIN10TEST50')
        SNN_train30test50_RESULT = Load_SNN_fewshot_Result(SNN_fewshot_result_dir, task_num, fv_index=fv_index, neuron_model='GC',
                                                           traintest='TRAIN30TEST50')
        plt.plot(t, SNN_train10test50_RESULT,
                 label='{}_SNN({})_{}_FV{}_{}(fewshot)'.format(dataset, 'GC', net_struc, fv_index,
                                                               'train10test50'), color='orangered', marker='*')
        plt.hold
        plt.plot(t, SNN_train30test50_RESULT,
                 label='{}_SNN({})_{}_FV{}_{}(fewshot)'.format(dataset, 'GC', net_struc, fv_index,
                                                               'train30test50'), color='yellowgreen', marker='*')
        plt.hold
        SNN_train50test50_result_dir = 'SNN_result/'
        SNN_train50test50_RESULT = Load_SNN_Result(SNN_train50test50_result_dir, task_num, fv_index=fv_index, neuron_model='GC')
        plt.plot(t, SNN_train50test50_RESULT, label='{}_SNN({})_{}_FV{}_{}'.format(dataset, 'GC', net_struc, fv_index,
                                                                                   'train50test50'), color='darkblue', marker='*')
    else:
        SNN_train3test10_RESULT = Load_SNN_fewshot_Result(SNN_fewshot_result_dir, task_num, fv_index=fv_index,
                                                          neuron_model='GC', traintest='TRAIN3TEST10')
        SNN_train6test10_RESULT = Load_SNN_fewshot_Result(SNN_fewshot_result_dir, task_num, fv_index=fv_index,
                                                          neuron_model='GC',
                                                          traintest='TRAIN6TEST10')
        plt.plot(t, SNN_train3test10_RESULT,
                 label='{}_SNN({})_{}_FV{}_{}(fewshot)'.format(dataset, 'GC', net_struc, fv_index,
                                                               'train3test10'), color='orangered', marker='*')
        plt.hold
        plt.plot(t, SNN_train6test10_RESULT,
                 label='{}_SNN({})_{}_FV{}_{}(fewshot)'.format(dataset, 'GC', net_struc, fv_index,
                                                               'train6test10'), color='yellowgreen', marker='*')
        plt.hold
        SNN_train10test10_result_dir = 'SNN_result/'
        SNN_train10test10_RESULT = Load_SNN_Result(SNN_train10test10_result_dir, task_num, fv_index=fv_index,
                                                   neuron_model='GC')
        plt.plot(t, SNN_train10test10_RESULT, label='{}_SNN({})_{}_FV{}_{}'.format(dataset, 'GC', net_struc, fv_index,
                                                                                   'train10test10'), color='darkblue',
                 marker='*')
    plt.ylim([0, 1.1])
    if task_num==100:
        plt.xticks([1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
    else:
        plt.xticks(t)
    plt.ylabel('Average accuracy')
    plt.xlabel('Number of tasks')
    plt.title('%s %s average accuracy, FV%s, net_struc: %s' % ('iCaRL', dataset, fv_index, net_struc))
    plt.legend()
    plt.savefig(dir + 'result\\{}_MLP_{}_change_traintest_result_FV{}_{}.png'.format(dataset, net_struc, fv_index, drop), dpi=300)
    # plt.show()
def plotting_diff_model_avgacc_WithSNN_MemoryResult(dir, task_num, fv_index, hidden_num=100):
    """Plot per-task average accuracy for MLP variants, as width and depth sweeps.

    Produces and saves two figures:
      1. ``result/<dataset>_change_model_width_FV<fv>_result.png`` — one curve per
         single-hidden-layer MLP width (dirs whose structure string has 3 fields);
      2. ``result/<dataset>_change_model_depth_FV<fv>_result.png`` — one curve per
         multi-hidden-layer MLP (4+ fields), plus the 1-layer reference of width
         ``hidden_num``.

    :param dir: root directory with one sub-directory per trained model; the
        sub-dir name embeds the structure after 'MLP', e.g. '...MLP15-50-10'
    :param task_num: selects the benchmark: 10 -> MNIST, 20 -> EMNIST20,
        100 -> CIFAR100.  NOTE(review): any other value leaves ``dataset``
        undefined and the title line raises NameError.
    :param fv_index: feature-vector variant whose '.csv' results are plotted
    :param hidden_num: hidden width of the 1-layer reference curve in the depth plot
    """
    plt.figure(figsize=(20, 10))
    # (an older, shorter color palette kept here as a comment was removed)
    color_list = ['black', 'gray', 'lightcoral', 'red', 'orangered', 'saddlebrown', 'peru', 'darkorange', 'gold', 'olive',
                  'yellowgreen', 'lawngreen', 'palegreen', 'cyan', 'dodgerblue', 'crimson', 'midnightblue', 'indigo', 'deeppink', 'crimson', 'darkviolet','coral']
    # x axis: task index 1..task_num
    t = range(1, (task_num + 1))
    plot_num = 0
    if task_num == 10:
        dataset = 'MNIST'
        orig_hidden_dim = 50
        SNN_STRUCTURE='15-50-10'
    elif task_num == 20:
        dataset = 'EMNIST20'
        orig_hidden_dim = 67
        SNN_STRUCTURE = '20-67-20'
    elif task_num == 100:
        dataset = 'CIFAR100'
        orig_hidden_dim = 167
        SNN_STRUCTURE = '50-167-100'
    # SNN baselines are loaded but currently only plotted via the
    # commented-out sections below.
    SNN_result_dir = 'SNN_UsingOneMemory_Result/'
    if not task_num==100:
        SNN_Memory_One_RESULT = Load_SNN_UsingMemoryResult(SNN_result_dir, task_num, fv_index=fv_index, neuron_model='GC', Memory=1)
    SNN_No_Memory_RESULT = Load_SNN_UsingMemoryResult(SNN_result_dir, task_num, fv_index=fv_index, neuron_model='GC',
                                                      Memory=0)
    SNN_NoLearning_RESULT_dir = 'SNN_NoLearning_Result/'
    SNN_NoLearning_RESULT = Load_SNN_Result(SNN_NoLearning_RESULT_dir, task_num, fv_index=fv_index, neuron_model='GC')
    # (commented-out CA3 baseline load removed)
    ###################### plotting MLP width change width result ##########################
    # One curve per single-hidden-layer model ('in-hidden-out' == 3 fields)
    # whose output dimension matches task_num.
    for each_model_dir in os.listdir(dir):
        if 'MLP' in each_model_dir:
            current_model_dir = dir + each_model_dir + '/'
            current_model_structure = each_model_dir[each_model_dir.find('MLP'):]
            if len(current_model_structure.split('-'))==3 and int(current_model_structure.split('-')[-1])==task_num:
                for file in os.listdir(current_model_dir):
                    # NOTE: 'withoutdropout' must be tested first -- it also
                    # contains the substring 'dropout'.
                    if '.csv' in file and 'FV%s'%fv_index in file and 'withoutdropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = current_model_structure + ',withoutdropout'
                        # (older color-offset logic kept as comments was removed)
                        if int(current_model_structure.split('-')[1])==orig_hidden_dim:
                            plt.plot(t, current_model_result, color=color_list[plot_num], label=current_label)#, marker='o')
                        else:
                            plt.plot(t, current_model_result, color=color_list[plot_num], label=current_label)
                        # NOTE(review): bare ``plt.hold`` is a no-op attribute
                        # access; ``hold`` was removed in matplotlib >= 3.
                        plt.hold
                        plot_num+=1
                    elif '.csv' in file and 'FV%s'%fv_index in file and 'dropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = current_model_structure + ',dropout(0.5)'
                        # (older color-offset logic kept as comments was removed)
                        if int(current_model_structure.split('-')[1])==orig_hidden_dim:
                            plt.plot(t, current_model_result, color=color_list[plot_num], label=current_label)#, linestyle='-.')
                        else:
                            plt.plot(t, current_model_result, color=color_list[plot_num], label=current_label)
                        plt.hold
                        plot_num += 1
                    else:
                        pass
    if task_num==100:
        plot_num += 2
    # (commented-out SNN memory / no-learning overlay plotting removed;
    # it drew SNN_No_Memory_RESULT, SNN_NoLearning_RESULT and, for
    # task_num != 100, SNN_Memory_One_RESULT with '*' markers.)
    plt.subplots_adjust(top=0.94, bottom=0.05, left=0.05, right=0.99, hspace=0, wspace=0)
    plt.ylabel('Average accuracy')
    plt.xlabel('Number of tasks')
    plt.ylim([0, 1.1])
    if task_num==100:
        plt.xlim([0, task_num + 1])
        t2 = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        plt.xticks(t2)
    else:
        plt.xticks(t)
    plt.title('%s--Change MLP hidden layer (width) in iCaRL, FV%s, average accuracy'%(dataset, fv_index))
    plt.legend(loc=3)
    plt.savefig('result/%s_change_model_width_FV%d_result.png'%(dataset, fv_index), dpi=300)
    plt.show()
    #############################################################################
    ###################### plotting MLP width change Depth result ##########################
    plt.figure(figsize=(20, 10))
    # color_num_list pairs each no-dropout curve with the color index that was
    # assigned when its dropout twin was seen.
    color_num_list = []
    temp_dropout_num = 0
    temp_nodropout_num = 0
    for idx, each_model_dir in enumerate(os.listdir(dir)):
        if 'MLP' in each_model_dir:
            current_model_dir = dir + each_model_dir + '/'
            current_model_structure = each_model_dir[each_model_dir.find('MLP'):]
            # Multi-hidden-layer models (not exactly 3 fields).
            if not len(current_model_structure.split('-'))==3 and int(current_model_structure.split('-')[-1])==task_num:
                input_dim, output_dim = current_model_structure.split('-')[0], current_model_structure.split('-')[-1]
                hidden_dim = current_model_structure.split('-')[1]
                current_mlp_hiddenlayer_number = len(current_model_structure.split('-')) - 2
                for file in os.listdir(current_model_dir):
                    if '.csv' in file and 'FV%s'%fv_index in file and 'withoutdropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = '%s-%s[%sHiddenLayers]-%s'%(input_dim, hidden_dim, current_mlp_hiddenlayer_number, output_dim) + ',withoutdropout'
                        # NOTE(review): this indexes color_num_list with
                        # temp_nodropout_num, but entries are only appended in
                        # the dropout branch below -- an IndexError if a
                        # 'withoutdropout' file is listed before its dropout
                        # counterpart.  Confirm directory-listing order.
                        plt.plot(t, current_model_result, color=color_list[int(color_num_list[temp_nodropout_num])*4], label=current_label)
                        plt.hold
                        temp_nodropout_num+=1
                    elif '.csv' in file and 'FV%s'%fv_index in file and 'dropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = '%s-%s[%sHiddenLayers]-%s'%(input_dim, hidden_dim, current_mlp_hiddenlayer_number, output_dim) + ',dropout(0.5)'
                        color_num_list.append(temp_dropout_num)
                        plt.plot(t, current_model_result, color=color_list[int(color_num_list[temp_nodropout_num])*4], label=current_label, linestyle=':')
                        plt.hold
                        temp_dropout_num += 1
                    else:
                        pass
            ######### plotting 1 layers result ##################
            # Single-hidden-layer reference curve of width ``hidden_num``.
            elif int(current_model_structure.split('-')[1])==hidden_num and int(current_model_structure.split('-')[-1])==task_num:
                input_dim, output_dim = current_model_structure.split('-')[0], current_model_structure.split('-')[-1]
                hidden_dim = current_model_structure.split('-')[1]
                current_mlp_hiddenlayer_number = len(current_model_structure.split('-')) - 2
                for file in os.listdir(current_model_dir):
                    if '.csv' in file and 'FV%s'%fv_index in file and 'withoutdropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = '%s-%s[%sHiddenLayers]-%s'%(input_dim, hidden_dim, current_mlp_hiddenlayer_number, output_dim) + ',withoutdropout'
                        plt.plot(t, current_model_result, color=color_list[-7], label=current_label)
                        plt.hold
                        plot_num+=1
                    elif '.csv' in file and 'FV%s'%fv_index in file and 'dropout' in file:
                        current_model_result = np.loadtxt(current_model_dir + file)
                        current_label = '%s-%s[%sHiddenLayers]-%s'%(input_dim, hidden_dim, current_mlp_hiddenlayer_number, output_dim) + ',dropout(0.5)'
                        plt.plot(t, current_model_result, color=color_list[-7], label=current_label, linestyle=':')
                        plt.hold
                        plot_num += 1
                    else:
                        pass
    # (commented-out SNN overlay plotting removed; same baselines as above.)
    plt.ylabel('Average accuracy')
    plt.xlabel('Number of tasks')
    plt.subplots_adjust(top=0.94, bottom=0.05, left=0.05, right=0.99, hspace=0, wspace=0)
    plt.ylim([0, 1.1])
    if task_num==100:
        plt.xlim([0, task_num + 1])
        t2 = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
        plt.xticks(t2)
    else:
        plt.xticks(t)
    plt.title('%s--Change MLP hidden layer (Depth) in iCaRL, FV%s, average accuracy'%(dataset, fv_index))
    plt.legend(loc=3)
    plt.savefig('result/%s_change_model_depth_FV%d_result.png'%(dataset, fv_index), dpi=300)
    plt.show()
if __name__ == '__main__':
    # Entry point: pick dataset / FV dimensions from task_num and plot the
    # per-FV average-accuracy curves for the configured result directories.
    task_num = 10
    # METHODS is a module-level list defined earlier in this file;
    # index 2 selects the non-SNN branch below in the current config.
    method = METHODS[2]
    # task_num selects the benchmark (and the FV input/output sizes).
    if task_num == 20:
        input_dim = 20
        output_dim = 20
        dataset = 'EMNIST20'
    elif task_num == 10:
        input_dim = 15
        output_dim = 10
        dataset = 'MNIST'
    elif task_num == 100:
        input_dim = 50
        output_dim = 100
        dataset = 'CIFAR100'
    if method == 'SNN':
        # NOTE(review): this path mixes escaped and bare backslashes
        # ('\c', '\A', ...); it only works because those are not recognized
        # escape sequences -- consider a raw string.
        root_dir = 'D:\\vacation\continue-learn\ANNSingle-master\data\\raw\CIFAR100-0408'
        draw_average_accuracy_of_snn(task_num, method, root_dir)
    else:
        # (Large banks of commented-out hidden_width / dir_list experiment
        # configurations were removed here for readability -- see VCS history.
        # They enumerated widths such as [20]..[3200] and multi-layer
        # variants like [100, 100, 100] per dataset.)
        hidden_width = [[15]]#, [20], [50], [67], [100], [200], [400], [800], [1600], [2400], [3200]] # for EMNIST
        dir_list = ['CMNIST-NEW-BINARY-FV1']
        for dir_temp in dir_list:
            for hidden_num in hidden_width:
                # Build the result directory name from the MLP structure
                # (int -> single width; list -> dash-joined layer widths).
                if isinstance(hidden_num, int):
                    root_dir = 'result/%s/%s_FV_MLP%s-%s-%s/' % (dir_temp, dataset, input_dim, hidden_num, output_dim)
                else:
                    hidden_str = ''
                    for item in hidden_num:
                        hidden_str += str(item) + '-'
                    root_dir = 'result/%s/%s_FV_MLP%s-%s%s/' % (dir_temp, dataset, input_dim, hidden_str, output_dim)
                draw_average_accuracy_of_model_4FVs(task_num, method, root_dir, is_val=False)
        # (Further commented-out invocations were removed: train/test-split
        # sweeps, SNN memory comparisons via
        # plotting_diff_model_avgacc_WithSNN_MemoryResult, Load_SNN_Result,
        # draw_bias_difference / draw_patience_difference and
        # draw_diff_samples_acc_result over FV 1-4.)
# draw_diff_samples_acc_result(compare_dir, fv_index=fv_i, task_num=task_num, dataset_nums=3, drop='withoutdropout') |
22,922 | e14bc380b85abef3dc66590a6b6891a892a251c0 | from .early_stopping.early_stopping import EarlyStopping
|
22,923 | e6eb43b55072851f02c67f1f9312bfaaeb64c136 | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from login.models import UsersModel, add,login,TESTAPI_resetFixture
class Test(TestCase):
    """Regression tests for the login app's ``add``/``login`` helpers.

    BUGFIX: the original methods were named ``simple_add`` etc.; unittest
    (and therefore Django's test runner) only discovers methods whose names
    start with ``test``, so this suite previously executed nothing.  All
    methods are renamed ``test_*``; their bodies are unchanged.

    Integer result codes (inferred from the assertions -- confirm against
    ``login.models``):
      add:   1 = created, -2 = duplicate user, -3 = bad username, -4 = bad password
      login: 2 = first successful login, 3 = subsequent login, -1 = bad credentials
    """

    def test_simple_add(self):
        """A fresh user can be registered."""
        TESTAPI_resetFixture()
        r = add('name', 'pass')
        self.assertTrue(r == 1)

    def test_simple_reset(self):
        """Resetting the fixture allows re-adding the same user."""
        TESTAPI_resetFixture()
        r = add('name', 'pass')
        self.assertTrue(r == 1)
        r = TESTAPI_resetFixture()
        self.assertTrue(r == 1)
        r = add('name', 'pass')
        self.assertTrue(r == 1)

    def test_short_username(self):
        """An empty username is rejected."""
        TESTAPI_resetFixture()
        r = add('', 'pass')
        self.assertTrue(r == -3)

    def test_long_username(self):
        """A username far beyond the length limit is rejected."""
        TESTAPI_resetFixture()
        name = 'a' * 200  # well past any sane username limit
        r = add(name, 'pass')
        self.assertTrue(r == -3)

    def test_long_pass(self):
        """A password far beyond the length limit is rejected."""
        TESTAPI_resetFixture()
        password = 'a' * 200  # well past any sane password limit
        r = add('name', password)
        self.assertTrue(r == -4)

    def test_existing_user(self):
        """Adding the same username twice fails the second time."""
        TESTAPI_resetFixture()
        r = add('name', 'pass')
        self.assertTrue(r == 1)
        r = add('name', 'pass')
        self.assertTrue(r == -2)

    def test_simple_login(self):
        """A registered user can log in."""
        TESTAPI_resetFixture()
        r = add('name', 'pass')
        self.assertTrue(r == 1)
        r = login('name', 'pass')
        self.assertTrue(r == 2)

    def test_wrong_pass(self):
        """Login with a wrong password is rejected."""
        TESTAPI_resetFixture()
        r = add('name', 'pass')
        self.assertTrue(r == 1)
        r = login('name', 'wrong')
        self.assertTrue(r == -1)

    def test_wrong_user(self):
        """Login with an unknown username is rejected."""
        TESTAPI_resetFixture()
        r = add('name', 'pass')
        self.assertTrue(r == 1)
        r = login('wrong', 'pass')
        self.assertTrue(r == -1)

    def test_two_users(self):
        """Login counts are tracked per user, independently."""
        TESTAPI_resetFixture()
        r = add('name1', 'pass1')
        self.assertTrue(r == 1)
        r = add('name2', 'pass2')
        self.assertTrue(r == 1)
        r = login('name1', 'pass1')
        self.assertTrue(r == 2)
        r = login('name1', 'pass1')
        self.assertTrue(r == 3)
        r = login('name2', 'pass2')
        self.assertTrue(r == 2)
|
22,924 | 32603e23d7c7c32f1589c4ebbcedf542f47af9f2 | import logging
from flask import (
Blueprint,
current_app as app,
redirect,
render_template,
request,
session,
url_for)
from flask_wtf import FlaskForm
from pyltiflask import lti
from wtforms import (
SubmitField,
IntegerField,
FloatField,
BooleanField,
validators
)
from random import randint
from index import error
divide_blueprint = Blueprint('divide', __name__)
logger = logging.getLogger('divide')
class DivideForm(FlaskForm):
    """Quiz form for one division exercise.

    Two server-generated operands (``p1``, ``p2``), the student's answer
    (``result``) and a correctness flag that is filled in server-side by
    ``grade()``.

    NOTE(review): ``validators.DataRequired`` treats the number 0 as
    "missing", so a submitted value of 0 fails validation; WTForms
    recommends ``InputRequired`` for numeric fields -- confirm intent
    before changing.
    """
    p1 = FloatField('p1', [validators.DataRequired()])  # dividend
    p2 = FloatField('p2', [validators.DataRequired()])  # divisor
    result = FloatField('result', [validators.DataRequired()])  # student's answer
    correct = BooleanField('correct')  # set in grade(), not by the student
    submit = SubmitField('Check')
@divide_blueprint.route('/divide', methods=['GET', 'POST'])
@lti(request='session', error=error, app=app)
def index(lti=lti):
    """ initial access page for lti consumer

    Pre-fills the form with two random operands in 1..9 (so the later
    division in ``grade()`` never sees a generated divisor of zero).

    :param lti: the `lti` object from `pylti`
    :return: index page for lti provider
    """
    form = DivideForm()
    form.p1.data = float(randint(1, 9))
    form.p2.data = float(randint(1, 9))
    return render_template('divide/index.html', form=form)
@divide_blueprint.route('/divide/grade', methods=['POST'])
@lti(request='session', error=error, app=app)
def grade(lti=lti):
    """ post grade

    Re-validates the submitted form, marks the answer correct when it
    equals p1/p2 rounded to two decimals, and pushes a 0/1 grade back to
    the LTI consumer.

    :param lti: the `lti` object from `pylti`
    :return: grade rendered by grade.html template
    """
    form = DivideForm(request.form)
    if not form.validate():
        return error(message='The divide form could not be validated.')
    # NOTE(review): exact float equality after round(..., 2) -- the student
    # must type the 2-decimal rounding precisely.  A p2 of 0 would raise
    # ZeroDivisionError, but DataRequired already rejects 0 upstream.
    correct = (round((form.p1.data / form.p2.data), 2) == form.result.data)
    form.correct.data = correct
    lti.post_grade(1 if correct else 0)
    return render_template('divide/grade.html', form=form)
|
22,925 | c413b951fdb54a003dd815137e00924d8e67c5f7 | # -*- coding: utf-8 -*-
import pyomo.environ as pe
import numpy as np

# Pairwise distance/cost matrix between 10 locations.  An alternative
# ordering of the same data is kept below as a (string) comment.
'''setA= np.array([[0, 860, 599, 574, 269, 349, 87, 100, 353, 1300],
               [860, 0 , 268, 347, 596, 541, 779, 961, 925, 859],
               [599, 268, 0 , 85, 334, 279, 516, 698, 663, 901],
               [574, 347, 85, 0, 309, 254, 492, 674, 595, 981],
               [269, 596, 334, 309, 0, 85, 187, 369, 342, 1138],
               [349, 541, 279, 254, 85, 0, 266, 448, 413, 1083],
               [87 , 779, 516, 492, 187, 266, 0, 186, 314, 1240],
               [100, 961, 698, 674, 369, 448, 186, 0, 373, 1404],
               [353, 925, 663, 595, 342, 413, 314, 373, 0, 1467],
               [1300, 859, 901, 981, 1138, 1083, 1240, 1404, 1467, 0]])'''
setA= np.array([ [0,860,599,100,353,269,349,87,1300,574],
                [860,0,268,961,925,596,541,779,859,347],
                [599,268,0,698,663,334,279,516,901,85],
                [100,961,698,0,373,369,448,186,1404,674],
                [353,925,663,373,0,342,413,314,1467,595],
                [269,596,334,369,342,0,85,187,1138,309],
                [349,541,279,448,413,85,0,266,1083,254],
                [87,779,516,186,314,187,266,0,1240,492],
                [1300,859,901,1404,1467,1138,1083,1240,0,981],
                [574,347,85,674,595,309,254,492,981,0] ])
# setB..setF: 5x5 cost matrices for weeks 1..5, built from selected entries
# of setA; 1000000 acts as a "forbidden assignment" big-M penalty.
setB=np.array([[setA[0,5],setA[1,5],1000000,1000000,setA[4,5]],
               [setA[0,6],1000000,1000000,setA[3,6],setA[4,6]],
               [1000000,setA[1,7],setA[2,7],setA[3,7],1000000],
               [1000000,setA[1,3],setA[2,3],1000000,setA[4,3]],
               [setA[0,8],1000000,setA[2,8],setA[3,8],1000000]])
setC=np.array([ [1000000,setA[6,0],setA[7,0],1000000,setA[8,0]],
                [1000000,1000000,setA[7,1],setA[3,1],setA[8,1]],
                [setA[5,9],1000000,setA[7,9],setA[3,9],1000000],
                [setA[5,4],setA[6,4],1000000,1000000,setA[8,4]],
                [setA[5,8],setA[6,8],1000000,setA[3,8],1000000]])
setD=np.array([ [1000000,1000000,setA[9,5],setA[4,5],setA[8,5]],
                [setA[0,6],setA[1,6],1000000,setA[4,6],1000000],
                [setA[0,7],setA[1,7],1000000,setA[4,7],1000000],
                [setA[0,3],1000000,setA[9,3],1000000,setA[8,3]],
                [1000000,setA[1,4],setA[9,4],1000000,setA[8,4]]])
setE=np.array([ [1000000,1000000,setA[7,0],setA[3,0],setA[4,0]],
                [setA[5,1],setA[6,1],setA[7,1],1000000,1000000],
                [setA[5,9],setA[6,9],1000000,1000000,setA[4,9]],
                [1000000,setA[6,7],1000000,setA[3,7],setA[4,7]],
                [setA[5,8],1000000,setA[7,8],setA[3,8],1000000]])
setF=np.array([ [setA[0,2],1000000,setA[9,2],setA[7,2],1000000],
                [1000000,setA[1,9],1000000,setA[7,9],setA[8,9]],
                [setA[0,5],1000000,setA[9,5],1000000,setA[8,5]],
                [1000000,setA[1,6],setA[9,6],1000000,setA[8,6]],
                [setA[0,8],setA[1,8],1000000,setA[7,8],1000000]])

# Assignment model: one binary permutation matrix x[:, :, k] per week k,
# minimising total cost.  Presumably a 5-crew / 5-week rotation schedule
# over travel distances -- confirm against the problem statement.
model = pe.ConcreteModel()
model.dual = pe.Suffix(direction = pe.Suffix.IMPORT)

# ------ SETS ---------
model.crew = pe.Set(initialize = range(5))
model.week = pe.Set(initialize = range(5))
model.set = pe.Set(initialize = range(5))

# -------------VARIABLES------------
# x[i, j, k] = 1 iff crew i takes slot j in week k.
model.x = pe.Var(model.crew,model.crew,model.week,domain = pe.Binary)

# ------PARAMETERS--------
# One cost matrix per week, copied from the numpy arrays above.
model.setB = pe.Param(model.set, model.set, initialize = lambda model, i,j: setB[i][j])
model.setC = pe.Param(model.set, model.set, initialize = lambda model, i,j: setC[i][j])
model.setD = pe.Param(model.set, model.set, initialize = lambda model, i,j: setD[i][j])
model.setE = pe.Param(model.set, model.set, initialize = lambda model, i,j: setE[i][j])
model.setF = pe.Param(model.set, model.set, initialize = lambda model, i,j: setF[i][j])

# ------CONSTRAINTS-----------
def uniqrow_cons(model,j,k):
    # Each slot j is filled by exactly one crew in week k.
    return sum(model.x[i,j,k] for i in range(5)) == 1
model.uniqrowCons = pe.Constraint(model.crew,model.week,rule = uniqrow_cons)

def uniqcol_cons(model,i,k):
    # Each crew i fills exactly one slot in week k.
    return sum(model.x[i,j,k] for j in range(5)) == 1
model.uniqcolCons = pe.Constraint(model.crew,model.week,rule = uniqcol_cons)

# ------OBJECTIVE-----------
def obj_rule(model):
    # Total cost over the five weekly assignments, scaled by 0.5 * 7
    # (factor presumably converts a round-trip/weekly unit -- confirm).
    w = 0
    for j in range(5):
        for i in range(5):
            w = w + model.setB[i,j] * model.x[i,j,0] + model.setC[i,j] * model.x[i,j,1]\
                + model.setD[i,j] * model.x[i,j,2] + model.setE[i,j] * model.x[i,j,3]\
                + model.setF[i,j] * model.x[i,j,4]
    return (0.5*7*w)
model.OBJ = pe.Objective(rule = obj_rule, sense = pe.minimize)
model.OBJ.pprint()

#----------SOLVING----------
solver = pe.SolverFactory('gurobi') # Specify Solver
results = solver.solve(model, tee=False, keepfiles=False)
print()
print("Status:", results.solver.status)
print("Termination Condition:", results.solver.termination_condition)

# ---------POST-PROCESSING-------------------
# Print each week's 0/1 assignment matrix, then the raw (unscaled) cost of
# each weekly assignment as a sanity check against the objective.
print()
for k in model.crew:
    print('The %d th week to %d th week schedule:'%(k+1, k+2))
    for i in model.crew:
        print(model.x[i,0,k].value,model.x[i,1,k].value,\
            model.x[i,2,k].value,model.x[i,3,k].value,model.x[i,4,k].value)

temp1 = 0
for i in range(5):
    for j in range(5):
        temp1 = temp1 + model.x[i,j,0].value * setB[i,j]
print('Temp1: ',temp1)
temp2 = 0
for i in range(5):
    for j in range(5):
        temp2 = temp2 + model.x[i,j,1].value * setC[i,j]
print('Temp2: ',temp2)
temp3 = 0
for i in range(5):
    for j in range(5):
        temp3 = temp3 + model.x[i,j,2].value * setD[i,j]
print('Temp3: ',temp3)
temp4 = 0
for i in range(5):
    for j in range(5):
        temp4 = temp4 + model.x[i,j,3].value * setE[i,j]
print('Temp4: ',temp4)
temp5 = 0
for i in range(5):
    for j in range(5):
        temp5 = temp5 + model.x[i,j,4].value * setF[i,j]
print('Temp5: ',temp5)
print()
print("\nObjective function value: ", model.OBJ()) |
22,926 | ddac556027fc46c54ec5c23a1e12468faf29ae15 |
# Author Ricardo
# prints out hello World
print ("Hello World!")
|
22,927 | 2115c55d88e0771dffc445b6f078ab7ad11f1f04 | # -*- coding: utf-8 -*-
# @Author : lishouxian
# @Email : gzlishouxian@gmail.com
# @File : model.py
# @Software: PyCharm
from abc import ABC
from torch import nn
from transformers import BertModel
class Model(nn.Module, ABC):
    """BERT-backed sequence scorer.

    Per input token the head emits ``2 * num_labels`` sigmoid scores,
    reshaped to ``(batch, seq_len, num_labels, 2)``.
    """

    def __init__(self, hidden_size, num_labels):
        super().__init__()
        # Number of labels; fixes the third axis of the output tensor.
        self.num_labels = num_labels
        # Submodule creation order kept stable so state_dict layout is unchanged.
        self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-12)
        self.bert_model = BertModel.from_pretrained('bert-base-chinese')
        self.fc = nn.Linear(hidden_size, 2 * num_labels)
        self.sigmoid = nn.Sigmoid()

    def forward(self, sentences, attention_mask):
        """Score a batch of token sequences.

        :param sentences: token-id tensor fed to BERT
        :param attention_mask: matching BERT attention mask
        :return: tensor of shape ``(batch, -1, num_labels, 2)``
        """
        # Last hidden states from BERT, normalized then projected to scores.
        hidden = self.bert_model(sentences, attention_mask=attention_mask)[0]
        scores = self.sigmoid(self.fc(self.layer_norm(hidden)))
        return scores.view(scores.size(0), -1, self.num_labels, 2)
|
22,928 | 02e2cb7f69ac0cd1f4bcf87c9acd4b7b563c90f4 | import os
import csv
from collections import Counter

# Path to the election data CSV: rows of (Voter ID, County, Candidate).
filepath = "C:\\Users\\aptho\\Downloads\\Instructions\\Instructions\\PyPoll\\Resources\\election_data.csv"


def summarize_election(rows):
    """Tally votes from an iterable of data rows.

    :param rows: iterable of rows whose third field is the candidate name
    :return: (total_votes, Counter mapping candidate -> vote count)
    """
    counts = Counter(row[2] for row in rows)
    return sum(counts.values()), counts


def print_results(total_votes, counts):
    """Print the election summary in the original report layout.

    BUGFIX notes vs. the original script:
      * the first data row was consumed by ``next()`` and never tallied;
      * per-candidate counts were rebuilt from scratch on every row (O(n^2));
      * the ``*count`` lists were appended with the whole candidates list,
        wasting memory;
      * the winner was hard-coded as "Khan" -- it is now computed.
    """
    print("Election Results")
    print("---------------------------")
    print(f"Total Votes: {total_votes}")
    print("----------------------------")
    if total_votes == 0:
        # Guard against an empty data file (division by zero below).
        print("No votes recorded.")
        return
    # Candidates in descending vote order; same "Name:pct % count" format.
    for name, votes in counts.most_common():
        share = round((votes / total_votes) * 100, 2)
        print(f"{name}:{share} % {votes}")
    print("----------------------------")
    winner = counts.most_common(1)[0][0]
    print(f"Winner: {winner} ")


if __name__ == "__main__":
    with open(filepath) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)  # skip the header row only -- every data row is counted
        total_votes, counts = summarize_election(reader)
    print_results(total_votes, counts)
22,929 | ec63da582c15d66359184f826f54c29ac2390034 | import logging
import json
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark import SparkConf, SparkContext
import pyspark.sql.functions as psf
# Create a schema for incoming resources
# Shape of each JSON service-call event read from the Kafka topic.
schema = StructType([
    StructField("crime_id", StringType(), True),
    StructField("original_crime_type_name", StringType(), True),
    StructField("report_date", TimestampType(), True),
    StructField("call_date", TimestampType(), True),
    StructField("offense_date", TimestampType(), True),
    StructField("call_time", StringType(), True),
    StructField("call_date_time", TimestampType(), True),
    StructField("disposition", StringType(), True),
    StructField("address", StringType(), True),
    StructField("city", StringType(), True),
    StructField("state", StringType(), True),
    StructField("agency_id", StringType(), True),
    StructField("address_type", StringType(), True),
    StructField("common_location", StringType(), True)
])

# Schema of the static radio_code.json lookup file
# (disposition code -> human-readable description).
radio_schema = StructType([
    StructField("disposition_code", StringType(), True),
    StructField("description", StringType(), True)
])
def run_spark_job(spark):
    """Run the structured-streaming pipeline over police service-call events.

    Reads JSON events from the Kafka topic ``police.calls.service``, streams
    a per-crime-type count aggregation to the console, then joins the raw
    stream with the static radio-code lookup table.

    NOTE(review): ``query.awaitTermination()`` blocks indefinitely, so the
    join query below it only starts once the first query terminates --
    confirm whether both queries are meant to run concurrently.

    :param spark: an active SparkSession (built in ``__main__``)
    """
    # Quieten Spark's chatty INFO logging.
    spark.sparkContext.setLogLevel("WARN")

    BOOTSTRAP_SERVERS = "DESKTOP-B2QMGU6:9092"

    # Kafka source, capped at 200 offsets per micro-batch.
    # BUGFIX: the option is spelled ``maxOffsetsPerTrigger``; the original
    # ``maxOffsetPerTrigger`` was an unknown option and silently ignored.
    # NOTE(review): ``parallelism`` is not a Kafka-source option -- confirm.
    df = spark \
        .readStream \
        .format("kafka") \
        .option("kafka.bootstrap.servers", BOOTSTRAP_SERVERS) \
        .option("subscribe", "police.calls.service") \
        .option("startingOffsets", "earliest") \
        .option("maxOffsetsPerTrigger", 200) \
        .option("parallelism", 10000) \
        .load()

    # Show schema for the incoming resources for checks
    df.printSchema()

    # Kafka values arrive as bytes; decode to JSON strings, then parse with
    # the module-level ``schema`` and flatten the struct columns.
    kafka_df = df.selectExpr("CAST(value AS STRING)")

    service_table = kafka_df.select(
        psf.from_json(
            kafka_df.value,
            schema).alias("main_df")
    ).select("main_df.*")

    # Keep only the columns needed downstream; the watermark bounds how long
    # late events are kept in state for the aggregation/join.
    distinct_table = service_table \
        .select(
            "original_crime_type_name",
            "disposition",
            "call_date_time"
        ).withWatermark("call_date_time", "10 minutes")

    # Running count of calls per original crime type, most frequent first.
    agg_df = distinct_table.groupBy(
        "original_crime_type_name"
    ).count().sort("count", ascending=False)

    # ``logger`` is the module-level logger configured in ``__main__``.
    logger.info('Stream of crime count by type')
    query = agg_df.writeStream \
        .format("console") \
        .outputMode("complete") \
        .start()

    query.awaitTermination()

    # Static lookup of disposition codes -> descriptions.
    radio_code_json_filepath = "radio_code.json"

    # Needs option multiline,True or output will be an empty df
    radio_code_df = spark.read.option(
        "multiline",
        "true"
    ).json(
        radio_code_json_filepath,
        schema=radio_schema
    )

    # Rename disposition_code to disposition so the join key names match.
    radio_code_df = radio_code_df.withColumnRenamed("disposition_code", "disposition")

    # Enrich the call stream with the radio-code description via the
    # shared ``disposition`` column.
    join_query = distinct_table.join(
        radio_code_df, "disposition"
    ).writeStream\
        .format("console")\
        .queryName("join")\
        .start()

    join_query.awaitTermination()
if __name__ == "__main__":
logger = logging.getLogger(__name__)
# Create Spark in Standalone mode
spark = SparkSession \
.builder \
.master("local[*]") \
.config("spark.ui.port", 4040) \
.appName("KafkaSparkStructuredStreaming") \
.getOrCreate()
print(SparkConf().getAll())
logger.info("Spark started")
run_spark_job(spark)
spark.stop()
|
22,930 | e9883e8d953dd04bc0008bdbc3c2fbed26caaeb5 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 29 20:56:43 2021
@author: Hewlett-Packard
"""
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn import metrics
# NOTE(review): confusion_matrix / classification_report are imported but
# unused, and pandas is imported twice (see below).
from sklearn.metrics import confusion_matrix, classification_report
import pandas as pd
import numpy as np

# 10-fold cross-validated polynomial SVM over VADER-labelled tweets.
# Variable names are Indonesian: akurasi = accuracy.
data = pd.read_excel('HasilVaderMaretJuniId.xlsx')
X = data['Tweet']
y = data['label']

k = 10
# NOTE(review): KFold without shuffle splits rows in file order -- if the
# sheet is sorted by label/date the folds are biased; consider shuffle=True.
skf = KFold(n_splits=k)
akurasi = []
recall = []
precision=[]
gamma=[]

for train_index, test_index in skf.split(X):
    X_train, X_test, y_train, y_test = X[train_index], X[test_index], y[train_index], y[test_index]
    # TF-IDF is re-fit per fold on the training split only (no leakage).
    vectorizer = TfidfVectorizer(norm = 'l1')
    X_train=vectorizer.fit_transform(X_train)
    X_test=vectorizer.transform(X_test)
    clf=svm.SVC(kernel='poly', C=20, gamma = 'scale', degree = 2)
    clf.fit(X_train,y_train)
    y_pred = clf.predict(X_test)
    akurasi.append(metrics.accuracy_score(y_test, y_pred))
    recall.append(metrics.recall_score(y_test, y_pred))
    precision.append(metrics.precision_score(y_test, y_pred))
    # NOTE(review): ``_gamma`` is a private sklearn attribute (the resolved
    # numeric value of gamma='scale') and may change between versions.
    gamma.append(clf._gamma)

# Mean of each metric across the 10 folds.
akurasiTotal = np.mean(akurasi)
recallTotal = np.mean(recall)
precisionTotal = np.mean(precision)
gammaTotal = np.mean(gamma) |
22,931 | b9195ae6d5d38b84e2fcba9e8fe8886414ef5673 | import json
import os
import tempfile
from pathlib import Path
from tempfile import NamedTemporaryFile
from textwrap import dedent
from typing import List, Union
from unittest.mock import patch
import numpy as np
import pandas as pd
import scipy.sparse
import yaml
from pandas.testing import assert_frame_equal
import pyarrow
import pytest
import responses
from strictyaml import load, YAMLValidationError
from datarobot_drum.drum.drum import (
possibly_intuit_order,
output_in_code_dir,
create_custom_inference_model_folder,
)
from datarobot_drum.drum.exceptions import DrumCommonException, DrumSchemaValidationException
from datarobot_drum.drum.model_adapter import PythonModelAdapter
from datarobot_drum.drum.language_predictors.python_predictor.python_predictor import (
PythonPredictor,
)
from datarobot_drum.drum.language_predictors.r_predictor.r_predictor import RPredictor
from datarobot_drum.drum.language_predictors.java_predictor.java_predictor import JavaPredictor
from datarobot_drum.drum.push import _push_inference, _push_training, drum_push
from datarobot_drum.drum.common import (
read_model_metadata_yaml,
MODEL_CONFIG_FILENAME,
TargetType,
validate_config_fields,
ModelMetadataKeys,
)
from datarobot_drum.drum.utils import StructuredInputReadUtils
from datarobot_drum.drum.typeschema_validation import (
get_type_schema_yaml_validator,
revalidate_typeschema,
Conditions,
Values,
Fields,
RequirementTypes,
SchemaValidator,
)
class TestOrderIntuition:
    """Tests for ``possibly_intuit_order``'s class-label inference from CSVs."""
    # Fixture CSVs live in ../testdata relative to this test module.
    tests_data_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "testdata"))
    binary_filename = os.path.join(tests_data_path, "iris_binary_training.csv")
    regression_filename = os.path.join(tests_data_path, "boston_housing.csv")
    one_target_filename = os.path.join(tests_data_path, "one_target.csv")

    def test_colname(self):
        # Labels can be inferred from a named target column.
        classes = possibly_intuit_order(self.binary_filename, target_col_name="Species")
        assert set(classes) == {"Iris-versicolor", "Iris-setosa"}

    def test_colfile(self):
        # Labels can alternatively come from a separate single-column file.
        with NamedTemporaryFile() as target_file:
            df = pd.read_csv(self.binary_filename)
            with open(target_file.name, "w") as f:
                target_series = df["Species"]
                target_series.to_csv(f, index=False, header="Target")
            classes = possibly_intuit_order(self.binary_filename, target_data_file=target_file.name)
            assert set(classes) == {"Iris-versicolor", "Iris-setosa"}

    def test_badfile(self):
        # A dataset with a single distinct class raises DrumCommonException.
        with pytest.raises(DrumCommonException):
            possibly_intuit_order(self.one_target_filename, target_col_name="Species")

    def test_unsupervised(self):
        # Anomaly-detection (unsupervised) runs have no class order.
        classes = possibly_intuit_order(
            self.regression_filename, target_col_name="MEDV", is_anomaly=True
        )
        assert classes is None
class TestValidatePredictions:
    """Tests for prediction-output validation in the model adapter and the
    language predictors."""

    def test_class_labels(self):
        """Binary prediction columns must match the configured class labels."""
        positive_label = "poslabel"
        negative_label = "neglabel"
        adapter = PythonModelAdapter(model_dir=None, target_type=TargetType.BINARY)
        df = pd.DataFrame({positive_label: [0.1, 0.2, 0.3], negative_label: [0.9, 0.8, 0.7]})
        adapter._validate_predictions(
            to_validate=df, class_labels=[positive_label, negative_label],
        )
        # Same frame, but validated against labels that don't match the columns.
        with pytest.raises(ValueError):
            df = pd.DataFrame({positive_label: [0.1, 0.2, 0.3], negative_label: [0.9, 0.8, 0.7]})
            adapter._validate_predictions(
                to_validate=df, class_labels=["yes", "no"],
            )

    def test_regression_predictions_header(self):
        """Regression output must use the single "Predictions" column name."""
        adapter = PythonModelAdapter(model_dir=None, target_type=TargetType.REGRESSION)
        df = pd.DataFrame({"Predictions": [0.1, 0.2, 0.3]})
        adapter._validate_predictions(
            to_validate=df, class_labels=None,
        )
        with pytest.raises(ValueError):
            df = pd.DataFrame({"other_name": [0.1, 0.2, 0.3]})
            adapter._validate_predictions(
                to_validate=df, class_labels=None,
            )

    def test_add_to_one(self):
        """Each predictor type rejects binary frames whose rows are not a
        valid probability pair (df_bad has negative values / bad row sums)."""
        positive_label = "poslabel"
        negative_label = "neglabel"
        for predictor in [PythonPredictor(), RPredictor(), JavaPredictor()]:
            predictor._target_type = TargetType.BINARY
            df_good = pd.DataFrame(
                {positive_label: [0.1, 0.2, 0.3], negative_label: [0.9, 0.8, 0.7]}
            )
            predictor.validate_predictions(df_good)
            df_bad = pd.DataFrame({positive_label: [1, 1, 1], negative_label: [-1, 0, 0]})
            with pytest.raises(ValueError):
                predictor.validate_predictions(df_bad)
# Fixed identifiers referenced by the mocked DataRobot API endpoints below.
modelID = "5f1f15a4d6111f01cb7f91f"
environmentID = "5e8c889607389fe0f466c72d"
projectID = "abc123"
@pytest.fixture
def inference_metadata_yaml():
    """Minimal model-metadata YAML for an inference regression model."""
    return dedent(
        """
        name: drumpush-regression
        type: inference
        targetType: regression
        environmentID: {environmentID}
        inferenceModel:
          targetName: MEDV
        validation:
          input: hello
        """
    ).format(environmentID=environmentID)
@pytest.fixture
def inference_binary_metadata_yaml_no_target_name():
    """Binary inference metadata that lacks the required targetName field."""
    return dedent(
        """
        name: drumpush-binary
        type: inference
        targetType: binary
        environmentID: {environmentID}
        inferenceModel:
          positiveClassLabel: yes
          negativeClassLabel: no
        validation:
          input: hello
        """
    ).format(environmentID=environmentID)
@pytest.fixture
def inference_binary_metadata_no_label():
    """Binary inference metadata missing negativeClassLabel (invalid)."""
    return dedent(
        """
        name: drumpush-binary
        type: inference
        targetType: binary
        inferenceModel:
          positiveClassLabel: yes
        """
    )
@pytest.fixture
def multiclass_labels():
    """Class labels shared by the multiclass metadata fixtures below."""
    labels = ["GALAXY", "QSO", "STAR"]
    return labels
@pytest.fixture
def inference_multiclass_metadata_yaml_no_labels():
    """Multiclass inference metadata with neither classLabels nor a labels file
    (invalid)."""
    return dedent(
        """
        name: drumpush-multiclass
        type: inference
        targetType: multiclass
        environmentID: {}
        inferenceModel:
          targetName: class
        validation:
          input: hello
        """
    ).format(environmentID)
@pytest.fixture
def inference_multiclass_metadata_yaml(multiclass_labels):
    """Valid multiclass inference metadata with inline classLabels."""
    return dedent(
        """
        name: drumpush-multiclass
        type: inference
        targetType: multiclass
        environmentID: {}
        inferenceModel:
          targetName: class
          classLabels:
            - {}
            - {}
            - {}
        validation:
          input: hello
        """
    ).format(environmentID, *multiclass_labels)
@pytest.fixture
def inference_multiclass_metadata_yaml_label_file(multiclass_labels):
    """Valid multiclass inference metadata pointing at a classLabelsFile.

    The temp file holding the labels stays alive for the duration of the
    test because the fixture yields from inside the ``with`` block.
    """
    with NamedTemporaryFile(mode="w+") as f:
        f.write("\n".join(multiclass_labels))
        f.flush()
        yield dedent(
            """
            name: drumpush-multiclass
            type: inference
            targetType: multiclass
            environmentID: {}
            inferenceModel:
              targetName: class
              classLabelsFile: {}
            validation:
              input: hello
            """
        ).format(environmentID, f.name)
@pytest.fixture
def inference_multiclass_metadata_yaml_labels_and_label_file(multiclass_labels):
    """Multiclass inference metadata specifying BOTH classLabels and a
    classLabelsFile — an invalid combination."""
    with NamedTemporaryFile(mode="w+") as f:
        f.write("\n".join(multiclass_labels))
        f.flush()
        yield dedent(
            """
            name: drumpush-multiclass
            type: inference
            targetType: multiclass
            environmentID: {}
            inferenceModel:
              targetName: class
              classLabelsFile: {}
              classLabels:
                - {}
                - {}
                - {}
            validation:
              input: hello
            """
        ).format(environmentID, f.name, *multiclass_labels)
@pytest.fixture
def training_metadata_yaml():
    """Minimal model-metadata YAML for a training (custom task) model."""
    return dedent(
        """
        name: drumpush-regression
        type: training
        targetType: regression
        environmentID: {environmentID}
        validation:
          input: hello
        """
    ).format(environmentID=environmentID)
@pytest.fixture
def training_metadata_yaml_with_proj():
    """Training metadata that additionally requests training on a project."""
    return dedent(
        """
        name: drumpush-regression
        type: training
        targetType: regression
        environmentID: {environmentID}
        trainingModel:
          trainOnProject: {projectID}
        validation:
          input: hello
        """
    ).format(environmentID=environmentID, projectID=projectID)
@pytest.fixture
def custom_predictor_metadata_yaml():
    """Metadata carrying a free-form customPredictor section."""
    return dedent(
        """
        name: model-with-custom-java-predictor
        type: inference
        targetType: regression
        customPredictor:
          arbitraryField: This info is read directly by a custom predictor
        """
    )
# Canned API payload describing a custom-model version; reused by the
# endpoint mocks and by mock_get_model() below.
version_response = {
    "id": "1",
    "custom_model_id": "1",
    "version_minor": 1,
    "version_major": 1,
    "is_frozen": False,
    "items": [{"id": "1", "file_name": "hi", "file_path": "hi", "file_source": "hi"}],
}
@pytest.mark.parametrize(
    "config_yaml",
    [
        "custom_predictor_metadata_yaml",
        "training_metadata_yaml",
        "training_metadata_yaml_with_proj",
        "inference_metadata_yaml",
        "inference_multiclass_metadata_yaml",
        "inference_multiclass_metadata_yaml_label_file",
    ],
)
@pytest.mark.parametrize("existing_model_id", [None])
def test_yaml_metadata(request, config_yaml, existing_model_id, tmp_path):
    """Every well-formed metadata fixture parses without raising."""
    config_yaml = request.getfixturevalue(config_yaml)
    if existing_model_id:
        config_yaml = config_yaml + "\nmodelID: {}".format(existing_model_id)

    with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
        f.write(config_yaml)
    read_model_metadata_yaml(tmp_path)
@pytest.mark.parametrize(
    "config_yaml, test_case_number",
    [
        ("custom_predictor_metadata_yaml", 1),
        ("inference_binary_metadata_no_label", 2),
        ("inference_multiclass_metadata_yaml_no_labels", 3),
        ("inference_multiclass_metadata_yaml_labels_and_label_file", 4),
        ("inference_multiclass_metadata_yaml", 100),
        ("inference_multiclass_metadata_yaml_label_file", 100),
    ],
)
def test_yaml_metadata_missing_fields(tmp_path, config_yaml, request, test_case_number):
    """Metadata with missing or conflicting fields fails with a clear error.

    ``test_case_number`` selects the expected failure mode; 100 marks
    configurations that must parse cleanly.
    """
    config_yaml = request.getfixturevalue(config_yaml)
    with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
        f.write(config_yaml)

    if test_case_number == 1:
        conf = read_model_metadata_yaml(tmp_path)
        # NOTE: the `match` patterns are raw strings — "\[" in a plain
        # string literal is an invalid escape sequence (SyntaxWarning on
        # modern Python); the regex needs the backslash to survive.
        with pytest.raises(
            DrumCommonException, match=r"Missing keys: \['validation', 'environmentID'\]"
        ):
            validate_config_fields(
                conf,
                ModelMetadataKeys.CUSTOM_PREDICTOR,
                ModelMetadataKeys.VALIDATION,
                ModelMetadataKeys.ENVIRONMENT_ID,
            )
    elif test_case_number == 2:
        with pytest.raises(DrumCommonException, match=r"Missing keys: \['negativeClassLabel'\]"):
            read_model_metadata_yaml(tmp_path)
    elif test_case_number == 3:
        with pytest.raises(
            DrumCommonException,
            match="Error - for multiclass classification, either the class labels or a class labels file must be provided in model-metadata.yaml file",
        ):
            read_model_metadata_yaml(tmp_path)
    elif test_case_number == 4:
        with pytest.raises(
            DrumCommonException,
            match="Error - for multiclass classification, either the class labels or a class labels file should be provided in model-metadata.yaml file, but not both",
        ):
            read_model_metadata_yaml(tmp_path)
    elif test_case_number == 100:
        read_model_metadata_yaml(tmp_path)
def test_read_model_metadata_properly_casts_typeschema(tmp_path, training_metadata_yaml):
    """typeSchema values round-trip through the YAML reader with native types:
    ints stay ints and strings stay strings, for both scalar and list forms."""
    config_yaml = training_metadata_yaml + dedent(
        """
        typeSchema:
           input_requirements:
           - field: number_of_columns
             condition: IN
             value:
               - 1
               - 2
           - field: data_types
             condition: EQUALS
             value:
               - NUM
               - TXT
           output_requirements:
           - field: number_of_columns
             condition: IN
             value: 2
           - field: data_types
             condition: EQUALS
             value: NUM
        """
    )
    with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
        f.write(config_yaml)

    yaml_conf = read_model_metadata_yaml(tmp_path)
    output_reqs = yaml_conf["typeSchema"]["output_requirements"]
    input_reqs = yaml_conf["typeSchema"]["input_requirements"]

    value_key = "value"
    # List-valued requirements keep their element types...
    expected_as_int_list = next(
        (el for el in input_reqs if el["field"] == "number_of_columns")
    ).get(value_key)
    expected_as_str_list = next((el for el in input_reqs if el["field"] == "data_types")).get(
        value_key
    )
    # ...and scalar-valued requirements keep their scalar types.
    expected_as_int = next((el for el in output_reqs if el["field"] == "number_of_columns")).get(
        value_key
    )
    expected_as_str = next((el for el in output_reqs if el["field"] == "data_types")).get(value_key)

    assert all(isinstance(el, int) for el in expected_as_int_list)
    assert all(isinstance(el, str) for el in expected_as_str_list)
    assert isinstance(expected_as_str_list, list)

    assert isinstance(expected_as_int, int)
    assert isinstance(expected_as_str, str)
def version_mocks():
    """Register `responses` mocks for the API version check and for creating
    a new custom-model version."""
    responses.add(
        responses.GET,
        "http://yess/version/",
        json={"major": 2, "versionString": "2.21", "minor": 21},
        status=200,
    )
    responses.add(
        responses.POST,
        "http://yess/customModels/{}/versions/".format(modelID),
        json=version_response,
        status=200,
    )
def mock_get_model(model_type="training", target_type="Regression"):
    """Register `responses` mocks for fetching (GET) and creating (POST) a
    custom model.

    :param model_type: "training" or "inference"; inference models carry two
        extra fields the client reads.
    :param target_type: capitalized target type string reported by the API.
    """
    body = {
        "customModelType": model_type,
        "id": modelID,
        "name": "1",
        "description": "1",
        "targetType": target_type,
        "deployments_count": "1",
        "created_by": "1",
        "updated": "1",
        "created": "1",
        "latestVersion": version_response,
    }
    if model_type == "inference":
        body["language"] = "Python"
        body["trainingDataAssignmentInProgress"] = False
    responses.add(
        responses.GET, "http://yess/customModels/{}/".format(modelID), json=body,
    )
    # The create endpoint has no id in its URL; the original code called
    # .format(modelID) on a placeholder-free string, which was a no-op.
    responses.add(
        responses.POST, "http://yess/customModels/", json=body,
    )
def mock_post_blueprint():
    """Register a `responses` mock for creating a custom training blueprint."""
    responses.add(
        responses.POST,
        "http://yess/customTrainingBlueprints/",
        json={
            "userBlueprintId": "2",
            "custom_model": {"id": "1", "name": "1"},
            "custom_model_version": {"id": "1", "label": "1"},
            "execution_environment": {"id": "1", "name": "1"},
            "execution_environment_version": {"id": "1", "label": "1"},
            "training_history": [],
        },
    )
def mock_post_add_to_repository():
    """Register a `responses` mock for adding a user blueprint to a project's
    repository."""
    responses.add(
        responses.POST,
        "http://yess/projects/{}/blueprints/fromUserBlueprint/".format(projectID),
        json={"id": "1"},
    )
def mock_get_env():
    """Register a `responses` mock for fetching the execution environment."""
    responses.add(
        responses.GET,
        "http://yess/executionEnvironments/{}/".format(environmentID),
        json={
            "id": "1",
            "name": "hi",
            "latestVersion": {"id": "hii", "environment_id": environmentID, "build_status": "yes"},
        },
    )
def mock_train_model():
    """Register `responses` mocks for starting a model training job and for
    polling its status (the POST's Location header points at the job URL)."""
    responses.add(
        responses.POST,
        "http://yess/projects/{}/models/".format(projectID),
        json={},
        adding_headers={"Location": "the/moon"},
    )
    responses.add(
        responses.GET,
        "http://yess/projects/{}/modelJobs/the/".format(projectID),
        json={
            "is_blocked": False,
            "id": "55",
            "processes": [],
            "model_type": "fake",
            "project_id": projectID,
            "blueprint_id": "1",
        },
    )
@responses.activate
@pytest.mark.parametrize(
    "config_yaml",
    [
        "training_metadata_yaml",
        "training_metadata_yaml_with_proj",
        "inference_metadata_yaml",
        "inference_multiclass_metadata_yaml",
        "inference_multiclass_metadata_yaml_label_file",
    ],
)
@pytest.mark.parametrize("existing_model_id", [None, modelID])
def test_push(request, config_yaml, existing_model_id, multiclass_labels, tmp_path):
    """End-to-end check of drum push against a fully mocked DataRobot API.

    Verifies the exact sequence of HTTP calls for training vs inference
    models, with and without a pre-existing model id, and that multiclass
    labels are forwarded on model creation.
    """
    config_yaml = request.getfixturevalue(config_yaml)
    if existing_model_id:
        config_yaml = config_yaml + "\nmodelID: {}".format(existing_model_id)

    with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
        f.write(config_yaml)
    config = read_model_metadata_yaml(tmp_path)

    version_mocks()
    mock_post_blueprint()
    mock_post_add_to_repository()
    mock_get_model(model_type=config["type"], target_type=config["targetType"].capitalize())
    mock_get_env()
    mock_train_model()
    push_fn = _push_training if config["type"] == "training" else _push_inference
    push_fn(config, code_dir="", endpoint="http://Yess", token="okay")

    calls = responses.calls
    if existing_model_id is None:
        # Without an existing id, the first interesting call creates the model;
        # subsequent call indices shift by one.
        assert calls[1].request.path_url == "/customModels/" and calls[1].request.method == "POST"
        if config["targetType"] == TargetType.MULTICLASS.value:
            sent_labels = json.loads(calls[1].request.body)["classLabels"]
            assert sent_labels == multiclass_labels
        call_shift = 1
    else:
        call_shift = 0
    assert (
        calls[call_shift + 1].request.path_url == "/customModels/{}/versions/".format(modelID)
        and calls[call_shift + 1].request.method == "POST"
    )
    if push_fn == _push_training:
        assert (
            calls[call_shift + 2].request.path_url == "/customTrainingBlueprints/"
            and calls[call_shift + 2].request.method == "POST"
        )
        if "trainingModel" in config:
            assert (
                calls[call_shift + 3].request.path_url
                == "/projects/{}/blueprints/fromUserBlueprint/".format(projectID)
                and calls[call_shift + 3].request.method == "POST"
            )
            assert (
                calls[call_shift + 4].request.path_url == "/projects/abc123/models/"
                and calls[call_shift + 4].request.method == "POST"
            )
            assert len(calls) == 6 + call_shift
        else:
            assert len(calls) == 3 + call_shift
    else:
        assert len(calls) == 2 + call_shift
@responses.activate
@pytest.mark.parametrize(
    "config_yaml", ["inference_binary_metadata_yaml_no_target_name",],
)
def test_push_no_target_name_in_yaml(request, config_yaml, tmp_path):
    """drum push rejects inference metadata that lacks targetName."""
    config_yaml = request.getfixturevalue(config_yaml)
    config_yaml = config_yaml + "\nmodelID: {}".format(modelID)

    with open(os.path.join(tmp_path, MODEL_CONFIG_FILENAME), mode="w") as f:
        f.write(config_yaml)
    config = read_model_metadata_yaml(tmp_path)

    from argparse import Namespace

    options = Namespace(code_dir=tmp_path, model_config=config)
    # Raw string: "\[" in a plain string is an invalid escape sequence
    # (SyntaxWarning on modern Python); the regex needs r"...".
    with pytest.raises(DrumCommonException, match=r"Missing keys: \['targetName'\]"):
        drum_push(options)
def test_output_in_code_dir():
    """output_in_code_dir reports whether the output path nests inside code_dir."""
    code_dir = "/test/code/is/here"
    outside_dir = "/test/not/code"
    nested_dir = "/test/code/is/here/output"
    assert not output_in_code_dir(code_dir, outside_dir)
    assert output_in_code_dir(code_dir, nested_dir)
def test_output_dir_copy():
    """Copying a code dir into an output folder skips __pycache__ and does not
    recurse into the output folder itself."""
    with tempfile.TemporaryDirectory() as tempdir:
        # setup
        file = Path(tempdir, "test.py")
        file.touch()
        Path(tempdir, "__pycache__").mkdir()
        out_dir = Path(tempdir, "out")
        out_dir.mkdir()

        # test
        create_custom_inference_model_folder(tempdir, str(out_dir))
        assert Path(out_dir, "test.py").exists()
        assert not Path(out_dir, "__pycache__").exists()
        assert not Path(out_dir, "out").exists()
def test_read_structured_input_arrow_csv_na_consistency(tmp_path):
    """
    Test that N/A values (None, numpy.nan) are handled consistently when using
    CSV vs Arrow as a prediction payload format.
    1. Make CSV and Arrow prediction payloads from the same dataframe
    2. Read both payloads
    3. Assert the resulting dataframes are equal
    """
    # arrange
    df = pd.DataFrame({"col_int": [1, np.nan, None], "col_obj": ["a", np.nan, None]})

    csv_filename = os.path.join(tmp_path, "X.csv")
    with open(csv_filename, "w") as f:
        f.write(df.to_csv(index=False))

    arrow_filename = os.path.join(tmp_path, "X.arrow")
    with open(arrow_filename, "wb") as f:
        f.write(pyarrow.ipc.serialize_pandas(df).to_pybytes())

    # act
    csv_df = StructuredInputReadUtils.read_structured_input_file_as_df(csv_filename)
    arrow_df = StructuredInputReadUtils.read_structured_input_file_as_df(arrow_filename)

    # assert
    # Plain defs instead of lambda assignments (PEP 8 / E731).
    def is_nan(x):
        return isinstance(x, float) and np.isnan(x)

    def is_none(x):
        return x is None

    assert_frame_equal(csv_df, arrow_df)
    # `assert_frame_equal` doesn't make a difference between None and np.nan.
    # To do an exact comparison, compare None and np.nan "masks".
    assert_frame_equal(csv_df.applymap(is_nan), arrow_df.applymap(is_nan))
    assert_frame_equal(csv_df.applymap(is_none), arrow_df.applymap(is_none))
class TestJavaPredictor:
    """Tests for the JavaPredictor's py4j plumbing."""

    # Verifying that correct code branch is taken depending on the data size.
    # As jp object is not properly configured, just check for the expected error message.
    @pytest.mark.parametrize(
        "data_size, error_message",
        [(2, "object has no attribute 'predict'"), (40000, "object has no attribute 'predictCSV'")],
    )
    def test_java_predictor_py4j_data(self, data_size, error_message):
        """Small payloads use one code path, large payloads another; each
        surfaces a distinct AttributeError on the unconfigured predictor."""
        from datarobot_drum.drum.language_predictors.java_predictor.java_predictor import (
            JavaPredictor,
        )

        jp = JavaPredictor()
        with pytest.raises(AttributeError, match=error_message):
            jp._predict(binary_data=b"d" * data_size)

    @patch.object(JavaPredictor, "find_free_port", return_value=80)
    def test_run_java_server_entry_point_fail(self, mock_find_free_port):
        """Forcing port 80 makes the java gateway fail to start."""
        pred = JavaPredictor()
        pred.model_artifact_extension = ".jar"
        with pytest.raises(DrumCommonException, match="java gateway failed to start"):
            pred._run_java_server_entry_point()

    def test_run_java_server_entry_point_succeed(self):
        """The java gateway starts, and can be connected to and shut down."""
        pred = JavaPredictor()
        pred.model_artifact_extension = ".jar"
        pred._run_java_server_entry_point()
        # required to properly shutdown py4j Gateway
        pred._setup_py4j_client_connection()
        pred._stop_py4j()
def input_requirements_yaml(
    field: Fields, condition: Conditions, values: List[Union[int, Values]]
) -> str:
    """Render a one-entry ``input_requirements`` type schema as YAML text."""
    return yaml.dump(
        get_yaml_dict(condition, field, values, RequirementTypes.INPUT_REQUIREMENTS)
    )
def output_requirements_yaml(
    field: Fields, condition: Conditions, values: List[Union[int, Values]]
) -> str:
    """Render a one-entry ``output_requirements`` type schema as YAML text."""
    return yaml.dump(
        get_yaml_dict(condition, field, values, RequirementTypes.OUTPUT_REQUIREMENTS)
    )
def get_yaml_dict(condition, field, values, top_requirements: RequirementTypes) -> dict:
    """Build the dict form of a one-requirement type schema.

    A single-element ``values`` list collapses to a scalar value, matching
    the YAML shorthand the schema validator accepts.
    """

    def _coerce(value):
        # Values enum members are serialized via their string form.
        return str(value) if isinstance(value, Values) else value

    coerced = [_coerce(v) for v in values]
    payload = coerced[0] if len(coerced) == 1 else coerced
    return {
        str(top_requirements): [
            {"field": str(field), "condition": str(condition), "value": payload}
        ]
    }
def get_data(dataset_name: str) -> pd.DataFrame:
    """Load a CSV from the shared ../testdata directory as a DataFrame."""
    testdata_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "testdata"))
    return pd.read_csv(os.path.join(testdata_dir, dataset_name))
# Datasets are read once at import time; tests receive per-test copies via
# the fixtures below so mutations don't leak between tests.
CATS_AND_DOGS = get_data("cats_dogs_small_training.csv")
TEN_K_DIABETES = get_data("10k_diabetes.csv")
IRIS_BINARY = get_data("iris_binary_training.csv")
LENDING_CLUB = get_data("lending_club_reduced.csv")
@pytest.fixture
def lending_club():
    # Fresh copy so tests may mutate (e.g. drop the target column) safely.
    return LENDING_CLUB.copy()
@pytest.fixture
def iris_binary():
    # Fresh copy so tests may mutate (e.g. drop the target column) safely.
    return IRIS_BINARY.copy()
@pytest.fixture
def ten_k_diabetes():
    # Fresh copy so tests may mutate (e.g. drop the target column) safely.
    return TEN_K_DIABETES.copy()
@pytest.fixture
def cats_and_dogs():
    # Fresh copy so tests may mutate (e.g. drop the target column) safely.
    return CATS_AND_DOGS.copy()
class TestSchemaValidator:
    """Behavioral tests for SchemaValidator against real datasets.

    Covers data-type requirements, column counts, missing values, and
    sparsity, for both input and output requirement sections.
    """

    tests_data_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "testdata"))

    @pytest.fixture
    def data(self, iris_binary):
        yield iris_binary

    @pytest.fixture
    def missing_data(self, data):
        """Copy of `data` with ~10% of each column knocked out to NaN."""
        df = data.copy(deep=True)
        for col in df.columns:
            # Use np.nan directly: the `pd.np` alias has been deprecated
            # since pandas 0.25 and removed in pandas 2.0.
            df.loc[df.sample(frac=0.1).index, col] = np.nan
        yield df

    @pytest.fixture
    def sparse_df(self):
        yield pd.DataFrame.sparse.from_spmatrix(scipy.sparse.eye(10))

    @pytest.fixture
    def dense_df(self):
        yield pd.DataFrame(np.zeros((10, 10)))

    @staticmethod
    def yaml_str_to_schema_dict(yaml_str: str) -> dict:
        """this emulates how we cast a yaml to a dict for validation in
        `datarobot_drum.drum.common.read_model_metadata_yaml` and these assumptions
        are tested in: `tests.drum.test_units.test_read_model_metadata_properly_casts_typeschema` """
        schema = load(yaml_str, get_type_schema_yaml_validator())
        revalidate_typeschema(schema)
        return schema.data

    @pytest.mark.parametrize(
        "condition, value, passing_dataset, passing_target, failing_dataset, failing_target",
        [
            (
                Conditions.IN,
                [Values.CAT, Values.NUM],
                "iris_binary",
                "SepalLengthCm",
                "ten_k_diabetes",
                "readmitted",
            ),
            (
                Conditions.EQUALS,
                [Values.NUM],
                "iris_binary",
                "Species",
                "ten_k_diabetes",
                "readmitted",
            ),
            (
                Conditions.NOT_IN,
                [Values.TXT],
                "iris_binary",
                "SepalLengthCm",
                "ten_k_diabetes",
                "readmitted",
            ),
            (
                Conditions.NOT_EQUALS,
                [Values.CAT],
                "iris_binary",
                "Species",
                "lending_club",
                "is_bad",
            ),
            (
                Conditions.EQUALS,
                [Values.IMG],
                "cats_and_dogs",
                "class",
                "ten_k_diabetes",
                "readmitted",
            ),
        ],
        ids=lambda x: str([str(el) for el in x]) if isinstance(x, list) else str(x),
    )
    def test_data_types(
        self,
        condition,
        value,
        passing_dataset,
        passing_target,
        failing_dataset,
        failing_target,
        request,
    ):
        """Each data_types condition accepts the matching dataset and rejects
        the mismatched one (targets are dropped first)."""
        yaml_str = input_requirements_yaml(Fields.DATA_TYPES, condition, value)
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        validator = SchemaValidator(schema_dict)

        good_data = request.getfixturevalue(passing_dataset)
        good_data.drop(passing_target, inplace=True, axis=1)
        assert validator.validate_inputs(good_data)

        bad_data = request.getfixturevalue(failing_dataset)
        bad_data.drop(failing_target, inplace=True, axis=1)
        with pytest.raises(DrumSchemaValidationException):
            validator.validate_inputs(bad_data)

    def test_data_types_raises_error_if_all_type_in_in_are_not_present(self, iris_binary):
        """Because of how it's implemented in DataRobot,
        - field: data_types
          condition: IN
          value:
            - NUM
            - TXT
        requires that the DataFrame's set of types present _EQUALS_ the set: {NUM, TXT},
        but uses the condition: `IN` :shrug:
        """
        condition = Conditions.IN
        value = Values.data_values()

        yaml_str = input_requirements_yaml(Fields.DATA_TYPES, condition, value)
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        validator = SchemaValidator(schema_dict)
        with pytest.raises(DrumSchemaValidationException):
            validator.validate_inputs(iris_binary)

    @pytest.mark.parametrize(
        "single_value_condition",
        [
            Conditions.EQUALS,
            Conditions.NOT_EQUALS,
            Conditions.GREATER_THAN,
            Conditions.NOT_GREATER_THAN,
            Conditions.LESS_THAN,
            Conditions.NOT_LESS_THAN,
        ],
    )
    def test_instantiating_validator_raises_error_for_too_many_values(
        self, single_value_condition, iris_binary
    ):
        """Single-value conditions reject multi-value lists at construction."""
        yaml_str = input_requirements_yaml(Fields.NUMBER_OF_COLUMNS, single_value_condition, [1, 2])
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        with pytest.raises(DrumSchemaValidationException):
            SchemaValidator(schema_dict)

    @pytest.mark.parametrize(
        "condition, value, fail_expected",
        [
            (Conditions.EQUALS, [6], False),
            (Conditions.EQUALS, [3], True),
            (Conditions.IN, [2, 4, 6], False),
            (Conditions.IN, [1, 2, 3], True),
            (Conditions.LESS_THAN, [7], False),
            (Conditions.LESS_THAN, [3], True),
            (Conditions.GREATER_THAN, [4], False),
            (Conditions.GREATER_THAN, [10], True),
            (Conditions.NOT_EQUALS, [5], False),
            (Conditions.NOT_EQUALS, [6], True),
            (Conditions.NOT_IN, [1, 2, 3], False),
            (Conditions.NOT_IN, [2, 4, 6], True),
            (Conditions.NOT_GREATER_THAN, [6], False),
            (Conditions.NOT_GREATER_THAN, [2], True),
            (Conditions.NOT_LESS_THAN, [3], False),
            (Conditions.NOT_LESS_THAN, [100], True),
        ],
        ids=lambda x: str(x),
    )
    def test_num_columns(self, data, condition, value, fail_expected):
        """number_of_columns checks against the iris dataset (6 columns)."""
        yaml_str = input_requirements_yaml(Fields.NUMBER_OF_COLUMNS, condition, value)
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        validator = SchemaValidator(schema_dict)
        if fail_expected:
            with pytest.raises(DrumSchemaValidationException):
                validator.validate_inputs(data)
        else:
            assert validator.validate_inputs(data)

    @pytest.mark.parametrize(
        "value, missing_ok", [(Values.FORBIDDEN, False), (Values.SUPPORTED, True)]
    )
    def test_missing_input(self, data, missing_data, value, missing_ok):
        """contains_missing input requirement: FORBIDDEN rejects NaNs."""
        yaml_str = input_requirements_yaml(Fields.CONTAINS_MISSING, Conditions.EQUALS, [value])
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        validator = SchemaValidator(schema_dict)

        assert validator.validate_inputs(data)
        if missing_ok:
            assert validator.validate_inputs(missing_data)
        else:
            with pytest.raises(DrumSchemaValidationException):
                validator.validate_inputs(missing_data)

    @pytest.mark.parametrize("value, missing_ok", [(Values.NEVER, False), (Values.DYNAMIC, True)])
    def test_missing_output(self, data, missing_data, value, missing_ok):
        """contains_missing output requirement: NEVER rejects NaNs."""
        yaml_str = output_requirements_yaml(Fields.CONTAINS_MISSING, Conditions.EQUALS, [value])
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        validator = SchemaValidator(schema_dict)

        assert validator.validate_outputs(data)
        if missing_ok:
            assert validator.validate_outputs(missing_data)
        else:
            with pytest.raises(DrumSchemaValidationException):
                validator.validate_outputs(missing_data)

    @pytest.mark.parametrize(
        "value, sparse_ok, dense_ok",
        [
            (Values.FORBIDDEN, False, True),
            (Values.SUPPORTED, True, True),
            (Values.REQUIRED, True, False),
        ],
    )
    def test_sparse_input(self, sparse_df, dense_df, value, sparse_ok, dense_ok):
        """sparse input requirement against sparse and dense frames."""
        yaml_str = input_requirements_yaml(Fields.SPARSE, Conditions.EQUALS, [value])
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        validator = SchemaValidator(schema_dict)

        self._assert_validation(validator.validate_inputs, sparse_df, should_pass=sparse_ok)
        self._assert_validation(validator.validate_inputs, dense_df, should_pass=dense_ok)

    @pytest.mark.parametrize(
        "value, sparse_ok, dense_ok",
        [
            (Values.NEVER, False, True),
            (Values.DYNAMIC, True, True),
            (Values.ALWAYS, True, False),
            (Values.IDENTITY, False, True),
        ],
    )
    def test_sparse_output(self, sparse_df, dense_df, value, sparse_ok, dense_ok):
        """sparse output requirement against sparse and dense frames."""
        yaml_str = output_requirements_yaml(Fields.SPARSE, Conditions.EQUALS, [value])
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        validator = SchemaValidator(schema_dict)

        self._assert_validation(validator.validate_outputs, sparse_df, should_pass=sparse_ok)
        self._assert_validation(validator.validate_outputs, dense_df, should_pass=dense_ok)

    @pytest.mark.parametrize(
        "value, sparse_ok, dense_ok",
        [(Values.FORBIDDEN, False, True), (Values.REQUIRED, True, False),],
    )
    def test_multiple_input_requirements(self, sparse_df, dense_df, value, sparse_ok, dense_ok):
        """Multiple input requirements (plus an unrelated output requirement)
        are all enforced together on input validation."""
        yaml_str = input_requirements_yaml(Fields.SPARSE, Conditions.EQUALS, [value])
        num_input = input_requirements_yaml(
            Fields.DATA_TYPES, Conditions.EQUALS, [Values.NUM]
        ).replace("input_requirements:\n", "")
        random_output = output_requirements_yaml(
            Fields.NUMBER_OF_COLUMNS, Conditions.EQUALS, [10000]
        )
        yaml_str += num_input
        yaml_str += random_output
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        validator = SchemaValidator(schema_dict)

        self._assert_validation(validator.validate_inputs, sparse_df, should_pass=sparse_ok)
        self._assert_validation(validator.validate_inputs, dense_df, should_pass=dense_ok)

    @pytest.mark.parametrize(
        "value, sparse_ok, dense_ok", [(Values.NEVER, False, True), (Values.ALWAYS, True, False),],
    )
    def test_multiple_output_requirements(self, sparse_df, dense_df, value, sparse_ok, dense_ok):
        """Multiple output requirements (plus an unrelated input requirement)
        are all enforced together on output validation."""
        yaml_str = output_requirements_yaml(Fields.SPARSE, Conditions.EQUALS, [value])
        num_output = output_requirements_yaml(
            Fields.DATA_TYPES, Conditions.EQUALS, [Values.NUM]
        ).replace("output_requirements:\n", "")
        random_input = input_requirements_yaml(Fields.NUMBER_OF_COLUMNS, Conditions.EQUALS, [10000])
        yaml_str += num_output
        yaml_str += random_input
        schema_dict = self.yaml_str_to_schema_dict(yaml_str)
        validator = SchemaValidator(schema_dict)

        self._assert_validation(validator.validate_outputs, sparse_df, should_pass=sparse_ok)
        self._assert_validation(validator.validate_outputs, dense_df, should_pass=dense_ok)

    @staticmethod
    def _assert_validation(validator_method, data_frame, should_pass):
        """Assert the validator call passes, or raises when it should not."""
        if should_pass:
            assert validator_method(data_frame)
        else:
            with pytest.raises(DrumSchemaValidationException):
                validator_method(data_frame)
class TestRevalidateTypeSchemaDataTypes:
    """Schema-level validation of the data_types field: which conditions and
    values the type-schema YAML accepts or rejects."""

    field = Fields.DATA_TYPES

    @pytest.mark.parametrize("condition", Conditions.non_numeric())
    def test_datatypes_allowed_conditions(self, condition):
        """Non-numeric conditions are valid for data_types."""
        values = [Values.NUM, Values.TXT]
        input_data_type_str = input_requirements_yaml(self.field, condition, values)
        output_data_type_str = output_requirements_yaml(self.field, condition, values)
        for data_type_str in (input_data_type_str, output_data_type_str):
            parsed_yaml = load(data_type_str, get_type_schema_yaml_validator())
            revalidate_typeschema(parsed_yaml)

    @pytest.mark.parametrize("condition", list(set(Conditions) - set(Conditions.non_numeric())))
    def test_datatypes_unallowed_conditions(self, condition):
        """Numeric comparison conditions are rejected for data_types."""
        values = [Values.NUM, Values.TXT]
        input_data_type_str = input_requirements_yaml(self.field, condition, values)
        output_data_type_str = output_requirements_yaml(self.field, condition, values)
        for data_type_str in (input_data_type_str, output_data_type_str):
            parsed_yaml = load(data_type_str, get_type_schema_yaml_validator())
            with pytest.raises(YAMLValidationError):
                revalidate_typeschema(parsed_yaml)

    @pytest.mark.parametrize("value", Values.data_values())
    def test_datatyped_allowed_values(self, value):
        """Every data value is individually accepted."""
        condition = Conditions.EQUALS
        input_data_type_str = input_requirements_yaml(self.field, condition, [value])
        output_data_type_str = output_requirements_yaml(self.field, condition, [value])
        for data_type_str in (input_data_type_str, output_data_type_str):
            parsed_yaml = load(data_type_str, get_type_schema_yaml_validator())
            revalidate_typeschema(parsed_yaml)

    @pytest.mark.parametrize("value", list(set(Values) - set(Values.data_values())))
    def test_datatypes_unallowed_values(self, value):
        """Non-data values (sparsity/missing markers) are rejected."""
        condition = Conditions.EQUALS
        input_data_type_str = input_requirements_yaml(self.field, condition, [value])
        output_data_type_str = output_requirements_yaml(self.field, condition, [value])
        for data_type_str in (input_data_type_str, output_data_type_str):
            parsed_yaml = load(data_type_str, get_type_schema_yaml_validator())
            with pytest.raises(YAMLValidationError):
                revalidate_typeschema(parsed_yaml)

    def test_datatypes_multiple_values(self):
        """All data values may be listed at once."""
        condition = Conditions.IN
        values = Values.data_values()
        input_data_type_str = input_requirements_yaml(self.field, condition, values)
        output_data_type_str = output_requirements_yaml(self.field, condition, values)
        for data_type_str in (input_data_type_str, output_data_type_str):
            parsed_yaml = load(data_type_str, get_type_schema_yaml_validator())
            revalidate_typeschema(parsed_yaml)

    @pytest.mark.parametrize(
        "permutation",
        [[Values.CAT, Values.NUM], [Values.NUM, Values.CAT]],
        ids=lambda x: str([str(el) for el in x]),
    )
    def test_regression_test_datatypes_multi_values(self, permutation):
        """Regression test: value order must not affect validation."""
        corner_case = input_requirements_yaml(Fields.DATA_TYPES, Conditions.IN, permutation)
        parsed_yaml = load(corner_case, get_type_schema_yaml_validator())
        revalidate_typeschema(parsed_yaml)

    def test_datatypes_mix_allowed_and_unallowed_values(self):
        """One bad value in a list poisons the whole requirement."""
        values = [Values.NUM, Values.REQUIRED]
        condition = Conditions.EQUALS
        input_data_type_str = input_requirements_yaml(self.field, condition, values)
        output_data_type_str = output_requirements_yaml(self.field, condition, values)
        for data_type_str in (input_data_type_str, output_data_type_str):
            parsed_yaml = load(data_type_str, get_type_schema_yaml_validator())
            with pytest.raises(YAMLValidationError):
                revalidate_typeschema(parsed_yaml)
class TestRevalidateTypeSchemaSparse:
field = Fields.SPARSE
@pytest.mark.parametrize("value", Values.input_values())
def test_sparsity_input_allowed_values(self, value):
condition = Conditions.EQUALS
sparse_yaml_str = input_requirements_yaml(self.field, condition, [value])
parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
revalidate_typeschema(parsed_yaml)
@pytest.mark.parametrize("value", list(set(Values) - set(Values.input_values())))
def test_sparsity_input_disallowed_values(self, value):
condition = Conditions.EQUALS
sparse_yaml_str = input_requirements_yaml(self.field, condition, [value])
parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
with pytest.raises(YAMLValidationError):
revalidate_typeschema(parsed_yaml)
def test_sparsity_input_only_single_value(self):
condition = Conditions.EQUALS
sparse_yaml_str = input_requirements_yaml(self.field, condition, Values.input_values())
parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
with pytest.raises(YAMLValidationError):
revalidate_typeschema(parsed_yaml)
@pytest.mark.parametrize("value", Values.output_values())
def test_sparsity_output_allowed_values(self, value):
condition = Conditions.EQUALS
sparse_yaml_str = output_requirements_yaml(self.field, condition, [value])
parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
revalidate_typeschema(parsed_yaml)
@pytest.mark.parametrize("value", list(set(Values) - set(Values.output_values())))
def test_sparsity_output_disallowed_values(self, value):
condition = Conditions.EQUALS
sparse_yaml_str = output_requirements_yaml(self.field, condition, [value])
parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
with pytest.raises(YAMLValidationError):
revalidate_typeschema(parsed_yaml)
def test_sparsity_output_only_single_value(self):
condition = Conditions.EQUALS
sparse_yaml_str = output_requirements_yaml(self.field, condition, Values.output_values())
parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
with pytest.raises(YAMLValidationError):
revalidate_typeschema(parsed_yaml)
    @pytest.mark.parametrize("condition", list(set(Conditions) - {Conditions.EQUALS}))
    def test_sparsity_input_output_disallows_conditions(self, condition):
        """Every condition other than EQUALS is rejected for this field."""
        sparse_yaml_input_str = input_requirements_yaml(self.field, condition, [Values.REQUIRED])
        sparse_yaml_output_str = output_requirements_yaml(self.field, condition, [Values.ALWAYS])
        for yaml_str in (sparse_yaml_input_str, sparse_yaml_output_str):
            parsed_yaml = load(yaml_str, get_type_schema_yaml_validator())
            with pytest.raises(YAMLValidationError):
                revalidate_typeschema(parsed_yaml)
class TestRevalidateTypeSchemaContainsMissing:
    """Validation rules for the CONTAINS_MISSING field.

    Inputs may only declare FORBIDDEN or SUPPORTED, outputs only NEVER or
    DYNAMIC, and in both directions exactly one value under the EQUALS
    condition is accepted.
    """

    field = Fields.CONTAINS_MISSING

    @pytest.mark.parametrize("value", [Values.FORBIDDEN, Values.SUPPORTED])
    def test_contains_missing_input_allowed_values(self, value):
        """Each input-legal value revalidates cleanly under EQUALS."""
        condition = Conditions.EQUALS
        sparse_yaml_str = input_requirements_yaml(self.field, condition, [value])
        parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
        revalidate_typeschema(parsed_yaml)

    @pytest.mark.parametrize("value", list(set(Values) - {Values.FORBIDDEN, Values.SUPPORTED}))
    def test_contains_missing_input_disallowed_values(self, value):
        """Any other value is rejected on the input side."""
        condition = Conditions.EQUALS
        sparse_yaml_str = input_requirements_yaml(self.field, condition, [value])
        parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
        with pytest.raises(YAMLValidationError):
            revalidate_typeschema(parsed_yaml)

    def test_contains_missing_input_only_single_value(self):
        """EQUALS takes exactly one value; both legal values at once fail."""
        condition = Conditions.EQUALS
        sparse_yaml_str = input_requirements_yaml(
            self.field, condition, [Values.FORBIDDEN, Values.SUPPORTED]
        )
        parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
        with pytest.raises(YAMLValidationError):
            revalidate_typeschema(parsed_yaml)

    @pytest.mark.parametrize("value", [Values.NEVER, Values.DYNAMIC])
    def test_contains_missing_output_allowed_values(self, value):
        """Each output-legal value revalidates cleanly under EQUALS."""
        condition = Conditions.EQUALS
        sparse_yaml_str = output_requirements_yaml(self.field, condition, [value])
        parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
        revalidate_typeschema(parsed_yaml)

    @pytest.mark.parametrize("value", list(set(Values) - {Values.NEVER, Values.DYNAMIC}))
    def test_contains_missing_output_disallowed_values(self, value):
        """Any other value is rejected on the output side."""
        condition = Conditions.EQUALS
        sparse_yaml_str = output_requirements_yaml(self.field, condition, [value])
        parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
        with pytest.raises(YAMLValidationError):
            revalidate_typeschema(parsed_yaml)

    def test_contains_missing_output_only_single_value(self):
        """EQUALS takes exactly one value on the output side as well."""
        condition = Conditions.EQUALS
        sparse_yaml_str = output_requirements_yaml(
            self.field, condition, [Values.NEVER, Values.DYNAMIC]
        )
        parsed_yaml = load(sparse_yaml_str, get_type_schema_yaml_validator())
        with pytest.raises(YAMLValidationError):
            revalidate_typeschema(parsed_yaml)

    @pytest.mark.parametrize("condition", list(set(Conditions) - {Conditions.EQUALS}))
    def test_contains_missing_input_output_disallows_conditions(self, condition):
        """Only EQUALS is a legal condition for this field.

        Use values that are legal for CONTAINS_MISSING (the original used
        REQUIRED/ALWAYS, copy-pasted from the sparsity class) so the bad
        condition is the only invalid element being exercised.
        """
        sparse_yaml_input_str = input_requirements_yaml(self.field, condition, [Values.FORBIDDEN])
        sparse_yaml_output_str = output_requirements_yaml(self.field, condition, [Values.NEVER])
        for yaml_str in (sparse_yaml_input_str, sparse_yaml_output_str):
            parsed_yaml = load(yaml_str, get_type_schema_yaml_validator())
            with pytest.raises(YAMLValidationError):
                revalidate_typeschema(parsed_yaml)
class TestRevalidateTypeSchemaNumberOfColumns:
    """Validation rules for NUMBER_OF_COLUMNS: any condition, int values only."""

    field = Fields.NUMBER_OF_COLUMNS

    @pytest.mark.parametrize("condition", list(Conditions))
    def test_number_of_columns_can_use_all_conditions(self, condition):
        """Unlike the enum-valued fields, every condition is legal here."""
        sparse_yaml_input_str = input_requirements_yaml(self.field, condition, [1])
        sparse_yaml_output_str = output_requirements_yaml(self.field, condition, [1])
        for yaml_str in (sparse_yaml_input_str, sparse_yaml_output_str):
            parsed_yaml = load(yaml_str, get_type_schema_yaml_validator())
            revalidate_typeschema(parsed_yaml)

    def test_number_of_columns_can_have_multiple_ints(self):
        """Several integer values (including zero and negatives) are accepted."""
        yaml_str = input_requirements_yaml(self.field, Conditions.EQUALS, [1, 0, -1])
        parsed_yaml = load(yaml_str, get_type_schema_yaml_validator())
        revalidate_typeschema(parsed_yaml)

    @pytest.mark.parametrize("value", list(Values))
    def test_number_of_columns_cannot_use_other_values(self, value):
        """Enum values that belong to other fields are rejected."""
        yaml_str = input_requirements_yaml(self.field, Conditions.EQUALS, [value])
        parsed_yaml = load(yaml_str, get_type_schema_yaml_validator())
        with pytest.raises(YAMLValidationError):
            revalidate_typeschema(parsed_yaml)

    def test_revalidate_typeschema_mutates_yaml_num_columns_to_int(self):
        """Revalidation coerces the string payloads to real ints.

        (Renamed from ``test_revalidate_typescehma_...`` — identifier typo.)
        """
        yaml_single_int = input_requirements_yaml(self.field, Conditions.EQUALS, [1])
        yaml_int_list = input_requirements_yaml(self.field, Conditions.EQUALS, [1, 2])
        parsed_single_int = load(yaml_single_int, get_type_schema_yaml_validator())
        parsed_int_list = load(yaml_int_list, get_type_schema_yaml_validator())

        def get_value(yaml):
            # Underlying payload of the first input requirement's value node.
            return yaml[str(RequirementTypes.INPUT_REQUIREMENTS)][0]["value"].data

        assert isinstance(get_value(parsed_single_int), str)
        assert isinstance(get_value(parsed_int_list)[0], str)
        revalidate_typeschema(parsed_single_int)
        revalidate_typeschema(parsed_int_list)
        assert isinstance(get_value(parsed_single_int), int)
        assert isinstance(get_value(parsed_int_list)[0], int)
class TestRevalidateTypeSchemaMixedCases:
    """End-to-end checks mixing several fields in one requirements document."""
    @pytest.fixture
    def passing_yaml_string(self):
        """A requirements document that is fully valid for both directions."""
        yield dedent(
            """
            input_requirements:
            - field: data_types
              condition: IN
              value:
                - NUM
            - field: sparse
              condition: EQUALS
              value: FORBIDDEN
            output_requirements:
            - field: data_types
              condition: EQUALS
              value: NUM
            - field: sparse
              condition: EQUALS
              value: NEVER
            """
        )
    def test_happy_path(self, passing_yaml_string):
        """The untouched fixture passes both load-time and revalidation checks."""
        parsed_yaml = load(passing_yaml_string, get_type_schema_yaml_validator())
        revalidate_typeschema(parsed_yaml)
    @pytest.mark.parametrize("requirements_key", list(RequirementTypes))
    def test_failing_on_bad_requirements_key(self, requirements_key, passing_yaml_string):
        """Corrupting a top-level requirements key fails already at load time."""
        bad_yaml = passing_yaml_string.replace(str(requirements_key), "oooooops")
        with pytest.raises(YAMLValidationError):
            load(bad_yaml, get_type_schema_yaml_validator())
    def test_failing_on_bad_field(self, passing_yaml_string):
        """An unknown field name fails at load time."""
        bad_yaml = passing_yaml_string.replace("sparse", "oooooops")
        with pytest.raises(YAMLValidationError):
            load(bad_yaml, get_type_schema_yaml_validator())
    def test_failing_on_bad_condition(self, passing_yaml_string):
        """An unknown condition loads, but is rejected by revalidation."""
        bad_yaml = passing_yaml_string.replace("EQUALS", "oooooops")
        parsed_yaml = load(bad_yaml, get_type_schema_yaml_validator())
        with pytest.raises(YAMLValidationError):
            revalidate_typeschema(parsed_yaml)
    def test_failing_on_bad_value(self, passing_yaml_string):
        """An unknown value loads, but is rejected by revalidation."""
        bad_yaml = passing_yaml_string.replace("NUM", "oooooops")
        parsed_yaml = load(bad_yaml, get_type_schema_yaml_validator())
        with pytest.raises(YAMLValidationError):
            revalidate_typeschema(parsed_yaml)
|
22,932 | e389de7303a9baf50e9bcf16bf8e44a64aad9c31 | # coding: utf-8
"""
Exemplo de uma classe com descritores
A classe ItemPedido deve ser instanciada com os dados essenciais que sao:
descricao do item, preco unitario e quantidade.
>>> bolas = ItemPedido('bola de golf', 2, 10)
>>> bolas.descr
'bola de golf'
>>> bolas.qtd
10
O atributo qtd de um ItemPedido nunca pode ser <= 0:
>>> duendes = ItemPedido('duende verde', 2.99, 0)
Traceback (most recent call last):
...
TypeError: qtd deve ser > 0
>>> duendes = ItemPedido('duende verde', 2.99, 13)
>>> duendes.qtd
13
>>> duendes.qtd = -1
Traceback (most recent call last):
...
TypeError: qtd deve ser > 0
>>> duendes.qtd = 20
>>> duendes.qtd
20
O preco também nao pode ser <= 0:
>>> saci = ItemPedido('saci', -1, 10)
Traceback (most recent call last):
...
TypeError: pr_unitario deve ser > 0
"""
class Quantidade(object):
    """Descriptor enforcing a strictly positive value.

    On first assignment it discovers its own attribute name by scanning the
    owner class's __dict__, then stores the real value under a '__'-prefixed
    slot on the instance.
    """
    def __set__(self, instance, valor):
        if not hasattr(self, 'nome_atr'):
            # Lazy name discovery: find which class attribute this
            # descriptor instance is bound to.
            for nome, atr in instance.__class__.__dict__.items():
                if atr is self:
                    self.nome_atr = '__'+nome
                    break
            else: # only if the for loop terminates without break
                assert False, 'descriptor not found in class'
        if valor < 1:
            # Strip the '__' prefix to report the public attribute name.
            raise TypeError('%s deve ser > 0' % self.nome_atr[2:])
        setattr(instance, self.nome_atr, valor)
    def __get__(self, instance, owner):
        return getattr(instance, self.nome_atr)
class ItemPedido(object):
    """A single line item of an order (descr, unit price, quantity)."""
    # Both numeric fields are validated by the Quantidade descriptor (> 0).
    qtd = Quantidade()
    pr_unitario = Quantidade()
    def __init__(self, descr, pr_unitario, qtd):
        self.descr = descr
        self.qtd = qtd
        self.pr_unitario = pr_unitario
|
22,933 | 4200a77614fb9458b1bcce624eaea26c73a3c1ae | test_case=input()
# Python 2 script: for each test case, read two strings and report "YES"
# when they share at least one lowercase letter, otherwise "NO".
for num in range(0,test_case):
    first=raw_input()
    second=raw_input()
    # NOTE(review): len_first/len_second are never used — dead locals.
    len_first=len(first)
    len_second=len(second)
    counter=0
    # Probe all 26 lowercase letters for one present in both strings.
    for alpha_val in range(0,26):
        if(chr(ord('a')+alpha_val) in first and chr(ord('a')+alpha_val) in second):
            counter=1
            break
    if(counter==1):
        print "YES"
    if(counter==0):
        print "NO"
|
22,934 | 9afc3e58d6a81be5c5c92248f15c7100a0a3319f | #Faça um programa que peça a base
#e a altura de um retângulo e calcule
#e mostre na tela a área e o perímetro.
# Read a rectangle's base and height, then print its area and perimeter.
# (Prompts and output strings are intentionally kept in Portuguese.)
base=float(input('Digite a base: '))
altura=float(input('Digite a altura: '))
area=base*altura
perimetro=2*base+2*altura
print(f'O retângulo digitado tem base {base} e altura {altura}.')
print(f'A área deste retângulo é: {area}')
print(f'O perímetro deste retângulo é: {perimetro}')
|
22,935 | 3a509505cba226630e2977924aca873354ae2d00 | import requests # library to fetch the html contect of given page
import sqlite3 # library to connect to SQLite database
from HTMLParser import HTMLParser # Parser used to parse HTML pages
conn = sqlite3.connect("imdb.db") # Connecting to SQLite database named imdb.db
cursor = conn.cursor()
# NOTE(review): CREATE TABLE will raise if `movies` already exists in
# imdb.db — consider CREATE TABLE IF NOT EXISTS; confirm intended usage.
cursor.execute("CREATE TABLE movies (title text, rating real)")
all_time_gross = requests.get('http://www.imdb.com/boxoffice/alltimegross') # acquiring HTML content of the page that has the list
class movies(HTMLParser):
    """Parser for a single title page: collects the text of the
    <span itemprop="ratingValue"> element into self.rating."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.flag = 0        # nonzero while inside the rating <span>
        self.rating = []     # ratings accumulated across every page fed in
    def handle_starttag(self, tag, attrs):
        if tag == 'span':
            for name, value in attrs:
                if name == 'itemprop' and value == 'ratingValue':
                    self.flag = 1
    def handle_data(self, data):
        # The first data chunk after the rating span opens is the rating text.
        if self.flag:
            self.rating.append(data);
            self.flag = 0
class complete_list(HTMLParser):
    """Parser for the box-office list page.

    For each of the first 100 '/title...' links it synchronously fetches the
    title page (feeding it to the module-level `movie_parser`) and records
    the link text in self.movies_list.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.movie_count = 0   # how many title links have been processed
        self.flag = 0          # set while inside a title <a> tag
        self.movies_list = []
    def handle_starttag(self, tag, attrs):
        if self.movie_count>=100:
            return;
        if tag == 'a':
            for name, value in attrs:
                if name == 'href' and value[:6] == '/title':
                    url = 'http://www.imdb.com'+value
                    # Fetch the title page now so its rating lines up
                    # index-for-index with movies_list.
                    rating = requests.get(url)
                    movie_parser.feed(rating.text)
                    self.movie_count += 1
                    self.flag = 1
    def handle_data(self, data):
        if self.flag:
            self.flag = 0
            self.movies_list.append(data);
# instantiate the parser and feed it some HTML
movie_parser = movies()
parser = complete_list()
parser.feed(all_time_gross.text)
for i in range(100):
    # NOTE(review): building SQL by string concatenation is injection-prone
    # (titles come from scraped HTML) and breaks on titles containing quotes;
    # prefer a parameterized query:
    #   cursor.execute("INSERT INTO movies VALUES (?, ?)", (title, rating))
    cursor.execute("INSERT INTO movies VALUES('"+parser.movies_list[i]+"', '"+movie_parser.rating[i]+"')")
conn.commit()
print "The average of top 100 movies with a gross income of more than 50 million dollars is: ",
for row in cursor.execute('SELECT AVG(rating) FROM movies'):
    print row[0]
|
22,936 | 21f19b45786e962a7f5181c8ad610fe78797f4e7 | from django.forms import ModelForm, TextInput
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.core.exceptions import ValidationError
class SignUpForm(UserCreationForm):
    """Registration form extending Django's UserCreationForm with a required
    email field and Bootstrap-friendly 'form-control' widget classes."""

    # Extend the stock UserCreationForm with a mandatory email address.
    email = forms.EmailField(label="Email Address", required=True)

    class Meta:
        model = User
        fields = ("username", "email", "password1", "password2")

    def __init__(self, *args, **kwargs):
        """Attach the Bootstrap CSS class to every rendered widget."""
        super(SignUpForm, self).__init__(*args, **kwargs)
        for field_name in ("username", "email", "password1", "password2"):
            self.fields[field_name].widget.attrs['class'] = 'form-control'

    def clean_username(self):
        """Normalize the username to lowercase and reject duplicates.

        Raises:
            ValidationError: if a user with this (lowercased) name exists.
        """
        username = self.cleaned_data['username'].lower()
        # exists() lets the database stop at the first match instead of
        # counting every matching row, which is all we need here.
        if User.objects.filter(username=username).exists():
            raise ValidationError("Username already exists")
        return username
|
22,937 | 0c52d35c94a987e5c0ae78ce143dd91b3adff233 | #!/usr/bin/env python3
import argparse
import json
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
from Snarl.src import json_utils
from Snarl.src.Remote.Server import Server
# Use argparse to read arguments
parser = argparse.ArgumentParser()
parser.add_argument('--levels', action='store', nargs='?', type=str, default='snarl.levels')
parser.add_argument('--clients', action='store', type=int, nargs='?', default=4)
parser.add_argument('--wait', action='store', type=int, nargs='?', default=60)
parser.add_argument('--observe', action='store_const', const='observe')
parser.add_argument('--address', action='store', nargs='?', type=str, default='127.0.0.1')
parser.add_argument('--port', action='store', type=int, nargs='?', default=45678)
args = vars(parser.parse_args())
levels_filename = args['levels']
max_num_clients = args['clients']
reg_timeout = args['wait']
# --observe is a store_const flag: present -> 'observe', absent -> None.
should_create_observer = args['observe'] is not None
host_addr = args['address']
port = args['port']
# Read levels file
with open(levels_filename) as f:
    levels_json = json.load(f)
# Convert jsons into objects
levels = [json_utils.json_to_level(level_json) for level_json in levels_json]
server = Server(levels, max_num_clients, reg_timeout, should_create_observer, host_addr, port)
server.start()
|
22,938 | 6ea2e4e4dcc2f823893566067591c8592408cd92 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
logging.debug('这是一条debug日志')
logging.info('这是一条info日志')
logging.error('这是一条error日志')
logging.warning('这是一条warning日志')
|
22,939 | 974ca065bb7ee5d29026fc2b4874d16f450af14f | book = "born to run"
# Truth-prediction exercises: each pair states an expected boolean outcome
# and then prints the actual result of the comparison.
print ("I predict the value to be true")
print (book == "born to run")
hat = "on"
print ("I predict the value to be true")
print (hat != "off")
name = "VIRAG"
print ("I predict the value to be true")
print (name.lower() == "virag")
litre = 5
print ("I believe the value to being true")
print (litre > 4)
time = 10
print ("I believe the value to be true")
print (time > 5 and time < 20)
name = "denzel"
print ("I believe the value to be false")
print (name == "vybz" or name == "tommy")
mice = ["zalman", "microsoft", "apple", "razor"]
print ("I believe the value to be true")
print ("zalman" in mice)
print ("I believe the value to be false")
print ("apple" not in mice)
22,940 | f72791c95dc8fa5d3e9409ae6664954d51ddafd2 | class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
temp = {}
for s in strs:
key = tuple(sorted(s))
if key in temp:
temp[key].append(s)
else:
temp[key] = [s]
return [x for x in temp.values()]
# Manual smoke test: prints the grouped anagrams for the classic example.
s = Solution()
print(s.groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"]))
22,941 | b904f9e1f343c756424bef745218165e9f4d9bd2 | # -*- coding: utf-8 -*-
"""
randonet.activation
~~~~~~~~~~~~~~~~~~~
Generation of activation layers, through a choice parameter
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: see LICENSE for more details.
"""
from randonet.pytorch import (
Sigmoid,
Tanh,
Tanhshrink,
ReLU,
ReLU6,
SELU,
ELU,
CELU,
LeakyReLU,
)
from randonet.generator.param import ChoiceParam
class ActivationParam(ChoiceParam):
    """A non-random choice over four common activation layers."""

    def __init__(self):
        activation_layers = [Sigmoid(), Tanh(), ReLU(), SELU()]
        # Cumulative probabilities 0.25, 0.5, 0.75, 1.0 — uniform over the four.
        cumulative = [0.25, 0.5, 0.75, 1.0]
        ChoiceParam.__init__(
            self,
            name="Activation",
            choices=activation_layers,
            cprobs=cumulative,
            is_random=False,
        )
|
22,942 | 6e7854fd6074c80538564f4337bbf9514307b0e0 | class Node():
def __init__(self, size, keys=None, pointers=None):
self.size = size
self.minimum = size // 2
if keys:
self.keys = keys
else:
self.keys = []
if pointers:
self.pointers = pointers
else:
self.pointers = []
def __str__(self):
return '[' + ','.join(map(str, self.keys)) + ']'
def is_full(self):
return len(self.keys) == self.size
def split(self):
mid = self.size // 2
parent = self.keys[mid]
left = Node(self.size, self.keys[:mid], self.pointers[:mid+1])
right = Node(self.size, self.keys[mid+1:], self.pointers[mid+1:])
root = Node(self.size, [parent], [left, right])
return root
def add(self, key):
i = 0
while i < len(self.keys):
if self.keys[i] <= key:
i += 1
else:
break
self.keys.insert(i, key)
while len(self.pointers) < len(self.keys) + 1:
self.pointers.append(None)
def delete(self, key):
i = 0
while i < len(self.keys):
if self.keys[i] == key:
break
i += 1
if i != len(self.keys):
self.keys.pop(i)
if self.pointers and self.pointers[0]:
left, right = self.pointers[i], self.pointers[i+1]
if i == 0:
right.combine(left, False)
self.pointers.pop(i)
else:
left.combine(right)
self.pointers.pop(i+1)
elif self.pointers:
self.pointers.pop(i)
def combine(self, node, inverse=True):
if inverse:
self.keys = node.keys.extend(self.keys)
self.pointers = node.pointers.extend(self.pointers)
else:
self.keys.extend(node.keys)
self.pointers.extend(node.pointers)
class BTree():
    """A (partial) B-tree of the given order built from `Node` objects.

    Insertion with upward node splitting is fully implemented. Deletion
    handles leaf removal, borrowing from a rich sibling, and replacing an
    internal key with its in-order successor; merging of underfull
    siblings is not implemented (see `delete`).
    """
    def __init__(self, order):
        self.order = order
        self.root = Node(order)
        self.depth = 1

    def __str__(self):
        """Render the tree breadth-first, one comma-joined line per level."""
        queue = [self.root]
        res = []
        string = ''
        while queue:
            tmp = []
            while queue:
                node = queue.pop(0)
                res.append(str(node))
                tmp += [x for x in node.pointers if x]
            queue = tmp
            string += ','.join(res) + '\n'
            res = []
        return string

    def insert(self, key):
        """Insert `key`, splitting overflowing nodes up the search path.

        If the split propagates past the old root, install a new root and
        increase the tree depth.
        """
        path = self.insert_find(key)
        node = path.pop()
        node.add(key)
        if node.is_full():
            head = node.split()
            while path:
                head, split_flag = self.merge(path.pop(), head)
                if split_flag:
                    continue
                else:
                    return
            self.root = head
            self.depth += 1

    def insert_find(self, key):
        """Return the root-to-leaf path of nodes visited while locating `key`."""
        path = [self.root]
        if self.depth == 1:
            return path
        else:
            cnt = 1
            node = self.root
            while cnt < self.depth:
                i = 0
                while i < len(node.keys):
                    if node.keys[i] > key:
                        break
                    else:
                        i += 1
                node = node.pointers[i]
                cnt += 1
                path.append(node)
            return path

    def delete(self, key):
        """Delete `key` if present.

        Handles deletion from a sufficiently-full leaf, borrowing through
        the parent from a rich sibling, and replacing an internal key with
        the smallest key of its right child. The underfull-merge case is
        intentionally left unimplemented (falls through to `pass`).
        """
        node, state, path = self.delete_find(key)
        if state == 'not_found':
            return
        elif state == 'leaf':
            if len(node.keys) > node.minimum:
                node.delete(key)
            else:
                parent = path[-2]
                child_idx = 0
                for child in parent.pointers:
                    if key in child.keys:
                        break
                    child_idx += 1
                bro = self.rich_brother(parent, child_idx)
                if bro:
                    node.delete(key)
                    bro_idx = parent.pointers.index(bro)
                    if bro_idx < child_idx:
                        # Rotate through the parent from the left sibling.
                        node.add(parent.keys[bro_idx])
                        parent.keys[bro_idx] = bro.keys[-1]
                        bro.delete(bro.keys[-1])
                    else:
                        # Rotate through the parent from the right sibling.
                        node.add(parent.keys[child_idx])
                        parent.keys[child_idx] = bro.keys[0]
                        bro.delete(bro.keys[0])
                else:
                    # TODO(review): merge of underfull siblings not implemented.
                    pass
        else:
            # Key sits in an internal node: replace it with the smallest key
            # of the right child when that child can spare one.
            i = node.keys.index(key)
            child = node.pointers[i+1]
            if len(child.keys) > child.minimum:
                v = child.keys[0]
                child.delete(v)
                node.keys[i] = v

    def delete_find(self, key):
        """Locate `key`; return (node, state, path) with state in
        {'middle', 'leaf', 'not_found'}."""
        cnt = 0
        node = self.root
        path = [node]
        while cnt < self.depth - 1:
            i = 0
            while i < len(node.keys):
                if node.keys[i] == key:
                    return node, 'middle', path
                elif node.keys[i] > key:
                    break
                i += 1
            node = node.pointers[i]
            path.append(node)
            cnt += 1
        i = 0
        while i < len(node.keys):
            if node.keys[i] == key:
                return node, 'leaf', path
            i += 1
        return None, 'not_found', path

    def rich_brother(self, parent, child_idx):
        """Return a sibling of child `child_idx` that can spare a key, or None."""
        if child_idx == 0:
            if len(parent.pointers) > 1 and len(parent.pointers[1].keys) > parent.pointers[1].minimum:
                return parent.pointers[1]
            else:
                return None
        elif child_idx == len(parent.pointers) - 1:
            if len(parent.pointers) > 1 and len(parent.pointers[-2].keys) > parent.pointers[-2].minimum:
                return parent.pointers[-2]
            else:
                return None
        else:
            if len(parent.pointers[child_idx-1].keys) > parent.pointers[child_idx-1].minimum:
                return parent.pointers[child_idx-1]
            elif len(parent.pointers[child_idx+1].keys) > parent.pointers[child_idx+1].minimum:
                return parent.pointers[child_idx+1]
            else:
                return None

    def merge(self, node1, node2):
        """Fold the one-key split result `node2` into its parent `node1`.

        Returns (node, True) when the parent overflowed and was split again,
        otherwise (node, False). (An unreachable trailing
        `return node1, split_flag`, which referenced an undefined name,
        was removed.)
        """
        keys1 = node1.keys
        key2 = node2.keys[0]
        i = 0
        while i < len(keys1):
            if keys1[i] > key2:
                break
            else:
                i += 1
        node1.keys.insert(i, key2)
        node1.pointers[i] = node2.pointers[0]
        node1.pointers.insert(i+1, node2.pointers[1])
        if len(node1.keys) >= node1.size:
            return node1.split(), True
        else:
            return node1, False
if __name__ == '__main__':
    # Smoke-test Node insertion ordering, then exercise the classic
    # C-N-G-A-H... B-tree build/delete sequence, printing the tree
    # level-by-level after each interesting step.
    node1 = Node(3)
    node1.add(12)
    node1.add(1)
    node1.add(3)
    node1.add(46)
    t = BTree(5)
    t.insert('C')
    t.insert('N')
    t.insert('G')
    t.insert('A')
    t.insert('H')
    print(t)
    t.insert('E')
    t.insert('K')
    t.insert('Q')
    print(t)
    t.insert('M')
    print(t)
    t.insert('F')
    t.insert('W')
    t.insert('L')
    t.insert('T')
    print(t)
    t.insert('Z')
    print(t)
    t.insert('D')
    t.insert('P')
    t.insert('R')
    t.insert('X')
    t.insert('Y')
    print(t)
    t.insert('S')
    print(t)
    print('delete testing.....')
    t.delete('H')
    t.delete('T')
    print(t)
    t.delete('R')
    print(t)
22,943 | 907b6dfa773129a0ec6a66adb6857a6a47199bdb | from .baseline import Baseline
from .detector import Detector
from .detector_fpn import Detector_FPN
# from .detector_orn import Detector_ORN # can not run on cpu
__all__ = ["Baseline", "Detector", "Detector_FPN"] # , 'Detector_ORN']
|
22,944 | 936db64e955730755cc244be94cf280e25051ef2 | # Character Picture Grid
#
# Say you have a list of lists where each value in the inner lists is a one-character string, like this:
#
#
# grid = [['.', '.', '.', '.', '.', '.'],
# ['.', 'O', 'O', '.', '.', '.'],
# ['O', 'O', 'O', 'O', '.', '.'],
# ['O', 'O', 'O', 'O', 'O', '.'],
# ['.', 'O', 'O', 'O', 'O', 'O'],
# ['O', 'O', 'O', 'O', 'O', '.'],
# ['O', 'O', 'O', 'O', '.', '.'],
# ['.', 'O', 'O', '.', '.', '.'],
# ['.', '.', '.', '.', '.', '.']]
#
# You can think of grid[x][y] as being the character at the x- and y-coordinates of a “picture” drawn with text characters. The (0, 0) origin will be in the upper-left corner, the x-coordinates increase going right, and the y-coordinates increase going down.
# >>> spam = [['cat', 'bat'], [10, 20, 30, 40, 50]]
# >>> spam[0]
# ['cat', 'bat']
# >>> spam[0][1]
# 'bat'
# >>> spam[1][4]
# 50
# The first index dictates which list value to use, and the second indicates the value within the list value. For example, spam[0][1] prints 'bat', the second value in the first list. If you only use one index, the program will print the full list value at that index.
# based on this it seems that in grid[y][x] is more accurate...
# Copy the previous grid value, and write code that uses it to print the image.
#
# ..OO.OO..
# .OOOOOOO.
# .OOOOOOO.
# ..OOOOO..
# ...OOO...
# ....O....
# This is the above image sideways
# Hint: You will need to use a loop in a loop in order to print grid[0][0], then grid[1][0], then grid[2][0], and so on, up to grid[8][0]. This will finish the first row, so then print a newline. Then your program should print grid[0][1], then grid[1][1], then grid[2][1], and so on. The last thing your program will print is grid[8][5].
# Also, remember to pass the end keyword argument to print() if you don’t want a newline printed automatically after each print() call.
grid = [['.', '.', '.', '.', '.', '.'],
        ['.', 'O', 'O', '.', '.', '.'],
        ['O', 'O', 'O', 'O', '.', '.'],
        ['O', 'O', 'O', 'O', 'O', '.'],
        ['.', 'O', 'O', 'O', 'O', 'O'],
        ['O', 'O', 'O', 'O', 'O', '.'],
        ['O', 'O', 'O', 'O', '.', '.'],
        ['.', 'O', 'O', '.', '.', '.'],
        ['.', '.', '.', '.', '.', '.']]
# grid[i][j] is the character at column i, row j of the picture, but the
# list literal is laid out column-by-column. Printing the picture therefore
# fixes a row j in the outer loop and sweeps the columns i in the inner
# loop — i.e. a transpose of the nested lists.
for j in range(len(grid[0])): # j: output row = index inside each inner list
    for i in range(0, len(grid)): # i: output column = which inner list
        print(grid[i][j], end='') # keyword=end prevents a newline after every print()
    print() # finish the current output row
22,945 | 3db1f299bd8d22377009ac4a70740ea1c70bf290 | import urwid
def __main__():
repo_list = build_repo_list()
menu = build_menu()
container = urwid.Columns(
[("weight", 3, repo_list), ("weight", 1, menu)], dividechars=1, min_width=10,
)
loop = urwid.MainLoop(container)
loop.run()
def build_menu():
    """Return a ListBox containing the static 'Menu' header."""
    entries = urwid.SimpleFocusListWalker([urwid.Text("Menu")])
    return urwid.ListBox(entries)
def build_repo_list():
    """Return a ListBox containing the static 'Repositories' header."""
    entries = urwid.SimpleFocusListWalker([urwid.Text("Repositories")])
    return urwid.ListBox(entries)
# Standard entry-point guard: only launch the TUI when run as a script.
if __name__ == "__main__":
    __main__()
|
22,946 | c140d33912be8dfc05f505d9b341d38222c6d899 | from data_loader.data_generator import DataGenerator
from models.invariant_basic import invariant_basic
from trainers.trainer import Trainer
from Utils.config import process_config
from Utils.dirs import create_dirs
import numpy as np
from collections import Counter
from Utils.utils import get_args
from Utils import config
import warnings
warnings.filterwarnings('ignore')
import importlib
import collections
import data_loader.data_helper as helper
from Utils.utils import get_args
import os
import time
# capture the config path from the run arguments
# then process the json configuration file
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/example.json')
# config.num_classes=4
"""reset config.num_classes if it's syn data"""
# CUDA_VISIBLE_DEVICES is set deliberately BEFORE the TensorFlow import below
# so the device restriction is in place when TF initializes.
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
# create the experiments dirs
# Fixed seeds for reproducible runs.
tf.set_random_seed(1)
np.random.seed(1)
create_dirs([config.summary_dir, config.checkpoint_dir])
# create tensorflow session
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
# create your data generator
data = DataGenerator(config)
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = invariant_basic(config, data)
# create trainer and pass all the previous components to it
trainer = Trainer(sess, model, data, config)
# load model if exists
# model.load(sess)
# here you train your model (wall-clock timed)
stt = time.time()
trainer.train()
end = time.time()
sess.close()
tf.reset_default_graph()
|
22,947 | d0b690d5b538872696140adfd88d46fee1b42b57 | import sys
import argparse
import StringIO
from panko import audiofile
from panko.audiofile import albumart
def parse_args():
    """Parse command-line arguments for the cover-art embedding tool.

    Returns:
        argparse.Namespace with `files` (list of paths), `cover`
        (file path or URL, or None) and `cover_format` (or None).
    """
    parser = argparse.ArgumentParser(description='Display an audio files meta data.')
    parser.add_argument('files', metavar='FILES', type=str, nargs='+',
                        help='the audio files that will be inspected')
    parser.add_argument('-c', '--cover', type=str, default=None,
                        help='file or url for cover art')
    # The original help text was copy-pasted from --cover; this flag actually
    # selects the image format passed to embed_cover().
    parser.add_argument('-f', '--cover-format', type=str, default=None,
                        help='image format to use when embedding the cover art')
    return parser.parse_args()
def main():
    """Load cover art from a file or URL and embed it into each audio file."""
    args = parse_args()
    art = None
    if args.cover:
        # URLs are fetched over HTTP(S); anything else is a local file path.
        if args.cover.startswith('http://') or args.cover.startswith('https://'):
            art = albumart.load_url(args.cover)
        else:
            art = albumart.load(args.cover)
    for filepath in args.files:
        target_file = audiofile.open(filepath)
        if art:
            target_file.embed_cover(art, format=args.cover_format)
if __name__ == '__main__':
main() |
22,948 | a83485795e364712019683bcc50267a7285fb157 | #LEA encryption
import LEA
import its
import timeit
#start= timeit.default_timer()
def main():
    """Round-trip an image through LEA-ECB: encrypt, persist, decrypt.

    Reads "1.png" via its.its(), encrypts with a fixed 24-byte key, writes
    the ciphertext image, then decrypts and writes the result back out,
    timing each half with timeit.default_timer().
    """
    # Fixed 24-byte (192-bit) demo key.
    key = bytearray(b"blacksnakeblacksnake1234")
    #print(key)
    start= timeit.default_timer()
    input_str = its.its("1.png")
    #print("input string : " + input_str)
    #print("Start Encryption")
    pt = bytearray(input_str, "utf8")
    #print(pt,type(pt))
    leaECB = LEA.ECB(True, key, True)
    ct = leaECB.update(pt)
    ct += leaECB.final()
    #l_r = ' '.join([str(elem) for elem in ct])
    #emsg = its.sti(''.join(ct), 'ec_gbaby.jpg' )
    #print(str(ct),type(str(ct)))
    emsg = its.sti(''.join(str(ct)), 'ec_gbaby.png' )
    #print(ct,type(ct))
    #print("End Encryption")
    stop = timeit.default_timer()
    execution_time = stop - start
    print("Program Executed in :",execution_time)
    #print("Start Decryption")
    start= timeit.default_timer()
    # Same cipher with encrypt=False for the decryption pass.
    leaECB = LEA.ECB(False, key, True)
    pt = leaECB.update(ct)
    pt += leaECB.final()
    #print(pt,type(pt))
    decrypt_output = pt.decode('utf8')
    dmsg = its.sti(''.join(decrypt_output), 'd_gbaby.png' )
    #print( decrypt_output,type( decrypt_output))
    #print("End Decrypt")
    stop = timeit.default_timer()
    execution_time = stop - start
    print("Program Executed in :",execution_time)
if __name__ == "__main__":
main()
#stop = timeit.default_timer()
#execution_time = stop - start
#print("Program Executed in :",execution_time)
|
22,949 | 866f29f182f8e42b0edbb5d31ae4f0eb2abcbc65 | /Users/jalal/anaconda/lib/python3.6/operator.py |
22,950 | c29f605d232d599fbe8c4cc4cb484df714647a5c | from abc import ABCMeta, abstractmethod
class Class1(metaclass=ABCMeta):
    @abstractmethod
    def func(self, x): # Abstract method: concrete subclasses must override it
        pass
class Class2(Class1): # Inherits the abstract base and overrides its method
    def func(self, x): # Concrete override
        print(x)
class Class3(Class1): # Does NOT override func(), so it stays abstract
    pass
c2 = Class2()
c2.func(50) # Prints: 50
try:
    c3 = Class3() # Error: func() was never overridden
    c3.func(50)
except TypeError as msg:
    print(msg) # Can't instantiate abstract class Class3
               # with abstract methods func
22,951 | a3288424f565467bd0323b159362ae24a5cff7bf | #!/usr/bin/env python3
import sys
import os
import time
import mido
import struct
import array
# Pre-compiled layout for the envelope payload: two ints, a double, an int,
# then 4 bytes of padding.
Envelope = struct.Struct("iidixxxx")


def make_instrument(channel: int, amps: "List[float]") -> bytes:
    """Serialize an instrument-definition message for one channel."""
    chan = channel & 15
    # 0xF0 message: high nibble = harmonic count, low nibble = channel,
    # followed by the amplitudes as packed doubles.
    harmonics = b"\xF0" + struct.pack("B", (len(amps) << 4) | chan)
    harmonics += array.array("d", amps).tobytes()
    # 0xF1 message: channel byte plus a fixed attack/decay envelope.
    envelope = b"\xF1" + struct.pack("B", chan)
    envelope += Envelope.pack(4410, 44100, 0.3, 10000)
    return harmonics + envelope
def playnote(note, amp):
    """Write a raw note event to stdout and flush immediately.

    amp != 0 emits a 3-byte event (0x00, note, amp); amp == 0 emits a
    2-byte event (0x01, note).
    """
    if amp != 0:
        sys.stdout.buffer.write(bytes([0, note, amp]))
    else:
        sys.stdout.buffer.write(bytes([1, note]))
    sys.stdout.buffer.flush()
# With --loop the MIDI file is replayed indefinitely; otherwise it plays once.
if sys.argv[1] == "--loop":
    loop = True
    file = mido.MidiFile(sys.argv[2])
else:
    loop = False
    file = mido.MidiFile(sys.argv[1])
sys.stderr.write("%.2f seconds\n" % file.length)
play = file.play()
while True:
    try:
        # In loop mode a fresh play() generator is created per pass.
        for msg in play if not loop else file.play():
            #sys.stderr.write(str(msg.bytes()))
            if msg.type in ('note_on', 'note_off', 'pitchwheel'):
                sys.stdout.buffer.write(bytes(msg.bytes()))
            elif msg.type == 'program_change':
                sys.stdout.buffer.write(bytes(msg.bytes()))
                # if msg.program > 1:
                #     sys.stdout.buffer.write(bytes(msg.bytes()))
                # else:
                #     amps = [1.0, 0, 1/4, 0, 1/9, 0, 1/16, 0, 1/49]
                #     sys.stdout.buffer.write(make_instrument(msg.channel, amps))
            sys.stdout.buffer.flush()
        break
    except KeyboardInterrupt:
        #sys.stdout.close()
        #https://docs.python.org/3/faq/library.html#why-doesn-t-closing-sys-stdout-stdin-stderr-really-close-it
        exit()
exit()
"""
for msg in mido.MidiFile(sys.argv[1]).play():
#sys.stderr.write(str(msg.bytes()))
if msg.type == 'note_on' and msg.velocity > 0:
sys.stdout.buffer.write(bytes(msg.bytes()))
elif msg.type == 'note_off':
sys.stdout.buffer.write(bytes(msg.bytes()))
elif msg.type == 'note_on' and msg.velocity == 0:
sys.stdout.buffer.write(bytes([0x80+msg.channel, msg.note, 0]))
elif msg.type == 'program_change':
sys.stdout.buffer.write(bytes(msg.bytes()))
sys.stdout.buffer.flush()
"""
|
22,952 | 6b7f940b23295b1fa4cff2ca52155e5d1a15ea80 | from haversine import inverse_haversine_vector, Unit, Direction
from numpy import isclose
from math import pi
import pytest
from tests.geo_ressources import LYON, PARIS, NEW_YORK, LONDON
@pytest.mark.parametrize(
    "point, dir, dist, result",
    [
        (PARIS, Direction.NORTH, 32, (49.144444, 2.3508)),
        (PARIS, 0, 32, (49.144444, 2.3508)),
        (LONDON, Direction.WEST, 50, (51.507778, -0.840556)),
        (LONDON, pi * 1.5, 50, (51.507778, -0.840556)),
        (NEW_YORK, Direction.SOUTH, 15, (40.568611, -74.235278)),
        (NEW_YORK, Direction.NORTHWEST, 50, (41.020556, -74.656667)),
        (NEW_YORK, pi * 1.25, 50, (40.384722, -74.6525)),
    ],
)
def test_inverse_kilometers(point, dir, dist, result):
    """inverse_haversine_vector reaches the expected coordinates whether the
    bearing is a Direction enum member or a raw radian value."""
    assert isclose(inverse_haversine_vector([point], [dist], [dir]),
                   ([result[0]], [result[1]]), rtol=1e-5).all()
|
22,953 | 27041a228021d98725cc238017335e9900a46f2a | import pandas as pd
import numpy as np
from datetime import datetime
####################################################
#Helper Functions
def get_teams(team_list):
    """Map each team name to a unique integer id.

    Ids follow the order in which teams appear in `team_list`.
    """
    return {team: index for index, team in enumerate(team_list)}
def get_home_away(df):
    """Translate the HomeTeam/AwayTeam name columns into numeric team ids."""
    team_ids = get_teams(df.HomeTeam.unique())
    home = [team_ids[name] for name in df.HomeTeam]
    away = [team_ids[name] for name in df.AwayTeam]
    return home, away
def final_result(result):
    """Encode the full-time result: home win -> -1, draw -> 0, away win -> 1."""
    outcome_codes = {"H": -1, "D": 0}
    return outcome_codes.get(result, 1)
############################################################
def main(cut=360):
    """Load and featurize match data, split into train/test at row `cut`.

    Returns (data, Y, X1, y1, X1_train, y1_train, X1_test, y1_test).
    """
    #ingest the data
    data = pd.read_csv("../data/E0.csv")
    data.HomeTeam,data.AwayTeam = get_home_away(data)
    data.FTR = data.FTR.apply(final_result)
    # Expand Date/Time into separate calendar-component columns.
    data.Date = pd.to_datetime(data.Date)
    data.Time = pd.to_datetime(data.Time)
    data['year'] = data.Date.dt.year
    data['month'] = data.Date.dt.month
    data['day'] = data.Date.dt.day
    data['hour'] = data.Time.dt.hour
    data['minute'] = data.Time.dt.minute
    data.drop(["Div","Date","Referee","Time","HTR"],axis=1,inplace=True)
    Y = data.FTR
    # X1: pre-match features only; y1: everything else (post-match stats).
    X1 = data[["HomeTeam","AwayTeam","year","month","day"]]
    y1 = data.drop(["HomeTeam","AwayTeam","FTR","year","month","day"],axis=1)
    X1_train = X1.iloc[:cut]
    y1_train = y1.iloc[:cut]
    X1_test = X1.iloc[cut:]
    y1_test = y1.iloc[cut:]
    return data,np.array(Y),X1,y1,X1_train,y1_train,X1_test,y1_test
def team_names():
    """Return the team-name -> integer-id mapping for ../data/E0.csv."""
    #ingest the data
    fixtures = pd.read_csv("../data/E0.csv")
    return get_teams(fixtures.HomeTeam.unique())
22,954 | e91371d526bc9a977fb0edd71a7815791dae00a9 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-07 01:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10) schema migration for the `company` model.

    Adds registration-type/description/lat-long fields and normalises the
    verbose_name of the existing columns.
    """
    dependencies = [
        ('app', '0003_auto_20170206_1131'),
    ]
    operations = [
        migrations.AddField(
            model_name='company',
            name='company_reg_type',
            # NOTE(review): default=django.utils.timezone.now on a CharField and
            # the reused verbose_name 'Company Address' (also on `description`
            # below) look like copy/paste slips from the makemigrations prompt —
            # confirm before squashing this migration.
            field=models.CharField(default=django.utils.timezone.now, max_length=32, verbose_name='Company Address'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='company',
            name='description',
            field=models.CharField(default=django.utils.timezone.now, max_length=255, verbose_name='Company Address'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='company',
            name='latitude',
            field=models.DecimalField(decimal_places=7, default=0.0, max_digits=10, verbose_name='Latitude'),
        ),
        migrations.AddField(
            model_name='company',
            name='longitude',
            field=models.DecimalField(decimal_places=7, default=0.0, max_digits=10, verbose_name='Longitude'),
        ),
        migrations.AlterField(
            model_name='company',
            name='company_address',
            field=models.CharField(max_length=255, verbose_name='Company Address'),
        ),
        migrations.AlterField(
            model_name='company',
            name='company_code',
            field=models.CharField(max_length=32, verbose_name='Company number'),
        ),
        migrations.AlterField(
            model_name='company',
            name='company_manager',
            field=models.CharField(max_length=32, verbose_name='Company Manager'),
        ),
        migrations.AlterField(
            model_name='company',
            name='company_manager_tel',
            field=models.CharField(max_length=32, verbose_name='Company Manager Tel'),
        ),
        migrations.AlterField(
            model_name='company',
            name='company_name',
            field=models.CharField(max_length=255, verbose_name='Company Name'),
        ),
        migrations.AlterField(
            model_name='company',
            name='company_short_name',
            field=models.CharField(max_length=32, verbose_name='Company Short Name'),
        ),
        migrations.AlterField(
            model_name='company',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False),
        ),
    ]
22,955 | 9c13e84f2af715e0b1d7a7b723ee3a78ebd018ff | import tkinter as tk
from pynput import mouse
from pynput import keyboard
from pynput.mouse import Button, Controller
from pynput.keyboard import Key, Controller as KeyController
import time
import pickle
# --- main application window -------------------------------------------------
root= tk.Tk()
root.title("Its Boring Player")
#####################################################################################
# Shared, module-level playback state (mutated by the callbacks below).
data= []  # key events appended by on_release; appears vestigial in this player — TODO confirm
ptime= time.time()  # timestamp of the previous event, used for inter-event delays
live= True  # playback-loop flag; cleared by stopPlay() / the DELETE key
speedx = 1  # playback speed multiplier, driven by the speed slider
#############################################################
import pyautogui
from pyscreeze import ImageNotFoundException
#####################################################################################
def on_release(key):
    """pynput keyboard callback: stop on ESC/DELETE, otherwise log the release.

    Returning False from a pynput callback stops the listener; returning the
    module-level `live` flag keeps it alive only while playback is running.
    """
    global ptime
    print('{0} released'.format(key))
    if key == keyboard.Key.esc:
        return False
    if(key== keyboard.Key.delete):
        # DELETE aborts the current playback loop, then stops this listener.
        stopPlay()
        return False
    # Record the key release together with the delay since the previous event.
    # NOTE(review): this recording path looks left over from the companion
    # recorder tool; `data` is never replayed by this player — confirm.
    dur= time.time()- ptime
    data.append({'type':'keyrelease', 'dur': dur, 'key': key})
    print("Key "+str(key)+" with "+str(dur))
    ptime= time.time()
    return live
def stopPlay():
    """Signal the playback loop (see play/playInLoop) to stop at the next event."""
    global live
    print("Stopping play")
    live= False
def play(data, mouse, keyc):
    """Replay a list of recorded input events.

    data : list of dicts with keys 'type' ('mouse'/'keypress'/'keyrelease'),
           'dur' (seconds since the previous event) and event-specific fields.
    mouse : pynput mouse Controller; keyc : pynput keyboard Controller.

    Each event sleeps its recorded delay scaled by the global `speedx`
    multiplier; the loop aborts as soon as the global `live` flag is cleared.
    Note: the local name `type` shadows the builtin (cosmetic only).
    """
    global live
    for dd in data:
        type= dd['type']
        # slider delivers speedx as a string, hence the float() coercion
        dur= dd['dur']/ float(speedx)
        time.sleep(dur)
        if(not live):
            break
        if(type=='mouse'):
            x, y = dd['pos']
            btn= dd['btn']
            pressed= dd['pressed']
            # print("X "+str(x)+" Y "+str(y)+ "delay "+str(dur))
            mouse.position = (x, y)
            if(pressed):
                mouse.press(btn)
            else:
                mouse.release(btn)
            #mouse.click(btn)
        if(type=='keypress'):
            key= dd['key']
            # print("Keypress "+str(key))
            keyc.press(key)
        if(type=='keyrelease'):
            key= dd['key']
            # print("Keyrelease "+str(key))
            keyc.release(key)
        #if(type=='scroll'):
        #    x, y= dd['pos']
        #    dx, dy = dd['amount']
        #    mouse.scroll(dx, dy)
def playRec():
    """Play the recording named in the entry box once, updating the status label."""
    statusTv['text']= "Playing"
    statusTv['fg']= 'green'
    root.update()
    name= nameE.get();
    # Recordings are pickled lists of event dicts (see play() for the schema).
    # NOTE(review): the file handle is never closed and this local `data`
    # shadows the module-level list of the same name.
    data = pickle.load( open( "recordings/{}_0_input.p".format(name), "rb" ) )
    mouse = Controller()
    keyc= KeyController()
    play(data, mouse, keyc)
    statusTv['text']= "Played Successfully"
def playInLoop():
    """Replay the selected recording repeatedly until `live` is cleared.

    Starts a keyboard listener so ESC/DELETE can interrupt the loop.
    NOTE(review): the status text says "Press HOME to Stop" but on_release
    actually stops on ESC/DELETE — confirm which key is intended.
    """
    global live, key_listener
    live= True
    key_listener = keyboard.Listener(on_release=on_release)
    key_listener.start()
    statusTv['text']= "Press HOME to Stop"
    statusTv['fg']= 'red'
    name= nameE.get();
    data = pickle.load( open( "recordings/{}_0_input.p".format(name), "rb" ) )
    mouse = Controller()
    keyc= KeyController()
    while(live):
        play(data, mouse, keyc)
    statusTv['text']= "Stopped Successfully"
    statusTv['fg']= "green"
def on_slider(value):
    """tk.Scale callback: update the global playback-speed multiplier.

    Tk passes `value` as a string; play() coerces it with float() on use.
    """
    global speedx
    speedx= value
    statusTv['text']= "Speed: {}x".format(speedx)
    statusTv['fg']= "green"
def just_play(nn):
    """Put recording name *nn* into the entry box (used by the dropdown callback)."""
    # END comes from `from tkinter import END` further down the module; it is
    # only resolved when this function is called, so that works at runtime.
    nameE.delete(0,END)
    nameE.insert(0,nn)
    #playRec()
#####################################################################################
# --- build the widget tree ---------------------------------------------------
statusTv= tk.Label(root, text= "READY", fg= 'green', font = "Verdana 12 bold")
statusTv.pack(padx=2, pady=2)
nameE= tk.Entry(root)
nameE.config(width=25, borderwidth = '4', relief='flat', bg='white')
nameE.pack(padx=2, pady=2)
# statusTv= tk.Label(root, text= "Recordings", fg= 'gray', font = "Verdana 10")
# statusTv.pack()
# slider= tk.Scale(root, from_= 1, to= 25, command= on_slider, orient= 'horizontal', label= "Speed", length= 200)
slider= tk.Scale(root, from_= 1, to= 25, command= on_slider, orient= 'horizontal',length= 200)
slider.pack()
import glob
import os
from tkinter import END
playB= tk.Button(root, text= "Play", width= 25, command= playRec, borderwidth = '4', relief='flat', overrelief= 'ridge', bg='#63f542', activebackground='green' )
playB.pack(padx=4, pady=2)
pilB= tk.Button(root, text= "Play in Loop", width= 25, command= playInLoop, borderwidth = '4', relief='flat', overrelief= 'ridge', bg='#63f542', activebackground='green' )
pilB.pack(padx=4, pady=2)
pilsB= tk.Button(root, text= "Stop Playing (press delete)", width= 25, command= stopPlay, borderwidth = '4', relief='flat', overrelief= 'ridge', bg='#ffa1a1', activebackground='red' )
pilsB.pack(padx=4, pady=2)
exitB= tk.Button(root, text= "Close", width= 25, command= root.destroy, borderwidth = '4', relief='flat', overrelief= 'ridge', bg='#ffa1a1', activebackground='red' )
exitB.pack(padx=4, pady=2)
# Discover available recordings: each file is "<name>_<n>_input.p"; keep the
# part before the first underscore as the display name.
recs= []
for ff in glob.glob("recordings/*.p"):
    nn= os.path.basename(ff)
    nn= nn[: nn.find("_")]
    recs.append(nn)
    # playB= tk.Button(root, text= str(nn), width= 25, command= lambda nn=nn: just_play(nn))
    # playB.pack()
variable = tk.StringVar(root)
# NOTE(review): raises IndexError when the recordings/ folder is empty.
variable.set(recs[0]) # default value
w = tk.OptionMenu(root, variable , *recs,)
w.config(width=25, borderwidth = '4', relief='flat', bg='#a1ebff', activebackground='skyblue' )
w.pack(padx=4, pady=2)
def callback(*args):
    """Dropdown trace: push the chosen recording name into the entry box."""
    print("The selected item is {}".format(variable.get()))
    just_play(variable.get())
variable.trace("w", callback)
#root.attributes('-topmost', True)
#root.update()
# Pin the window to the top-right corner of the screen.
# NOTE(review): `w` is reused here, clobbering the OptionMenu reference above.
w = 200 # width for the Tk root
h = 300 # height for the Tk root
ws = root.winfo_screenwidth() # width of the screen
hs = root.winfo_screenheight() # height of the screen
# calculate x and y coordinates for the Tk root window
# x = (ws/2) - (w/2)
# y = (hs/2) - (h/2)
x= ws- w;
y= 0;
# set the dimensions of the screen
# and where it is placed
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
root.mainloop()
22,956 | 3c15d1941e91fec56db2216a63c75eec7cc32847 | from secure_delete import secure_delete
# Seed the library's RNG before overwriting, then shred ./sd.txt so its
# contents cannot be recovered from disk.
secure_delete.secure_random_seed_init()
secure_delete.secure_delete('./sd.txt')
|
22,957 | 75f27e6e4233aaa6b0d6d4e7379dcbeb3134391c | # -*- coding:utf-8 -*-
import math, sys
# Competitive-programming solution: simulate a game where x starts at 1 and the
# players alternately apply "x -> 2x" and "x -> 2x+1" moves; the winner is
# decided by the parity of the move index at which x first exceeds N.
# NOTE(review): math.log is floating point, so `depth` can be off by one for N
# near an exact power of two — int.bit_length()/math.log2 would be exact.
N = int(input())
x = 1
depth = math.floor(math.log(N, 2))
if depth == 0:
    # N == 1: the game ends immediately and Aoki wins.
    print("Aoki")
    sys.exit()
for tmp in range(depth+1):
    # Which move type goes first depends on the parity of the total depth.
    if depth%2 == 0:
        if tmp%2 == 0:
            x = 2*x+1
        else:
            x *= 2
    else:
        if tmp%2 == 0:
            x *= 2
        else:
            x = 2*x+1
    if x > N:
        # x overshot N on move `tmp`; the winner follows from tmp's parity.
        if tmp%2 == 0:
            print("Aoki")
            sys.exit()
        else:
            print("Takahashi")
            sys.exit()
22,958 | 4fec3c106b073ceab54e5faecec87074d0adf0e7 | #!/usr/bin/env python
import os,sys,pdb,scipy,glob
from pylab import *
from strolger_util import util as u
from strolger_util import rates_z as rz
from strolger_util import cosmotools as ct
if __name__=='__main__':
    # Compare two cosmic star-formation-history curves on one axis:
    # (1) the parametric csfh() with fit parameters p0, plotted against cosmic
    #     time (13.6 Gyr minus lookback time), and
    # (2) the Behroozi et al. 2012 SFR evaluated directly on a time grid.
    p0 = [0.013, 2.6, 3.2, 6.1]
    redshifts = arange(0, 6.1, 0.1)
    # lookback time at each redshift — presumably Gyr; TODO confirm units
    lbt = array([ct.cosmotime(x) for x in redshifts])
    tt = 13.6 - lbt
    csfh = rz.csfh(redshifts, *p0)
    ax = subplot(111)
    ax.plot(tt, csfh, 'r-')
    tt = arange(0, 13.6, 0.05)
    sfh_t = rz.sfr_behroozi_12(tt)
    ax.plot(tt,sfh_t, 'b-')
    show()
|
22,959 | 61792759bf866a048b8e196866c9d5fa08b0fa06 | '''
456
'''
|
22,960 | a9f7384b254a889ea60a7e7cf0472cf60425a3ff | # Generated by Django 2.0.1 on 2019-12-21 11:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add Customer.consultant FK restricted to sales-department users."""
    dependencies = [
        ('web', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='customer',
            name='consultant',
            # limit_choices_to narrows admin/form choices to users whose
            # department title is '销售部' (sales department).
            field=models.ForeignKey(blank=True, limit_choices_to={'depart__title': '销售部'}, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='consultant', to=settings.AUTH_USER_MODEL, verbose_name='课程顾问'),
        ),
    ]
|
22,961 | 10b17998d39247d6e6e0d8a3bbcf42726cc9d4f6 | import pandas as pd
from geocleaner import clean_locations
import time
# Load the raw tweet DataFrame, normalise its location strings with
# clean_locations, and report timing plus how many rows survive each stage.
df = pd.read_pickle("../../data.pkl")
print('Total tweet count: ', len(df))
t0 = time.time()
# chunksize bounds memory while clean_locations walks the frame
df = clean_locations(df, chunksize=100000)
t1 = time.time()
print('Time taken: ', t1-t0)
print('Number of tweets with non-empty location string : ', len(df))
print(df.head(20))
# Count rows whose cleaned location is truthy — clean_locations presumably
# leaves empty strings/None for unresolvable locations (TODO confirm).
count = 0
for l in df['clean_location']:
    if l:
        count += 1
print('Number of tweets with cleaned locations', count)
22,962 | 34dc74739e39faea45b6c43436c37de37bf10a25 | #!/usr/bin/env python
import socket
from socketServer import Server
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 9001
def main():
    """Bind a TCP socket on SERVER_HOST:SERVER_PORT and serve requests forever."""
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow immediate restarts: without SO_REUSEADDR, rebinding within the
    # TIME_WAIT window fails with 'Address already in use'.
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind((SERVER_HOST, SERVER_PORT))
    server = Server(server_socket)
    server.serve()
if __name__ == '__main__':
main()
|
22,963 | 93858e109fff70d5d84c15dd76def1ab7ef0ffc3 | /Users/pawel/opt/anaconda3/lib/python3.7/io.py |
22,964 | 79c3abd1811738aef86328c433ed0ea02b813757 | ## PyHum (Python program for Humminbird(R) data processing)
## has been developed at the Grand Canyon Monitoring & Research Center,
## U.S. Geological Survey
##
## Author: Daniel Buscombe
## Project homepage: <https://github.com/dbuscombe-usgs/PyHum>
##
##This software is in the public domain because it contains materials that originally came from
##the United States Geological Survey, an agency of the United States Department of Interior.
##For more information, see the official USGS copyright policy at
##http://www.usgs.gov/visual-id/credit_usgs.html#copyright
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#"""
# ____ _ _
#| _ \ _ _| | | |_ _ _ __ ___ _ _
#| |_) | | | | |_| | | | | '_ ` _ \ (_) (_)
#| __/| |_| | _ | |_| | | | | | | _ _
#|_| \__, |_| |_|\__,_|_| |_| |_| (_) (_)
# |___/
#
# __
# _________ _____________ _____/ /_
# / ___/ __ \/ ___/ ___/ _ \/ ___/ __/
#/ /__/ /_/ / / / / / __/ /__/ /_
#\___/\____/_/ /_/ \___/\___/\__/
#
#
##+-+-+ +-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
#|b|y| |D|a|n|i|e|l| |B|u|s|c|o|m|b|e|
#+-+-+ +-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#|d|a|n|i|e|l|.|b|u|s|c|o|m|b|e|@|n|a|u|.|e|d|u|
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#"""
# =========================================================
# ====================== libraries ======================
# =========================================================
#operational
from __future__ import print_function
from __future__ import division
from scipy.io import savemat, loadmat
import os, time #, sys, getopt
try:
from Tkinter import Tk
from tkFileDialog import askopenfilename, askdirectory
except:
pass
from joblib import Parallel, delayed, cpu_count
import io #PyHum.io as io
#numerical
import numpy as np
import utils as humutils #PyHum.utils as humutils
import ppdrc #PyHum.ppdrc as ppdrc
from scipy.special import jv
from scipy.ndimage.filters import median_filter
from skimage.restoration import denoise_tv_chambolle
#plotting
import matplotlib.pyplot as plt
#import matplotlib.colors as colors
# suppress divide and invalid warnings
np.seterr(divide='ignore')
np.seterr(invalid='ignore')
import warnings
warnings.filterwarnings("ignore")
# =========================================================
# =============== begin program ======================
# ========================================================
#################################################
def correct(humfile, sonpath, maxW, doplot, dofilt, correct_withwater, ph, temp, salinity, dconcfile):
    '''
    Remove water column and carry out some rudimentary radiometric corrections,
    accounting for directivity and attenuation with range
    Syntax
    ----------
    [] = PyHum.correct(humfile, sonpath, maxW, doplot, correct_withwater, ph, temp, salinity, dconcfile)
    Parameters
    ----------
    humfile : str
       path to the .DAT file
    sonpath : str
       path where the *.SON files are
    maxW : int, *optional* [Default=1000]
       maximum transducer power
    doplot : int, *optional* [Default=1]
       1 = make plots, otherwise do not
    dofilt : int, *optional* [Default=0]
       1 = apply a phase preserving filter to the scans
    correct_withwater : int, *optional* [Default=0]
       1 = apply radiometric correction but don't remove water column from scans
    ph : float, *optional* [Default=7.0]
       water acidity in pH
    temp : float, *optional* [Default=10.0]
       water temperature in degrees Celsius
    salinity : float, *optional* [Default=0.0]
       salinity of water in parts per thousand
    dconcfile : str, *optional* [Default=None]
       file path of a text file containing sediment concentration data
       this file must contain the following fields separated by spaces:
       size (microns) conc (mg/L) dens (kg/m3)
       with one row per grain size, for example:
       30 1700 2200
       100 15 2650
    Returns
    -------
    sonpath+base+'_data_star_l.dat': memory-mapped file
        contains the starboard scan with water column removed
    sonpath+base+'_data_port_l.dat': memory-mapped file
        contains the portside scan with water column removed
    sonpath+base+'_data_star_la.dat': memory-mapped file
        contains the starboard scan with water column removed and
        radiometrically corrected
    sonpath+base+'_data_port_la.dat': memory-mapped file
        contains the portside scan with water column removed and
        radiometrically corrected
    sonpath+base+'_data_range.dat': memory-mapped file
        contains the cosine of the range which is used to correct
        for attenuation with range
    sonpath+base+'_data_dwnlow_l.dat': memory-mapped file
        contains the low freq. downward scan with water column removed
    sonpath+base+'_data_dwnhi_l.dat': memory-mapped file
        contains the high freq. downward scan with water column removed
    sonpath+base+'_data_dwnlow_la.dat': memory-mapped file
        contains the low freq. downward scan with water column removed and
        radiometrically corrected
    sonpath+base+'_data_dwnhi_la.dat': memory-mapped file
        contains the high freq. downward scan with water column removed and
        radiometrically corrected
    if correct_withwater == 1:
       sonpath+base+'_data_star_lw.dat': memory-mapped file
           contains the starboard scan with water column retained and
           radiometrically corrected
       sonpath+base+'_data_port_lw.dat': memory-mapped file
           contains the portside scan with water column retained and
           radiometrically corrected
    '''
    # ------------------------------------------------------------------
    # 1) Argument prompting / coercion
    # ------------------------------------------------------------------
    # prompt user to supply file if no input file given
    if not humfile:
        print('An input file is required!!!!!!')
        Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
        # NOTE(review): the chosen path is stored in `inputfile` but never
        # assigned back to `humfile`, so the prompt result is discarded.
        inputfile = askopenfilename(filetypes=[("DAT files","*.DAT")])
    # prompt user to supply directory if no input sonpath is given
    if not sonpath:
        print('A *.SON directory is required!!!!!!')
        Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
        sonpath = askdirectory()
    # print given arguments to screen and convert data type where necessary
    if humfile:
        print('Input file is %s' % (humfile))
    if sonpath:
        print('Sonar file path is %s' % (sonpath))
    if maxW:
        maxW = np.asarray(maxW,float)
        print('Max. transducer power is %s W' % (str(maxW)))
    if doplot:
        doplot = int(doplot)
        if doplot==0:
            print("Plots will not be made")
    if dofilt:
        dofilt = int(dofilt)
        if dofilt==0:
            print("Phase preserving filter will not be applied")
        else:
            print("Phase preserving filter will be applied")
    if correct_withwater:
        correct_withwater = int(correct_withwater)
        if correct_withwater==1:
            print("Correction will be applied without removing water column")
    if salinity:
        salinity = np.asarray(salinity,float)
        print('Salinity is %s ppt' % (str(salinity)))
    if ph:
        ph = np.asarray(ph,float)
        print('pH is %s' % (str(ph)))
    if temp:
        temp = np.asarray(temp,float)
        print('Temperature is %s' % (str(temp)))
    # optional suspended-sediment table: columns are size / conc / density
    if dconcfile is not None:
        try:
            print('Suspended sediment size/conc. file is %s' % (dconcfile))
            dconc = np.genfromtxt(dconcfile).T
            conc = dconc[1]
            dens = dconc[2]
            d = dconc[0]
        except:
            pass
    #================================
    # start timer
    if os.name=='posix': # true if linux/mac or cygwin on windows
        start = time.time()
    else: # windows
        start = time.clock()
    # if son path name supplied has no separator at end, put one on
    if sonpath[-1]!=os.sep:
        sonpath = sonpath + os.sep
    base = humfile.split('.DAT') # get base of file name for output
    base = base[0].split(os.sep)[-1]
    # remove underscores, negatives and spaces from basename
    base = humutils.strip_base(base)
    # ------------------------------------------------------------------
    # 2) Metadata: record wattage, pull depth/pixel-size/bed picks
    # ------------------------------------------------------------------
    # add wattage to metadata dict
    meta = loadmat(os.path.normpath(os.path.join(sonpath,base+'meta.mat')))
    dep_m = meta['dep_m'][0]
    pix_m = meta['pix_m'][0]
    meta['maxW'] = maxW
    savemat(os.path.normpath(os.path.join(sonpath,base+'meta.mat')), meta ,oned_as='row')
    bed = np.squeeze(meta['bed'])
    ft = 1/(meta['pix_m'])
    dist_m = np.squeeze(meta['dist_m'])
    # sediment attenuation (dB/m-ish) from the optional concentration table
    try:
        if dconcfile is not None:
            # sediment attenuation
            alpha = sed_atten(meta['f'],conc,dens,d,meta['c'])
        else:
            alpha = 0
    except:
        alpha = 0
    # ------------------------------------------------------------------
    # 3) Open the raw port/starboard scans as memory maps
    # ------------------------------------------------------------------
    # load memory mapped scans
    shape_port = np.squeeze(meta['shape_port'])
    if shape_port!='':
        if os.path.isfile(os.path.normpath(os.path.join(sonpath,base+'_data_port2.dat'))):
            port_fp = io.get_mmap_data(sonpath, base, '_data_port2.dat', 'int16', tuple(shape_port))
        else:
            port_fp = io.get_mmap_data(sonpath, base, '_data_port.dat', 'int16', tuple(shape_port))
    shape_star = np.squeeze(meta['shape_star'])
    if shape_star!='':
        if os.path.isfile(os.path.normpath(os.path.join(sonpath,base+'_data_star2.dat'))):
            star_fp = io.get_mmap_data(sonpath, base, '_data_star2.dat', 'int16', tuple(shape_star))
        else:
            star_fp = io.get_mmap_data(sonpath, base, '_data_star.dat', 'int16', tuple(shape_star))
    if len(shape_star)==2:
        extent = shape_star[0]
    else:
        extent = shape_star[1] #np.shape(data_port)[0]
    # nudge the bed pick down by a quarter of the (pixels-per-metre) factor
    bed = np.asarray(bed,'int')+int(0.25*ft)
    # ------------------------------------------------------------------
    # 4) Starboard: water-column removal, range/angle grids, transmission loss
    # ------------------------------------------------------------------
    # calculate in dB
    ######### star
    Zt, R, A = remove_water(star_fp, bed, shape_star, dep_m, pix_m, 1, maxW)
    Zt = np.squeeze(Zt)
    # create memory mapped file for Z
    shape_star = io.set_mmap_data(sonpath, base, '_data_star_l.dat', 'float32', Zt)
    del Zt
    A = np.squeeze(A)
    # create memory mapped file for A
    shape_A = io.set_mmap_data(sonpath, base, '_data_incidentangle.dat', 'float32', A)
    del A
    R = np.squeeze(R)
    R[np.isnan(R)] = 0
    # water absorption over the 2-way path (falls back to a tiny constant)
    try:
        alpha_w = water_atten(R,meta['f'],meta['c'], ph, temp, salinity)
    except:
        alpha_w = 1e-5
    # compute transmission losses (spherical spreading + water + sediment)
    TL = (40 * np.log10(R) + alpha_w + (2*alpha)*R/1000)/255
    del alpha_w
    # create memory mapped file for R
    shape_R = io.set_mmap_data(sonpath, base, '_data_range.dat', 'float32', R)
    del R
    TL[np.isnan(TL)] = 0
    TL[TL<0] = 0
    shape_TL = io.set_mmap_data(sonpath, base, '_data_TL.dat', 'float32', TL)
    del TL
    A_fp = io.get_mmap_data(sonpath, base, '_data_incidentangle.dat', 'float32', shape_star)
    TL_fp = io.get_mmap_data(sonpath, base, '_data_TL.dat', 'float32', shape_star)
    R_fp = io.get_mmap_data(sonpath, base, '_data_range.dat', 'float32', shape_star)
    if correct_withwater == 1:
        Zt = correct_scans(star_fp, A_fp, TL_fp, dofilt)
        # create memory mapped file for Z
        shape_star = io.set_mmap_data(sonpath, base, '_data_star_lw.dat', 'float32', Zt)
    #we are only going to access the portion of memory required
    star_fp = io.get_mmap_data(sonpath, base, '_data_star_l.dat', 'float32', shape_star)
    ##Zt = correct_scans(star_fp, A_fp, TL_fp, dofilt)
    #phi=1.69
    # NOTE(review): this rebinding clobbers the sediment-attenuation `alpha`
    # computed above — the two quantities share one name; confirm intent.
    alpha=59 # vertical beam width at 3db
    theta=35 #opening angle theta
    # lambertian correction
    Zt = correct_scans_lambertian(star_fp, A_fp, TL_fp, R_fp, meta['c'], meta['f'], theta, alpha)
    Zt = np.squeeze(Zt)
    # per-column median used to flatten both starboard AND port scans below
    avg = np.nanmedian(Zt,axis=0)
    ##avg = median_filter(avg,int(len(avg)/10))
    Zt2 = Zt-avg + np.nanmean(avg)
    Zt2 = Zt2 + np.abs(np.nanmin(Zt2))
    try:
        Zt2 = median_filter(Zt2, (3,3))
    except:
        pass
    ##Zt2 = np.empty(np.shape(Zt))
    ##for kk in range(np.shape(Zt)[1]):
    ##   Zt2[:,kk] = (Zt[:,kk] - avg) + np.nanmean(avg)
    ##Zt2[Zt<=0] = np.nan
    ##Zt2[Zt2<=0] = np.nan
    del Zt
    # create memory mapped file for Z
    shape_star = io.set_mmap_data(sonpath, base, '_data_star_la.dat', 'float32', Zt2)
    del Zt2
    #we are only going to access the portion of memory required
    star_fp = io.get_mmap_data(sonpath, base, '_data_star_la.dat', 'float32', shape_star)
    # ------------------------------------------------------------------
    # 5) Port side: same treatment, reusing the starboard range/angle/TL grids
    # ------------------------------------------------------------------
    ######### port
    if correct_withwater == 1:
        # NOTE(review): `TL` was deleted above (`del TL`) — this branch will
        # raise NameError; it presumably should pass TL_fp like the star side.
        Zt = correct_scans(port_fp, A_fp, TL, dofilt)
        # create memory mapped file for Z
        shape_port = io.set_mmap_data(sonpath, base, '_data_port_lw.dat', 'float32', Zt)
    Zt = remove_water(port_fp, bed, shape_port, dep_m, pix_m, 0, maxW)
    Zt = np.squeeze(Zt)
    # create memory mapped file for Z
    shape_port = io.set_mmap_data(sonpath, base, '_data_port_l.dat', 'float32', Zt)
    #we are only going to access the portion of memory required
    port_fp = io.get_mmap_data(sonpath, base, '_data_port_l.dat', 'float32', shape_port)
    ##Zt = correct_scans(port_fp, A_fp, TL_fp, dofilt)
    # lambertian correction
    Zt = correct_scans_lambertian(port_fp, A_fp, TL_fp, R_fp, meta['c'], meta['f'], theta, alpha)
    Zt = np.squeeze(Zt)
    # flatten with the STARBOARD per-column median (shared `avg`) — intentional?
    Zt2 = Zt-avg + np.nanmean(avg)
    Zt2 = Zt2 + np.abs(np.nanmin(Zt2))
    ##Zt2 = np.empty(np.shape(Zt))
    ##for kk in range(np.shape(Zt)[1]):
    ##   Zt2[:,kk] = (Zt[:,kk] - avg) + np.nanmean(avg)
    ##Zt2[Zt<=0] = np.nan
    ##Zt2[Zt2<=0] = np.nan
    del Zt
    # create memory mapped file for Z
    shape_port = io.set_mmap_data(sonpath, base, '_data_port_la.dat', 'float32', Zt2)
    del Zt2
    #we are only going to access the portion of memory required
    port_fp = io.get_mmap_data(sonpath, base, '_data_port_la.dat', 'float32', shape_port)
    # ------------------------------------------------------------------
    # 6) Optional plots of the merged port+star scans
    # ------------------------------------------------------------------
    ## do plots of merged scans
    if doplot==1:
        if correct_withwater == 1:
            port_fpw = io.get_mmap_data(sonpath, base, '_data_port_lw.dat', 'float32', shape_port)
            star_fpw = io.get_mmap_data(sonpath, base, '_data_star_lw.dat', 'float32', shape_star)
            if len(np.shape(star_fpw))>2:
                for p in range(len(star_fpw)):
                    plot_merged_scans(port_fpw[p], star_fpw[p], dist_m, shape_port, ft, sonpath, p)
            else:
                plot_merged_scans(port_fpw, star_fpw, dist_m, shape_port, ft, sonpath, 0)
        else:
            if len(np.shape(star_fp))>2:
                for p in range(len(star_fp)):
                    plot_merged_scans(port_fp[p], star_fp[p], dist_m, shape_port, ft, sonpath, p)
            else:
                plot_merged_scans(port_fp, star_fp, dist_m, shape_port, ft, sonpath, 0)
    # ------------------------------------------------------------------
    # 7) Downward-looking low/high frequency scans (if present)
    # ------------------------------------------------------------------
    # load memory mapped scans
    shape_low = np.squeeze(meta['shape_low'])
    shape_hi = np.squeeze(meta['shape_hi'])
    if shape_low!='':
        if os.path.isfile(os.path.normpath(os.path.join(sonpath,base+'_data_dwnlow2.dat'))):
            # NOTE(review): `finally` ALWAYS runs, so whichever mapping the
            # try/except established is immediately overwritten — and with
            # tuple(shape_hi), which looks like the wrong shape for the low
            # channel. Almost certainly a bug; confirm against upstream PyHum.
            try:
                low_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow2.dat', 'int16', tuple(shape_low))
            except:
                low_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow.dat', 'int16', tuple(shape_low))
            finally:
                low_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow.dat', 'int16', tuple(shape_hi))
            #if 'shape_hi' in locals():
            #   low_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow2.dat', 'int16', tuple(shape_hi))
        else:
            try:
                low_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow.dat', 'int16', tuple(shape_low))
            except:
                if 'shape_hi' in locals():
                    low_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow.dat', 'int16', tuple(shape_hi))
    shape_hi = np.squeeze(meta['shape_hi'])
    if shape_hi!='':
        if os.path.isfile(os.path.normpath(os.path.join(sonpath,base+'_data_dwnhi2.dat'))):
            # NOTE(review): same always-running `finally` issue as the low
            # channel above, here overwriting with tuple(shape_low).
            try:
                hi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi2.dat', 'int16', tuple(shape_hi))
            except:
                hi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16', tuple(shape_hi))
            finally:
                hi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16', tuple(shape_low))
            #if 'shape_low' in locals():
            #   hi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi2.dat', 'int16', tuple(shape_low))
        else:
            try:
                hi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16', tuple(shape_hi))
            except:
                if 'shape_low' in locals():
                    hi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16', tuple(shape_low))
    if 'low_fp' in locals():
        ######### low
        Zt = remove_water(low_fp, bed, shape_low, dep_m, pix_m, 0, maxW)
        Zt = np.squeeze(Zt)
        # create memory mapped file for Z
        shape_low = io.set_mmap_data(sonpath, base, '_data_dwnlow_l.dat', 'float32', Zt)
        del Zt
        #we are only going to access the portion of memory required
        low_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow_l.dat', 'float32', shape_low)
        Zt = correct_scans2(low_fp, TL_fp)
        # create memory mapped file for Z
        shape_low = io.set_mmap_data(sonpath, base, '_data_dwnlow_la.dat', 'float32', Zt)
        del Zt
        #we are only going to access the portion of memory required
        low_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow_la.dat', 'float32', shape_low)
        if doplot==1:
            if len(np.shape(low_fp))>2:
                for p in range(len(low_fp)):
                    plot_dwnlow_scans(low_fp[p], dist_m, shape_low, ft, sonpath, p)
            else:
                plot_dwnlow_scans(low_fp, dist_m, shape_low, ft, sonpath, 0)
    if 'hi_fp' in locals():
        ######### hi
        Zt = remove_water(hi_fp, bed, shape_hi, dep_m, pix_m, 0, maxW)
        Zt = np.squeeze(Zt)
        # create memory mapped file for Z
        shape_hi = io.set_mmap_data(sonpath, base, '_data_dwnhi_l.dat', 'float32', Zt)
        del Zt
        #we are only going to access the portion of memory required
        hi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi_l.dat', 'float32', shape_hi)
        Zt = correct_scans2(hi_fp, TL_fp)
        # create memory mapped file for Z
        shape_hi = io.set_mmap_data(sonpath, base, '_data_dwnhi_la.dat', 'float32', Zt)
        del Zt
        #we are only going to access the portion of memory required
        hi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi_la.dat', 'float32', shape_hi)
        if doplot==1:
            if len(np.shape(hi_fp))>2:
                for p in range(len(hi_fp)):
                    plot_dwnhi_scans(hi_fp[p], dist_m, shape_hi, ft, sonpath, p)
            else:
                plot_dwnhi_scans(hi_fp, dist_m, shape_hi, ft, sonpath, 0)
    # ------------------------------------------------------------------
    # 8) Report elapsed time
    # ------------------------------------------------------------------
    if os.name=='posix': # true if linux/mac
        elapsed = (time.time() - start)
    else: # windows
        elapsed = (time.clock() - start)
    print("Processing took "+ str(elapsed) + "seconds to analyse")
    print("Done!")
    print("===================================================")
# =========================================================
def water_atten(H, f, c, pH, T, S):
    """
    Two-way absorption of sound in water (dB) over path length H.

    Structured as two relaxation terms plus an f**2 freshwater term (appears
    to follow an Ainslie & McColm-type empirical formulation — TODO confirm
    the exact reference). H: range (m); f: frequency; c: sound speed (m/s);
    pH: acidity; T: temperature (deg C); S: salinity (ppt).
    """
    H = np.abs(H)
    fsq = f**2
    # first relaxation term (pH-dependent)
    P1 = 1  # constant — no pressure correction applied to this term
    A1 = (8.86/c)*(10**(0.78*pH - 5))
    f1 = 2.8*(S/35)**0.5 * 10**(4 - 1245/(T + 273))
    term1 = (A1*P1*f1*fsq)/(fsq + f1**2)
    # second relaxation term (salinity-dependent)
    A2 = 21.44*(S/c)*(1 + 0.025*T)
    f2 = (8.17 * 10**(8 - 1990/(T + 273))) / (1 + 0.0018*(S - 35))
    P2 = 1 - (1.37*10**-4) * H + (6.2 * 10**-9)* H**2
    term2 = (A2*P2*f2*fsq)/(fsq + f2**2)
    # viscous (freshwater) term, quadratic in frequency
    A3 = (4.937 *10**-4) - (2.59 * 10**-5)*T + (9.11* 10**-7)*T**2- (1.5 * 10**-8)*T**3
    P3 = 1 - (3.83 * 10**-5)*H + (4.9 *10**(-10) )* H**2
    term3 = A3*P3*fsq
    # total absorption in dB/km; convert to dB/m and apply over the 2-way path
    alphaw = term1 + term2 + term3
    return 2*(alphaw/1000)*H
# =========================================================
def sed_atten(f,conc,dens,d,c):
    '''
    calculates the attenuation due to suspended sediment,
    given sediment concentration, density, grain size, frequency and speed of sound
    according to Urick (1948), JASA
    http://www.rdinstruments.com/pdfs/Use-ADCP-suspended-sediment%20discharge-NY.pdf
    http://rspa.royalsocietypublishing.org/content/459/2037/2153.full.pdf
    example values
    f = 400 freq, kHz
    c = 1490 speed sound in water, m/s
    d = [40, 100] microns
    dens = [2000, 2650] sediment density, kg/m^3
    conc = [1000, 100] mg/L
    Returns NaN when there is no sediment (all concentrations zero).
    '''
    # NOTE(review): np.any(conc) already yields a bool; comparing it ">0" is
    # equivalent but obscure — effectively "if any concentration is nonzero".
    if np.any(conc)>0:
        f = f * 1000 # frequency, Hz
        sigma = dens/1000 # ratio of sediment to fluid density (water = 1000 kg/m^3)
        d = d/1e6 # particle diameter, m
        nu = 1.004e-6 # viscosity fresh water, m^2/s
        lam = c/f # acoustic wavelength, m
        k = (2*np.pi)/lam # acoustic wavenumber
        w = (2*np.pi)*f # radian frequency
        delta_v = np.sqrt(2*nu/w) # viscous boundary-layer thickness
        phi = (conc/1e6)/dens #sediment volume fraction
        a = d/2 # particle radius, m
        tau = (1/2) + (9/4)*(delta_v/a)
        s = (9/4)*(delta_v/a)*(1+(delta_v/a))
        # Urick scattering + viscous-drag attenuation, summed over size classes
        alpha = phi*( (1/6) *k**4 *a**3 + k*(sigma-1)**2 *( s/( s**2+(sigma+tau)**2 ) ) )*1e4
        # NOTE(review): an earlier comment claimed a factor of 2 for two-way
        # travel, but no such factor is applied here (the caller multiplies
        # by 2 in its TL formula) — confirm which place should own the 2x.
        return np.sum(alpha)
    else:
        return np.nan
# =========================================================
def custom_save(figdirec, root):
    """Write the current matplotlib figure to figdirec/root (tight bbox, 400 dpi)."""
    target = os.path.normpath(os.path.join(figdirec, root))
    plt.savefig(target, bbox_inches='tight', dpi=400)
# =========================================================
def remove_water(fp,bed,shape, dep_m, pix_m, calcR, maxW):
    """Shift each ping so the bed pick becomes row 0 (removing the water column).

    fp : scan data (2-D, or 3-D as a stack of chunks); bed : per-ping bed row
    indices; shape : chunk shape; dep_m : per-ping depth (m); pix_m : metres
    per pixel; calcR : 1 to also build range/incident-angle grids; maxW : max
    transducer power, used to scale raw counts to dB.

    Returns Zt (list of shifted dB scans), and when calcR==1 also R (slant
    ranges) and the incident angle pi/2 - arctan(depth/range) per pixel.
    Columns whose shift fails (bad bed pick) are filled with ones.
    """
    Zt = []
    if calcR==1:
        R = []
        A = []
    if len(np.shape(fp))>2:
        # 3-D case: process each chunk, slicing this chunk's bed picks/depths
        for p in range(len(fp)):
            # scale raw int16 counts to dB using the transducer power
            data_dB = fp[p]*(10*np.log10(maxW)/255)
            Zbed = np.squeeze(bed[shape[-1]*p:shape[-1]*(p+1)])
            # shift proportionally depending on where the bed is
            for k in range(np.shape(data_dB)[1]):
                try:
                    data_dB[:,k] = np.r_[data_dB[Zbed[k]:,k], np.zeros( (np.shape(data_dB)[0] - np.shape(data_dB[Zbed[k]:,k])[0] ,) )]
                except:
                    data_dB[:,k] = np.ones(np.shape(data_dB)[0])
            Zt.append(data_dB)
            if calcR ==1:
                extent = shape[1]
                # sample ranges along a ping, in metres
                yvec = np.linspace(pix_m,extent*pix_m,extent)
                d = dep_m[shape[-1]*p:shape[-1]*(p+1)]
                # a: depth/range ratio grid (tangent of the grazing angle)
                a = np.ones(np.shape(fp[p]))
                for k in range(len(d)):
                    a[:,k] = d[k]/yvec
                # r: horizontal range grid (NaN above the bed where yvec < d)
                r = np.ones(np.shape(fp[p]))
                for k in range(len(d)):
                    r[:,k] = np.sqrt(yvec**2 - d[k]**2)
                # shift proportionally depending on where the bed is
                for k in range(np.shape(r)[1]):
                    try:
                        r[:,k] = np.r_[r[Zbed[k]:,k], np.zeros( (np.shape(r)[0] - np.shape(r[Zbed[k]:,k])[0] ,) )]
                        a[:,k] = np.r_[a[Zbed[k]:,k], np.zeros( (np.shape(a)[0] - np.shape(a[Zbed[k]:,k])[0] ,) )]
                    except:
                        r[:,k] = np.ones(np.shape(r)[0])
                        a[:,k] = np.ones(np.shape(a)[0])
                R.append(r)
                A.append(a)
    else:
        # 2-D case: a single chunk; same logic without the chunk loop
        data_dB = fp*(10*np.log10(maxW)/255)
        Zbed = np.squeeze(bed)
        # shift proportionally depending on where the bed is
        for k in range(np.shape(data_dB)[1]):
            try:
                data_dB[:,k] = np.r_[data_dB[Zbed[k]:,k], np.zeros( (np.shape(data_dB)[0] - np.shape(data_dB[Zbed[k]:,k])[0] ,) )]
            except:
                data_dB[:,k] = np.ones(np.shape(data_dB)[0])
        Zt.append(data_dB)
        if calcR ==1:
            extent = shape[0]
            yvec = np.linspace(pix_m,extent*pix_m,extent)
            d = dep_m
            a = np.ones(np.shape(fp))
            for k in range(len(d)):
                a[:,[k]] = np.expand_dims(d[k]/yvec, axis=1)
            r = np.ones(np.shape(fp))
            for k in range(len(d)):
                r[:,[k]] = np.expand_dims(np.sqrt(yvec**2 - d[k]**2), axis=1)
            # shift proportionally depending on where the bed is
            for k in range(np.shape(r)[1]):
                try:
                    r[:,k] = np.r_[r[Zbed[k]:,k], np.zeros( (np.shape(r)[0] - np.shape(r[Zbed[k]:,k])[0] ,) )]
                    a[:,k] = np.r_[a[Zbed[k]:,k], np.zeros( (np.shape(a)[0] - np.shape(a[Zbed[k]:,k])[0] ,) )]
                except:
                    r[:,k] = np.ones(np.shape(r)[0])
                    a[:,k] = np.ones(np.shape(a)[0])
            R.append(r)
            A.append(a)
    if calcR ==1:
        # convert the depth/range ratio grids to incident angles
        return Zt, R, np.pi/2 - np.arctan(A)
    else:
        return Zt
# =========================================================
def correct_scans(fp, a_fp, TL, dofilt):
    """Run c_scans on a single 2-d scan, or on every chunk of a 3-d
    stack in parallel across all available CPU cores."""
    if np.ndim(fp) == 2:
        return c_scans(fp, a_fp, TL, dofilt)
    jobs = (delayed(c_scans)(fp[k], a_fp[k], TL[k], dofilt) for k in range(len(fp)))
    return Parallel(n_jobs=cpu_count(), verbose=0)(jobs)
# =========================================================
def c_scans(fp, a_fp, TL, dofilt):
    """Correct one sidescan chunk for beam angle and transmission loss.

    fp     : 2-d array of raw intensities; zeros mark no-data samples
    a_fp   : per-pixel beam angle (radians assumed -- TODO confirm units)
    TL     : transmission-loss term, broadcastable against fp
    dofilt : 1 to apply phase-preserving dynamic range compression first
    Returns the corrected array with invalid samples set to NaN.
    """
    # Remember no-data positions before fp is replaced by the filtered copy.
    nodata = fp==0
    if dofilt==1:
        fp = do_ppdrc(fp, np.shape(fp)[-1]/4)
    #mg = 10**np.log10(np.asarray(fp*np.cos(a_fp),'float32')+0.001)
    # NOTE(review): operator precedence makes the next line compute
    # fp*1 - cos(a_fp)**2, not fp*(1 - cos(a_fp)**2) -- confirm intent.
    mg = 10**np.log10(np.asarray(fp * 1-np.cos(a_fp)**2,'float32')+0.001 + TL)
    # Mask zero-input, negative, and original no-data samples as NaN.
    mg[fp==0] = np.nan
    mg[mg<0] = np.nan
    mg[nodata] = np.nan
    return mg
# =========================================================
def correct_scans_lambertian(fp, a_fp, TL, R, c, f, theta, alpha):
    """Dispatch c_scans_lambertian over one 2-d chunk, or over every
    chunk of a 3-d stack in parallel across all CPU cores."""
    if np.ndim(fp) == 2:
        return c_scans_lambertian(fp, a_fp, TL, R, c, f, theta, alpha)
    jobs = (delayed(c_scans_lambertian)(fp[k], a_fp[k], TL[k], R[k], c, f, theta, alpha)
            for k in range(len(fp)))
    return Parallel(n_jobs=cpu_count(), verbose=0)(jobs)
# =========================================================
def c_scans_lambertian(fp, a_fp, TL, R, c, f, theta, alpha):
    """Radiometrically correct one scan assuming Lambertian scattering.

    fp    : raw intensities (zeros mark no-data)
    a_fp  : per-pixel beam angle (radians assumed -- TODO confirm)
    TL    : transmission-loss term
    R     : per-pixel range, in degrees here since it is passed to deg2rad
    c     : sound speed; f : frequency in kHz (multiplied by 1000 below)
    theta : beam angle (degrees); alpha : beam width used for the
            transducer-radius estimate
    Returns corrected, TV-denoised array with invalid samples as NaN.
    """
    # Acoustic wavelength; f is converted from kHz to Hz.
    lam = c/(f*1000)
    Rtmp = np.deg2rad(R.copy()) ##/2
    try:
        # Replace zero ranges with the last non-zero range to avoid /0.
        Rtmp[np.where(Rtmp==0)] = Rtmp[np.where(Rtmp!=0)[0][-1]]
    except:
        pass
    #transducer radius
    a = 0.61*lam / (np.sin(alpha/2))
    M = (f*1000)/(a**4)
    # no 'M' constant of proportionality
    # Beam pattern term; jv is presumably scipy.special.jv (Bessel J1) -- confirm import.
    phi = ((M*(f*1000)*a**4) / Rtmp**2)*(2*jv(1,(2*np.pi/lam)*a*np.sin(np.deg2rad(theta))) / (2*np.pi/lam)*a*np.sin(np.deg2rad(theta)))**2
    phi = np.squeeze(phi)
    phi[phi==np.inf]=np.nan
    # fp is 1d (1 scan)
    beta = np.cos(a_fp)
    try:
        # NOTE(review): 10e-5 is 1e-4, not 1e-5 -- confirm the intended threshold.
        beta[np.where(beta<10e-5)] = beta[np.where(beta>10e-5)[0][-1]]
    except:
        pass
    mg = (fp / phi * beta)*(1/Rtmp)
    mg[np.isinf(mg)] = np.nan
    # K rescales so the corrected image keeps the total energy of the input.
    K = np.nansum(fp)/np.nansum(mg)
    mg = mg*K
    mg[mg<0] = np.nan
    mg = 10**np.log10(mg + TL)
    mg[fp==0] = np.nan
    mg[mg<0] = np.nan
    # Zero-fill NaNs for the denoiser, then restore them afterwards.
    mask = np.isnan(mg)
    mg[np.isnan(mg)] = 0
    mg = denoise_tv_chambolle(mg.copy(), weight=.2, multichannel=False).astype('float32')
    mg[mask==True] = np.nan
    return mg
# =========================================================
def correct_scans2(fp, TL):
    """Apply c_scans2 to one 2-d chunk, or to each chunk of a 3-d
    stack in parallel across all CPU cores."""
    if np.ndim(fp) == 2:
        return c_scans2(fp, TL)
    jobs = (delayed(c_scans2)(fp[k], TL[k]) for k in range(len(fp)))
    return Parallel(n_jobs=cpu_count(), verbose=0)(jobs)
# =========================================================
def c_scans2(fp, TL):
    """Correct one scan for transmission loss only.

    Falls back to the uncorrected form if adding TL fails (e.g. an
    incompatible shape).  Zero-input and negative samples become NaN.
    """
    base = np.asarray(fp, 'float32') + 0.001
    try:
        mg = 10**np.log10(base + TL)
    except:  # original deliberately swallows any failure of the TL add
        mg = 10**np.log10(base)
    mg[fp == 0] = np.nan
    mg[mg < 0] = np.nan
    return mg
# =========================================================
def do_ppdrc(fp, filtsize):
    """Apply phase-preserving dynamic range compression (ppdrc module).

    NaNs are zero-filled for the filter, the result is rescaled back to
    the input's min/max, and the original NaN positions are restored.
    """
    dat = fp.astype('float64')
    dat[np.isnan(dat)] = 0
    dat1 = ppdrc.ppdrc(dat, filtsize)
    # Rescale filter output back into the input's dynamic range.
    dat1 = humutils.rescale(dat1.getdata(),np.min(dat),np.max(dat))
    dat1[np.isnan(fp)] = np.nan
    return dat1
# =========================================================
def plot_merged_scans(dat_port, dat_star, dist_m, shape_port, ft, sonpath, p):
    """Save a greyscale plot of the merged (port above starboard) scan
    for chunk p, with axes in metres, via custom_save into sonpath."""
    # 'if 2>1' is an always-true stub; the commented test used to skip
    # re-plotting when the output file already existed.
    if 2>1: #~os.path.isfile(os.path.normpath(os.path.join(sonpath,'merge_corrected_scan'+str(p)))):
        if len(shape_port)>2:
            # 3-d stack: slice the along-track distances for this chunk.
            Zdist = dist_m[shape_port[-1]*p:shape_port[-1]*(p+1)]
            extent = shape_port[1] #np.shape(merge)[0]
        else:
            Zdist = dist_m
            extent = shape_port[0] #np.shape(merge)[0]
        fig = plt.figure()
        # Port is flipped so both sides share the track line at range 0.
        plt.imshow(np.vstack((np.flipud(np.uint8(dat_port)), np.uint8(dat_star))), cmap='gray', extent=[min(Zdist), max(Zdist), -extent*(1/ft), extent*(1/ft)])
        plt.ylabel('Range (m)'), plt.xlabel('Distance along track (m)')
        plt.axis('normal'); plt.axis('tight')
        custom_save(sonpath,'merge_corrected_scan'+str(p))
        del fig
# =========================================================
def plot_dwnlow_scans(dat_dwnlow, dist_m, shape_low, ft, sonpath, p):
    """Save a greyscale plot of the low-frequency downward scan for
    chunk p via custom_save into sonpath."""
    # 'if 2>1' is an always-true stub for the commented file-exists check.
    if 2>1: #~os.path.isfile(os.path.normpath(os.path.join(sonpath,'dwnlow_corrected_scan'+str(p)))):
        if len(shape_low)>2:
            # 3-d stack: slice this chunk's along-track distances.
            Zdist = dist_m[shape_low[-1]*p:shape_low[-1]*(p+1)]
            extent = shape_low[1] #np.shape(merge)[0]
        else:
            Zdist = dist_m
            extent = shape_low[0] #np.shape(merge)[0]
        fig = plt.figure()
        plt.imshow(np.uint8(dat_dwnlow), cmap='gray', extent=[min(Zdist), max(Zdist), extent*(1/ft), 0])
        plt.ylabel('Range (m)'), plt.xlabel('Distance along track (m)')
        #plt.axis('normal');
        #plt.axis('tight')
        custom_save(sonpath,'dwnlow_corrected_scan'+str(p))
        del fig
# =========================================================
def plot_dwnhi_scans(dat_dwnhi, dist_m, shape_hi, ft, sonpath, p):
    """Save a greyscale plot of the high-frequency downward scan for
    chunk p via custom_save into sonpath."""
    # 'if 2>1' is an always-true stub for the commented file-exists check.
    if 2>1: #~os.path.isfile(os.path.normpath(os.path.join(sonpath,'dwnhi_corrected_scan'+str(p)))):
        if len(shape_hi)>2:
            # 3-d stack: slice this chunk's along-track distances.
            Zdist = dist_m[shape_hi[-1]*p:shape_hi[-1]*(p+1)]
            extent = shape_hi[1] #np.shape(merge)[0]
        else:
            Zdist = dist_m
            extent = shape_hi[0] #np.shape(merge)[0]
        fig = plt.figure()
        plt.imshow(np.uint8(dat_dwnhi), cmap='gray', extent=[min(Zdist), max(Zdist), extent*(1/ft), 0])
        plt.ylabel('Range (m)'), plt.xlabel('Distance along track (m)')
        #plt.axis('normal');
        #plt.axis('tight')
        custom_save(sonpath,'dwnhi_corrected_scan'+str(p))
        del fig
# =========================================================
# =========================================================
if __name__ == '__main__':
    # Script entry point: humfile, sonpath, maxW and doplot are expected
    # to be defined by module-level argument handling not shown here.
    correct(humfile, sonpath, maxW, doplot)
|
22,965 | 267333cd2e1c283970798838fc925c5e9307af99 |
def getSMSList():
    """Return every line of the SMS data file as a list of strings.

    Fixes: the file handle was left open if readlines() raised; a
    context manager now guarantees it is closed.  The path string is
    written with an explicit backslash -- the original "sms\\data.txt"
    only worked because the invalid escape "\\d" survives verbatim
    (a DeprecationWarning in modern Python).  It was probably meant as
    a Windows path; os.path.join('sms', 'data.txt') would be the
    portable fix, but would open a different file on other platforms,
    so the exact original file name is preserved here.
    """
    with open("sms\\data.txt", "r") as f:
        return f.readlines()
def get_sms_list_sort(sms_list):
    """Return a new list containing sms_list in reverse order.

    (Python 2 module -- note the statement-form print below.)
    """
    sms_list_sort=[]
    k=len(sms_list)
    # Debug output: number of messages being reversed.
    print k
    for i in range(k):
        # Append elements from the end toward the start.
        sms_list_sort.append(sms_list[(k-1-i)])
        #print sms_list[k-1-i]
    return sms_list_sort
# Script entry: read the stored messages and build the reversed list.
# NOTE(review): the reversed result is discarded here -- verify callers.
sms_list=getSMSList()
get_sms_list_sort(sms_list)
22,966 | d116c614475a227ccc3284b902cc46ec83f2de96 | # noinspection PyPackageRequirements
"""`main` is the top level module for your Flask application."""
import json
import string
import random
# Import the Flask Framework
from flask import Flask, session,render_template,request,g
import flask
# appengine stuff
import httplib2
import apiclient
from apiclient import discovery
from apiclient.discovery import build
from apiclient.errors import HttpError
# oauth stuff -- probably needs to be imported wherever @oauth2.required is used
from oauth2client import client
from oauth2client.client import AccessTokenRefreshError
from oauth2client import flask_util
from oauth2client.flask_util import UserOAuth2
#import locally accessable modules
from app_module import app
from app_module.valid import validator # checks for validation
from app_module.projcheck import get_proj # gets list of projects
from compute_request import ComputeInfo
from google.appengine.api import urlfetch
#TODO: FIx this line -- make it more secure, perhaps have Python generate a random number to use
# NOTE(review): a random per-process SECRET_KEY invalidates all sessions on
# every restart and breaks multi-instance deployments; random.choice is also
# not cryptographically secure (the secrets module would be).
app.config['SECRET_KEY']=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
# Set permissions and permission request
# First, scope:
# OAuth2 scopes requested from Google (BigQuery, Cloud Platform, profile/email).
scope=['https://www.googleapis.com/auth/bigquery',
'https://www.googleapis.com/auth/plus.me',
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
'email',
'https://www.googleapis.com/auth/userinfo.profile']
# Now, setup the oauth2 request
# The service key should be a oauth2 client id file as granted under APIs and credentials at https://console.developers.google.com/apis/credentials
oauth2=UserOAuth2(app,client_secrets_file="./GeneralAdmin_Webapp_OAUTH.json",scopes=scope)
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
# Baseline Defaults
# set the admin project -- this will be referenced at various points in the app
admin_project='admin-project'
# Default Compute Engine zone used when none is stored in the session.
default_zone='us-central1-f'
@app.route('/')
@oauth2.required
def hello():
    """Landing page: load the user's projects and render index.html if
    the user is a member of the gating test project, else abort 403."""
    # proj_service = build('cloudresourcemanager','v1beta1',credentials=oauth2.credentials)
    # projects_raw=proj_service.projects().list().execute()
    # if projects_raw:
    #     session['projects'] = [i['projectId'] for i in projects_raw['projects']]
    # else:
    #     session['projects']='None'
    get_proj(oauth2)
    # Change next line to determine the project whose membership is tested for access
    test_project = 'PROJECT TO TEST FOR VALIDATION'
    if test_project in session['projects']:
        session['validated'] = 1
        return render_template('index.html')
    else:
        # Clear any stale validation flag before refusing access.
        [session.pop('validated') if session.get("validated") else None]
        flask.abort(403)
# This looks like it works swimmingly.
# This is a brief helper script that will allow us to use an @validator decorator to ensure that members that are not in the required group are not given access to webapp pages
@app.route('/test.html')
@validator
def testing_page():
    """Debug page: dump the session's validation flag and project list."""
    out = """
    This page has a bunch of stuff on it that is used for testing
    <p> {}
    <p> {}
    """.format(session.get('validated',0),session['projects'])
    return out
@app.route("/temptest.html")
@validator
def temptest():
return flask.redirect(flask.url_for("project_select"))
@app.route("/active_project")
@validator
def project_select():
if not session.get("projects"):
get_proj(oauth2) # attempt to get projects if not present in session.
# TODO 01-13-2016 00:32 put error handling here
msg=flask.get_flashed_messages()
return render_template("projectform.html")
@app.route('/active_project',methods=['POST'])
@validator
def project_select_pull_in():
    """Store the posted project choice in the session, then redirect to
    the caller's stored return endpoint (or the site root)."""
    session['active_proj'] = request.form['project']
    return_target = session.get("returnpath")
    if return_target:
        return flask.redirect(flask.url_for(return_target))
    return flask.redirect('/')
# @app.route("/compute_request_pt1")
# @validator
# def compute_req():
# return session['active_proj']
# @app.route('/oauth2callback')
# def info():
# if oauth2.email!='trcook@gmail.com':
# redirect(url_for('404'))
# else:
# return "you made it!! {}".format(oauth2.email)
# Setup the unauthorized handler
@app.errorhandler(403)
def page_not_found(e):
    """Return a custom 403 (forbidden) error."""
    return "Hey Bozo. You don't belong here -- scramo.", 403
# NOTE(review): this function reuses the name of the 403 handler above,
# shadowing it at module level.  Flask has already registered both by the
# time the second def runs, so behaviour is unaffected, but one of them
# should be renamed for clarity.
@app.errorhandler(404)
def page_not_found(e):
    """Return a custom 404 error."""
    return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def application_error(e):
    """Return a custom 500 error including the exception text."""
    return 'Sorry, unexpected error: {}'.format(e), 500
@app.route('/login')
def login():
    """Drop any cached validation flag, then send the user into the
    OAuth2 authorization flow (returning to the site root)."""
    session.pop('validated', None)
    return flask.redirect(oauth2.authorize_url('/'))
@app.route("/logout")
@oauth2.required
def logout():
if session.get('validated'):
session.pop('validated')
try:
oauth2.credentials.revoke(httplib2.HTTP())
'credentials.revoke success'
# oauth2.credentials.revoke(httplib2.Http())
# session.clear()
except:
print 'credentials.revoke did not work'
pass
try:
urlfetch.Fetch(url=oauth2.credentials.revoke_uri + '?token=' + oauth2.credentials.access_token,method=urlfetch.GET)
print 'flask redirect success'
except:
print 'flask.redirect did not work'
pass
# except:
# # flask.redirect(credentials.revoke_uri + '?token=' + credentials.access_token)
# return '''
# <p>Problems loging out. Probably due to changes in the app.
# <p>Try logging in again and then logging out:
# <a href='/login'>click here</a>
# '''
session.clear() # needed because oauth is storing oauth2 creds in session.
return 'later duder'
@app.route("/reset")
def reset_ses():
if session.get("active_proj"):
session.pop("active_proj")
if session.get("projectlist"):
session.pop("projectlist")
if session.get("validated"):
session.pop("validated")
return 'go back'
@app.before_request
def session_defaults():
    """Before every request, seed session defaults and try to populate
    the project list when it is missing."""
    print 'before_request'
    if not 'admin_project' in session:
        session['admin_project']=admin_project
    if not 'zone' in session:
        session['zone']=default_zone
    # NOTE(review): because of the 'or request.endpoint' disjunct, this
    # branch only runs when request.endpoint is falsy AND 'projectlist'
    # is absent; the trailing "== 'login'" comparison can never change
    # the outcome.  Confirm the intended condition.
    if not ('projectlist' in session or request.endpoint or request.endpoint == 'login'):
        print '{}'.format(str(request.endpoint))
        print 'proj list not found'
        try:
            get_proj(oauth2)
        except:
            # Fall back to the landing page if the project fetch fails.
            return flask.redirect('/')
@app.errorhandler(AccessTokenRefreshError)
def handle_invalid_grant(e):
    """When the OAuth2 token refresh fails, push the user back through login."""
    print 'hello'
    return flask.redirect('/login')
22,967 | a4c03d28fb622e868fea3eda97a3c0d1a43c9695 | # coding: utf-8
import sys
import time
import telepot
import wikipedia
def handle(msg):
    """Telegram message handler: look up /get queries on French
    Wikipedia and reply with a one-sentence summary plus the page URL.

    NOTE(review): telepot.glance2 is an older telepot API -- confirm the
    installed telepot version still provides it.
    """
    wikipedia.set_lang("fr")
    content_type, chat_type, chat_id = telepot.glance2(msg)
    print (content_type, chat_type, chat_id)
    if content_type == 'sticker' :
        bot.sendMessage(chat_id,'Pardon ?')
    if content_type == 'text' :
        command = msg['text'].split()
        print (command)
        if command[0] == '/help' :
            bot.sendMessage(chat_id,'Taper /get suivi du mot recherché')
        if command[0] == '/get' :
            if len(command) == 1 :
                # /get with no argument: ask the user for a search term.
                bot.sendMessage(chat_id, 'Quel est le mot recherché ? Taper /get suivi du mot recherché')
            else :
                try:
                    # Re-join everything after '/get' into one query string.
                    command = command[1:]
                    print(command)
                    command = ' '.join(command[0:])
                    print(command)
                    page = wikipedia.page(command)
                    bot.sendChatAction(chat_id, 'typing')
                    bot.sendMessage(chat_id,wikipedia.summary(command, sentences=1))
                    bot.sendChatAction(chat_id, 'typing')
                    bot.sendMessage(chat_id,page.url)
                except wikipedia.exceptions.PageError :
                    bot.sendMessage(chat_id, 'Il n\'y a aucun résultat correspondant à la requête.')
                except wikipedia.exceptions.DisambiguationError as e:
                    # Ambiguous query: send back the list of candidates.
                    bot.sendMessage(chat_id,'Cette requête renvoie plusieurs résultats, est ce que vous vouliez dire :')
                    bot.sendMessage(chat_id,e.options)
# Bot token is empty here -- it must be filled in before the bot can run.
bot = telepot.Bot('')
# Register the handler; telepot delivers messages on a background thread.
bot.notifyOnMessage(handle)
print ('Listening ...')
# Keep the main thread alive so the listener thread keeps running.
while 1:
    time.sleep(10)
|
def take_page_screenshot(sndri, flnm):
    """Save a screenshot of the driver's current page to *flnm*.

    sndri : a WebDriver-like object exposing get_screenshot_as_file()
    flnm  : destination file name (selenium expects a .png path)

    Improvement: the driver's success flag was previously discarded; it
    is now returned so callers can detect a failed write.  The call is
    otherwise unchanged (the debug print is kept).
    """
    print("inside taking screenshot")
    return sndri.get_screenshot_as_file(flnm)
|
22,969 | f9a04fec3946544db9fb416ec3928ca01a56b82e | import pygame
# -----------------------------------------------
# -----------------------------------------------
"""
Justin Andrews
Spring 2020
CS455 - Artificial Intelligence
Final Project - Peg Game
File Description:
This file contains functions needed to display the game.
Function Declarations:
board(screen)
placeHolder(screen, x, y)
placePeg(screen, x, y)
setBoard(pegs, screen)
"""
# -----------------------------------------------
# -----------------------------------------------
# DISPLAY VARIABLES
# load image to be used for the board
boardImage = pygame.image.load('images/pegBoard.png')
# set board x location
boardX = 200
# set board y location
boardY = 100
# load peg and peg place holder images
pegImage = pygame.image.load('images/redPeg.png')
holderImage = pygame.image.load('images/blackCircle.png')
# centers of peg holes for reference (screen pixels, holes 0-14
# numbered top-to-bottom, left-to-right)
pegHoleCenterX = [501, 446, 555, 388, 502, 614, 330, 445, 552, 673, 275, 386, 500, 609, 733]
pegHoleCenterY = [186, 277, 275, 372, 375, 373, 469, 469, 469, 468, 565, 565, 565, 565, 565]
# where to actually place pegs and peg place holders
# NOTE(review): 17.5 looks like half a 35-px sprite so images blit
# centred on the hole -- confirm against the image assets.
pegHoleX = [x - 17.5 for x in pegHoleCenterX]
pegHoleY = [y - 17.5 for y in pegHoleCenterY]
# -----------------------------------------------
# -----------------------------------------------
def board(screen):
    """
    Function Description:
        Draw the peg board image plus the three clickable text labels:
        the BFS button, the DFS button, and the clear-clicks button.
    :param screen: surface the graphics are blitted onto
    :return: no return value; output is purely graphical
    """
    # Board background at its fixed location.
    screen.blit(boardImage, (boardX, boardY))
    # The two search-algorithm buttons share one 32-pt font.
    button_font = pygame.font.SysFont("arial", 32)
    for label, position in (
            ("Click here to run BFS search algorithm!", (10, 10)),
            ("Click here to run DFS search algorithm!", (530, 10))):
        screen.blit(button_font.render(label, True, (0, 0, 0)), position)
    # Smaller 25-pt label acting as the clear-clicks button.
    clicks_font = pygame.font.SysFont("arial", 25)
    clicks_text = clicks_font.render("Click here to clear clicks!", True, (0, 0, 0))
    screen.blit(clicks_text, (390, 680))
# -----------------------------------------------
def placeHolder(screen, x, y):
    """
    Function Description:
        Blit the black-circle image marking an empty peg hole.
    :param screen: surface the graphic is blitted onto
    :param x: x location of the graphic
    :param y: y location of the graphic
    :return: no return value; output is purely graphical
    """
    screen.blit(holderImage, (x, y))
# -----------------------------------------------
def placePeg(screen, x, y):
    """
    Function Description:
        Blit the red peg image representing a game piece.
    :param screen: surface the graphic is blitted onto
    :param x: x location of the graphic
    :param y: y location of the graphic
    :return: no return value; output is purely graphical
    """
    screen.blit(pegImage, (x, y))
# -----------------------------------------------
# draw the current board positions
# pegs is a list of booleans, one per hole; holes are numbered 0 to 14
# from top to bottom, left to right (see attached reference image)
def setBoard(pegs, screen):
    """
    Function Description:
        Draw the whole game state: a red peg at every True position and
        a black circle at every False (empty) position.  Called again
        after each move to refresh the board.
    :param pegs: list of booleans, True where a peg occupies the hole
    :param screen: surface the graphics are blitted onto
    :return pegs: the same pegs list, unchanged
    """
    for hole, occupied in enumerate(pegs):
        # Choose the sprite-drawing helper for this hole's state.
        draw = placePeg if occupied else placeHolder
        draw(screen, pegHoleX[hole], pegHoleY[hole])
    return pegs
|
22,970 | 95d66c7604ff75132702261b30ec6149798fc8d3 | from functions import *
def newton_method(f, g, x, y, eps=10**-5, derivative='analytically'):
    """Solve the 2x2 nonlinear system f(x, y) = 0, g(x, y) = 0 by
    Newton's method.

    f, g       : the two residual functions of (x, y)
    x, y       : initial guess
    eps        : tolerance on both the residual norm and the step norm
    derivative : 'analytically' uses f_dx/f_dy/g_dx/g_dy from the
                 functions module; 'numerically' uses the *_num variants
    Returns (x, y) at convergence, or (-1, -1) for an unknown
    derivative mode (preserving the original error convention).

    Fixes over the original:
      * the stopping residual used f twice (f**2 + f**2); it now uses
        sqrt(f**2 + g**2) as intended;
      * inside the loop the Jacobian was evaluated at the *previous*
        iterate while the residuals used the current one; it is now
        evaluated at the current iterate, as Newton's method requires.
    """
    if derivative not in ('analytically', 'numerically'):
        return -1, -1

    def _jacobian(px, py):
        # Four partial derivatives a=df/dx, b=df/dy, c=dg/dx, d=dg/dy at (px, py).
        if derivative == 'analytically':
            return f_dx(px, py), f_dy(px, py), g_dx(px, py), g_dy(px, py)
        return f_dx_num(px, py), f_dy_num(px, py), g_dx_num(px, py), g_dy_num(px, py)

    def _step(px, py):
        # One Newton step: solve J * [dx, dy]^T = -[f, g]^T by Cramer's rule.
        a, b, c, d = _jacobian(px, py)
        det = a * d - b * c
        dx = -(d * f(px, py) - b * g(px, py)) / det
        dy = -(a * g(px, py) - c * f(px, py)) / det
        return px + dx, py + dy

    k = 1  # количество итераций
    x_next, y_next = _step(x, y)
    while math.sqrt(f(x_next, y_next)**2 + g(x_next, y_next)**2) > eps and \
            math.sqrt((x_next - x)**2 + (y_next - y)**2) > eps:
        x, y = x_next, y_next
        x_next, y_next = _step(x, y)
        k += 1
    print('Число итераций:', k)
    return x_next, y_next
|
22,971 | fef5c8ee49339e2455cabeb737f3898115748ac2 | from datetime import datetime
import uuid
import time
import sys
import logging
sys.path.append('web/')
sys.path.append('utility/')
sys.path.append('database/')
import utility
import globvar
import request
import robot
import extractor
from pq import PoolQuery
import query
class Crawler:
    """Crawls a single fully-qualified domain (fld), recording requests
    in the database via the shared pool and extracting URLs/emails with
    an Extractor.  One Crawler instance corresponds to one domain."""
    def __init__(self, fld, pool):
        self.creation_date = datetime.now()
        self.pool = pool
        self.fld = fld
        # Database row id for this domain; filled in by add_to_domain_database.
        self.rowid = None
        self.thread_id = uuid.uuid4().hex
        self.extractor = extractor.Extractor(self.fld, None, self.pool)
        self.crawl_counter = 0
    def add_to_domain_database(self):
        """Insert the domain row if missing, otherwise update its
        last-crawled timestamp; stores the row id on self and the extractor."""
        rowid = self.pool.database.query_get(query.get_id_domain, (self.fld, ))
        updateRow = True
        # FLD does not exist in domain, insert it
        while rowid == []:
            updateRow = False
            self.pool.database.query(query.insert_table_domain, (globvar.scheme, self.fld, 1, self.creation_date, self.creation_date))
            rowid = self.pool.database.query_get(query.get_id_domain, (self.fld, ))
        self.rowid = rowid[0][0]
        self.extractor.rowid = self.rowid
        if updateRow:
            self.pool.database.query(query.update_table_domain, (self.creation_date, self.rowid))
    def start_crawling(self):
        """Entry point: register the domain, parse robots.txt, fetch the
        front page, then crawl every extracted URL."""
        url = f'{globvar.scheme}{self.fld}'
        logging.info(f'{self.thread_id}: Starting crawling on {url}')
        self.add_to_domain_database()
        self.parse_robots()
        req = self.send_request(url)
        if req == None:
            logging.info(f'{self.thread_id}: {url} could not be crawled. Stopping crawler...')
            return
        self.extractor.extract_urls(req.text)
        self.crawl()
        print(self.extractor.emails)
        print(self.extractor)
    def send_request(self, url, depth=globvar.REDIRECT_MAX_DEPTH):
        """Fetch url, logging each attempt to crawl_history; follows
        same-domain redirects up to `depth` hops.  Returns the final
        response, or None for off-domain or unresolved redirects."""
        req = request.get_request(url, redirects=globvar.ALLOW_REDIRECTS)
        self.pool.put(PoolQuery(query.insert_table_crawl_history, req.to_tuple(self.rowid)))
        i = 0
        while (300 <= req.status_code < 400) and i <= depth:
            if utility.same_fld(utility.get_fld(req.new_location), self.fld):
                req = request.get_request(req.new_location, redirects=globvar.ALLOW_REDIRECTS)
                self.pool.put(PoolQuery(query.insert_table_crawl_history, req.to_tuple(self.rowid)))
            else:
                # TODO: Log error message here.
                return None
            i +=1
        # Still redirecting after `depth` hops: give up on this URL.
        if 300 <= req.status_code < 400:
            return None
        else:
            return req
    def crawl(self):
        """Drain the extractor's URL queue, feeding each fetched page
        back into the extractor until no URLs remain."""
        while len(self.extractor.urls) > 0:
            url = self.extractor.get_url()
            logging.info(f'{self.thread_id} | Crawling: {url}')
            req = self.send_request(url)
            if req != None:
                self.extractor.extract_urls(req.text)
            self.crawl_counter += 1
            print(f'id: {self.thread_id} | crawled: {self.crawl_counter} | queue: {len(self.extractor.urls)}')
        logging.info(f'{self.thread_id}: Finished crawling {self.fld} with: {self.crawl_counter} crawled urls!')
    def parse_robots(self):
        """Fetch and parse the domain's robots.txt (best effort; a 404
        or any failure just leaves the extractor's robots rules empty)."""
        logging.info(f'{self.thread_id}: Parsing robots.txt')
        url = f'{globvar.scheme}{self.fld}/robots.txt'
        try:
            req = request.get_request(url)
            if req.status_code != 404:
                self.extractor.robots.parse_robots(req.text)
        except:
            logging.error(f'Something went wrong parsing robots.txt url: {url}')
    def __str__(self):
        return f'{self.thread_id} | {self.fld}'
22,972 | 7e0f01e60805b2e202da963b3ccb7ce0e386ee2d | from faker import Factory
from django.core.management.base import BaseCommand
from explainers.models import Explainer
FAKE = Factory.create()
class Command(BaseCommand):
    help = u'Load fake explainers into the database. For development ONLY.'
    def handle(self, *args, **kwargs):
        """Management-command entry point."""
        self.load_fake_explainers()
    def load_fake_explainers(self):
        """Create a fixed set of fake Explainer rows with Faker-generated text."""
        self.stdout.write(u'Loading fake explainers...')
        # 'status' values 'P'/'D' presumably mean published/draft --
        # confirm against the Explainer model's choices.
        fakes = [{
            'title': u'What Really Happens During the 5 Months of Session',
            'status': u'P',
            'youtube_id': 'UJlA6_Ij4Pw',
            'text': FAKE.paragraph(),
        }, {
            'title': u'What is a Point of Order?',
            'status': u'P',
            'youtube_id': 'UJlA6_Ij4Pw',
            'text': FAKE.paragraph(),
        }, {
            'title': u'What Does the Lieutenant Governor do?',
            'status': u'D',
            'youtube_id': 'UJlA6_Ij4Pw',
            'text': FAKE.paragraph(),
        }, {
            'title': u'What is a Second Reading?',
            'status': u'P',
            'youtube_id': 'UJlA6_Ij4Pw',
            'text': FAKE.paragraph(),
        },
        ]
        # The list index doubles as the explainer's display order.
        for idx, fake in enumerate(fakes):
            self.create_explainer(fake, idx)
    def create_explainer(self, data, order):
        """Idempotently create one Explainer; the dict's 'title' key maps
        onto the model's 'name' field."""
        explainer, _ = Explainer.objects.get_or_create(
            name=data['title'],
            youtube_id=data['youtube_id'],
            text=data['text'],
            status=data['status'],
            order=order,
        )
|
22,973 | d61be6a12997869e38cdbf83a17f31eb4307b298 | a = 22
# Simple console exercise: echo a user-entered number against the fixed
# one, then greet the user by name and age.
number = 42
number_user = int(input('Введите число: '))
print(f'Ваше число - {number_user}, а заданное - {number}.')
name = input('Введите своё имя: ')
age = int(input('Введите свой возраст: '))
print(f'Вас зовут {name}, и ваш возраст {age}')
|
22,974 | 14a467f342897d411e1f580e259efd6f5997c77e | #! /usr/bin/python3
# cliEmailer.py - Send emails from cli
# Usage: ./cliEmailer.py "toEmail" "subject" "body"
from selenium import webdriver
import sys, time
# Require exactly three CLI arguments after the script name.
if len(sys.argv) < 4:
    print('Usage: ./cliEmailer.py "toEmail" "subject" "text"')
    sys.exit()
email = "adrian.hintermaier@gmail.com"
# SECURITY NOTE(review): hard-coded credential placeholder -- should be
# read from the environment or a config file, never committed.
password = "***"
toEmail, subject, body = sys.argv[1:]
# Setup a webdriver and open gmail
browser = webdriver.Firefox()
browser.get('http://gmail.com')
# Find the email text field and insert login
# NOTE(review): these element ids/names match an old Gmail login page
# and may no longer exist -- verify against the current page.
emailElem = browser.find_element_by_id('Email')
emailElem.clear()
emailElem.send_keys(email)
emailElem.submit()
# Find the password field and insert the password
passwordElem = browser.find_element_by_id('Passwd')
passwordElem.clear()
passwordElem.send_keys(password)
passwordElem.submit()
# Switch to gmail's basic html view for easier processing
browser.get("https://mail.google.com/mail/u/0/h/s32qz81kdosc/?zy=h&f=1")
# Find the button to compose and click it
composeElem = browser.find_element_by_link_text("Compose Mail")
composeElem.click()
# Find the email, subject and body text fields to input text from cli
toElem = browser.find_element_by_id("to")
toElem.clear()
toElem.send_keys(toEmail)
subjectElem = browser.find_element_by_name("subject")
subjectElem.clear()
subjectElem.send_keys(subject)
bodyElem = browser.find_element_by_name("body")
bodyElem.clear()
bodyElem.send_keys(body)
# Send email!
browser.find_element_by_name("nvp_bu_send").click()
time.sleep(1)
# Log out
browser.find_element_by_id("gb_71").click()
time.sleep(1)
browser.close()
|
22,975 | 514ecc2d07ee0b04cabc71009fc19be1a1c91083 | from PIL import Image
import numpy
import csv
import os
# Global accumulator: one row per image, holding [pixel list, label].
picArray = numpy.zeros([41995,2], dtype = object)
def convertImg(img, numb, result):
    """Flatten the top-left 27x27 pixels of a greyscale image into
    picArray[numb] together with its label `result`.

    NOTE(review): range(0,27) covers indices 0-26 only; if the images
    are 28x28 (MNIST-style) the last row and column are dropped --
    confirm whether range(0,28) was intended.
    """
    WIDTH, HEIGHT = img.size
    value = []
    data = list(img.getdata()) # convert image data to a list of integers
    # Reshape the flat pixel list into HEIGHT rows of WIDTH pixels.
    data = [data[offset:offset+WIDTH] for offset in range(0, WIDTH*HEIGHT, WIDTH)]
    for x in range(0,27):
        for y in range(0,27):
            value.append(data[x][y])
    picArray[numb][0] = value
    picArray[numb][1] = result
# Convert every existing img_<n>.jpg (greyscale) into picArray with
# label 0, then dump the array to CSV and echo it back.
for l in range(0,41995):
    if os.path.exists('img_'+str(l)+'.jpg'):
        imag = Image.open('img_'+str(l)+'.jpg').convert('L')
        convertImg(imag,l,0)
numpy.savetxt("array.csv",picArray,fmt = "%s")
f = open("array.csv", 'r')
for row in f:
    print(row)
|
22,976 | f7565cae861767e1c880962872f826b2df352da4 | import argparse
print "hello world"
def func (x):
print " this is " + x
func("3") ## define function
def func2 (x) :
if x == 3 :
print "hahahahah"
else :
print "wuwuwuwu"
func2(3)
func2(4)
array =[1,2,3]
def func3 (x) :
    # Iterate elements directly, printing each incremented by one.
    for item in x :
        print item +1
func3(array) ## two type of for loops
def func4 (x):
    # Iterate by index instead of by element.
    for i in range(0,len(x)):
        print x[i]
func4(array) ## while loops
# Count 0..4 with a while loop; z is assigned but never used.
y = 0
z = 0
while y < 5:
    print y
    y += 1
# List mutation demo: append, pop from the end, pop from the front.
a = [1,2,3]
a.append(4)
print a
print a.pop()
print a
print a.pop(0)
print a
|
22,977 | 39961629ff06433ceeeff661b332e86b3a1a20df | from task import *
|
22,978 | 41fcdcdbba1d71aad6e683b7c454a26ccfdbf63d | from kmeans import kmeans
from kprototype import kprototype
# Cluster the same CSV two ways: plain k-means on columns 1 and 2, then
# k-prototypes treating column 1 as categorical.
usecols = (1,2)
kmeans('ChitraData.csv',5,usecols)
kprototype(source='ChitraData.csv',n_clusters=5,usecols=usecols,categorical=[1])
|
def flisttostring(l):
    """Join the digits in l into one number string, stripping any
    leading zeros via an int round-trip (e.g. [0, 1, 2] -> "12")."""
    joined = ''.join(str(digit) for digit in l)
    return str(int(joined))
def fFill(what, list, f):
    """Fill what[f:] with the largest elements of `list` in descending
    order, consuming them from `list` (which is first sorted in place)."""
    list.sort()
    for i in range(f, len(what)):
        # pop() removes the current maximum since the list is ascending.
        what[i] = list.pop()
    return
def fRecover(what, f, list):
    """Overwrite what[f:] with the leading elements of `list`, restoring
    positions previously consumed by fFill."""
    for i in range(f, len(what)):
        what[i] = list[i - f]
    return
# Python 2 Code Jam driver: find the first *.in file in the current
# directory and, for each case N, build the smallest number strictly
# greater than N using N's digits plus one extra zero.
import os
dir=os.listdir('.')
fname=''
for x in dir:
    if (x.find('.in')>0):
        fname=x
f=open(fname,'r')
T=int(f.readline())
for case in range(T):
    Nstr=f.readline().rstrip();
    N=int(Nstr)
    # Digit multiset of N, plus one appended zero.
    digits=[]
    for c in Nstr:
        digits.append(int(c))
    digits.append(0)
    digits.sort()
    i=0;
    result=[]
    for x in range(len(digits)):
        result.append('0')
    # Greedily pick each leading digit: largest choice whose completion
    # (remaining digits placed descending by fFill) still exceeds N.
    while(len(digits)>1):
        j=0
        while(int(flisttostring(result))<=N):
            tDigits=list(digits)
            result[i]=tDigits.pop(j)
            fFill(result,tDigits,i+1)
            j=j+1
        digits.remove(result[i])
        digits.sort()
        i=i+1
    # Restore the tail with the remaining digits in ascending order.
    fRecover(result,i,digits)
    result[-1]=digits[0]
    print 'Case #'+str(case+1)+':',flisttostring(result)
|
22,980 | 3990eed090fd99a3ae546b1d71b5cb12a43279f0 | # -*- coding: utf-8 -*-
from django.contrib import admin
from setup.models import *
# !!!!! Settings !!!!!
class SettingsAdmin(admin.ModelAdmin):
    """Admin list view for key/value Settings rows, filterable by key."""
    list_display = ['key', 'value']
    list_filter = ['key']
# !!!!! Apdrošinātāji !!!!!
class ApdrosinatajiAdmin(admin.ModelAdmin):
    """Admin list view for insurers (Apdrosinataji), filterable by visibility."""
    list_display = ['title', 'visible']
    list_filter = ['visible']
# Register both models with their admin configurations.
admin.site.register(Settings, SettingsAdmin)
admin.site.register(Apdrosinataji, ApdrosinatajiAdmin)
|
22,981 | da29131b6ad6152ed1ce6f2a05606bf07aaec1fa | ../test_migration_uuid_globalid_live_migration.py |
22,982 | e2859b984e919afbb732f7fc9ee7f6efbbdf806b | from enum import Enum
class MethodEnum(Enum):
    """Forecasting method identifiers.

    Each member's value is the method's full name and each member's
    attribute name is its abbreviation, so both string lookups can be
    driven directly off the enum members themselves.
    """
    MME = 'Multi_Model_Ensemble'
    SF = 'Statistical_Forecasting'
    TSF = 'Time_Series_Forecasting'
    MGF = 'Mobile_Geographics_Forecasts'
    OBS = 'Observations'
    Other = 'Other'

    @staticmethod
    def getType(name):
        """Map a full name or abbreviation string to its member;
        anything unrecognised falls back to MethodEnum.Other."""
        for member in MethodEnum:
            if name == member.name or name == member.value:
                return member
        return MethodEnum.Other

    @staticmethod
    def getAbbreviation(name):
        """Return the abbreviation string for a member; any non-member
        argument yields 'Other'."""
        if isinstance(name, MethodEnum):
            return name.name
        return 'Other'
|
22,983 | dddf2da1681388806bd84ca9ede21374d9bafa6e | #!/usr/bin/env python
import numpy as N
import matplotlib.pyplot as P
from matplotlib.projections import PolarAxes, register_projection
from matplotlib.transforms import Affine2D, Bbox, IdentityTransform
class NorthPolarAxes(PolarAxes):
    '''
    A variant of PolarAxes where theta starts pointing north and goes
    clockwise.

    NOTE(review): this is the classic matplotlib "northpolar" recipe; it
    relies on private PolarAxes attributes (_theta_label1_position,
    _r_label1_position) that changed across matplotlib versions, and on
    N.float_, which was removed in NumPy 2.0 -- verify against the
    pinned library versions.
    '''
    name = 'northpolar'
    class NorthPolarTransform(PolarAxes.PolarTransform):
        def transform(self, tr):
            # (theta, r) -> (x, y) with theta measured clockwise from north:
            # x = r*sin(t), y = r*cos(t) instead of the usual cos/sin.
            xy = N.zeros(tr.shape, N.float_)
            t = tr[:, 0:1]
            r = tr[:, 1:2]
            x = xy[:, 0:1]
            y = xy[:, 1:2]
            x[:] = r * N.sin(t)
            y[:] = r * N.cos(t)
            return xy
        transform_non_affine = transform
        def inverted(self):
            # NOTE(review): resolved in module scope at call time -- the
            # nested class is not defined there, so this likely needs the
            # NorthPolarAxes. prefix.  Confirm.
            return InvertedNorthPolarTransform()
    class InvertedNorthPolarTransform(PolarAxes.InvertedPolarTransform):
        def transform(self, xy):
            # (x, y) -> (theta, r); inverse of the forward transform above.
            x = xy[:, 0:1]
            y = xy[:, 1:]
            r = N.sqrt(x*x + y*y)
            theta = N.arctan2(y, x)
            return N.concatenate((theta, r), 1)
        def inverted(self):
            # NOTE(review): same unqualified-name concern as above.
            return NorthPolarTransform()
    def _set_lim_and_transforms(self):
        # Rebuild the axes' transform pipeline around the north-polar
        # projection, mirroring PolarAxes' own setup.
        PolarAxes._set_lim_and_transforms(self)
        self.transProjection = self.NorthPolarTransform()
        self.transData = (
            self.transScale +
            self.transProjection +
            (self.transProjectionAffine + self.transAxes))
        self._xaxis_transform = (
            self.transProjection +
            self.PolarAffine(IdentityTransform(), Bbox.unit()) +
            self.transAxes)
        self._xaxis_text1_transform = (
            self._theta_label1_position +
            self._xaxis_transform)
        self._yaxis_transform = (
            Affine2D().scale(N.pi * 2.0, 1.0) +
            self.transData)
        self._yaxis_text1_transform = (
            self._r_label1_position +
            Affine2D().scale(1.0 / 360.0, 1.0) +
            self._yaxis_transform)
# Make the projection available as projection='northpolar'.
register_projection(NorthPolarAxes)
# myd = [(0, 22.67157894736842),
# (10, 23.756578947368421),
# (20, 23.092039800995025),
# (30, 24.081081081081081),
# (40, 20.427450980392155),
# (50, 17.668831168831169),
# (60, 18.326599326599325),
# (70, 17.487864077669904),
# (80, 11.759776536312849),
# (90, 15.906474820143885),
# (100, 10.76),
# (180, 22.90295358649789),
# (190, 15.840220385674931),
# (200, 23.93734939759036),
# (210, 22.654794520547945),
# (220, 19.866220735785951),
# (230, 22.635730858468676),
# (240, 12.791428571428572),
# (250, 22.978401727861772),
# (260, 24.961290322580645),
# (270, 25.101052631578948),
# (280, 20.38372093023256)]
# import math
# theta = [2*math.pi*i[0]/360. for i in myd]
# r = [i[1] for i in myd]
# P.clf()
# P.subplot(1,1,1,projection='northpolar')
# P.plot(theta,r)
# P.show()
|
22,984 | ddc3f11a6248a6a8c713cf926d9f20678d845eff | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 17 20:29:05 2019
@author: Matt Jonas
"""
import matplotlib.pyplot as plt
import numpy as np
import urllib
from matplotlib.lines import Line2D
import datetime
from matplotlib.pyplot import cm
import calendar
import h5py
import os.path
from sys import exit as ext
class cloud_height:
def __init__(self, date, dataset,h5 = None):
if h5 is None: # If "h5" keyword was not set, then we actually need to read the file from the web, rather than restoring it from the hard drive
url = 'https://skywatch.colorado.edu/data/'
# get julian day (requested date)
y0=int(date[0:4]); m0=int(date[5:7]); d0= int(date[8:10])
jul = [] # initialize julian day
loc = [] # initialize local times
dat1 = [] # cloud base 1 [m]
dat2 = [] # cloud base 2 [m]
dat3 = [] # cloud base 3 [m]
#get url based on selected dataset and date
url=url+dataset+date[2:]+'.dat'
print('Reading: ',url)
hh0=0. # incremented by 24 each time we pass midnight
loc0previous=0. # retains current time; if time switches back below that, will increment "hh0"
jday = (datetime.datetime(y0,m0,d0)-datetime.datetime(y0,1,1)).total_seconds()/86400.0 + 1.0
try:
lines = urllib.request.urlopen(url).readlines()
for line in lines[5:]: # go through all lines, ignoring first three (header)
entries = line.decode("utf-8").split("\t")
columns = [] # will contain columns
for entry in entries:
if len(entry) > 1: columns.append(entry)
hhmmX = columns[0] # assigns time, filling in leading '0'
hh = float(hhmmX[0:2])
self.doy = jday
mm = float(hhmmX[3:5])
ss = float(hhmmX[6:8])
loc0 = hh+mm/60.+ss/3600.+hh0
if loc0<loc0previous:
hh0=hh0+24.
loc0=loc0+hh0
loc0previous=loc0
loc.append(loc0)
jul.append(jday+loc0/24.)
dat1.append(float(columns[1]))
dat2.append(float(columns[2]))
dat3.append(float(columns[3]))
except:
print("website not found ",date)
pass
self.jul = np.array(jul)
self.loc = np.array(loc)
self.h1 = np.array(dat1)
self.h2 = np.array(dat2)
self.h3 = np.array(dat3)
self.date = date
self.doy = int(jday)
self.year = date[0:4]
else:
h5f = h5py.File(h5, "r")
self.jul = h5f['jul'][...]
self.loc = h5f['loc'][...]
self.h1 = h5f['h1'][...]
self.h2 = h5f['h2'][...]
self.h3 = h5f['h3'][...]
self.date= str(h5f['date'][...])
self.doy = int(h5f['doy'][...])
self.year= str(h5f['year'][...])
try: # If statistics exist, restore them, if not set them to zero
self.cf=float(h5f['cf'][...])
self.min=float(h5f['min'][...])
self.max=float(h5f['max'][...])
self.mean=float(h5f['mean'][...])
except:
self.cf=0
self.min=0
self.max=0
self.mean=0
h5f.close()
def plot(self):
plt.plot(self.loc,self.h1,'k.')
plt.xlabel('Local Time [h]')
plt.ylabel('Cloud Base Height [m]')
plt.title(self.date+' Cloud Fraction '+str(round(self.cf,1))+'%')
def stats(self): # of lowest cloud layer, calculate min, max, mean of day & cloud fraction
tot=len(self.h1) # total number of data points
flt=np.where(self.h1>0)
cld=len(flt[0]) # number of cloudy data points
if len(flt[0]>0):
mn =np.min(self.h1[flt])
mx =np.max(self.h1[flt])
mm =np.mean(self.h1[flt])
self.min=mn
self.max=mx
self.mean=mm
#filter out nonexistant data sets
if tot != 0:
self.cf = float(cld)/float(tot)*100.
else:
self.cf = 0.
def save(self):
file = './'+self.year+'_'+str(int(self.doy)).zfill(3)+'.h5'
print('Saving data to: '+file)
h5 = h5py.File(file, "w")
h5['jul'] = self.jul
h5['loc'] = self.loc
h5['h1'] = self.h1
h5['h2'] = self.h2
h5['h3'] = self.h3
h5['date']= self.date
h5['doy'] = self.doy
h5['year']= self.year
if hasattr(self,'mean'):
h5['mean']= self.mean
h5['min'] = self.min
h5['max'] = self.max
h5['cf'] = self.cf
h5.close()
def jday2yyyymmdd(y,jd):
month = 1
while jd - calendar.monthrange(y,month)[1] > 0 and month <= 12:
jd = jd - calendar.monthrange(y,month)[1]
month = month + 1
return(y,month,jd)
if __name__ == '__main__':
# Test one day
if False:
doy = 345
y,m,d = jday2yyyymmdd(2019,doy)
date = str(y).zfill(2)+'_'+str(m).zfill(2)+'_'+str(d).zfill(2)
ch=cloud_height(date,'ceil_')
ch.stats()
ch.plot()
# Read range of dates in a year, do some simple statistics, and write everything to individual h5 files for a day
if True:
year = 2018
m0,d0 = 1,1 # start (m,d)
m1,d1 = 12,31 # end (m,d)
j0 = int((datetime.datetime(year,m0,d0)-datetime.datetime(year,1,1)).total_seconds()/86400.0 + 1.0)
j1 = int((datetime.datetime(year,m1,d1)-datetime.datetime(year,1,1)).total_seconds()/86400.0 + 1.0)
doy_list = []
cf_list = []
for doy in range(j0,j1+1):
doy_list.append(doy) # keep track of days
y,m,d = jday2yyyymmdd(year,doy)
date = str(y).zfill(2)+'_'+str(m).zfill(2)+'_'+str(d).zfill(2)
# First, check if h5 file is already in existance for this date
h5 = './'+str(year)+'_'+str(doy).zfill(3)+'.h5'
if os.path.isfile(h5):
print('Open and read '+h5)
ch = cloud_height(date,'ceil_h',h5=h5)
else:
ch=cloud_height(date,'ceil_')
ch.stats()
ch.save()
print('Cloud fraction that day:',round(ch.cf,2),'%')
cf_list.append(ch.cf) # keep track of cloud fraction
plt.plot(doy_list,cf_list,'.')
plt.xlabel('Day of year')
plt.ylabel('Cloud Fraction')
|
22,985 | d99fc932f31398b3ba3a7b223d203ced47a01c6c | import advent_helpers
from typing import List
import copy
def get_adjacent_ords(row, col):
return {(row-1, col - 1), (row - 1, col), (row - 1, col + 1), (row, col - 1),
(row, col + 1), (row + 1, col - 1), (row + 1, col), (row + 1, col + 1)}
def list_to_str(arr: List[List[str]]):
output = ""
for s in arr:
for x in s:
output += x
output += "\n"
return output
def count_seats(seat_grid: List[List[str]]):
total = 0
for row in seat_grid:
for j in range(len(row)):
if row[j] == "#":
total += 1
return total
def part_1(problem_input: List[List[str]]):
max_iterations = 500
current = 0
while current < max_iterations:
current += 1
next_state = copy.deepcopy(problem_input)
for i, row in enumerate(problem_input):
for j, seat in enumerate(row):
if seat == ".":
continue
else:
adjacent_seats = 0
for surrounding in get_adjacent_ords(i, j):
if (0 <= surrounding[0] < len(problem_input)) and (0 <= surrounding[1] < len(row)):
if problem_input[surrounding[0]][surrounding[1]] == "#":
adjacent_seats += 1
if adjacent_seats >= 4:
next_state[i][j] = "L"
elif adjacent_seats == 0:
next_state[i][j] = "#"
else:
next_state[i][j] = problem_input[i][j]
if problem_input == next_state:
print(f"stabilised after {current} iterations")
break
problem_input = copy.deepcopy(next_state)
return count_seats(next_state)
def get_adj_los(row, col, current_state):
units = [-1, 0, 1]
output = set()
for i in units:
for j in units:
if i == j == 0:
continue
x = i
y = j
while (0 <= row + x < len(current_state)) and (0 <= col + y < len(current_state[0])) and (current_state[row + x][col + y] not in {"L", "#"}):
x += i
y += j
if (0 <= row + x < len(current_state)) and (0 <= col + y < len(current_state[0])):
#then we found a seat
output.add((row + x, col + y))
#otherwise we got to the end of the board
return output
def part_2(problem_input: List[str]):
max_iterations = 500
current = 0
while current < max_iterations:
current += 1
next_state = copy.deepcopy(problem_input)
for i, row in enumerate(problem_input):
for j, seat in enumerate(row):
if seat == ".":
continue
else:
adjacent_seats = 0
for surrounding in get_adj_los(i, j, problem_input):
if (0 <= surrounding[0] < len(problem_input)) and (0 <= surrounding[1] < len(row)):
if problem_input[surrounding[0]][surrounding[1]] == "#":
adjacent_seats += 1
# print(f"i: {i}, j: {j}, ADJ SEATS: {adjacent_seats}")
if adjacent_seats >= 5:
next_state[i][j] = "L"
elif adjacent_seats == 0:
next_state[i][j] = "#"
else:
next_state[i][j] = problem_input[i][j]
if problem_input == next_state:
print(f"stabilised after {current} iterations")
break
problem_input = copy.deepcopy(next_state)
return count_seats(next_state)
def main(problem_input):
# print(part_1(list(map(list, problem_input.split("\n")[:-1]))))
print(part_2(list(map(list, problem_input.split("\n")[:-1]))))
if __name__ == '__main__':
main(advent_helpers.get_problem_input(11, 1))
|
22,986 | d7442c948b8f46d6b6214cda997b08dbfc7207e3 | /*
A KBase module: DifferentialExpressionUtils
*/
module DifferentialExpressionUtils {
/**
A KBase module: DifferentialExpressionUtils
This module uploads, downloads and exports DifferentialExpression and ExpressionMatrix objects
**/
/* A boolean - 0 for false, 1 for true.
@range (0, 1)
*/
typedef int boolean;
/** Required input parameters for uploading Differential expression data
string destination_ref - object reference of Differential expression data.
The object ref is 'ws_name_or_id/obj_name_or_id'
where ws_name_or_id is the workspace name or id
and obj_name_or_id is the object name or id
string diffexpr_filepath - file path of the differential expression data file
created by cuffdiff, deseq or ballgown
string tool_used - cufflinks, ballgown or deseq
string tool_version - version of the tool used
string genome_ref - genome object reference
**/
typedef structure {
string destination_ref;
string diffexpr_filepath;
string tool_used;
string tool_version;
string genome_ref;
string description; /* Optional */
string type; /* Optional - default is 'log2_level' */
string scale; /* Optional - default is 1.0 */
} UploadDifferentialExpressionParams;
/** Output from upload differential expression **/
typedef structure {
string diffExprMatrixSet_ref;
} UploadDifferentialExpressionOutput;
/** Uploads the differential expression **/
funcdef upload_differentialExpression(UploadDifferentialExpressionParams params)
returns (UploadDifferentialExpressionOutput)
authentication required;
/* --------------------------------------------------------------------------------- */
typedef structure {
mapping<string,string> condition_mapping; /* {'condition1': 'condition2'} */
string diffexpr_filepath; /* The input file given is expected to have the columns
'gene_id', 'log2_fold_change', 'p_value', 'q_value',
among other columns. */
string delimiter; /* optional */
/* If the file extension does not indicate the delimiter,
('csv' or 'tsv') then the default delimiter tab is used
for reading the values from input file. This optional
parameter can be used to pass in another delimiter */
} DiffExprFile;
/** Required input parameters for saving Differential expression data
string destination_ref - object reference of Differential expression data.
The object ref is 'ws_name_or_id/obj_name_or_id'
where ws_name_or_id is the workspace name or id
and obj_name_or_id is the object name or id
list<DiffExprFile> diffexpr_data - list of DiffExprFiles (condition pair & file)
string tool_used - cufflinks, ballgown or deseq
string tool_version - version of the tool used
string genome_ref - genome object reference
**/
typedef structure {
string destination_ref;
list<DiffExprFile> diffexpr_data;
string tool_used;
string tool_version;
string genome_ref;
string description; /* Optional */
string type; /* Optional - default is 'log2_level' */
string scale; /* Optional - default is 1.0 */
} SaveDiffExprMatrixSetParams;
/** Output from upload differential expression **/
typedef structure {
string diffExprMatrixSet_ref;
} SaveDiffExprMatrixSetOutput;
/** Uploads the differential expression **/
funcdef save_differential_expression_matrix_set(SaveDiffExprMatrixSetParams params)
returns (SaveDiffExprMatrixSetOutput)
authentication required;
/* --------------------------------------------------------------------------------- */
/**
Required input parameters for downloading Differential expression
string source_ref - object reference of expression source. The
object ref is 'ws_name_or_id/obj_name_or_id'
where ws_name_or_id is the workspace name or id
and obj_name_or_id is the object name or id
**/
typedef structure {
string source_ref;
} DownloadDifferentialExpressionParams;
/** The output of the download method. **/
typedef structure {
string destination_dir; /* directory containing all the downloaded files */
} DownloadDifferentialExpressionOutput;
/** Downloads expression **/
funcdef download_differentialExpression(DownloadDifferentialExpressionParams params)
returns (DownloadDifferentialExpressionOutput)
authentication required;
/**
Required input parameters for exporting expression
string source_ref - object reference of Differential expression. The
object ref is 'ws_name_or_id/obj_name_or_id'
where ws_name_or_id is the workspace name or id
and obj_name_or_id is the object name or id
**/
typedef structure {
string source_ref; /* workspace object reference */
} ExportParams;
typedef structure {
string shock_id; /* shock id of file to export */
} ExportOutput;
/** Wrapper function for use by in-narrative downloaders to download expressions from shock **/
funcdef export_differentialExpression(ExportParams params)
returns (ExportOutput output)
authentication required;
typedef structure {
string input_ref;
} ExportMatrixTSVParams;
typedef structure {
string shock_id;
} ExportMatrixTSVOutput;
/*Export DifferenitalExpressionMatrix object as tsv
*/
funcdef export_diff_expr_matrix_as_tsv(ExportMatrixTSVParams params)
returns (ExportMatrixTSVOutput) authentication required;
};
|
22,987 | 81da4af8ecf4232a533f386f92d2e0e0a34e8d7b | import socket, time, datetime, logging
logging.basicConfig(filename="/logs/monitor.log", filemode="a", level=logging.DEBUG, format='%(asctime)s %(message)s')
def internet(host="8.8.8.8", port=53, timeout=3):
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except socket.error as ex:
return False
def statusChange(newstate):
# push update
print(datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S") + " internet connection is " + ("up" if newstate else "down"))
logging.info("internet connection is " + ("up" if newstate else "down"))
lastState = internet()
print(datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S") + " uplink monitor started")
logging.info("uplink monitor started")
statusChange(lastState)
while True:
currentState = internet()
if currentState != lastState:
# status changed, push update
statusChange(currentState);
lastState = currentState
time.sleep(2)
|
22,988 | 13f278d7b59b01408589e4ae7f4ae8c71f7b6c06 | from password_generator import PasswordGenerator
def random_pass_gerator():
pwo = PasswordGenerator()
print(pwo.generate())
if __name__ == "__main__":
random_pass_gerator() |
22,989 | 32a914f2d9df83b30eee9692441cf152f5ee8382 | from pyramid.response import Response
from pyramid.view import view_config
from pyramid.url import route_path
from pyramid.httpexceptions import (
HTTPFound,
HTTPUnauthorized,
HTTPBadRequest,
)
from ..models import (
DBSession,
User,
)
@view_config(route_name='admin', renderer='osmhm_site:templates/admin.mako',
permission='edit_user_or_object')
def admin(request):
return dict(page_id='admin')
@view_config(route_name='admin_user_list', renderer='osmhm_site:templates/admin_user_list.mako',
permission='super_admin')
def admin_user_list(request):
users = DBSession.query(User).all()
users.sort(key=lambda user: user.username)
return dict(page_id='users', users=users)
@view_config(route_name='promote_member', permission='super_admin')
def promote_dwg(request):
userid = request.matchdict['id']
promuser = DBSession.query(User).get(userid)
promuser.role = User.role_member if not promuser.is_member else None
DBSession.flush()
return HTTPFound(location=route_path('admin_user_list',request))
@view_config(route_name='promote_admin', permission='super_admin')
def promote_admin(request):
userid = request.matchdict['id']
promuser = DBSession.query(User).get(userid)
promuser.role = User.role_admin if not promuser.is_admin else None
DBSession.flush()
return HTTPFound(location=route_path('admin_user_list',request))
@view_config(route_name='promote_owner', permission='super_admin')
def promote_owner(request):
userid = request.matchdict['id']
promuser = DBSession.query(User).get(userid)
promuser.role = User.role_owner if not promuser.is_owner else None
DBSession.flush()
return HTTPFound(location=route_path('admin_user_list',request))
|
22,990 | 3843bfbb8a9693b954272b94aa92cbc84be9b71c | from setuptools import setup
from Cython.Build import cythonize
setup(ext_modules=cythonize("dataf.pyx")) |
22,991 | b771892ed74a1af8a2ea66d49f1cc06db3a1b8dc | import re
from math import ceil
from nonebot import on_regex, MatcherGroup
from nonebot.typing import T_State
from nonebot_adapter_gocq.exception import AdapterException
from cn2an import cn2an
from src.common import sl_settings, save_sl, Bot, GroupMessageEvent, MessageSegment, logger
from src.common.rules import sv_sw, comman_rule
from src.common.easy_setting import BOTNAME, SUPERUSERS
from src.common.levelsystem import UserLevel, cd_step
from src.utils import reply_header, FreqLimiter, DailyNumberLimiter
from src.utils.antiShielding import Image_Handler
from .mitu_lib import get_mitu
plugin_name = '美图'
plugin_usage = """还没完善,可以先忽略本功能
关于设置sl:
sl说明:
大概可以解释成本群能接收的工口程度,sl越高的图被人看见越会触发社死事件
!!!!!没有那种不属于人类的XP!!!!!
最低sl0:不含任何ero要素,纯陶冶情操,也有一部分风景图
最高sl5: 就是R18了
中间的等级依次过渡
────────────
[设置sl 最小sl-最大sl]
例如:设置sl 0-4
[锁定sl] 管理锁定之后群员不可设置sl,且锁定权限依据操作者权限
例如:群主锁定,管理员不可解锁;管理员锁定,群主可解锁但群员不可解锁
[解锁sl] 解锁之后群员可随意设置sl
[查询sl] 查看本群当前设置
[本群评级] 未开放(要写,没写,画线去掉)
────────────
""".strip()
#——————————————————设置sl——————————————————
lock_map = {
'member': 0,
'admin': 1,
'owner': 2
} # 把群权限转成int方便比较
lock_inv_map = {
0: '群员',
1: '管理员',
2: '群主'
} # 还要映射回来,好蠢,淦
sl = MatcherGroup(rule=comman_rule(GroupMessageEvent))
set_sl = sl.on_command('设置sl', aliases={'设置SL', '设置Sl'})
@set_sl.handle()
async def setsl_(bot: Bot, event: GroupMessageEvent, state: T_State):
gid = str(event.group_id)
locked = sl_settings[gid]['locked'] if gid in sl_settings else lock_map[event.sender.role]
if locked > lock_map[event.sender.role] and event.user_id not in SUPERUSERS:
await set_sl.finish(reply_header(event, f'sl被{lock_inv_map[locked]}锁定,低于此权限不可设置sl,或先以高级权限[解锁sl]重置锁定权限'))
args = event.get_plaintext().strip()
if not args:
await set_sl.finish('请输入本群sl等级范围,如:设置sl 0-4\n(最小0, 最大5)\n※注意是范围!几到几,不是单纯一个数字!')
parse =args.split('-')
if len(parse) == 2 and parse[0].isdigit() and parse[1].isdigit():
min_sl = int(parse[0])
max_sl = int(parse[1])
if min_sl < 0 or min_sl > 5 or max_sl < 0 or max_sl > 5:
await set_sl.finish(reply_header(event, '设置的数字必须在0~5区间'))
if min_sl > max_sl:
min_sl, max_sl = max_sl, min_sl
else:
await set_sl.finish(reply_header(event, '不符合格式的设置,比如:设置sl 0-4'))
gid = str(event.group_id)
sl_settings[gid]['min_sl'] = min_sl
sl_settings[gid]['max_sl'] = max_sl
sl_settings[gid]['locked'] = lock_map[event.sender.role]
if save_sl():
await set_sl.finish(f'已设置本群sl为[{min_sl}-{max_sl}]') # TODO:设置sl评级
else:
await set_sl.finish('设置sl功能故障,请联系维护组紧急修复!')
lock_sl = sl.on_command('锁定sl', aliases={'锁定SL', '锁定Sl'})
@lock_sl.handle()
async def lock_sl_(bot: Bot, event: GroupMessageEvent):
gid = str(event.group_id)
if gid not in sl_settings:
await lock_sl.finish('本群未设置sl')
if event.sender.role not in ('owner', 'admin'):
await lock_sl.finish('仅管理权限可锁定sl')
min_sl = sl_settings[gid]['min_sl']
max_sl = sl_settings[gid]['max_sl']
locked = sl_settings[gid]['locked']
if locked:
await lock_sl.finish(f'已经锁了,现在sl区间是[{min_sl}-{max_sl}]')
else:
sl_settings[gid]['locked'] = lock_map[event.sender.role]
if save_sl():
await set_sl.finish(f'已锁定本群sl为[{min_sl}-{max_sl}],管理员使用[解锁sl]功能可解除锁定')
else:
await set_sl.finish('sl功能故障,请联系维护组紧急修复!')
unlock_sl = sl.on_command('解锁sl', aliases={'解锁SL', '解锁Sl'})
@unlock_sl.handle()
async def unlock_sl_(bot: Bot, event: GroupMessageEvent):
gid = str(event.group_id)
if gid not in sl_settings:
await lock_sl.finish('本群未设置sl')
locked = sl_settings[gid]['locked']
if not locked:
await lock_sl.finish('本群sl未锁定')
if locked > lock_map[event.sender.role]:
await lock_sl.finish(reply_header(event, f'sl被{lock_inv_map[locked]}锁定,低于此权限不可解锁sl'))
sl_settings[gid]['locked'] = 0
if save_sl():
await set_sl.finish('已解锁sl,当前sl区间可由群员设置')
else:
await set_sl.finish('sl功能故障,请联系维护组紧急修复!')
# 查询当前群sl区间
query_sl = sl.on_command('查询sl', aliases={'查询SL', '查询Sl', '本群sl', '本群SL', '本群Sl'})
@query_sl.handle()
async def report_sl(bot: Bot, event: GroupMessageEvent):
gid = str(event.group_id)
if gid not in sl_settings:
await query_sl.finish('本群未设置sl')
min_sl = sl_settings[gid]['min_sl']
max_sl = sl_settings[gid]['max_sl']
locked = sl_settings[gid]['locked']
msg = f'本群sl区间为:[{min_sl}-{max_sl}]\n'
if not locked:
msg += '未锁定'
else:
msg += f'被{lock_inv_map[locked]}锁定'
await query_sl.finish(reply_header(event, msg))
#——————————————————————————————————————————————————
mitu = on_regex(
r'^ *再?[来來发發给給](?:(?P<num>[\d一二两三四五六七八九十]*)[张張个個幅点點份])?(?P<r18_call>[非(?:不是)]?R18)?(?P<kwd>.{0,10}?[^的])?的?(?P<r18_call2>[非(?:不是)]?R18)?的?美[图圖](?:(?P<num2>[\d一二两三四五六七八九十]*)[张張个個幅点點份])? *$',
flags=re.I,
rule=sv_sw(plugin_name, usage=plugin_usage) & comman_rule(GroupMessageEvent),
priority=2
)
kwdrex = re.compile(r'[,,]') # 分离逗号做交集搜索
@mitu.handle()
async def send_mitu(bot: Bot, event: GroupMessageEvent, state: T_State):
# 设置sl
gid = event.group_id
if str(gid) not in sl_settings:
await mitu.finish('''先设置本群sl再使用此功能吧
[设置sl 最小sl-最大sl]
例如:设置sl 0-4
────────────
sl说明:
大概可以解释成本群能接收的工口程度,sl越高的图被人看见越会触发社死事件
最低sl0:不含任何ero要素,纯陶冶情操,也有一部分风景图
最高sl5: 就是R18了
中间的等级依次过渡''')
min_sl = sl_settings[str(gid)]['min_sl']
max_sl = sl_settings[str(gid)]['max_sl']
# 限制条件优先度:r18,5张最大数,等级限制数量,频率,资金,由于要检测参数只好先把个别参数解析混入条款中了
uid = event.user_id
# r18限制条款,顺便解析了r18
r18_call = state["_matched_dict"]['r18_call'] or state["_matched_dict"]['r18_call2']
if r18_call and max_sl < 5:
await mitu.finish(reply_header(event, f'当前群内最大sl为{max_sl},不是5的话{BOTNAME}发不出R18图片哦~'))
# 5张最大数量限制条款,顺便解析了num
if state["_matched_dict"]['num']:
num = cn2an(state["_matched_dict"]['num'].replace('两', '二'), 'smart')
elif state["_matched_dict"]['num2']:
num = cn2an(state["_matched_dict"]['num2'].replace('两', '二'), 'smart')
else:
num = 1
if num > 5:
await mitu.finish(reply_header(event, '一次最多只能要5张'))
elif num == 0:
await mitu.finish(reply_header(event, '你好奇怪的要求'))
elif num < 0:
await mitu.finish(reply_header(event, f'好的,你现在欠大家{-num}张涩图,快发吧')) # TODO: 想想办法把负数给提取出来
# 等级限制数量条款,注册了用户信息
userinfo = UserLevel(uid)
if userinfo.level < num:
if userinfo.level > 0:
await mitu.finish(f'您当前等级为{userinfo.level},最多一次要{userinfo.level}张')
elif num > 1:
await mitu.finish(reply_header(event, '啊这..0级用户一次只能叫一张哦,使用[签到]或者学习对话可以提升等级~'))
# 频率限制条款,注册了频率限制器
flmt = FreqLimiter(uid, 'mitu')
if not flmt.check():
refuse = f'再等{ceil(flmt.left_time())}秒才能继续发图'
if userinfo.level == 0:
refuse += ',提升等级可以缩短冷却时间哦~'
await mitu.finish(reply_header(event, refuse)) # 不用round主要是防止出现'还有0秒'的不科学情况
cd = cd_step(userinfo.level, 480)
flmt.start_cd(cd) # 直接开始冷却,防止高频弹药击穿频率装甲,没返回图的话重新计算
# 资金限制条款,注册了每日次数限制器
cost = num * 3
dlmt = DailyNumberLimiter(uid, '美图', 3)
in_free = dlmt.check(close_conn=False)
if userinfo.fund < cost and not in_free:
if userinfo.fund > 0:
refuse = f'你还剩{userinfo.fund}块钱啦,要饭也不至于这么穷吧!'
elif userinfo.level == 0 and userinfo.fund == 0:
refuse = '每天有三次免费次数哦,使用[签到]领取资金来获得更多使用次数吧~'
else:
refuse = '你已经穷得裤子都穿不起了,到底是做了什么呀?!'
dlmt.conn.close() # 确认直接结束不会增加调用次数了,直接返还链接
flmt.start_cd(0)
await mitu.finish(reply_header(event, refuse))
kwd = state["_matched_dict"]['kwd']
if kwd:
kwds = tuple(kwdrex.split(kwd))
else:
kwds = ()
if r18_call:
min_sl = 5
success, result = get_mitu(event.group_id, kwds, num, min_sl, max_sl)
if not success:
flmt.start_cd(0)
dlmt.conn.close()
await mitu.finish(reply_header(event, result))
miss_count = 0 # 丢失的图片数量
count = len(result) # 返回数量,每次处理过后自减1
msg = MessageSegment.text('')
for data in result:
if not data:
miss_count += 1
count -= 1
continue
info = f"{data['title']}\n作者:{data['author']}\n来源:{data['source']}\n"
image = Image_Handler(data['file']).save2b64()
msg += MessageSegment.text(info) + MessageSegment.image(image)
if count > 1:
msg += MessageSegment.text('\n=====================\n')
count -= 1
elif len(result) < num:
msg += MessageSegment.text(f'\n=====================\n没搜到{num}张,只搜到这些了')
if miss_count > 0:
if len(result) > 1:
msg += MessageSegment.text(f'\n有{miss_count}张图丢掉了,{BOTNAME}去联系主人修复一下~')
else:
msg += MessageSegment.text(f'{BOTNAME}拿来图片但是丢掉了,我问问主人他看到没T_T')
for su in SUPERUSERS:
await bot.send_private_msg(user_id=su, message='貌似图库出了问题,错误记录在日志里了')
try:
await mitu.send(reply_header(event, msg))
except AdapterException as err:
logger.error(f'Some error happend when send mitu: {err}')
if miss_count < len(result):
if not in_free:
cost = (len(result) - miss_count) * 3 # 返回数量可能少于调用量,并且要减去miss的数量
userinfo.turnover(-cost) # 如果超过每天三次的免费次数则扣除相应资金
dlmt.increase() # 调用量加一
else:
flmt.start_cd(0)
dlmt.conn.close()
|
22,992 | b0efd61e517f51bffed57fe43d17042e72401b2d | # -*- coding:utf-8 -*-
import pytest
from r2api.config import Config
import r2pipe
def get_config():
r = r2pipe.open('test_bin')
return Config(r)
def test_set_variable():
c = get_config()
assert c.asm.bits == 64
c.asm.bits = 32
assert c.asm.bits == 32
c.r2.quit()
def test_get_variable_str():
c = get_config()
assert c.asm.arch == 'x86'
c.r2.quit()
def test_get_variable_bool():
c = get_config()
# TODO: This may fail depending on .r2rc ?
assert c.graph.format == 'dot'
c.r2.quit()
|
22,993 | d63d160e06c9916e15d9bac11c515f43115f0c3b | #!/usr/bin/env python
import random
def get_confusion_matrix(trues, preds):
c = {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}}
for t, p in zip(trues, preds):
c[t][p] += 1
return c
def get_accuracy(c):
return (c[0][0] + c[1][1]) / (c[0][1] + c[1][0] + c[0][0] + c[1][1])
def get_precision(c):
return (c[0][0]) / (c[0][0] + c[1][0])
def get_recall(c):
return (c[0][0]) / (c[0][0] + c[0][1])
def get_f1(c):
precision = get_precision(c)
recall = get_recall(c)
return 2 * (precision * recall) / (precision + recall)
def get_f1_2(c):
return 2 * (c[0][0]) / (2 * c[0][0] + c[1][0] + c[0][1])
if __name__ == '__main__':
n = 100000
nb_true = int(0.01 * n)
trues = [0] * (n - nb_true) + [1] * nb_true
print(sum(trues))
preds = [random.choice([0, 1]) for _ in range(n)]
c = get_confusion_matrix(trues, preds)
c = {0: {0: 5000, 1: 4000}, 1: {0: 10, 1: 990}}
#c = {0: {0: 990, 1: 10}, 1: {0: 4000, 1: 5000}}
print('Accuracy: {}%'.format(get_accuracy(c)))
print('F1: {}%'.format(get_f1(c)))
print('F1: {}%'.format(get_f1_2(c)))
|
22,994 | a823daa32822a6a83462c430660397fac93c537c | #!/usr/bin/env python3
## NOTE: requires pysocks
import requests, optparse, time
from threading import *
import re
maxConnections = 4
connection_lock = BoundedSemaphore(value=maxConnections)
#Global variables
Working = []
Tested = 0
To_test = 0
Force_quit = False
def get_tor_session():
session = requests.session()
# Tor uses the 9050 port as the default socks port
session.proxies = {'http': 'socks5h://127.0.0.1:9050',
'https': 'socks5h://127.0.0.1:9050'}
return session
def print_result():
global Working
print("[=] {} working URL".format(len(Working)))
for w in Working:
print("[+] {} for {}".format(w['status'], w['url']))
def connect(url, session, thr=False):
global Working
global Tested
global Force_quit
if not Force_quit:
time.sleep(1)
try:
resp = session.get(url, timeout=10)
Working.append({'url': url, 'status': resp.status_code})
except Exception as e:
if "Missing dependencies for SOCKS support." in str(e) and not Force_quit:
print("[!] Please that pysocks is installed (pip install pysocks)")
Force_quit = True
# elif "SOCKSHTTPConnectionPool" in str(e) and not Force_quit:
# print("[!] Please check that tor is running")
# Force_quit = True
else:
pass
# print(" - Error on {}".format(url))
finally:
if thr:
Tested = Tested + 1
connection_lock.release()
if (Tested == To_test): print_result()
def main():
global To_test
global Tested
global Force_quit
parser = optparse.OptionParser('usage%prog -f <hosts list>')
parser.add_option('-f', dest='hostFile', type='string', help='specify the file containing the list of urls to ping')
(options, args) = parser.parse_args()
hostFile = options.hostFile
if hostFile == None:
print(parser.usage)
exit(0)
fn = open(hostFile, 'r')
schemas = ['http']
session = get_tor_session()
checktor_resp = session.get("https://check.torproject.org").text
if "Congratulations. This browser is configured to use Tor." not in checktor_resp:
print("[!] Please make sure that TOR is running")
exit(0)
print("[i] Using a tor connection")
ip = re.search(r'<p>Your IP address appears to be: <strong>(\d+.\d+.\d+.\d+)</strong></p>', checktor_resp).group(1)
print("[i] Your IP address appears to be {}".format(ip))
lines = []
for line in fn.readlines():
lines.append(line.strip('\r').strip('\n'))
uniqueLines = set(lines)
To_test = len(uniqueLines)
print("[i] {} URL to test".format(To_test))
for ul in uniqueLines:
for s in schemas:
if Force_quit:
exit(0)
else:
url = "{}://{}".format(s, ul)
connection_lock.acquire()
print("[*] Testing: {}".format(url))
t = Thread(target=connect, args=(url, session, True))
child = t.start()
if __name__ == '__main__': main()
|
22,995 | a13156c61772c9f5e0933bd062003ecd9962971f | """
Copyright (c) 2016 Benoit CHAMPOUGNY. All right reserved.
This file is part of Arduifarm
Arduifarm is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Arduifarm is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Arduifarm. If not, see <http://www.gnu.org/licenses/>. 2
"""
"""
Define the dataframe for message exchange between components.
"""
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class Message(models.Model):
name = models.CharField(max_length=100)
label = models.IntegerField(validators=[MaxValueValidator(255)])
def __unicode__(self):
return "%s [%s]" % (self.name, str(self.label))
class Data(models.Model):
message = models.ForeignKey('Message')
name = models.CharField(max_length=100)
lsb = models.IntegerField(validators=[MinValueValidator(10), MaxValueValidator(29)])
msb = models.IntegerField(validators=[MinValueValidator(10), MaxValueValidator(29)])
class Meta:
ordering = ['lsb']
abstract = True
def __unicode__(self):
return "%s: %s" % (self.message, self.name)
class Units(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class BCDData(Data):
decimalPlaces = models.IntegerField(default=0)
minValue = models.IntegerField(blank=True, null=True)
maxValue = models.IntegerField(blank=True, null=True)
units = models.ForeignKey('Units', null=True, default=None, blank=True)
class BNRData(Data):
minValue = models.IntegerField()
maxValue = models.IntegerField()
units = models.ForeignKey('Units', null=True, default=None, blank=True)
class DiscreteData(Data):
trueName = models.CharField(max_length=100, null=True)
falseName = models.CharField(max_length=100, null=True)
|
22,996 | 5747e4d71f49879017c7bc6fe3a130bdeb5bf842 | from __future__ import annotations
from enum import IntEnum
from typing import Dict, List, Union, Optional, Tuple, Set
import click
class APTA:
    """Augmented Prefix Tree Acceptor.

    Built from labelled examples of the form ``"status len l_1 ... l_len"``
    where status is 1 (accepting) or 0 (rejecting).  Node 0 is the root and
    node ids index into :attr:`nodes`.
    """

    class Node:
        """One prefix-tree node: an id, a status and labelled children."""

        class NodeStatus(IntEnum):
            REJECTING = 0
            ACCEPTING = 1
            UNDEFINED = 2

            def is_acc(self) -> bool:
                return self is self.ACCEPTING

            def is_rej(self) -> bool:
                return self is self.REJECTING

        def __init__(self, id_: int, status: NodeStatus) -> None:
            self._id = id_
            self.status = status
            self._children = {}  # label -> Node

        @property
        def id_(self) -> int:
            return self._id

        @property
        def children(self) -> Dict[str, APTA.Node]:
            return self._children

        def has_child(self, label: str) -> bool:
            return label in self._children.keys()

        def get_child(self, label: str) -> Optional[APTA.Node]:
            # Returns None (rather than raising) when the edge is absent.
            return self._children[label] if self.has_child(label) else None

        def add_child(self, label: str, node: APTA.Node) -> None:
            self._children[label] = node

        def is_accepting(self) -> bool:
            return self.status.is_acc()

        def is_rejecting(self) -> bool:
            return self.status.is_rej()

    @property
    def root(self) -> Node:
        return self._root

    @property
    def alphabet(self) -> List[str]:
        # Sorted list view of the internal label set.
        return sorted(self._alphabet)

    @property
    def alphabet_size(self) -> int:
        return len(self._alphabet)

    @property
    def size(self) -> int:
        return len(self.nodes)

    @property
    def nodes(self) -> List[Node]:
        return self._nodes

    @property
    def accepting_nodes(self) -> List[Node]:
        return self._accepting_nodes

    @property
    def rejecting_nodes(self) -> List[Node]:
        return self._rejecting_nodes

    def get_node(self, i: int) -> Node:
        return self._nodes[i]

    def __init__(self, input_: Union[str, list, None]) -> None:
        """``input_`` may be a file path (first line ``"N A"``: example count
        and alphabet size), a list of example strings, or None for an empty
        tree."""
        self._root = self.Node(0, self.Node.NodeStatus.UNDEFINED)
        self._alphabet = set()
        self._nodes = [self._root]
        self._accepting_nodes = []
        self._rejecting_nodes = []
        if isinstance(input_, str):
            with click.open_file(input_) as file:
                examples_number, alphabet_size = [int(x) for x in next(file).split()]
                for __ in range(examples_number):
                    self.add_example(next(file))
                assert len(self._alphabet) == alphabet_size
        elif isinstance(input_, list):
            self.add_examples(input_)
        elif input_ is None:
            pass

    def _get_node_by_prefix(self, word: List[str]) -> Optional[Node]:
        # Follow `word` from the root; None when the path does not exist.
        cur_state = self._root
        for label in word:
            cur_state = cur_state.get_child(label)
            if not cur_state:
                return None
        return cur_state

    def add_examples(self, examples: List[str]) -> Tuple[int, List[int]]:
        """Add examples; return the tree size before insertion and the ids
        of already-existing nodes whose status may have been overwritten."""
        changed_statuses = []
        old_size = self.size
        for example in examples:
            existing_node = self._get_node_by_prefix(example.split()[2:])
            if existing_node:
                changed_statuses.append(existing_node.id_)
            self.add_example(example)
        return old_size, changed_statuses

    def add_example(self, example: str) -> None:
        # example: status len l_1 l_2 l_3 ... l_len
        parsed = example.split()
        current_node = self._root
        status = self.Node.NodeStatus(int(parsed[0]))
        assert int(parsed[1]) == len(parsed[2:])
        for label in parsed[2:]:
            self._alphabet.add(label)
            if current_node.has_child(label):
                current_node = current_node.get_child(label)
            else:
                new_node = self.Node(len(self._nodes), self.Node.NodeStatus.UNDEFINED)
                self._nodes.append(new_node)
                current_node.add_child(label, new_node)
                current_node = new_node
        current_node.status = status
        # NOTE: any non-accepting status lands in the rejecting list;
        # example statuses are expected to be 0 or 1.
        if status.is_acc():
            self._accepting_nodes.append(current_node)
        else:
            self._rejecting_nodes.append(current_node)

    def has_transition(self, from_: int, label: str, to: int) -> bool:
        return self._nodes[from_].has_child(label) and self._nodes[from_].get_child(label).id_ == to

    def to_dot(self) -> str:
        """Render the tree in Graphviz dot (double circle = accepting,
        triple circle = rejecting)."""
        s = (
            "digraph APTA {\n"
            "    node [shape = circle];\n"
            "    rankdir=LR;\n"
            "    0 [style = \"bold\"];\n"
        )
        for node in self._nodes:
            if node.is_accepting():
                s += "    {0} [peripheries=2]\n".format(str(node.id_))
            if node.is_rejecting():
                s += "    {0} [peripheries=3]\n".format(str(node.id_))
            for label, to in node.children.items():
                s += "    {0} -> {1} [label = {2}];\n".format(str(node.id_), str(to.id_), label)
        s += "}\n"
        return s

    def __str__(self) -> str:
        return self.to_dot()

    def __copy__(self) -> APTA:
        """Shallow copy: bookkeeping lists are fresh, but the Node objects
        (including the shared root) are NOT duplicated."""
        new_apta = type(self)(None)
        new_apta._root = self.root
        # BUG FIX: `self.alphabet` returns a sorted *list*; storing it
        # directly made add_example on a copy crash on `_alphabet.add`.
        new_apta._alphabet = set(self._alphabet)
        new_apta._nodes = self._nodes[:]
        new_apta._accepting_nodes = self._accepting_nodes[:]
        new_apta._rejecting_nodes = self._rejecting_nodes[:]
        return new_apta
class DFA:
    """A deterministic finite automaton assembled incrementally.

    States are appended with :meth:`add_state`; state 0 is the start state.
    """

    class State:
        """One DFA state: an id, accept/reject status and labelled edges."""

        class StateStatus(IntEnum):
            REJECTING, ACCEPTING = range(2)

            @classmethod
            def from_bool(cls, b: bool) -> DFA.State.StateStatus:
                # True -> ACCEPTING, False -> REJECTING.
                return cls.ACCEPTING if b else cls.REJECTING

            def to_bool(self) -> bool:
                return self is self.ACCEPTING

        def __init__(self, id_: int, status: DFA.State.StateStatus) -> None:
            self._id = id_
            self.status = status
            self._children = {}

        @property
        def id_(self) -> int:
            return self._id

        @property
        def children(self) -> Dict[str, DFA.State]:
            return self._children

        def has_child(self, label: str) -> bool:
            return label in self._children

        def get_child(self, label: str) -> DFA.State:
            # KeyError when no transition for `label` exists.
            return self._children[label]

        def add_child(self, label: str, node: DFA.State) -> None:
            self._children[label] = node

        def is_accepting(self) -> bool:
            return self.status is self.StateStatus.ACCEPTING

    def __init__(self) -> None:
        self._states = []

    def add_state(self, status: DFA.State.StateStatus) -> None:
        """Append a fresh state; its id is the current automaton size."""
        new_state = DFA.State(self.size(), status)
        self._states.append(new_state)

    def get_state(self, id_: int) -> DFA.State:
        return self._states[id_]

    def get_start(self) -> DFA.State:
        """State 0 is the start state; None for an empty automaton."""
        return self._states[0] if self.size() > 0 else None

    def size(self) -> int:
        return len(self._states)

    def add_transition(self, from_: int, label: str, to: int) -> None:
        source, target = self._states[from_], self._states[to]
        source.add_child(label, target)

    def run(self, word: List[str], start: DFA.State = None) -> bool:
        """Consume `word` from `start` (default: the start state) and
        report whether the reached state accepts."""
        state = start if start else self.get_start()
        for symbol in word:
            state = state.get_child(symbol)
        return state.is_accepting()

    def check_consistency(self, examples: List[str]) -> bool:
        """True iff every ``"status len l_1 ... l_len"`` example agrees
        with the automaton's verdict."""
        def agrees(example: str) -> bool:
            parts = example.split()
            return (parts[0] == '1') == self.run(parts[2:])

        return all(agrees(example) for example in examples)

    def to_dot(self) -> str:
        """Render the automaton in Graphviz dot format."""
        pieces = [
            "digraph DFA {\n"
            "    node [shape = circle];\n"
            "    0 [style = \"bold\"];\n"
        ]
        for state in self._states:
            if state.is_accepting():
                pieces.append("    {0} [peripheries=2]\n".format(str(state.id_)))
            for label, to in state.children.items():
                pieces.append("    {0} -> {1} [label = {2}];\n".format(str(state.id_), str(to.id_), label))
        pieces.append("}\n")
        return "".join(pieces)

    def __str__(self) -> str:
        return self.to_dot()
class InconsistencyGraph:
    """Graph over APTA node ids where an edge joins two nodes that can
    never be merged (merging would force an accepting node to coincide
    with a rejecting one)."""

    def __init__(self, apta: APTA, *, is_empty: bool = False) -> None:
        self._apta = apta
        self._size = apta.size
        self._edges: List[Set[int]] = [set() for _ in range(self.size)]
        if not is_empty:
            # Probe every unordered node pair; record an edge when the
            # pair is provably inconsistent.
            for node_id in range(apta.size):
                for other_id in range(node_id):
                    if not self._try_to_merge(self._apta.get_node(node_id), self._apta.get_node(other_id), {}):
                        self._edges[node_id].add(other_id)

    def update(self, new_nodes_from: int):
        # Extend the graph for APTA nodes added after construction.
        # NOTE(review): `self._size` is never refreshed from the APTA, so
        # this only covers ids below the construction-time size — confirm
        # how callers keep `_size` in sync.
        for node_id in range(new_nodes_from, self._size):
            self._edges.append(set())
            for other_id in range(node_id):
                if not self._try_to_merge(self._apta.get_node(node_id), self._apta.get_node(other_id), {}):
                    self._edges[node_id].add(other_id)

    def _has_edge(self, id1: int, id2: int):
        # Edges are stored one-directionally; check both orientations.
        return id2 in self._edges[id1] or id1 in self._edges[id2]

    @property
    def size(self) -> int:
        return self._size

    @property
    def edges(self) -> List[Set[int]]:
        return self._edges

    def _try_to_merge(self,
                      node: APTA.Node,
                      other: APTA.Node,
                      reps: Dict[int, Tuple[int, APTA.Node.NodeStatus]]) -> bool:
        # Walk the two subtrees in lockstep, tracking merged-status
        # representatives in `reps`; fail on an accept/reject clash.
        (node_rep_num, node_rep_st) = reps.get(node.id_, (node.id_, node.status))
        # NOTE(review): `other` is read directly, not through `reps` —
        # asymmetric with the line above; confirm this is intentional.
        (other_rep_num, other_rep_st) = (other.id_, other.status)
        if node_rep_st.is_acc() and other_rep_st.is_rej() or node_rep_st.is_rej() and other_rep_st.is_acc():
            return False
        else:
            # Point the larger id at the smaller; keep the minimum status
            # value (defined statuses win over UNDEFINED=2).
            if node_rep_num < other_rep_num:
                reps[other_rep_num] = (node_rep_num, min(node_rep_st, other_rep_st))
            else:
                reps[node_rep_num] = (other_rep_num, min(node_rep_st, other_rep_st))
        for label, child in node.children.items():
            if other.has_child(label):
                if not self._try_to_merge(child, other.get_child(label), reps):
                    return False
        return True

    def to_dot(self) -> str:
        """Render the (undirected) inconsistency graph in Graphviz dot."""
        s = (
            "digraph IG {\n"
            "    node [shape = circle];\n"
            "    edge [arrowhead=\"none\"];\n"
        )
        for node1 in range(self.size):
            if self._edges[node1]:
                for node2 in self._edges[node1]:
                    s += "    {0} -> {1};\n".format(str(node1), str(node2))
            else:
                s += "    {0};\n".format(str(node1))
        s += "}\n"
        return s

    def __str__(self) -> str:
        return self.to_dot()
|
22,997 | 8467a664696826d0779cc8c55cc4fc5f75e50498 | import cv2
import numpy as np
import sknw
from pygeoif import LineString
from scipy import ndimage
from scipy.ndimage import binary_dilation
from shapely.geometry import LineString, Point
from simplification.cutil import simplify_coords
from skimage.filters import gaussian
from skimage.morphology import remove_small_objects, skeletonize
def to_line_strings(mask, sigma=0.5, threashold=0.3, small_obj_size=300, dilation=1):
    """Vectorize a road-probability mask into WKT LINESTRING strings.

    Pipeline: Gaussian smooth -> binarize at `threashold` -> crop/pad ->
    dilate -> drop small blobs -> skeletonize -> build a pixel graph with
    sknw -> trace and orient each edge -> simplify -> dedupe segments and
    prune short spurs.
    """
    mask = gaussian(mask, sigma=sigma)
    mask = mask[..., 0]  # first channel only
    mask[mask < threashold] = 0
    mask[mask >= threashold] = 1
    mask = np.array(mask, dtype="uint8")
    mask = mask[:1300, :1300]  # NOTE(review): hard-coded tile size — confirm input dims
    # Pad 8 px so dilation/skeletonization behave at the borders; undone below.
    mask = cv2.copyMakeBorder(mask, 8, 8, 8, 8, cv2.BORDER_REPLICATE)
    if dilation > 0:
        mask = binary_dilation(mask, iterations=dilation)
    mask, _ = ndimage.label(mask)
    mask = remove_small_objects(mask, small_obj_size)
    mask[mask > 0] = 1
    ske = np.array(skeletonize(mask), dtype="uint8")
    ske=ske[8:-8,8:-8]  # strip the replicated border again
    graph = sknw.build_sknw(ske, multi=True)
    line_strings = []
    lines = []
    all_coords = []
    node, nodes = graph.node, graph.nodes()  # NOTE(review): old networkx attribute API
    # draw edges by pts
    for (s, e) in graph.edges():
        for k in range(len(graph[s][e])):
            ps = graph[s][e][k]['pts']
            coords = []
            # sknw stores (row, col); swap to (x, y).
            start = (int(nodes[s]['o'][1]), int(nodes[s]['o'][0]))
            all_points = set()
            for i in range(1, len(ps)):
                pt1 = (int(ps[i - 1][1]), int(ps[i - 1][0]))
                pt2 = (int(ps[i][1]), int(ps[i][0]))
                if pt1 not in all_points and pt2 not in all_points:
                    coords.append(pt1)
                    all_points.add(pt1)
                    coords.append(pt2)
                    all_points.add(pt2)
            end = (int(nodes[e]['o'][1]), int(nodes[e]['o'][0]))
            same_order = True
            if len(coords) > 1:
                # Orient the traced points so they run from `start` to `end`.
                same_order = np.math.hypot(start[0] - coords[0][0], start[1] - coords[0][1]) <= np.math.hypot(end[0] - coords[0][0], end[1] - coords[0][1])
            if same_order:
                coords.insert(0, start)
                coords.append(end)
            else:
                coords.insert(0, end)
                coords.append(start)
            coords = simplify_coords(coords, 2.0)  # Ramer-Douglas-Peucker, eps = 2 px
            all_coords.append(coords)
    for coords in all_coords:
        if len(coords) > 0:
            line_obj = LineString(coords)
            lines.append(line_obj)
            line_string_wkt = line_obj.wkt
            line_strings.append(line_string_wkt)
    # Final clean-up: unique 2-point segments, spur removal, then to WKT.
    new_lines = remove_duplicates(lines)
    new_lines = filter_lines(new_lines, calculate_node_count(new_lines))
    line_strings = [ l.wkt for l in new_lines]
    return line_strings
def remove_duplicates(lines):
    """Explode polylines into unique 2-point segments.

    Every consecutive coordinate pair becomes its own LineString; each
    segment is emitted once regardless of direction, and zero-length
    segments are dropped.
    """
    seen = set()
    new_lines = []
    for line in lines:
        points = line.coords
        for i in range(1, len(points)):
            pt1 = (int(points[i - 1][0]), int(points[i - 1][1]))
            pt2 = (int(points[i][0]), int(points[i][1]))
            # Both orientations are recorded below, so a single membership
            # test is sufficient here.
            if pt1 != pt2 and (pt1, pt2) not in seen:
                new_lines.append(LineString((pt1, pt2)))
                seen.add((pt1, pt2))
                seen.add((pt2, pt1))
    return new_lines
def filter_lines(new_lines, node_count):
    """Drop short spur segments.

    A 2-point line is removed when it is shorter than 10 px and joins a
    degree-1 node (a dead end) to a node of degree > 2 (a junction);
    everything else is kept.  `node_count` maps endpoint -> degree.
    """
    from math import hypot  # stdlib; the np.math alias was removed in numpy 2.0
    filtered_lines = []
    for line in new_lines:
        (x1, y1), (x2, y2) = [(int(p[0]), int(p[1])) for p in line.coords[:2]]
        length = hypot(x1 - x2, y1 - y2)
        d1, d2 = node_count[(x1, y1)], node_count[(x2, y2)]
        is_spur = (d1 == 1 and d2 > 2) or (d2 == 1 and d1 > 2)
        if not (is_spur and length < 10):
            filtered_lines.append(line)
    return filtered_lines
def calculate_node_count(new_lines):
    """Count, for every integer endpoint, how many consecutive-point
    segments touch it — i.e. the node's degree in the segment graph.

    Returns a Counter (a dict subclass), so existing `[]`/`get` callers
    keep working.
    """
    from collections import Counter  # stdlib; keeps this edit self-contained
    node_count = Counter()
    for line in new_lines:
        points = line.coords
        for i in range(1, len(points)):
            node_count[(int(points[i - 1][0]), int(points[i - 1][1]))] += 1
            node_count[(int(points[i][0]), int(points[i][1]))] += 1
    return node_count
def split_line(line):
    """Recursively split a 2-point *line* into pieces no longer than 10 px,
    cutting 5 px from the start each round via `cut`."""
    from math import hypot  # stdlib; the np.math alias was removed in numpy 2.0
    points = line.coords
    pt1 = (int(points[0][0]), int(points[0][1]))
    pt2 = (int(points[1][0]), int(points[1][1]))
    if hypot(pt1[0] - pt2[0], pt1[1] - pt2[1]) <= 10:
        return [line]
    pieces = []
    for part in cut(line, 5):
        pieces.extend(split_line(part))
    return pieces
def cut(line, distance):
    """Cut *line* in two at *distance* from its starting point.

    Recipe from the Shapely manual.  Returns a one-element list when the
    distance falls outside the line.  (Also removes a stray trailing token
    that had been fused onto the last line.)
    """
    if distance <= 0.0 or distance >= line.length:
        return [LineString(line)]
    coords = list(line.coords)
    for i, p in enumerate(coords):
        pd = line.project(Point(p))
        if pd == distance:
            # The cut lands exactly on an existing vertex.
            return [
                LineString(coords[:i + 1]),
                LineString(coords[i:])]
        if pd > distance:
            # The cut falls inside the segment ending at vertex i.
            cp = line.interpolate(distance)
            return [
                LineString(coords[:i] + [(cp.x, cp.y)]),
                LineString([(cp.x, cp.y)] + coords[i:])]
    # Defensive: floating-point projection can, in principle, leave the
    # loop without a hit; return the line uncut instead of None.
    return [LineString(line)]
22,998 | 6fb5b5fd60f92c9ef1d831ad71fde2a4a5d0fac0 | import numpy as np
# Wine classification demo: compare logistic regression on the full
# 13-feature space against a 2-component PCA projection.
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

wine = pd.read_csv('Wine.csv')
features = wine.iloc[:, 0:13].values
labels = wine.iloc[:, 13].values

# Hold out 20% for testing, then standardise with train-set statistics.
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size = 0.2, random_state = 0)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# PCA: project onto the first two principal components.
pca = PCA(n_components = 2)
X_train2 = pca.fit_transform(X_train)
X_test2 = pca.transform(X_test)

clf_full = LogisticRegression(random_state=0)
clf_full.fit(X_train, y_train)
clf_pca = LogisticRegression(random_state=0)
clf_pca.fit(X_train2, y_train)

y_pred = clf_full.predict(X_test)
y_pred2 = clf_pca.predict(X_test2)

print('without PCA')
print(confusion_matrix(y_test, y_pred))
#actual
print("with pca")
print(confusion_matrix(y_test, y_pred2))
|
22,999 | 771ad48b1e33f4075a34df350ca4c203e5238608 | import sys
import os
import pandas as pd
import numpy as np
import subprocess
import shlex
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.table import Table
import matplotlib.pyplot as plt
from matplotlib import image
import matplotlib
import extra_program as ex
import ezgal
from rsz import RSModel
##----
def make_images(field,ax=None):
    """Display the pre-rendered RGB snapshot for *field* on *ax* with both
    axes hidden.  Reads ``final/img<field>_2.eps`` produced by the ds9
    export step."""
    img_dir = 'final/'  # renamed from `dir`, which shadowed the builtin
    ax.imshow(image.imread(img_dir + "img%s_2.eps" % field))
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    return None
def red_seq_color_plot(color,df,mags,ax=None):
    """Plot a colour-magnitude diagram with red-sequence model overlays
    and count galaxies consistent with each trial redshift.

    Monte-Carlo resamples each galaxy's colour `n` times from its error,
    then counts samples falling between the +/-0.025-z model lines and
    brighter than the evolving i-band cut.  Uses module globals
    ``df_color``, ``df_colorerr``, ``s_m`` (colour map) and ``ex`` for
    luminosity distances.  Returns (redshift grid, counts per redshift).
    """
    if ax is None:
        ax = plt.gca()
    #https://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php#Jordi2006
    n=1000 #repeat number for sampling
    slope_fit,i_band0,color_err,rs_models,band_1,band_2=color_sloan(color, mags)
    # Observed colour and its photometric error per galaxy.
    ysample=df[df_color[band_1]]-df[df_color[band_2]]
    ysample_err=np.sqrt(df[df_colorerr[band_1]]**2+df[df_colorerr[band_2]]**2)
    total=[]
    for i in ysample.index:
        total.append(np.random.normal(loc=ysample[i],scale=ysample_err[i],size=n))
        #total.append(0.1)
    total=np.array(total)
    band_x='sloan_i'
    # Repeat each magnitude n times to align with the colour samples.
    all_x=np.repeat(df[df_color[band_x]],n)
    total=np.reshape(total, len(all_x))
    bp=ax.errorbar(df[df_color[band_x]],ysample,yerr=ysample_err,fmt='.',alpha=0.5)
    #bp=ax.errorbar(df[df_color[band_x]],ysample,fmt='.',alpha=0.5)
    red_band=np.arange(16,25,0.01) #just for the line plot in the 3rd plot
    redshift_range=np.arange(0.10,0.8,0.05) #for the actual data
    number=[]
    # Restrict the trial-redshift grid to where each colour is calibrated.
    if color=='sloan_g-sloan_r':
        redshift_range=np.arange(0.10,0.36,0.05)
    elif color=='sloan_r-sloan_i':
        redshift_range=np.arange(0.10,0.71,0.05)
    for redshift in redshift_range:
        if color=='sloan_g-sloan_r':
            # i_band_cut=20.5
            # Apparent-magnitude limit: absolute cut + distance modulus.
            i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
        elif color=='sloan_r-sloan_i':
            i_band_cut=i_band0+5.*np.log10(ex.d_L(redshift)*1e6)-5.
        aa=red_band<i_band_cut
        # Samples between the redshift +/- 0.025 model lines, above the cut.
        loc=[(all_x<i_band_cut)&\
            (total < rs_models[color][round(redshift+0.025,2)].rs_color(all_x))&\
            (total > rs_models[color][round(redshift-0.025,2)].rs_color(all_x))][0]
        number.append(np.sum(loc))
        ax.plot(red_band[aa],rs_models[color][round(redshift,2)].rs_color(red_band[aa]),\
            color=s_m.to_rgba(round(redshift,2)),ls='-')
        ax.plot(red_band[aa],rs_models[color][round(redshift+0.025,2)].rs_color(red_band[aa]),\
            color=s_m.to_rgba(round(redshift,2)),ls=':')
        ax.plot(red_band[aa],rs_models[color][round(redshift-0.025,2)].rs_color(red_band[aa]),\
            color=s_m.to_rgba(round(redshift,2)),ls=':')
    ax.set_xlim(16,25)
    if color == 'sloan_g-sloan_i':
        ax.set_ylim(0,4)
    elif color == 'sloan_g-sloan_r':
        ax.set_ylim(0.0,2.5)
    else:
        ax.set_ylim(-0.5,1.75)
    ax.set_xlabel(band_x)
    ax.set_ylabel(color)
    return np.array(redshift_range),np.array(number)
def color_sloan(color, mags):
    """Build red-sequence models for the requested colour pair.

    Returns ``(slope_fit, i_band0, color_err, rs_models, band_1, band_2)``.
    ``color_err`` is only calibrated for r-i and g-r and is None otherwise
    (previously those branches crashed with UnboundLocalError at return).
    Slope conversions follow the SDSS/Jordi et al. 2006 photometric
    transformations.  Uses module globals ``filters``, ``zs`` and RSModel.
    Raises ValueError for an unsupported colour.
    """
    color_err = None  # only defined for the calibrated colour pairs below
    if color == 'sloan_r-sloan_z':
        slope_r_m_i = -0.0192138872893
        slope_r_m_z = (1.584 * slope_r_m_i)
        slope_fit = [slope_r_m_z, 0]
        i_band0 = -20.
    elif color == 'sloan_g-sloan_i':
        slope_v_m_i = -0.029
        slope_g_m_i = (1.481 * slope_v_m_i)
        slope_fit = [slope_g_m_i, 0]
        i_band0 = -20.
    elif color == 'sloan_r-sloan_i':
        slope_rc_m_ic = -0.0192138872893
        slope_r_m_i = (1.007 * slope_rc_m_ic)
        slope_fit = [slope_r_m_i, 0]
        i_band0 = -20.5
        color_err = 0.18
    elif color == 'sloan_g-sloan_r':
        slope_v_m_r = -0.0133824600874
        slope_g_m_r = (1.646 * slope_v_m_r)
        slope_fit = [slope_g_m_r, 0]
        i_band0 = -20.5
        color_err = 0.15
    else:
        # Previously fell through and crashed on unbound slope_fit.
        raise ValueError('This color has not been implemented.')
    band_1, band_2 = color.split("-")
    band_2_idx = filters.index(band_2)
    rs_models = dict()
    rs_models[color] = dict()
    # One RSModel per model redshift: red magnitude from the stellar-pop
    # model grid, blue magnitude from the empirical blue_model offset.
    for z, m in zip(zs, mags):
        mag_2 = m[band_2_idx]
        mag_1 = blue_model(color, mags, z, mag_2)
        this_model = RSModel(z, mag_1, mag_2, slope_fit)
        rs_models[color][this_model.z] = this_model
    return slope_fit, i_band0, color_err, rs_models, band_1, band_2
# adding the slope for different color set that we are interested in (01_rsz_test,fit_gr_ri01.ipyn)
def blue_model(color, mags, redshift, red_mag):
    """Return the model blue-band magnitude paired with *red_mag* for a
    red-sequence galaxy at *redshift*.

    The colour offsets are linear (piecewise for r-i) empirical fits.
    *mags* is accepted for signature compatibility but unused.
    Raises ValueError for unsupported colours (previously a py2 print
    followed by an UnboundLocalError on return).
    """
    if color == 'sloan_g-sloan_r':
        return (0.787302458781 + 2.9352 * redshift) + red_mag
    if color == 'sloan_r-sloan_i':
        # Piecewise fit with a break at z = 0.36.
        if redshift <= 0.36:
            return (0.348871987852 + 0.75340856 * redshift) + red_mag
        return (-0.210727367027 + 2.2836974 * redshift) + red_mag
    raise ValueError('This color has not been implemented.')
def histogram_plot(xranf,numberf,df,ax=None,line=False,cbar=False):
    """Bar-plot red-sequence galaxy counts versus trial redshift.

    The first 6 entries of `numberf` are the g-r counts (red bars); the
    rest are r-i (blue bars).  Relies on module globals ``fig``, ``s_m``
    and — when ``line`` is set — ``dff_sdss`` and ``ind``.  Returns an
    array [z_gr, sigma_gr, z_ri, sigma_ri(, z_qso, sigma_qso)].
    """
    l2=6  # split index between the g-r and r-i count arrays
    ax.set_xlim(0,0.8)
    # NOTE(review): ic2/ic3 are never updated, so the redshifts reported
    # below are always the first bin; sigma values are always 0.  Looks
    # like leftover of removed peak-finding code — confirm intent.
    ic2,ic3=0,0
    numbers=numberf[:6]
    numbers2=numberf[l2:]
    ax.bar(xranf[:6],numbers,width=0.05,color='red',alpha=0.5,align='center')
    ax.bar(xranf[l2:],numbers2,width=0.05,alpha=0.5,align='center')
    if cbar:
        cbar=fig.colorbar(s_m, ax=ax)
        cbar.set_label("redshift")
    if line:
        # Mark the catalogue QSO redshift (when known) and the picked bins.
        if dff_sdss.loc[ind].redshift!=-1:
            ax.axvline(dff_sdss.redshift[ind],ls='--',color='#66cc00',lw=2.,label='qso z=%.2f'%dff_sdss.redshift[ind])
        ax.axvline(xranf[:6][ic2],ls='--',color='black',lw=2.,label='red_seq g-r z=%.2f'%xranf[:6][ic2])
        ax.axvline(xranf[l2:][ic3],ls='--',color='purple',lw=2.,label='red_seq r-i z=%.2f'%xranf[l2:][ic3])
        ax.legend(loc='best',frameon=False)
    sigma,sigma2,sigma3=0.,0.,0.  # placeholder uncertainties
    if line:
        return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3,dff_sdss.redshift[ind],sigma])
    else:
        return np.array([xranf[:6][ic2],sigma2,xranf[l2:][ic3],sigma3])
def save_rgb_image_extra(field, f026):
    """Drive DS9 to export RGB finder images for *field* (python 2).

    Writes ``final/img<field>_2.eps`` (crosshair at the target RA/Dec from
    the *f026* catalogue row) and ``final/img<field>_3.eps``; blocks until
    each ds9 subprocess exits (check_call raises on failure).
    """
    # Crosshair version centred on the catalogue position.
    cmd = "ds9 -zscale -crosshair %f %f wcs fk5 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_2.eps -exit" % \
        (f026.RA0.values[0], f026.DEC0.values[0], field, field, field, field)
    print cmd
    sub = subprocess.check_call(shlex.split(cmd))
    # Plain version without the crosshair.
    cmd = "ds9 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage final/img%s_3.eps -exit" % \
        (field, field, field, field)
    print cmd
    sub = subprocess.check_call(shlex.split(cmd))
    print 'finished saving final/img%s.eps' % field
def find_offset(fname):
    """Parse an SLR ``*.offsets.list`` file (python 2).

    Returns ``(zip(band, corr, err), corr)``.  Assumes each of lines
    6..second-to-last looks like ``<...band-letter> <corr> <?> <err> ...``
    when split on single spaces — TODO confirm against a real offsets file.
    """
    with open(fname) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    band=[x.split(' ')[0][-1] for x in content[5:-1]]   # last char of first token = band letter
    corr=[float(x.split(' ')[1]) for x in content[5:-1]]
    ecorr=[float(x.split(' ')[3]) for x in content[5:-1]]
    return zip(band,corr,ecorr), corr
def find_num(fname):
    """Pull the 2MASS star count, fitted star count and chi-squared (as
    strings) out of an offsets-file header.

    Line and column positions are hard-coded — TODO confirm against the
    slr output format.
    """
    with open(fname) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    num_2mass=content[0].split(' ')[3]
    num_star=content[3].split(' ')[1]
    chisq=content[2].split(' ')[1]
    return num_2mass,num_star,chisq
##--------
if __name__ == "__main__":
    # CLI (python 2): python <script> <FieldNNN>
    # Builds a 4-panel diagnostic plot for one PISCO field: RGB image,
    # g-r and r-i red-sequence CMDs, and the redshift-count histogram.
    print 'Number of arguments:', len(sys.argv), 'arguments.'
    print 'Argument List:', str(sys.argv)
    filters=['sloan_r','sloan_i','sloan_z','sloan_g']
    zfs = np.arange(1.0, 6.001, 0.05)
    zf = 3.0 #formation redshift
    spacing=0.01 #spacing of redshift for resolution (0.01 is high_res, 0.05 low_res)
    zs = np.arange(0.05, 2.500001, spacing)
    # Stellar-population model grid, normalised to Coma's Ks magnitude.
    new_model = ezgal.model("pisco_pipeline/pisco_exp_chab_evolved.model")
    new_model.set_normalization(filter='ks', mag=10.9, apparent=True, vega=True,z=0.023) ##normalize to Coma
    new_mags = new_model.get_apparent_mags(zf, filters=filters, zs=zs, ab=True)
    # Map ezgal filter names to SExtractor catalogue magnitude columns.
    df_color=dict()
    df_color['sloan_g']='MAG_g'
    df_color['sloan_r']='MAG_r'
    df_color['sloan_i']='MAG_i'
    df_color['sloan_z']='MAG_z'
    df_colorerr=dict()
    df_colorerr['sloan_g']='MAGERR_g'
    df_colorerr['sloan_r']='MAGERR_r'
    df_colorerr['sloan_i']='MAGERR_i'
    df_colorerr['sloan_z']='MAGERR_z'
    # Shared redshift colour scale for the model overlays.
    zss=zs[0:80:5]
    norm = matplotlib.colors.Normalize(vmin=np.min(zss),vmax=np.max(zss))
    c_m = matplotlib.cm.RdYlBu
    s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
    s_m.set_array([])
    # Pipeline to run PISCO reduction data
    #dir = str(sys.argv[1])
    field = str(sys.argv[1])
    slrdir = 'slr_output'
    # field = 'Field054'
    # Target list, seeing table and photometric-offset bookkeeping.
    df_all = pd.read_csv("/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/all_objs_list_new.csv")
    f026 = df_all[df_all["name"]==field]
    redshift=f026.redshift.values[0]
    priority=f026.priority.values[0]
    seeing=Table.read('/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/PISCO_Jan17_seeing.csv')
    see=seeing[seeing['Field']==int(field[-3:])]['Seeing'][0]
    offset=find_offset('slr_output/star_%s.fits.offsets.list' % field)
    num_2mass,num_star,chisq=find_num('../pisco_code/slr_output/star_%s.fits.offsets.list' % field)
    #save_rgb_image_extra(field, f026)
    # Galaxy catalogue for this field; keep likely galaxies within 1.5 Mpc.
    df = pd.read_csv(os.path.join(slrdir,'ntotal_%s.csv' % field),index_col=0)
    c5 = SkyCoord(ra=df['XWIN_WORLD'].values*u.degree, dec=df['YWIN_WORLD'].values*u.degree)
    c0 = SkyCoord(ra=f026.RA0*u.degree, dec=f026.DEC0*u.degree)
    sep = c5.separation(c0)
    cut=df[(sep.arcmin<ex.rad_A(redshift,dist=1.5)) & (df["CLASS_STAR"]<0.75)] #CLASS_STAR < 0.75
    #ncut=df[(sep.arcmin>2.5) & (df["CLASS_STAR"]<0.8)]
    print see
    print offset[1]
    # Assemble the 4-panel diagnostic figure and save it.
    fig,ax=plt.subplots(1,4,figsize=(20,5));
    fig.suptitle(field+', Redshift='+str(redshift)+', Priority='+priority+', Seeing='+str(see)+', Offset(r,i,g,z)='+str(offset[1])+', #2mass='+str(num_2mass)+', #stars='+str(num_star)+', chisq='+str(chisq))
    make_images(field,ax[0])
    xran,numbers_gr=red_seq_color_plot('sloan_g-sloan_r',cut,new_mags,ax[1])
    xran2,numbers_ri=red_seq_color_plot('sloan_r-sloan_i',cut,new_mags,ax[2])
    total_sigma=histogram_plot(np.append(xran,xran2),np.append(numbers_gr,numbers_ri),cut,ax[3])
    ax[3].axvline(redshift, color='green')
    fig.tight_layout()
    fig.savefig('plots/plot_%s.png' % (field), dpi=200)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.