text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that provides functions used in
the context to pre-process input
"""
import argparse

import numpy as np
import scipy as sp
import scipy.ndimage  # needed for sp.ndimage.zoom (scipy.misc.imresize was removed)
def parse_args_for_image_input(argv=None):
    """Parse the arguments for test UIs that accept images as input.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (backward-compatible generalization so callers/tests can inject
            arguments explicitly).

    Returns:
        argparse.Namespace with `model` (path), `labels_path` (path or None)
        and `shape` converted to a tuple of ints, e.g. "28,28" -> (28, 28).
    """
    parser = argparse.ArgumentParser(description="GUI that may be used to test models.")
    parser.add_argument(
        "--model",
        metavar="PATH",
        type=str,
        required=True,
        help="path to the Tensorflow model file",
    )
    parser.add_argument(
        "--labels",
        dest="labels_path",
        metavar="PATH",
        type=str,
        help="path to the pickled array of labels",
    )
    parser.add_argument(
        "--shape",
        metavar="SHAPE",
        type=str,
        required=True,
        help='Shape of the input data e.g "28,28"',
    )
    args = parser.parse_args(argv)
    # maxsplit=3 caps the shape at 4 dimensions, as in the original.
    args.shape = tuple(int(el) for el in args.shape.split(",", 3))
    return args
def resize_image_data(image_data, desired_shape):
    """Resize the image data to `desired_shape`.

    BUG FIX: `scipy.misc.imresize` was removed in SciPy 1.3; this now uses
    `scipy.ndimage.zoom` with bilinear interpolation (order=1) instead.

    Args:
        image_data: 2-D numpy array of pixels.
            # assumes grayscale (H, W) input, as produced by this module's
            # pipeline — TODO confirm no 3-D callers exist
        desired_shape: target shape; the first two entries are (rows, cols),
            and the result is reshaped to the full desired_shape (e.g. to
            append a channel axis).

    Returns:
        Resized array with shape == desired_shape and unchanged dtype.
    """
    target_rows, target_cols = desired_shape[0], desired_shape[1]
    zoom_factors = (
        target_rows / image_data.shape[0],
        target_cols / image_data.shape[1],
    )
    result = sp.ndimage.zoom(image_data, zoom_factors, order=1)
    # NOTE(review): zoom can be off by one pixel for extreme non-integer
    # factors; reshape will raise loudly in that case rather than corrupt data.
    return result.reshape(desired_shape)
def invert_image_data(image_data):
    """Invert the image (pixel -> 255 - pixel) without changing the dtype."""
    full_scale = np.full(image_data.shape, 255, dtype=image_data.dtype)
    return full_scale - image_data
def normalize_image_data(image_data):
    """Scale pixel values from [0, 255] into [0.0, 1.0] floats."""
    scaled = image_data.astype(float)
    scaled /= 255
    return scaled
def get_bounding_box(img):
    """Find the bounding box of the drawing (nonzero pixels) in the image.

    Returns ((x1, y1), (x2, y2)) — inclusive top-left / bottom-right corners.
    """
    cols = np.flatnonzero(img.sum(axis=0))
    rows = np.flatnonzero(img.sum(axis=1))
    return ((cols[0], rows[0]), (cols[-1], rows[-1]))
def crop_image_to_bounding_box(img, bnd_box):
    """Crop the image to the inclusive bounding box ((x1, y1), (x2, y2))."""
    (x1, y1), (x2, y2) = bnd_box
    return img[y1:y2 + 1, x1:x2 + 1]
def pad_image(img, shape):
    """Zero-pad the image up to `shape`, centering it.

    When the padding is odd, the extra pixel goes to the bottom/right.
    """
    pads = []
    for axis in (0, 1):
        total = shape[axis] - img.shape[axis]
        before = total // 2
        pads.append((before, total - before))
    return np.pad(img, (pads[0], pads[1]), "constant", constant_values=0)
def preprocess_image_data(image_data, shape, invert=False, center=False, fit=False):
    """Preprocess image data for model input.

    Optionally inverts the image, crops it to the drawing's bounding box
    (either re-centered in the original shape or fitted with a 20% margin),
    resizes it to `shape`, and normalizes the pixel values into [0, 1].

    Args:
        image_data: 2-D numpy array of pixel values in [0, 255].
        shape: target shape of the model input.
        invert: invert pixel values first (e.g. black-on-white -> white-on-black).
        center: re-center the drawing within the original image shape.
        fit: crop to the drawing and pad to a square 1.2x its largest side.

    Returns:
        Float numpy array of shape `shape` with values in [0.0, 1.0].
    """
    if invert:
        image_data = invert_image_data(image_data)
    if fit or center:
        bnd_box = get_bounding_box(image_data)
        drawing_data = crop_image_to_bounding_box(image_data, bnd_box)
        if fit:
            # Square canvas 20% larger than the drawing's largest dimension.
            new_shape = tuple([int(max(drawing_data.shape) * 1.2)] * 2)
        else:
            new_shape = image_data.shape
        image_data = pad_image(drawing_data, new_shape)
    # BUG FIX: removed leftover debug call sp.misc.imsave("outfile.jpg", ...) —
    # it wrote a file on every invocation and scipy.misc.imsave no longer exists.
    image_data = resize_image_data(image_data, shape)
    return normalize_image_data(image_data)
|
#https://scipy-lectures.org/packages/scikit-learn/auto_examples/plot_tsne.html
import os
import numpy as np
import argparse
import math
import random
import pandas as pd
import csv
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
#matplotlib.use("Agg")
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from model_dann import CNNModel
import data_train
# ---------
# Parser
# ---------
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=200, help="size of the batches")
parser.add_argument("--n_cpu", type=int, default=4, help="number of cpu threads to use during batch generation")
parser.add_argument("--gpu", type=int, default=0, help="GPU number")
parser.add_argument("--source", type=str, default="MNISTM", help="name of source dataset")
parser.add_argument("--data_dir", type=str, default="./data/", help="root path to the testing images")
parser.add_argument("--save_dir", type=str, default="./saved_models/", help="directory where the saved model is located")
opt = parser.parse_args()
# ---------------------
# Miscellaneous Setup
# ---------------------
# Set up GPU: `cuda` is consulted throughout the rest of the script.
if torch.cuda.is_available():
    print("GPU found")
    cuda = True
    torch.cuda.set_device(opt.gpu)
else:
    print("GPU not found")
    cuda = False
# ------------
# Dataloader
# ------------
print("---> preparing dataloaders...")
# Source/target test sets are swapped depending on --source.
# NOTE(review): any --source value other than MNISTM/SVHN leaves
# source_test/target_test undefined and raises NameError below — confirm
# the accepted inputs with the caller.
if opt.source == "MNISTM":
    source_test = data_train.MNISTM(opt, "test")
    target_test = data_train.SVHN(opt, "test")
elif opt.source == "SVHN":
    source_test = data_train.SVHN(opt, "test")
    target_test = data_train.MNISTM(opt, "test")
dataloader_source_test = torch.utils.data.DataLoader(
    dataset=source_test,
    batch_size=opt.batch_size,
    shuffle=False,
    num_workers=opt.n_cpu
)
dataloader_target_test = torch.utils.data.DataLoader(
    dataset=target_test,
    batch_size=opt.batch_size,
    shuffle=False,
    num_workers=opt.n_cpu
)
# ------------
# Load Model
# ------------
print("---> preparing model...")
my_net = CNNModel()
# Select model that was trained on the correct dataset.
if opt.source == "MNISTM":
    model_name = "dann_mnistm_svhn.pth.tar"
elif opt.source == "SVHN":
    model_name = "dann_svhn_mnistm.pth.tar"
else:
    # BUG FIX: an unknown source previously fell through and raised a
    # confusing NameError on model_pth below; fail fast with a clear message.
    raise ValueError("unknown source dataset: {}".format(opt.source))
model_pth = os.path.join(opt.save_dir, model_name)
# Load model
if os.path.exists(model_pth):
    print("---> found previously saved {}, loading checkpoint...".format(model_name))
    # BUG FIX: map checkpoint tensors onto the available device; a checkpoint
    # saved on GPU previously crashed torch.load on CPU-only machines.
    checkpoint = torch.load(model_pth, map_location="cuda" if cuda else "cpu")
    my_net.load_state_dict(checkpoint)
else:
    print("Error: model not loaded")
# Move to GPU
if cuda:
    my_net = my_net.cuda()
# ----------
# Features
# ----------
print("---> generating features...")
my_net.eval()
# Extract one batch of features from the source domain.
with torch.no_grad():  # no gradients needed during evaluation
    for idx, (imgs, label) in enumerate(dataloader_source_test):
        # BUG FIX: only move to GPU when one is available; the original
        # called .cuda() unconditionally and crashed on CPU-only machines.
        if cuda:
            imgs = imgs.cuda()
        source_features = my_net.return_feature(input_data=imgs).cpu().numpy()  # batch_size x 800 (one row per image)
        source_class = label.cpu().numpy()  # numpy vector, size = batch size
        if idx == 0:  # only the first batch is needed for the t-SNE plot
            break
# Extract one batch of features from the target domain.
with torch.no_grad():
    for idx, (imgs, label) in enumerate(dataloader_target_test):
        if cuda:
            imgs = imgs.cuda()
        target_features = my_net.return_feature(input_data=imgs).cpu().numpy()  # batch_size x 800
        target_class = label.cpu().numpy()  # numpy vector, size = batch size
        if idx == 0:
            break
# -------
# t-SNE
# -------
# Combine source and target features/classes into one array
combined_features = np.vstack((source_features, target_features))  # (n_src + n_tgt) x 800
combined_class = np.hstack((source_class, target_class))
# Combine source and target domains into one array.
# BUG FIX: size the domain labels from the actual extracted batches; a batch
# smaller than opt.batch_size previously misaligned labels and features.
source_domain = np.zeros(source_features.shape[0], dtype=np.int16)
target_domain = np.ones(target_features.shape[0], dtype=np.int16)
combined_domain = np.hstack((source_domain, target_domain))
# Perform t-SNE on features
print("---> performing t-SNE...")
tsne = TSNE(n_components=2)
features_tsne = tsne.fit_transform(combined_features)  # numpy array, (n_samples, 2)
# Assign a plot color to every sample, keyed by class label and by domain.
# Digits 0-9 map to distinct colors; black = source domain, red = target.
CLASS_COLOR_MAP = {
    0: 'k', 1: 'r', 2: 'g', 3: 'b', 4: 'c',
    5: 'm', 6: 'pink', 7: 'gold', 8: 'cyan', 9: 'orange',
}
DOMAIN_COLOR_MAP = {0: 'k', 1: 'r'}
colors_class = []
colors_domain = []
for i in range(features_tsne.shape[0]):
    cls = combined_class[i]
    if cls in CLASS_COLOR_MAP:
        colors_class.append(CLASS_COLOR_MAP[cls])
    dom = combined_domain[i]
    if dom in DOMAIN_COLOR_MAP:
        colors_domain.append(DOMAIN_COLOR_MAP[dom])
# Plot t-SNE features, with color indicating class
plt.figure()
plt.scatter(features_tsne[:,0], features_tsne[:,1], s=4, c=colors_class)
plt.savefig("class")
# Plot t-SNE features, with color indicating domain
plt.figure()
plt.scatter(features_tsne[:,0], features_tsne[:,1], s=4, c=colors_domain)
plt.savefig("domain")
print("***** Plots Saved *****")
# Release all figure memory now that both plots are written to disk.
plt.close("all")
#!/usr/bin/env python
import collectd
import collections
import json
import urllib2
# Endpoint configs built by config_callback; falls back to CONFIG_DEFAULT
# when collectd provides no configuration.
CONFIGS = []
CONFIG_DEFAULT = [{
    "host": "localhost",
    "port": "9700",
    "node": "filebeat",
    "url": "http://localhost:9700/debug/vars"
}]
# A metric definition: collectd type ("gauge"/"counter") plus the
# '/'-separated path of the value inside the expvar JSON document.
stat = collections.namedtuple("Stat", ("type", "path"))
# Metrics dictionary: collectd type-instance name -> Stat definition.
STATS = {
    # Harvesters:
    "filebeat.harvester.closed": stat("gauge", "filebeat.harvester.closed"),
    "filebeat.harvester.files_truncated": stat("counter", "filebeat.harvester.files.truncated"),
    "filebeat.harvester.open_files": stat("gauge", "filebeat.harvester.open_files"),
    "filebeat.harvester.running": stat("gauge", "filebeat.harvester.running"),
    "filebeat.harvester.skipped": stat("counter", "filebeat.harvester.skipped"),
    "filebeat.harvester.started": stat("gauge", "filebeat.harvester.started"),
    # Prospectors:
    "filebeat.prospector.log_files_renamed": stat("counter", "filebeat.prospector.log.files.renamed"),
    "filebeat.prospector.log_files_truncated": stat("counter", "filebeat.prospector.log.files.truncated"),
    # Config:
    "libbeat.config.module.running": stat("counter", "libbeat.config.module.running"),
    "libbeat.config.module.starts": stat("counter", "libbeat.config.module.starts"),
    "libbeat.config.module.stops": stat("counter", "libbeat.config.module.stops"),
    "libbeat.config.reloads": stat("counter", "libbeat.config.reloads"),
    # Outputs:
    "libbeat.es.call_count.publish_events": stat("counter", "libbeat.es.call_count.PublishEvents"),
    "libbeat.es.publish.read_bytes": stat("counter", "libbeat.es.publish.read_bytes"),
    "libbeat.es.publish.read_errors": stat("counter", "libbeat.es.publish.read_errors"),
    "libbeat.es.publish.write_bytes": stat("counter", "libbeat.es.publish.write_bytes"),
    "libbeat.es.publish.write_errors": stat("counter", "libbeat.es.publish.write_errors"),
    "libbeat.es.published_and_acked_events": stat("counter", "libbeat.es.published_and_acked_events"),
    "libbeat.es.published_but_not_acked_events": stat("counter", "libbeat.es.published_but_not_acked_events"),
    "libbeat.kafka.call_count.publish_events": stat("counter", "libbeat.kafka.call_count.PublishEvents"),
    "libbeat.kafka.published_and_acked_events": stat("counter", "libbeat.kafka.published_and_acked_events"),
    "libbeat.kafka.published_but_not_acked_events": stat("counter", "libbeat.kafka.published_but_not_acked_events"),
    "libbeat.logstash.call_count.publish_events": stat("counter", "libbeat.logstash.call_count.PublishEvents"),
    "libbeat.logstash.publish.read_bytes": stat("counter", "libbeat.logstash.publish.read_bytes"),
    "libbeat.logstash.publish.read_errors": stat("counter", "libbeat.logstash.publish.read_errors"),
    "libbeat.logstash.publish.write_bytes": stat("counter", "libbeat.logstash.publish.write_bytes"),
    "libbeat.logstash.publish.write_errors": stat("counter", "libbeat.logstash.publish.write_errors"),
    "libbeat.logstash.published_and_acked_events": stat("counter", "libbeat.logstash.published_and_acked_events"),
    "libbeat.logstash.published_but_not_acked_events": stat("counter", "libbeat.logstash.published_but_not_acked_events"),
    "libbeat.outputs.messages_dropped": stat("counter", "libbeat.outputs.messages_dropped"),
    "libbeat.publisher.messages_in_worker_queues": stat("counter", "libbeat.publisher.messages_in_worker_queues"),
    "libbeat.publisher.published_events": stat("counter", "libbeat.publisher.published_events"),
    "libbeat.redis.publish.read_bytes": stat("counter", "libbeat.redis.publish.read_bytes"),
    "libbeat.redis.publish.read_errors": stat("counter", "libbeat.redis.publish.read_errors"),
    "libbeat.redis.publish.write_bytes": stat("counter", "libbeat.redis.publish.write_bytes"),
    "libbeat.redis.publish.write_errors": stat("counter", "libbeat.redis.publish.write_errors"),
    "publish.events": stat("counter", "publish.events"),
    # Memory (Go runtime memstats; note these paths use '/' separators
    # because the values are nested objects in the expvar document):
    "memstats.alloc": stat("counter", "memstats/Alloc"),
    "memstats.buck_hash_sys": stat("counter", "memstats/BuckHashSys"),
    "memstats.frees": stat("counter", "memstats/Frees"),
    "memstats.gc_cpu_fraction": stat("counter", "memstats/GCCPUFraction"),
    "memstats.gc_sys": stat("counter", "memstats/GCSys"),
    "memstats.heap_alloc": stat("counter", "memstats/HeapAlloc"),
    "memstats.heap_idle": stat("counter", "memstats/HeapIdle"),
    "memstats.heap_inuse": stat("counter", "memstats/HeapInuse"),
    "memstats.heap_objects": stat("counter", "memstats/HeapObjects"),
    "memstats.heap_released": stat("counter", "memstats/HeapReleased"),
    "memstats.heap_sys": stat("counter", "memstats/HeapSys"),
    "memstats.last_gc": stat("counter", "memstats/LastGC"),
    "memstats.lookups": stat("counter", "memstats/Lookups"),
    "memstats.m_cache_inuse": stat("counter", "memstats/MCacheInuse"),
    "memstats.m_cache_sys": stat("counter", "memstats/MCacheSys"),
    "memstats.m_span_inuse": stat("counter", "memstats/MSpanInuse"),
    "memstats.m_span_sys": stat("counter", "memstats/MSpanSys"),
    "memstats.mallocs": stat("counter", "memstats/Mallocs"),
    "memstats.next_gc": stat("counter", "memstats/NextGC"),
    "memstats.num_gc": stat("counter", "memstats/NumGC"),
    "memstats.other_sys": stat("counter", "memstats/OtherSys"),
    "memstats.pause_total_ns": stat("counter", "memstats/PauseTotalNs"),
    "memstats.stack_inuse": stat("counter", "memstats/StackInuse"),
    "memstats.stack_sys": stat("counter", "memstats/StackSys"),
    "memstats.sys": stat("counter", "memstats/Sys"),
    "memstats.total_alloc": stat("counter", "memstats/TotalAlloc"),
    # Registry
    "registrar.states.cleanup": stat("counter", "registrar.states.cleanup"),
    "registrar.states.current": stat("counter", "registrar.states.current"),
    "registrar.states.update": stat("counter", "registrar.states.update"),
    "registrar.writes": stat("counter", "registrar.writes"),
}
def extract_value(json, path):
    """Walk a '/'-separated path into a nested dict and return the leaf value."""
    node = json
    for key in path.split("/"):
        node = node[key]
    return node
def fetch_stats():
    """Poll every configured expvar endpoint and dispatch its metrics.

    Falls back to CONFIG_DEFAULT when no collectd configuration was given.
    Fetch failures are logged per endpoint.
    """
    global CONFIGS
    if not CONFIGS: CONFIGS = CONFIG_DEFAULT
    for config in CONFIGS:
        try:
            stats = json.load(urllib2.urlopen(config["url"], timeout=10))
        except Exception as err:
            collectd.error("Filebeat plugin ("+config["node"]+"): Error fetching stats from "+config["url"]+": "+str(err))
            # BUG FIX: keep polling the remaining endpoints instead of
            # aborting the whole read cycle on the first failure.
            continue
        parse_stats(stats, config)
def parse_stats(json, config):
    """Extract every metric defined in STATS from the JSON blob and dispatch it.

    Unresolvable paths are logged as warnings and skipped.
    """
    # `stat_def` avoids shadowing the module-level `stat` namedtuple.
    for name, stat_def in STATS.iteritems():
        try:
            value = extract_value(json, stat_def.path)
        except Exception as err:
            collectd.warning("Filebeat plugin ("+config["node"]+"): Could not process path "+stat_def.path+": "+str(err))
            continue
        dispatch_stat(name, value, stat_def.type, config)
def dispatch_stat(stat_name, stat_value, stat_type, config):
    """Submit a single metric value to collectd.

    stat_name becomes the type instance, stat_type the collectd type
    ("gauge" or "counter"), and the endpoint's "node" the plugin name.
    """
    val = collectd.Values(plugin=config["node"])
    val.type_instance = stat_name
    val.values = [stat_value]
    val.type = stat_type
    val.dispatch()
def read_callback():
    """collectd read callback: poll all configured endpoints.

    Minor fix: dropped the unused `stats = ...` binding — fetch_stats
    dispatches the metrics itself and always returns None.
    """
    fetch_stats()
def config_callback(config):
    """collectd config callback: build one endpoint entry per config block.

    BUG FIX: the original reset host/port/node and appended a new CONFIGS
    entry inside the children loop, creating one partially-filled endpoint
    per option key. Defaults are now initialised once, all children are
    applied, and a single complete entry is appended afterwards.
    """
    global CONFIGS
    host = CONFIG_DEFAULT[0]["host"]
    port = CONFIG_DEFAULT[0]["port"]
    node = CONFIG_DEFAULT[0]["node"]
    for child in config.children:
        if child.key == "Host": host = str(child.values[0])
        elif child.key == "Port": port = str(int(child.values[0]))
        elif child.key == "Name": node = str(child.values[0])
        else: collectd.warning("Filebeat plugin: Unknown config key "+child.key)
    CONFIGS.append({
        "host": host,
        "port": port,
        "node": node,
        "url": "http://"+host+":"+port+"/debug/vars"
    })
collectd.register_config(config_callback)
collectd.register_read(read_callback)
|
#!c:\users\samb5\documents\visual studio 2015\Projects\django_azure\django_azure\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
    # Delegate to Django's CLI (equivalent of manage.py / django-admin).
    management.execute_from_command_line()
|
from .api import Legalizer
from .base import LegalizerBase
@Legalizer.register
class GraphDefLegalizer(LegalizerBase):
    """Legalizer that maps TensorFlow GraphDef graphs onto generic ops."""
    TARGET = 'tensorflow'

    class _OpTypeRenamePostProcessing(object):
        # TF-specific op type -> generic op type.
        _RENAME_MAP = {
            'BatchMatMulV2': 'MatMul',
            # FIXME: need to update matcher before adding this line
            # 'Add': 'AddOperator',
        }

        @classmethod
        def apply(cls, ugraph):
            """Rewrite op types in-place according to _RENAME_MAP."""
            for old_type, new_type in cls._RENAME_MAP.items():
                for op_info in ugraph.get_ops_by_type(old_type):
                    op_info.op_type = new_type

    def legalize_ops(self, ugraph):
        '''Legalize ops to generic ops in given graph
        '''
        if ugraph.lib_name != self.TARGET:
            raise ValueError(
                'expecting tensorflow graph, get {}'.format(ugraph.lib_name)
            )
        self._OpTypeRenamePostProcessing.apply(ugraph)
        return ugraph

    def legalize_dtype(self, ugraph):
        '''Legalize data types of tensors in given graph
        '''
        if ugraph.lib_name != self.TARGET:
            raise ValueError(
                'expecting tensorflow graph, get {}'.format(ugraph.lib_name)
            )
        return ugraph
|
import sample_oauth1_code

# BUG FIX: get_tokens was called as a bare name but lives in
# sample_oauth1_code — the original raised NameError. Qualify the call.
sample_oauth1_code.get_tokens(
    request_token_url='https://www.tumblr.com/docs/en/api/v2',
    client_key='',
    client_secret='',
)
import base64
import boto3
import json
import os
ddb_client = boto3.client('dynamodb')  # model registry lookups
lambda_client = boto3.client('lambda')  # downstream model invocations
table_name = os.environ['STM_TABLE_NAME']  # registry table (required env var)
def process_standard_record(parsed, decoded):
    """Route a standard record (one carrying txnId/modelId) to its Lambda.

    parsed: dict decoded from the record's JSON payload.
    decoded: the raw JSON string; forwarded verbatim as the Lambda payload.
    """
    txn_id = parsed['txnId']
    model_id = parsed['modelId']
    print 'processing status for transaction {} model {}'.format(
        txn_id, model_id
    )
    # Look up the Lambda function registered for this model id.
    response = ddb_client.get_item(
        TableName=table_name,
        Key={
            'modelId': {'S': model_id}
        }
    )
    if not 'Item' in response:
        print 'Model {} not present in {}'.format(model_id, table_name)
        return
    item = response['Item']
    fn_name = item['functionName']['S']
    print 'calling function {} with payload {}'.format(fn_name, decoded)
    # Fire-and-forget invocation ('Event' = async, no response payload).
    response = lambda_client.invoke(
        FunctionName=fn_name,
        InvocationType='Event',
        #LogType='Tail',
        #ClientContext=base64.b64encode('{"txnId":"what, me worry?"}'),
        Payload=decoded
    )
    print response
def process_non_standard_record(parsed, decoded):
    """Route a non-standard record (no txnId) to the fixed xtrac-model Lambda.

    Only records containing an 'XtracEvent' key are handled; anything else
    is logged and dropped.
    """
    if not 'XtracEvent' in parsed:
        print 'Event is the most non standard event in history, maybe ever'
        return
    # All xtrac events share a single well-known registry entry.
    model_id = 'xtrac-model'
    response = ddb_client.get_item(
        TableName=table_name,
        Key={
            'modelId': {'S': model_id}
        }
    )
    if not 'Item' in response:
        print 'Model {} not present in {}'.format(model_id, table_name)
        return
    item = response['Item']
    fn_name = item['functionName']['S']
    print 'calling function {} with payload {}'.format(fn_name, decoded)
    # Fire-and-forget invocation ('Event' = async, no response payload).
    response = lambda_client.invoke(
        FunctionName=fn_name,
        InvocationType='Event',
        #LogType='Tail',
        #ClientContext=base64.b64encode('{"txnId":"what, me worry?"}'),
        Payload=decoded
    )
def lambda_handler(event, context):
    """Entry point: decode each Kinesis record and dispatch by record shape."""
    print 'event: {}'.format(event)
    records = event['Records']
    for rec in records:
        # Kinesis record payloads arrive base64-encoded.
        data = rec['kinesis']['data']
        decoded = base64.b64decode(data)
        print 'decoded record data: {}'.format(decoded)
        print 'parse data'
        parsed = json.loads(decoded)
        print parsed
        # The presence of txnId marks the "standard" record shape.
        if 'txnId' in parsed:
            process_standard_record(parsed, decoded)
        else:
            process_non_standard_record(parsed, decoded)
|
# Print 1 and 2; when 2 is reached, announce it and stop the loop.
for number in range(1, 3):
    print(number)
    if number == 2:
        print("2 found")
        print("1 and 2 are going to print")
        print(" new line")
        break
|
import matplotlib.pyplot as plt
#matplotlib.org basically matlab graphs in python
import data as d
# Load all purchases from the local data module (list of dicts).
purchases = d.selectAllPurchases() #return list of dictionary
# Per purchase: numeric cost and category name truncated to 10 characters.
costs = list(map(lambda m: float(m.get('Cost')), purchases))
category = list(map(lambda m: m.get('CategoryName')[0:10], purchases))
print(costs[0])
# Wide, short bar chart of cost per category.
plt.figure(figsize=(40,3))
plt.bar(category, costs)
plt.show()
|
import os
class Config(object):
    """Application configuration sourced from environment variables."""
    APP_NAME = os.getenv("APP_NAME", "Commentaria")
    SECRET_KEY = os.getenv("SECRET_KEY")
    DATABASE_URL = os.getenv("DATABASE_URL")
    # Falls back to DATABASE_URL when no explicit SQLAlchemy URI is set.
    SQLALCHEMY_DATABASE_URI = os.getenv("SQLALCHEMY_DATABASE_URI", DATABASE_URL)
    MAIL_SERVER = os.getenv("MAIL_SERVER")
    MAIL_PORT = os.getenv("MAIL_PORT")
    # BUG FIX: os.getenv returned None when MAIL_USE_TLS was unset, so
    # .lower() raised AttributeError at import time. Default to "" (falsy).
    MAIL_USE_TLS = os.getenv("MAIL_USE_TLS", "")
    if MAIL_USE_TLS.lower() in ("", "0", "f", "false"):
        MAIL_USE_TLS = False
    else:
        MAIL_USE_TLS = True
    MAIL_USERNAME = os.getenv("MAIL_USERNAME")
    MAIL_PASSWORD = os.getenv("MAIL_PASSWORD")
    DEV_MAIL = os.getenv("DEV_MAIL")
    CLOUDINARY_CLOUD_NAME = os.getenv("CLOUDINARY_CLOUD_NAME")
    CLOUDINARY_API_KEY = os.getenv("CLOUDINARY_API_KEY")
    CLOUDINARY_SECRET = os.getenv("CLOUDINARY_SECRET")
|
# Convert a number of seconds (read from stdin) into H:M:S.
seconds = int(input("Введите целое число - "))
second = seconds % 60
minutes = seconds % 3600 // 60
# BUG FIX: the hours divisor was mistyped as 3660; an hour has 3600 seconds.
hours = seconds // 3600
print('%d:%d:%d' % (hours, minutes, second))
|
# Interactive weight converter between pounds and kilograms.
weight = int(input('Enter the weight: ' ))
unit = input('(l )bs or (k)g : ')
if unit.lower() == 'l':
    # Pounds -> kilograms (1 lb ≈ 0.45 kg).
    converted = weight * 0.45
    print(f'You are {converted} kilos')
else:
    # Any other answer is treated as kilograms -> pounds.
    converted =weight /0.45
    print(f"you are {converted} lbs")
|
class Node(object):
    """
    Class to represent a node in the circuit. A node is simply a location
    where multiple components connect.
    """
    def __init__(self, node_id):
        """
        Constructs a node, assigning it a unique identifier.
        """
        self.node_id = node_id
        self.components = []
    def __hash__(self):
        """
        Hash a node based on the node identifier.
        """
        return hash(self.node_id)
    def __eq__(self, other):
        """
        Check if nodes are equal based on the node identifier.

        BUG FIX: returns NotImplemented for non-Node operands instead of
        raising AttributeError, so comparisons against other types behave
        like the rest of Python.
        """
        if not isinstance(other, Node):
            return NotImplemented
        return self.node_id == other.node_id
    def __lt__(self, other):
        """
        Compares nodes based on the node identifier. Can be useful to
        sort nodes for debugging purposes, for example.
        """
        if not isinstance(other, Node):
            return NotImplemented
        return self.node_id < other.node_id
    def __repr__(self):
        """
        Produce a string representation of the node from the node identifer
        and the set of components that converge at this node.
        """
        component_names = [c.get_name() for c in self.components]
        return "Node {} - components: {}".format(self.node_id, component_names)
    def __str__(self):
        """
        Produce a string representation of the node from the node identifer
        and the set of components that converge at this node.
        """
        return "%r" % self
    def add_component(self, component):
        """
        Hook up a new component to this node.
        """
        self.components.append(component)
|
#!/usr/bin/env python3
class StringOperations:
    """Small collection of string utility checks."""

    @staticmethod
    def isPalindrome(string):
        """Return True iff the string reads the same forwards and backwards."""
        return string == string[::-1]

    @staticmethod
    def isPalindromeBrute(string):
        """Palindrome check comparing the first half to the reversed second half.

        Odd-length strings skip the middle character. BUG FIX: the methods
        lacked @staticmethod (calling them on an instance mis-bound `self`),
        and `int(len/2)` float division is replaced with integer `//`.
        """
        strlen = len(string)
        half = strlen // 2
        if strlen % 2 != 0:
            strFirstHalf = string[:half]
            strSecondHalf = string[half + 1:]
        else:
            strFirstHalf = string[:half]
            strSecondHalf = string[half:]
        for i, j in zip(strFirstHalf, reversed(strSecondHalf)):
            if i != j:
                return False
        return True


print(StringOperations.isPalindrome('abcdcba'))
print(StringOperations.isPalindrome('rahul'))
print(StringOperations.isPalindromeBrute('abcdcba'))
print(StringOperations.isPalindromeBrute('rahul'))
|
import pygame
from settings import Settings
import game_functions as gf
from boy import Boy
from ball import Ball
from pygame.sprite import Group
def run_game():
    """Run the game: initialize pygame, build the sprites, then loop forever."""
    # Initialize pygame
    pygame.init()
    # Load settings
    my_settings = Settings()
    # Create the screen surface
    screen = pygame.display.set_mode(
        (my_settings.screen_width, my_settings.screen_height))
    # Create the boy sprite
    boy = Boy(screen, my_settings)
    # Sprite group holding the balls
    balls = Group()
    while True:
        # Handle keyboard/window events
        gf.check_events(boy)
        # Update the boy
        gf.update_boy(screen, my_settings, boy)
        # Update the balls
        gf.update_ball(screen, my_settings, balls, boy)
        # Redraw the screen
        gf.update_screen(screen, my_settings, boy, balls)
run_game()
|
import glob
import rpm
import unittest
M = None  # module under test; bound in __main__ (its filename contains dashes)
ErlDrvDep = ""  # expected "erlang(erl_drv_version) = X" string; set in __main__
ErlNifDep = ""  # expected "erlang(erl_nif_version) = X" string; set in __main__
class TestAllMethods(unittest.TestCase):
    """Tests for erlang-find-requires.

    NOTE(review): these tests inspect real files under /usr/lib*/erlang and
    require several erlang-* RPM packages to be installed on the host.
    """
    def test_sort_and_uniq(self):
        self.assertEqual(M.sort_and_uniq([1,2,2,2,2,4,3,2,1]), [1,2,3,4])
    def test_check_for_mfa(self):
        # This test requires erlang-erts RPM package installed
        ERLLIBDIR = glob.glob("/usr/lib*/erlang/lib")[0]
        filepath = glob.glob('/usr/lib*/erlang/lib/erts-*/ebin/erlang.beam')[0]
        self.assertEqual(M.check_for_mfa("%s/*/ebin" % ERLLIBDIR, {}, ('erlang', 'load_nif', 2)), filepath)
    def test_inspect_so_library_nif(self):
        # This test requires erlang-crypto RPM package installed
        filepath = glob.glob("/usr/lib*/erlang/lib/crypto-*/priv/lib/crypto.so")[0]
        self.assertEqual(M.inspect_so_library(filepath, 'nif_init', 'erlang(erl_nif_version)'), ErlNifDep)
    def test_inspect_so_library_drv(self):
        # This test requires erlang-erlsyslog RPM package installed
        filepath = glob.glob("/usr/lib*/erlang/lib/erlsyslog-*/priv/erlsyslog_drv.so")[0]
        self.assertEqual(M.inspect_so_library(filepath, 'driver_init', 'erlang(erl_drv_version)'), ErlDrvDep)
    def test_inspect_beam_file_arch(self):
        Deps = ['erlang-erts(x86-64)', 'erlang-kernel(x86-64)', 'erlang-stdlib(x86-64)']
        self.assertEqual(M.inspect_beam_file('x86-64', "./test.beam"), Deps)
    def test_inspect_beam_file_noarch(self):
        Deps = ['erlang-erts', 'erlang-kernel', 'erlang-stdlib']
        self.assertEqual(M.inspect_beam_file('noarch', "./test.beam"), Deps)
if __name__ == "__main__":
    # Import the module under test via __import__ (its filename has dashes).
    M = __import__("erlang-find-requires")
    # Query the RPM DB for erlang-erts to learn the provided dep versions.
    ts = rpm.TransactionSet()
    mi = ts.dbMatch('name', "erlang-erts")
    h = next(mi)
    # Map provide-name -> version out of the header's dependency set.
    ds = dict(map(lambda x: x[0].split(" ")[1::2], h.dsFromHeader('providename')))
    ErlDrvDep = "erlang(erl_drv_version) = %s" % ds['erlang(erl_drv_version)']
    ErlNifDep = "erlang(erl_nif_version) = %s" % ds['erlang(erl_nif_version)']
    unittest.main()
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from keras.layers import Input
from keras.engine.topology import Container
from .util import full_static_shape
class LazyContainer(Container):
    '''Like Container. But lazy.

    Defers building the underlying keras Container until the first call,
    when the concrete input tensor (and hence its static shape) is known.
    '''
    def __init__(self, container_function, use_method_disposable=True):
        # Function mapping an Input layer to output tensor(s); called lazily.
        self._container_function = container_function
        self._lazy_has_run = False
        # True: build from a throwaway Input on first call, then run normally.
        # False: build from the real tensor and return outputs directly once.
        self.use_method_disposable = use_method_disposable
        # Delay rest of construction until first call
        # NOTE(review): Container.__init__ is deliberately NOT called here;
        # it runs inside the first __call__ below.
    def __call__(self, x, mask=None):
        if not self._lazy_has_run:
            # Make short-lived Input Layers for each x this was called with
            # TODO: handle tuple or list x
            x_shape = full_static_shape(x) # Uses var._keras_shape or var.get_shape()
            if self.use_method_disposable:
                inp_layer = Input(batch_shape=x_shape,
                                  dtype=x.dtype,
                                  name='tmp_input_from__%s' % x.name.replace('/','_').replace(':','_'))
            else:
                print 'Warning: using non-disposable approach. May not work yet.'
                inp_layer = Input(tensor=x,
                                  batch_shape=x_shape,
                                  dtype=x.dtype, name='real_input_from__%s' % x.name.replace('/','_').replace(':','_'))
            # Call function of inputs to get output tensors
            outputs = self._container_function(inp_layer)
            # Initialize entire Container object here (finally)
            super(LazyContainer, self).__init__(inp_layer, outputs)
            self._lazy_has_run = True
            if not self.use_method_disposable:
                return outputs
        # Non-disposable mode: actually call the Container only the *second* and later times
        # Disposable mode: call the Container now
        ret = super(LazyContainer, self).__call__(x, mask=mask)
        return ret
|
import dash_html_components as html
import db_interface
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
from plotly import graph_objs as go
import db_interface
import dash
from app import app, indicator, indicator_with_value
from datetime import date
import dateutil.parser
import common_db_calls_saved
import pandas as pd
import flask
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
from plotly import graph_objs as go
import db_interface
import pytz
from datetime import timedelta, datetime
from app import app, indicator, indicator_with_value
import pandas as pd
import pandas.io.sql as psql
import plotly.graph_objs as go
from collections import defaultdict
from collections import Counter
from itertools import combinations
def get_count_of_company_layout():
    """Plotly layout for the customers-per-company bar chart."""
    return go.Layout(title='Number of Customers by Company')
def get_count_of_company_data(df, df_attr):
    """Bar trace of distinct consumer counts per company.

    Joins orders to consumer attributes on consumerid, then counts unique
    consumers for each companyid.
    """
    merged_df = df.join(
        df_attr.set_index('consumerid'),
        on='consumerid', how="outer", lsuffix='_left', rsuffix='_right',
    )
    counts = merged_df.groupby(['companyid'])['consumerid'].nunique().reset_index()
    bar = go.Bar(
        x=counts['companyid'].values,
        y=counts['consumerid'].values,
        name='Count by Company',
    )
    return [bar]
def get_amount_of_company_layout():
    """Plotly layout for the amount-spent-per-company bar chart."""
    return go.Layout(title='Amount spent by a Company')
def get_amount_of_company_data(df, df_attr):
    """Bar trace of total money spent per company (from consumer attributes)."""
    totals = df_attr.groupby(['companyid'])['moneyspent'].sum().reset_index()
    bar = go.Bar(
        x=totals['companyid'].values,
        y=totals['moneyspent'].values,
        name='Amount by Company',
    )
    return [bar]
def get_list_of_stores():
    """Dropdown options (label == value) for every store in the database."""
    return [
        {"label": row[0], "value": row[0]}
        for row in db_interface.get_list_of_stores()
    ]
list_of_stores = get_list_of_stores()
# Page layout: chain-wide charts, a store selector, store-specific charts,
# and a recommendations section (all filled in by the callbacks below).
layout = [
    html.Div(id="chain_company_content", className="row", style={"margin": "2% 3%"}),
    html.Div(
        [
            html.Div(
                dcc.Dropdown(
                    id="store_name",
                    options=list_of_stores,
                    value=list_of_stores[0]['value'],
                    clearable=False,
                ),
                className="two columns",
            ),
        ],
        className="row",
        style={"marginBottom": "10"},
    ),
    html.Div(id="store_company_content", className="row", style={"margin": "2% 3%"}),
    html.Div(
        [
            html.H1('Recommendations')
        ],),
    html.Div(id="store_specific_company_recommendations", className="row", style={"margin": "2% 3%"}),
]
@app.callback(Output("chain_company_content", "children"), [Input("store_name", "value")])
def render_chain_content(store_name):
query = "SELECT orderid, consumerid, storeid, dishid, price, discount, coupon, orderdate, ordertime FROM order_details;"
df = psql.read_sql(query, db_interface.conn)
query_attr = "SELECT * FROM consumer_attributes;"
df_attr = psql.read_sql(query_attr, db_interface.conn)
layout = [
html.Div(
[
html.Div(
[
dcc.Graph(
id="chain_company_count",
figure=go.Figure(data = get_count_of_company_data(df, df_attr),
layout = get_count_of_company_layout()),
config=dict(displayModeBar=False),
style={"height": "89%", "width": "98%"},
),
],
className="six columns chart_div"
),
html.Div(
[
dcc.Graph(
id="chain_company_amount",
figure=go.Figure(data = get_amount_of_company_data(df, df_attr),
layout = get_amount_of_company_layout()),
config=dict(displayModeBar=False),
style={"height": "89%", "width": "98%"},
),
],
className="six columns chart_div"
),
])
]
return layout
@app.callback(Output("store_company_content", "children"), [Input("store_name", "value")])
def render_store_content(store_name):
store_id = common_db_calls_saved.store_name_to_id[store_name]
store_id = "'{}'".format(store_id)
query = "SELECT orderid, consumerid, storeid, dishid, price, discount, coupon, orderdate, ordertime FROM order_details WHERE storeId={};".format(store_id)
df = psql.read_sql(query, db_interface.conn)
query_attr = "SELECT * FROM consumer_attributes;"
df_attr = psql.read_sql(query_attr, db_interface.conn)
layout = [
html.Div(
[
html.Div(
[
dcc.Graph(
id="store_consumer_count",
figure=go.Figure(data = get_count_of_company_data(df, df_attr),
layout = get_count_of_company_layout()),
config=dict(displayModeBar=False),
style={"height": "89%", "width": "98%"},
),
],
className="six columns chart_div"
),
html.Div(
[
dcc.Graph(
id="store_company_amount",
figure=go.Figure(data = get_amount_of_company_data(df, df_attr),
layout = get_amount_of_company_layout()),
config=dict(displayModeBar=False),
style={"height": "89%", "width": "98%"},
),
],
className="six columns chart_div"
),
]),
]
return layout
@app.callback(Output("store_specific_company_recommendations", "children"), [Input("store_name", "value")])
def employee_recommendations_content(store_name):
    """Dash callback: static, per-store recommendation blurbs.

    :param store_name: store selected in the "store_name" dropdown
    :return: list of html.Div children; an empty list for stores without
             canned recommendations (the original left ``layout`` unbound
             for unknown stores and crashed with UnboundLocalError)
    """
    # Store -> its two recommendation sentences.  Strings are reproduced
    # verbatim from the original branches (including the punctuation
    # differences) so the rendered output is unchanged.
    messages = {
        'Ulsoor Road': (
            'In Ulsoor Road store, Company1 can attract more customers. Maybe better marketing there.',
            'In Ulsoor Road store, Company2 has lower per customer average moneyspent. Trying upselling.',
        ),
        'IndiraNagar': (
            'In IndiraNagar store, Company1 can attract more customers. Maybe better marketing there.',
            'In IndiraNagar store, Company2 has lower per customer average moneyspent. Trying upselling',
        ),
        'Airport': (
            'In Airport store, Company1 can attract more customers. Maybe better marketing there.',
            'In Airport store, Company2 has lower per customer average moneyspent. Trying upselling',
        ),
    }
    # Unknown stores render nothing instead of raising.
    return [html.Div(html.H5(text)) for text in messages.get(store_name, ())]
|
import unittest
from function.function_01 import *
class loginTest(unittest.TestCase):
    """Smoke test for the ``login`` flow imported from function.function_01."""
    def setUp(self):
        # No per-test preparation required.
        pass
    def test_login(self):
        # The printed string means "running test case: login" (runtime output,
        # intentionally left untranslated).
        print("执行用例:登录")
        # NOTE(review): hard-coded test-account credentials; move to config
        # if this ever runs outside a throwaway test environment.
        login("1451953028@qq.com","zdd123456")
    def tearDown(self):
        # No per-test cleanup required.
        pass
# if __name__ == "__main__":
# unittest.main() |
"""
提供GUI界面
"""
import tkinter as tk
from enum import Enum, unique
import PySimpleGUI as sg
import matplotlib.backends.tkagg as tkagg
from PIL import Image, ImageTk
from matplotlib.backends.backend_tkagg import FigureCanvasAgg
from sensor.algorithm import AlgorithmManager
from sensor.algorithm import CycleDetectResult
from sensor.plot import PlotManager
from sensor.sensor import SensorManager
from settings import SENSOR_DATA
class GuiManager:
    """PySimpleGUI front end for the gait-analysis demo.

    Owns the sensor/algorithm/plot managers and drives a
    read-update-redraw loop in :meth:`run`.
    """
    @unique
    class KEYS(Enum):
        # Element keys for the window.  The (Chinese) string values double as
        # human-readable descriptions of each widget and are runtime values —
        # do not change them.
        CANVAS_RAW_DATA = "用于显示原始数据的区域"
        CANVAS_GAIT_ACC = "显示加速度步态数据的区域"
        CANVAS_GEI_ACC = "显示加速度GEI的区域"
        CANVAS_GAIT_GYRO = "显示陀螺仪步态数据的区域"
        CANVAS_GEI_GYRO = "显示陀螺仪GEI的区域"
        CANVAS_GAIT_ANG = "显示欧拉角步态数据的区域"
        CANVAS_GEI_ANG = "显示欧拉角GEI的区域"
        CANVAS_STABILITY = "步态稳定性图"
        IMAGE_STATUS = "当前运动状态的图片"
        TEXT_ACTIVITY = "动作识别结果"
        TEXT_WHO_YOU_ARE = "身份识别结果"
        TEXT_IS_WALK_LIKE_DATA0 = "当前是否像data0一样"
        TEXT_CYCLE_DETECT_HISTORY = "步态周期的检测历史"
        TEXT_ACC_CYCLE_FEATURE = "加速度周期的特征"
        TEXT_GYRO_CYCLE_FEATURE = "陀螺仪周期的特征"
        TEXT_ANG_CYCLE_FEATURE = "欧拉角周期的特征"
    def __init__(self):
        # Global GUI defaults: white background, black text.
        sg.SetOptions(background_color="#FFFFFF", element_background_color="#FFFFFF", text_color="#000000")
        # Managers that supply sensor data, algorithm results and figures.
        self.sensor_manager = SensorManager(SENSOR_DATA)
        self.algorithm_manager = AlgorithmManager(self.sensor_manager)
        self.plot_manager = PlotManager(self.sensor_manager, self.algorithm_manager)
        # NOTE(review): appears unused within this class — confirm before removal.
        self.text_init_placeholder = " " * 100
        # Build the GUI layout: raw data (left), three gait/GEI panels
        # (middle), status/result panels (right).
        self.layout = [
            [
                sg.Column([
                    [sg.Frame("原始数据", [
                        [sg.Canvas(size=(self.plot_manager.fig_raw_data.width, self.plot_manager.fig_raw_data.height),
                                   key=self.KEYS.CANVAS_RAW_DATA)]
                    ])]
                ]),
                sg.Column([
                    [sg.Frame("加速度步态", [
                        [sg.Canvas(size=(
                            self.plot_manager.fig_gait_acc.fig_width,
                            self.plot_manager.fig_gait_acc.fig_height),
                            key=self.KEYS.CANVAS_GAIT_ACC)],
                        [sg.Canvas(size=(
                            self.plot_manager.fig_gait_acc.fig_width,
                            self.plot_manager.fig_gait_acc.fig_height),
                            key=self.KEYS.CANVAS_GEI_ACC)],
                        [sg.Text(text=self.algorithm_manager.acc_data_pre_process.get_cycle_feature_for_gui(),
                                 key=self.KEYS.TEXT_ACC_CYCLE_FEATURE)]])
                     ],
                    [sg.Frame("陀螺仪步态", [
                        [sg.Canvas(size=(
                            self.plot_manager.fig_gait_gyro.fig_width,
                            self.plot_manager.fig_gait_gyro.fig_height),
                            key=self.KEYS.CANVAS_GAIT_GYRO)],
                        [sg.Canvas(size=(
                            self.plot_manager.fig_gait_gyro.fig_width,
                            self.plot_manager.fig_gait_gyro.fig_height),
                            key=self.KEYS.CANVAS_GEI_GYRO)],
                        [sg.Text(text=self.algorithm_manager.gyro_data_pre_process.get_cycle_feature_for_gui(),
                                 key=self.KEYS.TEXT_GYRO_CYCLE_FEATURE)]])
                     ],
                    [sg.Frame("欧拉角步态", [
                        [sg.Canvas(size=(
                            self.plot_manager.fig_gait_ang.fig_width,
                            self.plot_manager.fig_gait_ang.fig_height),
                            key=self.KEYS.CANVAS_GAIT_ANG)],
                        [sg.Canvas(size=(
                            self.plot_manager.fig_gait_ang.fig_width,
                            self.plot_manager.fig_gait_ang.fig_height),
                            key=self.KEYS.CANVAS_GEI_ANG)],
                        [sg.Text(text=self.algorithm_manager.ang_data_pre_process.get_cycle_feature_for_gui(),
                                 key=self.KEYS.TEXT_ANG_CYCLE_FEATURE)]],
                              ),
                     ],
                ]),
                sg.Column([
                    [sg.Frame("步态稳定性", [
                        [sg.Canvas(size=(
                            self.plot_manager.fig_stability.fig_width,
                            self.plot_manager.fig_stability.fig_height),
                            key=self.KEYS.CANVAS_STABILITY
                        )]
                    ])],
                    [sg.Frame("步行检测", [
                        [sg.Text(text=" " * 50, key=self.KEYS.TEXT_IS_WALK_LIKE_DATA0)],
                    ])],
                    [sg.Frame("身份识别结果", [
                        [sg.Text(text=" ", key=self.KEYS.TEXT_WHO_YOU_ARE)],
                    ])],
                    [sg.Frame("动作识别结果", [
                        [sg.Text(text=" ", key=self.KEYS.TEXT_ACTIVITY)],
                    ])],
                    [sg.Frame("步态周期历史", [
                        [sg.Text(text=" " * 100, key=self.KEYS.TEXT_CYCLE_DETECT_HISTORY)]  # width 100 so the text is not clipped
                    ])]
                ])
            ],
        ]
        self.window = sg.Window("demo").Layout(self.layout).Finalize()
    def _get_element(self, key):
        """
        Fetch a GUI element by key (avoids repeating self.window.FindElement()).
        :return: the PySimpleGUI element registered under ``key``
        """
        return self.window.FindElement(key)
    def _plot_pic(self, figure, gait_canvas):
        """
        Render a matplotlib figure onto a tk canvas. The caller MUST keep the
        returned PhotoImage alive — if it is garbage-collected, tkinter shows
        nothing.
        :param gait_canvas: target tk canvas
        :param figure: matplotlib figure to draw
        :return: the tk PhotoImage backing the drawing (keep a reference!)
        """
        figure_canvas_agg = FigureCanvasAgg(figure)
        figure_canvas_agg.draw()
        figure_x, figure_y, figure_w, figure_h = figure.bbox.bounds
        figure_w, figure_h = int(figure_w), int(figure_h)
        photo = tk.PhotoImage(master=gait_canvas, width=figure_w, height=figure_h)
        gait_canvas.create_image(figure_w / 2, figure_h / 2, image=photo)
        tkagg.blit(photo, figure_canvas_agg.get_renderer()._renderer, colormode=2)
        return photo
    @staticmethod
    def _update_gei_pic(gei_canvas, gei):
        """
        Redraw the GEI (gait energy image) on its canvas.
        :return: the PhotoImage (keep a reference), or None if no GEI yet
        """
        if gei is None:
            return None
        figure_photo_gei = ImageTk.PhotoImage(image=Image.fromarray(gei))
        gei_canvas.create_image(0, 0, image=figure_photo_gei, anchor=tk.NW)
        return figure_photo_gei
    def _update_gait_and_gei(self, fig, gait_canvas, gei_canvas, gei):
        """
        Update a gait figure and its GEI in one call.
        :param fig: matplotlib gait figure
        :param gait_canvas: canvas for the gait figure
        :return: (gait PhotoImage, GEI PhotoImage) — keep both references
        """
        gait = self._plot_pic(fig, gait_canvas)
        gei = self._update_gei_pic(gei_canvas, gei)
        return gait, gei
    def update_data(self):
        """
        Refresh all data in the program (sensor buffers, then algorithms).
        :return:
        """
        # Refresh the raw display data.
        self.sensor_manager.update_display_raw_data()
        # Recompute all algorithm results.
        self.algorithm_manager.update_data()
    def update_fig(self):
        """
        Refresh every matplotlib figure shown by the program.
        :return:
        """
        # Raw-data figure.
        self.plot_manager.update_display_raw_data_fig()
        # Gait-cycle figures.
        self.plot_manager.update_gait_figure()
        # Gait-stability figure.
        self.plot_manager.fig_stability.update()
    def update_gui(self):
        """
        Push the refreshed figures/results into the window. Note: every
        PhotoImage created here must be returned to the caller, otherwise
        it is garbage-collected and nothing is displayed.
        :return: tuple of all PhotoImages created during this refresh
        """
        # Raw-data figure.
        raw_data_pic = self._plot_pic(self.plot_manager.fig_raw_data.fig,
                                      self._get_element(self.KEYS.CANVAS_RAW_DATA).TKCanvas)
        # Walking status: 0 = not walking, 1 = walking (unstable), 2 = walking (stable).
        try:
            m = {0: "非步行", 1: "步行未稳定", 2: "步行稳定"}
            v = "{0} 状态:{1} ".format(self.algorithm_manager.is_walking, m.get(self.algorithm_manager.stability[-1]))
        except Exception as err:
            # Stability history may be empty early on; show a blank status.
            v = ""
        self._get_element(self.KEYS.TEXT_IS_WALK_LIKE_DATA0).Update(value=v)
        # Gait-cycle figures (acc / gyro / euler-angle) plus their GEIs.
        acc_gait_and_gei_pic = self._update_gait_and_gei(self.plot_manager.fig_gait_acc.fig,
                                                         self._get_element(self.KEYS.CANVAS_GAIT_ACC).TKCanvas,
                                                         self._get_element(self.KEYS.CANVAS_GEI_ACC).TKCanvas,
                                                         self.plot_manager.fig_gait_acc.gei)
        gyro_gait_and_gei_pic = self._update_gait_and_gei(self.plot_manager.fig_gait_gyro.fig,
                                                          self._get_element(self.KEYS.CANVAS_GAIT_GYRO).TKCanvas,
                                                          self._get_element(self.KEYS.CANVAS_GEI_GYRO).TKCanvas,
                                                          self.plot_manager.fig_gait_gyro.gei)
        ang_gait_and_gei_pic = self._update_gait_and_gei(self.plot_manager.fig_gait_ang.fig,
                                                         self._get_element(self.KEYS.CANVAS_GAIT_ANG).TKCanvas,
                                                         self._get_element(self.KEYS.CANVAS_GEI_ANG).TKCanvas,
                                                         self.plot_manager.fig_gait_ang.gei)
        # Identity-recognition result.
        self._get_element(self.KEYS.TEXT_WHO_YOU_ARE).Update(value="{0}号志愿者".format(self.algorithm_manager.who_you_are))
        gui_gait_stability = self._plot_pic(self.plot_manager.fig_stability.fig,
                                            self._get_element(self.KEYS.CANVAS_STABILITY).TKCanvas)
        self._get_element(self.KEYS.TEXT_CYCLE_DETECT_HISTORY).Update(value=" ".join(["{0}:{1}".format(
            cycle_detect_result.value[0], self.algorithm_manager.cycle_detect_history[cycle_detect_result])
            for cycle_detect_result in
            CycleDetectResult]))
        # Per-sensor cycle features, only once a full cycle has been seen.
        if self.algorithm_manager.acc_data_pre_process.last_cycle is not None:
            self._get_element(self.KEYS.TEXT_ACC_CYCLE_FEATURE) \
                .Update(value=self.algorithm_manager.acc_data_pre_process.get_cycle_feature_for_gui())
        if self.algorithm_manager.gyro_data_pre_process.last_cycle is not None:
            self._get_element(self.KEYS.TEXT_GYRO_CYCLE_FEATURE) \
                .Update(value=self.algorithm_manager.gyro_data_pre_process.get_cycle_feature_for_gui())
        if self.algorithm_manager.ang_data_pre_process.last_cycle is not None:
            self._get_element(self.KEYS.TEXT_ANG_CYCLE_FEATURE) \
                .Update(value=self.algorithm_manager.ang_data_pre_process.get_cycle_feature_for_gui())
        self._get_element(self.KEYS.TEXT_ACTIVITY).Update(value=self.algorithm_manager.get_current_activity())
        # Mirror the results to the connected sensor client, if any.
        if self.sensor_manager.conn:
            self.sensor_manager.send_msg(bytes("步行检测{0}\n步行状态{1}\n身份识别{2}\n" \
                                               "{3}".format(
                self.algorithm_manager.is_walking,
                self.algorithm_manager.stability[-1],
                self.algorithm_manager.who_you_are,
                " ".join(["{0}:{1}".format(
                    cycle_detect_result.value[0], self.algorithm_manager.cycle_detect_history[cycle_detect_result])
                    for cycle_detect_result in
                    CycleDetectResult])
            ), encoding="utf-8"))
        return raw_data_pic, acc_gait_and_gei_pic, gyro_gait_and_gei_pic, ang_gait_and_gei_pic, gui_gait_stability
    def run(self):
        # Main loop: poll the window, then refresh data, figures and widgets.
        while True:
            event, values = self.window.Read(timeout=5)
            # A falsy event means the window was closed.
            if not event:
                break
            self.update_data()
            self.update_fig()
            # Keep the returned PhotoImages alive until the next iteration.
            gui = self.update_gui()
|
# -*- coding:utf-8 -*-
class Cat:
    """Cat participant in the duck-typing demo below."""

    def say(self):
        """Print this animal's self-introduction."""
        print('I am a cat.')
class Dog:
    """Dog participant in the duck-typing demo below."""

    def say(self):
        """Print this animal's self-introduction."""
        print('I am a dog.')
class Duck:
    """Duck participant in the duck-typing demo below."""

    def say(self):
        """Print this animal's self-introduction."""
        print('I am a duck.')
# Python is flexible here: any class that implements ``say`` takes part in the
# polymorphism — no common base class required (duck typing).
animal = Cat
animal().say()
# Polymorphism only needs the classes to define the same method.
animal_list = [Cat, Dog, Duck]
for an in animal_list:
    an().say()
"""
class Animal:
    def say(self):
        print('I am a animal.')
# 需要继承Animal,并重写say方法
class Cat(Animal):
    def say(self):
        print('I am a cat.')
# Java 中定义需要指定类型
Animal an = new Cat()
an.say()
"""
li1 = ['i1', 'i2']
li2 = ['i3', 'i4']
tu = ('i5', 'i6')
s1 = set()
s1.add('i7')
s1.add('i8')
# Shift of perspective: ``extend`` accepts any iterable — list, tuple, set,
# even a user-defined iterable object.
li1.extend(li2) # iterable
li1.extend(tu)
li1.extend(s1)
print(li1)
|
# 宝石与石头 (Jewels and Stones)
class Solution:
    def numJewelsInStones(self, J, S):
        """Count how many of the stones in ``S`` are jewels.

        :type J: str  (each character is a distinct jewel type)
        :type S: str  (the stones you have)
        :rtype: int
        """
        # Set membership is O(1), so the total cost drops from
        # O(len(J) * len(S)) to O(len(J) + len(S)).  Also removes the
        # shadowing of the ``sum`` builtin and a leftover debug print.
        jewels = set(J)
        return sum(1 for stone in S if stone in jewels)
|
##############################################################################
#
# regress_public.gypi
# Copyright (c) 2014 Raphael DINGE
#
#Tab=3########################################################################
{
   # Single executable target that runs the flip regression tests.
   'targets': [
      {
         'target_name': 'regress',
         'type': 'executable',
         # macOS/Xcode: C++11 and default symbol visibility.
         'xcode_settings': {
            'CLANG_CXX_LANGUAGE_STANDARD': 'c++0x',
            'OTHER_CFLAGS': [
               '-fvisibility=default',
            ],
         },
         # Throw on fatal errors and enable spec/regression checks.
         'defines': [
            'flip_FATAL_MODE=flip_FATAL_MODE_THROW',
            'flip_SPECS_CHECK_FLAG',
            'flip_TEST_REGRESS_FLAG',
         ],
         'include_dirs': [
            '../../include',
         ],
         # Source list shared with the other gyp targets.
         'includes' : [
            'src.gypi',
         ],
         'configurations': {
            ##### Debug
            'Debug': {
               'msvs_settings': {
                  'VCLinkerTool': {
                     'AdditionalDependencies': [
                        '../lib/windows/Visual Studio 2013/Win32/flip.lib',
                     ],
                  },
               },
            },
            ##### Release
            'Release': {
               'msvs_settings': {
                  'VCLinkerTool': {
                     'AdditionalDependencies': [
                        '../lib/windows/Visual Studio 2013/Win32/flip.lib',
                     ],
                  },
               },
            },
         },
         # Platform-specific link settings: static lib on mac, x64 variants on win.
         'conditions': [
            ['OS=="mac"', {
               'link_settings': {
                  'libraries': [
                     '../../lib/macos/libflip.a',
                  ],
               },
            }],
            ['OS=="win"', {
               'configurations': {
                  ##### Debug_x64
                  'Debug_x64': {
                     'msvs_settings': {
                        'VCLinkerTool': {
                           'AdditionalDependencies': [
                              '../lib/windows/Visual Studio 2013/x64/flip.lib',
                           ],
                        },
                     },
                  },
                  ##### Release_x64
                  'Release_x64': {
                     'msvs_settings': {
                        'VCLinkerTool': {
                           'AdditionalDependencies': [
                              '../lib/windows/Visual Studio 2013/x64/flip.lib',
                           ],
                        },
                     },
                  },
               },
            }],
         ],
      },
   ]
}
|
from afthermal.text import Text, ByteStringVisitor, Bold, Node
import pytest
@pytest.fixture
def encoding():
    """Character encoding used to render Text nodes to bytes in these tests."""
    return 'ascii'
@pytest.fixture
def bsv(encoding):
    """ByteStringVisitor under test, bound to the test encoding."""
    return ByteStringVisitor(encoding)
def test_simple_text(bsv, encoding):
    """A bare Text node renders as its encoded payload, nothing more."""
    tx = Text(u'hello, world')
    assert bsv.visit(tx) == u'hello, world'.encode(encoding)
def test_simple_formatting(bsv, encoding):
    """Bold wraps its child's output in on/off control sequences."""
    tx = Text(u'hello')
    fmtx = Bold(tx)
    # \x1B\x45\x01 / \x1B\x45\x00 — presumably the printer's "bold on/off"
    # escape pair; confirm against afthermal's command tables.
    assert bsv.visit(fmtx) == (b'\x1B\x45\x01' + u'hello'.encode(encoding) +
                               b'\x1B\x45\x00')
def test_nested_formatting(bsv, encoding):
    """Nested Bold nodes emit a single on/off pair around their combined text;
    siblings outside the Bold stay unformatted."""
    tx = Node(Bold(Bold(Text(u'hello')), Text(u'world')), Text(u'nonbold'))
    assert bsv.visit(tx) == (
        b'\x1B\x45\x01' + u'helloworld'.encode(encoding) + b'\x1B\x45\x00' +
        'nonbold'.encode(encoding)
    )
|
from enum import Enum
from typing import Tuple
class Region(Enum):
    """Console-style region codes; ALL (255) is the region-free wildcard."""
    EUR, USA, JPN, KOR = range(4)
    ALL = 255
    @property
    def country_code(self) -> str:
        """ISO country code representing this region (raises for ALL)."""
        codes = {'EUR': 'GB', 'USA': 'US', 'JPN': 'JP', 'KOR': 'KR'}
        code = codes.get(self.name)
        if code is None:
            # ALL is a wildcard and maps to no single country.
            raise RuntimeError(f'{self} does not have a country code')
        return code
    @staticmethod
    def all_regions() -> Tuple['Region', ...]:
        """Every concrete region, i.e. all members except the ALL wildcard."""
        return tuple(r for r in Region if r is not Region.ALL)
|
from app import db
# Many-to-many association tables (no model classes needed — pure link tables).
# Users contributing to projects.
contributions = db.Table('contributors',
    db.Column('project_id', db.Integer, db.ForeignKey('projects.id'), primary_key=True),
    db.Column('contributor_id', db.Integer, db.ForeignKey('users.id'), primary_key=True)
)
# Skills a user possesses.
user_skills = db.Table('user_skills',
    db.Column('user_id', db.Integer, db.ForeignKey('users.id'), primary_key=True),
    db.Column('skill_id', db.Integer, db.ForeignKey('skills.id'), primary_key=True)
)
# Users who liked a project.
project_likes = db.Table('project_likes',
    db.Column('project_id', db.Integer, db.ForeignKey('projects.id'), primary_key=True),
    db.Column('user_id', db.Integer, db.ForeignKey('users.id'), primary_key=True)
)
# Users requesting to join a project.
project_requests = db.Table('project_requests',
    db.Column('project_id', db.Integer, db.ForeignKey('projects.id'), primary_key=True),
    db.Column('user_id', db.Integer, db.ForeignKey('users.id'), primary_key=True)
)
# Tags attached to a project.
project_tags = db.Table('project_tags',
    db.Column('project_id', db.Integer, db.ForeignKey('projects.id'), primary_key=True),
    db.Column('tag_id', db.Integer, db.ForeignKey('tags.id'), primary_key=True)
)
# Skills a project calls for.
project_skills = db.Table('project_skills',
    db.Column('project_id', db.Integer, db.ForeignKey('projects.id'), primary_key=True),
    db.Column('skill_id', db.Integer, db.ForeignKey('skills.id'), primary_key=True)
)
|
class Sort:
    """Half-interval membership search over a sorted list.

    ``sort_start`` picks the midpoint and linearly scans whichever half
    could contain the value.
    """

    def __init__(self):
        pass

    @staticmethod
    def search_(j, x, search):
        """Return True when x[j] equals the value being searched for."""
        return x[j] == search

    @staticmethod
    def sort_start(search, x):
        """Return True if ``search`` occurs in the sorted list ``x``.

        Fixes over the original:
        * guard for an empty list (previously ``x[-1]`` via a negative
          midpoint, or a crash);
        * integer ``//`` midpoint — ``/`` produced a float, and a float
          index raises TypeError in Python 3;
        * helpers are called through the class: bare ``method_name(...)``
          was a NameError, because a class body is not an enclosing scope
          for the functions defined inside it.
        """
        leng = len(x)
        if leng == 0:
            return False
        center_index = (leng - 1) // 2
        if x[center_index] > search:
            # Target (if present) lies strictly left of the midpoint.
            return Sort.method_name(0, center_index, search, x)
        return Sort.method_name(center_index, leng, search, x)

    @staticmethod
    def method_name(start, center_index, search, x):
        """Linear scan of x[start:center_index] for ``search``."""
        is_present = False
        for i in range(start, center_index):
            result = Sort.search_(i, x, search)
            if result:
                is_present = result
                break
        return is_present
|
#%%
from aiogram.dispatcher.filters.state import State, StatesGroup
class InterviewStates(StatesGroup):
    """aiogram FSM states for the interview conversation flow."""
    # Presumably the index of the question currently being asked — confirm
    # against the handlers that set this state.
    question_number = State()
    # State in which the interview results are presented/collected.
    results = State()
# Quick sanity check when the module is run directly.
if __name__ == '__main__':
    print(InterviewStates)
import urllib.request
from bs4 import BeautifulSoup
class Scraper:
    """Fetch a page and print the class lists of <div> elements carrying the
    "ng-star-inserted" class."""

    def __init__(self, site):
        """:param site: URL of the page to scrape"""
        self.site = site

    def scrape(self):
        """Download ``self.site`` and print each matching div's class list."""
        response = urllib.request.urlopen(self.site)
        html = response.read()
        soup = BeautifulSoup(html, 'html.parser')
        for tag in soup.find_all("div"):
            # BeautifulSoup returns multi-valued attributes like ``class``
            # as a list of strings (or None when absent).
            classes = tag.get("class")
            if classes is None:
                continue
            if "ng-star-inserted" in classes:
                # Fix: the original did ``"\n" + url`` with ``url`` a list,
                # which raises TypeError on the first match; join instead.
                print("\n" + " ".join(classes))
# Example usage: dump matching div classes from the HSE timetable page.
news = "http://ruz.hse.ru/ruz/main"
Scraper(news).scrape()
|
import time
import traceback
from threading import Lock, currentThread, Thread
import logging
logger = logging.getLogger(__name__)
MAX_THREADS = 200
def async_get_data(func, data, mapping=False):
    """
    Apply ``func`` to every element of ``data`` concurrently and collect the
    results (order is NOT preserved — it depends on thread scheduling).
    :param func: The function that receives a single element of data as its first parameter
    :param data: A set of data
    :param mapping: Whether or not the result should contain a mapping of data -> result
    :return: A list of results from calling func on each data element or a list of dicts in the format {'key': element, 'value': func_result}
    """
    ret = []
    ret_lock = Lock()

    def _func(_chunk):
        # Worker: process one chunk, retrying each element up to 5 times.
        counter = 0
        for element in _chunk:
            recovered = False
            for _attempt in range(5):  # try to recover only 5 times
                try:
                    if mapping:
                        data_to_append = {'key': element, 'value': func(element)}
                    else:
                        data_to_append = func(element)
                    # Fix: ``with`` releases the lock even if append raises;
                    # the bare acquire()/release() pair could leak the lock
                    # and deadlock every other worker.
                    with ret_lock:
                        ret.append(data_to_append)
                    counter += 1
                    if recovered:
                        logger.info('%s: %d/%d RECOVERED FROM EXCEPTION', currentThread().name, counter, len(_chunk))
                    else:
                        logger.info('%s: %d/%d', currentThread().name, counter, len(_chunk))
                    break
                except Exception:
                    logger.info('%s: %s', currentThread().name, traceback.format_exc())
                    recovered = True
            else:
                # All 5 attempts failed: the element is dropped from the
                # results — say so explicitly instead of failing silently.
                logger.info('%s: giving up on an element after 5 failed attempts', currentThread().name)
        logger.info('%s finished', currentThread().name)

    partitioned_data = partition(data, MAX_THREADS)
    thread_pool = []
    start = time.time()
    logger.info('Initializing %d threads...', len(partitioned_data))
    for chunk in partitioned_data:
        t = Thread(target=_func, args=(chunk,))
        # Non-daemon so pending results are not lost on interpreter exit.
        t.daemon = False
        t.start()
        thread_pool.append(t)
    logger.info('Initialization finished. Waiting for threads to exit...')
    for t in thread_pool:
        t.join()
    end = time.time()
    logger.info('async_get_data finished in %s seconds', end - start)
    return ret
def partition(lst, n):
    """Split ``lst`` into at most ``n`` contiguous chunks of near-equal size.

    The first ``len(lst) % n`` chunks get one extra element; when the list is
    shorter than ``n``, only ``len(lst)`` (single-element) chunks are returned.
    """
    base, extra = divmod(len(lst), n)
    bounds = [base * i + min(i, extra) for i in range(n + 1)]
    chunk_count = min(len(lst), n)
    return [lst[bounds[i]:bounds[i + 1]] for i in range(chunk_count)]
|
import os
import cv2
from PIL import Image
import torchvision.transforms as transforms
from scipy import ndimage
def is_image_file(filename):
    """Return True when ``filename`` ends with a recognized image extension
    (the check is case-sensitive; both cases are listed explicitly)."""
    image_suffixes = (
        '.jpg', '.JPG', '.jpeg', '.JPEG',
        '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
    )
    # str.endswith accepts a tuple, replacing the any()-of-generators form.
    return filename.endswith(image_suffixes)
def get_img(path, name, img_size):
    """Load an RGB image and return it resized to (img_size, img_size) as a
    tensor normalized channel-wise with mean/std 0.5 (values land in [-1, 1])."""
    image = Image.open(os.path.join(path, name)).convert('RGB')
    pipeline = transforms.Compose([
        transforms.Resize((img_size, img_size), Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    return pipeline(image)
def get_keras(path, name, img_size):
    """Load an image as single-channel grayscale, resized to
    (img_size, img_size), normalized with mean/std 0.5 (values in [-1, 1])."""
    image = Image.open(os.path.join(path, name)).convert('L')
    pipeline = transforms.Compose([
        transforms.Resize((img_size, img_size), Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ])
    return pipeline(image)
def get_distance_map(path, name, img_size):
    """Grayscale image -> Euclidean distance-transform map, returned as a
    (img_size, img_size) tensor normalized with mean/std 0.5."""
    gray = Image.open(os.path.join(path, name)).convert('L')
    # Distance transform of the nonzero pixels, then rescale into 0..255 so
    # it round-trips through an 8-bit PIL image.
    edt = ndimage.distance_transform_edt(gray)
    edt = cv2.normalize(edt, edt, 0, 255, cv2.NORM_MINMAX)
    dist_img = Image.fromarray(edt.astype('uint8')).convert("L")
    pipeline = transforms.Compose([
        transforms.Resize((img_size, img_size), Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(0.5, 0.5),
    ])
    return pipeline(dist_img)
"""
Crie um programa que leia duas notas de um aluno e calcule sua media, mostrando uma mensagem no final, de acordo com
a media atingida:
-media abaixo de 5.0: REPROVADO
-media entre 5.0 e 6.9: RECUPERAÇÃO
-media 7.0 ou superior: APROVADO
"""
# Read two grades, average them and classify per the spec in the header:
# < 5.0 REPROVADO, [5.0, 7.0) RECUPERACAO, >= 7.0 APROVADO.
n1 = float(input('Primeira nota: '))
n2 = float(input('Segunda nota: '))
media = (n1 + n2) / 2
if media < 5.0:
    print(f'Media: {media} -> REPROVADO')
elif media < 7.0:
    # Fix: the original tested ``6.9 > media``, so an average in
    # [6.9, 7.0) was wrongly reported as APROVADO even though approval
    # starts at 7.0.
    print(f'Media: {media} -> RECUPERACAO')
else:
    print(f'Media: {media} -> APROVADO')
|
from django.db import models
# Create your models here.
class CartModel(models.Model):
    """One line item in a shopping cart (backed by the ``cart`` table)."""
    # Snapshot of the product at the moment it was added to the cart.
    pro_id = models.IntegerField()
    pro_name = models.CharField(max_length=100)
    pro_brand = models.CharField(max_length=100)
    # NOTE(review): quantity is a CharField while price/size are integers —
    # presumably historical; confirm callers before changing the type.
    pro_quantity = models.CharField(max_length=100)
    pro_price = models.IntegerField()
    pro_size = models.IntegerField()
    class Meta:
        db_table = 'cart'
import optparse
import sys
import os
from twisted.internet import defer, reactor
from twisted.python import log
sys.path.append(os.getcwd())
from social import people, utils, db
@defer.inlineCallbacks
def sendInvitations(sender):
    """Send invitation e-mails (addresses read from stdin, one per line)
    on behalf of ``sender``.

    :param sender: key into the ``userAuth`` column family identifying
                   the sending user
    """
    # Resolve the sender's user id and organisation id from userAuth.
    cols = yield db.get_slice(sender, "userAuth")
    senderInfo = utils.columnsToDict(cols)
    senderOrgId = senderInfo['org']
    senderId = senderInfo['user']
    # Fetch the 'basic' super-column for both the user and the org entity.
    cols = yield db.multiget_slice([senderId, senderOrgId], "entities", ['basic'])
    entities = utils.multiSuperColumnsToDict(cols)
    # One e-mail address per stdin line; strip surrounding whitespace.
    emails = sys.stdin.readlines()
    emails = [x.strip() for x in emails]
    yield people._sendInvitations([], emails, entities[senderId], senderId, entities[senderOrgId])
if __name__ == '__main__':
    # CLI entry point: ``-s/--sender`` selects who the invitations come from.
    parser = optparse.OptionParser()
    parser.add_option('-s', '--sender', dest="sender", action="store")
    options, args = parser.parse_args()
    if options.sender:
        log.startLogging(sys.stdout)
        db.startService()
        d = sendInvitations (options.sender)
        def finish(x):
            # Tear down the db service and stop the reactor regardless of
            # whether the deferred succeeded or failed.
            db.stopService()
            reactor.stop();
        d.addErrback(log.err)
        d.addBoth(finish)
        # NOTE(review): reactor.run() is reached even when no sender was
        # given, in which case nothing ever stops the reactor — confirm
        # whether that is intended.
        reactor.run()
|
# psql: \conninfo
# You are connected to database "cicero" as user "cicero" via socket in "/tmp" at port "5432".
# NOTE(review): the conninfo sample above mentions database "cicero", but the
# code connects to "eventsdb" — confirm which database is intended.
import psycopg2
# NOTE(review): hard-coded password; acceptable only for a local throwaway
# instance — move to env/config otherwise.
conn = psycopg2.connect(
    database="eventsdb",
    user="cicero",
    host="/tmp",
    password="123"
)
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 26 07:42:25 2019
@author: USRBET
"""
import numpy as np
import pandas as pd
# Build a random 2x3 integer frame and derive extra columns from it.
arr_pand = np.random.randint(0, 10, 6).reshape(2, 3)
df1 = pd.DataFrame(arr_pand)
s1 = df1[0]
s2 = df1[1]
s3 = df1[2]
s1[0]
df1[3] = s1       # copy of column 0
df1[4] = s1 * s2  # elementwise product of columns 0 and 1
# Same data with named columns...
datos_fisicos_uno = pd.DataFrame(
    arr_pand, columns=[
        'estatura (cm)',
        'peso (kg)',
        'edad (anios)'])
# ...and with a named index as well.
datos_fisicos_dos = pd.DataFrame(
    arr_pand,
    columns=[
        'estatura (cm)',
        'peso (kg)',
        'edad (anios)'],
    index=['Ale', 'Vane'])
df1.index = ['Ale', 'Vane']
# Fix: df1 has exactly 5 columns (the 3 originals plus the 2 derived ones);
# the original assigned 6 labels, which raises ValueError in pandas.
df1.columns = ['A', 'B', 'C', 'D', 'E']
|
import os
# All project paths are derived from the repository root (the parent of this
# file's directory).  Every directory constant keeps a trailing slash so the
# values concatenate cleanly, exactly as before.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + "/"

# --- top-level directories ---
CONFIG_DIR = ROOT_DIR + "config/"
DATA_DIR = ROOT_DIR + "data/"
CACHE_DIR = ROOT_DIR + "cache/"

# --- config subdirectories ---
STRING_DIR = CONFIG_DIR + "strings/"
PERMS_DIR = CONFIG_DIR + "perms/"
COG_CONFIG_DIR = CONFIG_DIR + "cog_configs/"

# --- bot-wide config / string files ---
BOT_CONFIG_FILE = CONFIG_DIR + "bot_config.yaml"
HELP_STRING_FILE = STRING_DIR + "help.yaml"
ERROR_STRING_FILE = STRING_DIR + "errors.yaml"
COG_STRING_FILE = STRING_DIR + "cog_descriptions.yaml"
PPRINT_STRING_FILE = STRING_DIR + "pprint.yaml"
NAME_STRING_FILE = STRING_DIR + "cog_command_names.yaml"
GLOBAL_PERMS_FILE = PERMS_DIR + "00globals.yaml"

# --- per-cog configuration and data ---
AUCTION_CONFIG = COG_CONFIG_DIR + "equip_cog_config.yaml"
AUCTION_FILE = DATA_DIR + "merged_equip_data.json"
ITEM_CONFIG = COG_CONFIG_DIR + "item_cog_config.yaml"
ITEM_FILE = DATA_DIR + "merged_item_data.json"
PREVIEW_CONFIG = COG_CONFIG_DIR + "preview_cog_config.yaml"
REACTION_CONFIG = COG_CONFIG_DIR + "reaction_cog_config.yaml"
REACTION_ROLE_LOG_DIR = CACHE_DIR + "reaction_roles/"

# --- "super" auction cache ---
SUPER_DIR = CACHE_DIR + "super/"
SUPER_HTML_DIR = SUPER_DIR + "html/"
SUPER_CACHE_FILE = SUPER_DIR + "cache.json"
SUPER_EQUIP_FILE = SUPER_DIR + "equips.json"
SUPER_ITEM_FILE = SUPER_DIR + "items.json"

# --- hvmarket cache ---
MARKET_DIR = CACHE_DIR + "hvmarket/"
MARKET_CACHE_FILE = MARKET_DIR + "cache.json"
MARKET_ITEM_FILE = MARKET_DIR + "items.json"

# --- kedama cache ---
KEDAMA_DIR = CACHE_DIR + "kedama/"
KEDAMA_HTML_DIR = KEDAMA_DIR + "html/"
KEDAMA_DEBUG_FILE = KEDAMA_DIR + "debug.json"
KEDAMA_EQUIP_FILE = KEDAMA_DIR + "equips.json"
KEDAMA_ITEM_FILE = KEDAMA_DIR + "items.json"

# --- misc data files ---
RANGES_FILE = DATA_DIR + "ranges.json"
UPDATE_LOG = CACHE_DIR + "update_log.json"
import numpy as np
import sys
import copy
import pandas as pd
def assignmentToDf(assignment, real_dist_dict):
    """Convert a list of (old_key, new_key, distance) triples into a DataFrame.

    :param assignment: iterable of (old_peak_key, assigned_peak_key, distance)
    :param real_dist_dict: optional {old_peak_key: reference_distance}; keys
                           missing from it — or a None dict — are recorded
                           as 0.0
    :return: DataFrame with columns Index / Name / AssignedTo / Real_dist /
             Distance, sorted by the numeric index embedded in each old key
    """
    hist = []                    # distances produced by our algorithm
    histn = []                   # amino-acid (peak) names
    histi = []                   # numeric indices parsed out of the names
    real_dist = []               # real / hand-made target distances
    estimated_assignments = []
    for triple in assignment:
        key_old, key_new, dist = triple
        hist.append(dist)
        histn.append(key_old)
        estimated_assignments.append(key_new)
        # Fix: the original appended to ``real_dist`` only when
        # real_dist_dict was not None, so passing None left the list empty
        # and the DataFrame constructor failed on mismatched column lengths.
        if real_dist_dict is not None and key_old in real_dist_dict:
            real_dist.append(real_dist_dict[key_old])  # known reference shift
        else:
            real_dist.append(0.)                       # no reference: zero shift
        # The peak index is whatever digits appear in the name, e.g. "A12" -> 12.
        strnkey1 = ''.join(char for char in key_old if char.isnumeric())
        histi.append(int(strnkey1))
    df1 = pd.DataFrame({"Index": histi, "Name": histn, "AssignedTo": estimated_assignments, "Real_dist": real_dist, "Distance": hist})
    df1 = df1.sort_values(by=['Index'])
    return df1
def initial_radius(X):
    """Mean pairwise Euclidean distance between the rows of ``X``.

    :param X: (n, d) array with d > 1
    :return: scalar mean over the n*(n-1)/2 distinct pairs
    """
    assert len(X.shape) == 2 and X.shape[1] > 1
    diffs = X[:, np.newaxis, :] - X[np.newaxis, :, :]
    full_matrix = np.sqrt((diffs ** 2).sum(axis=-1))
    # Keep only the strictly-upper triangle: each unordered pair once.
    pair_distances = full_matrix[np.triu_indices(X.shape[0], k=1)]
    return pair_distances.mean()
def distance_matrix_bck(X, Y):
    """Pairwise Euclidean distances between the rows of ``X`` and of ``Y``.

    :param X: (n, d) coordinate array
    :param Y: (m, d) coordinate array
    :return: (n, m) distance matrix
    """
    deltas = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    dist = np.sqrt((deltas ** 2).sum(axis=-1))
    assert dist.shape == (X.shape[0], Y.shape[0])
    return dist
def distance_matrix(X, Y, real_dist = False):
    """Pairwise distances between the rows of ``X`` and of ``Y``.

    :param X: (n, d) coordinate array
    :param Y: (m, d) coordinate array
    :param real_dist: when False (default) the second coordinate axis is
                      down-weighted by 0.2 before taking the norm; when True
                      a plain Euclidean distance is returned
    :return: (n, m) distance matrix
    """
    xx = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    if real_dist is False:
        # NOTE(review): in-place scaling assumes ``xx`` has a float dtype;
        # integer inputs would fail the cast — confirm callers pass floats.
        xx[:, :, 1] *= 0.2
    dist = np.sqrt(np.sum(( xx )**2, axis=-1))
    assert dist.shape[0] == X.shape[0] and dist.shape[1] == Y.shape[0]
    # Fix: removed the unused ``max_dist`` computation (dead code).
    return dist
class Peak:
    """A single 2-D spectrum peak plus bookkeeping fields for the matcher."""

    def __init__(self, id, x, y):
        self.id = id
        self.x = x
        self.y = y
        # -1 means "not computed yet" for both derived quantities.
        self.radius = -1
        self.closest_dist = -1
class Spectra:
    """Collection of 2-D peaks with soft-removal bookkeeping.

    Peaks are never physically deleted during matching: the parallel masks
    ``__IDXS`` (list) and ``__idxs`` (array) mark them inactive so a later
    ``resetidxs`` can restore everything except permanently ``removed`` peaks.
    """
    def __init__(self, dataframe, deltas = None ):
        # :param dataframe: indexed by peak key, with at least 'x' and 'y' columns
        # :param deltas: optional per-peak array aligned with the dataframe rows
        self.__deltas = deltas
        peaks_array = dataframe[['x', 'y']].to_numpy()
        ######################################
        #self.suffix = suffix
        self.__IDXS = [1]*len(peaks_array) # list type
        self.__idxs = np.array(self.__IDXS) # bool type
        # 1 where a peak has been permanently removed (survives resetidxs).
        self.removed = self.__idxs*0
        #print(self.__IDXS)
        self.peaks_array = peaks_array # a numpy array
        self.peaks_dict = []
        self.__keys = []
        ######################################
        #if dataframe is None:
        #    for i, peak in enumerate(peaks_array):
        #        self.peaks_dict.append( (suffix+'_'+str(i), peak) )
        #        self.__keys.append(suffix+'_'+str(i))
        #else:
        # Keys come straight from the dataframe index, in row order.
        self.__keys = dataframe.index.tolist()
        for key, peak in zip(self.__keys, peaks_array):
            self.peaks_dict.append( (key, peak) )
        #if dataframe is None:
        #    self.dd = pd.DataFrame(columns=['x', 'y'], data = peaks_array, index=self.__keys)
        #else:
        self.dd = dataframe
    def getItemByKey(self, key):
        """Return the dataframe row for ``key``."""
        return self.dd.loc[key]
    def mark_as_removed(self, peak_id):
        """Permanently deactivate a peak (survives ``resetidxs``)."""
        idx = self.__keys.index(peak_id)
        self.removed[idx] = 1
        self.__IDXS[idx] = 0
        self.__idxs[idx] = 0
    def undo_mark_as_removed(self, peak_id):
        """Reverse :meth:`mark_as_removed` for one peak."""
        idx = self.__keys.index(peak_id)
        self.removed[idx] = 0
        self.__IDXS[idx] = 1
        self.__idxs[idx] = 1
    def getidxs(self):
        """Active-peak mask as a numpy array."""
        return self.__idxs
    def resetidxs(self):
        """Reactivate every peak except those permanently ``removed``."""
        #print("==> ", self.suffix)
        self.__IDXS = [1]*len(self.peaks_array) # list type
        #print("self.__IDXS ", self.__IDXS)
        #print("self.removed ", self.removed)
        #print("self.__IDXS*=self.removed ", self.__IDXS*np.logical_not(self.removed))
        # NOTE(review): list *= ndarray makes __IDXS an ndarray from here on
        # (numpy handles the operation) — presumably benign since only
        # sum()/indexing are used afterwards; confirm.
        self.__IDXS*=np.logical_not(self.removed)
        self.__idxs = np.array(self.__IDXS) # bool type
    def __len__(self):
        # Number of currently active peaks (both masks must agree).
        # print(self.__idxs.sum(), sum(self.__IDXS))
        assert sum(self.__IDXS) == self.__idxs.sum()
        return sum(self.__IDXS)
        #return len(self.peaks_dict)
    def __getitem__(self, idx):
        # ``idx`` counts over ACTIVE peaks; translate to the real position.
        real_idx = np.flatnonzero(self.__IDXS)[idx]
        return self.peaks_dict[real_idx]
        # return self.peaks_dict[idx]
    def xy(self):
        """Coordinates of the active peaks, shape (n_active, 2)."""
        indices = np.flatnonzero(self.__idxs)
        return self.peaks_array[indices, :]
        #return self.peaks_array
    def keys(self):
        """Keys of the active peaks, as a numpy array."""
        indices = np.flatnonzero(self.__idxs)
        return np.array(self.__keys)[indices]
        #return self.__keys
    def getRealIndices(self):
        """Row positions (into the full arrays) of the active peaks."""
        return np.flatnonzero(self.__idxs)
    def get_deltas(self):
        """Deltas of the active peaks (requires ``deltas`` passed at init)."""
        indices = np.flatnonzero(self.__idxs)
        return self.__deltas[indices]
        #return self.__keys
    def __str__(self):
        # NOTE(review): prints every peak as a side effect and returns '' —
        # unconventional for __str__, kept as-is.
        for item in self.peaks_dict:
            print(item)
        return ''
    def remove_peakold(self, id):
        """Legacy hard removal: physically drops the peak from all containers."""
        for j, item in enumerate(self.peaks_dict):
            a, b = item
            if a == id:
                self.peaks_dict.remove(item)
                self.__keys.remove(a)
                self.peaks_array = np.delete(self.peaks_array, j, axis=0)
    def remove_peak(self, id):
        """Soft removal: deactivate the peak (reversible via resetidxs)."""
        iidx = self.__keys.index(id)
        self.__IDXS[iidx] = 0
        self.__idxs = np.array(self.__IDXS)
    #def getAllButOne(self, peak_id):
    #    pass
class Assignment:
def __init__(self, old_peaks, new_peaks, level, log, dist_matrix):
self.log = log
if self.log: print(level*"| ","PEAKS TO ASSIGN")
#print(level*"| ",old_peaks.keys())
#print(level*"| ",new_peaks.keys())
#print("ok init assignment ???")
#old_peaks = copy.deepcopy(_old_peaks)
#new_peaks = copy.deepcopy(_new_peaks)
#self.old_peaks = copy.deepcopy(old_peaks)
#self.new_peaks = copy.deepcopy(new_peaks)
self.level = level
# se i new_peaks sono piu degli old
if len(old_peaks) <= len(new_peaks):
N_PEAKS = len(old_peaks)
# se i new_peaks sono di meno
else:
N_PEAKS = len(new_peaks)
#print("====> N_PEAKS: ", N_PEAKS)
self.old, self.new = [], []
#self.single = [] # picchi per cui non è stato individuato il partner
self.accoppiati = [] # picchi per cui è stato individuato il partner
#print("XY()")
#print(old_peaks.xy())
#self.distance_matrix = distance_matrix(old_peaks.xy(), new_peaks.xy())
row_idxs = old_peaks.getRealIndices()
col_idxs = new_peaks.getRealIndices()
#print(row_idxs)
#print(col_idxs)
#if dist_matrix is not None:
#print("==" * 50)
#print("==" * 50)
#print(dist_matrix.shape)
d1 = dist_matrix[row_idxs, :]
d2 = d1[:, col_idxs]
self.distance_matrix = d2
if 'avg_window' in old_peaks.dd.columns.tolist():
#print(old_peaks.dd)
old_keys = old_peaks.keys().tolist()
deltas = old_peaks.dd.loc[old_keys, 'avg_window'].to_numpy().reshape(-1,1) + 0.001
assignedTo = old_peaks.dd.loc[old_keys, 'assignedTo'].tolist()
print(self.distance_matrix.shape)
#indici delle colonne della dist_matrix relative allo assignedTo ??
'''
print(list(new_peaks.keys()))
print( len(old_peaks.keys()), len(new_peaks.keys() ))
print("==>", assignedTo)
print(new_peaks.keys(), len(new_peaks.keys()))
col_idxs = [ list(new_peaks.keys()).index(x)
for x in assignedTo if x != 'NotAssigned']
print(col_idxs)
#self.distance_matrix = self.distance_matrix / deltas
for i,j in enumerate(col_idxs):
print(i, j)
self.distance_matrix[i, j] /= deltas[i]
import sys
sys.exit()
'''
self.ddd = pd.DataFrame(data = self.distance_matrix,
index=old_peaks.keys(),
columns=new_peaks.keys())
self.cost = 0.
self.associations = []
self.not_allowed_associations = []
"""
# NUOVO APPROCCIO
rank = rank(lista_1, lista_2)
# finche tutti i picchi non sono sistemati...
while len(self.associations) != len(old_peaks):
for (p, s, dist) in rank:
if s is not assigned:
assign(p,s,dist)
update lista_1, lista_2
else:
add (p,s,d) to not_allowed_associations
rank = rank(lista_1, lista_2)
"""
#print("rank")
tmp_associations = self.rank(old_peaks, new_peaks)
#print("end rank")
# VECCHIO APPROCCIO
#while len(self.associations) != len(self.old_peaks):
while len(self.associations) != N_PEAKS:
#print("while")
#tmp_associations_old = self.closest(lista_0, lista_1)
# self.assign_peaks(tmp_associations)
for oldp, newp, dist in tmp_associations:
#print("-->", oldp, newp, dist)
if newp not in self.accoppiati:
self.associate(oldp, newp, dist)
#print("LEN self.associate: ", len(self.associations))
self.accoppiati.append(newp)
## update lista_1, lista_2 ##
#old_peaks1 = copy.deepcopy(old_peaks)
#print("BEFORE: ", old_peaks.peaks_dict)
old_peaks.remove_peak(oldp)
#print("AFTER: ", old_peaks.peaks_dict)
#new_peaks1 = copy.deepcopy(new_peaks)
#print("BEFORE: ", new_peaks.peaks_dict)
new_peaks.remove_peak(newp)
#print("AFTER: ", new_peaks.peaks_dict)
if len(self.associations) == N_PEAKS:
break
else:
self.not_allowed_associations.append((oldp, newp, dist))
tmp_associations = self.rank(old_peaks, new_peaks)
break
# print("LEN assoc ", len(self.associations), "LEN oldpeaks", len(self.old_peaks))
old_peaks.resetidxs()
new_peaks.resetidxs()
if self.log: print(level*"| ","Assignment completed.", self.cost)
def __len__(self):
return len(self.associations)
#def is_assigned(self, peak):
# return peak in self.new
def associate(self, peak0, peak1, radius):
self.associations.append( (peak0, peak1, radius) )
self.cost+=radius
    def rank(self, peaks_list0, peaks_list1):
        """Pair every remaining old peak with its closest remaining new peak.

        Distances come from the precomputed table ``self.ddd`` (a pandas
        DataFrame indexed by peak keys). Returns a list of
        (old_key, new_key, distance) triples sorted by ascending distance.
        """
        #print(self.level*"\t",'='*30, " rank function ", '='*30)
        #print(self.level*"| ",'keys0', peaks_list0.keys())
        #print(self.level*"| ",'keys1', peaks_list1.keys())
        #print(self.level*"| ","---", peaks_list0.getidxs(), "---")
        #print(self.level*"\t",'number peaks old: ', len(peaks_list0.keys))
        #print(self.level*"\t",'number peaks new: ', len(peaks_list1.keys))
        #print("------- debug 1---------")
        local_distances = self.ddd.loc[peaks_list0.keys()][peaks_list1.keys()] # a sub-dataframe
        #print("------- debug 2---------")
        #print("peaks_list0.keys() ", peaks_list0.keys())
        #print("peaks_list1.keys() ",peaks_list1.keys())
        #print("local_distances:", local_distances)
        # per row (old peak): key of the closest new peak
        closest_peaks = local_distances.idxmin(axis=1).tolist() # list of peak keys
        #print("------- debug 3---------")
        # print('closest_peaks', closest_peaks)
        # per row: the corresponding minimum distance
        scalar_distances = local_distances.min(1).tolist()
        # print('distances ', scalar_distances)
        #print("------- debug 4---------")
        # i is the positional row index; peaks_list0[i][0] maps it back to
        # the source peak key.
        dd = [ (peaks_list0[i][0], j, k) for (i, (j,k)) in enumerate(zip(closest_peaks, scalar_distances))]
        # j is a destination peak key
        # i is a source peak key
        #print("------- debug 5---------")
        dd = sorted(dd, key=lambda dd: dd[2])
        return dd
    def closest(self):
        # Pair every old peak with its globally closest new peak (collisions
        # are possible; the caller resolves them).
        closest_idxs = self.distance_matrix.argmin(axis = 1)
        # print(self.distance_matrix)
        #print(self.new_peaks.peaks_dict)
        #print("closest_idxs: ", closest_idxs)
        closest_peaks = [self.new_peaks[idx][0] for idx in closest_idxs]
        # one distance per old peak: row i, column closest_idxs[i]
        distances = self.distance_matrix[np.arange(0, len(self.old_peaks)), closest_idxs]
        # NOTE(review): unconditional debug prints — consider gating on self.log.
        print("closest peaks: ", closest_peaks)
        print(list(distances))
        dd = [ (self.old_peaks[i][0],j,k) for (i,(j,k)) in enumerate(zip(closest_peaks, distances)) ]
        # sort candidate triples by ascending distance
        dd = sorted(dd, key=lambda dd: dd[2])
        return dd
def assign_peaks(self, tmp_associations):
for oldp, newp, dist in tmp_associations:
# print("-->", oldp, newp, dist)
if newp not in self.accoppiati:
self.associate(oldp, newp, dist)
self.accoppiati.append(newp)
else:
self.not_allowed_associations.append((oldp, newp, dist))
# adesso sono rimasti
#in caso di conflitti:
#continua con le assegnazioni, assegnando i secondi piu vicini
#fino a completare la assegnazione
class PeakManager:
    """Drives the peak-to-peak assignment search.

    Builds an initial greedy ``Assignment`` and then recursively retries the
    conflicting ("not allowed") pairings up to ``search_depth`` levels deep,
    keeping whichever assignment ends up with the lowest total cost.
    """
    def __init__(self, search_depth = 3, max_search_per_level = 3, log = True):
        # search_depth: maximum recursion depth for conflict re-evaluation
        # max_search_per_level: how many not-allowed couples to retry per level
        self.search_depth = search_depth
        self.max_search_per_level = max_search_per_level
        self.log = log
        self.assignments = None # record of the assignments
        self.confirmed_associations = []
    #def update_radius(self, step):
    #    for p in self.assignments.single:
    #        p.radius += step
    def assign(self, old_peaks, new_peaks, level=0, prev_limit = 0., DistMatrix = None):
        """Recursively build the cheapest Assignment between the two peak sets.

        ``DistMatrix`` is computed once at the top level and reused by every
        recursive call; ``level`` tracks the recursion depth (also used to
        indent log output). Returns an ``Assignment`` instance.
        """
        changes = 0
        if self.log: print(level*"| ",50*"--")
        if self.log: print(level*"| ","LEVEL ", level, "len old peaks", len(old_peaks))
        # print(old_peaks)
        not_allowed = []
        # if there are more new peaks than old ones
        if len(old_peaks) <= len(new_peaks):
            N = len(old_peaks)
        # if there are fewer new peaks
        else:
            N = len(new_peaks)
        # build the assignment
        # ASSIGNMENT-0
        if DistMatrix is None:
            DistMatrix = distance_matrix(old_peaks.xy(), new_peaks.xy())
        assignment = Assignment(old_peaks, new_peaks, level, self.log, dist_matrix=DistMatrix)
        #print("getidxs()", old_peaks.getidxs())
        '''
        # finchè i picchi non sono tutti accoppiati
        while len(assignment) < N:
            for p in old_peaks:
                # fai crescere il raggio dei picchi
                # e selezione l eventuale picco catturato dal raggio
                print(p)
                # prende il new_peak piu vicino a p
                closest_p = self.get_closest() # closest_p = self.get_closest(p)
                # se e occupato marcalo not_allowed,
                if assignment.is_assigned(closest_p):
                    not_allowed.append( (p,closest_p) )
                # altrimenti assegnalo al padrone del raggio
                else:
                    assignment.associate(p, closest_p)
        '''
        #print("---------------------------------------------------", level*'-')
        #print(level*"\t","====>>", assignment.associations)
        #print(level*"\t","= = >>", assignment.not_allowed_associations)
        if self.log: print(level * "| ","NOT ALLOWED => ",assignment.not_allowed_associations)
        # len(assignment.not_allowed_associations))
        if level <= self.search_depth:
            # EVALUATING NOT ALLOWED ASSIGNMENT
            for jj, couple in enumerate(assignment.not_allowed_associations):
                if jj <= self.max_search_per_level:
                    #print(level * "| ","FIXED COST: ", couple[2], "conviene se < ", assignment.cost-couple[2])
                    if self.log: print(level * "| ", "reassign by fixing: ",couple)
                    #old_peaks1 = copy.deepcopy(old_peaks)
                    #print("BEFORE: ", old_peaks1.peaks_dict)
                    #old_peaks1.remove_peak(couple[0])
                    #print("AFTER: ", old_peaks1.peaks_dict)
                    # temporarily fix this couple by hiding both of its peaks
                    old_peaks.mark_as_removed(couple[0])
                    #new_peaks1 = copy.deepcopy(new_peaks)
                    #print("BEFORE: ", new_peaks1.peaks_dict)
                    #new_peaks1.remove_peak(couple[1])
                    #print("AFTER: ", new_peaks1.peaks_dict)
                    new_peaks.mark_as_removed(couple[1])
                    #print(level*"\t","old_peaks1 ", old_peaks1)
                    #print(level*"\t","new_peaks1 ", new_peaks1)
                    #print(level*"\t","***>", couple)
                    # redo the assignment without counting the not-allowed couple
                    # CORE: with the fixed peaks removed, run the assignment on the remaining subset
                    #kk_x, kk_y = old_peaks.keys(), new_peaks.keys()
                    new_assignment = self.assign(old_peaks, new_peaks, level=level+1, DistMatrix=DistMatrix)
                    old_peaks.undo_mark_as_removed(couple[0])
                    new_peaks.undo_mark_as_removed(couple[1])
                    ##
                    # the not-allowed couple is added back afterwards
                    if self.log: print(level * "| ", "sub assignment cost ", new_assignment.cost)
                    if self.log: print(level*"| ","external associate", couple)
                    new_assignment.associate(couple[0], couple[1], couple[2])
                    if self.log: print(level*"| ","LEVEL {} Assignment ({}) cost is: ".format(level, jj+1), new_assignment.cost)
                    # keep whichever of the two assignments is cheaper
                    if new_assignment.cost < assignment.cost:
                        gain = assignment.cost - new_assignment.cost
                        assignment = new_assignment
                        changes+=1
                        if self.log: print(level*"| ",50*"*", gain)
        #print(level*"| ","RETURN")
        #print(level*"| ","ASSIGNMENT COST: ", assignment.cost)
        if self.log: print(level * "| ", 50 * "--")
        return assignment
    def getAssociations(self, old_peaks, new_peaks, _distance_matrix = None, log=False):
        """Run the assignment search and return the matched coordinates.

        Returns (free_xy, ligand_xy, associations, accuracy): two arrays of
        matched old/new peak coordinates in the same order, the sorted
        association triples, and the fraction of pairs whose keys coincide.
        """
        if log: print("Running...")
        assignment = self.assign(old_peaks, new_peaks, DistMatrix=_distance_matrix)
        associations = assignment.associations
        associations = sorted(associations, key=lambda associations: associations[2], reverse=True)
        # print(associations)
        free_peaks, peaks_with_ligands = [], []
        good = 0.
        wrong = 0.
        for triple in associations:
            p_key = triple[0]
            s_key = triple[1]
            # matching keys mean the peak was re-identified correctly
            if p_key==s_key:
                good+=1
            else:
                wrong+=1
            p_xy = old_peaks.getItemByKey(p_key)
            s_xy = new_peaks.getItemByKey(s_key)
            #print(triple)
            #print("==>", p_key, s_key)
            #print(p_xy.tolist(), s_xy.tolist())
            free_peaks.append(p_xy.tolist())
            peaks_with_ligands.append(s_xy.tolist())
        # NOTE(review): this print is not gated on ``log`` unlike the others.
        print("good: ", good, "wrong: ", wrong, "TOT: ", good+wrong)
        if log: print("Cost: ", assignment.cost)
        if log: print("good: ", good)
        if log: print("wrong: ", wrong)
        if log: print("Completed.")
        accuracy = float(good)/(good+wrong)
        if log: print(good/(good+wrong))
        return np.array(free_peaks), np.array(peaks_with_ligands), associations, accuracy
######################################################
def demo():
    """Generate synthetic peak data and run the assignment search.

    Builds two Spectra (the second with two peaks deleted), runs
    PeakManager.getAssociations and leaves the results in local variables.
    """
    from peaksIdentification.peaks_assignement import generate_data
    N_PEAKS = 100
    N_SHIFTS = 8
    peaks, new_peaks = generate_data( n_peaks=N_PEAKS, n_shifts=N_SHIFTS)
    # drop two peaks so the two lists have different lengths
    new_peaks = np.delete(new_peaks, [0,1], axis=0)
    old_spectra = Spectra(peaks, suffix='p')
    new_spectra = Spectra(new_peaks, suffix='s')
    pm = PeakManager(search_depth=10, max_search_per_level=5 , log=False)
    # peaks_assignment = pm.assign(peaks, new_peaks)
    #peaks_assignment = pm.assign(old_spectra, new_spectra)
    #print("BEST ASSIGNMENT COST IS: ", peaks_assignment.cost)
    # BUGFIX: getAssociations returns four values
    # (free_xy, ligand_xy, associations, accuracy); unpacking only two
    # raised "ValueError: too many values to unpack".
    xy_free, xy_with_ligands, associations, accuracy = pm.getAssociations(old_spectra, new_spectra)
    #print(xy_free)
    #print(xy_with_ligands)
# To run the demo, uncomment the following line
# demo()
# Monthly expense demo: total three costs, then list the expense names.
rent = 12000
gas = 800
groceries = 300
total = sum([rent, gas, groceries])
print(total)
rent = 15000
item1 = "gas"
item2 = "groceries"
item3 = "rent"
print("Expense List: ", item3, item1, item2)
|
import requests
import time
import json
vis_url = 'http://0.0.0.0:5000/events'
vis_data = "../data/StreamingNWChem/"
# Reset the visualization server state before streaming new data.
res = requests.post(vis_url, json={'type':'reset'})
print(res.json())
#----set function dictionary----
fun_names = []
with open(vis_data+"function.json", 'r') as f:
    fun_names = json.load(f)
requests.post(vis_url, json={'type':'functions', 'value':fun_names})
#----set event types, they are not fixed----
et = []
with open(vis_data+"et.json", 'r') as f:
    et = json.load(f)
requests.post(vis_url, json={'type':'event_types','value':et})
#----simulating update----
import glob
# Trace/anomaly/foi files are numbered; sort each list by its numeric index
# (second-to-last dotted component) so frames replay in order.
event_list = glob.glob(vis_data+"trace.*.json")
event_list.sort(key=lambda x: int(x.split('.')[-2]))
anomaly_list = glob.glob(vis_data+"anomaly.*.json")
anomaly_list.sort(key=lambda x: int(x.split('.')[-2]))
foi_list = glob.glob(vis_data+"foi.*.json")
foi_list.sort(key=lambda x: int(x.split('.')[-2]))
# NOTE(review): assumes the three file lists have equal length and are
# index-aligned — confirm the data directory guarantees this.
for i in range(len(event_list)):
    #----set function of interest----
    foi = []
    with open(foi_list[i], 'r') as f:
        foi = json.load(f)
    labels = []
    with open(anomaly_list[i], 'r') as f:
        labels = json.load(f)
    events = []
    with open(event_list[i], 'r') as f:
        events = json.load(f)
    # Push one frame: events + functions of interest + anomaly labels.
    res = requests.post(vis_url, json={'type':'info','value':{
        "events": events,
        "foi": foi,
        "labels": labels
    }})
    print(res.json())
# requests.post('http://127.0.0.1:5000/log', json={'type':'log'})
|
from functools import reduce
import time
##li = [1,2,3,4,5,6,7,8,9,10]
##sum = reduce(lambda n,m:n+m,li)
##print(sum)
##
##li2 = [1,2,3,4,5,6,7,8,9,10]
##mul = reduce(lambda n,m:n*m,li2)
##print(mul)
##add = reduce(lambda n,m:n+m,range(1,100))
##print(add)
t1 = time.time()
print(t1)
# The commented examples above show reduce() for both sums and products.
# BUGFIX (naming): the live run's lambda *adds*, so the result is the sum of
# 1..99 — the old name `mul` was misleading; call it `total`.
total = reduce(lambda acc, value: acc + value, range(1, 100))
print(total)
t2 = time.time()
print(t2)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 16:01:37 2020
@author: Felix
"""
"""
IMPORTACION DE LIBRERIAS
"""
from bokeh.layouts import layout
from bokeh.models import CategoricalColorMapper, ColumnDataSource, NumeralTickFormatter
from bokeh.plotting import figure
from source.common.Funciones_Generales import ConversorCoordenadasMercator, FormateoTiempos, FormulaKarvonen, Reescalado, LecturaBBDDActividades, EscrituraBBDDActividades, GeneracionCodigoJS, CreacionDirectoriosProyecto
from source.common.Funciones_DataFrame import CalculosVectoresAgregados, HitosKilometricos, HitosPausas, TablaParcialesKilometricos, TablaParcialesPausas, TablaZonasCardiacas, IdentificacionTipoActividad, LecturaBBDDActividades, AnalisisActividadActual, DeteccionVariables
from source.common.Funciones_CulumnDataSource import FormateoEjes, CalculoOffsetAltitud, LimiteEjeY, ParametrosVariables
from source.common.PaletasColores import Grapefruit, Bittersweet, Sunflower, Grass, Mint, Aqua, BlueJeans, Lavender, PinkRose, SkinTone, LightGray, DarkGray
def TiempoZonasFC(df, FCMax, FCRep):
    """Build a Bokeh hbar chart of time spent in each heart-rate zone.

    df: activity samples; FCMax/FCRep: maximum and resting heart rate passed
    to TablaZonasCardiacas to derive the Z1-Z5 zone totals.
    """
    dfTiempoZonasFC = TablaZonasCardiacas(FCMax, FCRep, df)
    # Human-readable total time per zone ('T' selects the total-time format).
    dfTiempoZonasFC['TiempoTotal'] = dfTiempoZonasFC.DeltaTiempo.apply(lambda x: FormateoTiempos(x, 'T'))
    OrigenZonasCardiacas = ColumnDataSource(dfTiempoZonasFC[dfTiempoZonasFC['ZonaFC'].isin(['Z5', 'Z4', 'Z3', 'Z2', 'Z1'])].sort_values('ZonaFC', ascending=False))
    """
    ZONAS CARDIACAS
    """
    # One color per zone, hardest (Z5) to easiest (Z1).
    MapaColorZonaCardiaca = CategoricalColorMapper(factors= ['Z5', 'Z4', 'Z3', 'Z2', 'Z1'], palette= [Grapefruit[2], Bittersweet[2], Grass[2], BlueJeans[2], LightGray[2]])
    # Horizontal bars: percentage of time per zone; x range padded by 10%.
    PLT_ZonasCardiacas = figure(plot_width= 400, plot_height= 350, x_range= (0, dfTiempoZonasFC['PorcentajeTiempo'].max()+dfTiempoZonasFC['PorcentajeTiempo'].max()*0.1), y_range= ['Z1', 'Z2', 'Z3', 'Z4', 'Z5'], tools= '', toolbar_location= None)
    PLT_ZonasCardiacas.hbar(y= 'ZonaFC', right= 'PorcentajeTiempo', height= 0.9, source= OrigenZonasCardiacas, line_color= 'black', fill_color= {'field':'ZonaFC', 'transform':MapaColorZonaCardiaca})
    # Overlay the formatted total time next to each bar.
    PLT_ZonasCardiacas.text(x= 5, y= 'ZonaFC', text= 'TiempoTotal', source= OrigenZonasCardiacas)
    PLT_ZonasCardiacas.title.text = 'ZONAS CARDIACAS'
    PLT_ZonasCardiacas.sizing_mode = 'fixed'
    PLT_ZonasCardiacas.grid.visible = False
    # The x axis holds fractions; render them as percentages.
    PLT_ZonasCardiacas.xaxis.formatter = NumeralTickFormatter(format= '0%')
    return PLT_ZonasCardiacas
# -*- coding: utf-8 -*-
"""
Модуль *event* позволяет обрабатывать события, происходящие в приложении.
event.py::
def before_event(error, event):
...
def after_event(event):
...
def error_event(event):
...
События происходят при изменении данных в приложении пользователем
и программный интерфейс позволяет управлять этими событиями.
Приложение генерирует события при выполнении
определенных действий. При этом вызываются соответствующие функции
:py:func:`before_event() <event.before_event>`, :py:func:`after_event()<event.after_event>`
или :py:func:`error_event() <event.error_event>` из :ref:`модуля event <capi-templates>`.
События происходят при операциях изменения данных,
поэтому смысл этих функций сводится к следующему:
* before_event() - действие перед изменением данных;
* after_event() - действие после изменения данных;
* error_event() - обработка ошибки изменения данных.
"""
# Для включения обработки событий переименуйте этот файл в event.py
# Подробнее об обработке событий смотрите в документации - http://cerebrohq.com/documentation/ru/
import cerebro
import pstatuses.src
# Файлы с примерами лежат в папке ./examples
# Чтобы включить тот или иной пример, необходимо раскомментировать строку с вызовом примера
def before_event(event):
    # Hook called before a data-changing event; intentionally a no-op here.
    pass
def after_event(event):
    # Delegate post-event processing to the pstatuses package handler.
    pstatuses.src.event.after_event(event)
def error_event(error, event):
    # Hook called when a data-change event fails; intentionally a no-op here.
    pass
|
"""
Classes from the 'FeatureFlagsSupport' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
FFConfiguration = _Class("FFConfiguration")
FFFeatureState = _Class("FFFeatureState")
FFFeatureAttribute = _Class("FFFeatureAttribute")
|
# -*- coding: utf-8 -*-
from . import get_user_model
class SuBackend(object):
    """Authentication backend used to switch ("su") into another user's account."""

    supports_inactive_user = False

    def authenticate(self, su=False, user_id=None, **kwargs):
        # Participate only in explicit su requests; let other backends handle
        # ordinary logins.
        if not su:
            return None
        manager = get_user_model()._default_manager  # pylint: disable=W0212
        try:
            return manager.get(pk=user_id)
        except (get_user_model().DoesNotExist, ValueError):
            # unknown pk, or a pk that cannot be coerced to the key type
            return None

    def get_user(self, user_id):
        manager = get_user_model()._default_manager  # pylint: disable=W0212
        try:
            return manager.get(pk=user_id)
        except get_user_model().DoesNotExist:
            return None
|
import sys
from collections import deque
input = sys.stdin.readline
for testcase in range(int(input())) :
    q = []
    n = int(input())
    # Read n+2 points: start, n convenience stores, and the festival site.
    for pyun in range(n+2) :
        px, py = map(int,input().split())
        q.append((px,py))
    # v[i][j]: minimum hop count between points; 10e7 (= 1e8) acts as infinity.
    v = [[10e7 for i in range(n+2)] for _ in range(n+2)]
    for i in range(n+2) :
        for j in range(n+2) :
            if i == j :
                continue
            # Two points are directly reachable when their Manhattan
            # distance is at most 1000.
            distance = abs(q[i][0] - q[j][0]) + abs(q[i][1] - q[j][1])
            if distance <= 1000 :
                v[i][j] = 1
    # Floyd-Warshall over the reachability graph.
    for k in range(n+2) :
        for i in range(n+2) :
            for j in range(n+2) :
                if (v[i][j] > v[i][k] + v[k][j]) :
                    v[i][j] = v[i][k] + v[k][j]
    # Reachable from start (0) to festival (last point)?
    if v[0][-1] == 10e7 :
        print('sad')
    else :
        print('happy')
|
""" This is a solution to an exercise from
Think Python, 2nd Edition
by Allen Downey
http://thinkpython2.com
Copyright 2015 Allen Downey
License: http://creativecommons.org/licenses/by/4.0/
Exercise 10-2:
Write a function called cumsum that takes a list of numbers and returns the cumulative
sum; that is, a new list where the ith element is the sum of the first i+1 elements from
the original list.
"""
def cumsum(t):
    """Return a new list whose i-th element is the sum of t[0:i+1].

    BUGFIX: the previous version overwrote the elements of ``t`` in place
    and returned the same list object, contradicting both its docstring and
    the exercise statement ("returns ... a new list"). The input is now left
    unmodified.
    """
    result = []
    running_sum = 0
    for value in t:
        running_sum += value
        result.append(running_sum)
    return result
# Demonstrate cumsum on a small sample and print each running total.
t = cumsum([1, 2, 3, 4])
for value in t:
    print(value)
from data_analysis_base import AnalysisBaseClass
def get_ids(fname):
    """Return the leading whitespace-separated integer from each line of fname."""
    with open(fname, 'r') as handle:
        lines = handle.readlines()
    return [int(line.split()[0]) for line in lines]
# Crete analysis tools
# Get caption results
# Collect (model name, caption file) pairs for every model under comparison.
caption_paths = []
base_dir = '../final_captions_eccv2018/'
baseline_ft = ('Baseline-FT', base_dir + 'baseline_ft.json')
caption_paths.append(baseline_ft)
uw = ('UpWeight', base_dir + 'upweight.json')
caption_paths.append(uw)
balanced = ('Balanced', base_dir + 'balanced.json')
caption_paths.append(balanced)
confident = ('Equalizer w/o ACL', base_dir + 'confident.json')
caption_paths.append(confident)
acl = ('Equalizer w/o Confident', base_dir + 'confusion.json')
caption_paths.append(acl)
equalizer = ('Equalizer', base_dir + 'equalizer.json')
caption_paths.append(equalizer)
analysis_computer = AnalysisBaseClass(caption_paths)
# Ground-truth COCO captions used for bias statistics on both splits.
# BUGFIX: gt_captions was referenced below without ever being defined
# (NameError at runtime); bind it to the annotation file the test-split call
# already used. TODO(review): confirm the dev split should use the same file.
gt_captions = '../data/captions_val2014.json'
# Get datasets
shopping_dev_split = analysis_computer.get_shopping_split()
shopping_dev_split_ids = analysis_computer.convert_filenames_to_ids(
    shopping_dev_split
)
shopping_test_split = analysis_computer.get_shopping_split(
    fpath='../data/bias_splits/test.data'
)
shopping_test_split_ids = analysis_computer.convert_filenames_to_ids(
    shopping_test_split
)
print('dev')
analysis_computer.bias_amplification_objects_stats(gt_captions, shopping_dev_split_ids)
print('test')
analysis_computer.bias_amplification_objects_stats(gt_captions, shopping_test_split_ids)
import time
import random
#Updated 8/24
# Interactive text adventure: intro, character creation loop, then a simple
# action menu driven by single-letter input.
print('Please refer to README.md before playing.\n')
time.sleep(1)
print('There is darkness, all around you. You have no idea where you are.')
print('A voice calls out to you.\n')
time.sleep(1)
#Character Maker
charComplete = ('no')
# Loop until the player confirms the character sheet with y/yes.
while charComplete in ['n', 'NO','no','N','No']:
    charName = input('What is your name?:')
    charAge = input('How old are you?:')
    charRace = input('What is your race?:')
    charGender = input('What is your gender?:')
    charClass = input('What is your class?:')
    print("\nYour are %s, the %s year old %s %s %s.\n" % (charName, charAge, charRace,charGender, charClass))
    charComplete = input('Is this information correct?(y/n):')
    time.sleep(1)
    if charComplete in ['y','YES','yes','Y', 'Yes']:
        print('\nGood! Character creation complete!\n')
        break
    elif charComplete in ['n', 'NO','no','N','No']:
        print('\nOh sorry. I am a lot less sharp than I used to be. Could you repeat that all again for me?\n')
    else:
        # Unrecognized answer: reset the flag so the while condition repeats.
        charComplete = ('no')
        print('\nI am afraid I do not understand your answer. Could you repeat that all again for me?\n')
#Character Maker
time.sleep(1)
print('The darkness fades and you can suddenly see much more clearly. Your vision adjusts to find that you are in a small bed.')
print('A middle aged dwarf looks down on you from a nearby chair. He speaks.\n')
time.sleep(1)
print('Voldrek: Aye, glad to see your still alive. I thought ye might be dead considering I found ye on the side of the road.\n')
charAction = ('INVALID')
# Menu loop: branches re-set charAction to 'INVALID' to show the menu again;
# any branch that does not exits the loop.
while charAction in ['INVALID']:
    print('What do you do?:')
    print('A) Speak')
    print('B) Inspect')
    print('C) Run')
    print('D) End')
    charAction = input ('Input:')
    if charAction in ['a','A']:
        print('\nWhat do you say?:')
        print('A) Who are you?')
        print('B) Where am I?')
        charSpeak = input ('Input:')
        # NOTE(review): only answer A is handled — choosing B ("Where am I?")
        # silently ends the loop; confirm whether a response is missing.
        if charSpeak in ['a','A']:
            charAction = ('INVALID')
            print('\nVoldrek: I am Voldrek Stormhelm, the simple blacksmith here in the town of dirt.\n')
            time.sleep(1)
    elif charAction in ['b','B']:
        print('What do you want to inspect?:')
        print('A) The Bed')
        print('B) The Bookshelf')
        print('C) The Dwarf')
        print('D) The Room')
        charInspect = input ('Input:')
        if charInspect in ['a','A']:
            charAction = ('INVALID')
            print('The bed you lay in is small, but soft. The bead spread is a bright red.')
        elif charInspect in ['b','B']:
            charAction = ('INVALID')
            print('The bookshelf contains many books of various shapes and sizes. Many look old and mysterious.')
        elif charInspect in ['c','C']:
            charAction = ('INVALID')
            print('The old dwarf sits at the foot of the bed. His face is red and stern. His long grey beard drapes across his lap.')
        elif charInspect in ['d','D']:
            charAction = ('INVALID')
            print('The room is full of dusty furnishings and cobwebs. Only you and the dwarf reside in the room.')
        else:
            # NOTE(review): charAction is not reset here, so a bad inspect
            # choice exits the game loop — confirm intended behavior.
            print('Incorrect Input')
    elif charAction in ['c','C']:
        charAction = ('INVALID')
        print('You feel too weak to run!')
    # NOTE(review): option D ("End") and unrecognized inputs fall through
    # with no message and simply terminate the loop.
|
# loader_Observer.py 4-Apr
# Progress prints bracket the imports so notebook users see load status.
print("loader_Observer.py..")
import matplotlib.pyplot as plt
import numpy as np
import ipywidgets as widgets
print("done.")
#end of module
# Generated by Django 3.0.5 on 2020-06-07 02:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes every listed column on the
    # `details` and `history` models to allow NULL values.

    dependencies = [
        ('main_app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='details',
            name='city',
            field=models.CharField(max_length=15, null=True),
        ),
        migrations.AlterField(
            model_name='details',
            name='confirm',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='details',
            name='confirm_add',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='details',
            name='dead',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='details',
            name='heal',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='details',
            name='province',
            field=models.CharField(max_length=15, null=True),
        ),
        migrations.AlterField(
            model_name='details',
            name='update_time',
            field=models.DateTimeField(null=True),
        ),
        migrations.AlterField(
            model_name='history',
            name='confirm',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='history',
            name='confirm_add',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='history',
            name='dead',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='history',
            name='dead_add',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='history',
            name='heal',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='history',
            name='heal_add',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='history',
            name='suspect',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='history',
            name='suspect_add',
            field=models.IntegerField(null=True),
        ),
    ]
|
import numpy as np
def partition(a, left, right):
    """Partition a[left:right+1] in place around the pivot value a[right].

    Returns the pivot's final index: elements left of it are <= pivot and
    elements right of it are >= pivot.
    """
    pivot = right  # pivot element stays parked at the right end during the scan
    right -= 1
    while left < right:
        # advance left past elements already <= pivot
        while left <= right and a[left] <= a[pivot]:
            left += 1
        # retreat right past elements already >= pivot
        while left <= right and a[right] >= a[pivot]:
            right -= 1
        if left < right:
            a[left], a[right] = a[right], a[left]
    # move the pivot into its final slot if a larger element stopped the scan
    if left < pivot and a[left] > a[pivot]:
        a[left], a[pivot] = a[pivot], a[left]
    return left
#use median3 for better run time
def quick_select(a, left, right, k):
    """Return the k-th smallest element of a[left:right+1], or False if k is invalid."""
    if k <= 0 or right - left + 1 < k:
        return False
    pivot = partition(a, left, right)
    rank = pivot - left + 1  # 1-based rank of the pivot within this window
    if rank == k:
        return a[pivot]
    if rank > k:
        return quick_select(a, left, pivot - 1, k)
    # target lies in the right partition; k shrinks by the elements skipped
    return quick_select(a, pivot + 1, right, k - rank)
def k_select(a, left, right, k):
    """Return the index of the k-th smallest element of a[left:right+1], or False."""
    if k <= 0 or right - left + 1 < k:
        return False
    pivot = partition(a, left, right)
    rank = pivot - left + 1  # 1-based rank of the pivot within this window
    if rank == k:
        return pivot
    if rank > k:
        return k_select(a, left, pivot - 1, k)
    # target lies in the right partition; k shrinks by the elements skipped
    return k_select(a, pivot + 1, right, k - rank)
def max_k_numbers(a, left, right, k):
    """Return the k largest elements of a (order unspecified), or False for invalid k."""
    n = len(a)
    if k == n:
        return a
    if 0 < k <= n:
        # position the (n-k)-th smallest; everything after that index is larger
        split = k_select(a, left, right, n - k)
        return a[split + 1:]
    return False
n = int(input('Enter a positive integer: '))
# Random test array of n integers in [-100, 100].
a = np.random.randint(-100, 101, n)
print(a)
msg = 'Enter a number between 1 to ' + str(n) + ': '
k = int(input(msg))
left = 0
right = len(a) - 1
# quick_select reorders the array in place, so run the second query on a copy.
b=a.copy()
print('the', k, '-th least element is ', quick_select(a, left, right, k))
print('the max', k, 'numbers of elements are\n', max_k_numbers(b, left, right, k))
|
import requests
def getHTMLText(url):
    """Fetch ``url`` and return its decoded text; on failure return an error string.

    BUGFIX: the old handler evaluated ``"出现异常: " + r.status_code``, which
    raised TypeError (str + int) and, when requests.get() itself failed before
    ``r`` was bound, NameError. It also used a bare ``except:``. Capture the
    exception explicitly and stringify it instead.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        # decode using the encoding sniffed from the body
        r.encoding = r.apparent_encoding
        return r.text
    except Exception as e:
        return "出现异常: " + str(e)
if __name__ == "__main__":
url = "https://item.jd.com/2967929.html"
print(getHTMLText(url)[:1000]) #只返回前1000个字符 |
from ll import *
# Shared fixture: a two-element linked list ("a" "b").
test_list = cons("a", cons("b", nil))
def test_basic():
    # head/tail accessors walk the cons cells.
    assert head(test_list) == "a"
    assert head(tail(test_list)) == "b"
def test_length():
    assert len(nil) == 0
    assert len(test_list) == 2
def test_iter():
    # The list supports iteration and membership tests.
    assert list(test_list) == ["a", "b"]
    assert "b" in test_list
def test_map():
    # ll's map must return a sized linked list, not a builtin iterator.
    lstlst = map(lambda x: x * 2, test_list)
    assert len(lstlst) == 2
    assert list(lstlst) == ["aa", "bb"]
def test_filter():
    x = filter(lambda x: x.startswith('b'), test_list)
    assert list(x) == ['b']
def test_foldl():
    # left fold over the elements with string concatenation
    assert foldl(lambda a, b: a + b, "", test_list) == "ab"
def test_repr():
    assert repr(nil) == "()"
    assert repr(test_list) == "('a' 'b')"
def test_from_iter():
    assert LinkedList.from_iter(["a", "b"]) == test_list
def test_class():
    # NOTE(review): subclassing the *instance* test_list implies ll's cons
    # cells are (meta)classes and that the subclass name becomes the new
    # head — confirm against ll's implementation.
    class c(test_list):
        pass
    assert tail(c) == test_list
    assert head(c) == 'c'
    assert len(c) == 3
|
import sqlite3
import json
#Connect to database
#Connect to database
conn=sqlite3.connect('csc455.db')
#Request a cursor from the database
c=conn.cursor()
#Create-table DDL for the Twitter table (id_str is the primary key)
TwitterTable= '''CREATE TABLE Twitter
(
created_at VARCHAR(50),
id_str NUMBER(50),
text VARCHAR(160),
source VARCHAR(100),
in_reply_to_user_id VARCHAR(25),
in_reply_to_screen_name VARCHAR(25),
in_reply_to_status_id VARCHAR(25),
retweet_count NUMBER(5),
contributors VARCHAR(25),
CONSTRAINT TwitterPK
Primary Key(id_str)
);'''
#Drop tables if they exists
c.execute("DROP TABLE IF EXISTS Twitter")
#Create the tables
c.execute(TwitterTable)
#Open and read file
fd = open('/Users/sarahcummings/Documents/csc455/Assignment4.txt', 'r', encoding='utf8')
# BUGFIX: readline() returned only the first line, silently dropping every
# tweet after the first newline; read() loads the whole file before splitting
# on the end-of-tweet delimiter.
tweetList = fd.read().split('EndOfTweet')
fd.close()
for tweet in tweetList:
    # BUGFIX: the split leaves an empty trailing segment (and possibly
    # whitespace-only ones) that json.loads would reject; skip them.
    tweet = tweet.strip()
    if not tweet:
        continue
    decoded_line = json.loads(tweet)
    insertvalues2 = (decoded_line.get(u'created_at'), decoded_line.get(u'id_str'), decoded_line.get(u'text'), decoded_line.get(u'source'), decoded_line.get(u'in_reply_to_user_id'), decoded_line.get(u'in_reply_to_screen_name'), decoded_line.get(u'in_reply_to_status_id'), decoded_line.get(u'retweet_count'), decoded_line.get(u'contributors'))
    # parameterized insert — values are bound, never string-formatted
    c.execute('INSERT INTO Twitter VALUES (?,?,?,?,?,?,?,?,?);', insertvalues2)
#Use the code in comments below to see what's in the table
#allSelectedRows = c.execute("SELECT * FROM Twitter;").fetchall()
#for eachRow in allSelectedRows:
    #for value in eachRow:
        #print (value, "\t",)
    #print ("\n",) # \n is the end of line symbol
conn.commit()
conn.close()
|
# Solution of;
# Project Euler Problem 302: Strong Achilles Numbers
# https://projecteuler.net/problem=302
#
# A positive integer n is powerful if p^2 is a divisor of n for every prime
# factor p in n. A positive integer n is a perfect power if n can be expressed
# as a power of another positive integer. A positive integer n is an Achilles
# number if n is powerful but not a perfect power. For example, 864 and 1800
# are Achilles numbers: 864 = 2^5·3^3 and 1800 = 2^3·3^2·5^2. We shall call a
# positive integer S a Strong Achilles number if both S and φ(S) are Achilles
# numbers. For example, 864 is a Strong Achilles number: φ(864) = 288 =
# 2^5·3^2. However, 1800 isn't a Strong Achilles number because: φ(1800) = 480 =
# 2^5·3·5. There are 7 Strong Achilles numbers below 10^4 and 656 below 10^8.
# How many Strong Achilles numbers are there below 10^18? φ denotes Euler's
# totient function.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    # Placeholder: the actual Problem 302 solver is not implemented yet.
    pass
if __name__ == '__main__':
    n = 1000
    i = 10000
    prob_id = 302
    # NOTE(review): timed.caller's exact semantics (repeat count, reporting)
    # are defined in the project's `timed` module — confirm before relying.
    timed.caller(dummy, n, i, prob_id)
|
# -*- coding: utf-8 -*-
from anima.dcc import empty_reference_resolution
from anima.dcc.base import DCCBase
from anima.testing import count_calls
class TestEnvironment(DCCBase):
    """A test DCC which just raises errors to check if the correct
    method has been called
    """

    name = "TestEnv"
    representations = ["Base", "BBox", "GPU", "ASS"]
    # per-method call registry: {method name: {"call count": int, "data": Any}}
    test_data = {}

    def __init__(self, name="TestEnv"):
        DCCBase.__init__(self, name=name)
        # initialize test_data counter.
        # BUGFIX: the original looped ``for f in dir(self)`` and tested
        # ``callable(f)`` — dir() yields attribute *names* (strings), which
        # are never callable, so no entry was ever registered (and
        # ``f.__name__`` would have raised AttributeError on a string).
        # Resolve each name with getattr() and key the entry by the name.
        for attr_name in dir(self):
            if callable(getattr(self, attr_name)):
                self.test_data[attr_name] = {"call count": 0, "data": None}
        self._version = None

    @count_calls
    def export_as(self, version):
        pass

    @count_calls
    def save_as(self, version, run_pre_publishers=True):
        pass

    @count_calls
    def open(
        self,
        version,
        force=False,
        representation=None,
        reference_depth=0,
        skip_update_check=False,
    ):
        # Opening a version makes it current and triggers a deep reference check.
        self._version = version
        return self.check_referenced_versions()

    @count_calls
    def reference(self, version):
        pass

    @count_calls
    def import_(self, version):
        pass

    @count_calls
    def get_last_version(self):
        """mock version of the original this returns None all the time"""
        return None

    @count_calls
    def get_current_version(self):
        return self._version

    @count_calls
    def get_referenced_versions(self):
        return self._version.inputs

    @count_calls
    def check_referenced_versions(self):
        """Deeply checks all the references in the scene and returns a
        dictionary which uses the ids of the Versions as key and the action as
        value.
        Uses the top level references to get a Stalker Version instance and
        then tracks all the changes from these Version instances.
        :return: list
        """
        # reverse walk in DFS
        dfs_version_references = []
        version = self.get_current_version()
        resolution_dictionary = empty_reference_resolution(
            root=self.get_referenced_versions()
        )
        # TODO: with Stalker v0.2.5 replace this with Version.walk_inputs()
        for v in version.walk_hierarchy():
            dfs_version_references.append(v)
        # pop the first element which is the current scene
        dfs_version_references.pop(0)
        # iterate back in the list
        for v in reversed(dfs_version_references):
            # check inputs first
            to_be_updated_list = []
            for ref_v in v.inputs:
                if not ref_v.is_latest_published_version():
                    to_be_updated_list.append(ref_v)
            if to_be_updated_list:
                action = "create"
                # check if there is a new published version of this version
                # that is using all the updated versions of the references
                latest_published_version = v.latest_published_version
                if latest_published_version and not v.is_latest_published_version():
                    # so there is a new published version
                    # check if its children needs any update
                    # and the updated child versions are already
                    # referenced to the this published version
                    if all(
                        [
                            ref_v.latest_published_version
                            in latest_published_version.inputs
                            for ref_v in to_be_updated_list
                        ]
                    ):
                        # so all new versions are referenced to this published
                        # version, just update to this latest published version
                        action = "update"
                    else:
                        # not all references are in the inputs
                        # so we need to create a new version as usual
                        # and update the references to the latest versions
                        action = "create"
            else:
                # nothing needs to be updated,
                # so check if this version has a new version,
                # also there could be no reference under this referenced
                # version
                if v.is_latest_published_version():
                    # do nothing
                    action = "leave"
                else:
                    # update to latest published version
                    action = "update"
            # before setting the action check all the inputs in
            # resolution_dictionary, if any of them are update, or create
            # then set this one to 'create'
            if any(
                rev_v in resolution_dictionary["update"]
                or rev_v in resolution_dictionary["create"]
                for rev_v in v.inputs
            ):
                action = "create"
            # so append this v to the related action list
            resolution_dictionary[action].append(v)
        return resolution_dictionary

    @count_calls
    def update_first_level_versions(self, reference_resolution):
        """Updates the versions to the latest version.
        :param reference_resolution: A dictionary with keys 'leave', 'update'
        and 'create' with a list of :class:`~stalker.models.version.Version`
        instances in each of them. Only 'update' key is used and if the
        Version instance is in the 'update' list the reference is updated to
        the latest version.
        """
        latest = []
        for version in self._version.inputs:
            latest_published_version = version.latest_published_version
            latest.append(latest_published_version)
        self._version.inputs = latest

    @count_calls
    def update_versions(self, reference_resolution):
        """A mock update_versions implementation, does the update indeed but
        partially.
        :param reference_resolution: The reference_resolution dictionary
        :return: a list of new versions
        """
        # first get the resolution list
        new_versions = []
        from stalker import Version
        # store the current version
        current_version = self.get_current_version()
        # loop through 'create' versions and update their references
        # and create a new version for each of them
        for version in reference_resolution["create"]:
            local_reference_resolution = self.open(version, force=True)
            # save as a new version
            new_version = Version(
                task=version.task,
                take_name=version.take_name,
                parent=version,
                description="Automatically created with " "Deep Reference Update",
            )
            new_version.is_published = True
            for v in self._version.inputs:
                new_version.inputs.append(v.latest_published_version)
            new_versions.append(new_version)
        # check if we are still in the same scene
        current_version_after_create = self.get_current_version()
        if current_version:
            if current_version != current_version_after_create:
                # so we are in a different scene just reopen the previous scene
                self.open(current_version)
        # we got a new local_reference_resolution but we should have given
        # a previous one, so use it,
        #
        # append all the 'create' items to 'update' items,
        # so we can update them with update_first_level_versions()
        reference_resolution["update"].extend(reference_resolution["create"])
        self.update_first_level_versions(reference_resolution)
        return new_versions
|
class Scene:
    """Plain container bundling the components of a renderable scene.

    Nothing is validated or copied; every argument is stored verbatim on the
    instance under the same attribute name.
    """

    def __init__(self, camera, shapes, bsdfs, mediums, phases, area_lights):
        # Attributes are created in the same order as the original
        # implementation (camera, shapes, bsdfs, area_lights, mediums, phases).
        for attr, value in (
            ("camera", camera),
            ("shapes", shapes),
            ("bsdfs", bsdfs),
            ("area_lights", area_lights),
            ("mediums", mediums),
            ("phases", phases),
        ):
            setattr(self, attr, value)
|
from math import *
from numpy import *
from scipy import *
import cosmolopy.constants as cc
import cosmolopy.distance as cd
import cosmolopy.perturbation as cp
import matplotlib.pyplot as plt
from scipy.integrate import quad
from scipy import special
#******************************************************
#FUNCTIONS
#******************************************************
def dNOverdz(zmin, zmax, c1, c2, c3):
    """Fitted dN/dz evaluated at the midpoint of the [zmin, zmax] bin.

    Implements 10**c1 * z**c2 * exp(-c3*z) with z = (zmin + zmax) / 2,
    where (c1, c2, c3) are the fit coefficients for one flux cut.
    """
    midpoint = (zmin + zmax) / 2.0
    return 10 ** c1 * midpoint ** c2 * exp(-c3 * midpoint)
def NOfz(zmin, zmax, c1, c2, c3):
    """Integrate dNOverdz over [zmin, zmax] with scipy.integrate.quad.

    NOTE(review): quad passes args=(zmax, c1, c2, c3) to dNOverdz, so the
    integrand receives the integration variable in its "zmin" slot and this
    bin's zmax in its "zmax" slot, i.e. it is evaluated at z = (x + zmax) / 2
    rather than at x itself -- confirm this midpoint shift is intentional.
    """
    return quad(dNOverdz, zmin, zmax, args=(zmax, c1, c2, c3,))[0]
def D(z):
    """Linear growth factor at redshift z via cosmolopy.perturbation.fgrowth.

    NOTE(review): relies on a global ``omega_M_0`` that is never defined in
    this module -- it must be provided elsewhere before calling; verify.
    """
    return cp.fgrowth(z, omega_M_0, unnormed=False)
# NOTE(review): Python 2 script (print statements below); it will not run
# under Python 3 without converting the prints.
if __name__ == "__main__":
    #*****************Testing the Functions**************************
    import doctest
    doctest.testmod()
    #*********************INPUT DATA*********************************
    # Load the tabulated dN/dz columns (one column per flux-rms cut).
    (x, total, rms0muJy,rms1muJy, rms3muJy, rms5muJy, rms6muJy, rms73muJy, rms10muJy, rms23muJy, rms40muJy, rms70muJy, rms100muJy, rms150muJy, rms200muJy) = loadtxt('HIdndzb3.txt', unpack=True)
    # Fit coefficients (c1, c2, c3) per flux cut; see the trailing comments.
    c11 =6.23000844621; c21 = 1.82232488932 ; c31 = 0.89608495919 #0 muJy
    c12 = 7.33473992074 ; c22 = 3.02002614773 ; c32 = 5.33842588543 # 1 muJy
    c5 = 6.91214734152 ; c51 = 2.38186955537 ; c52 = 5.84080403921 # 3 muJy
    c13 = 6.75522312458 ; c23 = 2.13569693813 ; c33 = 7.35533385121 # 7.3 muJy
    c14 = 6.01593890751 ; c24 = 1.43281797508 ; c34 =9.03321046833 # 23 muJy
    c16 =5.6200078849 ; c26 = 1.11377237426 ; c36 =13.0257055979 # 70 muJy
    c15 = 5.6266673048 ; c25 =1.40867290563 ; c35 =15.4937962327 # 100 muJy
    c200 = 5.00377325178 ; c200_2 = 1.04281566255 ; c200_3 = 17.5261229518 #200 muJy
    #****************************************************************************
    # One accumulator of per-bin N(z) values per flux cut.
    dn = [] ; dn_f1 = [] ; dn_f2 = []; dn_f3 = []; dn_f4 = []; dn_f5 = []; dn_f6 = []; dn_f200 = []
    #print x
    # NOTE(review): "xrange" here shadows the Python 2 builtin of the same name.
    xrange = array([ 0.02, 0.04, 0.06, 0.08, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9 , 1.0, 1.1 , 1.2, 1.3 , 1.4 , 1.5 , 1.6 ,1.7 , 1.8, 1.9, 2.0])
    #xrange = array([ 0.116, 0.144, 0.2075 , 0.4079 , 0.6236 , 0.8277, 0.988 , 1.1734 , 1.3897 , 1.6303 , 1.7666 , 2.07])
    #xmin = xrange -0.1
    #xmax =xrange+ 0.1
    # Redshift bin edges (same length as xrange).
    xmin = [ 0.01, 0.03 , 0.05, 0.07, 0.0 , 0.1, 0.2 , 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. , 1.1, 1.2 , 1.3 , 1.4 , 1.5 , 1.6 ,1.7 ,1.8 ,1.9]
    xmax = [ 0.03, 0.05, 0.07, 0.09, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8 , 0.9 , 1. , 1.1 , 1.2 , 1.3 , 1.4 , 1.5 , 1.6 , 1.7 , 1.8, 1.9 , 2., 2.1]
    # Per-bin survey parameters (constant across bins here).
    kmax = empty(len(xrange)); kmax.fill(0.2)
    err_z =empty(len(xrange)); err_z.fill(0.0)
    volume =empty(len(xrange)); volume.fill(30000.0)
    bias = empty(len(xrange));bias.fill(1.0)
    #z = xrange
    print
    print '==============================RESULTS========================='
    print
    print 'xrange = ', xrange
    print 'The Output dndz for 1, 0, 3, 7.3, 23, 70, 100 muJy has been saved in: data_all_NOfz_SAX3_diff_14bin_new.txt'
    print
    print '======================Thanks!========================================'
    # Integrate dN/dz over every redshift bin for each flux cut.
    for i in range(len(xmin)):
        dn.append(NOfz(xmin[i],xmax[i], c11,c21,c31)) ;dn_f1.append(NOfz(xmin[i],xmax[i], c12,c22,c32)); dn_f2.append(NOfz(xmin[i],xmax[i], c13,c23,c33)); dn_f3.append(NOfz(xmin[i],xmax[i], c14,c24,c34)); dn_f4.append(NOfz(xmin[i],xmax[i], c15,c25,c35)) ; dn_f5.append(NOfz(xmin[i],xmax[i], c16,c26,c36)); dn_f6.append(NOfz(xmin[i],xmax[i], c5,c51,c52)); dn_f200.append(NOfz(xmin[i],xmax[i],c200,c200_2,c200_3))
    # Stack z and the per-cut N(z) columns side by side and save as text.
    data_all_NOfz= concatenate((reshape(xrange,(len(xrange),1)),reshape(dn,(len(xrange),1)),reshape(dn_f1,(len(xrange),1)),reshape(dn_f6,(len(xrange),1)),reshape( dn_f2,(len(xrange),1)),reshape(dn_f3,(len(xrange),1)), reshape(dn_f5,(len(xrange),1)), reshape(dn_f4,(len(xrange),1)), reshape(dn_f200,(len(xrange),1))),axis=1)
    savetxt('data_all_NOfz_SAX3_diff_14bin_new.txt' , data_all_NOfz)
|
"""empty message
Revision ID: 47b837ea14fa
Revises: 523f8db3a8ac
Create Date: 2017-06-18 08:51:36.170425
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '47b837ea14fa'
down_revision = '523f8db3a8ac'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add the nullable integer column
    ``message_create_time`` to the ``message`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('message', sa.Column('message_create_time', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop ``message_create_time`` from ``message``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('message', 'message_create_time')
    # ### end Alembic commands ###
|
from src_describe.standard_deviation import standard_deviation
from src_describe.sum_values import sum_values
from src_describe.count_values import count_values
from src_histogram.get_num_headers import get_num_headers
from src_scatter_plot.calc_cov import calc_cov
"""
GOAL: return a table of all Pearson correlations between the variables
For all couples of subjects x and y where x != y:
r = cov(X, Y) / (ecart_type(X) * (ecart_type(Y))
"""
def calc_pearson_cor(values):
    """Return a nested dict of Pearson correlations between numeric columns.

    :param values: mapping of column name -> list of values; only the columns
        reported by get_num_headers(values) are considered.
    :return: dict ``r`` with ``r[h1][h2] = cov(h1, h2) / (std(h1) * std(h2))``.
        The diagonal (h1 == h2) is deliberately left at 0, matching the module
        comment ("for all couples where x != y").

    NOTE(review): a constant column (std == 0) would raise ZeroDivisionError
    here -- confirm inputs can never contain one.
    """
    num_headers = get_num_headers(values)
    # Pre-build the full table so every (h1, h2) cell exists.
    pearson_cor = {head: {elem: 0 for elem in num_headers} for head in num_headers}
    for h1 in num_headers:
        for h2 in num_headers:
            if h1 == h2:
                pearson_cor[h1][h2] = 0
            else:
                cov = calc_cov(values[h1], values[h2])
                # standard_deviation(values, mean, count); the mean is computed inline.
                std_1 = standard_deviation(values[h1], sum_values(values[h1]) / len(values[h1]), count_values(values[h1]))
                std_2 = standard_deviation(values[h2], sum_values(values[h2]) / len(values[h2]), count_values(values[h2]))
                pearson_cor[h1][h2] = cov / (std_1 * std_2)
    return (pearson_cor)
'''
Django app for shared code and functionality used
across other applications within the project. Currently includes
abstract database models with common functionality or fields that are
used by models in multiple apps within the project.
'''
import rdflib
default_app_config = 'mep.common.apps.CommonConfig'
SCHEMA_ORG = rdflib.Namespace('http://schema.org/')
|
#! /usr/bin/python3
import zmq
import os
import sys
class SignalingClientHelper():
    """Thin ZeroMQ REQ-socket wrapper used to synchronise with a signaling server."""

    def Connect(self, ):
        """Open a REQ socket to the server's IPC endpoint under the workspace root."""
        context = zmq.Context()
        # Workspace root = parent of the directory containing this script.
        workspace_root = os.path.abspath(os.path.dirname(sys.argv[0]) + '/../')
        print( "Connecting to signaling server at %s/zmqsockmbt" %(workspace_root))
        self.socket = context.socket(zmq.REQ)
        self.socket.connect("ipc://%s/zmqsockmbt" % workspace_root)

    def SendSignalingData(self, string):
        """Send one text frame to the server."""
        self.socket.send_string(string)

    def Wait(self, ):
        """Block until the server replies; the reply must be exactly "Proceed"."""
        reply = self.socket.recv()
        assert reply.decode("utf-8") == "Proceed"
# Module-level singleton instance shared by importers of this script.
SignalingClient = SignalingClientHelper()
|
import pytest
from parseval.parser import FloatParser
from parseval.exceptions import (
UnexpectedParsingException,
NullValueInNotNullFieldException,
UnsupportedDatatypeException,
ValidValueCheckException,
MaximumValueConstraintException,
MinimumValueConstraintException
)
# Valid value tests
def test_valid_value():
    """Numeric input (float or numeric string) parses to float; garbage raises."""
    parse = FloatParser().build()
    assert parse(1.0) == 1.0
    assert parse("1") == 1.0
    assert parse(1.0) == 1.0
    with pytest.raises(UnexpectedParsingException):
        assert parse("1a")
# Quoted string handling tests
def test_non_quoted_data():
    """Unquoted numeric input passes straight through the default parser."""
    parse = FloatParser().build()
    raw = 1.0
    assert parse(raw) == 1.0
def test_double_quoted_data():
    """quoted=1 strips surrounding double quotes before the float conversion."""
    parse = FloatParser(quoted=1).build()
    raw = '"1.0"'
    assert parse(raw) == 1.0
def test_single_quoted_data():
    """quoted=2 strips surrounding single quotes before the float conversion."""
    parse = FloatParser(quoted=2).build()
    raw = "'1.0'"
    assert parse(raw) == 1.0
# Type enforce tests
def test_enforce_type():
    """With enforce_type=False the raw string is returned unconverted."""
    parse = FloatParser(enforce_type=False).build()
    raw = "1.0"
    assert parse(raw) == "1.0"
# Validators test
def test_not_null_validator():
    """not_null() rejects None/empty input unless a default value is given."""
    parse = FloatParser(quoted=0).not_null().build()
    assert parse("123.50") == 123.5
    with pytest.raises(NullValueInNotNullFieldException):
        assert parse(None)
        assert parse("")
    # Default value assignment check
    parse = FloatParser(quoted=0).not_null(default_value=0).build()
    assert parse(None) == 0
    assert parse("") == 0
    # A non-numeric default is rejected while building the parser.
    with pytest.raises(UnsupportedDatatypeException):
        FloatParser(quoted=0).not_null(default_value="0").build()
def test_value_set_validator():
    """value_set() admits only the listed float values."""
    allowed = [100.50, 200.60, 300.70]
    parse = FloatParser(quoted=0).value_set(allowed).build()
    assert parse(100.50) == 100.50
    assert parse("300.70") == 300.70
    with pytest.raises(ValidValueCheckException):
        assert parse('201')
    # The allowed values themselves must be floats.
    with pytest.raises(UnsupportedDatatypeException):
        FloatParser(quoted=0).value_set(["100", "200", "300"]).build()
def test_max_value_validator():
    """max_value() enforces an upper bound on the parsed float."""
    parse = FloatParser(quoted=0).max_value(100.9).build()
    assert parse('100.8') == 100.8
    assert parse('100.89') == 100.89
    with pytest.raises(MaximumValueConstraintException):
        assert parse(101)
    # The bound itself must be numeric.
    with pytest.raises(UnsupportedDatatypeException):
        FloatParser(quoted=0).max_value("300").build()
def test_min_value_validator():
    """min_value() enforces a lower bound on the parsed float."""
    parse = FloatParser(quoted=0).min_value(99.99).build()
    assert parse('99.99') == 99.99
    assert parse('99.991') == 99.991
    with pytest.raises(MinimumValueConstraintException):
        assert parse(10)
    # The bound itself must be numeric.
    with pytest.raises(UnsupportedDatatypeException):
        FloatParser(quoted=0).min_value("300").build()
def test_range_validator():
    """range() combines a lower and an upper bound in one validator."""
    parse = FloatParser(quoted=0).range(lower_bound=99.99, upper_bound=100.0).build()
    assert parse('99.99') == 99.99
    assert parse('100') == 100
    assert parse('99.999') == 99.999
    with pytest.raises(MaximumValueConstraintException):
        assert parse(100.0000000000001)
    with pytest.raises(MinimumValueConstraintException):
        assert parse(99.989999999)
def _parity_check(data):
if data:
i_data = float(data)
if i_data % 2 != 0:
raise Exception("The data has to be even!")
return data
def test_add_func_validator():
    """add_func() plugs the custom parity validator into the pipeline."""
    parse = FloatParser().add_func(_parity_check).build()
    assert parse(298.0) == 298.0
    with pytest.raises(Exception):
        parse(298.1)
|
class Solution(object):
    def addStrings(self, num1, num2):
        """Add two non-negative integers given as decimal strings.

        :type num1: str
        :type num2: str
        :rtype: str

        Schoolbook addition from the least-significant digit.  Fixed to be
        Python 2/3 compatible: ``range`` instead of ``xrange`` (NameError on
        py3) and floor division ``//`` instead of ``/`` (true division on py3
        would make ``carry`` a float and corrupt the digits).  Both changes
        are behavior-identical under Python 2.
        """
        # An empty operand contributes nothing; return the other as-is.
        if not num1:
            return num2
        if not num2:
            return num1
        carry = 0
        res = []
        m, n = len(num1), len(num2)
        # Add the overlapping (low-order) digits of both numbers.
        for i in range(min(m, n)):
            tmp = carry + int(num1[m - 1 - i]) + int(num2[n - 1 - i])
            res.append(str(tmp % 10))
            carry = tmp // 10
        # Consume the remaining high-order digits of the longer number.
        if m > n:
            for i in range(n, m):
                tmp = carry + int(num1[m - 1 - i])
                res.append(str(tmp % 10))
                carry = tmp // 10
        else:
            for i in range(m, n):
                tmp = carry + int(num2[n - 1 - i])
                res.append(str(tmp % 10))
                carry = tmp // 10
        # A final carry can only ever be 1 when adding two numbers.
        if carry:
            res.append('1')
        return ''.join(res[::-1])
|
import numpy as np
import nltk
import pickle
# nltk.download()
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
# NLP helpers: lemmatizer/stemmer instances and the English stop-word set.
# NOTE(review): wordLeammer, porterStemmer and stopDict are never used below
# (the vectorizer gets its own stop-word list) -- verify they are not dead code.
wordLeammer=WordNetLemmatizer()
porterStemmer=PorterStemmer()
stopDict=set(stopwords.words('english'))

target=open('/kaggle/input/science.sql','r')
# target=open('source.txt','r')
# Character translation table: quotes/backslashes become spaces, newlines dropped.
remap={
    ord('\''):' ',
    ord('\n'): None,
    ord('\r'): None,
    ord('\\'):' ',
    # ord(','):' '
}
textPast=[]
for x in target:
    x=x.replace('\\n','')
    # inTarget.append(x.translate(remap))
    # Keep only the 6th .. 3rd-from-last comma-separated fields, concatenated.
    textPast.append("".join(x.translate(remap).split(',')[5:-2]))
# Deduplicate while preserving first-seen order.
text=list(set(textPast))
text.sort(key=textPast.index)
# text=text[:100]
# text=text[:1000]
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# Build the TF-IDF matrix over the documents (stop words removed by the vectorizer).
vectorizer=TfidfVectorizer(stop_words=nltk.corpus.stopwords.words('english'))
transformer=TfidfTransformer()
tfidf=transformer.fit_transform(vectorizer.fit_transform(text))
word=vectorizer.get_feature_names()
weight=tfidf.toarray()
def vectorDistance(v1, v2):
    """Cosine similarity between two 1-D vectors: dot(v1, v2) / (|v1|·|v2|).

    NOTE(review): a zero-norm input yields a division warning / nan.
    """
    norm_product = np.linalg.norm(v1) * np.linalg.norm(v2)
    return np.dot(v1, v2) / norm_product
# Pairwise cosine-similarity ranking: for every document, score all documents
# and sort the candidates by descending similarity (rank 0 is the doc itself).
final=[]
for i in range(len(weight)):
    if(i%50==0):
        print("Now",i)
    final.append([])
    for j in range(len(weight)):
        # final[i].append((j,sess.run(tf.reduce_sum(tf.multiply(weight[i],weight[j])))))
        final[i].append((j,vectorDistance(weight[i],weight[j])))
    final[i].sort(key=lambda x:x[1],reverse=True)
import csv
# NOTE(review): these files are opened without newline='' and never closed;
# buffered rows may be lost -- consider "with open(...)" blocks.
csvFile=open('saveCsv.csv','w')
writer=csv.writer(csvFile)
for row in final:
    writer.writerow(row)
csvFile=open('saveText.csv','w')
writer=csv.writer(csvFile)
for row in text:
    writer.writerow(row)
csvFile=open('saveReport.txt','w')
# Report: for each document, its 5 nearest neighbours (skipping itself at index 0).
for i in range(len(final)):
    print("Document",i,file=csvFile)
    for j in range(5):
        print(final[i][j+1],file=csvFile)
|
from typing import Optional
from context import ExecutionContext
from fastapi import FastAPI, Request
from time import sleep
from database import Database
import logging
import logging.config  # BUG FIX: fileConfig lives in the logging.config
                       # submodule; "import logging" alone does not load it.

logging.config.fileConfig('logging.conf', disable_existing_loggers=False)

app = FastAPI()
database_instance = Database()
@app.middleware("http")
async def keep_context(request: Request, call_next):
    """HTTP middleware that wraps each request in an ExecutionContext.

    The context captures the caller's user agent and the ``username`` query
    parameter (with fallbacks) so downstream handlers can retrieve them via
    ExecutionContext.current().
    """
    user_agent = request.headers.get('user-agent', 'Unknown')  # BUG FIX: default was misspelled 'Unkown'
    username = request.query_params.get('username', 'anonymous user')
    with ExecutionContext(user_agent=user_agent, username=username) as context:
        # Process the request within an execution context.
        response = await call_next(request)
        return response
@app.get("/")
def read_root():
    """Root endpoint: a greeting plus a dump of the current execution context."""
    ctx = ExecutionContext.current()
    return {"Hello": "World", "context": dict(ctx)}
@app.get("/pretend-query/{string_to_match}")
def read_item(string_to_match: str, username: Optional[str] = "some-username"):
    """Run the pretend query; ``username`` is consumed by the context
    middleware (it reads the query parameter), not by this handler."""
    # Note the context is not passed as parameter
    return database_instance.pretend_sql_query(string_to_match)
|
# Generated by Django 3.0.8 on 2020-07-25 03:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter curricula.AnioLectivo.nombre to a CharField of max_length=150."""

    dependencies = [
        ('curricula', '0007_delete_alumno'),
    ]

    operations = [
        migrations.AlterField(
            model_name='aniolectivo',
            name='nombre',
            field=models.CharField(max_length=150),
        ),
    ]
|
import shutil
from unittest import TestCase
from FileHandling import *
class TestFileHandling(TestCase):
    """Integration tests for the FileHandling helpers.

    Each test that touches the filesystem works inside a throw-away
    "tempPath" directory removed via addCleanup.
    """

    def test_CreateFileIfNotExist(self):
        # createFilesIfNotExist is expected to create all missing parent
        # directories as well as the file itself.
        os.mkdir("tempPath")
        self.addCleanup(lambda: shutil.rmtree("tempPath"))
        fileName = "tempPath/a/b/c/d/e/file"
        createFilesIfNotExist(fileName)
        self.assertTrue(os.path.exists(fileName))
        self.assertTrue(os.path.isfile(fileName))

    def test_GetFolderPath(self):
        # getFolderPath joins its segments with '/' and appends a trailing slash.
        folderPath = "tempPath/a/b/c/d/e/"
        self.assertEqual(folderPath, getFolderPath("tempPath", "a", "b", "c", "d", "e"))
        self.assertEqual("users/", getFolderPath("users", ))

    def test_WriteAndReadFirstLine(self):
        os.mkdir("tempPath")
        self.addCleanup(lambda: shutil.rmtree("tempPath"))
        filename = "tempPath/fileName"
        string = "1====Name====123456====email@ex.com====h 43, Delhi"
        writeFirstLine(string, filename)
        self.assertEqual(string, readFirstLine(filename))

    def test_WriteAndReadLastLine(self):
        os.mkdir("tempPath")
        self.addCleanup(lambda: shutil.rmtree("tempPath"))
        filename = "tempPath/fileName"
        string = "1====Name====123456====email@ex.com====h 43, Delhi"
        writeFirstLine("SomeRandomText", filename)  # this is necessary else the test will break
        writeLastLine(string, filename)
        self.assertEqual(string, readLastLine(filename))

    def test_WriteThenReadThenWriteAtIdAndThenReadAtId(self):
        # Records appear to be keyed by the leading "<id>====" prefix -- see
        # readLineBySpecifiedId / writeLineBySpecifiedId usage below.
        os.mkdir("tempPath")
        self.addCleanup(lambda: shutil.rmtree("tempPath"))
        filename = "tempPath/fileName"
        string1 = "1====Name====123456====email@ex.com====h 43, Delhi"
        string2 = "2====Name====123456====email@ex.com====h 43, Delhi"
        string3 = "3====Name====123456====email@ex.com====h 43, Delhi"
        string4 = "4====Name====123456====email@ex.com====h 43, Delhi"
        writeFirstLine(string1, filename)
        writeLastLine(string2, filename)
        writeLastLine(string3, filename)
        writeLastLine(string4, filename)
        self.assertEqual(string2, readLineBySpecifiedId(filename, "2"))
        testString = "3====Name====12345678910====email@ex.com====h 43, Delhi"
        writeLineBySpecifiedId(testString, filename, "3")
        self.assertEqual(testString, readLineBySpecifiedId(filename, "3"))

    def test_getFilesByString(self):
        # getFilesByString returns the file names in the folder whose name
        # contains the given substring.
        os.mkdir("tempPath")
        self.addCleanup(lambda: shutil.rmtree("tempPath"))
        createFilesIfNotExist("tempPath/0-name0")  # I should not use this function to create files but
        createFilesIfNotExist("tempPath/1-name1")  # just leave it for this time
        createFilesIfNotExist("tempPath/2-name2")
        createFilesIfNotExist("tempPath/3-name3")
        files = getFilesByString("2", "tempPath/")
        self.assertEqual(1, len(files))
        self.assertEqual("2-name2", files[0])
        files = getFilesByString("name", "tempPath/")
        self.assertEqual(4, len(files))
|
#js DOM can access any elements on web page just like how selenium does
#selenium hav ea method to execute javascript code in it
from selenium import webdriver
# Launch Chrome via a local chromedriver binary and open the practice page.
driver=webdriver.Chrome(executable_path="C:\\chromedriver.exe")
driver.get("https://rahulshettyacademy.com/angularpractice/")
driver.find_element_by_name("name").send_keys("hello")
print(driver.find_element_by_name("name").text)#- user entered value cant print
# .get_attribute("value") does return the typed text, unlike .text above.
print(driver.find_element_by_name("name").get_attribute("value"))
# JavaScript executed in the page can also read the input's value.
print(driver.execute_script('return document.getElementsByName("name")[0].value'))
shopButton= driver.find_element_by_css_selector("a[href*='shop']")
# Click through JS instead of the WebDriver click.
driver.execute_script("arguments[0].click();",shopButton)
#by default selenium does not have scroll method.we have to rely on javascript
driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")
|
#to find the highest value of palindrome
'''max1=0
for i in range(100,1000):
for j in range(100,1000):
x=i*j
y=str(x)
if y==y[::-1]:
if x>max1:
max1=x
a,b=i,j
print(max1,a,b)'''
#to find the factorial
'''count=1
n=5
for i in range(1,n+1):
count=count*i
print(count)'''
#to find the fibonacci series
'''a,b=0,1
for i in range(0,10):
c=a+b
a,b=b,c
print(c)'''
#program workout1(sum of square and square of sum)
'''count1,count2=0,0
for i in range(0,11):
x=i*i
count1=count1+x
a=count1
print(a)
for j in range(0,11):
count2=count2+j
y=count2*count2
print(y)
print(y-a)'''
#prime numbers
|
import cv2
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision
import torchvision.datasets as datasets
from torch.autograd import Variable
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import CNN
# transformation for image
transform_ori = transforms.Compose([
    transforms.RandomResizedCrop(64),     # create 64x64 image
    transforms.RandomHorizontalFlip(),    # flipping image horizontally
    transforms.ToTensor(),                # convert image to Tensor
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])  # normalize image

# Load dataset
train_dataset = datasets.ImageFolder(root = 'dataset/training_set', transform = transform_ori)
test_dataset = datasets.ImageFolder(root = 'dataset/test_set', transform = transform_ori)

# make dataset iterable
batch_size = 65  # changed from 100
train_load = torch.utils.data.DataLoader(
    dataset = train_dataset,
    batch_size = batch_size,
    shuffle = True)  # shuffle to create mixed batches of receipt and non-receipt images
test_load = torch.utils.data.DataLoader(
    dataset = test_dataset,
    batch_size = batch_size,
    shuffle = False)

print('{} images in training set'.format(len(train_dataset)))
# BUG FIX: this line reported the *test* set size with a "training set" label.
print('{} images in test set'.format(len(test_dataset)))
print('{} batches in train loader'.format(len(train_load)))
print('{} batches in test loader'.format(len(test_load)))

model = CNN.CNN()
CUDA = torch.cuda.is_available()
if CUDA:
    print("CUDA available")  # looks like it's not available
    model = model.cuda()

loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr = 0.01)
# Training the CNN
#%%time
import time

num_epochs = 10

# define the lists to store the results of loss and accuracy
train_loss = []
test_loss = []
train_acc = []
test_acc = []

# Training
for epoch in range(num_epochs):
    # reset these below variables to 0 at beginning of every epoch
    start = time.time()
    correct = 0
    iterations = 0
    iter_loss = 0.0

    model.train()  # put network into training mode
    for i, (inputs, labels) in enumerate(train_load):
        # convert torch tensor to Variable
        inputs = Variable(inputs)
        labels = Variable(labels)
        # if we have GPU, shift data to GPU
        CUDA = torch.cuda.is_available()
        if CUDA:
            inputs = inputs.cuda()
            labels = labels.cuda()

        optimizer.zero_grad()  # clear off gradient in (w = w- gradient)
        outputs = model(inputs)
        loss = loss_fn(outputs, labels)
        iter_loss += loss.data  # accumulate loss
        loss.backward()  # backpropagation
        optimizer.step()  # update the weights

        # record the correct predictions for training data
        _, predicted = torch.max(outputs, 1)
        correct += (predicted == labels).sum()
        iterations += 1

    # record the training loss
    train_loss.append(iter_loss/iterations)
    # record the training accuracy
    train_acc.append((100 * correct / len(train_dataset)))

    # testing
    # BUG FIX: the original reused one "loss" name for both the accumulator
    # and the per-batch loss, so the accumulated value was clobbered every
    # batch and test_loss recorded only (2x) the final batch's loss.
    test_iter_loss = 0.0
    correct = 0
    iterations = 0

    model.eval()  # put the network into evaluation mode
    for i, (inputs, labels) in enumerate(test_load):
        # convert torch tensor to Variable
        inputs = Variable(inputs)
        labels = Variable(labels)
        CUDA = torch.cuda.is_available()
        if CUDA:
            inputs = inputs.cuda()
            labels = labels.cuda()

        outputs = model(inputs)
        loss = loss_fn(outputs, labels)  # calculate loss
        test_iter_loss += loss.data     # accumulate over all test batches

        # record the correct predictions for the test data
        # BUG FIX: "preicted" typo meant the comparison below silently reused
        # the last *training* batch's predictions, corrupting test accuracy.
        _, predicted = torch.max(outputs, 1)
        correct += (predicted == labels).sum()
        iterations += 1

    # record testing loss
    test_loss.append(test_iter_loss/iterations)
    # record testing accuracy
    test_acc.append((100 * correct / len(test_dataset)))
    stop = time.time()

    print('Epoch {}/{}, Training Loss: {:.3f}, Training Accuracy: {:.3f}, Testing Loss: {:.3f}, Testing Acc: {:.3f}, Time: {}s'
          .format(epoch+1, num_epochs, train_loss[-1], train_acc[-1], test_loss[-1], test_acc[-1], stop-start))
# Plot the per-epoch curves collected above and persist the trained weights.
# Loss
f = plt.figure(figsize=(10, 10))
plt.plot(train_loss, label='Training Loss')
plt.plot(test_loss, label='Testing Loss')
plt.legend()
plt.savefig('loss.png')

# Accuracy
f = plt.figure(figsize=(10, 10))
plt.plot(train_acc, label='Training Accuracy')
plt.plot(test_acc, label='Testing Accuracy')
plt.legend()
plt.savefig('accuracy.png')

# save model (state_dict only; loading requires the CNN class definition)
torch.save(model.state_dict(), 'classify_receipts.pth')
|
from catan.board import *
from catan.utils import *
from catan.player import *
from catan.board import *
import pygame
def main():
    """Create the pygame window and the core Catan game objects."""
    # BUG FIX: pygame.display.set_mode takes the resolution as a single
    # (width, height) tuple; passing two positional ints raises a TypeError.
    screen = pygame.display.set_mode((1024, 768))
    board = Board()
    resource_cards = ResourceCardDeck()
    dev_cards = DevelopmentCardDeck()

if __name__ == "__main__":
    main()
"""后台添加的自定义方法,公用部分"""
from django.contrib import messages
def set_invalid(modeladmin, request, queryset):
    """Admin action: bulk-disable the selected objects (is_valid=False)."""
    # Bulk disable
    queryset.update(is_valid=False)
    messages.success(request, '操作成功')

# Action label shown in the admin dropdown (user-facing, kept in Chinese).
set_invalid.short_description = '批量禁用所选对象'
def set_valid(modeladmin, request, queryset):
    """Admin action: bulk-enable the selected objects (is_valid=True)."""
    # Bulk enable
    queryset.update(is_valid=True)
    messages.success(request, '操作成功')

# Action label shown in the admin dropdown (user-facing, kept in Chinese).
set_valid.short_description = '批量启用所选对象'
|
#-*-coding:utf-8-*-
from __future__ import division
# NOTE(review): Python 2 script (xrange, print statement); will not run on Python 3.
if __name__=="__main__":
    data = "Netflix"
    k = 50
    # innerProduct[i] accumulates the i-th largest per-user score across all users.
    innerProduct = {}
    for i in xrange(1, k+1):
        innerProduct[i] = 0
    count = 0
    with open(data + "-50.txt") as input:
        currentUserID = None
        currentUserResult = []
        for line in input.readlines():
            line = line.strip()
            if line == "":
                break
            count = count + 1
            paras = line.split(" ")
            if currentUserID == None:
                currentUserID = paras[0]
            elif paras[0]!=currentUserID:
                # A new user begins: fold the previous user's sorted top-k in.
                currentUserResult.sort(reverse=True)
                for i in xrange(1, k+1):
                    innerProduct[i] = innerProduct[i] + currentUserResult[i-1]
                currentUserResult = []
                currentUserID = paras[0]
            currentUserResult.append(float(paras[2]))
    # NOTE(review): the final user's accumulated scores are never folded in
    # (the fold only happens when the next user starts) -- verify.
    # count is now the number of users (total lines / k scores per user).
    count = count / k
    print "count:" + str(count)
    for i in xrange(1, k+1):
        innerProduct[i] = innerProduct[i]/count
    with open(data + "_topk_innerproduct.txt", "w") as output:
        for i in xrange(1, k + 1):
            output.write(str(i) + "," + str(innerProduct[i]) + "\n")
|
#import library
import cv2
import os
from skimage import exposure
from skimage.exposure import match_histograms
from matplotlib import pyplot as plt
#import images
# NOTE(review): os.path.basename() on a bare filename is a no-op here; these
# are effectively just relative paths resolved against the working directory.
ref = os.path.basename('reference.jpg')
sour = os.path.basename('source.jpg')
#opencv read images with gray scale
ref_img = cv2.imread(ref,0)
sour_img = cv2.imread(sour,0)
#print out resolution of two images
ref_size = ref_img.shape
sour_size = sour_img.shape
print("Reference size:{}\nSource size: {}".format(ref_size,sour_size))
#blending two images together
combin = cv2.addWeighted(ref_img,0.6,sour_img,0.4,0)
#---------display img-----------#
# cv2.imshow('Combin',combin)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
#---------display img-----------#
#----------save the image to file----------#
# cv2.imwrite('Match.jpg',combin)
#----------save the image to file----------#
#use match_histograms function to let combin histogram
#match to reference image
# NOTE(review): inputs here are 2-D grayscale; multichannel=True treats the
# last axis as channels -- confirm this is intended for the installed skimage.
match = match_histograms(combin,ref_img,multichannel=True)
#apply cumulative distribution function to plot
#out cumulative plot,bins value was defaulted as 256
# BUG FIX: each cumulative_distribution() call returns its own bin centers;
# the original reused a single "bins" variable, so the source and match CDF
# curves were plotted against the *reference* image's bins.
#for match img
match_cdf,match_bins = exposure.cumulative_distribution(match)
#for source img
sour_cdf,sour_bins = exposure.cumulative_distribution(sour_img)
#for reference img
ref_cdf,ref_bins = exposure.cumulative_distribution(ref_img)
#-------------------display everything into one plot-------------------#
#give name for plot called total_imgs, set subplot as 3x3 matrix
total_imgs, axes = plt.subplots(nrows=3, ncols=3, figsize=(15, 11))
#0 column source img, histogram and cumulative distribution of source img
axes[0,0].set_title('Source',fontsize =30)
axes[0,0].imshow(sour_img,cmap = 'gray')
axes[1,0].hist(sour_img.ravel(),256,[0,256])
axes[2,0].plot(sour_bins,sour_cdf,color = 'r')
#1 column ref img, histogram and cumulative distribution of ref img
axes[0,1].set_title('Reference',fontsize =30)
axes[0,1].imshow(ref_img,cmap = 'gray')
axes[1,1].hist(ref_img.ravel(),256,[0,256])
axes[2,1].plot(ref_bins,ref_cdf,color = 'r')
#2 column match img, histogram and cumulative distribution of match img
axes[0,2].set_title('Matched',fontsize =30)
axes[0,2].imshow(combin,cmap = 'gray')
axes[1,2].hist(match.ravel(),256,[0,256])
axes[2,2].plot(match_bins,match_cdf,color = 'r')
plt.tight_layout()
plt.show()
#-------------save the figure to file--------------#
# total_imgs.savefig('P1_hist_match_output.jpg')
# plt.close(total_imgs)
#-------------save the figure to file--------------#
#-------------------display everything into one plot-------------------#
|
#p70
#p70
# (1) Iterating over a string
import random  # BUG FIX: random.randint is used below (p73) but was never imported

string="홍길동"
print(len(string))
for s in string:
    print(s)

# (2) Iterating over a list
lstset = [1,2,3,4,5]
for e in lstset:
    print('원소:',e)

#p72
# (1) Creating range objects
num1=range(10)
print('num1:',num1)
num2=range(1,10)
print('num2:',num2)
num3=range(1,10,2)
print('num3=',num3)

# (2) Using range objects
for n in num1:
    print(n,end='')
print()
for n in num2:
    print(n, end='')
print()
for n in num3:
    print(n,end='')

#p73
# (1) Fill a list with 10 random ints in [1, 10]
lst=[]
for i in range(10):
    r = random.randint(1,10)
    lst.append(r)
print('lst=',lst)
# (2) Read the stored values back
for i in range(10):
    print(lst[i]*0.25)

#p74
# Multiplication table using range
# (1) outer loop over the tables 2..9
for i in range(2,10):
    print('~~~ {}단~~~'.format(i))
    # (2) inner loop over the multipliers 1..9
    for j in range(1,10):
        print('%d * %d = %d'%(i,j,i*j))

#p75
string=""" 나는 홍길동입니다
주소는 서울시 입니다
나이는 35세입니다"""
sents=[]
words=[]
# (1) paragraph -> sentences
for sen in string.split(sep="\n"):
    sents.append(sen)
    # (2) sentence -> words
    for word in sen.split():
        words.append(word)
print('문장:',sents)
print('문장수:',len(sents))
print('단어:',words)
# BUG FIX: len(words) was quoted, so the literal text "len(words)" was
# printed instead of the word count.
print('단어수:',len(words))
|
def printbaar_rek(rek):
    """Render a 4x5 board as text, bottom row (index 3) printed first.

    Each of the first four rows is joined into one string of its first five
    cells, and the rows are emitted in reverse order separated by newlines.
    """
    rows = ["".join(rek[r][c] for c in range(5)) for r in range(4)]
    return "\n".join(reversed(rows))
def speel(kleur, kolom, bord):
    """Drop a piece of colour ``kleur`` into the 1-based column ``kolom``.

    Counts the free ('O') cells in that column and writes the piece into the
    lowest free row (row index 3 renders at the bottom via printbaar_rek).
    Returns the printable board.

    BUG FIX: the free-cell count inspected column index ``kolom - 1`` but the
    piece was then written into column index ``kolom`` -- i.e. one column to
    the right of the one that was checked.
    """
    vrij = 0
    for rij in range(len(bord)):
        if bord[rij][kolom - 1] == 'O':
            vrij += 1
    # vrij free cells in the column -> the piece lands on row (4 - vrij),
    # matching the original hulp==4 -> row 0 ... hulp==1 -> row 3 mapping.
    if 1 <= vrij <= 4:
        bord[4 - vrij][kolom - 1] = kleur
    return printbaar_rek(bord)
# Demo: drop a green piece into column 3 of a partially filled board and print it.
print(speel('G',3,[['R', 'R', 'R', 'R', 'G'], ['G', 'G', 'R', 'G', 'R'], ['O', 'G', 'O', 'O', 'O'], ['O', 'R', 'O', 'O', 'O']]))
def drawDiamond():
    """Draw one diamond outline with the module-level ``toytle`` turtle.

    Traces two identical (30-degree right, 60-degree left) edge pairs of
    length 100, turning 150 degrees left between the first and second half.
    """
    for half in range(2):
        toytle.right(30)
        toytle.forward(100)
        toytle.left(60)
        toytle.forward(100)
        if half == 0:
            toytle.left(150)
import turtle

# Shared turtle used by drawDiamond() above.
toytle = turtle.Turtle()
toytle.hideturtle()
toytle.color("black")
toytle.speed(10)
# Nine diamonds, each rotated 190 degrees from the last, form the figure.
for i in range(9):
    drawDiamond()
    toytle.left(190)
turtle.done()
# -*- coding:utf-8 -*-
'''
1.6 字典中的键映射多个值
怎样实现一个键对应多个值的字典(也叫 multidict)?
一个字典就是一个键对应一个单值的映射。如果你想要一个键映射多个值,
那么你就需要将这多个值放到另外的容器中, 比如列表或者集合里面。比如,
你可以像下面这样构造这样的字典:
'''
# A multidict maps one key to several values by storing the values in a
# container: a list keeps insertion order and duplicates, a set deduplicates.
d = {
    'a':[1,2,3],
    'b':[4,5]
}
e = {
    'a':{1,2,3},
    'b':{4,5}
}
'''
选择使用列表还是集合取决于你的实际需求。如果你想保持元素的插入顺序就应该使用列表,
如果想去掉重复元素就使用集合(并且不关心元素的顺序问题)
你可以很方便的使用 collections 模块中的 defaultdict 来构造这样的字典。
defaultdict 的一个特征是它会自动初始化每个 key 刚开始对应的值,
所以你只需要关注添加元素操作了。比如:
'''
from collections import defaultdict

# defaultdict(list): first access to a missing key creates an empty list.
d2 = defaultdict(list)
d2['a'].append(1)
d2['a'].append(2)
d2['a'].append(4)
# print(d2)

# defaultdict(set): same idea, but duplicate values are collapsed.
d3 = defaultdict(set)
d3['a'].add(3)
d3['a'].add(2)
d3['a'].add(4)
# print(d3)
'''
需要注意的是, defaultdict 会自动为将要访问的键(就算目前字典中并不存在这样的键)创建映射实体。
如果你并不需要这样的特性,你可以在一个普通的字典上使用 setdefault() 方法来代替。比如:
'''
# setdefault(): plain-dict alternative that only creates the list on demand.
d4 = {}
d4.setdefault('a',[]).append(1)
d4.setdefault('a',[]).append(2)
d4.setdefault('a',[]).append(4)
print(d4)
'''
一般来讲,创建一个多值映射字典是很简单的。但是,如果你选择自己实现的话,
那么对于值的初始化可能会有点麻烦, 你可能会像下面这样来实现:
'''
# Manual multidict construction (the verbose alternative to defaultdict /
# setdefault shown above).
# BUG FIX: the original iterated "for key, value in pairs:" over a plain dict
# (which yields only keys, so the one-character key 'b' failed to unpack),
# appended into the unrelated dict "d", and pre-seeded d5 with scalar strings
# that have no .append().  The canonical cookbook pattern is restored.
d5 = {}
pairs = {'b':'bananer'}
for key, value in pairs.items():
    if key not in d5:
        d5[key] = []
    d5[key].append(value)
print(d5)
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from ..base import LowResEmbedder, AttentionBox, CoreAndProposalLayer
class TripleMNISTLowResEmbedder(LowResEmbedder):
    """Embeds the low-resolution (3x15) view of a triple-MNIST image into a
    20-dimensional vector."""

    def __init__(self):
        super(TripleMNISTLowResEmbedder, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 3)   # (1, 3, 15) -> (20, 1, 13)
        self.conv2 = nn.Conv1d(20, 10, 4)  # (20, 13) -> (10, 10)
        self.fcn1 = nn.Linear(100, 40)
        self.fcn2 = nn.Linear(40, 20)

    def forward(self, images):
        # ``images`` is a project-specific container; lowResView() yields the
        # coarse view, reshaped to single-channel NCHW below.
        x = images.lowResView()
        x = x.view(-1, 1, 3, 15)
        x = F.relu(self.conv1(x))
        x = x.view(-1, 20, 13)   # drop the singleton height dimension
        x = F.relu(self.conv2(x))
        x = x.view(-1, 100)      # flatten 10 channels x 10 positions
        x = F.relu(self.fcn1(x))
        x = F.relu(self.fcn2(x))
        return x
class TripleMNISTCoreAndProposalLayer(CoreAndProposalLayer):
    """Combines the low-res and focus embeddings into a 10-way
    log-probability proposal distribution."""

    def __init__(self):
        super(TripleMNISTCoreAndProposalLayer, self).__init__()
        self.fcn1 = nn.Linear(40, 40)
        self.fcn2 = nn.Linear(40, 20)
        self.fcn3 = nn.Linear(20, 10)

    def forward(self, low_res_embedding, focus_embedding):
        # Both embeddings are concatenated along the feature dim (40 total).
        x = torch.cat([low_res_embedding, focus_embedding], 1)
        x = F.relu(self.fcn1(x))
        x = F.relu(self.fcn2(x))
        x = F.relu(self.fcn3(x))
        # NOTE(review): log_softmax without an explicit dim= relies on the
        # legacy default and warns on newer torch -- consider dim=1.
        proposal_dist = F.log_softmax(x)
        return proposal_dist
class TripleMNISTSoftAttentionBox(AttentionBox):
    """Attention over the 13 focus locations of a triple-MNIST image.

    "soft" attention sums the per-location focus embeddings weighted by the
    attention distribution; "hard" attention draws multinomial samples of
    locations and importance-weights them so gradients still flow through
    the attention weights.
    """

    def __init__(self, weight_layer_type="fcn", attention_type="soft"):
        super(TripleMNISTSoftAttentionBox, self).__init__()
        if weight_layer_type == "fcn":
            # BUG FIX: this referenced TripleMNISTAttentionWeightsLayer, the
            # class's former name which no longer exists (it was renamed to
            # TripleMNISTAttentionPolicy), so this branch raised NameError.
            self.attention_weights_layer = TripleMNISTAttentionPolicy()
        elif weight_layer_type == "conv":
            # NOTE(review): this class is commented out below, so the "conv"
            # option currently cannot work -- verify before enabling it.
            self.attention_weights_layer = TripleMNISTConvAttentionWeightsLayer()
        else:
            raise Exception("{} not a valid type".format(weight_layer_type))
        self.focus_embedder = TripleMNISTFocusEmbedder()
        # NOTE(review): never updated; forward() records
        # most_recent_attention_weights instead.
        self.prev_attention_weights = None
        self.n_locations = 13
        self.attention_type = attention_type

    def forward(self, images):
        low_res_view = images.lowResView()
        attention_weights = self.attention_weights_layer(low_res_view)
        self.most_recent_attention_weights = attention_weights
        if self.attention_type == "soft":
            # add a weighted embedding of each view to the full embedding
            for location in range(self.n_locations):
                high_res_images = images.focusView([location]*images.nImages())
                high_res_images = high_res_images.view(-1, 1, 28, 28)
                local_focus_embeddings = self.focus_embedder(high_res_images)
                local_attention_weights = attention_weights[:, location]
                if location == 0:
                    focus_embedding = local_focus_embeddings * 0
                for img_no in range(images.nImages()):  # TODO: check this
                    focus_embedding[img_no] = focus_embedding[img_no]\
                        + local_focus_embeddings[img_no]\
                        * local_attention_weights[img_no]
        elif self.attention_type == "hard":
            n_samples = 5
            # do some sneaky stuff while hiding from autograd:
            # (get the samples * 1/q)
            np_attention_weights = attention_weights.data.numpy()
            try:
                attention_choices = [np.random.multinomial(n_samples, img_probs/sum(img_probs))/n_samples for img_probs in np_attention_weights]
            except ValueError:
                # BUG FIX: was a bare "except:" which also swallowed
                # KeyboardInterrupt/SystemExit; multinomial rejects bad
                # probability vectors with ValueError.
                for img_probs in np_attention_weights:
                    print(img_probs, sum(img_probs))
                raise Exception
            # importance weights: sampled frequencies over proposal probs
            attention_choices = np.divide(attention_choices, np_attention_weights)
            attention_choices = Variable(torch.from_numpy(np.array(attention_choices))).type(torch.FloatTensor)
            # we've done our job and can let autograd watch us again
            # restore to original samples with weight of 1 (but variable)
            attention_choices = torch.mul(attention_choices, attention_weights)
            for location in range(self.n_locations):
                high_res_images = images.focusView([location]*images.nImages())
                high_res_images = high_res_images.view(-1, 1, 28, 28)
                local_focus_embeddings = self.focus_embedder(high_res_images)
                local_attention_choices = attention_choices[:, location]
                if location == 0:
                    focus_embedding = local_focus_embeddings * 0
                for img_no in range(images.nImages()):  # TODO: check this
                    focus_embedding[img_no] = focus_embedding[img_no]\
                        + local_focus_embeddings[img_no]\
                        * local_attention_choices[img_no]
        else:
            raise Exception("{} not a valid attention type".format(self.attention_type))
        return focus_embedding

    def getAttentionSummary(self):
        """Return the attention-weights Variable from the latest forward()."""
        return self.most_recent_attention_weights
class TripleMNISTAttentionPolicy(nn.Module):
    """Attention policy network for the triple-MNIST task.

    Maps a low-resolution (N, 45) glimpse — reshaped to (N, 1, 3, 15) —
    to a (N, 13) softmax distribution over attention locations.
    """

    def __init__(self):
        # BUGFIX: super() previously named TripleMNISTAttentionWeightsLayer,
        # a different (renamed) class, which raises NameError as soon as the
        # policy is instantiated. super() must reference this class.
        super(TripleMNISTAttentionPolicy, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 3)
        self.conv2 = nn.Conv1d(20, 10, 4)
        self.fcn1 = nn.Linear(100, 40)
        self.fcn2 = nn.Linear(40, 13)
        # dim=1 is what PyTorch's implicit-dim heuristic picks for the 2-D
        # input used here; making it explicit silences the deprecation
        # warning without changing behavior.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Return a (N, 13) attention-weight distribution for input ``x``."""
        x = x.view(-1, 1, 3, 15)
        x = F.relu(self.conv1(x))   # -> (N, 20, 1, 13)
        x = x.view(-1, 20, 13)
        x = F.relu(self.conv2(x))   # -> (N, 10, 10)
        x = x.view(-1, 100)
        x = F.relu(self.fcn1(x))
        # NOTE(review): ReLU before softmax clamps negative logits to zero;
        # kept as in the original — confirm this is intended.
        x = F.relu(self.fcn2(x))
        weights = self.softmax(x)
        # Cache the last weights so they can be inspected later.
        self.previous_weights = weights
        return weights
# class TripleMNISTConvAttentionWeightsLayer(nn.Module):
# def __init__(self):
# super(TripleMNISTConvAttentionWeightsLayer, self).__init__()
# self.conv1 = nn.Conv2d(1, 10, 3, padding=1)
# self.conv2 = nn.Conv2d(10, 10, 3, padding=0)
# self.conv3 = nn.Conv1d(10, 1, 5, padding=2)
# self.softmax = nn.Softmax()
#
# def forward(self, x):
# x = x.view(-1, 1, 3, 15)
# x = F.relu(self.conv1(x))
# x = F.relu(self.conv2(x))
# x = x.view(-1, 10, 13)
# x = F.relu(self.conv3(x))
# x = x.view(-1, 13)
# weights = self.softmax(x)
# self.previous_weights = weights
# return weights
class TripleMNISTFocusEmbedder(nn.Module):
    """CNN that embeds a single (N, 1, 28, 28) focus view into a (N, 20) vector."""

    def __init__(self):
        super(TripleMNISTFocusEmbedder, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, 3)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(10, 20, 4)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(20, 10, 3)
        self.fcn1 = nn.Linear(90, 40)
        self.fcn2 = nn.Linear(40, 20)

    def forward(self, x):
        """Map a (N, 1, 28, 28) input to its (N, 20) embedding."""
        # Convolutional feature extraction; spatial size 28 -> 26 -> 13 -> 10 -> 5 -> 3.
        feats = self.pool1(F.relu(self.conv1(x)))
        feats = self.pool2(F.relu(self.conv2(feats)))
        feats = F.relu(self.conv3(feats)).view(-1, 90)
        # Two fully-connected layers reduce 90 features to the 20-dim embedding.
        return F.relu(self.fcn2(F.relu(self.fcn1(feats))))
|
#!/usr/bin/python
###################
###Documentation###
###################
"""
gluIdaifFour.py: Calculates cmrGlu using a FDG water scan and an image-derived input function
Uses basic four parameter kinetic model
See Phelps et al, Annals of Neurology 1979
Can perform an optional CBV correction using method used in: Sasaki et al, JCBF&M 1986
Requires the following inputs:
pet -> PET FDG image.
info -> Yi Su style PET info file.
	idaif -> Image-derived arterial input function
brain -> Brain mask in the same space as pet.
blood -> Estimate value for blood glucose level in mg/dL.
	out -> Root for all outputted files
User can set the following options:
d -> Density of brain tissue in g/mL. Default is 1.05
lc -> Value for the lumped constant. Default is 0.52
oneB -> Bounds for k1. Default is 10 times whole brain value.
twoB -> Bounds for k2. Default is 10 times whole brain value.
thrB -> Bounds for k3. Default is 10 times whole brain value.
	fourB -> Bounds for k4. Default is 10 times whole brain value.
	cbv -> CBV PET image in mL/hg
omega -> Ratio of FDG radioactivity in whole blood and plasma for CBV correction. Default is 0.9.
Produces the following outputs:
kOne -> Voxelwise map of k1 in 1/seconds.
kTwo -> Voxelwise map of k2 in 1/seconds.
kThree -> Voxelwise map of k3 in 1/seconds.
	kFour -> Voxelwise map of k4 in 1/seconds.
cmrGlu -> Voxelwise map of cerebral metabolic rate of glucose in uMol/(hg*min)
kOne_var -> Variance of k1 estimate.
kTwo_var -> Variance of k2 estimate.
kThree_var -> Variance of k3 estimate.
kFour_var -> Variance of k4 estimate.
cmrGlu_var -> Variance of cmrGlu estimate.
nRmsd -> Normalized root-mean-square deviation for fit.
Requires the following modules:
argparse, numpy, nibabel, nagini, tqdm, scipy
Tyler Blazey, Spring 2016
blazey@wustl.edu
"""
#####################
###Parse Arguments###
#####################
import argparse, sys

#Command line interface: six required positional inputs plus optional tuning flags
argParse = argparse.ArgumentParser(description='Estimates metabolic rate of glucose using:')
argParse.add_argument('pet',help='Nifti FDG image',nargs=1,type=str)
argParse.add_argument('info',help='Yi Su style info file',nargs=1,type=str)
argParse.add_argument('idaif',help='Image-derived input function',nargs=1,type=str)
argParse.add_argument('brain',help='Brain mask in PET space',nargs=1,type=str)
argParse.add_argument('blood',help='Blood glucose level in mg/dL',nargs=1,type=float)
argParse.add_argument('out',help='Root for outputed files',nargs=1,type=str)
argParse.add_argument('-d',help='Density of brain tissue in g/mL. Default is 1.05',default=1.05,metavar='density',type=float)
argParse.add_argument('-lc',help='Value for the lumped constant. Default is 0.52.',default=0.52,metavar='lumped constant',type=float)
argParse.add_argument('-oneB',nargs=2,type=float,metavar=('lower', 'upper'),help='Bounds of k1. Default is 10 times whole brain value')
argParse.add_argument('-twoB',nargs=2,type=float,metavar=('lower', 'upper'),help='Bounds of k2. Default is 10 times whole brain value')
argParse.add_argument('-thrB',nargs=2,type=float,metavar=('lower', 'upper'),help='Bounds of k3. Default is 10 times whole brain value')
argParse.add_argument('-fourB',nargs=2,type=float,metavar=('lower', 'upper'),help='Bounds of k4. Default is 10 times whole brain value')
argParse.add_argument('-cbv',nargs=1,help='Estimate of CBV in mL/hg. If given, corrects for blood volume.')
#BUGFIX: -omega used nargs=1 with no type, so a user-supplied value arrived as a
#list of strings while the default was the float 0.9; downstream code multiplies
#args.omega by an array and needs a float in both cases.
argParse.add_argument('-omega',type=float,help='Ratio of FDG in whole brain and plasma for CBV correction. Default is 0.9',default=0.9)
args = argParse.parse_args()

#Make sure the user set bounds correctly (lower must be strictly below upper)
for bound in [args.oneB,args.twoB,args.thrB,args.fourB]:
    if bound is not None:
        if bound[1] <= bound[0]:
            #BUGFIX: was a Python 2 print statement; parenthesized form works in 2 and 3
            print('ERROR: Lower bound of %f is not lower than upper bound of %f'%(bound[0],bound[1]))
            sys.exit()
#Load needed libraries
import numpy as np, nibabel as nib, nagini, sys, scipy.optimize as opt, scipy.interpolate as interp
from tqdm import tqdm

#########################
###Data Pre-Processing###
#########################

print('Loading images...')

#Load image headers
pet = nagini.loadHeader(args.pet[0])
brain = nagini.loadHeader(args.brain[0])

#Load in the idaif.
idaif = nagini.loadIdaif(args.idaif[0])

#Load in the info file
info = nagini.loadInfo(args.info[0])

#Check to make sure dimensions match: mask matches PET volume, and the number
#of PET frames matches both the idaif samples and the info rows
if pet.shape[0:3] != brain.shape[0:3] or pet.shape[3] != idaif.shape[0] or pet.shape[3] != info.shape[0]:
    #BUGFIX: was a Python 2 print statement; parenthesized form works in 2 and 3
    print('ERROR: Data dimensions do not match. Please check...')
    sys.exit()

#Get the image data
petData = pet.get_data()
brainData = brain.get_data()

#Flatten the PET images and then mask to brain voxels only
petMasked = nagini.reshape4d(petData)[brainData.flatten()>0,:]

#Use middle times as pet time. Account for any offset
petTime = info[:,1] - info[0,0]

#If cbv image is given, correct for blood volume
if ( args.cbv is not None ):

    #Load in CBV image and verify it matches the PET grid
    cbv = nagini.loadHeader(args.cbv[0])
    if cbv.shape[0:3] != pet.shape[0:3]:
        #BUGFIX: was a Python 2 print statement
        print('ERROR: CBV image does not match PET resolution...')
        sys.exit()
    cbvData = cbv.get_data()

    #Mask it and convert it to original units
    #(divide by 100 and scale by tissue density — presumably mL/hg to a
    # unitless tissue fraction; confirm against the nagini conventions)
    cbvMasked = cbvData.flatten()[brainData.flatten()>0] / 100 * args.d

    #Correct all the tacs for blood volume
    petMasked = petMasked - (args.omega*cbvMasked[:,np.newaxis]*idaif)

#Interpolate the aif to minimum sampling time
minTime = np.min(np.diff(petTime))
interpTime = np.arange(petTime[0],petTime[-1],minTime)
aifInterp = interp.interp1d(petTime,idaif,kind="linear")(interpTime)

#Get the whole brain tac and interpolate that
wbTac = np.mean(petMasked,axis=0)
wbInterp = interp.interp1d(petTime,wbTac,kind="linear")(interpTime)

#Set scale factor to get cmrGlu to uMole / (hg*min)
gluScale = 333.0449 / args.d / args.lc * args.blood[0]
###################
###Model Fitting###
###################

print('Beginning fitting procedure...')

#Attempt to fit the four-parameter model to the whole-brain curve first
fitX = np.vstack((interpTime,aifInterp))
try:
    wbFit = opt.curve_fit(nagini.gluFourIdaif,fitX,wbInterp,p0=[0.001,0.001,0.001,0.001],bounds=([0,0,0,0],[1,1,1,1]))
except RuntimeError:
    #BUGFIX: was a Python 2 print statement; parenthesized form works in 2 and 3
    print('ERROR: Cannot estimate four-parameter model on whole-brain curve. Exiting...')
    sys.exit()

#Use whole-brain values as initialization
init = wbFit[0]

#Default voxelwise bounds: 25x the whole-brain estimate (100x for k4)
#NOTE(review): the module docstring/help text say "10 times whole brain value" —
#confirm which factor is intended.
#BUGFIX: dtype=np.float was removed in NumPy 1.24; the builtin float is equivalent
bounds = np.array(([0,0,0,0],init*25),dtype=float)
bounds[1,3] = bounds[1,3] * 4

#Override defaults with any user-supplied bounds (k1..k4 in order)
for bIdx, bound in enumerate([args.oneB,args.twoB,args.thrB,args.fourB]):
    #If user wants different bounds, use them.
    if bound is not None:
        bounds[0,bIdx] = bound[0]
        bounds[1,bIdx] = bound[1]
        #Use midpoint between bounds as initial value if whole brain estimate is not in bounds
        if init[bIdx] < bound[0] or init[bIdx] > bound[1]:
            init[bIdx] = (bound[0]+bound[1]) / 2
#Fit the four-parameter model at every masked voxel.
#Columns of fitParams: k1..k4, cmrGlu, their variances, and normalized RMSD.
nVox = petMasked.shape[0]
fitParams = np.zeros((nVox,11))
noC = 0
for voxIdx in tqdm(range(nVox)):

    #Interpolate this voxel's tac onto the evenly sampled time grid
    voxInterp = interp.interp1d(petTime,petMasked[voxIdx,:],kind="linear")(interpTime)

    try:
        #Fit the four-parameter model to the voxel curve
        pars, cov = opt.curve_fit(nagini.gluFourIdaif,fitX,voxInterp,p0=init,bounds=bounds)

        #Save parameter estimates; cmrGlu = K1*k3/(k2+k3) scaled to uMol/(hg*min)
        fitParams[voxIdx,0:4] = pars
        fitParams[voxIdx,4] = ((pars[0]*pars[2])/(pars[1]+pars[2]))*gluScale

        #Save estimated parameter variances. Use delta method to get cmrGlu variance.
        fitParams[voxIdx,5:9] = np.diag(cov)
        gluGrad = np.array([(gluScale*pars[2])/(pars[1]+pars[2]),
                            (-1*pars[0]*pars[2]*gluScale)/np.power(pars[1]+pars[2],2),
                            (pars[0]*pars[1]*gluScale)/np.power(pars[1]+pars[2],2)])
        fitParams[voxIdx,9] = np.dot(np.dot(gluGrad.T,cov[0:3,0:3]),gluGrad)

        #Get normalized root mean square deviation of the fit
        fitResid = voxInterp - nagini.gluFourIdaif(fitX,pars[0],pars[1],pars[2],pars[3])
        fitRmsd = np.sqrt(np.sum(np.power(fitResid,2))/voxInterp.shape[0])
        fitParams[voxIdx,10] = fitRmsd / np.mean(voxInterp)

    except RuntimeError:
        #Keep going; just count voxels where the optimizer failed to converge
        noC += 1

#Warn user about lack of convergence
if noC > 0:
    print('Warning: %i of %i voxels did not converge.'%(noC,nVox))

#############
###Output!###
#############

print('Writing out results...')

#Write one masked image per fitted quantity
imgNames = ['kOne','kTwo','kThree','kFour','cmrGlu','kOne_var','kTwo_var','kThree_var','kFour_var','cmrGlu_var','nRmsd']
for iIdx, imgName in enumerate(imgNames):
    nagini.writeMaskedImage(fitParams[:,iIdx],brain.shape,brainData,brain.affine,'%s_%s'%(args.out[0],imgName))
|
import csv
import sys

# Expect exactly two inputs: the STR database (csv) and the DNA sequence (txt).
if len(sys.argv) != 3:
    print("Usage: python dna.py csv_file txt_file.", file=sys.stderr)
    sys.exit(1)

csv_file_name = sys.argv[1]
txt_file_name = sys.argv[2]

# Load the STR database as a list of rows.
# IMPROVED: PermissionError and FileNotFoundError are subclasses of OSError,
# so catching OSError alone covers all three cases.
try:
    with open(csv_file_name, "r") as csv_file:
        ref = list(csv.reader(csv_file))
except OSError as e:
    print(e)
    sys.exit(1)

# Load the DNA sequence.
try:
    with open(txt_file_name, "r") as txt_file:
        contents = txt_file.read()
    contents_length = len(contents)
except OSError as e:
    print(e)
    sys.exit(1)
# For each STR in the database header, find the longest run of consecutive
# repeats of that STR in the DNA sequence.
count = {k: 0 for k in ref[0][1:]}
for key in count:
    key_length = len(key)
    local_count = 0
    i = 0
    while i < contents_length:
        # BUGFIX: the original required seq_end < contents_length, which
        # silently dropped a repeat ending exactly at the end of the sequence.
        # Slicing handles the boundary safely; a truncated slice simply
        # compares unequal.
        if contents[i:i + key_length] == key:
            local_count += 1
            i += key_length
        else:
            if local_count > count[key]:
                count[key] = local_count
            local_count = 0
            i += 1
    # BUGFIX: a run still open when the sequence ends was never compared
    # against the best run; do that final comparison here.
    if local_count > count[key]:
        count[key] = local_count

# Longest-run counts in database column order, as strings for row comparison.
max_count = [str(v) for v in count.values()]

# Identify whose DNA it is: first person whose STR counts all match.
for person in ref[1:]:
    if person[1:] == max_count:
        print(person[0])
        break
else:
    # for/else: runs only when the loop finished without a break.
    print("No match")
import glob
import os
import pickle as pkl
import numpy as np
from PIL import Image, ImageChops
from nltk.tokenize import RegexpTokenizer
# Side length (pixels) of the square input images.
image_size = 64
# Maximum number of caption tokens kept per image (length of the one-hot
# sequence axis); the old comment calling this "number of captions" was wrong —
# it is used below as the token-position limit.
SEQ_LENGTH = 40
def get_dict_correspondance(worddict="/home/davidkanaa/Documents/UdeM/ift6266_h17_deep-learning/data/inpainting/worddict.pkl"):
    """Load the vocabulary pickle and build word<->index lookup tables.

    Returns [word_to_ix, ix_to_word], e.g. word_to_ix["plane"] == 200 and
    ix_to_word[200] == "plane".
    """
    with open(worddict, 'rb') as handle:
        vocab = pkl.load(handle)
    word_to_ix = {w: i for i, w in enumerate(vocab)}
    ix_to_word = dict(enumerate(vocab))
    return [word_to_ix, ix_to_word]
def get_nb_train(data_path="/home/davidkanaa/Documents/UdeM/ift6266_h17_deep-learning/data/inpainting/train2014/"):
    """Return the number of .jpg training images under *data_path*."""
    return len(glob.glob(data_path + "/*.jpg"))
def get_nb_val(data_path="/home/davidkanaa/Documents/UdeM/ift6266_h17_deep-learning/data/inpainting/val2014/"):
    """Return the number of .jpg validation images under *data_path*."""
    return len(glob.glob(data_path + "/*.jpg"))
def get_train_batch(batch_idx, batch_size,
                    data_path="/home/davidkanaa/Documents/UdeM/ift6266_h17_deep-learning/data/inpainting/train2014/",
                    caption_path="/home/davidkanaa/Documents/UdeM/ift6266_h17_deep-learning/data/inpainting/dict_key_imgID_value_caps_train_and_valid.pkl",
                    active_shift=True, active_rotation=True):
    """Load one training batch of inpainting examples with captions.

    Selects images [batch_idx*batch_size, (batch_idx+1)*batch_size) from the
    .jpg files under *data_path*, optionally augments them (random rotation
    and shift), blanks the 32x32 center of each to make the input, and keeps
    the original center as the target.

    Returns [inputs, targets, captions_array] where inputs/targets are float
    arrays scaled to roughly [-1, 1] and captions_array is a one-hot
    (batch_size, vocab_size, SEQ_LENGTH) encoding of each image's caption
    tokens.
    """
    imgs = glob.glob(data_path + "/*.jpg")
    batch_imgs = imgs[batch_idx * batch_size:(batch_idx + 1) * batch_size]
    input_batch = np.empty((0, 3, image_size, image_size), dtype=np.float32)
    target_batch = np.empty((0, 3, image_size // 2, image_size // 2), dtype=np.float32)
    # Read the caption dictionary (train + valid)
    with open(caption_path, 'rb') as fd:
        caption_dict = pkl.load(fd)
    # Get the word<->index correspondence to build the caption array
    [word_to_index, index_to_word] = get_dict_correspondance()
    vocab_size = len(word_to_index)
    # Shape for a 1D-CNN (batch_size, nb_channel = vocab_size, height = SEQ_LENGTH)
    captions_array = np.zeros((batch_size, vocab_size, SEQ_LENGTH), dtype=np.float32)
    # (debug) list of available words:
    # for x in dictionary:
    # print(x)
    # Tokenizer which removes punctuation
    tokenizer = RegexpTokenizer(r'\w+')
    for i, img_path in enumerate(batch_imgs):
        # Tokenize all caption sentences for this image into one flat list.
        # caption keys are the image file names without the ".jpg" extension.
        cap_id = os.path.basename(img_path)[:-4]
        caption = caption_dict[cap_id]
        tokenize_caption = []
        for j in range(len(caption)):
            tokenize_caption = tokenize_caption + tokenizer.tokenize(caption[j])
        len_caption = len(tokenize_caption)
        # Create the one hot vector for the current sentence
        for j in range(SEQ_LENGTH):
            # If the sentence is smaller than the sentence size we keep 0
            if j < len_caption:
                word = tokenize_caption[j]
                captions_array[i, word_to_index[word], j] = 1.
        # print(np.sum(captions_array[i])) # Gives SEQ_LENGTH most of the time; the processing seems correct
        img = Image.open(img_path)
        # Dynamic data augmentation
        # random rotation (within a 50 degree range, i.e. +/- 25 degrees)
        if active_rotation:
            random_angle = np.random.uniform(-25, 25)
            img = img.rotate(random_angle)
        # random shift (up to image_size // 20 = 3 px each way; the original
        # comment claimed "20% of the image size" — TODO confirm intent)
        if active_shift:
            random_y_shift = np.random.randint(-(image_size // 20), image_size // 20)
            random_x_shift = np.random.randint(-(image_size // 20), image_size // 20)
            img = ImageChops.offset(img, random_x_shift, random_y_shift)
        img_array = np.array(img)
        center = (int(np.floor(img_array.shape[0] / 2.)), int(np.floor(img_array.shape[1] / 2.)))
        # Color images have 3 dims (H, W, C); grayscale images only (H, W).
        if len(img_array.shape) == 3:
            # Input = full image with a blanked 32x32 center; target = that center.
            input = np.copy(img_array)
            input[center[0] - 16:center[0] + 16, center[1] - 16:center[1] + 16, :] = 0
            target = img_array[center[0] - 16:center[0] + 16, center[1] - 16:center[1] + 16, :]
            # transform size to fit our neural network (HWC -> CHW, add batch dim)
            input = input.transpose(2, 0, 1)
            input = input.reshape(1, 3, image_size, image_size)
            target = target.transpose(2, 0, 1)
            target = target.reshape(1, 3, image_size // 2, image_size // 2)
            # append to the minibatch
            # NOTE(review): np.append(new, batch) PREPENDS, so the returned batch
            # is in reverse order relative to batch_imgs — confirm intended.
            input_batch = np.append(input, input_batch, axis=0)
            target_batch = np.append(target, target_batch, axis=0)
        else:
            # Grayscale: replicate the single channel 3 times.
            input = np.copy(img_array)
            input[center[0] - 16:center[0] + 16, center[1] - 16:center[1] + 16] = 0
            target = img_array[center[0] - 16:center[0] + 16, center[1] - 16:center[1] + 16]
            input = input.reshape(1, 1, image_size, image_size)
            input = np.repeat(input, 3, axis=1)
            target = target.reshape(1, 1, image_size // 2, image_size // 2)
            target = np.repeat(target, 3, axis=1)
            input_batch = np.append(input, input_batch, axis=0)
            target_batch = np.append(target, target_batch, axis=0)
    # We want input in the interval [ - 1, 1 ]
    return [(input_batch / 256) * 2 - 1, (target_batch / 256) * 2 - 1, captions_array]
def get_val_batch(batch_idx, batch_size,
                  data_path="/home/davidkanaa/Documents/UdeM/ift6266_h17_deep-learning/data/inpainting/val2014/",
                  caption_path="/home/davidkanaa/Documents/UdeM/ift6266_h17_deep-learning/data/inpainting/dict_key_imgID_value_caps_train_and_valid.pkl",
                  active_shift=True, active_rotation=True):
    """Load one validation batch of inpainting examples with captions.

    Same pipeline as get_train_batch but reading from the validation image
    directory. Returns [inputs, targets, captions_array] with inputs/targets
    scaled to roughly [-1, 1].
    """
    imgs = glob.glob(data_path + "/*.jpg")
    batch_imgs = imgs[batch_idx * batch_size:(batch_idx + 1) * batch_size]
    input_batch = np.empty((0, 3, image_size, image_size), dtype=np.float32)
    target_batch = np.empty((0, 3, image_size // 2, image_size // 2), dtype=np.float32)
    # Read the caption dictionary (train + valid)
    with open(caption_path, 'rb') as fd:
        caption_dict = pkl.load(fd)
    # Get the word<->index correspondence to build the caption array
    [word_to_index, index_to_word] = get_dict_correspondance()
    vocab_size = len(word_to_index);
    # Shape for a 1D-CNN (batch_size, nb_channel = vocab_size, height = SEQ_LENGTH)
    captions_array = np.zeros((batch_size, vocab_size, SEQ_LENGTH), dtype=np.float32)
    # (debug) list of available words:
    # for x in dictionary:
    # print(x)
    # Tokenizer which removes punctuation
    tokenizer = RegexpTokenizer(r'\w+')
    for i, img_path in enumerate(batch_imgs):
        # Tokenize all caption sentences for this image into one flat list.
        cap_id = os.path.basename(img_path)[:-4]
        caption = caption_dict[cap_id]
        tokenize_caption = []
        for j in range(len(caption)):
            tokenize_caption = tokenize_caption + tokenizer.tokenize(caption[j])
        len_caption = len(tokenize_caption)
        # Create the one hot vector for the current sentence
        for j in range(SEQ_LENGTH):
            # If the sentence is smaller than the sentence size we keep 0
            if j < len_caption:
                word = tokenize_caption[j]
                captions_array[i, word_to_index[word], j] = 1.
        # print(np.sum(captions_array[i])) # Gives SEQ_LENGTH most of the time; the processing seems correct
        img = Image.open(img_path)
        # Dynamic data augmentation
        # random rotation (within a 50 degree range, i.e. +/- 25 degrees)
        if active_rotation:
            random_angle = np.random.uniform(-25, 25)
            img = img.rotate(random_angle)
        # random shift (up to image_size // 20 = 3 px each way)
        if active_shift:
            random_y_shift = np.random.randint(-(image_size // 20), image_size // 20)
            random_x_shift = np.random.randint(-(image_size // 20), image_size // 20)
            img = ImageChops.offset(img, random_x_shift, random_y_shift)
        img_array = np.array(img)
        center = (int(np.floor(img_array.shape[0] / 2.)), int(np.floor(img_array.shape[1] / 2.)))
        # Color images have 3 dims (H, W, C); grayscale images only (H, W).
        if len(img_array.shape) == 3:
            # Input = full image with a blanked 32x32 center; target = that center.
            input = np.copy(img_array)
            input[center[0] - 16:center[0] + 16, center[1] - 16:center[1] + 16, :] = 0
            target = img_array[center[0] - 16:center[0] + 16, center[1] - 16:center[1] + 16, :]
            # transform size to fit our neural network (HWC -> CHW, add batch dim)
            input = input.transpose(2, 0, 1)
            input = input.reshape(1, 3, image_size, image_size)
            target = target.transpose(2, 0, 1)
            target = target.reshape(1, 3, image_size // 2, image_size // 2)
            # append to the minibatch
            # NOTE(review): np.append(new, batch) PREPENDS, so the returned batch
            # is in reverse order relative to batch_imgs — confirm intended.
            input_batch = np.append(input, input_batch, axis=0)
            target_batch = np.append(target, target_batch, axis=0)
        else:
            # Grayscale: replicate the single channel 3 times.
            input = np.copy(img_array)
            input[center[0] - 16:center[0] + 16, center[1] - 16:center[1] + 16] = 0
            target = img_array[center[0] - 16:center[0] + 16, center[1] - 16:center[1] + 16]
            input = input.reshape(1, 1, image_size, image_size)
            input = np.repeat(input, 3, axis=1)
            target = target.reshape(1, 1, image_size // 2, image_size // 2)
            target = np.repeat(target, 3, axis=1)
            input_batch = np.append(input, input_batch, axis=0)
            target_batch = np.append(target, target_batch, axis=0)
    # We want input in the interval [ - 1, 1 ]
    return [(input_batch / 256) * 2 - 1, (target_batch / 256) * 2 - 1, captions_array]
class Trainset(object):
    """Sequential-batch view over the training split of the MSCOCO data."""

    def __init__(self):
        self._n_examples = get_nb_train()
        self._index = 0
        # Prime the object with a first batch of one example.
        self._images, self._targets, self._captions = self.next_batch(1)

    @property
    def images(self):
        """Most recently loaded input images."""
        return self._images

    @property
    def captions(self):
        """Most recently loaded one-hot caption arrays."""
        return self._captions

    @property
    def targets(self):
        """Most recently loaded target (center-crop) images."""
        return self._targets

    @property
    def n_examples(self):
        """Total number of training images on disk."""
        return self._n_examples

    def next_batch(self, size):
        """Fetch the batch at the current cursor, then advance (wrapping)."""
        batch = get_train_batch(self._index, size)
        self._index = 0 if self._index > self._n_examples else self._index + 1
        return batch
class Valset(object):
    """Sequential-batch view over the validation split of the MSCOCO data."""

    def __init__(self):
        # BUGFIX: this was get_nb_train() — the validation set must be sized
        # by the number of validation images, not training images, otherwise
        # the wrap-around index in next_batch is wrong.
        self._n_examples = get_nb_val()
        self._index = 0
        # Prime the object with a first batch of one example.
        self._images, self._targets, self._captions = self.next_batch(1)

    @property
    def images(self):
        """Most recently loaded input images."""
        return self._images

    @property
    def captions(self):
        """Most recently loaded one-hot caption arrays."""
        return self._captions

    @property
    def targets(self):
        """Most recently loaded target (center-crop) images."""
        return self._targets

    @property
    def n_examples(self):
        """Total number of validation images on disk."""
        return self._n_examples

    def next_batch(self, size):
        """Fetch the batch at the current cursor, then advance (wrapping)."""
        batch = get_val_batch(self._index, size)
        if self._index > self._n_examples:
            self._index = 0
        else:
            self._index += 1
        return batch
from collections import namedtuple

# Lightweight container grouping the dataset splits (test split is unused here).
Datasets = namedtuple(typename="Datasets", field_names=["train", "validation", "test"])

# NOTE: instantiating Trainset()/Valset() triggers disk I/O at import time.
mscoco = Datasets(train=Trainset(), validation=Valset(), test=None)

if __name__ == '__main__':
    # resize_mscoco()
    # show_examples(5, 10)
    # get_nb_train()
    # Smoke-test: load one training batch of 10 examples.
    [data_input, data_target, captions_array] = get_train_batch(1, 10)
    # get_dict_correspondance()
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from bs4 import BeautifulSoup
import requests
class GetStats(APIView):
    """Scrape worldometers' COVID table and return stats for requested countries.

    Query params:
        countries: country names separated by ", " (comma + space),
                   e.g. ?countries=France, Italy
    """

    def get(self, request):
        countries = self.request.query_params.get('countries', '')
        if not countries:
            return Response({'result': "Please Enter a Country"})

        # Fetch and parse the live stats table.
        content = requests.get('https://www.worldometers.info/coronavirus/').text
        soup = BeautifulSoup(content, "lxml")
        covid_table = soup.find("table", attrs={"id": "main_table_countries_today"})
        body = covid_table.tbody.find_all("tr")

        # Build a per-country lookup. The first 8 rows appear to be
        # world/continent aggregate rows and are skipped, as in the original
        # — confirm against the live page layout.
        data_dict = {}
        for row_tag in body[8:]:
            row = row_tag.find_all("td")
            name = row[1].text.replace("\n", "").strip()
            data_dict[name] = {
                'country': name,
                'total': row[2].text.replace(",", ""),
                'death': row[4].text.replace(",", ""),
                'active': row[8].text.replace(",", ""),
                'recovered': row[6].text.replace(",", ""),
                'population': row[14].text.replace(",", ""),
            }

        result = []
        for country in countries.split(', '):
            # BUGFIX: an unknown country name used to raise an uncaught
            # KeyError (HTTP 500); report it per-entry instead.
            stats = data_dict.get(country)
            if stats is None:
                result.append({'Country': country, 'Error': 'country not found'})
                continue
            result_dict = {
                'Country': stats['country'],
                'Total': stats['total'],
                'Deaths': stats['death'],
                'Active': stats['active'],
            }
            # BUGFIX: the bare excepts below were narrowed to the failures the
            # scraped strings can actually produce (non-numeric text or a zero
            # denominator); anything else should surface, not be swallowed.
            try:
                result_dict['RecoveryRate'] = str(int(stats['recovered']) / int(stats['total']))
            except (ValueError, ZeroDivisionError):
                result_dict['RecoveryRate'] = 'insufficient data'
            try:
                result_dict['PPInfected'] = str(int(stats['total']) / int(stats['population']))
            except (ValueError, ZeroDivisionError):
                result_dict['PPInfected'] = 'insufficient data'
            result.append(result_dict)
        return Response({'result': result})
|
# AWS specific configuration
# ** IMPORTANT NOTE: Please do not check in this file. This is machine specific
# config **

# The AWS region in which all resources are created
region = "us-east-1"

# AWS API credentials (replace the placeholders with real values)
credentials = {
    "aws_access_key_id": "your-access-key",
    "aws_secret_access_key": "your-access-secret"
}

# Path to the SSH key file - relative to the src directory
# NOTE: advisable to use absolute paths.
key_file_path = "../config/key_file_name.pem"

# The subnet id to be used
subnet_id = "subnet-id"

# The security group ids to be used - a list
security_group_ids = ['security group id1', 'security group id 2']

# The default gateway the VMs would use; if no gateway needs to be set, leave it
# empty.
default_gateway = "<ip-address-of-gateway>"

# Configure here the list of supported AMIs.
# It's a list of dictionaries; each dictionary must contain the ami_id, os and
# version fields describing the AMI. More details (e.g. tag names) may be added.
# NOTE: The following can be used w/o modifications. Leave it as it is.
supported_amis = [
    {'os': 'UBUNTU', 'version': '12.04', 'ami_id': 'ami-9c3b0cf4'}
    # {'os': 'UBUNTU', 'version': '14.04', 'ami_id': 'ami-9a562df2'},
    # {'os': 'CENTOS', 'version': '6.6', 'ami_id': 'ami-61655b08'},
    # {'os': 'DEBIAN', 'version': '7.0', 'ami_id': 'ami-e0efab88'}
]

# Configure here the available/supported instance types
# NOTE: The following can be used w/o modifications. Leave it as it is.
available_instance_types = [
    {'ram': 1024, 'instance_type': 't2.micro'},
    {'ram': 2048, 'instance_type': 't2.small'}
]

# Name tag applied to created VMs; this name is visible on the AWS console
vm_tag = "test.aws.adapter.ads"

# timeout in secs after which waiting for service should be abandoned
TIMEOUT = 300
|
import requests
import json
import configs.config as config
class News_api:
    """Small helper around the NewsAPI top-headlines endpoint.

    Reads its settings from configs.config.news_api and keeps the assembled
    request URL in self.api_url; the active category can be rotated through
    the configured list.
    """

    def __init__(self):
        self.category_i = 0
        self.api_url_website = "https://newsapi.org/v2/top-headlines?"
        self.api_key = config.news_api['key']
        self.country = config.news_api['country']
        self.language = config.news_api['language']
        self.source = config.news_api['source']
        self.pageSize = config.news_api['pageSize']
        self.category = config.news_api['category'][self.category_i]
        self.category_total = len(config.news_api['category'])
        self.generate_url(False)

    def generate_url(self, new_category):
        """Rebuild self.api_url; optionally rotate to the next category first."""
        if new_category:
            self.category_number()
            self.category = config.news_api['category'][self.category_i]
        # Assemble the query string, skipping any empty/zero settings.
        url = self.api_url_website + "apiKey=" + self.api_key
        if self.source:
            url += "&sources=" + self.source
        if self.country:
            url += "&country=" + self.country
        if self.language:
            url += "&language=" + self.language
        if self.category:
            url += "&category=" + self.category
        if self.pageSize:
            url += "&pageSize=" + str(self.pageSize)
        self.api_url = url

    def get_data(self):
        """Fetch the current URL and return the decoded JSON payload."""
        return requests.get(self.api_url).json()

    def category_number(self):
        """Advance the category index, wrapping to 0 past the last entry."""
        self.category_i += 1
        print(self.category_i)
        if self.category_i == self.category_total:
            self.category_i = 0

    def get_category(self):
        """Return the currently selected category string."""
        return self.category
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 10:50:13 2018
@author: Administrator
"""
import tensorflow as tf
#from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.ops.rnn_cell_impl import DropoutWrapper
import scipy.io as sio
import numpy as np
import random as rd
import os
# Output directory and file-name suffixes for the per-fold result arrays
# (despite the old "Mnist Datapath" note, this is where this script SAVES its
# softmax outputs and accuracy logs).
path="E:\ysdeeplearn\结果2\\"
p='s.npy'
p1='s1.npy'
#mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# Leave-one-group-out loop over fold labels 60..79: for each fold, retrain the
# LSTM from scratch and evaluate on the held-out group.
for jj in range(60,80):
    # Per-fold accumulators: s holds the final softmax outputs on the test
    # fold; s1 collects the train/test accuracies logged during training.
    s=np.array([])
    s1=[]
    #load_test_label = r'E:\ysdeeplearn\deepcode\deeptrain\sp256\基于人的\12\test_label.mat'
    #load_data = sio.loadmat(load_test_label)
    #load_matrix = load_data['test_label']
    #test_label1=np.array(load_matrix,dtype='float32')
    def get():
        """Load the .mat data, normalise it, and split train/test by fold jj.

        Returns (train_data, train_label, test_data, test_label): the rows
        whose order label equals jj become the test split; everything else is
        shuffled and used for training. Labels are one-hot over 4 classes.
        """
        # Assumes each file stores the named MATLAB variable, e.g. saved with
        # save(load_fn, 'order_l'); multiple variables may also be present.
        load_train_label= r'E:\ysdeeplearn\deepcode\deeptrain\何飞\yumu\order_LSTM20.mat'
        load_data = sio.loadmat(load_train_label)
        load_matrix = load_data['order_l']
        train_label=np.array(load_matrix,dtype='float32')
        load_train_data=r'E:\ysdeeplearn\deepcode\deeptrain\何飞\yumu\data_LSTM20.mat'
        load_data = sio.loadmat(load_train_data)
        load_matrix = load_data['data_l']
        train_data=np.array(load_matrix,dtype='float32')
        load_test_label = r'E:\ysdeeplearn\deepcode\deeptrain\何飞\yumu\label_LSTM20.mat'
        load_data = sio.loadmat(load_test_label)
        load_matrix = load_data['label_l']
        test_label1=np.array(load_matrix,dtype='float32')
        # One-hot encode the class labels (values 1..4 -> 4-wide rows).
        n=np.shape(test_label1)[1]
        test_label=np.zeros((n,4))
        for i in range(n):
            if (test_label1[0,i]==1):
                test_label[i,:]=[1 ,0,0,0]
            elif (test_label1[0,i] == 2):
                test_label[i,:]=[0 ,1,0,0]
            elif (test_label1[0,i] == 3):
                test_label[i,:]=[0 ,0,1,0]
            elif (test_label1[0,i] == 4):
                test_label[i,:]=[0 ,0,0,1]
        # Rows belonging to the current fold jj become the test set.
        idx=np.where(train_label==jj)[1]
        #idx=idx.reshape(1,58)
        # print(idx[-1])
        # train_data=train_data*1024
        # Z-score normalisation using global mean/std over ALL rows
        # (NOTE(review): statistics include the test rows — confirm intended).
        mu = np.mean(train_data)
        sigma = np.std(train_data)
        # zui=np.max(train_data)
        #
        train_data=(train_data-mu)/sigma
        # train_data=train_data/zui
        #
        #
        # NOTE(review): slicing idx[0]:idx[-1]+1 assumes the fold's rows are
        # contiguous — confirm against the order_LSTM20 layout.
        test_data1=train_data[idx[0]:(idx[-1]+1),:]
        test_label2=test_label[idx[0]:(idx[-1]+1),:]
        train_data_copy=train_data
        test_label_copy=test_label
        # Remove the test rows, then shuffle what remains for training.
        train_data_shan=np.delete(train_data_copy, idx, axis=0)
        test_label_shan=np.delete(test_label_copy, idx, axis=0)
        l1=np.shape(train_data_shan)[0]
        #l2=len(l1)
        idx1=rd.sample(range(l1),l1)
        train_data_shan1=train_data_shan[idx1,:]
        train_label_shan=test_label_shan[idx1,:]
        return train_data_shan1,train_label_shan,test_data1,test_label2
    train_data,train_label,test_data,test_label=get()
    # Build a fresh graph per fold so variables are re-initialised each time.
    with tf.Graph().as_default():
        # Hyper-parameters
        learning_rate = 1e-3
        num_units = 512
        num_layer = 3
        input_size = 280
        time_step = 10
        total_steps = 2000
        category_num = 4
        steps_per_validate = 100
        steps_per_test = 100
        batch_size = tf.placeholder(tf.int32, [])
        keep_prob = tf.placeholder(tf.float32, [])
        # Get RNN Cell: LSTM with dropout applied to its outputs.
        def cell(num_units):
            cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=num_units,state_is_tuple=True)
            return DropoutWrapper(cell, output_keep_prob=keep_prob)
        # Inputs: each 2800-wide row is viewed as 10 time steps of 280 features.
        x = tf.placeholder(tf.float32, [None, 2800])
        y_label = tf.placeholder(tf.float32, [None, 4])
        x_shape = tf.reshape(x, [-1, time_step, input_size])
        # RNN Layers; only the last time step's output feeds the classifier.
        cells = tf.nn.rnn_cell.MultiRNNCell([cell(num_units) for j in range(num_layer)])
        h0 = cells.zero_state(batch_size, dtype=tf.float32)
        output, hs = tf.nn.dynamic_rnn(cells, inputs=x_shape, initial_state=h0,time_major=False)
        output = output[:, -1, :]
        # Or h = hs[-1].h
        # Output Layer
        w = tf.Variable(tf.truncated_normal([num_units, category_num], stddev=0.1), dtype=tf.float32)
        b = tf.Variable(tf.constant(0.1, shape=[category_num]), dtype=tf.float32)
        y = tf.nn.softmax(tf.matmul(output, w) + b)
        # Loss
        # NOTE(review): y is already softmax-ed but is passed as `logits`, so
        # softmax is applied twice inside the loss — confirm this is intended.
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_label, logits=y)
        train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
        # Prediction
        correction_prediction = tf.equal(tf.argmax(y, axis=1), tf.argmax(y_label, axis=1))
        accuracy = tf.reduce_mean(tf.cast(correction_prediction, tf.float32))
        # Queue-based batching (non-shuffled) over the full training arrays.
        def get_batch_data():
            [label, images] = [train_label,train_data]
            images = tf.cast(images, tf.float32)
            label = tf.cast(label, tf.float32)
            input_queue = tf.train.slice_input_producer([images, label], shuffle=False)
            image_batch, label_batch = tf.train.batch(input_queue, batch_size=1024, num_threads=1, capacity=2048)
            return image_batch, label_batch
        image_batch, label_batch = get_batch_data()
        # Train
        with tf.Session() as sess:
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)
            sess.run(tf.global_variables_initializer())
            for step in range(total_steps + 1):
                image_batch_v, label_batch_v = sess.run([image_batch, label_batch])
                sess.run(train, feed_dict={x: image_batch_v, y_label: label_batch_v, keep_prob: 0.4, batch_size: image_batch_v.shape[0]})
                # Train Accuracy (dropout disabled via keep_prob=1)
                if step % steps_per_validate == 0:
                    t1= sess.run(accuracy, feed_dict={x:image_batch_v, y_label: label_batch_v, keep_prob: 1,batch_size: image_batch_v.shape[0]})
                    print('Train', step,t1)
                    s1.append(t1)
                # Test Accuracy on the held-out fold
                if step % steps_per_test == 0:
                    t= sess.run(accuracy, feed_dict={x: test_data, y_label: test_label, keep_prob: 1, batch_size: test_data.shape[0]})
                    # test_x, test_y = mnist.test.images, mnist.test.labels
                    print('Test', step,t)
                    s1.append(t)
            # Save this fold's final softmax outputs (s) and accuracy log (s1).
            s=sess.run(y, feed_dict={x: test_data, y_label: test_label, keep_prob: 1, batch_size: test_data.shape[0]})
            new1=os.path.join(path,str(jj))
            new2=new1+p
            new3=new1+p1
            np.save(new2,s)
            np.save(new3,s1)
|
"""
Copyright 2021 Mohamed Khalil
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
os.environ['NUMPY_EXPERIMENTAL_ARRAY_FUNCTION'] = '0'
import numpy as np
from pyHype.states import ConservativeState, PrimitiveState
from pyHype.input.input_file_builder import ProblemInput
from pyHype.fvm.Gradients.least_squares import least_squares_9_point
class Gradient:
    """Base class for gradient evaluation schemes.

    Holds the shared problem configuration that concrete schemes
    (least-squares, Green-Gauss) read from.
    """

    def __init__(self, inputs: ProblemInput):
        # Keep a reference to the global problem inputs.
        self.inputs = inputs
class LeastSquares9Point:
    """Nine-point least-squares gradient reconstruction.

    Delegates the numerics to the external ``least_squares_9_point``
    kernel; this class only carries the inputs and the four corner
    stencils it needs.
    """

    def __init__(self, inputs: ProblemInput):
        self.inputs = inputs
        # Index-offset stencils for the four mesh corners.
        # NOTE(review): offsets presumed relative to each corner cell — confirm.
        self.stencilSW = [[0, 0], [0, 1], [0, 0], [1, 0], [0, 1], [1, 0], [1, 1]]
        self.stencilNW = [[-2, 0], [-1, 0], [0, 0], [0, 1], [-2, 0], [-2, 1], [-1, 1]]
        self.stencilSE = [[0, 0], [1, 0], [0, -1], [0, -2], [0, -1], [1, -1], [1, -2]]
        self.stencilNE = [[0, -1], [0, -2], [-1, 0], [-2, 0], [-1, -2], [1, -1], [1, -2]]

    def __call__(self, refBLK):
        # Make instances usable as gradient functors.
        return self.least_squares_nearest_neighbor(refBLK)

    def least_squares_nearest_neighbor(self, refBLK):
        """Return (dQdx, dQdy) for the block via the 9-point LS kernel."""
        ghost = refBLK.boundary_blocks
        grad_x, grad_y = least_squares_9_point(
            refBLK.state.Q,
            ghost.E.state.Q, ghost.W.state.Q, ghost.N.state.Q, ghost.S.state.Q,
            refBLK.mesh.x, refBLK.mesh.y,
            ghost.E.x, ghost.E.y, ghost.W.x, ghost.W.y,
            ghost.N.x, ghost.N.y, ghost.S.x, ghost.S.y,
            self.inputs.nx, self.inputs.ny,
            self.stencilSW, self.stencilNW, self.stencilSE, self.stencilNE,
        )
        return grad_x, grad_y
class GreenGauss:
    """Green-Gauss gradient reconstruction over the four cell faces."""

    def __init__(self, inputs: ProblemInput):
        self.inputs = inputs

    def __call__(self, refBLK):
        # Make instances usable as gradient functors.
        return self.green_gauss(refBLK)

    def green_gauss(self, refBLK):
        """Compute and store refBLK.gradx / refBLK.grady in place."""
        # Interface values along east-west and north-south faces.
        interfaceEW, interfaceNS = refBLK.get_interface_values()
        # Length-weighted contribution of every face.
        E, W, N, S = self.face_contribution(interfaceEW, interfaceNS, refBLK)
        mesh = refBLK.mesh
        # dUdx: sum of x-projected face contributions over the cell area.
        refBLK.gradx = (E * mesh.faceE.xnorm +
                        W * mesh.faceW.xnorm +
                        N * mesh.faceN.xnorm +
                        S * mesh.faceS.xnorm
                        ) / mesh.A
        # dUdy: same with the y-projections.
        refBLK.grady = (E * mesh.faceE.ynorm +
                        W * mesh.faceW.ynorm +
                        N * mesh.faceN.ynorm +
                        S * mesh.faceS.ynorm
                        ) / mesh.A

    @staticmethod
    def face_contribution(interfaceEW, interfaceNS, refBLK):
        """Weight each face's interface values by its face length L."""
        mesh = refBLK.mesh
        east = interfaceEW[:, 1:, :] * mesh.faceE.L
        west = interfaceEW[:, :-1, :] * mesh.faceW.L
        north = interfaceNS[1:, :, :] * mesh.faceN.L
        south = interfaceNS[:-1, :, :] * mesh.faceS.L
        return east, west, north, south
|
#################################################################################
# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,
# National Renewable Energy Laboratory, and National Energy Technology
# Laboratory (subject to receipt of any required approvals from the U.S. Dept.
# of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#################################################################################
"""
Tests for zero-order bioreactor with simple reactions
"""
import pytest
import os
from pyomo.environ import (
ConcreteModel,
Block,
value,
Var,
assert_optimal_termination,
)
from pyomo.util.check_units import assert_units_consistent
from idaes.core import FlowsheetBlock
from idaes.core.solvers import get_solver
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.testing import initialization_tester
from idaes.core import UnitModelCostingBlock
from watertap.unit_models.zero_order import MetabZO
from watertap.core.wt_database import Database
from watertap.core.zero_order_properties import WaterParameterBlock
from watertap.core.zero_order_costing import ZeroOrderCosting
# Shared solver instance used by every component-level test below.
solver = get_solver()
class TestMetabZO_hydrogen:
    """Zero-order METAB unit model tests, "hydrogen" process subtype."""

    @pytest.fixture(scope="class")
    def model(self):
        # Flowsheet with one METAB unit: 1 kg/s water, 0.01 kg/s COD, no H2 in.
        m = ConcreteModel()
        m.db = Database()
        m.fs = FlowsheetBlock(dynamic=False)
        m.fs.params = WaterParameterBlock(solute_list=["cod", "hydrogen"])
        m.fs.unit = MetabZO(
            property_package=m.fs.params, database=m.db, process_subtype="hydrogen"
        )
        m.fs.unit.inlet.flow_mass_comp[0, "H2O"].fix(1)
        m.fs.unit.inlet.flow_mass_comp[0, "cod"].fix(0.01)
        m.fs.unit.inlet.flow_mass_comp[0, "hydrogen"].fix(0)
        return m

    @pytest.mark.unit
    def test_build(self, model):
        # The unit must be wired to the database it was constructed with.
        assert model.fs.unit.config.database == model.db

    @pytest.mark.component
    def test_load_parameters(self, model):
        # Database parameters must round-trip into the unit's fixed variables.
        data = model.db.get_unit_operation_parameters("metab")
        model.fs.unit.load_parameters_from_database(use_default_removal=True)
        assert model.fs.unit.recovery_frac_mass_H2O[0].fixed
        assert (
            model.fs.unit.recovery_frac_mass_H2O[0].value
            == data["recovery_frac_mass_H2O"]["value"]
        )

    @pytest.mark.component
    def test_degrees_of_freedom(self, model):
        # Fully specified model: no remaining degrees of freedom.
        assert degrees_of_freedom(model.fs.unit) == 0

    @pytest.mark.component
    def test_unit_consistency(self, model):
        assert_units_consistent(model.fs.unit)

    @pytest.mark.component
    def test_initialize(self, model):
        initialization_tester(model)

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, model):
        results = solver.solve(model)
        # Check for optimal solution
        assert_optimal_termination(results)

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solution(self, model):
        # Expected treated/byproduct flows for the hydrogen subtype.
        assert pytest.approx(1, rel=1e-3) == value(
            model.fs.unit.properties_treated[0].flow_mass_comp["H2O"]
        )
        assert pytest.approx(1.107e-5, rel=1e-3) == value(
            model.fs.unit.properties_byproduct[0].flow_mass_comp["hydrogen"]
        )
        assert pytest.approx(7.800e-3, rel=1e-3) == value(
            model.fs.unit.properties_treated[0].flow_mass_comp["cod"]
        )

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_conservation(self, model):
        # Per-component mass balance: inlet + generation = treated + byproduct.
        for j in model.fs.params.component_list:
            assert 1e-6 >= abs(
                value(
                    model.fs.unit.inlet.flow_mass_comp[0, j]
                    + sum(
                        model.fs.unit.generation_rxn_comp[0, r, j]
                        for r in model.fs.unit.reaction_set
                    )
                    - model.fs.unit.treated.flow_mass_comp[0, j]
                    - model.fs.unit.byproduct.flow_mass_comp[0, j]
                )
            )

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_report(self, model):
        # Smoke test: report generation must not raise.
        model.fs.unit.report()
class TestMetabZO_methane:
    """Zero-order METAB unit model tests, "methane" process subtype."""

    @pytest.fixture(scope="class")
    def model(self):
        # Same flowsheet as the hydrogen case, plus a methane solute.
        m = ConcreteModel()
        m.db = Database()
        m.fs = FlowsheetBlock(dynamic=False)
        m.fs.params = WaterParameterBlock(solute_list=["cod", "hydrogen", "methane"])
        m.fs.unit = MetabZO(
            property_package=m.fs.params, database=m.db, process_subtype="methane"
        )
        m.fs.unit.inlet.flow_mass_comp[0, "H2O"].fix(1)
        m.fs.unit.inlet.flow_mass_comp[0, "cod"].fix(0.01)
        m.fs.unit.inlet.flow_mass_comp[0, "hydrogen"].fix(0)
        m.fs.unit.inlet.flow_mass_comp[0, "methane"].fix(0)
        return m

    @pytest.mark.unit
    def test_build(self, model):
        # The unit must be wired to the database it was constructed with.
        assert model.fs.unit.config.database == model.db

    @pytest.mark.component
    def test_load_parameters(self, model):
        # Database parameters must round-trip into the unit's fixed variables.
        data = model.db.get_unit_operation_parameters("metab")
        model.fs.unit.load_parameters_from_database(use_default_removal=True)
        assert model.fs.unit.recovery_frac_mass_H2O[0].fixed
        assert (
            model.fs.unit.recovery_frac_mass_H2O[0].value
            == data["recovery_frac_mass_H2O"]["value"]
        )

    @pytest.mark.component
    def test_degrees_of_freedom(self, model):
        # Fully specified model: no remaining degrees of freedom.
        assert degrees_of_freedom(model.fs.unit) == 0

    @pytest.mark.component
    def test_unit_consistency(self, model):
        assert_units_consistent(model.fs.unit)

    @pytest.mark.component
    def test_initialize(self, model):
        initialization_tester(model)

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, model):
        results = solver.solve(model)
        # Check for optimal solution
        assert_optimal_termination(results)

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solution(self, model):
        # Expected treated/byproduct flows for the methane subtype.
        assert pytest.approx(1, rel=1e-3) == value(
            model.fs.unit.properties_treated[0].flow_mass_comp["H2O"]
        )
        assert pytest.approx(5.959e-4, rel=1e-3) == value(
            model.fs.unit.properties_byproduct[0].flow_mass_comp["methane"]
        )
        assert pytest.approx(4.100e-3, rel=1e-3) == value(
            model.fs.unit.properties_treated[0].flow_mass_comp["cod"]
        )

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_conservation(self, model):
        # Per-component mass balance: inlet + generation = treated + byproduct.
        for j in model.fs.params.component_list:
            assert 1e-6 >= abs(
                value(
                    model.fs.unit.inlet.flow_mass_comp[0, j]
                    + sum(
                        model.fs.unit.generation_rxn_comp[0, r, j]
                        for r in model.fs.unit.reaction_set
                    )
                    - model.fs.unit.treated.flow_mass_comp[0, j]
                    - model.fs.unit.byproduct.flow_mass_comp[0, j]
                )
            )

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_report(self, model):
        # Smoke test: report generation must not raise.
        model.fs.unit.report()
class TestMetabZO_hydrogen_cost:
    """Costing tests for the METAB unit, "hydrogen" process subtype."""

    @pytest.fixture(scope="class")
    def model(self):
        m = ConcreteModel()
        m.db = Database()
        m.fs = FlowsheetBlock(dynamic=False)
        m.fs.params = WaterParameterBlock(solute_list=["cod", "hydrogen"])
        # Costing definition lives in the METAB case-study YAML shipped
        # with the examples tree (path relative to this test file).
        source_file = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "..",
            "..",
            "..",
            "examples",
            "flowsheets",
            "case_studies",
            "wastewater_resource_recovery",
            "metab",
            "metab_global_costing.yaml",
        )
        m.fs.costing = ZeroOrderCosting(case_study_definition=source_file)
        m.fs.unit = MetabZO(
            property_package=m.fs.params, database=m.db, process_subtype="hydrogen"
        )
        m.fs.unit.inlet.flow_mass_comp[0, "H2O"].fix(1)
        m.fs.unit.inlet.flow_mass_comp[0, "cod"].fix(0.01)
        m.fs.unit.inlet.flow_mass_comp[0, "hydrogen"].fix(0)
        m.db.get_unit_operation_parameters("metab")
        m.fs.unit.load_parameters_from_database(use_default_removal=True)
        # Attach unit costing and aggregate at the flowsheet level.
        m.fs.unit.costing = UnitModelCostingBlock(flowsheet_costing_block=m.fs.costing)
        m.fs.costing.cost_process()
        return m

    @pytest.mark.unit
    def test_build(self, model):
        # unit costing
        assert isinstance(model.fs.costing.metab, Block)
        assert isinstance(model.fs.unit.costing.capital_cost, Var)
        assert isinstance(model.fs.unit.costing.fixed_operating_cost, Var)
        # flowsheet block
        assert (
            model.fs.unit.electricity[0]
            in model.fs.costing._registered_flows["electricity"]
        )
        assert model.fs.unit.heat[0] in model.fs.costing._registered_flows["heat"]
        assert "hydrogen_product" in model.fs.costing._registered_flows
        assert isinstance(model.fs.costing.total_capital_cost, Var)
        assert isinstance(model.fs.costing.total_fixed_operating_cost, Var)
        assert isinstance(model.fs.costing.aggregate_flow_costs, Var)

    @pytest.mark.component
    def test_degrees_of_freedom(self, model):
        assert degrees_of_freedom(model.fs.unit) == 0

    @pytest.mark.component
    def test_unit_consistency(self, model):
        assert_units_consistent(model.fs.unit)

    @pytest.mark.component
    def test_initialize(self, model):
        initialization_tester(model)

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, model):
        results = solver.solve(model)
        # Check for optimal solution
        assert_optimal_termination(results)

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_cost_solution(self, model):
        # unit model
        assert pytest.approx(3.091e6, rel=1e-3) == value(
            model.fs.unit.costing.capital_cost
        )
        assert pytest.approx(5069532.966912, rel=1e-3) == value(
            model.fs.unit.costing.fixed_operating_cost
        )
        # flowsheet: single unit, so flowsheet totals track the unit's.
        assert pytest.approx(
            value(model.fs.unit.costing.capital_cost), rel=1e-5
        ) == value(model.fs.costing.total_capital_cost)
        assert pytest.approx(5162268.806429799, rel=1e-3) == value(
            model.fs.costing.total_fixed_operating_cost
        )
        # Hydrogen is a revenue stream, hence the negative flow cost.
        agg_flow_costs = model.fs.costing.aggregate_flow_costs
        assert pytest.approx(-698.4, rel=1e-3) == value(
            agg_flow_costs["hydrogen_product"]
        )
        assert pytest.approx(2.583e5, rel=1e-3) == value(agg_flow_costs["electricity"])
        assert pytest.approx(1.531e4, rel=1e-3) == value(agg_flow_costs["heat"])
class TestMetabZO_methane_cost:
    """Costing tests for the METAB unit, "methane" process subtype."""

    @pytest.fixture(scope="class")
    def model(self):
        m = ConcreteModel()
        m.db = Database()
        m.fs = FlowsheetBlock(dynamic=False)
        m.fs.params = WaterParameterBlock(solute_list=["cod", "methane"])
        # Costing definition lives in the METAB case-study YAML shipped
        # with the examples tree (path relative to this test file).
        source_file = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "..",
            "..",
            "..",
            "examples",
            "flowsheets",
            "case_studies",
            "wastewater_resource_recovery",
            "metab",
            "metab_global_costing.yaml",
        )
        m.fs.costing = ZeroOrderCosting(case_study_definition=source_file)
        m.fs.unit = MetabZO(
            property_package=m.fs.params, database=m.db, process_subtype="methane"
        )
        m.fs.unit.inlet.flow_mass_comp[0, "H2O"].fix(1)
        m.fs.unit.inlet.flow_mass_comp[0, "cod"].fix(0.01)
        m.fs.unit.inlet.flow_mass_comp[0, "methane"].fix(0)
        m.db.get_unit_operation_parameters("metab")
        m.fs.unit.load_parameters_from_database(use_default_removal=True)
        # Attach unit costing and aggregate at the flowsheet level.
        m.fs.unit.costing = UnitModelCostingBlock(flowsheet_costing_block=m.fs.costing)
        m.fs.costing.cost_process()
        return m

    @pytest.mark.unit
    def test_build(self, model):
        # unit costing
        assert isinstance(model.fs.costing.metab, Block)
        assert isinstance(model.fs.unit.costing.capital_cost, Var)
        assert isinstance(model.fs.unit.costing.fixed_operating_cost, Var)
        # flowsheet block
        assert (
            model.fs.unit.electricity[0]
            in model.fs.costing._registered_flows["electricity"]
        )
        assert model.fs.unit.heat[0] in model.fs.costing._registered_flows["heat"]
        assert "methane_product" in model.fs.costing._registered_flows
        assert isinstance(model.fs.costing.total_capital_cost, Var)
        assert isinstance(model.fs.costing.total_fixed_operating_cost, Var)
        assert isinstance(model.fs.costing.aggregate_flow_costs, Var)

    @pytest.mark.component
    def test_degrees_of_freedom(self, model):
        assert degrees_of_freedom(model.fs.unit) == 0

    @pytest.mark.component
    def test_unit_consistency(self, model):
        assert_units_consistent(model.fs.unit)

    @pytest.mark.component
    def test_initialize(self, model):
        initialization_tester(model)

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_solve(self, model):
        results = solver.solve(model)
        # Check for optimal solution
        assert_optimal_termination(results)

    @pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
    def test_cost_solution(self, model):
        # unit model
        assert pytest.approx(3.856e7, rel=1e-3) == value(
            model.fs.unit.costing.capital_cost
        )
        assert pytest.approx(6.337e7, rel=1e-3) == value(
            model.fs.unit.costing.fixed_operating_cost
        )
        # flowsheet: single unit, so flowsheet totals track the unit's.
        assert pytest.approx(
            value(model.fs.unit.costing.capital_cost), rel=1e-5
        ) == value(model.fs.costing.total_capital_cost)
        assert pytest.approx(6.453e7, rel=1e-3) == value(
            model.fs.costing.total_fixed_operating_cost
        )
        # Methane is a revenue stream, hence the negative flow cost.
        agg_flow_costs = model.fs.costing.aggregate_flow_costs
        assert pytest.approx(-5735, rel=1e-3) == value(
            agg_flow_costs["methane_product"]
        )
        assert pytest.approx(4.209e4, rel=1e-3) == value(agg_flow_costs["electricity"])
        assert pytest.approx(0, abs=1e-3) == value(agg_flow_costs["heat"])
|
from tqdm import tqdm
import torch
import config
def train(model, dataloader, optimizer):
    """Run one training epoch and return the mean per-batch loss.

    The model is expected to return (predictions, loss) when called with
    the batch dict unpacked as keyword arguments.
    """
    model.train()
    running_loss = 0.0
    progress = tqdm(dataloader, total=len(dataloader))
    for batch in progress:
        # Move every tensor in the batch onto the configured device.
        for key in list(batch):
            batch[key] = batch[key].to(config.DEVICE)
        optimizer.zero_grad()
        _, batch_loss = model(**batch)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
    return running_loss / len(dataloader)
def eval(model, dataloader):
    """Evaluate the model; return (list of batch predictions, mean loss).

    Gradients are disabled for the whole pass.
    """
    model.eval()
    total_loss = 0.0
    predictions = []
    progress = tqdm(dataloader, total=len(dataloader))
    with torch.no_grad():
        for batch in progress:
            # Move every tensor in the batch onto the configured device.
            for key in list(batch):
                batch[key] = batch[key].to(config.DEVICE)
            batch_preds, batch_loss = model(**batch)
            total_loss += batch_loss.item()
            predictions.append(batch_preds)
    return predictions, total_loss / len(dataloader)
|
# Generated by Django 2.2.1 on 2019-05-13 18:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (2019-05-13): adds TaskList.created_by and
    tightens the Task/TaskList field definitions."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('api', '0003_auto_20190506_1451'),
    ]

    operations = [
        # NOTE(review): default=2 assumes a user with pk=2 exists in every
        # database this migration runs against — confirm before deploying.
        migrations.AddField(
            model_name='tasklist',
            name='created_by',
            field=models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='task',
            name='due_on',
            field=models.DateTimeField(),
        ),
        migrations.AlterField(
            model_name='task',
            name='status',
            field=models.CharField(default='TODO', max_length=50),
        ),
        migrations.AlterField(
            model_name='tasklist',
            name='name',
            field=models.CharField(max_length=50),
        ),
    ]
|
# Shell command templates for controlling macOS Wi-Fi through the private
# `airport` utility and `networksetup`. `{port}`, `{ssid}` and `{password}`
# are filled in by callers (presumably via str.format — confirm).
LIST_CMD = "/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport scan"
IS_WIFI_OFF = "/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport -I"
TURN_WIFI_ON = "networksetup -setairportpower {port} on"
TURN_WIFI_OFF = "networksetup -setairportpower {port} off"
GET_WIFI_PORT = "networksetup -listallhardwareports"
# NOTE(review): {ssid}/{password} are not quoted; values containing spaces or
# shell metacharacters will break if this string is run through a shell.
CONNECT_TO_NETWORK = "networksetup -setairportnetwork {port} {ssid} {password}"
NETWORK_STATUS = "/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport -I"
|
from typing import Tuple, List
def validate_multiple_inputs(input: dict) -> Tuple[List[str], str]:
    """Extract and validate a list of non-empty strings from a JSON payload.

    Fixed: the parameter was annotated ``List[str]`` although the function
    immediately calls ``.get()`` on it — it receives the parsed JSON dict.

    Args:
        input: parsed request JSON; must hold a non-empty list of non-empty
            strings under the key "input_data".

    Returns:
        (value, errors): ``value`` is whatever was found under "input_data"
        (``None`` when the key is missing) and ``errors`` is ``None`` on
        success, otherwise a message describing the first failed check.
    """
    errors = None
    try:
        # The payload must carry the data under the "input_data" key.
        input = input.get("input_data", None)
        if input is None:
            raise KeyError("The key 'input_data' was not found in the received JSON.")
        # check if input is list
        if isinstance(input, list):
            # check if list is empty
            if len(input) == 0:
                raise ValueError("Passed an empty list.")
            # every list item must be a non-empty string
            for i, item in enumerate(input):
                if not isinstance(item, str):
                    raise TypeError(f"The list item at position {i} is not a string.")
                if item == "":
                    raise ValueError(f"The list item at position {i} is an empty string.")
        else:
            raise TypeError("The passed object is not a list of strings.")
    except (ValueError, TypeError, KeyError) as exc:
        errors = str(exc)
    return input, errors
def validate_single_input(input: dict) -> Tuple[str, str]:
    """Extract and validate a single non-empty string from a JSON payload.

    Fixed: the parameter was annotated ``str`` although the function
    immediately calls ``.get()`` on it — it receives the parsed JSON dict.

    Args:
        input: parsed request JSON; must hold a non-empty string under the
            key "input_data".

    Returns:
        (value, errors): ``value`` is whatever was found under "input_data"
        (``None`` when the key is missing) and ``errors`` is ``None`` on
        success, otherwise a message describing the failed check.
    """
    errors = None
    try:
        # The payload must carry the data under the "input_data" key.
        input = input.get("input_data", None)
        if input is None:
            raise KeyError("The key 'input_data' was not found in the received JSON.")
        # the value must be a non-empty string
        if isinstance(input, str):
            if input == "":
                raise ValueError("Passed an empty string.")
        else:
            raise TypeError("The passed object is not a string.")
    except (ValueError, TypeError, KeyError) as exc:
        errors = str(exc)
    return input, errors
#This code tracks Twitter's South America's trending topics. Countries not listed are not on Twitter's API
# NOTE(review): `twitter_api` (an authenticated client) and `json` are assumed
# to be provided by setup code outside this snippet — confirm.


def _place_trends(woe_id):
    """Fetch trends for one WOE id, pretty-print the raw response, and
    return (raw_response, set of trend names).

    Fixed: every original print call was missing its closing parenthesis,
    which made the whole module a syntax error.
    """
    trends = twitter_api.trends.place(_id=woe_id)
    print(json.dumps(trends, indent=1))
    return trends, set([trend['name'] for trend in trends[0]['trends']])


# One (raw, name-set) pair per country; module-level names kept for callers.
ARGENTINA_WOE_ID = 23424747
argentina_trends, argentina_set = _place_trends(ARGENTINA_WOE_ID)
BRAZIL_WOE_ID = 23424768
brazil_trends, brazil_set = _place_trends(BRAZIL_WOE_ID)
CHILE_WOE_ID = 23424782
chile_trends, chile_set = _place_trends(CHILE_WOE_ID)
COLOMBIA_WOE_ID = 23424787
colombia_trends, colombia_set = _place_trends(COLOMBIA_WOE_ID)
ECUADOR_WOE_ID = 23424801
ecuador_trends, ecuador_set = _place_trends(ECUADOR_WOE_ID)
PERU_WOE_ID = 23424919
peru_trends, peru_set = _place_trends(PERU_WOE_ID)
VENEZUELA_WOE_ID = 23424982
venezuela_trends, venezuela_set = _place_trends(VENEZUELA_WOE_ID)
|
from numpy import sum, power, ones, mean, sqrt
from scipy import stats
from .linalg import as_array
def RSS(x, y, idx=0):
    """Residual sum of squares between x and y, skipping the first idx entries."""
    residual = as_array(x)[idx:] - as_array(y)[idx:]
    return (residual ** 2).sum()
def RWSE(x, y, idx=0):
    """Weighted squared error: residuals scaled by x / sum(x), then squared and summed."""
    x, y = as_array(x), as_array(y)
    total = sum(x[idx:])
    weighted = (x[idx:] - y[idx:]) * x[idx:] / total
    return (weighted ** 2).sum()
def RMSE(x, y, idx=0):
    """Root mean squared error (square root of MSE)."""
    return MSE(x, y, idx) ** 0.5
def MSE(X, Y, idx=0, axis=None):
    """Mean squared error over the given axis (None = all elements)."""
    residual = as_array(X)[idx:] - as_array(Y)[idx:]
    return power(residual, 2).mean(axis=axis)
def MSPE(x, y, idx=0):
    """Mean squared proportional error of y relative to the base array x.

    Computes mean_i (1 - y_i / x_i) ** 2 over entries from idx onward.
    """
    tail_length = len(x[idx:])
    ratio = as_array(y[idx:]) / as_array(x[idx:])
    return mean((ones(tail_length) - ratio) ** 2)
def confidence_interval(data, confidence=0.95):
    """Two-sided Student-t confidence interval for the mean of *data*.

    Args:
        data: 1-D sample, or 2-D array treated column-wise (axis 0).
        confidence: confidence level (default 0.95).

    Returns:
        (lower, mean, upper) — scalars for 1-D input, arrays for 2-D.
    """
    a = as_array(data)
    if len(a.shape) > 1:
        # NOTE(review): n is taken from shape[1] (number of columns) while the
        # statistics run over axis 0 (shape[0] samples) — confirm this degrees-
        # of-freedom choice is intentional.
        axis, n = 0, a.shape[1]
    else:
        axis, n = None, a.shape[0]
    m, se = mean(a, axis=axis), stats.sem(a, axis=axis)
    # Fixed: use the public percent-point function stats.t.ppf; the previous
    # stats.t._ppf is a private SciPy internal that bypasses argument checks
    # and may be removed without notice.
    h = se * stats.t.ppf((1 + confidence) / 2., n - 1)
    return m - h, m, m + h
|
# -*- coding: utf-8 -*-
import sqlite3
class DatabaseHelper():
    """Singleton helper wrapping all sqlite3 access for 'database.db'.

    Fixes over the previous revision:
      * ``return_data`` called the non-existent ``connect_deb`` (typo).
      * the ``video_recordings`` DDL was missing a comma after
        ``total_troughs int``, so table creation always failed.
      * every except block did ``raise e`` *before* ``conn.rollback()``,
        leaving the rollback unreachable; rollback now runs first.
      * ``finally: conn.close()`` raised NameError whenever opening the
        connection itself failed; ``conn`` is now guarded.
    """

    # Singleton instance holder.
    __instance = None

    @staticmethod
    def getInstance():
        """Return the single DatabaseHelper, creating it on first use."""
        if DatabaseHelper.__instance == None:
            DatabaseHelper()
        return DatabaseHelper.__instance

    '''Constructor'''
    def __init__(self):
        if DatabaseHelper.__instance != None:
            print("Returning Database Helper Instance")
        else:
            DatabaseHelper.__instance = self
            print('[BOOTING]: Database Helper Module')
            self.create_tables()

    # Method to allow for easy connections to the database
    def connect_db(self):
        """Open a connection to database.db (10 s busy timeout)."""
        try:
            conn = sqlite3.connect('database.db', timeout=10)
            print('[SUCCESS]: Connected to the Database')
            return conn
        except Exception as e:
            print("[FAIL]: Could not connect to the Database")
            raise e

    def _execute_write(self, sql, params, case, fail_msg, success_msg=None):
        """Run one parameterized INSERT; shared by all insert_* methods.

        Commits unless case == "Testing", in which case the write is
        rolled back (used by the test-suite to avoid persisting rows).
        """
        conn = None
        try:
            conn = self.connect_db()
            cursor = conn.cursor()
            cursor.execute(sql, params)
            if success_msg is not None:
                print(success_msg)
            if case == "Testing":
                conn.rollback()
            else:
                conn.commit()
        except Exception as e:
            print(fail_msg)
            if conn is not None:
                conn.rollback()
            raise e
        finally:
            if conn is not None:
                conn.close()

    def create_tables(self):
        """Create all application tables if they do not already exist."""
        conn = None
        try:
            conn = self.connect_db()
            cursor = conn.cursor()
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS users(ID INTEGER PRIMARY KEY AUTOINCREMENT,
                    username VARCHAR(20) NOT NULL UNIQUE,
                    password VARCHAR(20) NOT NULL,
                    sex VARCHAR(10) NOT NULL,
                    age int,
                    diagnosed int)
                ''')
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS audio_recordings(ID INTEGER PRIMARY KEY AUTOINCREMENT,
                    accuracypercentage float NOT NULL,
                    pitch_variance float NOT NULL,
                    wordspermin float NOT NULL,
                    modal_frequency float NOT NULL,
                    breath_time float NOT NULL,
                    avg_amplitude float NOT NULL,
                    filename text NOT NULL,
                    userID int,
                    FOREIGN KEY (userID) REFERENCES users(ID)
                    )
                ''')
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS prescreening(ID INTEGER PRIMARY KEY AUTOINCREMENT,
                    recordID int,
                    mood VARCHAR(10) NOT NULL,
                    medication VARCHAR(10) NOT NULL,
                    food VARCHAR(10),
                    FOREIGN KEY(recordID) REFERENCES users(ID)
                    )
                ''')
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS config(ID INTEGER PRIMARY KEY AUTOINCREMENT,
                    recordID int,
                    time INTEGER NOT NULL,
                    ch_acc INTEGER NOT NULL,
                    ch_wpm INTEGER,
                    ch_freq INTEGER,
                    ch_mod_freq INTEGER,
                    ch_avg_amp INTEGER,
                    ch_breath INTEGER,
                    FOREIGN KEY(recordID) REFERENCES users(ID)
                    )
                ''')
            # Fixed: missing comma after "total_troughs int" made this
            # statement a syntax error, so table creation always failed.
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS video_recordings(ID INTEGER PRIMARY KEY AUTOINCREMENT,
                    recordID int NOT NULL,
                    total_blinks int NOT NULL,
                    total_peaks int NOT NULL,
                    total_troughs int,
                    blinkspermin float NOT NULL,
                    peakspermin float NOT NULL,
                    troughspermin float NOT NULL,
                    video_duration float NOT NULL,
                    FOREIGN KEY(recordID) REFERENCES users(ID))
                ''')
            conn.commit()
            print("[SUCCESS]: Created the tables")
        except Exception as e:
            print("[FAIL]: Could not create tables")
            if conn is not None:
                conn.rollback()
            raise e
        finally:
            if conn is not None:
                conn.close()

    def insert_users(self, username, password, sex, age, diagnosed, case):
        """Insert a user row; case="Testing" rolls back instead of committing."""
        self._execute_write(
            '''
            INSERT INTO users(username,password,sex,age,diagnosed) VALUES(?,?,?,?,?)
            ''',
            (username, password, sex, age, diagnosed),
            case,
            "[FAIL]: Failed to insert the user into DB",
            success_msg='[SUCCESS]: Inserted a user',
        )

    def insert_audiorecordings(self, userId, accuracypercent, filename, wpm, pitch_var, mfreq, breath_time, avgAmp, case):
        """Insert one audio-feature row for userId."""
        self._execute_write(
            '''
            INSERT INTO audio_recordings(accuracypercentage,filename,pitch_variance,wordspermin,modal_frequency, breath_time, avg_amplitude,userID)
            VALUES(?,?,?,?,?,?,?,?)''',
            (accuracypercent, filename, pitch_var, wpm, mfreq, breath_time, avgAmp, userId),
            case,
            "[FAIL]: Failed to insert recordings into DB",
        )

    def insert_videorecordings(self, userId, blinks, peaks, troughs, blinkspermin, peakspermin, troughspermin, duration, case):
        """Insert one video-feature row for userId."""
        self._execute_write(
            '''
            INSERT INTO video_recordings(recordID,total_blinks,total_peaks,total_troughs,blinkspermin,
            peakspermin,troughspermin,video_duration) VALUES(?,?,?,?,?,?,?,?)
            ''',
            (userId, blinks, peaks, troughs, blinkspermin, peakspermin, troughspermin, duration),
            case,
            "[FAIL]: Failed to insert video recordings into DB",
        )

    def insert_prescreening(self, mood, medication, food, userId, case):
        """Insert one prescreening questionnaire row for userId."""
        self._execute_write(
            '''
            INSERT INTO prescreening(recordID,mood,medication,food)
            VALUES(?,?,?,?)''',
            (userId, mood, medication, food),
            case,
            '[FAIL]: Failed to insert a text sample into the DB',
            success_msg='[SUCCESS]: Inserted prescreening',
        )

    def insert_config(self, userId, time, ch_acc, ch_wpm, ch_freq, ch_mod_freq, ch_breath, ch_avg_amp, case):
        """Insert one notification-config row for userId."""
        self._execute_write(
            '''
            INSERT INTO config(recordID,time,ch_acc,ch_wpm,ch_freq,ch_mod_freq,ch_breath,ch_avg_amp)
            VALUES(?,?,?,?,?,?,?,?)''',
            (userId, time, ch_acc, ch_wpm, ch_freq, ch_mod_freq, ch_breath, ch_avg_amp),
            case,
            '[FAIL]: Failed to insert a text sample into the DB',
            success_msg='[SUCCESS]: Inserted config',
        )

    # SELECT function to return values
    # Placeholder for now
    # But it works
    def return_data(self):
        """Print every user row (debugging helper)."""
        # Fixed: previously called the non-existent self.connect_deb().
        conn = self.connect_db()
        try:
            cursor = conn.cursor()
            selectcur = cursor.execute('''
                SELECT * FROM users''')
            for row in selectcur:
                print('{0} : {1}, {2}'.format(row[0], row[1], row[2]))
        finally:
            conn.close()

    def checkUserCredentials(self, function, username, password):
        """Validate credentials against the users table.

        function == 'Login': return the user's ID on success, 'Invalid' otherwise.
        function == 'Register': return 'Valid' when the name is free, else 'Invalid'.
        """
        conn = None
        try:
            data = []
            conn = self.connect_db()
            cursor = conn.cursor()
            selectcur = cursor.execute('''
                SELECT ID,username FROM users WHERE username = ? AND password = ?''',
                (username, password))
            row = selectcur.fetchone()
            # If from Login function
            if function == 'Login':
                # Check if user exists
                if row is None:
                    return "Invalid"
                for member in row:
                    data.append(member)
                # Double-check the stored username matches the form input.
                if username == data[1]:
                    # Return the user ID at user table
                    return data[0]
                return 'Invalid'
            # Else if from Register function
            elif function == 'Register':
                # A matching row means the username is already taken.
                return "Valid" if row is None else "Invalid"
        except Exception as e:
            print('[FAIL]: Failed to perform check function - DB error')
            if conn is not None:
                conn.rollback()
            raise e
        finally:
            if conn is not None:
                conn.close()

    def return_audiorecordings(self, userId):
        """Return seven parallel lists of audio features for userId:
        (accuracy, filenames, wpm, pitch_variance, modal_frequency,
        breath_time, avg_amplitude). Empty lists when the user has no rows."""
        conn = None
        try:
            conn = self.connect_db()
            cursor = conn.cursor()
            selectcur = cursor.execute('''
                SELECT accuracypercentage, filename, pitch_variance,wordspermin,modal_frequency, breath_time, avg_amplitude FROM audio_recordings
                WHERE userID = ?
                ''', (userId,))
            rows = selectcur.fetchall()
            acc, fn, pitch_var, wpm, mFreq, brTime, avgAmp = [], [], [], [], [], [], []
            for r in rows:
                acc.append(r[0])
                fn.append(r[1])
                pitch_var.append(r[2])
                wpm.append(r[3])
                mFreq.append(r[4])
                brTime.append(r[5])
                avgAmp.append(r[6])
            # NOTE: return order (wpm before pitch_var) intentionally differs
            # from SELECT order; kept as-is for caller compatibility.
            return acc, fn, wpm, pitch_var, mFreq, brTime, avgAmp
        except Exception as e:
            print('[FAIL]: Failed to return audio recordings- DB error')
            if conn is not None:
                conn.rollback()
            raise e
        finally:
            if conn is not None:
                conn.close()

    def return_config(self, userID):
        """Return six parallel lists of notification-config flags for userID.

        Empty lists when the user has no config rows (the old IndexError
        fallback could never trigger and has been dropped).
        """
        conn = None
        try:
            conn = self.connect_db()
            cursor = conn.cursor()
            selectcur = cursor.execute('''
                SELECT ch_acc, ch_wpm, ch_freq, ch_mod_freq,ch_breath,ch_avg_amp FROM config
                WHERE recordID = ?
                ''', (userID,))
            rows = selectcur.fetchall()
            acc, pitch_var, wpm, mFreq, brTime, avgAmp = [], [], [], [], [], []
            for r in rows:
                acc.append(r[0])
                pitch_var.append(r[1])
                wpm.append(r[2])
                mFreq.append(r[3])
                brTime.append(r[4])
                avgAmp.append(r[5])
            return acc, pitch_var, wpm, mFreq, brTime, avgAmp
        except Exception as e:
            print('[FAIL]: Failed to return config settings- DB error')
            if conn is not None:
                conn.rollback()
            raise e
        finally:
            if conn is not None:
                conn.close()
|
def quickSort(alist, left, right):
    """Sort alist[left..right] in place with recursive quicksort.

    Fixes over the previous revision:
      * no base case — recursion continued on empty/inverted ranges, which
        walked indices out of range and never terminated reliably;
      * the right-hand recursive call included the pivot position
        (``quickSort(alist, middle, right)``), so a subarray could fail to
        shrink; the pivot (placed at index ``r``) is now excluded from
        both recursive calls.

    Args:
        alist: list to sort (mutated in place).
        left, right: inclusive index bounds of the range to sort.
    """
    # Base case: ranges of length 0 or 1 are already sorted.
    if left >= right:
        return
    pivot = alist[left]
    l = left
    r = right
    while 1:
        # Advance l over elements <= pivot, but never past r.
        while alist[l] <= pivot:
            if l == r:
                break
            l = l + 1
        # Retreat r over elements > pivot (stops at alist[left] == pivot at worst).
        while alist[r] > pivot:
            r = r - 1
        if l < r:
            alist[l], alist[r] = alist[r], alist[l]
        else:
            break
    # Put the pivot in its final position r: everything left of r is <= pivot,
    # everything right of r is > pivot.
    alist[left], alist[r] = alist[r], alist[left]
    quickSort(alist, left, r - 1)
    quickSort(alist, r + 1, right)
# Demo: sort a reverse-ordered list over its full range and show the result.
alist=[20,18,16,14,12,10,8,6,4,2]
quickSort(alist,0,len(alist)-1)
print(alist)
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for testing the xgraph base passing functionality
"""
import unittest
import numpy as np
from pyxir.graph.layer.xlayer import XLayer, ConvData, defaultXLayer
from pyxir.graph.xgraph_factory import XGraphFactory
from pyxir.graph.xgraph import XGraph
from pyxir.graph.passing.base_pass import XGraphBasePass
class TestPass(XGraphBasePass):
    """XGraph pass that rewrites every Convolution layer into a Pooling layer."""

    def execute(self, xgraph):
        """Return a new XGraph with Convolution layers replaced by Pooling."""

        def replace_func(bottom_Xs, X, top_Xs):
            """ Replace Convolution with Pooling operation """
            # Non-convolution layers pass through unchanged.
            if X.type[0] not in ['Convolution']:
                return [X]
            # Build a Pooling layer that keeps the original identity,
            # shapes and graph connectivity.
            pooled = defaultXLayer()._replace(
                type=['Pooling'],
                name=X.name,
                shapes=X.shapes,
                sizes=X.sizes,
                bottoms=X.bottoms,
                tops=X.tops,
            )
            return [pooled]

        return self._replace_layer_pass(
            xgraph=xgraph,
            replace_func=replace_func,
        )
class TestXGraphBasePass(unittest.TestCase):
    """Checks that an XGraphBasePass subclass can rewrite layers in an XGraph."""

    xgraph_factory = XGraphFactory()

    def test_xgraph_factory(self):
        # Four-layer graph: two inputs feeding a convolution and an
        # elementwise add (in1 -> conv1 -> add1 <- in2).
        xlayers = [
            XLayer(
                name='in1',
                type=['Input'],
                bottoms=[],
                tops=['conv1'],
                targets=[]
            ),
            XLayer(
                name='in2',
                type=['Input'],
                bottoms=[],
                tops=['add1'],
                targets=[]
            ),
            XLayer(
                name='conv1',
                type=['Convolution'],
                bottoms=['in1'],
                tops=['add1'],
                data=ConvData(
                    weights=np.array([[[[1, 2], [3, 4]]]], dtype=np.float32),
                    biases=np.array([0., 1.], dtype=np.float32)
                ),
                targets=[]
            ),
            XLayer(
                name='add1',
                type=['Eltwise'],
                bottoms=['conv1', 'in2'],
                tops=[],
                targets=[]
            )
        ]
        xgraph = TestXGraphBasePass.xgraph_factory.build_from_xlayer(xlayers)
        # TestPass (above) turns Convolution layers into Pooling layers;
        # the layer count must be preserved.
        test_pass = TestPass()
        new_xgraph = test_pass.execute(xgraph)
        assert(len(new_xgraph) == 4)
        assert(new_xgraph.get('conv1').type[0] == 'Pooling')
|
#!/usr/bin/env python3
# Match with fasta input file
__appname__ = 'align_seq_fasta.py'
__author__ = 'Olivia Haas o.haas@imperial.ac.uk'
#Import sys
import sys
#seq2 = "ATCGCCGGATTACGGG"
#seq1 = "CAATTCGGAT"
# Assign the longer sequence s1, and the shorter to s2
# l1 is length of the longest, l2 that of the shortest
#first_fasta_file = open('../Data/E.coli.fasta')
#seq1 = first_fasta_file.read()
#second_fasta_file = open('../Data/407228412.fasta')
#seq2 = second_fasta_file.read()
#second_fasta_file.close()
def definesquence(seq1, seq2):
    """Order two sequences by length.

    Returns (longer, shorter, len(longer), len(shorter)); on a length tie
    seq1 is treated as the longer sequence.
    """
    if len(seq1) >= len(seq2):
        longer, shorter = seq1, seq2
    else:
        longer, shorter = seq2, seq1
    return longer, shorter, len(longer), len(shorter)
# A function that computes a score by returning the number of matches starting
# from arbitrary startpoint (chosen by user)
def calculate_score(s1, s2, l1, l2, startpoint):
    """Count matching bases when s2 is laid over s1 at offset startpoint.

    Prints a visual alignment ('*' = match, '-' = mismatch) followed by the
    shifted s2, s1, the score and a blank line; returns the match count.
    """
    markers = []
    score = 0
    for offset in range(l2):
        pos = offset + startpoint
        if pos < l1:  # positions hanging off the end of s1 are ignored
            if s1[pos] == s2[offset]:
                markers.append("*")
                score += 1
            else:
                markers.append("-")
    matched = "".join(markers)
    # some formatted output
    print("." * startpoint + matched)
    print("." * startpoint + s2)
    print(s1)
    print(score)
    print(" ")
    return score
# Test the function with some example starting points:
# calculate_score(s1, s2, l1, l2, 0)
# calculate_score(s1, s2, l1, l2, 1)
# calculate_score(s1, s2, l1, l2, 5)
# now try to find the best match (highest score) for the two sequences
# Module-level best-alignment trackers.
# NOTE(review): calculatebestscore below assigns same-named variables without
# a `global` statement, so these globals are never actually updated — confirm.
my_best_align = None
my_best_score = -1
def calculatebestscore(s1, s2, l1, l2, i):
    """Slide s2 along s1, print the best alignment, and return it.

    Fixed: the previous version assigned to my_best_align/my_best_score
    without a `global` declaration, which made them locals and raised
    UnboundLocalError on the first comparison. Local trackers are used now.

    Args:
        s1, s2: longer and shorter sequence (as from definesquence).
        l1, l2: their lengths.
        i: unused; kept so existing call sites stay valid.

    Returns:
        (best_align, best_score) — the dotted alignment string and its score
        (the earliest offset wins ties, since the comparison is strict).
    """
    best_align = None
    best_score = -1
    for start in range(l1):
        z = calculate_score(s1, s2, l1, l2, start)
        if z > best_score:
            best_align = "." * start + s2
            best_score = z
    print(best_align)
    print(s1)
    print("Best score:", best_score)
    return best_align, best_score
# open default fasta files
def main(argv):
    """Load two FASTA sequences, from argv paths or the default data files.

    Fixed: the three-argument branch called the undefined ``read.fasta(...)``
    and indexed ``argv[2]``/``argv[3]`` — but with ``len(argv) == 3`` the two
    file paths sit at ``argv[1]`` and ``argv[2]`` (``argv[0]`` is the script
    name and ``argv[3]`` does not exist). Plain ``open()`` with context
    managers is used now.

    Args:
        argv: sys.argv-style list; either no extra arguments (use defaults)
            or two FASTA file paths.

    NOTE(review): seq1/seq2 are read but not yet passed to the alignment
    functions above — the file appears unfinished; confirm intended flow.
    """
    if len(argv) == 1:
        # open default fasta files
        with open('../Data/E.coli.fasta') as first_fasta_file:
            seq1 = first_fasta_file.read()
        with open('../Data/407228412.fasta') as second_fasta_file:
            seq2 = second_fasta_file.read()
    elif len(argv) == 2:
        # Exactly one path given: we need two sequences to align.
        print("Missing an input file")
        return None
    elif len(argv) == 3:
        with open(argv[1]) as first_fasta_file:
            seq1 = first_fasta_file.read()
        with open(argv[2]) as second_fasta_file:
            seq2 = second_fasta_file.read()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.