text string | size int64 | token_count int64 |
|---|---|---|
import os
import numpy as np
from pycocotools.coco import COCO
import cv2
from tqdm import tqdm
import argparse
import json
import torch
from fcos_core.structures.bounding_box import BoxList
from fcos_core.structures.boxlist_ops import boxlist_iou
def generate_pseudo_label_with_confidence_score(boxes, image_id, score_thre):
    """Select prediction indices whose confidence passes the threshold.

    Args:
        boxes: BoxList-like object exposing get_field("scores") and, when a
            per-class threshold is used, get_field("labels").
        image_id: unused; kept for interface compatibility with callers.
        score_thre: either a single float threshold, or a tensor of
            per-class thresholds indexed by label id.

    Returns:
        LongTensor of kept indices, ordered by descending score.
    """
    scores = boxes.get_field("scores")
    if isinstance(score_thre, float):
        keep = torch.nonzero(scores >= score_thre).squeeze(1)
    else:
        # Per-class threshold: compare each score against its own label's threshold.
        labels = boxes.get_field("labels")
        keep = torch.nonzero(scores >= score_thre[labels]).squeeze(1)
    # BUGFIX: the original returned idx[:len(keep)] (the top-len(keep) scores
    # overall). For per-class thresholds that can include boxes that are BELOW
    # their own class threshold while excluding boxes that passed it. Return
    # the actual kept indices instead, sorted by descending score.
    order = scores[keep].sort(0, descending=True)[1]
    return keep[order]
def parse_predictions():
    """Placeholder — not implemented; kept so callers importing it don't break."""
    pass
def new_annotation_json(pseudo_labels, img_id, ann_id):
    """Convert pseudo-label boxes into COCO-style annotation dicts.

    Args:
        pseudo_labels: BoxList-like object with a "labels" field and an
            xywh-convertible bbox tensor.
        img_id: COCO image id the annotations belong to.
        ann_id: first annotation id to assign; ids are consecutive.

    Returns:
        (annotations, next_ann_id) — the list of annotation dicts (each
        flagged with "ispseudo": True) and the next unused annotation id.
    """
    category_ids = pseudo_labels.get_field("labels").tolist()
    xywh_boxes = pseudo_labels.convert("xywh").bbox
    annotations = []
    for offset, (box, category) in enumerate(zip(xywh_boxes, category_ids)):
        x, y, w, h = box.tolist()
        annotations.append({
            "id": ann_id + offset,
            "image_id": img_id,
            "category_id": category,
            "bbox": [x, y, w, h],
            # Dummy segmentation so downstream COCO tooling does not choke.
            "segmentation": [[0., 0.]],
            "area": float(w * h),
            "iscrowd": 0,
            "ispseudo": True,
        })
    return annotations, ann_id + len(annotations)
def main(args):
    """Merge confident detector predictions into the partial COCO annotation
    file as pseudo-labels, skipping predictions that overlap an existing
    partial ground-truth box, and write the combined file to args.annotation.
    """
    annFile = 'datasets/coco/annotations/instances_train2017_0.5.json'
    coco = COCO(annFile)
    with open(annFile, 'r') as f:
        result_json = json.load(f)
    annos_json = result_json['annotations']
    # anno_id = max([ann['id'] for ann in annos_json]) + 1
    output_dir = os.path.join(args.predictions, 'coco_2017_train_partial')
    image_ids = torch.load(os.path.join(output_dir, 'image_ids.pth'))
    predictions = torch.load(os.path.join(output_dir, 'predictions.pth'))
    # Continue annotation numbering after the largest already-used box id.
    anno_id = max(torch.load(os.path.join(output_dir, 'box_ids.pth'))) + 1
    imgIds=sorted(coco.getImgIds())
    threshold = args.confidence
    # threshold = torch.tensor([-1.0, 0.46633365750312805, 0.4409848749637604, 0.47267603874206543, 0.4707889258861542, 0.5220812559127808, 0.5358721613883972, 0.5226702690124512, 0.45160290598869324])
    # Predictions overlapping a partial GT box by at least this IoU are dropped.
    iou_threshold = 0.5
    cpu_device = torch.device("cpu")
    partial_box_num = 0
    N = len(image_ids)
    for i in tqdm(range(N)):
        im_idx = image_ids[i]
        bbox = predictions[i]
        imginfo = coco.loadImgs(imgIds[im_idx])[0]
        image_width = imginfo['width']
        image_height = imginfo['height']
        # load annotations
        partial_anns = coco.loadAnns(coco.getAnnIds(imgIds=(imgIds[im_idx],)))
        # full_anns = coco_full.loadAnns(coco_full.getAnnIds(imgIds=(imgIds[im_idx],), catIds=catIds))
        partial_boxes = [obj["bbox"] for obj in partial_anns]
        partial_boxes_ids = set([obj["id"] for obj in partial_anns])
        partial_boxes = torch.as_tensor(partial_boxes).reshape(-1, 4)  # guard against no boxes
        partial_boxes = BoxList(partial_boxes, (image_width, image_height), mode="xywh").convert(
            "xyxy"
        )
        partial_box_num += len(partial_boxes_ids)
        # get predictions: rescale to the image size and move to CPU
        bbox = bbox.resize((image_width, image_height))
        bbox = bbox.to(cpu_device)
        # generate pseudo labels
        idx = generate_pseudo_label_with_confidence_score(bbox, im_idx, threshold)
        if len(idx) > 0:
            pseudo_labels = bbox[idx]
            scores = pseudo_labels.get_field("scores").tolist()  # NOTE(review): unused
            # compute iou between partial GT boxes and candidate pseudo-labels
            overlaps = boxlist_iou(partial_boxes, pseudo_labels)
            matched_id = [True] * len(pseudo_labels)
            # remove predictions for partial labels (already annotated boxes)
            # NOTE(review): this inner loop reuses the outer loop variable `i`;
            # harmless (the outer `for` rebinds it each iteration) but worth renaming.
            for i in range(len(partial_boxes)):
                matched = np.argmax(overlaps[i])
                if overlaps[i, matched] >= iou_threshold:
                    matched_id[matched] = False
            pseudo_labels = pseudo_labels[matched_id]
            # print(num, len(pseudo_labels))
            pseudo_annos, anno_id = new_annotation_json(pseudo_labels, imgIds[im_idx], anno_id)
            annos_json.extend(pseudo_annos)
    print('confidence threshold: {}'.format(threshold))
    result_json['annotations'] = annos_json
    with open(args.annotation, 'w') as f:
        json.dump(result_json, f)
    print(partial_box_num, len(result_json['annotations']))
if __name__ == "__main__":
    # Command-line interface for the pseudo-label generation script.
    parser = argparse.ArgumentParser()
    parser.add_argument("--predictions", type=str,
                        default="/home/mengqinj/capstone/output/stage1/",
                        help="prediction directory path. e.g output/stage1/")
    parser.add_argument("--annotation", type=str,
                        default="instances_train_2017.json",
                        help="output annotation path. e.g instances_train_2017.json")
    parser.add_argument("--confidence", type=float, default=0.5,
                        help="confidence score threshold")
    main(parser.parse_args())
# -*- coding: utf-8 -*-
import pytest
class SetupPublicHtml:
    """Shared fixtures for baking the cookiecutter template under test."""

    @pytest.fixture()
    def context(self):
        # Default: bake with no extra context overrides.
        return None

    @pytest.fixture()
    def template(self, cookies, context):
        """Bake the template, passing extra_context only when one is given."""
        return cookies.bake() if context is None else cookies.bake(extra_context=context)
class TestPublicHtml(SetupPublicHtml):
    """The baked project must ship these files under public_html/."""

    # NOTE: single-item parentheses are redundant — these are plain strings,
    # not one-element tuples; the value of the tuple is unchanged.
    files = (".htaccess", "index.php", "wp-config.php")

    @pytest.mark.parametrize("file", files)
    def test_has_file(self, template, file):
        assert template.project.join("public_html", file).isfile()
class TestHtaccessFile(SetupPublicHtml):
    """Checks on the generated public_html/.htaccess file."""

    @pytest.fixture()
    def file(self, template):
        return template.project.join(
            "public_html", ".htaccess").readlines()

    def test_is_not_empty(self, file):
        # BUGFIX: was `len(file) is not 0` — identity comparison with an int
        # (SyntaxWarning on Python 3.8+, relies on CPython small-int caching).
        assert len(file) != 0
class TestIndexPhpFile(SetupPublicHtml):
    """Checks on the generated public_html/index.php file."""

    @pytest.fixture()
    def file(self, template):
        return template.project.join(
            "public_html", "index.php").readlines()

    def test_is_not_empty(self, file):
        # BUGFIX: was `len(file) is not 0` — identity comparison with an int
        # (SyntaxWarning on Python 3.8+, relies on CPython small-int caching).
        assert len(file) != 0
class TestWpConfigPhpFile(SetupPublicHtml):
    """Checks on the generated public_html/wp-config.php file."""

    @pytest.fixture()
    def file(self, template):
        return template.project.join(
            "public_html", "wp-config.php").readlines()

    def test_is_not_empty(self, file):
        # BUGFIX: was `len(file) is not 0` — identity comparison with an int
        # (SyntaxWarning on Python 3.8+, relies on CPython small-int caching).
        assert len(file) != 0
| 1,400 | 446 |
"""Learn matplotlib"""
import os
import easygui as eg
import csv
import matplotlib.pyplot as plt
TITLE = """Learn - Matplotlib """
def select_file() -> str:
    """Use EasyGUI to select a data file and return its full path."""
    data_dir = os.path.join(os.getcwd(), 'Data')
    chosen = eg.fileopenbox(title=f'{TITLE}: Open a file',
                            default=os.path.join(data_dir, ".."),
                            filetypes="*.txt")
    print(f"Selected file: {os.path.basename(chosen)}")
    print(f"In directory: {os.path.dirname(chosen)}")
    return chosen
def read_file(data: dict) -> dict:
    """
    Read the file. Save the original into the input data dictionary.

    Expected CSV layout: row 0 holds the category line, row 2 holds the
    column names, and every other non-empty row holds an (x, value) pair.
    Values that fail integer parsing are recorded as 0.

    :param data: dictionary for passing data
    :return: a data structure with data

    >>> data= {'orig': {'filename': os.path.join('Data', 'trends_cupcakes.csv')}}
    >>> read_file(data)['orig']['Name x-line']
    'Month'
    >>> read_file(data)['orig']['Name values']
    'Cupcake: (Worldwide)'
    """
    # BUGFIX (docstring): the first doctest previously read the key
    # 'Name time-line', which this function never sets — it sets 'Name x-line'.
    data['orig']['x'] = []
    data['orig']['values'] = []
    with open(data['orig']['filename']) as csvfile:
        for row_count, row in enumerate(csv.reader(csvfile, delimiter=',')):
            if not row:
                continue  # skip blank separator lines (they still count in row_count)
            if row_count == 0:
                data['orig']['category'] = row[0]
                continue
            if row_count == 2:
                # Header row: names for the x axis and the value column.
                data['orig']['Name x-line'] = row[0]
                data['orig']['Name values'] = row[1]
            else:
                data['orig']['x'].append(row[0])
                try:
                    data['orig']['values'].append(int(row[1]))
                except ValueError:
                    # Non-numeric entries (e.g. '<1') are recorded as 0.
                    data['orig']['values'].append(0)
    return data
def display_original_data(data: dict) -> True:
    """
    Display data

    Draws two stacked subplots of searches over time: the first with a plain
    numeric time axis, the second re-labelled with the x-column strings.
    Blocks in plt.show() until the window is closed (interactive backends).

    :param data: data set to display needs 'time' and 'values'
    :return: True

    >>> data= {'orig': {'filename': os.path.join('Data', 'trends_cupcakes.csv')}}
    >>> data = read_file(data)
    >>> display_original_data(data)
    True
    """
    time = range(len(data['orig']['x']))
    # open figure
    fig = plt.figure()
    fig.suptitle(f"Analysis of searches for {data['orig']['Name values']}")
    # set up 2 subplots
    ax = fig.subplots(nrows=2, ncols=1)
    # first subplot
    ax[0].plot(time, data['orig']['values'], '.b')
    ax[0].set(xlabel='time [months]', ylabel='Number of searches',
              title='Searches per number of months')
    ax[0].grid()
    # second plot
    ax[1].plot(time, data['orig']['values'], '.r')
    ax[1].set_xlabel('time [months]')
    ax[1].set_ylabel('Number of searches')
    ax[1].set_title('Monthly searches')
    ax[1].grid()
    # proper axis and ticks: replace numeric tick labels with month names
    # (skips the last two ticks, which can lie outside the data range)
    labels = ax[1].get_xticklabels()
    for cnt, xtick in enumerate(ax[1].get_xticks()[0:-2]):
        labels[cnt] = data['orig']['x'][int(xtick)]
    ax[1].set_xticklabels(labels)
    # Rotate the ticks
    for tick in ax[1].get_xticklabels():
        tick.set_rotation(55)
    # sort out spacing between the stacked subplots
    fig.subplots_adjust(bottom=0.2, top=0.8, hspace=0.6)
    plt.show()
    return True
def display_per_nation(data: dict) -> True:
    """Show per-country searches as a pie chart and a horizontal bar chart.

    Countries with fewer than ``max_size`` searches are lumped into a single
    'Rest of the World' entry; the largest slice is exploded for emphasis.

    :param data: needs data['orig']['x'] (names) and data['orig']['values'] (counts)
    :return: True
    """
    all_countries = data['orig']['x']
    all_values = data['orig']['values']
    max_size = 70  # minimum count for a country to get its own slice
    pie_labels, pie_sizes = [f'Rest of the World'], [0]
    for cnt, value in enumerate(all_values):
        if value >= max_size:
            pie_labels.append(all_countries[cnt])
            pie_sizes.append(value)
        else:
            pie_sizes[0] += value  # aggregate the small entries
    # Explode (offset) the largest slice.
    explode = [0.0 for _ in pie_sizes]
    maximum_size: int = pie_sizes.index(max(pie_sizes))
    explode[maximum_size] = .3
    fig1, ax1 = plt.subplots()
    fig1.suptitle(f"Analysis of searches for {data['orig']['Name values']}")
    ax1.pie(pie_sizes, labels=pie_labels, explode=explode,
            shadow=True, autopct='%1.1f%%')
    # ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    fig2, ax2 = plt.subplots()
    fig2.suptitle(f"Analysis of searches for {data['orig']['Name values']}")
    ax2.barh(pie_labels, pie_sizes)
    ax2.set_yticks(range(len(pie_labels)))
    ax2.set_yticklabels(pie_labels)
    ax2.invert_yaxis()  # labels read top-to-bottom
    ax2.set_xlabel('Searches')
    fig2.subplots_adjust(left=0.2, right=1.0)
    plt.show()
    return True
def select_operation(data) -> True:
    """Ask the user which visualisation to run and dispatch to it.

    :param data: data dictionary produced by read_file()
    :return: True
    :raises AssertionError: if the GUI returns an unknown/cancelled choice
    """
    all_choices = {'Monthly trend': display_original_data,
                   'National data': display_per_nation}
    # Use Gui to select a choice
    choice: str = eg.buttonbox(msg="Select what to display", title=TITLE,
                               choices=list(all_choices.keys()),
                               image=os.path.join('Images', 'qm.png'))
    # BUGFIX: was `assert choice in all_choices, show_error_message(...)`.
    # `assert` is stripped under `python -O`, silently skipping validation;
    # validate explicitly (keeping AssertionError for caller compatibility).
    if choice not in all_choices:
        show_error_message('The choice is not available')
        raise AssertionError('The choice is not available')
    # This is the clever bit!! Run the choice as a function
    all_choices[choice](data)
    return True
def message_box(message: str) -> True:
    """Show an informational EasyGUI message box and return True."""
    full_message = message + "\n\nFor resources see: https://matplotlib.org/gallery "
    eg.msgbox(title=TITLE,
              msg=full_message,
              ok_button='OK',
              image=os.path.join('Images', 'Learn.png'))
    return True
def show_error_message(error_string="Error!") -> True:
    """
    A message box can be used for alert of error, success, etc
    return: True
    """
    eg.msgbox(msg=error_string,
              title="Learn_EasyGUI: Example Error",
              image=os.path.join('Images', 'failure.gif'))
    return True
def main() -> True:
    """Run the interactive workflow: pick a data file, read it, display it."""
    # Initialise the shared data structure.
    data = {'orig': {'filename': ""}, 'analysis': {}}
    message_box("First select a data file")
    data['orig']['filename'] = select_file()
    select_operation(read_file(data))
    return True
# --------------------------------------------------
if __name__ == "__main__":
    # Click-based CLI with three subcommands: run, test, disp.
    import click

    @click.group(help=TITLE)
    def cli():
        pass

    @cli.command('run', help='Run full program')
    def cli_run():
        main()

    @cli.command('test', help='Test csv testing')
    def cli_test():
        # Run the module's doctests and fail loudly on any failure.
        import doctest
        failures_count, test_count = doctest.testmod(verbose=False)
        assert failures_count == 0, 'Test failure... bailing out'
        print(f'All {test_count} tests passed')

    @cli.command('disp', help='display short cut')
    def cli_disp():
        # Shortcut: load the geo data set and show the per-nation charts.
        data = {'orig': {'filename': os.path.join('Data', 'geoMap_cupcakes.csv')}}
        data = read_file(data)
        display_per_nation(data)

    cli(obj={})
| 6,562 | 2,156 |
import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.i_selenium import assert_tab, image_div
from ..utils.i_selenium import wait_for_xpath_element, wait_for_invisible_xpath
from ..utils.isaac import open_accordion_section, close_accordion_section, wait_accordion_open, wait_accordion_closed
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException, NoSuchElementException
__all__ = ["accordion_behavior"]
#####
# Test : Accordion Sections Open and Close
#####
@TestWithDependency("ACCORDION_BEHAVIOUR")
def accordion_behavior(driver, ISAAC_WEB, WAIT_DUR, **kwargs):
    """Test if accordions open and close as expected.

    - 'driver' should be a Selenium WebDriver.
    - 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
    - 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.

    Returns True on success, False on any failure (screenshots are saved
    via image_div for visual diagnosis). NOTE: this file uses Python 2
    syntax (`except AssertionError, e:` below).
    """
    # Start from a logged-out session on the regression-test question page.
    assert_tab(driver, ISAAC_WEB)
    time.sleep(WAIT_DUR)
    driver.get(ISAAC_WEB + "/logout")
    log(INFO, "Logging out any logged in user.")
    time.sleep(WAIT_DUR)
    driver.get(ISAAC_WEB + "/questions/_regression_test_")
    log(INFO, "Got: %s" % (ISAAC_WEB + "/questions/_regression_test_"))
    time.sleep(WAIT_DUR)
    log(INFO, "Check accordions open first section automatically.")
    try:
        wait_for_xpath_element(driver, "//p[text()='This is a quick question.']")
        log(INFO, "First accordion section open by default on question pages.")
        time.sleep(WAIT_DUR)
    except TimeoutException:
        image_div(driver, "ERROR_accordion_default")
        log(ERROR, "First accordion section not open by default; see 'ERROR_accordion_default.png'.")
        return False
    log(INFO, "Try closing an accordion section.")
    try:
        close_accordion_section(driver, 1)
        time.sleep(WAIT_DUR)
        # The quick-question text must become invisible once section 1 closes.
        wait_for_invisible_xpath(driver, "//p[text()='This is a quick question.']")
        log(INFO, "Accordions close as expected.")
    except NoSuchElementException:
        log(ERROR, "Can't find accordion title bar to click; can't continue!")
        return False
    except TimeoutException:
        image_div(driver, "ERROR_accordion_closing")
        log(ERROR, "Accordion section did not close correctly; see 'ERROR_accordion_closing.png'")
        return False
    log(INFO, "Try reopening accordion section.")
    try:
        open_accordion_section(driver, 1)
        time.sleep(WAIT_DUR)
        wait_for_xpath_element(driver, "//p[text()='This is a quick question.']")
        log(INFO, "Accordions open as expected.")
        close_accordion_section(driver, 1)
        time.sleep(WAIT_DUR)
        log(INFO, "Closed accordion section; all should now be closed.")
    except NoSuchElementException:
        log(ERROR, "Can't find accordion title bar to click again; can't continue!")
        return False
    except TimeoutException:
        image_div(driver, "ERROR_accordion_reopen")
        log(ERROR, "Accordion section did not reopen correctly; see 'ERROR_accordion_reopen.png'!")
        return False
    log(INFO, "Check all accordion sections work.")
    try:
        # The regression-test page is expected to have exactly this many sections.
        expected_accordion_sections = 9
        accordion_sections = driver.find_elements_by_xpath("//a[contains(@class, 'ru_accordion_titlebar')]")
        assert len(accordion_sections) == expected_accordion_sections, "Expected %d accordion sections, got %s!" % (expected_accordion_sections, len(accordion_sections))
        log(INFO, "%d accordion sections on page as expected." % expected_accordion_sections)
        log(INFO, "Try to open each accordion section in turn.")
        for i, accordion_title in enumerate(accordion_sections):
            n = i + 1  # accordion helpers are 1-indexed
            accordion_title.click()
            wait_accordion_open(driver, n)
            log(INFO, "Accordion section %s correctly shown." % n)
            accordion_title.click()
            wait_accordion_closed(driver, n)
            log(INFO, "Accordion section %s correctly hidden." % n)
            time.sleep(WAIT_DUR)
    except TimeoutException:
        log(ERROR, "Couldn't open all accordion sections!")
        return False
    except AssertionError, e:
        log(ERROR, e.message)
        return False
    log(PASS, "Accordion behavior is as expected.")
    return True
| 4,262 | 1,319 |
from yggdrasil.communication import FileComm
class AsciiMapComm(FileComm.FileComm):
    r"""Class for handling I/O from/to an ASCII map on disk.

    An ASCII map file contains a key/value mapping with one key/value pair
    per line, key and value separated by a delimiter.

    Args:
        name (str): The environment variable where file path is stored.
        **kwargs: Additional keywords arguments are passed to parent class.
    """
    # Registered file-type name for this comm subtype.
    _filetype = 'map'
    _schema_subtype_description = ('The file contains a key/value mapping '
                                   'with one key/value pair per line and '
                                   'separated by some delimiter.')
    # Serializer registered under the name 'map' handles (de)serialization.
    _default_serializer = 'map'
| 587 | 157 |
### Author: Adam Michel <elfurbe@furbism.com>
### Based on work by: Dag Wieers <dag@wieers.com>
class dstat_plugin(dstat):
    """dstat plugin reporting NFSv4 client operation rates from /proc/net/rpc/nfs.

    NOTE(review): `dstat` (the base class), `info`, `elapsed`, `step` and `op`
    are all provided by the dstat runtime when it loads this plugin — they are
    not defined in this module.
    """
    def __init__(self):
        self.name = 'nfs4 client'
        # this vars/nick pair is the ones I considered relevant. Any set of the full list would work.
        self.vars = ('read', 'write', 'readdir', 'commit', 'getattr', 'create', 'link','remove')
        self.nick = ('read', 'writ', 'rdir', 'cmmt', 'gatr','crt','link','rmv')
        # this is every possible variable if you're into that
        #self.vars = ("read", "write", "commit", "open", "open_conf", "open_noat", "open_dgrd", "close",
        #             "setattr", "fsinfo", "renew", "setclntid", "confirm", "lock", "lockt", "locku",
        #             "access", "getattr", "lookup", "lookup_root", "remove", "rename", "link", "symlink",
        #             "create", "pathconf", "statfs", "readlink", "readdir", "server_caps", "delegreturn",
        #             "getacl", "setacl", "fs_locations", "rel_lkowner", "secinfo")
        # these are terrible shortnames for every possible variable
        #self.nick = ("read", "writ", "comt", "open", "opnc", "opnn", "opnd", "clse", "seta", "fnfo",
        #             "renw", "stcd", "cnfm", "lock", "lckt", "lcku", "accs", "gatr", "lkup", "lkp_r",
        #             "rem", "ren", "lnk", "slnk", "crte", "pthc", "stfs", "rdlk", "rdir", "scps", "delr",
        #             "gacl", "sacl", "fslo", "relo", "seco")
        self.type = 'd'
        self.width = 5
        self.scale = 1000
        self.open('/proc/net/rpc/nfs')
    def check(self):
        # other NFS modules had this, so I left it. It seems to work.
        # NOTE(review): `self.filename` is presumably set by the dstat loader — confirm.
        info(1, 'Module %s is still experimental.' % self.filename)
    def extract(self):
        # list of fields from nfsstat, in order of output from cat /proc/net/rpc/nfs
        nfs4_names = ("version", "fieldcount", "null", "read", "write", "commit", "open", "open_conf",
                      "open_noat", "open_dgrd", "close", "setattr", "fsinfo", "renew", "setclntid",
                      "confirm", "lock", "lockt", "locku", "access", "getattr", "lookup", "lookup_root",
                      "remove", "rename", "link", "symlink", "create", "pathconf", "statfs", "readlink",
                      "readdir", "server_caps", "delegreturn", "getacl", "setacl", "fs_locations",
                      "rel_lkowner", "secinfo")
        for line in self.splitlines():
            fields = line.split()
            if fields[0] == "proc4": # just grab NFSv4 stats
                # Sanity check: the reported counter count must match the data.
                assert int(fields[1]) == len(fields[2:]), ("reported field count (%d) does not match actual field count (%d)" % (int(fields[1]), len(fields[2:])))
                for var in self.vars:
                    self.set2[var] = fields[nfs4_names.index(var)]
        # Convert the absolute counters into per-second rates since last sample.
        for name in self.vars:
            self.val[name] = (int(self.set2[name]) - int(self.set1[name])) * 1.0 / elapsed
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| 2,970 | 1,047 |
import datapyp
import warnings
import os
class DecamPipeError(Exception):
    """Base exception raised for DECam pipeline configuration errors."""
    pass
class Pipeline(datapyp.core.Pipeline):
    """DECam-specific pipeline: validates the supplied paths and, when the
    sqlite DECam file index is missing, optionally (re)builds it before
    delegating to datapyp.core.Pipeline."""

    def __init__(self, **kwargs):
        from datapyp.utils import get_bool
        import logging
        logger = logging.getLogger(__name__)  # BUGFIX: `logger` was undefined (NameError)
        # Make sure that the user included a dictionary of paths to initialize the pipeline
        if 'paths' not in kwargs:
            raise DecamPipeError(
                "You must initialize a pipeline with the following paths: 'temp'")
        if ('stacks' not in kwargs['paths'] or 'config' not in kwargs['paths']
                or 'log' not in kwargs['paths'] or 'decam' not in kwargs['paths']):
            warnings.warn(
                "It is recommended to initialize a Pipeline with "
                "'log', 'stacks', 'config', 'decam' paths")
        # Check for the decam file index
        if 'connection' not in kwargs:
            warnings.warn("If you do not set an 'idx_connect_str' parts of the Pipeline may not work")
        elif kwargs['connection'].startswith('sqlite'):
            idx_connect_str = kwargs['connection']  # BUGFIX: was an undefined name below
            # 'sqlite:///<path>': strip the 10-character scheme prefix to get the file path.
            idx_path = idx_connect_str[10:]
            if not os.path.isfile(idx_path):
                logger.info('path %s', idx_path)
                if 'create_idx' in kwargs:
                    # BUGFIX: was `if not create_idx:` (undefined local, NameError)
                    # and raised the undefined `PipelineError`.
                    if not kwargs['create_idx']:
                        raise DecamPipeError("Unable to locate DECam file index")
                else:
                    if not get_bool(
                            "DECam file index does not exist, create it now? ('y'/'n')"):
                        raise DecamPipeError("Unable to locate DECam file index")
                import astropyp.index as index
                # NOTE(review): the original referenced an undefined `img_path`;
                # assuming the DECam image directory is kwargs['paths']['decam']
                # — TODO confirm with the pipeline's callers.
                img_path = kwargs['paths'].get('decam')
                # BUGFIX: the prompt previously showed a literal '{0}' (no .format()).
                recursive = get_bool(
                    "Search '{0}' recursively for images? ('y'/'n')".format(img_path))
                index.build(img_path, idx_connect_str, True, recursive, True)
        datapyp.core.Pipeline.__init__(self, **kwargs)
import gym
import highway_env
from agent import Agent
import pandas as pd
import numpy as np
env = gym.make("highway-v0")
done = False  # NOTE(review): never read again — the loop below keys off env.vehicle.crashed
# Notes
# Action space between 0 and 4 inclusive
# 0 is merge left
# 1 is do nothing
# 2 is merge right
# 3 is speed up
# 4 is slow down
#
## Obs space is a 5x5 matrix with values between -1 and 1
## This represents a matrix with the labels:
## presence, x, y, vx, vy: Ego Vehicle
## presence, x, y, vx, vy: VEHICLE 1
## presence, x, y, vx, vy: VEHICLE 2
## presence, x, y, vx, vy: VEHICLE 3
##
## X increases over time
## Y = 0 in top line
## Y = 4 in next line
## Y = 8 in next lane
## Y = 12 in bottom lane
next_step = 1  # action applied on the next env.step (start with "do nothing")
while not env.vehicle.crashed:
    obs, _, _, _ = env.step(next_step)
    # print(pd.DataFrame.from_records([env.vehicle.to_dict()])["x", "y", "vx", "vy"])
    ego_dict = env.vehicle.to_dict()
    # Build an Agent for the ego vehicle. y and vy are divided by 4 (the lane
    # spacing noted above) so lane centres fall on integer coordinates.
    # NOTE(review): the meaning of the 50, 50, 5 constructor constants is
    # defined in agent.py, which is not visible here.
    ego_agent = Agent(
        np.array([ego_dict["x"], ego_dict["y"] / 4]),
        np.array([ego_dict["x"] + 100, ego_dict["y"] / 4]),
        50,
        50,
        5,
        np.array([ego_dict["vx"], ego_dict["vy"] / 4]),
    )
    print(f"Ego (x, y): {ego_agent.pos[0], ego_agent.pos[1], ego_agent.vel[0], ego_agent.vel[1]}")
    # print(f"Ego (lane, lane_index): {env.vehicle.lane, env.vehicle.lane_index}")
    neighbors = []
    for vehicle in env.road.close_vehicles_to(
        env.vehicle, env.PERCEPTION_DISTANCE, see_behind=True
    ):
        adj_dict = vehicle.to_dict()
        neighbors.append(
            Agent(
                np.array([adj_dict["x"], adj_dict["y"] / 4]),
                np.array([adj_dict["x"] + 100, adj_dict["y"] / 4]),
                50,
                50,
                5,
                np.array([adj_dict["vx"], adj_dict["vy"] / 4]),
            )
        )
        print(f"Neighbor (x, y): {neighbors[-1].pos[0], neighbors[-1].pos[1], neighbors[-1].vel[0], neighbors[-1].vel[1], ego_agent.time_to_collision(neighbors[-1])}")
    # Add agents so the ego doesnt merge off of the edge of the lane
    # NOTE(review): `adj_dict` here is the *last* neighbour's dict and is
    # unbound (NameError) when there are no close vehicles — possibly
    # `ego_dict` was intended; confirm.
    neighbors.append(
        Agent(
            np.array([-1, ego_dict["y"] / 4]),
            np.array([adj_dict["x"] + 100, adj_dict["y"] / 4]),
            50,
            50,
            5,
            np.array([ego_dict["vx"], 0.5]),
        )
    )
    neighbors.append(
        Agent(
            np.array([5, ego_dict["y"] / 4]),
            np.array([adj_dict["x"] + 100, adj_dict["y"] / 4]),
            50,
            50,
            5,
            np.array([ego_dict["vx"], -0.5]),
        )
    )
    # Social-forces style steering suggestion from the neighbourhood.
    delta_v = ego_agent.computeForces(neighbors)
    print(delta_v)
    # If the X instruction is larger
    # If the X instruction is positive
    # if abs(delta_v[0]) == delta_v[0]:
    #     print("Speed up")
    # else:
    #     print("Slow down")
    lane_epsilon = 0.0125  # how close to a lane centre we must be to change lanes
    move_epsilon = 0.01    # minimum lateral force before a lane change is considered

    def how_close(x):
        # Distance to the nearest integer, and that integer.
        return abs(round(x) - x), round(x)

    laneness = how_close(ego_agent.pos[1])
    can_change = False
    # Only change lanes when centred in one of the four lanes (0..3).
    if laneness[1] in [0, 1, 2, 3] and lane_epsilon > laneness[0]:
        can_change = True
    if can_change and abs(delta_v[1]) > move_epsilon:
        if abs(delta_v[1]) == delta_v[1]:
            print("Merge down")
            next_step = 2
        else:
            print("Merge up")
            next_step = 0
    else:
        if abs(delta_v[0]) == delta_v[0]:
            print("Speed up")
            next_step = 3
        else:
            print("Slow down")
            next_step = 4
    env.render()
| 3,487 | 1,336 |
from typing import List, Optional
class TimeStreamDataWriter:
    """Thin wrapper around a Timestream-Write client's write_records call."""

    def __init__(self, client) -> None:
        # client: an object exposing write_records(**kwargs) (e.g. a boto3
        # 'timestream-write' client). May be None; checked on use.
        self.client = client

    def write_records(self, database_name: str, table_name: str, records: List[dict], common_attributes: Optional[List[dict]] = None,):
        """Write *records* to the given Timestream database/table.

        Args:
            database_name: target Timestream database name.
            table_name: target table name.
            records: list of Timestream record dicts.
            common_attributes: attributes shared by every record; omitted
                from the API call when None (the API distinguishes absent
                from empty).

        Returns:
            The client's write_records response.

        Raises:
            Exception: if no client was configured.
        """
        if self.client is None:
            raise Exception('client is not set')
        # Build the request once instead of duplicating the two call branches
        # (the original had two near-identical client calls).
        request = {
            'DatabaseName': database_name,
            'TableName': table_name,
            'Records': records,
        }
        if common_attributes is not None:
            request['CommonAttributes'] = common_attributes
        return self.client.write_records(**request)
| 849 | 207 |
#!/usr/bin/env python3
# encoding: utf-8
import argparse
import sqlite3
from os import path
from .wrapper import wrap_calls, wrap_corpus
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
"""generate_horoscope.py: Generates horoscopes based provided corpuses"""
__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"
def restricted_weight(x, max_range=1.0):
    """argparse type: parse *x* as a float and require it in [0.0, max_range]."""
    value = float(x)
    if not 0.0 <= value <= max_range:
        raise argparse.ArgumentTypeError(
            "%r not in range [0.0, %.2f]" % (value, max_range))
    return value
here = path.abspath(path.dirname(__file__))
# Command-line interface definition for the horoscope generator.
_parser = argparse.ArgumentParser(description="Awesome horoscope generator")
_parser.add_argument('-d', '--debug', dest='debug',
                     help='show debug logs', action='store_true')
_parser.add_argument('-a', '--database', dest='database',
                     default=path.join(here, 'data', 'zodiac.sqlite'), help='sqlite database file')
_parser.add_argument('-s', '--sign', dest='sign',
                     help='zodiac sign to generate', default=None)
_parser.add_argument('-k', '--keyword', dest='keyword',
                     help='keyword for the horoscope', default=None)
_parser.add_argument('-t', '--threshold', dest='threshold',
                     help='minimum count of horoscopes for the given filters', type=int, default=10)
# Markov/HMM model configuration.
_parser.add_argument('-o', '--order', dest='order', choices=range(1, 20),
                     help='order of the used markov chain', type=int, default=4)
_parser.add_argument('--order-emissions', dest='order_emissions', choices=range(1, 20),
                     help='max. order to look back at prev. emissions (HMM)', type=int, default=1)
_parser.add_argument('-n', '--horoscopes', dest='nr_horoscopes', choices=range(1, 11),
                     help='number of horoscopes', type=int, default=1)
_parser.add_argument('-c', '--synonyms-generation', dest='use_synonyms_generation',
                     help='additionally use synonyms of keywords for generation', action='store_true')
_parser.add_argument('-m', '--markov_type', dest='markov_type', choices=('markov', 'hmm', 'hmm_past'),
                     help='Markov type to use (default: markov)', default="markov")
# Weights below are validated by restricted_weight to lie in [0.0, 1.0].
_parser.add_argument('--prob-hmm-states', dest='prob_hmm_states', type=restricted_weight,
                     help='When using previous states and emissions, weight for the previous states',
                     default=0.5)
_parser.add_argument('--prob-hmm-emissions', dest='prob_hmm_emissions', type=restricted_weight,
                     help='When using previous states and emissions, weight for the previous emissions',
                     default=0.5)
_parser.add_argument('-y', '--synonyms-emission', dest='use_synonyms_emission',
                     help='use synonyms on emissions', action='store_true')
_parser.add_argument('--prob-syn-emissions', dest='prob_synonyms_emission', type=restricted_weight,
                     help='probability to emit synonyms', default=0.3)
_parser.add_argument('--list-keywords', dest='list_keywords', action='store_true',
                     help='show all available keywords')
_parser.add_argument('-r', '--random-keyword', dest='random_keyword', action='store_true',
                     help='select keyword randomly (weighted on occurrence)')
_parser.add_argument('--ratings', dest='use_ratings', action='store_true',
                     help='weight states according to ratings')
_parser.add_argument('--moon', dest='use_moon', action='store_true',
                     help='Use current moon phase for the keyword selection')
def main():
    """Parse CLI options, open the database, then either list the available
    keywords or print the generated horoscopes."""
    args = vars(_parser.parse_args())
    with sqlite3.connect(args["database"]) as conn:
        if not args["list_keywords"]:
            print("\n".join(wrap_calls(conn, **args)))
        else:
            for row in wrap_corpus(conn, **args).list_keywords():
                print("%-4s%s" % (row[1], row[0]))
if __name__ == '__main__':
    # Script entry point.
    main()
| 4,015 | 1,246 |
"""
Copyright (C) 2016-2019 Chenchao Shou
Licensed under Illinois Open Source License (see the file LICENSE). For more information
about the license, see http://otm.illinois.edu/disclose-protect/illinois-open-source-license.
Define constants.
"""
# file templates (%s is the optimization problem name)
STATE_NPZ_FILE_TEMP = 'optimizer_state_%s.npz'  # file that saves optimizer state
STATE_PKL_FILE_TEMP = 'optimizer_state_%s.pkl'  # file that saves optimizer state
STATE_NPZ_TEMP_FILE_TEMP = 'optimizer_state_%s.temp.npz'  # temporary file that saves optimizer state
STATE_PKL_TEMP_FILE_TEMP = 'optimizer_state_%s.temp.pkl'  # temporary file that saves optimizer state
__author__ = 'psuresh'
import asyncio
async def slow_operation(future):
    """Simulate a slow task, then mark *future* done with a result string.

    BUGFIX/modernisation: the generator-based `@asyncio.coroutine` +
    `yield from` style was deprecated in Python 3.8 and removed in 3.11;
    `async def` / `await` is equivalent and works on 3.5+. Callers are
    unaffected (still awaitable / schedulable with ensure_future).
    """
    print("inside task")
    await asyncio.sleep(1)
    print("task done")
    future.set_result('Future is done!')
def got_result(future):
    """Done-callback: print the future's result, then stop the event loop."""
    print("inside callback")
    print(future.result())
    loop.stop()  # `loop` is the module-level event loop created below
# Drive the demo: schedule slow_operation, attach the callback, run until
# got_result() stops the loop.
loop = asyncio.get_event_loop()
future = asyncio.Future()
print("future initialized")
print("task scheduled")
asyncio.ensure_future(slow_operation(future))
future.add_done_callback(got_result)
try:
    loop.run_forever()  # exits when got_result() calls loop.stop()
finally:
    loop.close()
# Output
# ------------------------------
# python future_done_callback.py
# future initialized
# task scheduled
# inside task
# task done
# inside callback
# Future is done!
| 732 | 246 |
from flask_restful import Resource, reqparse
from models import UserModel, RevokedTokenModel, PortfolioModel
from flask_jwt_extended import (create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt, get_jwt_claims)
import json
from flask import request
parser = reqparse.RequestParser()
class UserRegistration(Resource):
    """POST: create a new user and return fresh access/refresh JWTs."""

    def post(self):
        parser.add_argument('username', help = 'This field cannot be blank', required = True)
        parser.add_argument('password', help = 'This field cannot be blank', required = True)
        data = parser.parse_args()
        if UserModel.find_by_username(data['username']):
            return {'message': 'User {} already exists'.format(data['username'])}
        new_user = UserModel(
            username = data['username'],
            password = UserModel.generate_hash(data['password'])
        )
        try:
            new_user.save_to_db()
            access_token = create_access_token(identity = data['username'])
            refresh_token = create_refresh_token(identity = data['username'])
            return {
                'message': 'User {} was created'.format(data['username']),
                'access_token': access_token,
                'refresh_token': refresh_token
            }
        # BUGFIX: narrowed from bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.
        except Exception:
            return {'message': 'Something went wrong'}, 500
class UserLogin(Resource):
    """POST: verify credentials and return access/refresh JWTs."""

    def post(self):
        parser.add_argument('username', help = 'This field cannot be blank', required = True)
        parser.add_argument('password', help = 'This field cannot be blank', required = True)
        credentials = parser.parse_args()
        username = credentials['username']
        # Guard clauses: unknown user, then bad password, then success.
        user = UserModel.find_by_username(username)
        if not user:
            return {'message': 'User {} doesn\'t exist'.format(username)}
        if not UserModel.verify_hash(credentials['password'], user.password):
            return {'message': 'Wrong credentials'}
        return {
            'message': 'Logged in as {}'.format(user.username),
            'access_token': create_access_token(identity = username),
            'refresh_token': create_refresh_token(identity = username)
        }
class UserLogoutAccess(Resource):
    """POST: revoke the presented access token."""

    @jwt_required
    def post(self):
        jti = get_raw_jwt()['jti']  # unique id of this specific token
        try:
            revoked_token = RevokedTokenModel(jti = jti)
            revoked_token.add()
            return {'message': 'Access token has been revoked'}
        # BUGFIX: narrowed from bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.
        except Exception:
            return {'message': 'Something went wrong'}, 500
class UserLogoutRefresh(Resource):
    """POST: revoke the presented refresh token."""

    @jwt_refresh_token_required
    def post(self):
        jti = get_raw_jwt()['jti']  # unique id of this specific token
        try:
            revoked_token = RevokedTokenModel(jti = jti)
            revoked_token.add()
            return {'message': 'Refresh token has been revoked'}
        # BUGFIX: narrowed from bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.
        except Exception:
            return {'message': 'Something went wrong'}, 500
class TokenRefresh(Resource):
    """POST: exchange a valid refresh token for a new access token."""

    @jwt_refresh_token_required
    def post(self):
        identity = get_jwt_identity()
        print(identity)  # debug trace preserved from the original
        return {'access_token': create_access_token(identity = identity)}
class AllUsers(Resource):
    """List or delete every user. No auth decorator — NOTE(review): likely dev-only."""

    def get(self):
        return UserModel.return_all()

    def delete(self):
        return UserModel.delete_all()
class SecretResource(Resource):
    """Example JWT-protected endpoint returning a fixed payload."""

    @jwt_required
    def get(self):
        return {
            'answer': 42
        }
class Mirror(Resource):
    """JWT-protected echo endpoint: returns the parsed request args as JSON text."""

    @jwt_required
    def post(self):
        data = parser.parse_args()
        print(data)  # debug trace
        return {
            'you': json.dumps(data)
        }
class Portfolio(Resource):
    """Create (POST) or list (GET) the authenticated user's portfolios."""

    @jwt_required
    def post(self):
        # Improvement: removed the commented-out try/except scaffolding that
        # previously wrapped this method.
        parser.add_argument('portfolio', help = 'This field cannot be blank', required = True)
        data = parser.parse_args()
        name = data['portfolio']
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        new_portfolio = PortfolioModel(name = name)
        current_user_model.add_portfolio(new_portfolio)
        new_portfolio.add_data()
        current_user_model.add_data()
        new_portfolio.commit()
        return { "message": "{0} created {1}".format(current_user, name)}

    @jwt_required
    def get(self):
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        # Collect just the portfolio names for the response payload.
        porto = [p.name for p in current_user_model.portfolios]
        return {"user": current_user, "portfolios": porto}
class PortfolioSpecific(Resource):
    """Read (GET), rename (PUT) or delete (DELETE) one portfolio by position."""

    @jwt_required
    def get(self, id):
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        # NOTE(review): the `id + 1` offset looks off-by-one and can raise
        # IndexError for the last portfolio — confirm the intended mapping.
        return {"portfolio": current_user_model.portfolios[id + 1].name}

    @jwt_required
    def put(self, id):
        data = request.get_json(silent=True)
        new_name = data['portfolio']
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        # BUGFIX: was `PorfolioModel` (typo) — a guaranteed NameError at runtime.
        new_portfolio = PortfolioModel(name = new_name)
        old_name = current_user_model.portfolios[id + 1].name
        current_user_model.portfolios[id + 1] = new_portfolio
        current_user_model.add_data()
        new_portfolio.add_data()
        new_portfolio.commit()
        return {"message": "Portfolio {} has been changed to {}".format(old_name, new_name)}

    @jwt_required
    def delete(self, id):
        # Deletion is intentionally not implemented yet; message preserved verbatim.
        data = request.get_json(silent=True)
        delete_name = data['portfolio']
        current_user = get_jwt_identity()
        current_user_model = UserModel.find_by_username(current_user)
        return {"message": "{}'s Porfolio {} wasn't deleted cause this functionality is still not implementd".format(current_user, delete_name)}
class TestRest(Resource):
    """Diagnostic endpoint that echoes the raw JSON request body."""

    def post(self):
        """Log the posted JSON and return it under the 'whatvar' key."""
        body = request.get_json()
        print(body)
        return {"whatvar": body}
| 6,555 | 1,870 |
import os
import kaggle
from dotenv import find_dotenv, load_dotenv
import logging
# Setting credentials for the kaggle CLI.
# Bug fix: the original ran os.system('set KAGGLE_USERNAME =...'), which sets
# the variable only inside a throw-away child shell (and with a stray trailing
# space in the name), so later kaggle invocations never saw it.  Mutating
# os.environ makes the values visible to every child process spawned later.
_kaggle_username = os.environ.get('kaggle_username')
_kaggle_key = os.environ.get('kaggle_key')
if _kaggle_username is not None:
    os.environ['KAGGLE_USERNAME'] = _kaggle_username
if _kaggle_key is not None:
    os.environ['KAGGLE_KEY'] = _kaggle_key
# function to extract the data
def extractData(path):
    """Download the raw titanic train/test CSVs into *path* via the kaggle CLI."""
    for csv_name in ('train.csv', 'test.csv'):
        os.system('kaggle datasets download mhouellemont/titanic -f %s -p %s' % (csv_name, path))
def main(project_dir):
    """Fetch the raw dataset into ../data/raw and log progress.

    Note: *project_dir* is accepted but unused -- the data path is built from
    os.path.pardir (relative to the current working directory) instead.
    """
    logger = logging.getLogger(__name__)
    logger.info('Getting raw data')
    # set data paths
    raw_data_path = os.path.join(os.path.pardir,'data','raw')
    # extract the data
    extractData(raw_data_path)
    logger.info('Data downloaded')
if __name__ == '__main__':
    # Repository root: two levels above this file.  Note main() builds its own
    # relative path and ignores this value.
    project_dir = os.path.join(os.path.dirname(__file__),os.pardir,os.pardir)
    # set up logger
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level = logging.INFO, format = log_format)
    # loading dotenv so kaggle credentials from a .env file reach os.environ
    dotenv = find_dotenv()
    load_dotenv(dotenv)
    main(project_dir)
| 1,164 | 433 |
from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import (StageToRedshiftOperator, LoadFactOperator,
LoadDimensionOperator, DataQualityOperator)
from helpers import SqlQueries
# Task-level defaults applied to every operator in this DAG.
# NOTE(review): 'catchup' is a DAG-level setting, not a task argument, so it
# has no effect from inside default_args -- confirm and pass it to DAG()
# instead if backfills must stay disabled.  'data_quality_checks' is not an
# Airflow argument either; it is parked here so run_quality_checks below can
# read it back via default_args['data_quality_checks'].
default_args = {
    'owner': 'udacity',
    'depends_on_past': False,
    'start_date': datetime(2021, 7, 1),
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    'catchup': False,
    # One "expect zero rows" NULL check per warehouse table.
    'data_quality_checks':[
        {'check_sql': "SELECT COUNT(*) FROM users WHERE userid is null", 'expected_result':0},
        {'check_sql': "SELECT COUNT(*) FROM songs WHERE songid is null", 'expected_result':0},
        {'check_sql': "SELECT COUNT(*) FROM artists WHERE artistid is null", 'expected_result':0},
        {'check_sql': "SELECT COUNT(*) FROM time WHERE start_time is null", 'expected_result':0},
        {'check_sql': "SELECT COUNT(*) FROM songplays WHERE userid is null", 'expected_result':0}
    ]
}
# Hourly ETL DAG.
# Fix: 'catchup' only disables backfills when passed to the DAG constructor;
# leaving it solely inside default_args (as the original did) is a no-op.
dag = DAG('udac_example_dag',
          default_args=default_args,
          description='Load and transform data in Redshift with Airflow',
          schedule_interval='0 * * * *',
          catchup=False
          )
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)

# Copy raw JSON event logs from S3 into the Redshift staging table.
stage_events_to_redshift = StageToRedshiftOperator(
    task_id="Stage_events",
    redshift_conn_id="redshift",
    aws_credentials_id="aws_credentials",
    table="staging_events",
    s3_bucket="udacity-dend",
    s3_key="log_data",
    json_path="s3://udacity-dend/log_json_path.json",
    dag=dag
)

# Copy raw song metadata from S3 into the Redshift staging table.
stage_songs_to_redshift = StageToRedshiftOperator(
    task_id="Stage_songs",
    redshift_conn_id="redshift",
    aws_credentials_id="aws_credentials",
    table="staging_songs",
    s3_bucket="udacity-dend",
    s3_key="song_data",
    # No JSONPaths file for the songs dump; let Redshift auto-map columns.
    json_path="auto",
    dag=dag
)

# Populate the songplays fact table from the staged data.
load_songplays_table = LoadFactOperator(
    task_id="Load_songplays_fact_table",
    redshift_conn_id="redshift",
    table="songplays",
    sql_query=SqlQueries.songplay_table_insert,
    dag=dag
)

# Dimension loads; truncate_table=True makes each run idempotent.
load_user_dimension_table = LoadDimensionOperator(
    task_id="Load_user_dim_table",
    redshift_conn_id = "redshift",
    table="users",
    sql_query=SqlQueries.user_table_insert,
    truncate_table=True,
    dag=dag
)

load_song_dimension_table = LoadDimensionOperator(
    task_id="Load_song_dim_table",
    redshift_conn_id="redshift",
    table="songs",
    sql_query=SqlQueries.song_table_insert,
    truncate_table=True,
    dag=dag
)

load_artist_dimension_table = LoadDimensionOperator(
    task_id="Load_artist_dim_table",
    redshift_conn_id="redshift",
    table="artists",
    sql_query=SqlQueries.artist_table_insert,
    truncate_table=True,
    dag=dag
)

load_time_dimension_table = LoadDimensionOperator(
    task_id="Load_time_dim_table",
    redshift_conn_id="redshift",
    table="time",
    sql_query=SqlQueries.time_table_insert,
    truncate_table=True,
    dag=dag
)

# Runs the NULL-check queries stashed in default_args['data_quality_checks'].
run_quality_checks = DataQualityOperator(
    task_id="Run_data_quality_checks",
    redshift_conn_id="redshift",
    dq_checks=default_args['data_quality_checks'],
    dag=dag
)

end_operator = DummyOperator(task_id="End_execution", dag=dag)
# Task dependency graph, expressed with Airflow's list fan-out/fan-in syntax;
# the resulting edges are identical to chaining each pair individually.

# Load the staging tables in parallel.
start_operator >> [stage_events_to_redshift, stage_songs_to_redshift]

# Both staging loads must finish before the songplays fact table is built.
[stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table

# Fan out to the four dimension tables.
load_songplays_table >> [load_song_dimension_table,
                         load_user_dimension_table,
                         load_artist_dimension_table,
                         load_time_dimension_table]

# All dimension loads feed the data-quality gate.
[load_song_dimension_table,
 load_user_dimension_table,
 load_artist_dimension_table,
 load_time_dimension_table] >> run_quality_checks

# End execution
run_quality_checks >> end_operator
"""Test the kernels service API."""
import threading
import time
from jupyterlab.tests.utils import LabTestBase, APITester
from notebook.tests.launchnotebook import assert_http_error
class BuildAPITester(APITester):
    """Wrapper for build REST API requests"""
    url = 'lab/api/build'

    def getStatus(self):
        # GET lab/api/build -> current build status/message.
        return self._req('GET', '')

    def build(self):
        # POST lab/api/build -> start a build.
        return self._req('POST', '')

    def clear(self):
        # DELETE lab/api/build -> cancel a running build.
        return self._req('DELETE', '')
class BuildAPITest(LabTestBase):
    """Test the build web service API"""

    def setUp(self):
        """Create a fresh build API wrapper bound to this test's requester."""
        self.build_api = BuildAPITester(self.request)

    def test_get_status(self):
        """Make sure there are no kernels running at the start"""
        resp = self.build_api.getStatus().json()
        assert 'status' in resp
        assert 'message' in resp

    def test_build(self):
        """A build request should succeed outright."""
        resp = self.build_api.build()
        assert resp.status_code == 200

    def test_clear(self):
        """Clearing fails when idle, and cancels an in-flight build."""
        with assert_http_error(500):
            self.build_api.clear()

        def build_thread():
            # This build is expected to be interrupted by the DELETE below.
            with assert_http_error(500):
                self.build_api.build()

        t1 = threading.Thread(target=build_thread)
        t1.start()
        # Poll until the build actually starts.  Fix: the original spun in a
        # tight busy-wait; sleeping between polls avoids hammering the server
        # (and finally uses the module's 'time' import).
        while True:
            resp = self.build_api.getStatus().json()
            if resp['status'] == 'building':
                break
            time.sleep(0.1)
        resp = self.build_api.clear()
        assert resp.status_code == 204
        # Fix: join the worker so its assert_http_error check completes (and
        # any failure surfaces) before the test returns.
        t1.join()
| 1,436 | 440 |
from dataloader import *
| 25 | 8 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 6 15:11:12 2016
@author: yash
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
sess = tf.InteractiveSession()
"""
Convolutional Neural Net
"""
def weight_variable(shape):
    """Return a TF weight Variable of *shape*, truncated-normal init (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Return a TF bias Variable of *shape*, initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    # Returns the result of convolving x with W (stride 1; SAME padding keeps
    # the spatial dimensions unchanged).
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
def max_pool_2x2(x):
    # Returns x max-pooled over 2x2 windows with stride 2 (halves H and W).
    return tf.nn.max_pool(x, ksize=[1,2,2,1],
                          strides=[1,2,2,1], padding='SAME')
def validate(inp, out, accuracy, i = -1):
    # Evaluate *accuracy* on (inp, out) with dropout disabled (keep_prob=1.0)
    # and log it; i is the training step (-1 marks a final/ad-hoc check).
    # Relies on the module-level placeholders x, y_ and keep_prob.
    train_accuracy = accuracy.eval(feed_dict={x:inp, y_:out, keep_prob:1.0})
    print("step %d, training accuracy %g" %(i, train_accuracy))
def variable_summaries(var, name):
    # Attach TensorBoard summaries (mean, max, histogram) to *var* under *name*.
    # Uses the pre-1.0 TF summary API (tf.scalar_summary / tf.histogram_summary).
    mean = tf.reduce_mean(var)
    tf.scalar_summary('mean/'+name, mean)
    tf.scalar_summary('max/'+name, tf.reduce_max(var))
    tf.histogram_summary(name, var)
# --- Graph construction: LeNet-style CNN for MNIST ---
x = tf.placeholder(tf.float32, shape=[None,784]) #Input: flattened 28x28 images
tf.image_summary('input', x, 10)
y_ = tf.placeholder(tf.float32, shape=[None,10]) #Expected outcome (one-hot labels)
x_image = tf.reshape(x, [-1,28,28,1]) #reshape input vector to an NHWC image batch
#First convolution layer with 32 filters of size 5x5
W_conv1 = weight_variable([5,5,1,32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)  # 28x28 -> 14x14
#second convloutional layer with 64 filters of size 5x5
W_conv2 = weight_variable([5,5,32,64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)  # 14x14 -> 7x7
#fully connected layer with 1024 hidden units
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) #Flatten the result of convolution
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
#dropouts (keep_prob fed at run time; 1.0 disables dropout)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
#final classifier (softmax over 10 digit classes)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
#loss function: cross-entropy against the one-hot labels
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())
# Training loop: 500 steps of 50-image minibatches, logging every 100 steps.
# NOTE(review): keep_prob is fed as 1.0 during training too, so dropout is
# effectively disabled -- confirm whether a value < 1.0 was intended.
for i in range(500):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        validate(batch[0], batch[1], accuracy, i)
    train_step.run(feed_dict = {x:batch[0], y_:batch[1], keep_prob: 1.0})
validate(mnist.test.images, mnist.test.labels, accuracy)
| 3,151 | 1,312 |
# Append user input lines to data.txt until the sentinel "[STOP]" is entered.
# Fix: the original opened data.txt anew on every iteration and only closed
# the final handle, leaking one file handle per line; 'with' closes each
# handle deterministically.  As before, the "[STOP]" sentinel line itself is
# written to the file before the loop ends.
while True:
    user_input = input("Enter something: ")
    with open("data.txt", "at") as f:
        f.write(user_input + "\n")
    if user_input == "[STOP]":
        break
from embedding import *
from collections import OrderedDict
import torch
class RelationMetaLearner(nn.Module):
    """Infer a relation meta-embedding from a support set with a 3-layer MLP.

    Input: (batch, few, 2, embed_size) head/tail entity-pair embeddings.
    Output: (batch, 1, 1, out_size) relation representation, averaged over
    the support examples.
    """

    def __init__(self, few, embed_size=100, num_hidden1=500, num_hidden2=200, out_size=100, dropout_p=0.5):
        super(RelationMetaLearner, self).__init__()
        self.embed_size = embed_size
        self.few = few
        self.out_size = out_size
        # Layer stack: concatenated (head, tail) pair -> hidden1 -> hidden2 -> out.
        # BatchNorm normalises across the 'few' support examples.
        self.rel_fc1 = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(2 * embed_size, num_hidden1)),
            ('bn', nn.BatchNorm1d(few)),
            ('relu', nn.LeakyReLU()),
            ('drop', nn.Dropout(p=dropout_p)),
        ]))
        self.rel_fc2 = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(num_hidden1, num_hidden2)),
            ('bn', nn.BatchNorm1d(few)),
            ('relu', nn.LeakyReLU()),
            ('drop', nn.Dropout(p=dropout_p)),
        ]))
        self.rel_fc3 = nn.Sequential(OrderedDict([
            ('fc', nn.Linear(num_hidden2, out_size)),
            ('bn', nn.BatchNorm1d(few)),
        ]))
        # Xavier init on each linear layer, in declaration order.
        for stage in (self.rel_fc1, self.rel_fc2, self.rel_fc3):
            nn.init.xavier_normal_(stage.fc.weight)

    def forward(self, inputs):
        """Map support pair embeddings to one relation meta per batch element."""
        batch, few = inputs.shape[0], inputs.shape[1]
        flat = inputs.contiguous().view(batch, few, -1)
        hidden = self.rel_fc2(self.rel_fc1(flat))
        projected = self.rel_fc3(hidden)
        pooled = torch.mean(projected, 1)
        return pooled.view(batch, 1, 1, self.out_size)
class EmbeddingLearner(nn.Module):
    """Score triples TransE-style: score = -||h + r - t||_2."""

    def __init__(self):
        super(EmbeddingLearner, self).__init__()

    def forward(self, h, t, r, pos_num):
        """Return (positive_scores, negative_scores) split at *pos_num*.

        h, t, r: (batch, num, 1, dim) tensors; each score tensor is (batch, k).
        """
        distance = torch.norm(h + r - t, 2, -1)
        score = (-distance).squeeze(2)
        return score[:, :pos_num], score[:, pos_num:]
class MetaR(nn.Module):
    """Few-shot KG relation prediction (MetaR).

    Learns a relation meta-representation from a support set, adapts it with
    a single gradient step on the support margin loss, and scores the query
    triples with the adapted relation.
    """
    def __init__(self, dataset, parameter):
        super(MetaR, self).__init__()
        self.device = parameter['device']
        # beta: step size of the one-step gradient update on the relation meta.
        self.beta = parameter['beta']
        self.dropout_p = parameter['dropout_p']
        self.embed_dim = parameter['embed_dim']
        self.margin = parameter['margin']
        # abla: ablation flag -- when true, skip the gradient adaptation step.
        self.abla = parameter['ablation']
        self.embedding = Embedding(dataset, parameter)
        # Embedding sizes are dataset-specific: Wiki-One uses 50-d, NELL-One 100-d.
        if parameter['dataset'] == 'Wiki-One':
            self.relation_learner = RelationMetaLearner(parameter['few'], embed_size=50, num_hidden1=250,
                                                        num_hidden2=100, out_size=50, dropout_p=self.dropout_p)
        elif parameter['dataset'] == 'NELL-One':
            self.relation_learner = RelationMetaLearner(parameter['few'], embed_size=100, num_hidden1=500,
                                                        num_hidden2=200, out_size=100, dropout_p=self.dropout_p)
        self.embedding_learner = EmbeddingLearner()
        self.loss_func = nn.MarginRankingLoss(self.margin)
        # Cache of adapted relation metas, reused across eval batches per relation.
        self.rel_q_sharing = dict()

    def split_concat(self, positive, negative):
        # Split (batch, num, 2, dim) pair tensors into separate e1/e2 tensors,
        # concatenating positives and negatives along the example axis.
        pos_neg_e1 = torch.cat([positive[:, :, 0, :],
                                negative[:, :, 0, :]], 1).unsqueeze(2)
        pos_neg_e2 = torch.cat([positive[:, :, 1, :],
                                negative[:, :, 1, :]], 1).unsqueeze(2)
        return pos_neg_e1, pos_neg_e2

    def forward(self, task, iseval=False, curr_rel=''):
        """Return (p_score, n_score) for the query set of a few-shot task."""
        # transfer task string into embedding
        support, support_negative, query, negative = [self.embedding(t) for t in task]
        few = support.shape[1]              # num of few
        num_sn = support_negative.shape[1]  # num of support negative
        num_q = query.shape[1]              # num of query
        num_n = negative.shape[1]           # num of query negative
        rel = self.relation_learner(support)
        # retain_grad so rel.grad is available for the meta update below.
        rel.retain_grad()
        # relation for support
        rel_s = rel.expand(-1, few+num_sn, -1, -1)
        # because in test and dev step, same relation uses same support,
        # so it's no need to repeat the step of relation-meta learning
        if iseval and curr_rel != '' and curr_rel in self.rel_q_sharing.keys():
            rel_q = self.rel_q_sharing[curr_rel]
        else:
            if not self.abla:
                # split on e1/e2 and concat on pos/neg
                sup_neg_e1, sup_neg_e2 = self.split_concat(support, support_negative)
                p_score, n_score = self.embedding_learner(sup_neg_e1, sup_neg_e2, rel_s, few)
                y = torch.Tensor([1]).to(self.device)
                self.zero_grad()
                loss = self.loss_func(p_score, n_score, y)
                # retain_graph so the later query-loss backward can reuse the graph.
                loss.backward(retain_graph=True)
                grad_meta = rel.grad
                # One-step gradient adaptation of the relation meta.
                rel_q = rel - self.beta*grad_meta
            else:
                rel_q = rel
            self.rel_q_sharing[curr_rel] = rel_q
        rel_q = rel_q.expand(-1, num_q + num_n, -1, -1)
        que_neg_e1, que_neg_e2 = self.split_concat(query, negative)  # [bs, nq+nn, 1, es]
        p_score, n_score = self.embedding_learner(que_neg_e1, que_neg_e2, rel_q, num_q)
        return p_score, n_score
| 5,036 | 1,763 |
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1181
#!/usr/bin/env python2.7
# NOTE: Python 2 source (raw_input / xrange / print statement).
# Reads a row index and an operation code, then a 12x12 matrix of floats
# (one value per line, row-major), and prints either the sum ("S") or the
# mean of the selected row with one decimal place.
linha = int(raw_input())
array = [[0 for j in xrange(12)] for k in xrange(12)]
operacao = raw_input()
for p in xrange(12):
    for k in xrange(12):
        array[p][k]=float(raw_input())
if operacao == "S":
    # Sum of the chosen row.
    print "%.1f" % (sum(array[linha]))
else:
    # Mean of the chosen row (12 columns).
    print "%.1f" % (sum(array[linha])/12.0)
| 395 | 179 |
from __future__ import with_statement
from pyps import workspace
# Open a PIPS workspace over the two C sources, apply reduction-variable
# expansion to every loop of the AMORTIZED_DISK kernel, and display the
# transformed function.
with workspace("hyantes.c", "options.c") as w:
    w.props.constant_path_effects=False
    f=w["hyantes!do_run_AMORTIZED_DISK"]
    for l in f.all_loops:
        l.reduction_variable_expansion()
    f.display()
| 276 | 101 |
"""
Unit tests for the Subject model
"""
from django.test import TestCase
from cms.api import create_page
from richie.apps.courses.factories import CourseFactory, SubjectFactory
from richie.apps.courses.models import Subject
class SubjectTestCase(TestCase):
    """
    Unit test suite to validate the behavior of the Subject model
    """
    def test_subject_str(self):
        """
        The string representation should be built with the title of the related page.
        Only 1 query to the associated page should be generated.
        """
        page = create_page("Art", "courses/cms/subject_detail.html", "en")
        subject = SubjectFactory(extended_object=page)
        with self.assertNumQueries(1):
            self.assertEqual(str(subject), "Subject: Art")

    def test_subject_courses_copied_when_publishing(self):
        """
        When publishing a subject, the links to draft courses on the draft version of the
        subject should be copied (clear then add) to the published version.
        Links to published courses should not be copied as they are redundant and not
        up-to-date.
        """
        # Create draft courses
        course1, course2 = CourseFactory.create_batch(2)
        # Create a draft subject
        draft_subject = SubjectFactory(with_courses=[course1, course2])
        # Publish course1 (creates a public_extension alongside the draft)
        course1.extended_object.publish("en")
        course1.refresh_from_db()
        # The draft subject should see all courses and propose a custom filter to easily access
        # the draft versions
        self.assertEqual(
            set(draft_subject.courses.all()),
            {course1, course1.public_extension, course2},
        )
        self.assertEqual(set(draft_subject.courses.drafts()), {course1, course2})
        # Publish the subject and check that the courses are copied
        draft_subject.extended_object.publish("en")
        published_subject = Subject.objects.get(
            extended_object__publisher_is_draft=False
        )
        self.assertEqual(set(published_subject.courses.all()), {course1, course2})
        # When publishing, the courses that are obsolete should be cleared
        draft_subject.courses.remove(course2)
        self.assertEqual(set(published_subject.courses.all()), {course1, course2})
        # courses on the published subject are only cleared after publishing the draft page
        draft_subject.extended_object.publish("en")
        self.assertEqual(set(published_subject.courses.all()), {course1})
| 2,526 | 679 |
from Scripts.bank.bankController import BankController
from tkinter import ttk, Tk, Button, Label, END
from tkinter.scrolledtext import ScrolledText
from Scripts.support.textManipulation import TextManipulation
MAROON = "#800000"
WHITE = "#FFFFFF"
VALUES = "values"
class Main:
    """Tkinter GUI for browsing/editing named info records through BankController.

    NOTE(review): BankController methods are called with None as their first
    argument (e.g. BankController.create_database(None)) -- they appear to be
    instance methods used statically; confirm that None-as-self is intended.
    The window, combo box and text widget are kept in globals so the button
    callbacks can reach them.
    """
    def __init__(self):
        # Create/open the backing database before building the UI.
        BankController.create_database(None)
        global window
        global text_description
        global combo
        window = Tk()
        window.config(background="#ADD8E6")
        window.title("INFOS")
        window.geometry("900x500+100+10")
        button_newInfo = Button(window, width=10, text="New Info", command=self.buttonNewInfo, background="#3CB371", foreground=WHITE, activebackground=MAROON, activeforeground=WHITE)
        button_newInfo.place(x=150, y=50)
        button_update = Button(window, width=10, text="Update", command=self.buttonUpdate, background="#1E90FF", foreground="#F0F8FF", activebackground=MAROON, activeforeground=WHITE)
        button_update.place(x=630, y=50)
        button_delete = Button(window, width=10, text="Delete", command=self.buttonDelete, background="#FF6347", foreground="#F0F8FF", activebackground=MAROON, activeforeground=WHITE)
        button_delete.place(x=730, y=50)
        button_selectInfo = Button(window, width=10, text="Confirm", command=self.buttonSelectInfo, activebackground=MAROON, activeforeground=WHITE)
        button_selectInfo.place(x=530, y=50)
        button_exit = Button(window, width=10, text="Quit", command=self.buttonExit, background="#696969", foreground="#FFFFFF", activebackground=MAROON, activeforeground=WHITE)
        button_exit.place(x=820, y=0)
        text_description = ScrolledText(window, width=106, height=21)
        text_description.place(x=10, y=150)
        combo = ttk.Combobox(window, width=40, height=34)
        combo.place(x=250, y=50)
        # Populate the dropdown with the names already stored in the database.
        combo[VALUES] = (BankController.getNames(None))
        label_description = Label(window, text="DESCRIPTION", width=10, height=1, background="#ADD8E6")
        label_description.place(x=400, y=120)
        label_infos = Label(window, text="INFOS", width=10, height=1, background="#ADD8E6")
        label_infos.place(x=350, y=25)
        # Blocks until the window is closed.
        window.mainloop()

    def buttonNewInfo(self):
        # Insert the typed name + description, then refresh the dropdown.
        BankController.insert(None, combo.get(), TextManipulation.formatText(text_description.get(0.0, END)))
        combo[VALUES] = (BankController.getNames(None))

    def buttonUpdate(self):
        # Overwrite the selected record's description with the text box content.
        BankController.update(None, combo.get(), text_description.get(0.0, END))

    def buttonDelete(self):
        BankController.delete(None, combo.get())
        combo[VALUES] = (BankController.getNames(None))

    def buttonSelectInfo(self):
        # Replace the text box content with the stored description.
        text_description.delete('0.0', '100.0')
        text_description.insert(END,BankController.getDescription(None, combo.get()))

    def buttonExit(self):
        BankController.close(None)
        window.destroy()

# Launch the GUI immediately on import/run.
gui = Main()
#! /usr/bin/env python3
import os,sys,string
import sqlite3
from sqlite3 import Error
def create_connection(path):
    """Open a SQLite database at *path*; return the connection or None on error."""
    try:
        connection = sqlite3.connect(path)
    except Error as e:
        print(f"The error '{e}' occurred")
        return None
    print("Connection to SQLite DB successful")
    return connection
def execute_query(connection, query):
    """Execute a write query and commit; errors are printed, not raised."""
    cur = connection.cursor()
    try:
        cur.execute(query)
        connection.commit()
        print("Query executed successfully")
    except Error as e:
        print(f"The error '{e}' occurred")
create_faults_table = """
CREATE TABLE IF NOT EXISTS faults (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
width INTEGER,
position INTEGER,
discovered TEXT,
notdiscovered TEXT
);
"""
def execute_read_query(connection, query):
    """Run a SELECT and return all rows, or None if the query failed."""
    cur = connection.cursor()
    try:
        rows = cur.execute(query).fetchall()
    except Error as e:
        print(f"The error '{e}' occurred")
        return None
    return rows
def main():
    """Create the faults DB and load 'register' records from the file in argv[1].

    Expected line format: ``register <name> <width> <position>``; all other
    lines are ignored.
    """
    db = create_connection('faultsBase.sql')
    execute_query(db, create_faults_table)
    fname = sys.argv[1]
    # Fixes vs. the original: the input file is closed deterministically
    # ('with'), and the while/readline/return loop is replaced with plain
    # line iteration.  NOTE: values are still interpolated into the SQL text
    # as before; acceptable for a trusted local file, but switch to
    # parameterized queries if the input can come from untrusted sources.
    with open(fname) as fh:
        for line in fh:
            wrds = line.split()
            if len(wrds) == 0:
                continue
            if wrds[0] == 'register':
                query = ("INSERT INTO faults (name,width,position,discovered,notdiscovered)"
                         " VALUES ( '%s', %s, %s, 'false', 'false')" % (wrds[1], wrds[2], wrds[3]))
                print(query)
                execute_query(db, query)

if __name__ == '__main__':
    main()
| 1,651 | 522 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 25 14:47:10 2018
@author: adam
"""
from PyQt5.QtGui import QIcon
from . import icons_res
# Pre-built QIcon instances loaded from bundled resources.
_icons = {
    'app' : QIcon(":/images/icons/cadquery_logo_dark.svg")
}

import qtawesome as qta

# Font-Awesome icon specs: name -> (glyph names, qtawesome options).  Multi-
# glyph entries compose overlaid/scaled/offset glyphs into a single icon.
_icons_specs = {
    'new' : (('fa.file-o',),{}),
    'open' : (('fa.folder-open-o',),{}),
    # borrowed from spider-ide
    'autoreload': [('fa.repeat', 'fa.clock-o'), {'options': [{'scale_factor': 0.75, 'offset': (-0.1, -0.1)}, {'scale_factor': 0.5, 'offset': (0.25, 0.25)}]}],
    'save' : (('fa.save',),{}),
    'save_as': (('fa.save','fa.pencil'),
                {'options':[{'scale_factor': 1,},
                            {'scale_factor': 0.8,
                             'offset': (0.2, 0.2)}]}),
    'run' : (('fa.play',),{}),
    'delete' : (('fa.trash',),{}),
    'delete-many' : (('fa.trash','fa.trash',),
                     {'options' : \
                      [{'scale_factor': 0.8,
                        'offset': (0.2, 0.2),
                        'color': 'gray'},
                       {'scale_factor': 0.8}]}),
    'help' : (('fa.life-ring',),{}),
    'about': (('fa.info',),{}),
    'preferences' : (('fa.cogs',),{}),
    'inspect' : (('fa.cubes','fa.search'),
                 {'options' : \
                  [{'scale_factor': 0.8,
                    'offset': (0,0),
                    'color': 'gray'},{}]}),
    'screenshot' : (('fa.camera',),{}),
    'screenshot-save' : (('fa.save','fa.camera'),
                         {'options' : \
                          [{'scale_factor': 0.8},
                           {'scale_factor': 0.8,
                            'offset': (.2,.2)}]})
}
def icon(name):
    """Return the QIcon registered under *name*.

    Static resource icons (_icons) win; otherwise the icon is synthesised on
    demand from its Font-Awesome spec.  Raises KeyError for unknown names.
    """
    try:
        return _icons[name]
    except KeyError:
        args, kwargs = _icons_specs[name]
        return qta.icon(*args, **kwargs)
# *******************************************************************************
# Copyright 2017 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
#
# @microservice: py-core-domain library
# @author: Tyler Cox, Dell
# @version: 1.0.0
# *******************************************************************************
from domain.common import base_object
# pylint: disable=C0103
class Schedule(base_object.BaseObject):
    """Domain object describing when a scheduled action should run.

    Exactly one of runOnce, frequency or cron is expected to drive the
    schedule; the others should be left at their defaults.
    """
    # TODO - make protected after changes to test package
    DATETIME_FORMATS = ["yyyy_mMdd'T'HHmmss"]

    def __init__(self, name=None, start=None, end=None, frequency=None, cron=None, runOnce=False,
                 created=None, modified=None, origin=None):
        super(Schedule, self).__init__(created, modified, origin)
        # non-database identifier for a schedule- must be unique
        self.name = name
        # Start time in ISO 8601 format YYYYMMDD'T'HHmmss
        # @JsonFormat(shape = JsonFormat.Shape.STRING, pattern = "yyyymmdd'T'HHmmss")
        self.start = start
        # End time in ISO 8601 format YYYYMMDD'T'HHmmss
        # @JsonFormat(shape = JsonFormat.Shape.STRING, pattern = "yyyymmdd'T'HHmmss")
        self.end = end
        # how frequently should the event occur
        self.frequency = frequency
        # cron styled regular expression indicating how often the action under schedule should
        # occur. Use either runOnce, frequency or cron and not all.
        self.cron = cron
        # boolean indicating that this schedules runs one time - at the time indicated by the start
        self.runOnce = runOnce

    def __str__(self):
        """Human-readable dump of all schedule fields (includes base-object state)."""
        return ("Schedule [name=%s, start=%s, end=%s, frequency=%s, cron=%s, runOnce=%s,"
                " to_string()=%s]") \
            % (self.name, self.start, self.end, self.frequency, self.cron, self.runOnce,
               super(Schedule, self).__str__())
| 2,377 | 694 |
'''
Represents a C++ enum.
'''
from collections import OrderedDict
from typing import Any, Optional, Text, Union
class Enum(OrderedDict):
    '''
    Represents a C++ enum.

    Behaves as an ordered name->value mapping that also supports reverse
    (value->name) lookup through indexing, ``get`` and ``in``.
    '''

    def __init__(self, name, items=None):
        super().__init__(items or [])
        self.name = name

    def __getitem__(self, name: Union[Text, Any]) -> Union[Any, Text]:
        """Forward (name->value) lookup, falling back to reverse (value->name)."""
        try:
            return super().__getitem__(name)
        except KeyError:
            pass
        for enum_name, enum_value in self.items():
            if name == enum_value:
                return enum_name
        raise KeyError(name)

    def __setitem__(self, name: Text, value: Any):
        """Add an enumerator; *name* must be a valid Python identifier string."""
        if not isinstance(name, str):
            raise TypeError("name must be a str.")
        if not name.isidentifier():
            raise ValueError("name must be a valid identifier.")
        super().__setitem__(name, value)

    def get(self, key: Text, default: Optional[Any] = None, /):
        """Like __getitem__, but return *default* when the lookup fails."""
        return self[key] if key in self else default

    def __contains__(self, name: Text):
        """True if *name* is an enumerator name or an enumerator value."""
        if name in self.keys():
            return True
        return name in self.values()

    def __str__(self):
        return f'enum {self.name}'

    def __repr__(self):
        entries = ', '.join('({!r}, {!r})'.format(item, value) for item, value in self.items())
        return 'Enum({!r}, [{}])'.format(self.name, entries)
| 1,316 | 394 |
from random import randint
from time import sleep
def sorteia(lista, qtd=5):
    """Append *qtd* random ints in [1, 10] to *lista*, echoing each draw.

    Generalization: the original hard-coded 5 draws; *qtd* defaults to 5 so
    existing callers behave identically.  Fix: the original printed
    lista[num], which showed the wrong elements whenever the caller passed a
    non-empty list; the drawn value is now printed directly.
    """
    print(f'Sorteando {qtd} valores da lista:', end=' ')
    for _ in range(qtd):
        valor = randint(1, 10)
        lista.append(valor)
        sleep(0.5)  # dramatic pause between draws, as in the original
        print(valor, end=' ')
    print('PRONTO!')
def somaPar(lista):
    """Print and return the sum of the even values in *lista*.

    Improvement: the original only printed the result; returning it as well is
    backward-compatible (previous callers ignored the None return) and makes
    the function testable.
    """
    soma = sum(valor for valor in lista if valor % 2 == 0)
    print(f'Somando os valores pares de {lista}, temos {soma}')
    return soma
# Demo: draw 5 numbers into an empty list, then report the sum of the evens.
numeros = list()
sorteia(numeros)
somaPar(numeros)
| 493 | 184 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
# Reusable click option declarations shared by the FATE CLI commands.
CONF_PATH = click.option("-c", "--conf-path", type=click.Path(exists=True), required=True,
                         help="Configuration file path.")
# Fix: corrected the user-facing help-text typo "aotumatically" -> "automatically".
DSL_PATH = click.option("-d", "--dsl-path", type=click.Path(exists=True),
                        help="Domain-specific language(DSL) file path. If the type of job is 'predict', "
                             "you can leave this feature blank, or you can provide a valid dsl file to "
                             "replace the one that automatically generated by fate.")
LIMIT = click.option("-l", "--limit", type=click.INT, default=10,
                     help="LIMIT flag constrains the number of records to return. (default: 10)")
JOBID = click.option("-j", "--job-id", type=click.STRING,
                     help="A valid job id.")
JOBID_REQUIRED = click.option("-j", "--job-id", type=click.STRING, required=True,
                              help="A valid job id.")
role_choices_list = ["local", "guest", "arbiter", "host"]
ROLE = click.option("-r", "--role", type=click.Choice(role_choices_list), metavar="TEXT",
                    help="Role name. Users can choose one from {} and {}.".format(",".join(role_choices_list[:-1]),
                                                                                  role_choices_list[-1]))
ROLE_REQUIRED = click.option("-r", "--role", type=click.Choice(role_choices_list), required=True, metavar="TEXT",
                             help="Role name. Users can choose one from {} and {}.".format(",".join(role_choices_list[:-1]),
                                                                                           role_choices_list[-1]))
PARTYID = click.option("-p", "--party-id", type=click.STRING,
                       help="A valid party id.")
PARTYID_REQUIRED = click.option("-p", "--party-id", type=click.STRING, required=True,
                                help="A valid party id.")
COMPONENT_NAME = click.option("-cpn", "--component-name", type=click.STRING,
                              help="A valid component name.")
COMPONENT_NAME_REQUIRED = click.option("-cpn", "--component-name", type=click.STRING, required=True,
                                       help="A valid component name.")
status_choices_list = ["complete", "failed", "running", "waiting", "timeout", "canceled", "partial", "deleted"]
STATUS = click.option("-s", "--status", type=click.Choice(status_choices_list), metavar="TEXT",
                      help="Job status. Users can choose one from {} and {}.".format(", ".join(status_choices_list[:-1]),
                                                                                     status_choices_list[-1]))
OUTPUT_PATH_REQUIRED = click.option("-o", "--output-path", type=click.Path(exists=False), required=True,
                                    help="User specifies output directory path.")
OUTPUT_PATH = click.option("-o", "--output-path", type=click.Path(exists=False),
                           help="User specifies output directory path.")
NAMESPACE = click.option("-n", "--namespace", type=click.STRING,
                         help="Namespace.")
TABLE_NAME = click.option("-t", "--table-name", type=click.STRING,
                          help="Table name.")
NAMESPACE_REQUIRED = click.option("-n", "--namespace", type=click.STRING, required=True,
                                  help="Namespace.")
TABLE_NAME_REQUIRED = click.option("-t", "--table-name", type=click.STRING, required=True,
                                   help="Table name.")
TAG_NAME_REQUIRED = click.option("-t", "--tag-name", type=click.STRING, required=True,
                                 help="The name of tag.")
TAG_DESCRIPTION = click.option("-d", "--tag-desc", type=click.STRING,
                               help="The description of tag. Note that if there are some whitespaces in description, "
                                    "please make sure the description text is enclosed in double quotation marks.")
MODEL_VERSION = click.option("-m", "--model_version", type=click.STRING,
                             help="Model version.")
| 4,728 | 1,336 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Callable, Optional, Union
from datasets import Dataset
from transformers import DataCollatorForLanguageModeling, DataCollatorForWholeWordMask, PreTrainedTokenizerBase
from lightning_transformers.core.nlp import HFDataModule
from lightning_transformers.task.nlp.masked_language_modeling.config import MaskedLanguageModelingDataConfig
class MaskedLanguageModelingDataModule(HFDataModule):
    """
    Defines ``LightningDataModule`` for Language Modeling Datasets.
    Args:
        *args: ``HFDataModule`` specific arguments.
        cfg: Contains data specific parameters when processing/loading the dataset
            (Default ``MaskedLanguageModelingDataConfig``)
        **kwargs: ``HFDataModule`` specific arguments.
    """
    cfg: MaskedLanguageModelingDataConfig

    def __init__(
        self, *args, cfg: MaskedLanguageModelingDataConfig = MaskedLanguageModelingDataConfig(), **kwargs
    ) -> None:
        # NOTE(review): the default cfg instance is created once at function
        # definition time and shared by every call that omits cfg -- harmless
        # only if the config object is never mutated; confirm.
        super().__init__(*args, cfg=cfg, **kwargs)

    def process_data(self, dataset: Dataset, stage: Optional[str] = None) -> Dataset:
        """Tokenize the dataset and, unless line_by_line, regroup it into
        fixed-length chunks of cfg.max_length tokens."""
        column_names = dataset["train" if stage == "fit" else "validation"].column_names
        # Prefer a column literally named "text"; otherwise fall back to the first column.
        text_column_name = "text" if "text" in column_names else column_names[0]
        tokenize_function = partial(
            self.tokenize_function,
            tokenizer=self.tokenizer,
            text_column_name=text_column_name,
            line_by_line=self.cfg.line_by_line,
            padding=self.cfg.padding,
            max_length=self.cfg.max_length,
        )
        dataset = dataset.map(
            tokenize_function,
            batched=True,
            num_proc=self.cfg.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=self.cfg.load_from_cache_file,
        )
        if not self.cfg.line_by_line:
            # Concatenate-and-chunk pass: repack token streams into equal blocks.
            convert_to_features = partial(
                self.convert_to_features,
                max_seq_length=self.cfg.max_length,
            )
            dataset = dataset.map(
                convert_to_features,
                batched=True,
                num_proc=self.cfg.preprocessing_num_workers,
                load_from_cache_file=self.cfg.load_from_cache_file,
            )
        return dataset

    @staticmethod
    def tokenize_function(
        examples,
        tokenizer: Union[PreTrainedTokenizerBase],
        text_column_name: str = None,
        line_by_line: bool = False,
        padding: Union[str, bool] = "max_length",
        max_length: int = 128,
    ):
        # Tokenize one batch of examples.  In line_by_line mode each non-empty
        # line is an independent (padded/truncated) sample.
        if line_by_line:
            examples[text_column_name] = [
                line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
            ]
            return tokenizer(
                examples[text_column_name],
                padding=padding,
                truncation=True,
                max_length=max_length,
                # We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
                # receives the `special_tokens_mask`.
                return_special_tokens_mask=True,
            )
        else:
            # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
            # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
            # efficient when it receives the `special_tokens_mask`.
            return tokenizer(examples[text_column_name], return_special_tokens_mask=True)

    @staticmethod
    def convert_to_features(examples, max_seq_length: int, **kwargs):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
        # customize this part to your needs.
        total_length = (total_length // max_seq_length) * max_seq_length
        # Split by chunks of max_len.
        result = {
            k: [t[i:i + max_seq_length] for i in range(0, total_length, max_seq_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    @property
    def collate_fn(self) -> Callable:
        """Collator choice: whole-word masking vs. per-token masking, both
        driven by cfg.mlm_probability."""
        if self.cfg.wwm:
            return DataCollatorForWholeWordMask(self.tokenizer, mlm_probability=self.cfg.mlm_probability)
        else:
            return DataCollatorForLanguageModeling(self.tokenizer, mlm_probability=self.cfg.mlm_probability)
| 5,210 | 1,462 |
class Solution:
    def hasAllCodes(self, s: str, k: int) -> bool:
        """Return True iff every binary string of length ``k`` occurs as a substring of ``s``.

        Improvements over the straightforward scan:
        - fast-fail when s is too short to contain all 2**k codes;
        - each window is sliced once instead of twice;
        - early exit as soon as all codes have been seen.
        """
        required = 1 << k  # number of distinct binary codes of length k
        n = len(s)
        # Not enough length-k windows to cover all codes (also handles s == "").
        if n - k + 1 < required:
            return False
        seen = set()
        for i in range(n - k + 1):
            seen.add(s[i:i + k])
            if len(seen) == required:  # all codes found — stop scanning
                return True
        return False
| 297 | 108 |
import datetime
import sys
import traceback
from django.contrib import messages
from django.contrib.auth.views import LogoutView
from django.http import HttpResponse, HttpResponseServerError
from django.template import loader
from django.views.generic import TemplateView
from chipy_org.apps.meetings.models import Meeting
from chipy_org.apps.meetings.views import InitialRSVPMixin
from chipy_org.apps.sponsors.models import Sponsor
class Home(TemplateView, InitialRSVPMixin):
    """Homepage view: surfaces the next meeting and the featured sponsor."""

    template_name = "main/homepage.html"

    def get_meeting(self):
        # A meeting stays "current" for six hours after its start time.
        cutoff = datetime.datetime.now() - datetime.timedelta(hours=6)
        upcoming = Meeting.objects.filter(when__gt=cutoff).order_by("when")
        return upcoming.first()

    def get_context_data(self, **kwargs):
        context = dict(kwargs)
        context["IS_HOMEPAGE"] = True
        context["featured_sponsor"] = Sponsor.featured_sponsor()
        return self.add_extra_context(context)
def custom_500(request):
    """Render the 500 error page, exposing the active exception for debugging."""
    etype, value, tback = sys.exc_info()
    print((etype, value, tback))
    template = loader.get_template("500.html")
    context = {
        "exception_value": value,
        "value": etype,
        "tb": traceback.format_exception(etype, value, tback),
    }
    return HttpResponseServerError(template.render(context))
def custom_404(request, exception):
    """Return a minimal hand-rolled 404 response."""
    body = "<h1>404 - Page Not Found</h1>"
    return HttpResponse(body, status=404)
class LogoutWithRedirectAndMessage(LogoutView):
    """Logout view that flashes a confirmation message, then redirects home."""

    next_page = "/"
    def dispatch(self, request, *args, **kwargs):
        # Queue the flash message before delegating to the stock logout flow,
        # so it is stored while the request still has its session/user.
        messages.success(request, "You've been logged out")
        return super().dispatch(request, *args, **kwargs)
| 1,737 | 524 |
from django.db import models
# Create your models here.
class ProductType(models.Model):
    """Top-level product category."""

    # NOTE(review): "raiting" is a typo for "rating", kept as-is — renaming the
    # field would require a schema migration.
    name = models.CharField(max_length=50, unique=True)
    raiting = models.FloatField()
    description = models.TextField(default='')
class SubProductType(models.Model):
    """Second-level category nested under a ProductType."""

    name = models.CharField(max_length=50, unique=True)
    raiting = models.FloatField()
    # FK to the parent category; because the attribute is already named with an
    # ``_id`` suffix, Django's DB column becomes ``type_id_id``.
    type_id = models.ForeignKey(ProductType, on_delete=models.CASCADE)
    description = models.TextField(default='')
class Product(models.Model):
    """A concrete product, unique per (name, manufacture) pair."""

    # NOTE(review): plain integer rather than a ForeignKey to SubProductType —
    # no DB-level referential integrity; confirm whether this is intentional.
    sub_type_id = models.IntegerField()
    name = models.CharField(max_length=200)
    manufacture = models.CharField(max_length=200)
    raiting = models.FloatField()
    image_url = models.URLField()
    # Free-form serialized parameters blob; format not visible here — confirm.
    parameters = models.CharField(max_length=3000)
    class Meta:
        unique_together = ('name', 'manufacture')
class Shop(models.Model):
    """A store location, unique per (name, address) pair."""

    name = models.CharField(max_length=200)
    address = models.CharField(max_length=2000)
    raiting = models.DecimalField(max_digits=7, decimal_places=4)
    # True when customers may pick orders up in person.
    self_pickup = models.BooleanField(default=False)
    phones = models.CharField(max_length=200)
    delivery_conditions = models.CharField(max_length=200)
    url = models.URLField(default='')
    class Meta:
        unique_together = ('name', 'address')
class SaleVariant(models.Model):
    """An offer: one Product sold by one Shop at a price; unique per pair."""

    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    shop = models.ForeignKey(Shop, on_delete=models.CASCADE)
    price = models.DecimalField(max_digits=9, decimal_places=2)
    amount = models.IntegerField(default=0)
    # Serialized per-offer overrides; default is an empty JSON object literal.
    special_parameters = models.CharField(max_length=3000, default='{}')
    url = models.URLField(default='')
    class Meta:
        unique_together = ('product', 'shop')
| 1,718 | 559 |
#!/usr/bin/env python
from __future__ import with_statement
from sys import argv
from socket import socket
from contextlib import closing
def main(args):
  """Probe a TCP endpoint.

  args: argv-style list; args[1] is the host, args[2] the port.
  Returns 0 when the connection succeeds, 1 on any connection or
  port-parsing failure.
  """
  host, port = args[1:]
  with closing(socket()) as s:
    # Was a bare ``except:`` which also swallowed KeyboardInterrupt/SystemExit.
    # ValueError covers a non-numeric port; OSError covers all socket errors.
    try:
      s.connect((host, int(port)))
    except (OSError, ValueError):
      return 1
  return 0  # explicit success (previously an implicit None)
if __name__ == '__main__':
  # Exit status mirrors main(): 1 when the TCP connection failed, 0 otherwise.
  exit(main(argv))
# vim:et:sw=2:ts=2
| 336 | 120 |
# -*- coding: utf-8 -*-
# **********************************************************************************************************************
# MIT License
# Copyright (c) 2020 School of Environmental Science and Engineering, Shanghai Jiao Tong University
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------------------------------------------------
# This file is part of the ASCA Algorithm, it is used for spatial point clustering analysis. This model contains mainly
# three parts, they are points trend analysis, point cluster analysis and spatial visualization.
#
# Author: Yuansheng Huang
# Date: 2020-06-18
# Version: V 1.2
# Literature
# ==========
# Yuansheng Huang, Peng Li, Yiliang He: To centralize or to decentralize? A systematic framework for optimizing
# rural wastewater treatment investment
# Clark and Evans, 1954; Gao, 2013
# **********************************************************************************************************************
# general import
import gc
import os
import sys
from ASCA_Functions import *
pythonScriptPath = r"...\RuST\ASCA_Cluster\pythonFiles"
gc.disable()
pythonPath = os.getcwd()
sys.path.append(pythonPath)
sys.path.append(pythonScriptPath)
# ======================================================================================================================
# Tool inputs passed in from the ArcGIS UI
# ======================================================================================================================
# NOTE(review): ``arcpy`` is not imported in this file — presumably it comes in
# via ``from ASCA_Functions import *`` above; confirm.
arcpy.env.overwriteOutput = True
buildings = sys.argv[1]  # Building shape file
studyArea = sys.argv[2]
obstacleFile = sys.argv[3] # optional parameter.
num = int(sys.argv[4]) # pipe-length constraint [m]; optional parameter.
outputFolder = sys.argv[5]
# folder setting
outputFile = outputFolder + "/" + "Cluster.shp"
addFiledFile = outputFile # sys.argv[5] + ".shp"
# ----------------------------------------------------------------------------------------------------------------------
# Determine the spatial distribution pattern of the point set
# ----------------------------------------------------------------------------------------------------------------------
pointList, spatialRef = readSpatialPoint(buildings) # read spatial points and the input file's spatial reference
distanceList = getNearestDistance(pointList)
area = float(readArea(studyArea)) # read the study-area size
index, _ = NNI(pointList, distanceList, area)  # nearest-neighbour index
triangleVertexIndex, triangleVertexCoordinate = getDelaunayTriangle(pointList) # TODO: verify unused parameter
# Report the distribution trend of the point set
arcpy.AddMessage("\n")
arcpy.AddMessage("************************************")
arcpy.AddMessage("Points spatial cluster analysis was successfully calculated!!")
arcpy.AddMessage("NNI index : " + str(index))
arcpy.AddMessage("************************************")
# Start the cluster analysis of the point set
arcpy.AddMessage("\n")
arcpy.AddMessage("====================================")
arcpy.AddMessage("Ready for cluster module...")
arcpy.AddMessage("====================================")
arcpy.AddMessage("\n")
_, edgeList = getEdgeLength(triangleVertexIndex, triangleVertexCoordinate)
if index >= 1:  # NNI >= 1: points are uniformly (>1) or randomly (=1) distributed
    arcpy.AddMessage("Random distribution OR Uniform distribution (NNI >= 1)")
    arcpy.AddMessage("Skip cluster analysis module!!!" + "\n" +
                     "Perform Obstacle and Restriction analysis!!!")
    # Obstacle handling: drop Delaunay edges crossing an obstacle, then label
    # connected components with an "O" mark.
    if len(obstacleFile) > 1:
        obstacleList = readObstacle(obstacleFile)
        reachableEdge = getReachableEdge(edgeList, obstacleList, pointList)
        indexList_O = aggregation(reachableEdge)
        mark_O = "O"
        cluster(pointList, indexList_O, mark_O)
        arcpy.AddMessage("Unreachable edges were deleted!!!")
    else:
        reachableEdge = edgeList[:]
        pointList = [i + ["O0"] for i in pointList]  # single obstacle-free group
        arcpy.AddMessage("No obstacles!!!")
    # Length restriction: drop edges longer than ``num`` meters, label with "C".
    if num > 0:
        unrestrictedEdge = deleteRestrictionEdge(reachableEdge, num)
        indexList_C = aggregation(unrestrictedEdge)
        mark_C = "C"
        cluster(pointList, indexList_C, mark_C)
        arcpy.AddMessage("Restricted edges were deleted!!!")
    else:
        unrestrictedEdge = reachableEdge[:]
        # BUGFIX: was ``i + i[-1]`` which concatenates a list with a string
        # (TypeError at runtime); wrap the reused mark in a list, mirroring the
        # ``i + ["O0"]`` pattern above.
        pointList = [i + [i[-1]] for i in pointList]
        arcpy.AddMessage("No Length restriction!!!")
    createShapeFile(pointList, spatialRef, outputFile)
    addMarkerFields(addFiledFile, pointList)
    arcpy.AddMessage("-----" + "Spatial Cluster Model successfully performed!" + "-----")
elif index < 1:  # NNI < 1: the point set is clustered
    arcpy.AddMessage("Spatial points is aggregated, perform cluster analysis Module!!!")
    # obstacle
    if len(obstacleFile) > 1:
        obstacleList = readObstacle(obstacleFile)
        reachableEdge = getReachableEdge(edgeList, obstacleList, pointList)
        indexList_O = aggregation(reachableEdge)
        mark_O = "O"
        cluster(pointList, indexList_O, mark_O)  # return marked pointList
        arcpy.AddMessage("Unreachable edges were deleted!!!" + "\n")
    else:
        reachableEdge = edgeList[:]
        pointList = [i + ["O0"] for i in pointList]
        arcpy.AddMessage("No obstacles!!!" + "\n")
    # global long edge todo check
    globalEdgeMean, globalEdgeVariation = getGlobalEdgeStatistic(reachableEdge)
    firstOrderEdges, _ = getFirstOrderEdges(pointList, reachableEdge)
    firstOrderEdgesMean = getFirstOrderEdgesMean(firstOrderEdges)
    globalCutValueList = getGlobalCutValue(globalEdgeMean, globalEdgeVariation, firstOrderEdgesMean)
    globalOtherEdgeList = getGlobalOtherEdge(reachableEdge, globalCutValueList)
    indexListG = aggregation(globalOtherEdgeList)
    markG = "G"
    cluster(pointList, indexListG, markG)
    arcpy.AddMessage("Global long edges were deleted !!!" + "\n")
    # local long edge
    subgraphVertexList, subgraphEdgeList = getSubgraphEdge(pointList, globalOtherEdgeList, indexListG)
    subgraphSecondOrderEdgeMean = getSecondOrderEdges(subgraphVertexList, subgraphEdgeList)
    subgraphMeanVariation = getSubgraphEdgeStatistic(subgraphVertexList, subgraphEdgeList)
    subgraphLocalCutValueList = getLocalCutValue(subgraphMeanVariation, subgraphSecondOrderEdgeMean)
    localOtherEdge = getLocalOtherEdge(globalOtherEdgeList, subgraphLocalCutValueList)
    indexListL = aggregation(localOtherEdge)
    markL = "L"
    cluster(pointList, indexListL, markL)
    arcpy.AddMessage("Local long edges were deleted !!!" + "\n")
    # restrict
    if num > 0:
        unrestrictedEdge = deleteRestrictionEdge(localOtherEdge, num)
        indexList_C = aggregation(unrestrictedEdge)
        mark_C = "C"
        cluster(pointList, indexList_C, mark_C)
        arcpy.AddMessage("Restricted edges were deleted!!!" + "\n")
    else:
        unrestrictedEdge = localOtherEdge[:]
        # BUGFIX: same list+str concatenation bug as the branch above.
        pointList = [i + [i[-1]] for i in pointList]
        arcpy.AddMessage("No Length restriction!!!" + "\n")
    arcpy.AddMessage("pointList:")
    arcpy.AddMessage(str(pointList))
    createShapeFile(pointList, spatialRef, outputFile)
    addMarkerFields(addFiledFile, pointList)
    D = list(set([i[7] for i in pointList])) # distinct final cluster labels (column 7)
    LIST = [len(pointList), index, len(D)]
    output = outputFolder + "/" + "Cluster"
    name = "Output"
    outputWriteToTxt(output, name, LIST, pointList)
    arcpy.AddMessage("-----" + "Spatial Cluster Model successfully performed!" + "-----")
    arcpy.AddMessage("\n")
| 8,313 | 2,543 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 20 21:29:32 2021
@author: qcao
Analysis code for example_topop_tb_v3.py
Parses and cleans load-driven phantoms. Computes Radiomic signatures. Compares with BvTv.
Compare with ROIs
"""
# FEA and BoneBox Imports
import os
import sys
sys.path.append('../') # use bonebox from source without having to install/build
from bonebox.phantoms.TrabeculaeVoronoi import *
from bonebox.FEA.fea import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import vtk
from pyvistaqt import BackgroundPlotter
from skimage.morphology import ball, closing, binary_dilation, binary_closing
import pyvista as pv
pv.set_plot_theme("document")
# For PyRadiomics
import logging
import six
import SimpleITK as sitk
import radiomics
from radiomics import featureextractor
from radiomics import firstorder, getTestCase, glcm, glrlm, glszm, imageoperations, shape
volumeShape = (100, 100, 100)


def ind2dir(ss, uu):
    """Map (series index ``ss``, resorption index ``uu``) to the phantom output dir."""
    return f"/data/BoneBox-out/topopt/lazy_v3_sweep/randstate_{ss}_phantom_ss_{ss}_uu_{uu}/"


def getBVFandE(ss, uu):
    """Load (BVF, elastic modulus) for one phantom; NaN where files are missing."""
    out_dir = ind2dir(ss, uu)
    BVF, elasticModulus = np.nan, np.nan
    if os.path.exists(out_dir):
        if os.path.exists(out_dir + "bvf7.npy"):
            BVF = np.load(out_dir + "bvf7.npy")
        if os.path.exists(out_dir + "elasticModulus7.npy"):
            elasticModulus = np.load(out_dir + "elasticModulus7.npy")
    return BVF, elasticModulus


def getVolume(ss, uu):
    """Load the voxel volume for one phantom; an all-NaN array when absent."""
    volume = np.full(volumeShape, np.nan)
    path = ind2dir(ss, uu) + "volume_8.npy"
    if os.path.exists(path):
        volume = np.load(path)
    return volume
def computeWaveletFeatures(image, mask, featureFunc=glcm.RadiomicsGLCM):
    """
    Compute ``featureFunc`` radiomics features on every wavelet decomposition
    of ``image`` restricted to ``mask``.

    featureFunc:
        firstorder.RadiomicsFirstOrder
        glcm.RadiomicsGLCM

    Returns (list of feature names, numpy array of feature values).
    """
    names = []
    values = []
    for decompImage, decompName, inputKwargs in imageoperations.getWaveletImage(image, mask):
        extractor = featureFunc(decompImage, mask, **inputKwargs)
        extractor.enableAllFeatures()
        results = extractor.execute()
        print('Calculated firstorder features with wavelet ', decompName)
        for (key, val) in six.iteritems(results):
            fullName = '%s_%s' % (str(decompName), key)
            print('  ', fullName, ':', val)
            names.append(fullName)
            values.append(val)
    return names, np.array(values)
def calculate_fid(act1, act2):
    """Frechet (Inception) Distance between two activation matrices.

    act1, act2: arrays of shape (n_samples, n_features).
    Returns the scalar FID; 0 when the two distributions are identical.

    BUGFIX: the original body referenced ``cov``, ``numpy.sum``, ``sqrtm``,
    ``iscomplexobj`` and ``trace`` — none of which were in scope (only
    ``numpy as np`` is imported), so every call raised NameError.
    """
    from scipy.linalg import sqrtm  # local import: scipy is not imported at module level

    # calculate mean and covariance statistics
    mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
    mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)
    # calculate sum squared difference between means
    ssdiff = np.sum((mu1 - mu2) ** 2.0)
    # calculate sqrt of product between cov
    covmean = sqrtm(sigma1.dot(sigma2))
    # check and correct imaginary numbers from sqrt
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    # calculate score
    fid = ssdiff + np.trace(sigma1 + sigma2 - 2.0 * covmean)
    return fid
if __name__ == "__main__":
save_dir = "/data/BoneBox-out/topopt/lazy_v3_sweep/"
# Generate N phantom series, 3 resorption intensities per series
Nseries = 400
Nresorption = 3
# Create array of BVFs and ElasticModuli
bvfs = np.zeros((Nseries, Nresorption))
Es = np.zeros((Nseries, Nresorption))
# Array of random Uls (between 0.1 and 0.25), should be same as in example script.
randStateUls = 3012
Ulmin = 0.1
Ulmax = 0.25
Uls = sampleUniformZeroOne(((Nseries,Nresorption)), randState=randStateUls)*(Ulmax-Ulmin) + Ulmin
# Retrieve BVF and ElasticModulus
for ss in range(Nseries):
for uu in range(Nresorption):
bvfs[ss,uu], Es[ss,uu] = getBVFandE(ss,uu)
inds = np.invert(np.isnan(bvfs))
inds_nz = np.nonzero(inds)
# Correlation Coefficients
def linearFit(xx, yy):
# r2 with radiomics
# returns fit x, fit y, rs
mfit, bfit = np.polyfit(xx, yy, 1)
rs = np.corrcoef(xx, yy)[0,1]**2
mi, ma = np.min(xx), np.max(yy)
xxx = np.array([mi, ma])
yyy = mfit*xxx + bfit
return xxx, yyy, rs
def reject_outliers(data, m=2):
ind = abs(data - np.mean(data)) < m * np.std(data)
return ind, data[ind]
# Correlation Coefficients
def linearFitRejectOutliers(xx, yy):
# r2 with radiomics
# returns fit x, fit y, rs
ind, yy = reject_outliers(yy, m=2)
xx = xx[ind]
mfit, bfit = np.polyfit(xx, yy, 1)
rs = np.corrcoef(xx, yy)[0,1]**2
mi, ma = np.min(xx), np.max(yy)
xxx = np.array([mi, ma])
yyy = mfit*xxx + bfit
return xxx, yyy, rs
# Correlation Coefficients
def polyFitRejectOutliers(xx, yy, order = 2):
# r2 with radiomics
# returns fit x, fit y, rs
ind, yy = reject_outliers(yy, m=2)
xx = xx[ind]
p = np.polyfit(xx, yy, 1)
yyf = np.polyval(p,xx)
rs = np.corrcoef(yy, yyf)[0,1]**2
mi, ma = np.min(xx), np.max(yy)
xxx = np.array([mi, ma])
return np.sort(xx), np.sort(yyf), rs
# Plot BVF and Elastic Modulus vs Uls
fig, ax1 = plt.subplots()
xx, yy, rs1 = linearFitRejectOutliers(Uls[inds].flatten(), bvfs[inds].flatten())
ax1.plot(Uls[inds].flatten(), bvfs[inds].flatten(),'ko')
ax1.plot(xx, yy, 'k-')
ax1.set_ylim(0.16,0.28)
ax1.set_xlabel("Resorption Threshold $U_l$")
ax1.set_ylabel("BVF")
ax1.grid("major")
ax1.set_xlim(0.1,0.25)
xx, yy, rs2 = linearFitRejectOutliers(Uls[inds].flatten(), Es[inds].flatten())
ax2 = ax1.twinx()
ax2.plot(Uls[inds].flatten(), Es[inds].flatten(),'rv')
ax2.plot(xx, yy, 'r--')
ax2.set_ylabel("Elastic Modulus $E$",color='r')
ax2.set_ylim(0,10e7)
ax2.tick_params(axis ='y', labelcolor = 'r')
plt.savefig(save_dir+"BVF_Es_vs_Ul.png")
print("BVF vs Ul: r2="+str(rs1))
print("Es vs Ul: r2="+str(rs2))
# np.corrcoef(bvfs[inds], Es[inds])
# np.corrcoef(bvfs[inds], Uls[inds])
# # np.corrcoef(Es[inds], Uls[inds])
# Plot Es vs BVF
fig, ax1 = plt.subplots()
xx, yy, rs3 = polyFitRejectOutliers(bvfs[inds].flatten(), Es[inds].flatten())
ax1.plot(bvfs[inds].flatten(), Es[inds].flatten(),'ko')
ax1.plot(xx, yy, 'k-')
ax1.set_ylim(0,3e7)
ax1.set_xlim(0.16,0.28)
ax1.set_xlabel("BVF")
ax1.set_ylabel("Elastic Modulus $E$")
ax1.grid("major")
plt.savefig(save_dir+"Es_vs_BVF.png")
print("Es vs BVF: r2="+str(rs3))
#% Look at radiomics features
# Initialize array of features
features = np.zeros((Nseries, Nresorption, 93))
features[:] = np.nan
# Define settings for signature calculation
# These are currently set equal to the respective default values
settings = {}
settings['binWidth'] = 25
settings['resampledPixelSpacing'] = None # [3,3,3] is an example for defining resampling (voxels with size 3x3x3mm)
settings['interpolator'] = sitk.sitkBSpline
settings['imageType'] = ['original','wavelet']
# Initialize feature extractor
extractor = featureextractor.RadiomicsFeatureExtractor(**settings)
extractor.enableImageTypeByName("Wavelet")
# extractor.disableAllImageTypes()
# extractor.enableImageTypeByName(imageType="Original")
# extractor.enableImageTypeByName(imageType="Wavelet")
# extractor.enableFeatureClassByName("glcm")
# Test extraction pipeline on one volume
ss = 0; uu = 0
volume = getVolume(ss,uu).astype(int)*255
volumeSITK = sitk.GetImageFromArray(volume)
maskSITK = sitk.GetImageFromArray(np.ones(volume.shape).astype(int))
wvltFeatureNames, wvltFeatures = computeWaveletFeatures(volumeSITK, maskSITK)
featureVectorOriginal = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original")
volumeSITKWavelets = radiomics.imageoperations.getWaveletImage(volumeSITK, maskSITK)
featureVectorWavelet = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="wavelet")
featureVector = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original")
#%
computeFeatures = False
if computeFeatures:
wvltFeatures = np.zeros((Nseries, Nresorption, 192))
wvltFeatures[:] = np.nan
# Extract volume and compute features
for ss in range(Nseries):
for uu in range(Nresorption):
if inds[ss,uu]:
volume = getVolume(ss,uu).astype(int)*255
volumeSITK = sitk.GetImageFromArray(volume)
maskSITK = sitk.GetImageFromArray(np.ones(volume.shape).astype(int))
featureVector = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original")
featureVectorArray = np.array([featureVector[featureName].item() for featureName in featureVector.keys()])
features[ss,uu,:] = featureVectorArray
wvltFeatureNames, wvltFeatures[ss,uu,:] = computeWaveletFeatures(volumeSITK, maskSITK)
# Reshape feature matrices
featuresReshaped = features.reshape((-1,93), order='F')
wvltFeaturesReshaped = wvltFeatures.reshape((-1,192), order='F')
indsReshaped = inds.reshape((-1,), order='F')
featuresReshaped = featuresReshaped[indsReshaped,:]
wvltFeaturesReshaped = wvltFeaturesReshaped[indsReshaped,:]
# Save feature vectors
np.save(save_dir+"features",features)
np.save(save_dir+"featuresReshaped",featuresReshaped)
np.save(save_dir+"wvltFeaturesReshaped",wvltFeaturesReshaped)
#%% Radiomic Features of ROIs
plt.close('all')
import nrrd
import glob
    def readROI(filename):
        """Load a bone-mask NRRD file and binarize it (255 -> 1)."""
        roiBone, header = nrrd.read(filename)
        roiBone[roiBone==255] = 1 # units for this is volume
        return roiBone
roi_dir = "/data/BoneBox/data/rois/"
Nrois = len(glob.glob(roi_dir+"isodata_*_roi_*.nrrd"))
featuresROI = np.zeros((Nrois,93))
for ind in range(Nrois):
print(ind)
fn = glob.glob(roi_dir+"isodata_*_roi_"+str(ind)+".nrrd")[0]
roiBone = readROI(fn)
volume = roiBone.astype(int)*255
# Take ROI center
volume = volume[50:150,50:150,50:150]
volumeSITK = sitk.GetImageFromArray(volume)
maskSITK = sitk.GetImageFromArray(np.ones(volume.shape).astype(int))
featureVector = extractor.computeFeatures(volumeSITK, maskSITK, imageTypeName="original")
featureVectorArray = np.array([featureVector[featureName].item() for featureName in featureVector.keys()])
featuresROI[ind,:] = featureVectorArray
# wvltFeatureNames, wvltFeatures[ss,uu,:] = computeWaveletFeatures(volumeSITK, maskSITK)
np.save(save_dir+"featuresROI",featuresROI)
#%%
featureNames = list(featureVector.keys())
import seaborn as sns
import pandas as pd
sns.set_theme(style="whitegrid")
featuresReshaped = np.load(save_dir+"featuresReshaped.npy")
featuresROI = np.load(save_dir+"featuresROI.npy")
featuresAll = np.vstack((featuresReshaped,featuresROI))
sourceList = []
for ii in range(200):
sourceList.append("Phantom")
for ii in range(208):
sourceList.append("L1 Spine")
df = pd.DataFrame(data = featuresAll,
columns = featureNames)
df["source"] = sourceList
df["all"] = ""
fig_dir = save_dir+"comparison_with_rois/"
if not os.path.exists(fig_dir):
os.mkdir(fig_dir)
# Draw a nested violinplot and split the violins for easier comparison
for ind in range(93):
fig, ax = plt.subplots(figsize=(5,10))
sns.violinplot(data=df, x="all", y=featureNames[ind], hue="source",
split=True, inner="quart", linewidth=1)
sns.despine(left=True)
plt.savefig(fig_dir+"fig_"+str(ind)+"_"+featureNames[ind])
plt.close("all")
#%% Komogorov-smirnov test
from scipy.stats import ks_2samp
kss = np.zeros(93)
ps = np.zeros(93)
for ind in range(93):
kss[ind], ps[ind] = scipy.stats.ks_2samp(featuresReshaped[:,ind], featuresROI[:,ind])
#%% Prep data for regressor
# # Extract Feature Names
# featureNames = list(featureVector.keys())
# indsReshaped = inds.reshape((-1,), order='F')
# features = np.load(save_dir+"features.npy")
# featuresReshaped = np.load(save_dir+"featuresReshaped.npy")
# wvltFeaturesReshaped = np.load(save_dir+"wvltFeaturesReshaped.npy")
# EsReshaped = Es.reshape((-1,), order='F')[indsReshaped]
# bvfsReshaped = bvfs.reshape((-1,), order='F')[indsReshaped]
# # combine BVF with wavelet GLCM features
# # features_norm = np.concatenate((bvfsReshaped[:,None],wvltFeaturesReshaped),axis=1) # featuresReshaped # Feature Vector
# features_norm = np.concatenate((bvfsReshaped[:,None],featuresReshaped),axis=1) # featuresReshaped # Feature Vector
# features_norm -= np.mean(features_norm,axis=0) # center on mean
# features_norm /= np.std(features_norm,axis=0) # scale to standard deviation
# features_norm[np.isnan(features_norm)] = 0
# # features_norm_names = ["BVF"]+wvltFeatureNames
# features_norm_names = ["BVF"]+featureNames
# roi_vm_mean = EsReshaped # Label
# # Reject pathologic outliers in the dataset
# ii, roi_vm_mean = reject_outliers(roi_vm_mean, m=1)
# features_norm = features_norm[ii,:]
# bvfsReshaped = bvfsReshaped[ii]
# Ntrain = 110 # Training Testing Split
# #% Feature selection
# from sklearn.feature_selection import SelectKBest, VarianceThreshold
# from sklearn.feature_selection import chi2, f_classif, f_regression
# # # features_norm = SelectKBest(f_regression, k=20).fit_transform(features_norm, roi_vm_mean)
# # features_norm = VarianceThreshold(0.95).fit_transform(features_norm)
# # print(features_norm.shape)
# #%
# ytrain = roi_vm_mean[:Ntrain]
# ytest = roi_vm_mean[Ntrain:]
# Xtrain1 = features_norm[:Ntrain,:]
# Xtrain2 = bvfsReshaped[:Ntrain].reshape(-1,1)
# Xtest1 = features_norm[Ntrain:,:]
# Xtest2 = bvfsReshaped[Ntrain:].reshape(-1,1)
# # from xgboost import XGBRegressor
# # from sklearn.model_selection import cross_val_score
# # scores = cross_val_score(XGBRegressor(objective='reg:squarederror'), Xtrain1, ytrain, scoring='neg_mean_squared_error')
# #%% Radiomics + Random Forestocu
# plt.close('all')
# import random
# randState = 123
# random.seed(randState)
# # non-linear without feature selection
# from sklearn.ensemble import RandomForestRegressor
# from sklearn.model_selection import GridSearchCV
# param_grid = [
# {'max_depth': [2,4,8,16,32,64], # 16
# 'max_leaf_nodes': [2,4,8,16,32,64], # 8
# 'n_estimators': [10,50,100,150,200]} # 50
# ]
# # param_grid = [
# # {'max_depth': [2,4,8,16], # 16
# # 'max_leaf_nodes': [2,4,8,16], # 8
# # 'n_estimators': [10,50,100]} # 50
# # ]
# rfr = GridSearchCV(
# RandomForestRegressor(random_state = randState),
# param_grid, cv = 5,
# scoring = 'explained_variance',
# n_jobs=-1
# )
# rfr2 = GridSearchCV(
# RandomForestRegressor(random_state = randState),
# param_grid, cv = 5,
# scoring = 'explained_variance',
# n_jobs=-1
# )
# # Fit with full set of features.
# grid_result = rfr.fit(Xtrain1, ytrain)
# yTrain_fit_rfr = rfr.predict(Xtest1)
# print("Best estimator for BvF+radiomics...")
# rfr.best_estimator_
# # Fit with BVTV only.
# grid_result2 = rfr2.fit(Xtrain2, ytrain)
# yTrain_fit_rfr2 = rfr2.predict(Xtest2)
# print("Best estimator for BVF...")
# rfr2.best_estimator_
# # r2 with radiomics
# mfit, bfit = np.polyfit(ytest, yTrain_fit_rfr, 1)
# rs = np.corrcoef(roi_vm_mean[Ntrain:], yTrain_fit_rfr)[0,1]**2
# print("BVF+Radiomics rs:"+str(rs))
# # r2 with BVFS
# mfit2, bfit2 = np.polyfit(roi_vm_mean[Ntrain:], yTrain_fit_rfr2, 1)
# rs2 = np.corrcoef(roi_vm_mean[Ntrain:], yTrain_fit_rfr2)[0,1]**2
# print("BVF rs:"+str(rs2))
# plt.figure()
# plt.plot(roi_vm_mean[Ntrain:],yTrain_fit_rfr2,'bv')
# plt.plot(roi_vm_mean[Ntrain:], mfit2*roi_vm_mean[Ntrain:] + bfit2, "b--")
# plt.plot(roi_vm_mean[Ntrain:],yTrain_fit_rfr,'ko')
# plt.plot(roi_vm_mean[Ntrain:], mfit*roi_vm_mean[Ntrain:] + bfit, "k-")
# plt.xlabel("$\mu$FE Elastic Modulus")
# plt.ylabel("Predicted Elastic Modulus")
# plt.savefig(save_dir+"Elastic Modulus Predicted vs True.png")
# plt.close("all")
# # Plot feature importance
# importances = rfr.best_estimator_.feature_importances_
# indices = np.argsort(importances)[::-1]
# std = np.std([tree.feature_importances_ for tree in rfr.best_estimator_], axis = 0)
# plt.figure()
# plt.title('Feature importances')
# plt.barh(range(20), importances[indices[0:20]], yerr = std[indices[0:20]], align = 'center',log=True)
# plt.yticks(range(20), list(features_norm_names[i] for i in indices[0:20] ), rotation=0)
# plt.gca().invert_yaxis()
# plt.show()
# plt.subplots_adjust(left=0.7,bottom=0.1, right=0.8, top=0.9)
# plt.savefig(save_dir+"Feature Importances.png")
# plt.close("all")
# #%% Changes in radiomic signature with remodeling
# # Retrieve indices of samples with continuous Uls
# indsUl = (np.sum(inds,axis=1) == 3)
# indsUlnz = np.nonzero(indsUl)[0]
# featuresCrop = features[indsUlnz,:,:]
# UlsCrop = Uls[indsUlnz,:]
# plt.figure()
#%%
# grid_result = rfr.fit(features_norm[:Ntrain,:], roi_vm_mean[:Ntrain])
# yTest_fit_rfr = rfr.predict(features_norm[Ntrain:])
# # sns.set(font_scale=1)
# mfit, bfit = np.polyfit(roi_vm_mean[Ntrain:], yTest_fit_rfr, 1)
# pr2 = np.corrcoef(roi_vm_mean[Ntrain:], yTest_fit_rfr)[0,1]**2
# print(pr2)
# plt.figure()
# plt.plot(roi_vm_mean[Ntrain:],yTest_fit_rfr,'ko')
# plt.plot(roi_vm_mean[Ntrain:], mfit*roi_vm_mean[Ntrain:] + bfit, "b--")
# importances = rfr.best_estimator_.feature_importances_
# indices = np.argsort(importances)[::-1]
# std = np.std([tree.feature_importances_ for tree in rfr.best_estimator_], axis = 0)
# plt.figure()
# plt.title('Feature importances')
# plt.barh(range(20), importances[indices[0:20]], yerr = std[indices[0:20]], align = 'center',log=True)
# plt.yticks(range(20), list(featureNames[i] for i in indices[0:20] ), rotation=0)
# plt.gca().invert_yaxis()
# plt.show()
# plt.subplots_adjust(left=0.7,bottom=0.1, right=0.8, top=0.9) | 19,903 | 7,506 |
import json
import os
import threading
import time

from classes import Keypoint
def wait_for_frames():
    """Block until the OpenPose output dir exists and contains at least one frame.

    Previously both loops were tight busy-waits that pegged a CPU core and
    spammed stdout; a short sleep between polls fixes that without changing
    the observable outcome (the function still returns once a frame exists).
    """
    while not os.path.isdir('keypoints/run0'):
        print("waiting for frame dir, pls start openpose")
        time.sleep(0.5)  # throttle the poll loop
    print("dir detected!")
    while len(os.listdir("keypoints/run0")) == 0:
        print("dir empty, waiting for frames")
        time.sleep(0.5)
    print("frame detected!")
def get_frames():
    """List the frame files currently present in the OpenPose output dir."""
    frame_dir = os.path.join("keypoints", "run0")
    return os.listdir(frame_dir)
def collect_keypoints(frame):
    """Read one OpenPose JSON frame file and return its raw 2-D face keypoints.

    frame: file name inside ``keypoints/run0``.
    Returns the flat [x, y, c, ...] list for the first detected person, or []
    when no people were detected.

    Improvements: builds the path directly instead of the original
    ``os.chdir(...)`` / ``os.chdir("../..")`` dance (which mutated global
    process state and could strand the cwd on an exception), and no longer
    shadows the ``frame`` argument with the open file handle.
    """
    path = os.path.join("keypoints", "run0", frame)
    with open(path, 'r') as fh:
        obj = json.load(fh)
    if not obj['people']:
        print("No people found")
        return []
    print("collected keypoints succesfully!")
    return obj['people'][0]['face_keypoints_2d']
def filter_keypoints(keypoints_face_raw):
    """Split the flat [x, y, c, ...] array into per-feature Keypoint lists.

    Returns (eyebrows, nosebridge, nostrils, eyes, mouth).
    """
    # The raw layout is interleaved triples: x, y, confidence.
    triples = zip(keypoints_face_raw[::3],
                  keypoints_face_raw[1::3],
                  keypoints_face_raw[2::3])
    keypoints_face = [Keypoint(x, y, c) for (x, y, c) in triples]
    # Slice out the facial features by index range (presumably the OpenPose
    # 70-point face layout — TODO confirm against the OpenPose docs).
    keypoints_eyebrows = keypoints_face[17:27]
    keypoints_nosebridge = keypoints_face[27:31]
    keypoints_nostrils = keypoints_face[31:36]
    keypoints_eyes = keypoints_face[36:48]
    keypoints_mouth = keypoints_face[48:68]
    print("filtered keypoints succesfully!")
    return keypoints_eyebrows, keypoints_nosebridge, keypoints_nostrils, keypoints_eyes, keypoints_mouth
| 1,711 | 585 |
# Generated by Django 2.1.4 on 2019-01-04 01:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the venues app (Django 2.1.4).
    # Creates the base Venue model plus an Arena model that extends Venue via
    # multi-table inheritance (see the 'venue_ptr' parent link below).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Venue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address_line_1', models.CharField(blank=True, max_length=60)),
                ('address_line_2', models.CharField(blank=True, max_length=60)),
                ('city', models.CharField(blank=True, max_length=40)),
                ('state', models.CharField(blank=True, max_length=20, verbose_name='State/Province')),
                ('country', models.CharField(blank=True, max_length=50)),
                ('postal', models.CharField(blank=True, max_length=10)),
                # formatted_address / lat / lng are nullable — presumably
                # filled in later by geocoding; TODO confirm against the app.
                ('formatted_address', models.CharField(blank=True, max_length=300, null=True)),
                ('lat', models.FloatField(blank=True, null=True)),
                ('lng', models.FloatField(blank=True, null=True)),
                ('name', models.CharField(max_length=40)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Arena',
            fields=[
                # One-to-one link to the parent Venue row (multi-table
                # inheritance); deleting the Venue cascades to the Arena.
                ('venue_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='venues.Venue')),
                ('manager', models.CharField(blank=True, help_text='Contact person at the arena', max_length=40)),
            ],
            options={
                'abstract': False,
            },
            bases=('venues.venue',),
        ),
    ]
| 1,820 | 531 |
# import qgis libs so that ve set the correct sip api version
import qgis # pylint: disable=W0611 # NOQA
import unittest
from test.test_init import TestInit
from test.test_resources import TestResourceTest
def suite_configuration():
    """Assemble the plugin's unit-test suite.

    Returns:
        unittest.TestSuite containing the init-metadata test and the
        compiled-resources test.
    """
    cases = [
        TestInit('test_read_init'),
        TestResourceTest('test_icon_png'),
    ]
    suite = unittest.TestSuite()
    suite.addTests(cases)
    return suite
# Entry point: run the suite with verbose output when executed directly.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite_configuration())
import scrapy
import re
from urllib.parse import urlparse
from locations.hours import OpeningHours
from locations.items import GeojsonPointItem
ALL_DAYS = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
class DickeysBarbecuePitSpider(scrapy.Spider):
    """Spider for Dickey's Barbecue Pit store locations.

    Starts at the search-by-state directory, follows every state link, and
    yields one GeojsonPointItem per restaurant found on the resulting pages.
    """
    name = "dickeys_barbecue_pit"
    item_attributes = { 'brand': "Dickey's Barbecue Pit", 'brand_wikidata': "Q19880747" }
    allowed_domains = ["dickeys.com"]
    download_delay = 0.5
    start_urls = (
        "https://www.dickeys.com/location/search-by-state",
    )

    def parse(self, response):
        """Follow directory links and emit an item for each restaurant on the page."""
        directory_links = response.xpath('//a[@class="state-links"]/@href').extract()
        for link in directory_links:
            yield scrapy.Request(
                response.urljoin(link),
                callback=self.parse
            )
        regex_phone_prefix = re.compile(r'^\s*Telephone\:\s*(.+)$')
        all_restaurants = response.xpath('//*[@itemtype="http://schema.org/Restaurant"]')
        for restaurant in all_restaurants:
            properties = {
                "name": restaurant.xpath('.//*[@itemprop="name"]/text()').get(),
                "addr_full": restaurant.xpath('.//*[@itemprop="streetAddress"]/text()').get(),
                "city": restaurant.xpath('.//*[@itemprop="addressLocality"]/text()').get(),
                "state": restaurant.xpath('.//*[@itemprop="addressRegion"]/text()').get(),
                "postcode": restaurant.xpath('.//*[@itemprop="postalCode"]/text()').get(),
                "phone": restaurant.xpath('.//a[starts-with(text(), "Telephone:")]/text()').get(),
                "website": response.url
            }
            # URLs with details of all restaurants in a given city look like:
            # '/location/search-by-city/<num>/<city-name>', where:
            #
            # <num> appears to be a number associated with the state containing the city
            # <city-name> is the name of the city.
            #
            # Strip off the '/location/search-by-city' prefix, then append the street address we found for each
            # restaurant. Use this as the unique ID of the restaurant in the crawl, as no other
            # reliable ID seems to appear in the data.
            ref = urlparse(response.url).path.split('/', maxsplit=3)[3]
            # addr_full may be missing; fall back to '' so the join cannot crash.
            properties['ref'] = '_'.join([ref, properties['addr_full'] or ''])
            # If phone has a 'Telephone: ' prefix, strip it away. Guard against
            # a missing phone (xpath .get() returns None when not found).
            if properties['phone']:
                match_phone = re.search(regex_phone_prefix, properties['phone'])
                if match_phone:
                    properties['phone'] = match_phone.groups()[0]
            # Some fields may have leading/trailing space. We've seen that city
            # often has both trailing comma and space. Skip missing (None) values.
            for key in properties:
                if properties[key]:
                    properties[key] = properties[key].strip(', ')
            opening_hours = self.parse_hours(restaurant)
            if opening_hours:
                properties["opening_hours"] = opening_hours
            yield GeojsonPointItem(**properties)

    def parse_hours(self, restaurant_item):
        """Parse a restaurant's schema.org openingHours content into OSM format."""
        opening_hours = OpeningHours()
        opening_hours_str = restaurant_item.xpath('.//*[@itemprop="openingHours"]/@content').get()
        if opening_hours_str:
            regex = re.compile(r'(.+)\:\s*(\d{1,2}:\d{2} [A|P][M])\s*-\s*(\d{1,2}:\d{2} [A|P][M])', flags=re.IGNORECASE)
            # Opening hours specifications may look like either of the following:
            #
            # Open Daily: 11:00 AM - 9:00 PM
            # Mon-Thur: 11:00 AM - 8:00 PM,Fri-Sat: 11:00 AM - 9:00 PM,Sunday: 11:00 AM - 8:00 PM
            #
            for hours in opening_hours_str.split(','):
                hours = hours.strip()
                match = re.search(regex, hours)
                if match:
                    day_range = match.group(1)
                    open_time = match.group(2)
                    close_time = match.group(3)
                    # Times are 12-hour ("12:00 AM"), so the old check against
                    # "00:00" never fired. Map a midnight close to 11:59 PM so
                    # the range stays within a single day.
                    if close_time.upper() == "12:00 AM":
                        close_time = "11:59 PM"
                    days = self.get_days(day_range)
                    if not days:
                        continue
                    for day in days:
                        opening_hours.add_range(
                            day,
                            open_time,
                            close_time,
                            time_format="%I:%M %p"
                        )
        return opening_hours.as_opening_hours()

    def get_days(self, day_range_str):
        """Expand a day or day-range string ('Sunday', 'Mon-Thur', 'Open Daily')
        into a list of two-letter day codes, or None when unparseable."""
        day_range_str = day_range_str.casefold()
        # Most stores list same opening hours for every day of the week: "Open Daily: ..."
        if day_range_str == 'open daily':
            return ALL_DAYS
        start_and_end = day_range_str.split('-')
        # Handle cases like the "Sunday" in "Sunday: ..."
        if len(start_and_end) == 1:
            day = self.day_mapping(start_and_end[0])
            return [day] if day else None
        # Handle cases like the "Mon-Thur" in "Mon-Thur: ..."
        if len(start_and_end) == 2:
            try:
                start_index = ALL_DAYS.index(self.day_mapping(start_and_end[0]))
                end_index = ALL_DAYS.index(self.day_mapping(start_and_end[1]))
            except ValueError:
                # In case we extract an unexpected string as a day of the week
                return None
            if start_index <= end_index:
                # '<' in the original silently dropped same-day ranges.
                return ALL_DAYS[start_index:end_index + 1]
            # Wrap-around ranges such as "Fri-Mon".
            return ALL_DAYS[start_index:] + ALL_DAYS[:end_index + 1]
        # In case the day/day range doesn't look like what we expect
        return None

    def day_mapping(self, day_str):
        """Map a (possibly abbreviated) day name to its two-letter code.

        Returns None when the string is empty or prefixes no day name.
        """
        day_str = day_str.casefold()
        if not day_str:
            # ''.startswith-style prefix matching would "match" every day.
            return None
        for full_name, code in (('sunday', 'Su'), ('monday', 'Mo'),
                                ('tuesday', 'Tu'), ('wednesday', 'We'),
                                ('thursday', 'Th'), ('friday', 'Fr'),
                                ('saturday', 'Sa')):
            if full_name.startswith(day_str):
                return code
        return None
| 6,311 | 1,950 |
""" Session class. """
# Standard library imports
import math
# Local imports
import spotifython.constants as const
from spotifython.endpoints import Endpoints
import spotifython.utils as utils
class Session:
    """ Represents an interactive Spotify session, tied to a Spotify API token.

    Use methods here to deal with authentication, searching for objects, and
    getting objects by their ids.
    """

    def __init__(self, token, timeout=const.DEFAULT_REQUEST_TIMEOUT):
        """ Create a new Spotify Session.

        This is the only constructor that should be explicitly called by the
        client. Use it to start a Session with the Spotify web API.

        Args:
            token (str): Spotify API authentication token
            timeout (int): timeout value for each request made to Spotify's API.
                Default 10. This library uses exponential backoff with a
                timeout; this parameter is the hard timeout.

        Raises:
            TypeError: if incorrectly typed parameters are given.
            ValueError: if parameters with illegal values are given.
        """
        if not isinstance(token, str):
            raise TypeError('token should be str')
        if not isinstance(timeout, int):
            raise TypeError('timeout should be int')
        if timeout < 0:
            raise ValueError(f'timeout {timeout} is < 0')

        self._token = token
        self._timeout = timeout

    def reauthenticate(self, token):
        """ Updates the stored Spotify authentication token for this instance.

        Args:
            token (str): the new Spotify authentication token.
        """
        if not isinstance(token, str):
            raise TypeError('token should be string')

        self._token = token

    def token(self):
        """
        Returns:
            str: The token associated with this session.
        """
        return self._token

    def timeout(self):
        """
        Returns:
            int: The timeout associated with this session.
        """
        return self._timeout

    def __str__(self):
        """ Returns the Session's id.

        Note:
            This is NOT the token, but the unique Python identifier for this
            object.
        """
        # Guarantee that the IDs of different objects in memory are different
        # Avoids exposing the token as plaintext for no reason, since that is
        # the other possible indicator of object identity.
        return f'Session <${id(self)}>'

    def __repr__(self):
        """ Returns the Session's id.

        Note:
            This is NOT the token, but the unique Python identifier for this
            object.
        """
        return self.__str__()

    class SearchResult:
        """ Represents the results of a Spotify API search call.

        Do not use the constructor. To get a SearchResult, use
        :meth:`Session.search() <spotifython.session.Session.search>`
        """

        def __init__(self, search_result):
            """ Get an instance of SearchResult. Client should not use this!

            Internally, the search result will perform all necessary API calls
            to get the desired number of search results (up to search limit).

            Args:
                search_result: dict containing keys 'album', 'artist',
                    'playlist', 'track', each mapped to a list of the
                    corresponding object type. For example, the key 'artist'
                    maps to a list of Artist objects.
            """
            self._albums = search_result.get('album', list())
            self._artists = search_result.get('artist', list())
            self._playlists = search_result.get('playlist', list())
            self._tracks = search_result.get('track', list())

        # Field accessors
        def albums(self):
            """
            Returns:
                List[Album]: Any albums returned by the Spotify search. Could be
                empty.
            """
            return self._albums

        def artists(self):
            """
            Returns:
                List[Artist]: Any artists returned by the Spotify search. Could
                be empty.
            """
            return self._artists

        def playlists(self):
            """
            Returns:
                List[Playlist]: Any playlists returned by the Spotify search.
                Could be empty.
            """
            return self._playlists

        def tracks(self):
            """
            Returns:
                List[Track]: Any tracks returned by the Spotify search. Could
                be empty.
            """
            return self._tracks

    ##################################
    # API Calls
    ##################################
    # pylint: disable=too-many-arguments, too-many-locals, too-many-branches
    # pylint: disable=too-many-statements
    # TODO: unfortunately this function has to be kind of long to accomodate the
    # logic required. Refactor this in the future.
    # TODO: 'optional field filters and operators' What are these? Provide
    # a link and / or example.
    def search(self,
               query,
               types,
               limit,
               market=const.TOKEN_REGION,
               include_external_audio=False):
        """ Searches Spotify for content with the given query.

        Args:
            query (str): search query keywords and optional field filters and
                operators.
            types: type(s) of results to search for. One of:

                - sp.ALBUMS
                - sp.ARTISTS
                - sp.PLAYLISTS
                - sp.TRACKS
                - List: can contain multiple of the above.

            limit (int): the maximum number of results to return. If the limit
                is None, will return up to the API-supported limit of 2000
                results. Use this with caution as it will result in a very long
                chain of queries!

                Note: The limit is applied within each type, not on the total
                response. For example, if the limit value is 3 and the search
                is for both artists & albums, the response contains 3 artists
                and 3 albums.

            market (str): a :term:`market code <Market>` or sp.TOKEN_REGION,
                used for :term:`track relinking <Track Relinking>`. If None, no
                market is passed to Spotify's Web API, and its default behavior
                is invoked.
            include_external_audio (bool): If true, the response will include
                any relevant audio content that is hosted externally. By default
                external content is filtered out from responses.

        Note:
            - Shows and Episodes will be supported in a future release.
            - Playlist search results are not affected by the market parameter.
              Playlists queried from session.search() can't have
              :term:`track relinking <Track Relinking>` applied,
              while getting an individual playlist with
              session.get_playlist() can. This is a limitation of the
              Spotify API.
            - If market is not None, only content playable in the specified
              is returned.

        Returns:
            SearchResult: The results from the Spotify search.

        Raises:
            TypeError: for invalid types in any argument.
            ValueError: if query type or market is invalid. TODO: validate?
            ValueError: if limit is > 2000: this is the Spotify API's search
                limit.
            HTTPError: if failure or partial failure.

        Required token scopes:
            - user-read-private

        Calls endpoints:
            - GET /v1/search
        """
        # Search limit is a required field due to the offset + limit being 2000,
        # which would take 40 backend API calls. Throw an error if > the limit
        # is 2000.
        # Internally, include_external='audio' is the only valid argument.

        # Type validation. The original iterated the characters of `query`,
        # which accepted a list of strings and only crashed later.
        if not isinstance(query, str):
            raise TypeError('query should be str')
        if not isinstance(types, str) and \
            not all(isinstance(x, str) for x in types):
            raise TypeError('types should be str or a list of str')
        if (limit is not None and not isinstance(limit, int)) or \
            (isinstance(limit, int) and limit < 1):
            raise TypeError('limit should be None or int > 0')
        if market is not None and not isinstance(market, str):
            raise TypeError('market should be None or str')
        if include_external_audio is not None and \
            not isinstance(include_external_audio, bool):
            raise TypeError('include_external_audio should be None or bool')

        # Argument validation. list(str) would split the string into single
        # characters; wrap it in a list instead.
        if isinstance(types, str):
            types = [types]
        valid_types = [
            const.ALBUMS,
            const.ARTISTS,
            const.PLAYLISTS,
            const.TRACKS
        ]
        for search_type_filter in types:
            if search_type_filter not in valid_types:
                raise ValueError(f'search type {search_type_filter} invalid')
        if limit is None:
            limit = 2000
        if limit > 2000:
            raise ValueError('Spotify only supports up to 2000 search results.')

        # Construct params for API call
        uri_params = dict()
        # Encode the spaces in strings! See the following link for more details.
        # https://developer.spotify.com/documentation/web-api/reference/search/search/
        uri_params['q'] = query.replace(' ', '+')
        if market is not None:
            uri_params['market'] = market
        if include_external_audio:
            uri_params['include_external'] = 'audio'

        # A maximum of 50 search results per search type can be returned per API
        # call to the search backend
        num_to_request = math.ceil(limit / const.SPOTIFY_PAGE_SIZE) \
            * const.SPOTIFY_PAGE_SIZE

        # We want the singular search types, while our constants are plural
        # search types in the argument for uniformity. The pagination objects
        # use the plural types again, so a two way mapping is required.
        map_args_to_api_call = {
            const.ALBUMS: 'album',
            const.ARTISTS: 'artist',
            const.PLAYLISTS: 'playlist',
            const.TRACKS: 'track',
            const.SHOWS: 'show',
            const.EPISODES: 'episode',
        }
        map_args_to_api_result = {
            'album': const.ALBUMS,
            'artist': const.ARTISTS,
            'playlist': const.PLAYLISTS,
            'track': const.TRACKS,
            'show': const.SHOWS,
            'episode': const.EPISODES,
        }

        remaining_types = [map_args_to_api_call.get(s) for s in types]

        # Initialize SearchResult object
        result = {
            map_args_to_api_call[const.ALBUMS]: list(),
            map_args_to_api_call[const.ARTISTS]: list(),
            map_args_to_api_call[const.PLAYLISTS]: list(),
            map_args_to_api_call[const.TRACKS]: list(),
        }

        # Unfortunately because each type can have a different amount of return
        # values, utils.paginate_get() is not suited for this call.
        for offset in range(0, num_to_request, const.SPOTIFY_PAGE_SIZE):
            # This line simplifies the logic for cases where an extra request
            # would otherwise be needed to hit the empty list check in the
            # search responses.
            if len(remaining_types) == 0:
                break

            uri_params['type'] = ','.join(remaining_types)
            # Spotify caps 'limit' at one page per request; the original passed
            # the full (up to 2000) limit, which the API rejects. Also never
            # request more than the caller asked for.
            uri_params['limit'] = min(limit - offset, const.SPOTIFY_PAGE_SIZE)
            uri_params['offset'] = offset

            # Execute requests
            response_json, status_code = utils.request(
                session=self,
                request_type=const.REQUEST_GET,
                endpoint=Endpoints.SEARCH,
                uri_params=uri_params
            )
            if status_code != 200:
                raise utils.SpotifyError(status_code, response_json)

            # Extract data per search type
            # TODO: test what happens if unnecessary types are specified for
            # the given offsets against live api
            for curr_type in remaining_types:
                api_result_type = map_args_to_api_result[curr_type]
                items = response_json[api_result_type]['items']

                # Add items to accumulator. Compare with == rather than `is`:
                # identity comparison of strings only worked via interning.
                for item in items:
                    if curr_type == map_args_to_api_call[const.ALBUMS]:
                        result.get(curr_type).append(Album(self, item))
                    elif curr_type == map_args_to_api_call[const.ARTISTS]:
                        result.get(curr_type).append(Artist(self, item))
                    elif curr_type == map_args_to_api_call[const.PLAYLISTS]:
                        result.get(curr_type).append(Playlist(self, item))
                    elif curr_type == map_args_to_api_call[const.TRACKS]:
                        result.get(curr_type).append(Track(self, item))
                    else:
                        # Should never reach here, but here for safety!
                        raise ValueError('Invalid type when building search')

            # Only make necessary search queries
            new_remaining_types = list()
            for curr_type in remaining_types:
                api_result_type = map_args_to_api_result[curr_type]
                if response_json[api_result_type]['next'] is not None:
                    new_remaining_types.append(curr_type)
            remaining_types = new_remaining_types

        # The last page may overshoot the requested limit; trim each type so
        # the caller never receives more than `limit` results per type.
        for key in result:
            del result[key][limit:]

        return self.SearchResult(result)

    def get_albums(self,
                   album_ids,
                   market=const.TOKEN_REGION):
        """ Gets the albums with the given Spotify ids.

        Args:
            album_ids (str, List[str]): The Spotify album id(s) to get.
            market (str): a :term:`market code <Market>` or sp.TOKEN_REGION,
                used for :term:`track relinking <Track Relinking>`. If None, no
                market is passed to Spotify's Web API, and its default behavior
                is invoked.

        Returns:
            Union[Album, List[Album]]: The requested album(s).

        Raises:
            TypeError: for invalid types in any argument.
            ValueError: if market type is invalid. TODO
            HTTPError: if failure or partial failure.

        Calls endpoints:
            - GET /v1/albums

        Note: the following endpoint is not used.
            - GET /v1/albums/{id}
        """
        # Type/Argument validation
        if not isinstance(album_ids, str) and\
            not all(isinstance(x, str) for x in album_ids):
            raise TypeError('album_ids should be str or list of str')
        if market is None:
            raise ValueError('market is a required argument')
        if not isinstance(market, str):
            raise TypeError('market should be str')

        # list(str) would split the id into characters; wrap it instead.
        if isinstance(album_ids, str):
            album_ids = [album_ids]

        # Construct params for API call
        endpoint = Endpoints.SEARCH_ALBUMS
        uri_params = dict()
        if market is not None:
            uri_params['market'] = market

        # A maximum 20 albums can be returned per API call
        batches = utils.create_batches(album_ids, 20)
        result = list()
        for batch in batches:
            uri_params['ids'] = ','.join(batch)

            # Execute requests
            response_json, status_code = utils.request(
                session=self,
                request_type=const.REQUEST_GET,
                endpoint=endpoint,
                uri_params=uri_params
            )
            if status_code != 200:
                raise utils.SpotifyError(status_code, response_json)

            items = response_json['albums']
            for item in items:
                result.append(Album(self, item))

        return result if len(result) != 1 else result[0]

    def get_artists(self, artist_ids):
        """ Gets the artists with the given Spotify ids.

        Args:
            artist_ids (str, List[str]): The Spotify artist id(s) to get.

        Returns:
            Union[Artist, List[Artist]]: The requested artist(s).

        Raises:
            TypeError: for invalid types in any argument.
            HTTPError: if failure or partial failure.

        Calls endpoints:
            - GET /v1/artists

        Note: the following endpoint is not used.
            - GET /v1/artists/{id}
        """
        # Type validation
        if not isinstance(artist_ids, str) and\
            not all(isinstance(x, str) for x in artist_ids):
            raise TypeError('artist_ids should be str or list of str')

        # list(str) would split the id into characters; wrap it instead.
        if isinstance(artist_ids, str):
            artist_ids = [artist_ids]

        # Construct params for API call.
        # NOTE(review): the original used Endpoints.SEARCH_ALBUMS here, which
        # contradicts the 'artists' key read from the response below; this
        # looks like a copy/paste bug. Confirm SEARCH_ARTISTS exists in
        # endpoints.py.
        endpoint = Endpoints.SEARCH_ARTISTS
        uri_params = dict()

        # A maximum of 50 artists can be returned per API call
        batches = utils.create_batches(artist_ids, 50)
        result = list()
        for batch in batches:
            # The API expects a comma-separated string, not a Python list
            # (consistent with get_albums / get_tracks).
            uri_params['ids'] = ','.join(batch)

            # Execute requests
            response_json, status_code = utils.request(
                session=self,
                request_type=const.REQUEST_GET,
                endpoint=endpoint,
                uri_params=uri_params
            )
            if status_code != 200:
                raise utils.SpotifyError(status_code, response_json)

            items = response_json['artists']
            for item in items:
                result.append(Artist(self, item))

        return result if len(result) != 1 else result[0]

    def get_tracks(self,
                   track_ids,
                   market=const.TOKEN_REGION):
        """ Gets the tracks with the given Spotify ids.

        Args:
            track_ids (str, List[str]): The Spotify track id(s) to get.
            market (str): a :term:`market code <Market>` or sp.TOKEN_REGION,
                used for :term:`track relinking <Track Relinking>`. If None, no
                market is passed to Spotify's Web API, and its default behavior
                is invoked.

        Returns:
            Union[Track, List[Track]]: The requested track(s).

        Raises:
            TypeError: for invalid types in any argument.
            ValueError: if market type is invalid. TODO
            HTTPError: if failure or partial failure.

        Calls endpoints:
            - GET /v1/tracks

        Note: the following endpoint is not used.
            - GET /v1/tracks/{id}
        """
        # Type validation
        if not isinstance(track_ids, str) and\
            not all(isinstance(x, str) for x in track_ids):
            raise TypeError('track_ids should be str or list of str')
        if market is not None and not isinstance(market, str):
            raise TypeError('market should be None or str')

        # Argument validation. list(str) would split the id into characters.
        if isinstance(track_ids, str):
            track_ids = [track_ids]

        # Construct params for API call
        endpoint = Endpoints.SEARCH_TRACKS
        uri_params = dict()
        if market is not None:
            uri_params['market'] = market

        # A maximum of 50 tracks can be returned per API call
        batches = utils.create_batches(track_ids, 50)
        result = list()
        for batch in batches:
            uri_params['ids'] = ','.join(batch)

            # Execute requests
            response_json, status_code = utils.request(
                session=self,
                request_type=const.REQUEST_GET,
                endpoint=endpoint,
                uri_params=uri_params
            )
            if status_code != 200:
                raise utils.SpotifyError(status_code, response_json)

            items = response_json['tracks']
            for item in items:
                result.append(Track(self, item))

        return result if len(result) != 1 else result[0]

    # TODO: what the heck are fields?
    def get_playlists(self,
                      playlist_ids,
                      fields=None,
                      market=const.TOKEN_REGION):
        """ Gets the playlist(s) with the given Spotify ids.

        Args:
            playlist_ids (str, List[str]): The Spotify playlist ids to get.
            fields (str): filters for the query: a comma-separated list of the
                fields to return. If omitted, all fields are returned. A dot
                separator can be used to specify non-reoccurring fields, while
                parentheses can be used to specify reoccurring fields within
                objects. Use multiple parentheses to drill down into nested
                objects. Fields can be excluded by prefixing them with an
                exclamation mark.
            market (str): a :term:`market code <Market>` or sp.TOKEN_REGION,
                used for :term:`track relinking <Track Relinking>`. If None, no
                market is passed to Spotify's Web API, and its default behavior
                is invoked.

        Returns:
            Union[Playlist, List[Playlist]]: The requested playlist(s)

        Raises:
            TypeError: for invalid types in any argument.
            ValueError: if market type is invalid. TODO
            HTTPError: if failure or partial failure.

        Calls endpoints:
            - GET /v1/playlists/{playlist_id}
        """
        # Note: additional_types is also a valid request param - it
        # has been deprecated and therefore is removed from the API wrapper.

        # Type/Argument validation
        if not isinstance(playlist_ids, str) and\
            not all(isinstance(x, str) for x in playlist_ids):
            raise TypeError('playlist_ids should be str or list of str')
        if fields is not None and not isinstance(fields, str):
            raise TypeError('fields should be None or str')
        if not isinstance(market, str):
            raise TypeError('market should be str')

        # list(str) would split the id into characters; wrap it instead.
        if isinstance(playlist_ids, str):
            playlist_ids = [playlist_ids]

        # Construct params for API call
        uri_params = dict()
        uri_params['market'] = market
        if fields is not None:
            uri_params['fields'] = fields

        # Each API call can return at most 1 playlist. Therefore there is no
        # need to batch this query.
        result = list()
        for playlist_id in playlist_ids:
            endpoint = Endpoints.SEARCH_PLAYLIST % playlist_id

            # Execute requests
            response_json, status_code = utils.request(
                session=self,
                request_type=const.REQUEST_GET,
                endpoint=endpoint,
                uri_params=uri_params
            )
            if status_code != 200:
                raise utils.SpotifyError(status_code, response_json)

            result.append(Playlist(self, response_json))

        return result if len(result) != 1 else result[0]

    def get_users(self, user_ids):
        """ Gets the users with the given Spotify ids.

        Args:
            user_ids (str, List[str]): The Spotify user id(s) to get.

        Returns:
            Union[User, List[User]]: The requested user(s).

        Raises:
            TypeError: for invalid types in any argument.
            HTTPError: if failure or partial failure.

        Calls endpoints:
            - GET /v1/users/{user_id}
        """
        # Type validation
        if not isinstance(user_ids, str) and\
            not all(isinstance(x, str) for x in user_ids):
            raise TypeError('user_ids should be str or list of str')

        # The original did list('user_ids should be str'), turning the error
        # message's characters into the id list. Wrap the single id instead.
        if isinstance(user_ids, str):
            user_ids = [user_ids]

        # Construct params for API call
        uri_params = dict()

        # Each API call can return at most 1 user. Therefore there is no need
        # to batch this query.
        result = list()
        for user_id in user_ids:
            # Execute requests
            # TODO: Partial failure - if user with user_id does not exist,
            # status_code is 404
            response_json, status_code = utils.request(
                session=self,
                request_type=const.REQUEST_GET,
                endpoint=Endpoints.SEARCH_USER % user_id,
                uri_params=uri_params
            )
            if status_code != 200:
                raise utils.SpotifyError(status_code, response_json)

            result.append(User(self, response_json))

        return result if len(result) != 1 else result[0]

    def current_user(self):
        """
        Returns:
            User: The user associated with the current Spotify API token.

        Raises:
            ValueError: if the Spotify API key is not valid.
            ValueError: if the response is empty.
            HTTPError: if failure or partial failure.

        Calls endpoints:
            - GET /v1/me
        """
        # Construct params for API call
        endpoint = Endpoints.SEARCH_CURRENT_USER

        # Execute requests
        response_json, status_code = utils.request(
            session=self,
            request_type=const.REQUEST_GET,
            endpoint=endpoint
        )
        if status_code != 200:
            raise utils.SpotifyError(status_code, response_json)

        return User(self, response_json)
# pylint: disable=wrong-import-position
from spotifython.album import Album
from spotifython.artist import Artist
from spotifython.playlist import Playlist
from spotifython.track import Track
from spotifython.user import User
| 25,970 | 6,792 |
# Plots results of preliminary experiments to determine parameter settings for all other experiments
import pandas as pd
import matplotlib
matplotlib.use('pgf')
import matplotlib.pyplot as plt
def _window_frames(df, windows, skipgram=None):
    """Return [(window, sub-DataFrame)] pairs for each requested context window.

    When `skipgram` is given, additionally filter on the skipgram column
    (1 = Skip-Gram runs, 0 = CBOW runs).
    """
    frames = []
    for window in windows:
        if skipgram is None:
            query = 'contextwindow == %d' % window
        else:
            query = '(skipgram == %d) & (contextwindow == %d)' % (skipgram, window)
        frames.append((window, df.query(query)))
    return frames


def _plot_accuracy(frames, title, basename, limits=None, legend=True):
    """Scatter accuracy vs. vector dimension for several context windows and
    save the figure as both .pgf and .pdf under plots/.

    frames: [(window, DataFrame)] pairs from _window_frames.
    limits: optional ((xmin, xmax), (ymin, ymax)) axis limits.
    legend: whether to draw the legend (lower right).
    """
    plt.clf()
    for window, frame in frames:
        plt.plot(frame.dimension, frame.accuracy, 'o',
                 label="context window = %d" % window)
    plt.xlabel("vector dimension")
    plt.ylabel("accuracy")
    if limits is not None:
        plt.xlim(*limits[0])
        plt.ylim(*limits[1])
    if legend:
        plt.legend(loc="lower right")
    plt.title(title)
    plt.tick_params(axis='both', which='both', top='off', right='off')
    plt.savefig("plots/%s.pgf" % basename)
    plt.savefig("plots/%s.pdf" % basename)


# word2vec: same data file holds both Skip-Gram (skipgram == 1) and CBOW
# (skipgram == 0) runs. The three sections below were previously copy-pasted;
# they are now driven through the two helpers above with identical output.
df = pd.read_csv("data/preliminaryWord2vecComplete_clean.csv")
_plot_accuracy(_window_frames(df, [3, 5, 7, 10, 15], skipgram=1),
               "Parameter estimation for Skip-Gram model",
               "word2vecSkipgramPreliminary",
               limits=((90, 510), (40, 60)))
_plot_accuracy(_window_frames(df, [3, 5, 7, 10, 15], skipgram=0),
               "Parameter estimation for CBOW model",
               "word2vecCBOWPreliminary",
               limits=((90, 510), (40, 60)),
               legend=False)  # legend was disabled in the CBOW plot

# GloVe results: no skipgram column, fewer window sizes, auto-scaled axes.
df = pd.read_csv("data/preliminaryGloVeResults_clean.csv")
_plot_accuracy(_window_frames(df, [5, 10, 15]),
               "Parameter estimation for GloVe model",
               "gloVePreliminary")
| 2,993 | 1,239 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=C0103
# pylint: disable=E1101
import os
import numpy as np
import cv2
import caffe
def retifyxxyy(img, xxyy):
    """Clip a box to the bounds of an image.

    img: image array (only its height/width are used)
    xxyy: [left, right, top, bottom]
    return: the clipped xxyy
    """
    height, width = img.shape[:2]
    return retifyxxyysize(height, width, xxyy)
def retifyxxyysize(img_height, img_width, xxyy):
    """Clamp box coordinates in-place to the image region.

    img_height: image height in pixels
    img_width: image width in pixels
    xxyy: [left, right, top, bottom]; modified in place
    return: xxyy
    """
    # x coordinates (indices 0, 1) are bounded by the width,
    # y coordinates (indices 2, 3) by the height.
    upper_bounds = (img_width, img_width, img_height, img_height)
    for i, bound in enumerate(upper_bounds):
        xxyy[i] = min(max(xxyy[i], 0), bound)
    return xxyy
def getCutSize(xxyy, left, right, top, bottom): #left, right, top, and bottom
    u"""Expand a box by ratio margins on each side.

    xxyy: [left, right, top, bottom]
    left, right, top, bottom: margin ratios relative to the box size.
    The return value is a length-4 float array holding the region with
    the margin applied.
    """
    width = xxyy[1] - xxyy[0]
    height = xxyy[3] - xxyy[2]
    # Ratios are expressed so that left/top shift the near edge and
    # (right - 1)/(bottom - 1) shift the far edge.
    return np.array([
        xxyy[0] + left * width,
        xxyy[1] + (right - 1) * width,
        xxyy[2] + top * height,
        xxyy[3] + (bottom - 1) * height,
    ], dtype=float)
def dets2xxyys(dets):
    """Convert detection rectangles to an (N, 4) array.

    In this module each row follows the xxyy convention:
    [left, right, top, bottom].
    """
    xxyys = np.zeros((len(dets), 4))
    # Fill each pre-allocated row through its view.
    for row, det in zip(xxyys, dets):
        row[0] = det.left()
        row[1] = det.right()
        row[2] = det.top()
        row[3] = det.bottom()
    return xxyys
class FacePosePredictor(object):
    """
    A face pose predictor using a pre-trained caffe model.
    The original code was modified into a class version.
    https://github.com/guozhongluo/head-pose-estimation-and-face-landmark
    Example:
        posePredictor = facePose.FacePosePredictor()
        predictpoints, landmarks, headposes = posePredictor.predict(frameCopy, np.array([[left, right, top, bottom]]))
    """
    def __init__(self):
        # Margin ratios used to expand a detected face box before cropping
        # (negative left/top and >1 right/bottom all expand outward).
        self.M_left = -0.15
        self.M_right = +1.15
        self.M_top = -0.10
        self.M_bottom = +1.25
        # Input resolution expected by the VGG-style landmark network.
        self.vgg_height = 224
        self.vgg_width = 224
        vgg_point_MODEL_FILE = 'model/deploy.prototxt'
        vgg_point_PRETRAINED = 'model/68point_dlib_with_pose.caffemodel'
        mean_filename = 'model/VGG_mean.binaryproto'
        self.vgg_point_net = caffe.Net(vgg_point_MODEL_FILE, vgg_point_PRETRAINED, caffe.TEST)
        caffe.set_mode_cpu()
        # caffe.set_mode_gpu()
        # caffe.set_device(0)
        # Load the per-pixel mean image used to normalize network inputs.
        proto_data = open(mean_filename, "rb").read()
        a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
        self.mean = caffe.io.blobproto_to_array(a)[0]
    def predict(self, colorImage, xxyys):
        """
        Predict 68 landmark points and pose (pitch, yaw, roll) per rectangle.
        colorImage: image array of shape (H, W, 3)
        xxyys: array of rectangles, each [left, right, top, bottom]
        return
            predictpoints: 68 points in network (224x224) pixel space
            landmarks: the 68 points mapped back into image coordinates
            predictposes: pitch, yaw, roll (network output scaled by 50)
        """
        def getRGBTestPart(img, xxyy, left, right, top, bottom, asHeight, asWidth):
            """Return the cropped face image as float32.
            The crop is the margin-expanded box clipped to the image, then
            resized to the network input size.
            """
            largexxyy = getCutSize(xxyy, left, right, top, bottom)
            # retifyxxyy (defined elsewhere in this file) clips the box to the image.
            retixxyy = retifyxxyy(img, largexxyy)
            retixxyy = [int(round(x)) for x in retixxyy]
            face = img[retixxyy[2]:retixxyy[3], retixxyy[0]:retixxyy[1], :]
            # NOTE(review): cv2.resize expects (width, height); passing
            # (asHeight, asWidth) is only harmless because both are 224 here.
            face = cv2.resize(face, (asHeight, asWidth), interpolation=cv2.INTER_AREA)
            face = face.astype('float32')
            return face
        pointNum = 68
        faceNum = xxyys.shape[0]
        faces = np.zeros((1, 3, self.vgg_height, self.vgg_width))
        predictpoints = np.zeros((faceNum, pointNum*2))
        predictposes = np.zeros((faceNum, 3))
        imgsize = colorImage.shape[:2]
        TotalSize = np.zeros((faceNum, 2))
        normalface = np.zeros(self.mean.shape)
        for i in range(0, faceNum):
            TotalSize[i] = imgsize
            colorface = getRGBTestPart(colorImage, xxyys[i], self.M_left, self.M_right, self.M_top, self.M_bottom, self.vgg_height, self.vgg_width)
            # HWC -> CHW, then subtract the mean image.
            normalface[0] = colorface[:, :, 0]
            normalface[1] = colorface[:, :, 1]
            normalface[2] = colorface[:, :, 2]
            normalface = normalface - self.mean
            faces[0] = normalface
            # Run the network on this single face (batch size 1).
            data4DL = np.zeros([faces.shape[0], 1, 1, 1])
            self.vgg_point_net.set_input_arrays(faces.astype(np.float32), data4DL.astype(np.float32))
            self.vgg_point_net.forward()
            predictpoints[i] = self.vgg_point_net.blobs['68point'].data[0]
            # Pose outputs are multiplied by 50 -- presumably the net emits
            # angle/50; units look like degrees. TODO confirm.
            predictposes[i] = 50 * self.vgg_point_net.blobs['poselayer'].data
        # Rescale point coordinates into 224x224 pixel space (the network
        # presumably emits values in roughly [-1, 1] -- confirm upstream).
        predictpoints = predictpoints * self.vgg_height/2 + self.vgg_width/2
        landmarks = self.batchRecoverPart(predictpoints, xxyys, TotalSize, self.M_left, self.M_right, self.M_top, self.M_bottom, self.vgg_height, self.vgg_width)
        return predictpoints, landmarks, predictposes
    def batchRecoverPart(self, predictPoint, totalxxyy, totalSize, left, right, top, bottom, height, width):
        """Map network-space landmarks back into original-image coordinates."""
        def recover_coordinate(largetxxyy, landmarks, width, height):
            # Scale x/y by crop-size over network-size, then translate by the
            # crop origin; landmarks interleave x (even) and y (odd) indices.
            point = np.zeros(np.shape(landmarks))
            cut_width = largetxxyy[1] - largetxxyy[0]
            cut_height = largetxxyy[3] - largetxxyy[2]
            scale_x = cut_width*1.0/width
            scale_y = cut_height*1.0/height
            point[0::2] = [float(j * scale_x + largetxxyy[0]) for j in landmarks[0::2]]
            point[1::2] = [float(j * scale_y + largetxxyy[2]) for j in landmarks[1::2]]
            return point
        def recoverPart(point, xxyy, left, right, top, bottom, img_height, img_width, height, width):
            largexxyy = getCutSize(xxyy, left, right, top, bottom)
            # retifyxxyysize (defined elsewhere in this file) clips to the image size.
            retixxyy = retifyxxyysize(img_height, img_width, largexxyy)
            # NOTE(review): recover_coordinate's signature is (box, landmarks,
            # width, height) but it is called with (..., height, width); this
            # is only harmless because height == width == 224 -- confirm before
            # using non-square network inputs.
            recover = recover_coordinate(retixxyy, point, height, width)
            recover = recover.astype('float32')
            return recover
        recoverPoint = np.zeros(predictPoint.shape)
        for i in range(0, predictPoint.shape[0]):
            recoverPoint[i] = recoverPart(predictPoint[i], totalxxyy[i], left, right, top, bottom, totalSize[i, 0], totalSize[i, 1], height, width)
        return recoverPoint
    def predict1(self, colorImage, xxyy):
        """
        Predict landmarks and pose (pitch, yaw, roll) for a single rectangle.
        colorImage: image array
        xxyy: single rectangle [left, right, top, bottom]
        return value
            predictposes[0, :] : pitch, yaw, roll
        """
        predictpoints, landmarks, predictposes = self.predict(colorImage, np.array([xxyy]))
        return predictpoints[0], landmarks[0], predictposes[0, :]
def roundByD(angle, delta):
    """Round *angle* to the nearest multiple of *delta*.

    angle: value in degrees (any real number).
    delta: grid step; the result is round(angle/delta) * delta.
    """
    step = float(delta)
    return round(angle / step) * delta
def getPyrStr(pitch, yaw, roll):
    """Build the "P_..._Y_..._R_..." directory-name string for a head pose.

    pitch: pitch angle in degrees, snapped to a 5-degree grid.
    yaw: yaw angle in degrees, snapped to a 5-degree grid.
    roll: roll angle in degrees, snapped to a 10-degree grid.
    """
    quantized = (roundByD(pitch, 5), roundByD(yaw, 5), roundByD(roll, 10))
    return "P_%+03d_Y_%+03d_R_%+03d" % quantized
def getPyStr(pitch, yaw):
    """Build the "P_..._Y_..." directory-name string for a head pose.

    pitch: pitch angle in degrees, snapped to a 5-degree grid.
    yaw: yaw angle in degrees, snapped to a 5-degree grid.
    """
    pitchDelta = 5
    yawDelta = 5
    # The unused rollDelta local was removed; roll is not part of this
    # two-angle name (see getPyrStr for the three-angle variant).
    pyStr = "P_%+03d_Y_%+03d" % (roundByD(pitch, pitchDelta), roundByD(yaw, yawDelta))
    return pyStr
def getPyrDir(outDir, pitch, yaw, roll):
    """Return (creating it if missing) the pose sub-directory of *outDir*.

    outDir: parent directory for the pose folders.
    pitch, yaw, roll: head-pose angles in degrees (see getPyrStr for the
        directory naming/quantization scheme).
    """
    target = os.path.join(outDir, getPyrStr(pitch, yaw, roll))
    if not os.path.isdir(target):
        os.makedirs(target)
    return target
def getPyDir(outDir, pitch, yaw):
    """Return (creating it if missing) the pitch/yaw sub-directory of *outDir*.

    outDir: parent directory for the pose folders.
    pitch: pitch angle in degrees.
    yaw: yaw angle in degrees.
    """
    # Delegate the name formatting to getPyStr instead of duplicating the
    # rounding/formatting logic inline (the original also carried an unused
    # rollDelta local and a docstring describing a nonexistent roll param).
    pyDir = os.path.join(outDir, getPyStr(pitch, yaw))
    if not os.path.isdir(pyDir):
        os.makedirs(pyDir)
    return pyDir
| 8,156 | 3,145 |
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
"""
module to compile the required python extensions
This is for development purposes only! Later on
it might be integrated into the standard setup.py
"""
# http://docs.cython.org/src/tutorial/cython_tutorial.htmlfrom
# distutils.core import setup
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(
    # Compile the Cython variogram extension; build in place with
    # `python setup_extensions.py build_ext --inplace` (see note below).
    ext_modules=cythonize(
        ["./pycmbs/geostatistic/variogram_base.pyx"]),
    # this is needed to get proper information on numpy headers
    include_dirs=[numpy.get_include()]
)
# run as ... to build extension
# $ python setup_extensions.py build_ext --inplace
| 763 | 248 |
from src.models.classifiers import *
from src.models.frank import *
| 68 | 21 |
# -*- coding: utf-8 -*-
"""
DC Resistivity Forward Simulation in 2.5D
=========================================
Here we use the module *SimPEG.electromagnetics.static.resistivity* to predict
DC resistivity data and plot using a pseudosection. In this tutorial, we focus
on the following:
- How to define the survey
- How to define the forward simulation
- How to predict normalized voltage data for a synthetic conductivity model
- How to include surface topography
- The units of the model and resulting data
"""
#########################################################################
# Import modules
# --------------
#
from discretize import TreeMesh
from discretize.utils import mkvc, refine_tree_xyz
from SimPEG.utils import model_builder, surface2ind_topo
from SimPEG.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc
from SimPEG import maps, data
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG.electromagnetics.static.utils.static_utils import (
generate_dcip_sources_line,
apparent_resistivity_from_voltage,
plot_pseudosection,
)
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
write_output = False  # set True to export noisy data + topography (final section)
mpl.rcParams.update({"font.size": 16})
# sphinx_gallery_thumbnail_number = 3
###############################################################
# Defining Topography
# -------------------
#
# Here we define surface topography as an (N, 3) numpy array. Topography could
# also be loaded from a file. In our case, our survey takes place within a set
# of valleys that run North-South.
#
x_topo, y_topo = np.meshgrid(
    np.linspace(-3000, 3000, 601), np.linspace(-3000, 3000, 101)
)
# Valleys are sinusoidal in x (800 m period, 40 m amplitude), uniform along y.
z_topo = 40.0 * np.sin(2 * np.pi * x_topo / 800) - 40.0
x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
topo_xyz = np.c_[x_topo, y_topo, z_topo]
# Create 2D topography. Since our 3D topography only changes in the x direction,
# it is easy to define the 2D topography projected along the survey line. For
# arbitrary topography and for an arbitrary survey orientation, the user must
# define the 2D topography along the survey line.
topo_2d = np.unique(topo_xyz[:, [0, 2]], axis=0)
#####################################################################
# Create Dipole-Dipole Survey
# ---------------------------
#
# Here we define a single EW survey line that uses a dipole-dipole configuration.
# For the source, we must define the AB electrode locations. For the receivers
# we must define the MN electrode locations. Instead of creating the survey
# from scratch (see 1D example), we will use the *generate_dcip_sources_line*
# utility (the comment previously misnamed it *generat_dcip_survey_line*).
#
# Define survey line parameters
survey_type = "dipole-dipole"
dimension_type = "2D"
data_type = "volt"
end_locations = np.r_[-400.0, 400.0]
station_separation = 40.0
num_rx_per_src = 10
# Generate source list for DC survey line
source_list = generate_dcip_sources_line(
    survey_type,
    data_type,
    dimension_type,
    end_locations,
    topo_2d,
    num_rx_per_src,
    station_separation,
)
# Define survey
survey = dc.survey.Survey(source_list, survey_type=survey_type)
###############################################################
# Create Tree Mesh
# ------------------
#
# Here, we create the Tree mesh that will be used to predict DC data.
#
dh = 4  # base cell width
dom_width_x = 3200.0  # domain width x
dom_width_z = 2400.0  # domain width z
nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0)))  # num. base cells x
nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0)))  # num. base cells z
# Define the base mesh
hx = [(dh, nbcx)]
hz = [(dh, nbcz)]
mesh = TreeMesh([hx, hz], x0="CN")
# Mesh refinement based on topography
mesh = refine_tree_xyz(
    mesh,
    topo_xyz[:, [0, 2]],
    octree_levels=[0, 0, 4, 4],
    method="surface",
    finalize=False,
)
# Mesh refinement near transmitters and receivers. First we need to obtain the
# set of unique electrode locations.
electrode_locations = np.c_[
    survey.locations_a,
    survey.locations_b,
    survey.locations_m,
    survey.locations_n,
]
unique_locations = np.unique(
    np.reshape(electrode_locations, (4 * survey.nD, 2)), axis=0
)
mesh = refine_tree_xyz(
    mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False
)
# Refine core mesh region
xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0])
xyz = np.c_[mkvc(xp), mkvc(zp)]
mesh = refine_tree_xyz(
    mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False
)
# No further refinement is possible once the mesh is finalized.
mesh.finalize()
###############################################################
# Create Conductivity Model and Mapping for Tree Mesh
# -----------------------------------------------------
#
# Here we define the conductivity model used to predict the DC data: a
# conductive sphere and a resistive sphere embedded in a uniform background.
# Cells above the surface receive the air conductivity through the
# active-cell mapping. (The previous comment here was a copy-paste of the
# electrode-draping note from a later section.)
#
# Define conductivity model in S/m (or resistivity model in Ohm m)
air_conductivity = 1e-8
background_conductivity = 1e-2
conductor_conductivity = 1e-1
resistor_conductivity = 1e-3
# Find active cells in forward modeling (cell below surface)
ind_active = surface2ind_topo(mesh, topo_xyz[:, [0, 2]])
# Define mapping from model to active cells
nC = int(ind_active.sum())
conductivity_map = maps.InjectActiveCells(mesh, ind_active, air_conductivity)
# Define model
conductivity_model = background_conductivity * np.ones(nC)
# Conductive sphere (radius 60 m) centered at (-120, -160)
ind_conductor = model_builder.getIndicesSphere(np.r_[-120.0, -160.0], 60.0, mesh.gridCC)
ind_conductor = ind_conductor[ind_active]
conductivity_model[ind_conductor] = conductor_conductivity
# Resistive sphere (radius 60 m) centered at (120, -100)
ind_resistor = model_builder.getIndicesSphere(np.r_[120.0, -100.0], 60.0, mesh.gridCC)
ind_resistor = ind_resistor[ind_active]
conductivity_model[ind_resistor] = resistor_conductivity
# Plot Conductivity Model
fig = plt.figure(figsize=(9, 4))
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
norm = LogNorm(vmin=1e-3, vmax=1e-1)
ax1 = fig.add_axes([0.14, 0.17, 0.68, 0.7])
mesh.plot_image(
    plotting_map * conductivity_model, ax=ax1, grid=False, pcolor_opts={"norm": norm}
)
ax1.set_xlim(-600, 600)
ax1.set_ylim(-600, 0)
ax1.set_title("Conductivity Model")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("z (m)")
ax2 = fig.add_axes([0.84, 0.17, 0.03, 0.7])
cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical")
cbar.set_label(r"$\sigma$ (S/m)", rotation=270, labelpad=15, size=12)
plt.show()
###############################################################
# Project Survey to Discretized Topography
# ----------------------------------------
#
# It is important that electrodes are not modeled as being in the air. Even if
# the electrodes are properly located along surface topography, they may lie
# above the discretized topography. This step is carried out to ensure all
# electrodes lie on the discretized surface.
#
survey.drape_electrodes_on_topography(mesh, ind_active, option="top")
#######################################################################
# Predict DC Resistivity Data
# ---------------------------
#
# Here we predict DC resistivity data. If the keyword argument *sigmaMap* is
# defined, the simulation will expect a conductivity model. If the keyword
# argument *rhoMap* is defined, the simulation will expect a resistivity model.
#
simulation = dc.simulation_2d.Simulation2DNodal(
    mesh, survey=survey, sigmaMap=conductivity_map, solver=Solver
)
# Predict the data by running the simulation. The data are the raw voltage in
# units of volts.
dpred = simulation.dpred(conductivity_model)
#######################################################################
# Plotting in Pseudo-Section
# --------------------------
#
# Here, we demonstrate how to plot 2D data in pseudo-section.
# First, we plot the voltages in pseudo-section as a scatter plot. This
# allows us to visualize the pseudo-sensitivity locations for our survey.
# Next, we plot the apparent conductivities in pseudo-section as a filled
# contour plot.
#
# Plot voltages pseudo-section (log scale, absolute values)
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
    survey,
    dobs=np.abs(dpred),
    plot_type="scatter",
    ax=ax1,
    scale="log",
    cbar_label="V/A",
    scatter_opts={"cmap": mpl.cm.viridis},
)
ax1.set_title("Normalized Voltages")
plt.show()
# Get apparent conductivities from volts and survey geometry
apparent_conductivities = 1 / apparent_resistivity_from_voltage(survey, dpred)
# Plot apparent conductivity pseudo-section
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
    survey,
    dobs=apparent_conductivities,
    plot_type="contourf",
    ax=ax1,
    scale="log",
    cbar_label="S/m",
    mask_topography=True,
    contourf_opts={"levels": 20, "cmap": mpl.cm.viridis},
)
ax1.set_title("Apparent Conductivity")
plt.show()
#######################################################################
# Optional: Write out dpred
# -------------------------
#
# Write DC resistivity data, topography and true model
#
if write_output:
    dir_path = os.path.dirname(__file__).split(os.path.sep)
    dir_path.extend(["outputs"])
    dir_path = os.path.sep.join(dir_path) + os.path.sep
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    # Add 5% Gaussian noise to each datum.
    # BUG FIX: the original drew noise with np.random.rand (uniform on
    # [0, 1)), which is neither Gaussian nor zero-mean, biasing every datum
    # upward; np.random.randn gives the zero-mean Gaussian noise with
    # standard deviation `std` that the comment intends.
    np.random.seed(225)
    std = 0.05 * np.abs(dpred)
    dc_noise = std * np.random.randn(len(dpred))
    dobs = dpred + dc_noise
    # Create a survey with the original electrode locations
    # and not the shifted ones
    # Generate source list for DC survey line
    source_list = generate_dcip_sources_line(
        survey_type,
        data_type,
        dimension_type,
        end_locations,
        topo_xyz,
        num_rx_per_src,
        station_separation,
    )
    survey_original = dc.survey.Survey(source_list)
    # Write out data at their original electrode locations (not shifted)
    data_obj = data.Data(survey_original, dobs=dobs, standard_deviation=std)
    fname = dir_path + "dc_data.obs"
    write_dcip2d_ubc(fname, data_obj, "volt", "dobs")
    fname = dir_path + "topo_xyz.txt"
    np.savetxt(fname, topo_xyz, fmt="%.4e")
| 10,469 | 3,683 |
"""
PURPOSE:
Run feature selection methods available from sci-kit learn on a given dataframe
Must set path to Miniconda in HPC: export PATH=/mnt/home/azodichr/miniconda3/bin:$PATH
INPUT:
-df Feature file for ML. If class/Y values are in a separate file use -df for features and -df2 for class/Y
-alg Feature selection mettestd to use
- Chi2
need: -n
- RandomForest
need: -n, -type
- Enrichment using Fisher's Exact (for classification with binary feats only)
need: -p (default=0.05)
- LASSO
need: -p, -type, n
- Bayesian LASSO (bl)
need: -n
- Elastic Net (EN)
need: -n -p (default=0.5, proportion of L1 to L2 penalty)
- Relief (https://github.com/EpistasisLab/scikit-rebate) (currently for regression only)
need: -n,
- BayesA regression (for regression only)
need: -n
- rrBLUP (i.e. mixed.solve) (for regression only)
need: -n
- Random
need: -n
OPTIONAL INPUT:
-n Number(s) of features you would like to keep (required for chi2, RF, relief, bayesA)
Example: -n 10 or -n 10,50,100
-test File with list of instances to hold out from feature selection
-save Save name for list of features selected. Will automatically append _n to the name
Default: df_F_n or df_f_cvJobNum_n
-cl_train Since only RF works for multi-class problems, use cl_train to give a list of what classes you want to include (Default = 'all')
If binary, first label = positive class.
-sep Set seperator for input data (Default = '\t')
-df2 File with class information. Use only if df contains the features but not the classes
* Need to specifiy what column in df2 is y using -y_name
-y_name Name of the column to predict (Default = Class)
-drop_na T/F to drop rows with NAs
-feat File containing the features you want to use from the df (one feature per line)
Default: all (i.e. everything in the dataframe given).
-type r = regression, c = classification (required for LASSO and RF)
-p Parameter value for LASSO, EN, or Fisher's Exact Test.
Fishers: pvalue cut off (Default = 0.05)
EN: Ratio of L1 to L2 regularization (larger = fewer features selected)
LASSO: If type = r: need alpha value, try 0.01, 0.001. (larger = fewer features selected)
LASSO: If type = c: need C which controls the sparcity, try 0.01, 0.1 (smaller = fewer features selected)
-pos String for what codes for the positive example (i.e. UUN) Default = 1
-neg String for what codes for the negative example (i.e. NNN) Default = 0
-cvs To run feat. sel. withing cross-validation scheme provide a CVs matrix and -JobNum
CVs maxtrix: rows = instances, columns = CV replicates, value are the CV-fold each instance belongs to.
-scores T/F to output scores/coefficients for each feature (Not available for: FET, LASSO, or Chi2)
OUTPUT:
-df_alg.txt New dataframe with columns only from feature selection
AUTHOR: Christina Azodi
REVISIONS: Written 8/16/2016
Added relief algorithm 10/22/2017
Added BayesA algorithm 3/23/2018
"""
import pandas as pd
import numpy as np
import subprocess as sp
import sys, os, time
start_time = time.time()
def SaveTopFeats(top, save_name):
    """Write the selected feature names to *save_name*, one per line.

    top: list (or numpy array) of feature names; a 'Class' entry, if present,
        is removed in place (matching the original behavior) before saving.
    save_name: path of the output file.
    """
    try:
        top.remove('Class')
    except (ValueError, AttributeError):
        # ValueError: 'Class' not present in the list.
        # AttributeError: several callers pass numpy arrays, which have no
        # .remove method. (The original used a bare except here.)
        pass
    # Context manager ensures the handle is closed; the original left the
    # file object open and relied on interpreter teardown to flush it.
    with open(save_name, 'w') as out:
        for f in top:
            out.write(f + '\n')
def DecisionTree(df, n, TYPE, save_name, SCORES):
    """Feature selection using a Random Forest over the whole dataframe.

    Feature importance from the Random Forest is the Gini importance
    (i.e. the normalized total reduction of the criterion for the descendent
    nodes compared to the parent node, across all trees).

    df: dataframe whose column 0 is 'Class' and remaining columns are features
    n: iterable of feature-count sizes to keep (one output file per size)
    TYPE: 'c' for classification, 'r' for regression
    save_name: base name for the output lists (written as save_name_<size>)
    SCORES: anything other than 'f' also writes per-feature importances
    """
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.ensemble import RandomForestRegressor
    from math import sqrt
    X_all = df.drop('Class', axis=1).values
    Y_all = df.loc[:, 'Class'].values
    fean_num_feat_sel = len(list(df.columns.values)[1:])
    if TYPE.lower() == 'c':
        feat_sel_forest = RandomForestClassifier(criterion='entropy', max_features= round(sqrt(fean_num_feat_sel)), n_estimators=500, n_jobs=1)
    elif TYPE.lower() == 'r':
        Y_all = Y_all.astype('float')
        feat_sel_forest = RandomForestRegressor(max_features= round(sqrt(fean_num_feat_sel)), n_estimators=500, n_jobs=1)
    else:
        print('Need to specify -type r/c (regression/classification)')
        exit()
    print("=====* Running decision tree based feature selection *=====")
    # Train the model & derive importance scores
    feat_sel_forest = feat_sel_forest.fit(X_all, Y_all)
    importances = feat_sel_forest.feature_importances_
    # Sort importance scores and keep top n
    feat_names = list(df.columns.values)[1:]
    temp_imp = pd.DataFrame(importances, columns = ["imp"], index=feat_names)
    indices = np.argsort(importances)[::-1]
    if SCORES.lower() != 'f':
        save_scores = save_name + '_RFScores.txt'
        temp_imp.to_csv(save_scores)
    for n_size in n:
        indices_keep = indices[0:int(n_size)]
        # Shift indices by one ('Class' occupies column 0 of df) and prepend
        # column 0 itself; SaveTopFeats strips the 'Class' entry on write.
        fixed_index = [0] + [i + 1 for i in indices_keep]
        good = [df.columns[i] for i in fixed_index]
        # BUG FIX: the original printed str(good.remove('Class')), which
        # always prints "None" because list.remove returns None. Remove
        # 'Class' first, then print the actual feature names.
        good.remove('Class')
        print("Features selected using DecisionTree feature selection: %s" % str(good))
        save_name2 = save_name + "_" + str(n_size)
        SaveTopFeats(good, save_name2)
def Chi2(df, n, save_name):
    """Feature selection using chi-squared statistics on the whole dataframe.

    Chi2 measures the dependence between stochastic variables; this method
    weeds out features most likely to be independent of class. Note sklearn's
    chi2 requires non-negative feature values.

    df: dataframe whose column 0 is 'Class' and remaining columns are features
    n: iterable of feature-count sizes to keep (one output file per size)
    save_name: base name for the output lists (written as save_name_<size>)
    """
    from sklearn.feature_selection import SelectKBest
    from sklearn.feature_selection import chi2
    X_all = df.drop('Class', axis=1).values
    Y_all = df.loc[:, 'Class'].values
    for n_size in n:
        # BUG FIX: the original passed the whole list `n` as k
        # (SelectKBest(chi2, k=n)) instead of the per-iteration size -- the
        # reason it carried a "might not be working" warning.
        ch2 = SelectKBest(chi2, k=int(n_size))
        ch2.fit_transform(X_all, Y_all)
        index = ch2.get_support(indices=True)
        # Shift indices by one ('Class' occupies column 0 of df) and prepend
        # column 0 itself; SaveTopFeats strips the 'Class' entry on write.
        fixed_index = [0] + [i + 1 for i in index]
        good = [df.columns[i] for i in fixed_index]
        # BUG FIX: the message wrongly said "DecisionTree" (copy-paste).
        print("Features selected using Chi2 feature selection: %s" % str(good))
        save_name2 = save_name + "_" + str(n_size)
        SaveTopFeats(good, save_name2)
def Relief(df, n, n_jobs, save_name, SCORES):
    """Feature selection using ReliefF (skrebate) on the whole dataframe.

    df: dataframe with a 'Class' column and feature columns
    n: iterable of feature-count sizes to keep (one output file per size)
    n_jobs: worker count passed through to ReliefF
    save_name: base name for output lists (written as save_name_<size>)
    SCORES: anything other than 'f' also writes the per-feature scores
    """
    from skrebate import ReliefF
    feature_names = list(df)
    feature_names.remove('Class')
    X_all = df.drop('Class', axis=1).values
    Y_all = df.loc[:, 'Class'].values.astype('int')
    print("=====* Running relief/rebase based feature selection *=====")
    selector = ReliefF(n_jobs = int(n_jobs))
    selector.fit(X_all, Y_all)
    ranked = pd.DataFrame(selector.feature_importances_, index = feature_names, columns = ['relief_imp'])
    ranked = ranked.sort_values(by='relief_imp', ascending=False)
    if SCORES.lower() != 'f':
        ranked.to_csv(save_name + '_ReliefScores.txt')
    for n_size in n:
        keep = ranked.index.values[0:int(n_size)]
        print("Features selected using Relief from rebase: %s" % str(keep))
        SaveTopFeats(keep, save_name + "_" + str(n_size))
def L1(df, PARAMETER, TYPE, save_name):
    """Apply a linear model with an L1 penalty and keep the features whose
    coefficients are not shrunk to zero. Unlike Chi2, this method accounts
    for the effect of all other features when judging a single one.

    For classification (TYPE 'c'), uses svm.LinearSVC(C=PARAMETER).
    For regression (TYPE 'r'), uses linear_model.Lasso(alpha=PARAMETER).
    """
    from sklearn.feature_selection import SelectFromModel
    from sklearn.svm import LinearSVC
    from sklearn.linear_model import Lasso
    X_all = df.drop('Class', axis=1).values
    Y_all = df.loc[:, 'Class'].values
    if TYPE == 'c' or TYPE == 'classification':
        # Class labels were recoded to 1/0 upstream, so the int cast is safe.
        Y_all = Y_all.astype('int')
        estimator = LinearSVC(C = PARAMETER, penalty='l1', dual=False).fit(X_all, Y_all)
    elif TYPE == 'r' or TYPE == 'regression':
        # BUG FIX: the original cast Y to int unconditionally, truncating
        # continuous response values before fitting the regression LASSO.
        Y_all = Y_all.astype('float')
        estimator = Lasso(alpha = PARAMETER).fit(X_all, Y_all)
    print("=====* Running L1/LASSO based feature selection *=====")
    model = SelectFromModel(estimator, prefit=True)
    keep = model.get_support([])
    X_new = model.transform(X_all)
    feat_names = np.array(list(df)[1:])
    good = feat_names[keep]
    print("Features selected using LASSO: %s" % str(good))
    print('\nNumber of features selected using LASSO (sparcity parameter = %s): %i' % (str(PARAMETER), X_new.shape[1]))
    save_name2 = save_name
    SaveTopFeats(good, save_name2)
def EN(df, PARAMETER, n, save_name, SCORES):
    """Apply Elastic-Net based feature selection.

    PARAMETER is the l1:l2 (penalty:zero) ratio (default 0.5 upstream);
    features are ranked by absolute coefficient.
    """
    from sklearn.linear_model import ElasticNet
    X_all = df.drop('Class', axis=1).values
    Y_all = df.loc[:, 'Class'].values
    feature_names = list(df)
    feature_names.remove('Class')
    model = ElasticNet(alpha=0.5, l1_ratio=PARAMETER, fit_intercept=True, positive=False).fit(X_all, Y_all)
    # Rank features by the magnitude of their coefficients.
    coef_table = pd.DataFrame(model.coef_, index = feature_names, columns = ['EN_coef']).abs()
    coef_table = coef_table.sort_values(by='EN_coef', ascending=False)
    # Count the number of coefficients that were not zero
    non_zero = coef_table[coef_table > 0 ].count()
    if SCORES.lower() != 'f':
        coef_table.to_csv(save_name + '_ENScores.txt')
    for n_size in n:
        keep = coef_table.index.values[0:int(n_size)]
        if int(n_size) > int(non_zero):
            print("!!!!!!!!!!!WARNING!!!!!!!!!!!! ONLY %i FEATURES HAD A NON-ZERO COEFFICIENT." % non_zero)
            print('!!!!!!!!!!!WARNING!!!!!!!!!!!! THIS LIST CONTAINS SOME FEATURES THAT HAD A COEF = 0')
        print("Features selected using Elastic-Net with l1 ratio = %0.6f: %s..." % (PARAMETER, str(keep[:5])))
        SaveTopFeats(keep, save_name + "_" + str(n_size))
    print("Note that using a l1 ratio = %.6f, there were %i non-zero features" % (PARAMETER, non_zero))
def BayesA(df_use, n, save_name, SCORES):
    """ Use BayesA from BGLR package to select features with largest
    abs(coefficients) """
    # Writes the dataframe to a temp CSV, generates a temporary R script that
    # fits a BayesA model with BGLR, then reads the coefficients back in.
    # NOTE(review): assumes an HPC environment where `module load R` works
    # and BGLR is installed under ~/R/library -- confirm before reuse.
    cwd = os.getcwd()
    temp_name = 'temp_' + save_name
    df_use.to_csv(temp_name)
    # Write temp Rscript
    tmpR=open("%s_BayA.R" % temp_name,"w")
    tmpR.write('library(BGLR)\n')
    tmpR.write("setwd('%s')\n" % cwd)
    tmpR.write("df <- read.csv('%s', sep=',', header=TRUE, row.names=1)\n" % temp_name)
    tmpR.write("Y <- df[, 'Class']\n")
    tmpR.write("X <- df[, !colnames(df) %in% c('Class')]\n")
    tmpR.write("X=scale(X)\n")
    tmpR.write("ETA=list(list(X=X,model='BayesA'))\n")
    tmpR.write("fm=BGLR(y=Y,ETA=ETA,verbose=FALSE, nIter=12000,burnIn=2000)\n")
    tmpR.write("coef <- abs(fm$ETA[[1]]$b)\n")
    tmpR.write("coef_df <- as.data.frame(coef)\n")
    tmpR.write("write.table(coef_df, file='%s', sep=',', row.names=TRUE, quote=FALSE)\n" % (temp_name + '_BayAScores.txt'))
    tmpR.close()
    print('Running bayesA model from BGLR inplemented in R.')
    process = sp.Popen('module load R && export R_LIBS_USER=~/R/library && R CMD BATCH %s_BayA.R' % temp_name, shell=True)
    process.wait()
    coefs = pd.read_csv(temp_name + '_BayAScores.txt', sep = ',')
    # The R script already wrote abs(coef), so this .abs() is a no-op kept
    # for safety/symmetry with the other BGLR wrappers.
    coefs['coef_abs'] = coefs.coef.abs()
    coefs_top = coefs.sort_values(by='coef_abs', ascending=False)
    if SCORES.lower() == 'f':
        os.system("rm %s_BayAScores.txt" % temp_name)
    else:
        os.system("mv %s_BayAScores.txt %s_BayAScores.txt" % (temp_name, save_name))
    # Clean up the temp CSV, the generated R script and BGLR run artifacts.
    os.system("rm %s %s_BayA.R varE.dat mu.dat ETA_1_ScaleBayesA.dat ETA_1_lambda.dat" % (temp_name, temp_name))
    for n_size in n:
        keep = coefs_top.index.values[0:int(n_size)]
        print("Top %s features selected using BayesA from BGLR: %s" % (str(n_size), str(keep)))
        save_name2 = save_name + "_" + str(n_size)
        SaveTopFeats(keep, save_name2)
def BLASSO(df_use, n, save_name, SCORES):
    """ Use Bayesian LASSO (model 'BL') from the BGLR R package to select
    features with largest abs(coefficients). Writes the dataframe to a temp
    CSV, runs a generated R script, then reads the coefficients back in. """
    cwd = os.getcwd()
    temp_name = 'temp_' + save_name
    df_use.to_csv(temp_name)
    # Write temp Rscript
    tmpR=open("%s_BL.R" % temp_name,"w")
    tmpR.write('library(BGLR)\n')
    tmpR.write("setwd('%s')\n" % cwd)
    tmpR.write("df <- read.csv('%s', sep=',', header=TRUE, row.names=1)\n" % temp_name)
    tmpR.write("Y <- df[, 'Class']\n")
    tmpR.write("X <- df[, !colnames(df) %in% c('Class')]\n")
    tmpR.write("ETA=list(list(X=X,model='BL'))\n")
    tmpR.write("fm=BGLR(y=Y,ETA=ETA,verbose=FALSE, nIter=12000,burnIn=2000)\n")
    tmpR.write("coef <- abs(fm$ETA[[1]]$b)\n")
    tmpR.write("coef_df <- as.data.frame(coef)\n")
    tmpR.write("write.table(coef_df, file='%s', sep=',', row.names=TRUE, quote=FALSE)\n" % (temp_name + '_BLScores.txt'))
    tmpR.close()
    # BUG FIX: message previously said "bayesA" (copy-paste from BayesA()).
    print('Running Bayesian LASSO (BL) model from BGLR implemented in R.')
    process = sp.Popen('module load R && export R_LIBS_USER=~/R/library && R CMD BATCH %s_BL.R' % temp_name, shell=True)
    process.wait()
    coefs = pd.read_csv(temp_name + '_BLScores.txt', sep = ',')
    coefs['coef_abs'] = coefs.coef.abs()
    coefs_top = coefs.sort_values(by='coef_abs', ascending=False)
    if SCORES.lower() == 'f':
        os.system("rm %s_BLScores.txt" % temp_name)
    else:
        os.system("mv %s_BLScores.txt %s_BLScores.txt" % (temp_name, save_name))
    # BUG FIX: the original interpolated three arguments into a format string
    # with only two placeholders (a TypeError at runtime) and tried to delete
    # "<temp>_rrB.R" instead of the "<temp>_BL.R" script written above.
    os.system("rm %s %s_BL.R varE.dat mu.dat ETA_1_ScaleBL.dat ETA_1_lambda.dat" % (temp_name, temp_name))
    for n_size in n:
        keep = coefs_top.index.values[0:int(n_size)]
        print("Top %s features selected using BL from BGLR: %s" % (str(n_size), str(keep)))
        save_name2 = save_name + "_" + str(n_size)
        SaveTopFeats(keep, save_name2)
def rrBLUP(df_use, n, save_name, SCORES):
    """ Use mixed.solve from the rrBLUP R package to select features with
    largest abs(coefficients). (The original docstring wrongly said BayesA.) """
    # Writes the dataframe to a temp CSV, runs a generated R script, then
    # reads the marker effects (mod$u) back in and ranks by magnitude.
    # NOTE(review): assumes an HPC environment where `module load R` works
    # and rrBLUP is installed under ~/R/library -- confirm before reuse.
    cwd = os.getcwd()
    temp_name = 'temp_' + save_name
    df_use.to_csv(temp_name)
    # Write temp Rscript
    tmpR=open("%s_rrB.R" % temp_name,"w")
    tmpR.write("setwd('%s')\n" % cwd)
    tmpR.write('library(rrBLUP)\n')
    tmpR.write("df <- read.csv('%s', sep=',', header=TRUE, row.names=1)\n" % temp_name)
    tmpR.write("Y <- df[, 'Class']\n")
    tmpR.write("X <- df[, !colnames(df) %in% c('Class')]\n")
    tmpR.write("mod <- mixed.solve(Y, Z=X, K=NULL, SE=FALSE, return.Hinv=FALSE)\n")
    tmpR.write("coef <- mod$u\n")
    tmpR.write("coef_df <- as.data.frame(coef)\n")
    tmpR.write("write.table(coef_df, file='%s', sep=',', row.names=TRUE, quote=FALSE)\n" % (temp_name + '_rrBScores.txt'))
    tmpR.close()
    print('Running rrBLUP using mixed.solve in R.')
    process = sp.Popen('module load R && export R_LIBS_USER=~/R/library && R CMD BATCH %s_rrB.R' % temp_name, shell=True)
    process.wait()
    coefs = pd.read_csv(temp_name + '_rrBScores.txt', sep = ',')
    # mixed.solve effects are signed; rank by absolute value.
    coefs['coef_abs'] = coefs.coef.abs()
    coefs_top = coefs.sort_values(by='coef_abs', ascending=False)
    if SCORES.lower() == 'f':
        os.system("rm %s_rrBScores.txt" % temp_name)
    else:
        os.system("mv %s_rrBScores.txt %s_rrBScores.txt" % (temp_name, save_name))
    # NOTE(review): varE.dat / mu.dat / ETA_1_ScalerrB.dat look like BGLR
    # artifacts copy-pasted from the BayesA/BL wrappers; mixed.solve may not
    # produce them (rm then just reports missing files) -- confirm.
    os.system("rm %s %s_rrB.R varE.dat mu.dat ETA_1_ScalerrB.dat" % (temp_name, temp_name))
    for n_size in n:
        keep = coefs_top.index.values[0:int(n_size)]
        print("Top %s features selected using mixed.solve in R (similar to rrBLUP): %s" % (str(n_size), str(keep)))
        save_name2 = save_name + "_" + str(n_size)
        SaveTopFeats(keep, save_name2)
def FET(df, PARAMETER, pos, neg, save_name, CL='Class'):
    """Select features enriched in the positive class via Fisher's Exact Test.

    df: dataframe with a class column and binary (0/1) feature columns.
    PARAMETER: p-value cutoff; features with p <= PARAMETER are kept.
    pos, neg: values in the class column coding positive / negative examples.
    save_name: output path for the selected feature list.
    CL: name of the class column. Previously read from a module-level global;
        it is now a parameter whose default ('Class') matches that global, so
        existing callers are unaffected.
    """
    from scipy.stats import fisher_exact
    kmers = list(df)
    kmers.remove(CL)
    enriched = [CL]
    print("=====* Running enrichement based feature selection *=====")
    for k in kmers:
        # 2x2 contingency counts; combinations absent from the groupby yield
        # an empty selection, so .iloc[0] raises IndexError -> count is 0.
        # (Bare excepts tightened to IndexError.)
        temp = df.groupby([CL, k]).size().reset_index(name="Count")
        try:
            TP = temp.loc[(temp[CL] == pos) & (temp[k] == 1), 'Count'].iloc[0]
        except IndexError:
            TP = 0
        try:
            TN = temp.loc[(temp[CL] == neg) & (temp[k] == 0), 'Count'].iloc[0]
        except IndexError:
            TN = 0
        try:
            FP = temp.loc[(temp[CL] == neg) & (temp[k] == 1), 'Count'].iloc[0]
        except IndexError:
            FP = 0
        try:
            FN = temp.loc[(temp[CL] == pos) & (temp[k] == 0), 'Count'].iloc[0]
        except IndexError:
            FN = 0
        # One-sided test: is the feature over-represented in the positive class?
        oddsratio,pvalue = fisher_exact([[TP,FN],[FP,TN]],alternative='greater')
        if pvalue <= PARAMETER:
            enriched.append(k)
    save_name2 = save_name
    SaveTopFeats(enriched, save_name2)
def Random(df, n, save_name):
    """Randomly select n features (one output file per requested size)."""
    from random import sample
    feat_names = list(df)
    feat_names.remove('Class')
    for n_size in n:
        chosen = sample(feat_names, int(n_size))
        SaveTopFeats(chosen, save_name + "_" + str(n_size))
if __name__ == "__main__":
    # Command-line driver: parses "-flag value" pairs, loads/cleans the
    # feature matrix, then dispatches to the selected feature-selection
    # algorithm.  Relies on sibling functions (DecisionTree, Chi2, L1, EN,
    # Relief, BayesA, BLASSO, rrBLUP, FET, Random) and on `sys`, `pd`,
    # `time`, and `start_time` defined earlier in this file.
    #Default parameters
    FEAT = 'all' #Features to include from dataframe. Default = all (i.e. don't remove any from the given dataframe)
    neg = 0 #Default value for negative class = 0
    pos = 1 #Default value for positive class = 1
    save_list = 'false'
    p = 0.05
    CL = 'Class'
    TYPE = 'c'  # 'c' = classification; anything else skips class recoding below
    n_jobs = 1
    CVs, REPS = 'pass', 1  # 'pass' = no cross-validation scheme
    SEP = '\t'
    SAVE, DF2 = 'default', 'None'
    UNKNOWN = 'unk'  # label for unknown-class instances (always dropped)
    y_name = 'Class'
    test = ''
    cl_train = ''
    drop_na = 'f'
    NA = 'na'
    SCORES = 'f'
    N = 10
    # Hand-rolled argument parsing: arguments come in "-name value" pairs.
    for i in range (1,len(sys.argv),2):
        if sys.argv[i].lower() == "-df":
            DF = sys.argv[i+1]
        if sys.argv[i].lower() == "-df2":
            DF2 = sys.argv[i+1]
        if sys.argv[i].lower() == "-sep":
            SEP = sys.argv[i+1]
        if sys.argv[i].lower() == '-save':
            SAVE = sys.argv[i+1]
        if sys.argv[i].lower() == '-alg':
            alg = sys.argv[i+1]
            # Elastic net gets a sensible default mixing parameter.
            if alg.lower() == 'en':
                PARAMETER = 0.5
        if sys.argv[i].lower() == '-n':
            N = sys.argv[i+1]
        if sys.argv[i].lower() == '-n_jobs':
            n_jobs = int(sys.argv[i+1])
        if sys.argv[i].lower() == '-feat':
            FEAT = sys.argv[i+1]
        if sys.argv[i].lower() == '-cl_train':
            cl_train = sys.argv[i+1]
        if sys.argv[i].lower() == '-p':
            PARAMETER = float(sys.argv[i+1])
        if sys.argv[i].lower() == '-type':
            TYPE = sys.argv[i+1]
        if sys.argv[i].lower() == '-y_name':
            y_name = sys.argv[i+1]
        if sys.argv[i].lower() == '-pos':
            pos = sys.argv[i+1]
        if sys.argv[i].lower() == '-neg':
            neg = sys.argv[i+1]
        if sys.argv[i].lower() == '-cvs':
            CVs = sys.argv[i+1]
        if sys.argv[i].lower() == '-jobnum':
            jobNum = sys.argv[i+1]
        if sys.argv[i].lower() == '-test':
            test = sys.argv[i+1]
        if sys.argv[i].lower() == '-drop_na':
            drop_na = sys.argv[i+1]
        if sys.argv[i].lower() == '-scores':
            SCORES = sys.argv[i+1]
    if len(sys.argv) <= 1:
        print(__doc__)
        exit()
    #Load feature matrix and save feature names
    df = pd.read_csv(DF, sep=SEP, index_col = 0)
    # If features and class info are in separate files, merge them:
    if DF2 != 'None':
        start_dim = df.shape
        df_class = pd.read_csv(DF2, sep=SEP, index_col = 0)
        # inner join keeps only instances present in both files
        df = pd.concat([df_class[y_name], df], axis=1, join='inner')
        print('Merging the feature & class dataframes changed the dimensions from %s to %s (instance, features).'
            % (str(start_dim), str(df.shape)))
    # Specify class column - default = Class
    if y_name != 'Class':
        df = df.rename(columns = {y_name:'Class'})
    # Check for Nas
    if df.isnull().values.any() == True:
        if drop_na.lower() == 't' or drop_na.lower() == 'true':
            start_dim = df.shape
            df = df.dropna(axis=0)
            print('Dropping rows with NA values changed the dimensions from %s to %s.'
                % (str(start_dim), str(df.shape)))
        else:
            print(df.columns[df.isnull().any()].tolist())
            print('There are Na values in your dataframe.\n Impute them or add -drop_na True to remove rows with nas')
            quit()
    # Drop instances in held-out test set if provided
    if test !='':
        print('Removing testldout instances...')
        with open(test) as test_file:
            test_instances = test_file.read().splitlines()
        # Index labels may be strings or ints; retry with int conversion.
        try:
            df = df.drop(test_instances)
        except:
            print('Trying converting instance names to int')
            test_instances = [int(i) for i in test_instances]
            df = df.drop(test_instances)
    # Drop instances that aren't in the listed classes (i.e. make binary matrix)
    if cl_train !='':
        start_dim = df.shape
        use_classes = cl_train.strip().split(',')
        df = df[df['Class'].isin(use_classes)]
        print('Dropping instances that are not in %s, changed dimensions from %s to %s (instance, features).'
            % (str(use_classes), str(start_dim), str(df.shape)))
    # Always drop unknown-class instances before fitting.
    df = df[df['Class'] != UNKNOWN]
    #Recode class as 1 for positive and 0 for negative
    if TYPE.lower() == 'c':
        if cl_train != '':
            # First listed class is treated as positive, second as negative.
            use_classes = cl_train.strip().split(',')
            pos = use_classes[0]
            neg = use_classes[1]
        df["Class"] = df["Class"].replace(pos, 1)
        df["Class"] = df["Class"].replace(neg, 0)
    # If requesting multiple n, convert to list
    try:
        N = N.strip().split(',')
    except:
        # N was left as the integer default; wrap it for uniform iteration.
        N = [N]
    #If 'features to keep' list given, remove columns not in that list
    if FEAT != 'all':
        with open(FEAT) as f:
            features = f.read().splitlines()
        features = ['Class'] + features
        df = df.loc[:,features]
    print('\nSnapshot of data:')
    print(df.iloc[:6, :5])
    # Run feature selection
    df_use = df.copy()
    # Run FS within a cross-validation scheme
    if CVs != 'pass':
        print("Working on cv_%s" % str(jobNum))
        cv_folds = pd.read_csv(CVs, sep=',', index_col=0)
        cv = cv_folds['cv_' + str(jobNum)]
        # Mask the held-out fold (fold 5) so it is ignored during selection.
        df_use['Class'][cv==5] = 'unk'
    if SAVE != 'default':
        save_name = SAVE
    else:
        # Auto-generated name; jobNum is only defined when -jobnum was given.
        try:
            save_name = DF.split("/")[-1] + "_" + y_name + '_'+ alg + '_cv' + str(jobNum)
        except:
            save_name = DF.split("/")[-1] + "_" + alg
    # Dispatch to the requested feature-selection algorithm.
    if alg.lower() == "randomforest" or alg.lower() == "rf":
        DecisionTree(df_use, N, TYPE, save_name, SCORES)
    elif alg.lower() == "chi2" or alg.lower() == "c2":
        Chi2(df_use, N, save_name)
    elif alg.lower() == "l1" or alg.lower() == "lasso":
        if SAVE == 'default':
            save_name = save_name + '_' + str(PARAMETER)
        L1(df_use, PARAMETER, TYPE, save_name)
    elif alg.lower() == "en" or alg.lower() == "elasticnet":
        if SAVE == 'default':
            save_name = save_name + '_' + str(PARAMETER)
        EN(df_use, PARAMETER, N, save_name, SCORES)
    elif alg.lower() == "relief" or alg.lower() == "rebate" or alg.lower() == "rl":
        Relief(df_use, N, n_jobs, save_name, SCORES)
    elif alg.lower() == "bayesa" or alg.lower() == "ba":
        BayesA(df_use, N, save_name, SCORES)
    elif alg.lower() == "blasso" or alg.lower() == "bl":
        BLASSO(df_use, N, save_name, SCORES)
    elif alg.lower() == "rrblup" or alg.lower() == "rrb":
        rrBLUP(df_use, N, save_name, SCORES)
    elif alg.lower() == "fisher" or alg.lower() == "fet" or alg.lower() == 'enrich':
        if SAVE == 'default':
            save_name = save_name + '_' + str(PARAMETER)
        FET(df_use, PARAMETER, pos, neg, save_name)
    elif alg.lower() == "random" or alg.lower() == "rand" or alg.lower() == 'ran':
        Random(df_use, N, save_name)
    # `start_time` is set at the top of the file (outside this view).
    run_time = time.time() - start_time
    print('Run time (sec):' + str(round(run_time,2)))
    print('Done!')
| 23,944 | 9,155 |
# -*- coding: utf-8 -*-
# Module-level setup for the ALE-comparison plotting script: path fix-up,
# figure savers, and the joblib-style cache location.
import sys
import warnings
from pathlib import Path

PROJECT_DIR = Path(__file__).resolve().parent
# Make the parent package importable when the script is run directly.
if sys.path[0] != str(PROJECT_DIR.parent):
    sys.path.insert(0, str(PROJECT_DIR.parent))
# Silence sklearn deprecation noise only while `common` is imported.
warnings.filterwarnings(
    "ignore", category=FutureWarning, module="sklearn.utils.deprecation"
)
from common import *
from common import _sci_format
warnings.filterwarnings(
    "always", category=FutureWarning, module="sklearn.utils.deprecation"
)
# Figures are written under ~/tmp/<parent>/<project>; debug=True presumably
# enables extra saver diagnostics (defined in `common` — confirm there).
figure_saver = PaperFigureSaver(
    directories=Path("~") / "tmp" / PROJECT_DIR.parent.name / PROJECT_DIR.name,
    debug=True,
)
map_figure_saver = figure_saver(**map_figure_saver_kwargs)
for fig_saver in (figure_saver, map_figure_saver):
    fig_saver.experiment = PROJECT_DIR.name
# Memoization cache keyed by "<parent>__<project>".
memory = get_memory("__".join((PROJECT_DIR.parent.name, PROJECT_DIR.name)), verbose=100)
CACHE_DIR = Path(DATA_DIR) / ".pickle" / PROJECT_DIR.parent.name / PROJECT_DIR.name
def single_ax_multi_ale_1d(
    ax,
    feature_data,
    feature,
    bins=20,
    xlabel=None,
    ylabel=None,
    title=None,
    n_jobs=8,
    verbose=False,
):
    """Plot first-order ALE curves of one feature for several experiments on
    a single axis.

    Args:
        ax: matplotlib axis to draw on.
        feature_data: mapping with parallel lists under the keys
            "experiment", "single_experiment_data" (each with "model" and
            "X_train" entries) and "plot_kwargs" — one element per experiment.
        feature: name of the feature whose ALE is computed.
        bins: number of quantile bins for the ALE computation.
        xlabel, ylabel, title: axis annotations (passed straight through).
        n_jobs: worker threads used when an ALE must be (re)computed.
        verbose: enable progress bars and cache logging.
    """
    quantile_list = []
    ale_list = []
    for experiment, single_experiment_data in zip(
        tqdm(
            feature_data["experiment"],
            desc="Calculating feature ALEs",
            disable=not verbose,
        ),
        feature_data["single_experiment_data"],
    ):
        # Per-(experiment, feature, bins) cache of the (quantiles, ale) pair.
        cache = SimpleCache(
            f"{experiment}_{feature}_ale_{bins}",
            cache_dir=CACHE_DIR / "ale",
            verbose=10 if verbose else 0,
        )
        try:
            quantiles, ale = cache.load()
        except NoCachedDataError:
            # Cache miss: compute the ALE from the experiment's model/data.
            model = single_experiment_data["model"]
            model.n_jobs = n_jobs
            X_train = single_experiment_data["X_train"]
            with parallel_backend("threading", n_jobs=n_jobs):
                quantiles, ale = first_order_ale_quant(
                    model.predict, X_train, feature, bins=bins
                )
            cache.save((quantiles, ale))
        quantile_list.append(quantiles)
        ale_list.append(ale)
    # Construct quantiles from the individual quantiles, minimising the amount of interpolation.
    combined_quantiles = np.vstack([quantiles[None] for quantiles in quantile_list])
    final_quantiles = np.mean(combined_quantiles, axis=0)
    # NOTE(review): `quantiles` here is the leaked loop variable (the last
    # experiment's quantiles) — assumes all experiments share the same
    # number of quantiles; confirm.
    mod_quantiles = np.arange(len(quantiles))
    for plot_kwargs, quantiles, ale in zip(
        feature_data["plot_kwargs"], quantile_list, ale_list
    ):
        # Interpolate each of the quantiles relative to the accumulated final quantiles.
        ax.plot(
            np.interp(quantiles, final_quantiles, mod_quantiles),
            ale,
            **{"marker": "o", "ms": 3, **plot_kwargs},
        )
    # Label every other quantile position; "0.0e+0" is shown as plain "0".
    ax.set_xticks(mod_quantiles[::2])
    ax.set_xticklabels(
        [
            t if t != "0.0e+0" else "0"
            for t in _sci_format(final_quantiles[::2], scilim=0)
        ]
    )
    ax.xaxis.set_tick_params(rotation=18)
    ax.grid(alpha=0.4, linestyle="--")
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)
def multi_model_ale_1d(
    feature_name,
    experiment_data,
    experiment_plot_kwargs,
    lags=(0, 1, 3, 6, 9),
    bins=20,
    title=None,
    n_jobs=8,
    verbose=False,
    figure_saver=None,
    single_figsize=(5.4, 1.5),
    legend_bbox=(0.5, 0.5),
    fig=None,
    axes=None,
    legend=True,
    legend_labels=None,
):
    """Compare 1D ALEs of `feature_name` (at several lags) across experiments.

    One subplot per lagged variant of the feature that appears in at least
    two experiments; curves within a subplot come from
    `single_ax_multi_ale_1d`.

    Args:
        feature_name: base feature name; lagged variants are
            "<name> -<lag> Month".
        experiment_data: mapping experiment -> {"model", "X_train", ...}.
        experiment_plot_kwargs: mapping experiment -> matplotlib line kwargs
            (must contain a "label" entry and cover the same experiments).
        lags: lags (in months) to consider; 0 means the unlagged feature.
        bins: quantile bins for the ALE computation.
        title: unused here (kept for interface compatibility — confirm).
        n_jobs, verbose: forwarded to the ALE computation.
        figure_saver: optional saver; when given, the figure is written to
            the "ale_comp" sub-directory.
        single_figsize: per-subplot size; total size scales with the grid.
        legend_bbox, legend, legend_labels: legend placement / override.
        fig, axes: pass both to draw on an existing figure, or neither.

    Raises:
        ValueError: if exactly one of `fig`/`axes` is given.
    """
    assert set(experiment_data) == set(experiment_plot_kwargs)
    plotted_experiments = set()
    # Compile data for later plotting.
    comp_data = {}
    for lag in tqdm(lags, desc="Lags", disable=not verbose):
        if lag:
            feature = f"{feature_name} {-lag} Month"
        else:
            feature = feature_name
        feature_data = defaultdict(list)
        experiment_count = 0
        for experiment, single_experiment_data in experiment_data.items():
            # Skip experiments that do not contain this feature.
            if feature not in single_experiment_data["X_train"]:
                continue
            experiment_count += 1
            plotted_experiments.add(experiment)
            # Data required to calculate the ALEs.
            feature_data["experiment"].append(experiment)
            feature_data["single_experiment_data"].append(single_experiment_data)
            feature_data["plot_kwargs"].append(experiment_plot_kwargs[experiment])
        if experiment_count <= 1:
            # We need at least two models for a comparison.
            continue
        comp_data[feature] = feature_data
    n_plots = len(comp_data)
    n_cols = 2
    n_rows = math.ceil(n_plots / n_cols)
    if fig is None and axes is None:
        fig, axes = plt.subplots(
            n_rows,
            n_cols,
            figsize=np.array(single_figsize) * np.array([n_cols, n_rows]),
        )
    elif fig is not None and axes is not None:
        pass
    else:
        raise ValueError("Either both or none of fig and axes need to be given.")
    # Disable unused axes.
    if len(axes.flatten()) > n_plots:
        for ax in axes.flatten()[-(len(axes.flatten()) - n_plots) :]:
            ax.axis("off")
    for ax, feature, feature_data in zip(axes.flatten(), comp_data, comp_data.values()):
        single_ax_multi_ale_1d(
            ax,
            feature_data=feature_data,
            feature=feature,
            bins=bins,
            xlabel=add_units(shorten_features(feature).replace(fill_name(""), "")),
            n_jobs=n_jobs,
            verbose=verbose,
        )

    # Compact scientific y-tick labels: "0.0e+0" -> "0", "1.0e-2" -> "1e-2".
    @ticker.FuncFormatter
    def major_formatter(x, pos):
        t = np.format_float_scientific(x, precision=1, unique=False, exp_digits=1)
        if t == "0.0e+0":
            return "0"
        elif ".0" in t:
            return t.replace(".0", "")
        return t

    for ax in axes.flatten()[:n_plots]:
        ax.yaxis.set_major_formatter(major_formatter)
    for row_axes in axes:
        row_axes[0].set_ylabel("ALE (BA)")
    fig.tight_layout()
    # Build a shared legend from the experiments that were actually plotted.
    lines = []
    labels = []
    for experiment in sort_experiments(plotted_experiments):
        lines.append(Line2D([0], [0], **experiment_plot_kwargs[experiment]))
        labels.append(experiment_plot_kwargs[experiment]["label"])
    labels = labels if legend_labels is None else legend_labels
    if legend:
        fig.legend(
            lines,
            labels,
            loc="center",
            bbox_to_anchor=legend_bbox,
            ncol=len(labels) if len(labels) <= 6 else 6,
        )
    if figure_saver is not None:
        figure_saver.save_figure(
            fig,
            f"{shorten_features(feature_name).replace(' ', '_').lower()}_ale_comp",
            sub_directory="ale_comp",
        )
| 6,815 | 2,225 |
#!/usr/bin/python3
"""Re-format benchmark results from tana_res.txt into form.txt.

The input consists of repeated records:
  line 1: log file name
  line 2: key (e.g. "nprobe,refine_nprobe")
  lines 3..(2+column_num): one value per line
The output contains one line per record, sorted by key.
"""
import sys


def read_records(f, column_num):
    """Parse all records from the open file object `f`.

    Args:
        f: readable text-file-like object in the format described above.
        column_num: number of value lines per record.

    Returns:
        dict mapping key -> list of `column_num` stripped value strings.
    """
    records = {}
    while True:
        line = f.readline()  # Line 1: log file name
        if not line:
            break
        print(line.strip())
        key = f.readline().strip()  # Line 2: the key == nprobe, refine_nprobe
        # Fixed: the original printed "key: ${0}" with a stray shell-style '$'.
        print("key: {0}".format(key))
        records[key] = [f.readline().strip() for _ in range(column_num)]  # Line 3-n
    return records


def write_records(records, outfile):
    """Write records to `outfile`, sorted by key: "<key> v1 v2 ... \\n"."""
    with open(outfile, 'w') as f:
        for key, values in sorted(records.items()):
            f.write(key + " ")
            for val in values:
                f.write(val)
                f.write(' ')
            f.write('\n')


if __name__ == "__main__":
    # int() instead of eval(): command-line input is untrusted.
    column_num = int(sys.argv[1])
    print("ARGUMENT column_num: ", column_num)
    with open("tana_res.txt") as f:
        records = read_records(f, column_num)
    write_records(records, "form.txt")
| 722 | 315 |
import os
from kloppy import DatafactorySerializer
from kloppy.domain import (
AttackingDirection,
Ground,
Orientation,
Period,
Point,
Provider,
SetPieceType,
)
from kloppy.domain.models.common import DatasetType
class TestDatafactory:
    """Deserialization tests for the DataFactory event serializer."""

    def test_correct_deserialization(self):
        """Events, metadata, teams, players, periods and qualifiers are read
        correctly when keeping the provider's own coordinate system."""
        base_dir = os.path.dirname(__file__)
        serializer = DatafactorySerializer()
        with open(
            f"{base_dir}/files/datafactory_events.json", "r"
        ) as event_data:
            dataset = serializer.deserialize(
                inputs={"event_data": event_data},
                options={"coordinate_system": Provider.DATAFACTORY},
            )

        metadata = dataset.metadata
        events = dataset.events
        home_team = metadata.teams[0]
        away_team = metadata.teams[1]

        assert metadata.provider == Provider.DATAFACTORY
        assert dataset.dataset_type == DatasetType.EVENT
        assert len(events) == 1027
        assert len(metadata.periods) == 2
        assert events[10].ball_owning_team == away_team
        assert events[23].ball_owning_team == home_team
        assert metadata.orientation == Orientation.HOME_TEAM
        assert home_team.name == "Team A"
        assert home_team.ground == Ground.HOME
        assert away_team.name == "Team B"
        assert away_team.ground == Ground.AWAY

        player = home_team.players[0]
        assert player.player_id == "38804"
        assert player.jersey_no == 1
        assert str(player) == "Daniel Bold"
        assert player.position is None  # not set
        assert player.starting

        assert metadata.periods[0] == Period(
            id=1,
            start_timestamp=0,
            end_timestamp=2912,
            attacking_direction=AttackingDirection.HOME_AWAY,
        )
        assert metadata.periods[1] == Period(
            id=2,
            start_timestamp=2700,
            end_timestamp=5710,
            attacking_direction=AttackingDirection.AWAY_HOME,
        )

        assert events[0].coordinates == Point(0.01, 0.01)
        # Check the qualifiers
        assert events[0].qualifiers[0].value == SetPieceType.KICK_OFF
        assert events[412].qualifiers[0].value == SetPieceType.THROW_IN

    def test_correct_normalized_deserialization(self):
        """Without an explicit coordinate system, coordinates are normalized."""
        base_dir = os.path.dirname(__file__)
        serializer = DatafactorySerializer()
        with open(
            f"{base_dir}/files/datafactory_events.json", "r"
        ) as event_data:
            dataset = serializer.deserialize(
                inputs={"event_data": event_data},
            )
        assert dataset.events[0].coordinates == Point(0.505, 0.505)
| 2,743 | 835 |
import tensorflow as tf
from layers.attention import stacked_multihead_attention
from layers.recurrent import rnn_layer
from layers.similarity import manhattan_similarity
from models.base_model import BaseSiameseNet
class LSTMATTBasedSiameseNet(BaseSiameseNet):
    """Siamese network: a shared RNN encoder followed by stacked multi-head
    self-attention blocks; twin outputs are compared with Manhattan
    similarity (TensorFlow 1.x graph-mode model)."""

    def __init__(
        self,
        max_sequence_len,
        vocabulary_size,
        main_cfg,
        model_cfg,
    ):
        # Delegates all graph construction to the base class, which
        # presumably calls `siamese_layer` below — confirm in BaseSiameseNet.
        BaseSiameseNet.__init__(
            self,
            max_sequence_len,
            vocabulary_size,
            main_cfg,
            model_cfg,
        )

    def siamese_layer(
        self,
        sequence_len,
        model_cfg,
    ):
        """Build the twin towers over self.embedded_x1/x2 and return
        (manhattan_similarity, tower1_vector, tower2_vector)."""
        #print(model_cfg)
        # Hyper-parameters read from a configparser-style 'PARAMS' section.
        hidden_size = model_cfg['PARAMS'].getint('hidden_size')
        cell_type = model_cfg['PARAMS'].get('cell_type')
        num_blocks = model_cfg['PARAMS'].getint('num_blocks')
        num_heads = model_cfg['PARAMS'].getint('num_heads')
        use_residual = model_cfg['PARAMS'].getboolean('use_residual')
        dropout_rate = model_cfg['PARAMS'].getfloat('dropout_rate')
        # NOTE(review): despite the BiLSTM_* names, bidirectional=False —
        # these are unidirectional RNN encoders.
        BiLSTM_sen1 = rnn_layer(
            embedded_x=self.embedded_x1,
            hidden_size=hidden_size,
            bidirectional=False,
            cell_type=cell_type,
        )
        # reuse=True shares the variables created by the first call, giving
        # true siamese (tied) weights; the call order therefore matters.
        BiLSTM_sen2 = rnn_layer(
            embedded_x=self.embedded_x2,
            hidden_size=hidden_size,
            bidirectional=False,
            cell_type=cell_type,
            reuse=True,
        )
        # Attention maps are stashed in debug_vars for later inspection.
        ATT_out1, self.debug_vars['attentions_x1'] = stacked_multihead_attention(
            BiLSTM_sen1,
            num_blocks=num_blocks,
            num_heads=num_heads,
            use_residual=use_residual,
            is_training=self.is_training,
            dropout_rate=dropout_rate,
        )
        ATT_out2, self.debug_vars['attentions_x2'] = stacked_multihead_attention(
            BiLSTM_sen2,
            num_blocks=num_blocks,
            num_heads=num_heads,
            use_residual=use_residual,
            is_training=self.is_training,
            dropout_rate=dropout_rate,
            reuse=True,
        )
        # Mean-pool over the time axis to obtain fixed-size sentence vectors.
        out1 = tf.reduce_mean(ATT_out1, axis=1)
        out2 = tf.reduce_mean(ATT_out2, axis=1)
        return manhattan_similarity(out1, out2), out1, out2
| 2,308 | 759 |
# Krzysztof Joachimiak 2017
# sciquence: Time series & sequences in Python
#
# Sliding window
# Author: Krzysztof Joachimiak
#
# License: MIT
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import Imputer
class SlidingWindow(BaseEstimator, TransformerMixin):
    # TODO: check reference
    '''
    Class dedicated for simple sliding window transformations.
    It transforms only the one input sequence:
    for parallel X and y vectors processing see WindowsMaker

    Parameters
    ----------
    window_size: int, default 5
        Size of sliding window
    shift: int
        Shift of sliding window, default 1

    Attributes
    ----------
    window_size_: int
        Size of sliding window
    shift_: int
        Shift of sliding window

    Examples
    --------
    >>> from sciquence.sliding_window import SlidingWindow
    >>> import numpy as np
    >>> X = np.array([0, 1, 23, 3, 4, 67, 89, 11, 2, 34])
    >>> print(SlidingWindow(window_size=3, shift=4).transform(X))
    [array([ 0,  1, 23]), array([ 4, 67, 89])]

    References
    ----------
    Dietterich Thomas G.
    Machine Learning for Sequential Data: A Review
    http://web.engr.oregonstate.edu/~tgd/publications/mlsd-ssspr.pdf

    See Also
    --------
    WindowsMaker
    '''

    def __init__(self, window_size=5, shift=1):
        self.window_size_ = window_size
        self.shift_ = shift

    def fit(self, X, y=None):
        '''
        Mock method, does nothing.

        Parameters
        ----------
        X: ndarray (n_samples, n_features)
            A sequence

        Returns
        -------
        self: SlidingWindow
            Returns self
        '''
        return self

    def transform(self, X, y=None):
        # TODO: check array shape
        '''
        Transform sequence into sequence of contextual sliding_window

        Parameters
        ----------
        X: ndarray (n_samples, n_features)
            A sequence

        Returns
        -------
        windows: list
            List of windows (slices of X of length window_size_,
            taken every shift_ positions)
        '''
        windows = []
        range_end = len(X) - self.window_size_ + 1
        # range() instead of the Python-2-only xrange(), and plain slicing
        # instead of fancy indexing — slicing also works for lists and
        # tuples, not only numpy arrays.
        for i in range(0, range_end, self.shift_):
            windows.append(X[i:i + self.window_size_])
        return windows
| 2,237 | 710 |
"""Module containing the logic for an arithmetic parser.
Lark is used as a parser generator.
"""
from typing import Mapping
import operator
import lark
# Default syntax for arithmetic expressions
ARITHM_EXPRESSIONS_SYNTAX = """
?start: or
?or: and
| or "||" and -> or_
| or "or" and -> or_
?and: comparison
| and "&&" comparison -> and_
| and "and" comparison -> and_
?comparison: sum
| comparison "<" sum -> lt
| comparison ">" sum -> gt
| comparison "<=" sum -> lte
| comparison ">=" sum -> gte
| comparison "==" sum -> eq
| comparison "!=" sum -> neq
?sum: product
| sum "+" product -> add
| sum "-" product -> sub
?product: not
| product "*" not -> mul
| product "/" not -> div
| product "//" not -> floordiv
?not: atom
| "!"atom -> not_
| "not" atom -> not_
?atom: NUMBER -> number
| "-" atom -> neg
| "False" -> false
| "True" -> true
| NAME -> var
| STRING -> string
| "(" or ")"
%import common.ESCAPED_STRING -> STRING
%import common.CNAME -> NAME
%import common.NUMBER
%import common.WS_INLINE
%ignore WS_INLINE
"""
@lark.v_args(inline=True)
class ArithmExpressionTransformer(lark.Transformer):
    """Transformer used to calculate the result of arithmetic exp.

    Rule aliases from the grammar resolve to the attributes/methods below;
    `lark.v_args(inline=True)` passes each node's children as positional
    arguments.
    """

    # Arithmetic / comparison rules map directly onto operator functions.
    add = operator.add
    sub = operator.sub
    mul = operator.mul
    div = operator.truediv
    floordiv = operator.floordiv
    neg = operator.neg
    lt = operator.lt
    gt = operator.gt
    lte = operator.le
    gte = operator.ge
    eq = operator.eq
    neq = operator.ne

    def __init__(self, variables):
        # Initialise the base lark.Transformer (the original skipped this,
        # bypassing the base class's own setup).
        super().__init__()
        # Mapping used by `var` to resolve free variable names.
        self.variables = variables

    def and_(self, x, y):
        """Boolean and.

        For simplicity, an easy implementation of boolean operators
        which will NOT provide short-circuiting.
        """
        return x and y

    def or_(self, x, y):
        """Boolean or.

        For simplicity, an easy implementation of boolean operators
        which will NOT provide short-circuiting.
        """
        return x or y

    not_ = operator.not_
    number = float

    def string(self, string: str):
        """Strip the surrounding quotes from an ESCAPED_STRING token."""
        return string.strip('"')

    def true(self):
        return True

    def false(self):
        return False

    def var(self, name):
        """Resolve a free variable; raises KeyError for unknown names."""
        return self.variables[name]
# Default parser for arithmetic expressions (using the default syntax)
ARITHM_EXPRESSIONS_PARSER = lark.Lark(ARITHM_EXPRESSIONS_SYNTAX)


def arithm_expression_evaluate(
        expression: str, variables: Mapping,
        parser: lark.Lark = ARITHM_EXPRESSIONS_PARSER,
        transformer: lark.Transformer = ArithmExpressionTransformer):
    """Return the value of the given expression.

    Free variable names will be calculated using the 'variables'
    mapping.
    """
    tree = parser.parse(expression)
    evaluator = transformer(variables)
    return evaluator.transform(tree)
| 2,923 | 891 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
from opendr.perception.object_detection_3d.voxel_object_detection_3d.\
second_detector.torchplus_tanet.train import learning_schedules
def build(optimizer_config, optimizer, last_step=-1):
    """Create lr scheduler based on config. note that
    lr_scheduler must accept a optimizer that has been restored.

    Args:
        optimizer_config: A Optimizer proto message.
        optimizer: optimizer whose learning rate is scheduled.
        last_step: step to resume from (-1 starts fresh).

    Returns:
        A learning-rate scheduler.

    Raises:
        ValueError: when using an unsupported optimizer type.
    """
    optimizer_type = optimizer_config.WhichOneof("optimizer")
    # All supported optimizer messages expose a `learning_rate` field, so the
    # scheduler construction is identical for each of them.  The explicit
    # check replaces the original's UnboundLocalError on unknown types.
    if optimizer_type not in (
        "rms_prop_optimizer",
        "momentum_optimizer",
        "adam_optimizer",
    ):
        raise ValueError("Optimizer %s not supported." % optimizer_type)
    config = getattr(optimizer_config, optimizer_type)
    return _create_learning_rate_scheduler(
        config.learning_rate, optimizer, last_step=last_step
    )
def build_online(learning_rate_type, learning_rate_config, optimizer, last_step=-1):
    """Thin wrapper building a scheduler from a plain (dict) online config."""
    return _create_learning_rate_scheduler_online(
        learning_rate_type, learning_rate_config, optimizer, last_step=last_step
    )
def _create_learning_rate_scheduler(learning_rate_config, optimizer, last_step=-1):
    """Create optimizer learning rate scheduler based on config.

    Args:
        learning_rate_config: A LearningRate proto message.
        optimizer: optimizer whose learning rate is scheduled.
        last_step: step to resume from (-1 starts fresh).

    Returns:
        A learning rate scheduler.

    Raises:
        ValueError: when using an unsupported input data type, or for an
            empty manual-step schedule.
    """
    lr_scheduler = None
    learning_rate_type = learning_rate_config.WhichOneof("learning_rate")
    # WhichOneof yields a single type, so the branches are mutually
    # exclusive — expressed as an elif chain (the original re-tested each).
    if learning_rate_type == "constant_learning_rate":
        # The constant message carries no parameters.
        lr_scheduler = learning_schedules.Constant(optimizer, last_step=last_step)
    elif learning_rate_type == "exponential_decay_learning_rate":
        config = learning_rate_config.exponential_decay_learning_rate
        lr_scheduler = learning_schedules.ExponentialDecay(
            optimizer,
            config.decay_steps,
            config.decay_factor,
            config.staircase,
            last_step=last_step,
        )
    elif learning_rate_type == "manual_step_learning_rate":
        config = learning_rate_config.manual_step_learning_rate
        if not config.schedule:
            raise ValueError("Empty learning rate schedule.")
        learning_rate_step_boundaries = [x.step for x in config.schedule]
        learning_rate_sequence = [config.initial_learning_rate]
        learning_rate_sequence += [x.learning_rate for x in config.schedule]
        lr_scheduler = learning_schedules.ManualStepping(
            optimizer,
            learning_rate_step_boundaries,
            learning_rate_sequence,
            last_step=last_step,
        )
    elif learning_rate_type == "cosine_decay_learning_rate":
        config = learning_rate_config.cosine_decay_learning_rate
        lr_scheduler = learning_schedules.CosineDecayWithWarmup(
            optimizer,
            config.total_steps,
            config.warmup_learning_rate,
            config.warmup_steps,
            last_step=last_step,
        )
    if lr_scheduler is None:
        raise ValueError("Learning_rate %s not supported." % learning_rate_type)
    return lr_scheduler
def _create_learning_rate_scheduler_online(
    learning_rate_type, config, optimizer, last_step=-1
):
    """Create a learning-rate scheduler from a plain dict config.

    Args:
        learning_rate_type: name of the scheduler type (same set as the
            proto-based `_create_learning_rate_scheduler`).
        config: dict holding the scheduler parameters.
        optimizer: optimizer whose learning rate is scheduled.
        last_step: step to resume from (-1 starts fresh).

    Returns:
        A learning-rate scheduler.

    Raises:
        ValueError: for an unsupported type or an empty manual schedule.
    """
    lr_scheduler = None
    if learning_rate_type == "constant_learning_rate":
        lr_scheduler = learning_schedules.Constant(optimizer, last_step=last_step)
    elif learning_rate_type == "exponential_decay_learning_rate":
        lr_scheduler = learning_schedules.ExponentialDecay(
            optimizer,
            config["decay_steps"],
            config["decay_factor"],
            config["staircase"],
            last_step=last_step,
        )
    elif learning_rate_type == "manual_step_learning_rate":
        if "schedule" not in config:
            raise ValueError("Empty learning rate schedule.")
        # NOTE(review): schedule entries are read by attribute (x.step),
        # while the surrounding config is a dict — presumably the entries
        # are proto messages; confirm against callers before changing.
        learning_rate_step_boundaries = [x.step for x in config["schedule"]]
        # Bug fix: the config is a dict here, so the original attribute
        # access (config.initial_learning_rate) raised AttributeError.
        learning_rate_sequence = [config["initial_learning_rate"]]
        learning_rate_sequence += [x.learning_rate for x in config["schedule"]]
        lr_scheduler = learning_schedules.ManualStepping(
            optimizer,
            learning_rate_step_boundaries,
            learning_rate_sequence,
            last_step=last_step,
        )
    elif learning_rate_type == "cosine_decay_learning_rate":
        lr_scheduler = learning_schedules.CosineDecayWithWarmup(
            optimizer,
            config["total_steps"],
            config["warmup_learning_rate"],
            config["warmup_steps"],
            last_step=last_step,
        )
    if lr_scheduler is None:
        raise ValueError("Learning_rate %s not supported." % learning_rate_type)
    return lr_scheduler
| 5,996 | 1,796 |
# Demo script: query WMI through the `windows` package and print the results.
import sys
import os.path
import pprint
# NOTE(review): "\..\.." relies on backslash path separators, so this
# path fix-up is Windows-only (consistent with the `windows` package).
sys.path.append(os.path.abspath(__file__ + "\..\.."))
import windows

print("WMI requester is {0}".format(windows.system.wmi))
print("Selecting * from 'Win32_Process'")
result = windows.system.wmi.select("Win32_Process")
print("They are <{0}> processes".format(len(result)))
print("Looking for ourself via pid")
# Find this process's own WMI record by matching ProcessId against our pid.
us = [p for p in result if int(p["ProcessId"]) == windows.current_process.pid][0]
print("Some info about our process:")
print(" * {0} -> {1}".format("Name", us["Name"]))
print(" * {0} -> {1}".format("ProcessId", us["ProcessId"]))
print(" * {0} -> {1}".format("OSName", us["OSName"]))
print(" * {0} -> {1}".format("UserModeTime", us["UserModeTime"]))
print(" * {0} -> {1}".format("WindowsVersion", us["WindowsVersion"]))
print(" * {0} -> {1}".format("CommandLine", us["CommandLine"]))
# A projected query: only the listed properties are fetched.
print("<Select Caption,FileSystem,FreeSpace from Win32_LogicalDisk>:")
for vol in windows.system.wmi.select("Win32_LogicalDisk", ["Caption", "FileSystem", "FreeSpace"]):
    print(" * " + str(vol))
print("\n ==== Advanced use ====")
print("Listing some namespaces:")
for namespace in [ns for ns in windows.system.wmi.namespaces if "2" in ns]:
    print(" * {0}".format(namespace))
# Namespaces can be addressed directly by path.
security2 = windows.system.wmi["root\\SecurityCenter2"]
print("Querying non-default namespace: {0}".format(security2))
print("Listing some available classes:")
for clsname in [x for x in security2.classes if x.endswith("Product")]:
    print(" * {0}".format(clsname))
print("Listing <AntiVirusProduct>:")
for av in security2.select("AntiVirusProduct"):
    print(" * {0}".format(av["displayName"]))
| 1,656 | 576 |
"""Read whitespace-separated integers from stdin and print, in input order,
every value that occurs exactly once."""
from collections import Counter


def unique_values(values):
    """Return the values occurring exactly once, preserving input order.

    Counter makes this O(n) instead of the O(n^2) per-element list.count().
    """
    counts = Counter(values)
    return [v for v in values if counts[v] == 1]


if __name__ == "__main__":
    for value in unique_values([int(tok) for tok in input().split()]):
        print(value)
| 92 | 43 |
"""
This script loads Google and Apple Mobility reports, builds cleaned reports in different formats and builds merged files from both sources.
Original data:
- Google Community Mobility reports: https://www.google.com/covid19/mobility/
- Apple Mobility Trends reports: https://www.apple.com/covid19/mobility
"""
import os
import datetime
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import re
import json
import pandas as pd
def get_google_link():
    '''Get link of Google Community Mobility report file

    Returns:
        link (str): link of Google Community report file
    '''
    # The landing page exposes a single CSV download anchor ("icon-link").
    page = requests.get('https://www.google.com/covid19/mobility/')
    soup = BeautifulSoup(page.text, "html.parser")
    csv_anchor = soup.find('a', {"class": "icon-link"})
    return csv_anchor['href']
def download_google_report(directory="google_reports"):
    '''Download Google Community Mobility report in CSV format

    Args:
        directory: directory to which CSV report will be downloaded

    Returns:
        new_files (bool): flag indicating whether or not new files have been downloaded
    '''
    new_files = False
    # create directory if it don't exist
    if not os.path.exists(directory):
        os.makedirs(directory)
    # download CSV file
    link = get_google_link()
    file_name = "Global_Mobility_Report.csv"
    path = os.path.join(directory, file_name)
    if not os.path.isfile(path):
        # First download: no existing file to compare against.
        new_files = True
        urllib.request.urlretrieve(link, path)
    else:
        # Download next to the existing file, then compare sizes to decide
        # whether the upstream report changed.  NOTE(review): equal file
        # size is used as a proxy for identical content — a same-size
        # update would be missed.
        path_new = os.path.join(directory, file_name + "_new")
        urllib.request.urlretrieve(link, path_new)
        if os.path.getsize(path) == os.path.getsize(path_new):
            os.remove(path_new)
        else:
            new_files = True
            os.remove(path)
            os.rename(path_new, path)
    if not new_files:
        print('Google: No updates')
    else:
        print('Google: Update available')
    return new_files
def build_google_report(
        source=os.path.join("google_reports", "Global_Mobility_Report.csv"),
        report_type="regions"):
    '''Build cleaned Google report for the worldwide or for some country (currently only for the US)

    Args:
        source: location of the raw Google CSV report (path or file-like)
        report_type: two options available: "regions" - report for the worldwide, "US" - report for the US

    Returns:
        google (DataFrame): generated Google report
    '''
    google = pd.read_csv(source, low_memory=False)
    # Shorten the verbose mobility column names and use spaces, e.g.
    # "retail_and_recreation_percent_change_from_baseline"
    # -> "retail and recreation".
    google.columns = google.columns.str.replace(
        r'_percent_change_from_baseline', '')
    google.columns = google.columns.str.replace(r'_', ' ')
    google = google.rename(columns={'country region': 'country'})
    if report_type == "regions":
        # Keep only country- and region-level rows (no counties).
        google = google[google['sub region 2'].isnull()]
        google = google.rename(columns={'sub region 1': 'region'})
        google = google.loc[:,
                            ['country',
                             'region',
                             'date',
                             'retail and recreation',
                             'grocery and pharmacy',
                             'parks',
                             'transit stations',
                             'workplaces',
                             'residential']]
        # Plain assignment instead of inplace fillna: `google` is derived
        # from a slice, and chained inplace mutation triggers pandas'
        # SettingWithCopyWarning and may silently not apply.
        google['region'] = google['region'].fillna('Total')
    elif report_type == "US":
        google = google[(google['country'] == "United States")]
        google = google.rename(
            columns={
                'sub region 1': 'state',
                'sub region 2': 'county'})
        google = google.loc[:,
                            ['state',
                             'county',
                             'date',
                             'retail and recreation',
                             'grocery and pharmacy',
                             'parks',
                             'transit stations',
                             'workplaces',
                             'residential']]
        google['state'] = google['state'].fillna('Total')
        google['county'] = google['county'].fillna('Total')
    return google
def get_apple_link():
    '''Get link of Apple Mobility Trends report file

    Returns:
        link (str): link of Apple Mobility Trends report file
    '''
    # get link via API
    json_link = "https://covid19-static.cdn-apple.com/covid19-mobility-data/current/v3/index.json"
    with urllib.request.urlopen(json_link) as url:
        index = json.loads(url.read().decode())
    base_url = "https://covid19-static.cdn-apple.com"
    return base_url + index['basePath'] + index['regions']['en-us']['csvPath']
def download_apple_report(directory="apple_reports"):
    '''Download Apple Mobility Trends report in CSV

    Args:
        directory: directory to which CSV report will be downloaded

    Returns:
        new_files (bool): flag indicating whether or not a new file has been downloaded
    '''
    new_files = False
    if not os.path.exists(directory):
        os.makedirs(directory)
    link = get_apple_link()
    file_name = "applemobilitytrends.csv"
    path = os.path.join(directory, file_name)
    if not os.path.isfile(path):
        # First download: nothing to compare against.
        new_files = True
        urllib.request.urlretrieve(link, path)
    else:
        # Same update-detection scheme as download_google_report: fetch to a
        # sibling "_new" file, then compare sizes.  NOTE(review): equal size
        # is a proxy for identical content — a same-size update is missed.
        path_new = os.path.join(directory, file_name + "_new")
        urllib.request.urlretrieve(link, path_new)
        if os.path.getsize(path) == os.path.getsize(path_new):
            os.remove(path_new)
        else:
            new_files = True
            os.remove(path)
            os.rename(path_new, path)
    if not new_files:
        print('Apple: No updates')
    else:
        print('Apple: Update available')
    return new_files
def build_apple_report(
        source=os.path.join(
            'apple_reports',
            "applemobilitytrends.csv"),
        report_type="regions"):
    '''Build cleaned Apple report (transform dates from columns to rows, add country names for subregions and cities)
    for the worldwide or for some country (currently only for the US)

    Args:
        source: location of the raw Apple CSV report
        report_type: two options available: "regions" - report for the worldwide, "US" - report for the US

    Returns:
        apple (DataFrame): generated Apple report
    '''
    apple = pd.read_csv(source)
    apple = apple.drop(columns=['alternative_name'])
    # For country-level rows the country name lives in `region`; copy it
    # into `country` so every row has one.
    apple['country'] = apple.apply(
        lambda x: x['region'] if x['geo_type'] == 'country/region' else x['country'],
        axis=1)
    if report_type == 'regions':
        apple = apple[apple.geo_type != 'county']
        # Normalise the geographic hierarchy: countries get 'Total',
        # sub-regions use their own name.
        apple['sub-region'] = apple.apply(lambda x: 'Total' if x['geo_type'] == 'country/region' else (
            x['region'] if x['geo_type'] == 'sub-region' else x['sub-region']), axis=1)
        apple['subregion_and_city'] = apple.apply(
            lambda x: 'Total' if x['geo_type'] == 'country/region' else x['region'], axis=1)
        apple = apple.drop(columns=['region'])
        apple['sub-region'] = apple['sub-region'].fillna(
            apple['subregion_and_city'])
        # Wide -> long: one row per (place, transportation_type, date).
        apple = apple.melt(
            id_vars=[
                'geo_type',
                'subregion_and_city',
                'sub-region',
                'transportation_type',
                'country'],
            var_name='date')
        # Re-base: raw values are indexed to 100 at the baseline date.
        apple['value'] = apple['value'] - 100
        # Long -> wide again, but with one column per transportation type.
        apple = apple.pivot_table(
            index=[
                "geo_type",
                "subregion_and_city",
                "sub-region",
                "date",
                "country"],
            columns='transportation_type').reset_index()
        # Flatten the ("value", <type>) MultiIndex into plain column names.
        apple.columns = [t + (v if v != "value" else "")
                         for v, t in apple.columns]
        apple = apple.loc[:,
                          ['country',
                           'sub-region',
                           'subregion_and_city',
                           'geo_type',
                           'date',
                           'driving',
                           'transit',
                           'walking']]
        apple = apple.sort_values(by=['country',
                                      'sub-region',
                                      'subregion_and_city',
                                      'date']).reset_index(drop=True)
    elif report_type == "US":
        apple = apple[apple.country == "United States"].drop(columns=[
            'country'])
        apple['sub-region'] = apple['sub-region'].fillna(
            apple['region']).replace({"United States": "Total"})
        apple['region'] = apple.apply(lambda x: x['region'] if (
            x['geo_type'] == 'city' or x['geo_type'] == 'county') else 'Total', axis=1)
        apple = apple.rename(
            columns={
                'sub-region': 'state',
                'region': 'county_and_city'})
        # Same melt -> rebase -> pivot pipeline as the "regions" branch,
        # but keyed on state/county instead of country/sub-region.
        apple = apple.melt(
            id_vars=[
                'geo_type',
                'state',
                'county_and_city',
                'transportation_type'],
            var_name='date')
        apple['value'] = apple['value'] - 100
        apple = apple.pivot_table(
            index=[
                'geo_type',
                'state',
                'county_and_city',
                'date'],
            columns='transportation_type').reset_index()
        apple.columns = [t + (v if v != "value" else "")
                         for v, t in apple.columns]
        apple = apple.loc[:, ['state', 'county_and_city', 'geo_type',
                              'date', 'driving', 'transit', 'walking']]
        apple = apple.sort_values(
            by=['state', 'county_and_city', 'geo_type', 'date']).reset_index(drop=True)
    return apple
def check_waze_report(countries_source = os.path.join("waze_reports", "Waze_Country-Level_Data.csv"),
                      cities_source = os.path.join("waze_reports", "Waze_City-Level_Data.csv"),
                      report_source = os.path.join("waze_reports", "waze_mobility.csv")):
    '''Checks if new raw Waze CSV reports have been added.
    Args:
        countries_source: location of the raw Waze country-level CSV report
        cities_source: location of the raw Waze city-level CSV report
        report_source: location of the Waze CSV report generated by build_waze_report function (if available)
    Returns:
        new_files (bool): flag indicating whether or not new raw Waze CSV reports have been added
    '''
    def _count_rows(path):
        # Number of physical lines in the file (header included).
        with open(path) as handle:
            return sum(1 for _ in handle)

    if not os.path.isfile(report_source):
        # No generated report yet, so anything raw counts as new.
        new_files = True
    else:
        # The merged report carries one header row for the two raw files,
        # hence the "+ 1" on the comparison.
        raw_rows = _count_rows(countries_source) + _count_rows(cities_source)
        new_files = raw_rows != _count_rows(report_source) + 1
    print('Waze: Update available' if new_files else 'Waze: No updates')
    return new_files
def build_waze_report(countries_source=os.path.join("waze_reports", "Waze_Country-Level_Data.csv"),
                      cities_source=os.path.join("waze_reports", "Waze_City-Level_Data.csv")):
    '''Build cleaned Waze report (transform dates from string to date format, merge country&city-level data,
       add geo_type column)
    Args:
        countries_source: location of the raw Waze country-level CSV report
        cities_source: location of the raw Waze city-level CSV report
    Returns:
        waze (DataFrame): generated Waze report
    '''
    waze_countries = pd.read_csv(countries_source, parse_dates=['Date'])
    waze_cities = pd.read_csv(cities_source, parse_dates=['Date'])
    # Country rows have no city: mark them as the country total.
    waze_countries['City'] = 'Total'
    waze_countries['geo_type'] = 'country'
    waze_cities['geo_type'] = 'city'
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported replacement and behaves identically here
    # (the index is rebuilt by reset_index below anyway).
    waze = pd.concat([waze_countries, waze_cities])
    waze = waze.rename(columns={'Country': 'country', 'City': 'city',
                                'Date': 'date', '% Change In Waze Driven Miles/KMs': 'driving_waze'})
    # Express the raw fraction as a percentage.
    waze['driving_waze'] = waze['driving_waze'] * 100
    waze['date'] = waze['date'].dt.date
    waze = waze.loc[:, ['country', 'city', 'geo_type', 'date', 'driving_waze']]
    waze = waze.sort_values(by=['country', 'city', 'geo_type', 'date']).reset_index(drop=True)
    return waze
def build_summary_report(apple_source, google_source, report_type="regions"):
    '''Build a merged report from Google and Apple data
    Args:
        apple_source: location of the CSV report generated by build_apple_report function
        google_source: location of the CSV report generated by build_google_report function
        report_type: two options available: "regions" - report for the worldwide, "US" - report for the US
    Returns:
        summary (DataFrame): merged report from Google and Apple data
    '''
    apple = pd.read_csv(apple_source, low_memory=False)
    google = pd.read_csv(google_source, low_memory=False)
    summary = pd.DataFrame()

    def _load_mapping(path):
        # Optional translation table (Apple naming -> Google naming).
        return pd.read_csv(path, index_col=0) if os.path.isfile(path) else None

    def _translate(value, mapping, column):
        # Fall back to the original value when no mapping entry exists.
        if mapping is not None and value in mapping.index:
            return mapping.loc[value, column]
        return value

    if report_type == "regions":
        # Worldwide report: align Apple's columns with Google's schema.
        apple = apple.rename(columns={'subregion_and_city': 'region'})
        apple = apple.loc[:, ['country', 'region',
                              'date', 'driving', 'transit', 'walking']]
        country_map = _load_mapping(os.path.join(
            'auxiliary_data', 'country_Apple_to_Google.csv'))
        subregion_map = _load_mapping(os.path.join(
            'auxiliary_data', 'subregions_Apple_to_Google.csv'))
        apple['country'] = apple['country'].map(
            lambda name: _translate(name, country_map, 'country_google'))
        apple['region'] = apple['region'].map(
            lambda name: _translate(name, subregion_map, 'subregion_Google'))
        # Outer join keeps rows present in only one of the two sources.
        keys = ['country', 'region', 'date']
        summary = google.set_index(keys).join(apple.set_index(keys), how='outer')
        summary = summary.reset_index(level=keys)
    elif report_type == "US":
        apple = apple.loc[:, ['state', 'county_and_city',
                              'date', 'driving', 'transit', 'walking']]
        # Normalise Apple's "Washington DC" rows to Google's naming.
        apple.loc[apple.state == 'Washington DC',
                  'state'] = 'District of Columbia'
        apple.loc[apple.county_and_city ==
                  'Washington DC', 'county_and_city'] = 'Total'
        google = google.rename(columns={'county': 'county_and_city'})
        keys = ['state', 'county_and_city', 'date']
        summary = google.set_index(keys).join(apple.set_index(keys), how='outer')
        summary = summary.reset_index(level=keys)
    return summary
def run():
    """Run parse flow and build reports"""
    def _export(frame, folder, stem):
        # Persist one report to both CSV and Excel side by side.
        frame.to_csv(os.path.join(folder, stem + ".csv"), index=False)
        frame.to_excel(os.path.join(folder, stem + ".xlsx"),
                       index=False, sheet_name='Data', engine='xlsxwriter')

    # Google reports: rebuild only when a fresh download landed.
    google_updated = download_google_report()
    if google_updated:
        _export(build_google_report(), "google_reports", "mobility_report_countries")
        _export(build_google_report(report_type="US"), "google_reports", "mobility_report_US")

    # Apple reports.
    apple_updated = download_apple_report()
    if apple_updated:
        _export(build_apple_report(), "apple_reports", "apple_mobility_report")
        _export(build_apple_report(report_type="US"), "apple_reports", "apple_mobility_report_US")

    # Waze reports.
    if check_waze_report():
        _export(build_waze_report(), "waze_reports", "waze_mobility")

    # Merge Apple and Google into the summary reports.
    if apple_updated or google_updated:
        print("Merging reports...")
        summary_regions = build_summary_report(
            os.path.join("apple_reports", "apple_mobility_report.csv"),
            os.path.join("google_reports", "mobility_report_countries.csv"))
        summary_US = build_summary_report(
            os.path.join("apple_reports", "apple_mobility_report_US.csv"),
            os.path.join("google_reports", "mobility_report_US.csv"), 'US')
        # Country-level view is just the "Total" region rows.
        summary_countries = summary_regions[summary_regions['region'] == 'Total'].drop(columns=['region'])
        print('Writing merged reports to files...')
        _export(summary_regions, "summary_reports", "summary_report_regions")
        _export(summary_US, "summary_reports", "summary_report_US")
        _export(summary_countries, "summary_reports", "summary_report_countries")
# Allow this module to be executed directly as a script.
if __name__ == '__main__':
    run()
| 20,065 | 6,106 |
class PerformanceList(list):
    """A list of performances exposing aggregate statistics as properties.

    Elements are expected to support dict-style access for 'prize_money',
    'prize_pool', 'result', 'starters' and 'starting_price', and attribute
    access for 'momentum' and 'profit'.
    """

    @property
    def earnings(self):
        """Total prize money for the performances in this list (0.00 if none)."""
        monies = [p['prize_money'] for p in self if p['prize_money'] is not None]
        return sum(monies) if monies else 0.00

    @property
    def earnings_potential(self):
        """Earnings as a fraction of the combined prize pools (None if unknown)."""
        pools = [p['prize_pool'] for p in self if p['prize_pool'] is not None]
        if not pools:
            return None
        return self.calculate_percentage(self.earnings, sum(pools))

    @property
    def fourths(self):
        """Number of fourth placings."""
        return self.count_results(4)

    @property
    def fourth_pct(self):
        """Fraction of starts that finished fourth."""
        return self.calculate_percentage(self.fourths)

    @property
    def momentums(self):
        """(min, max, mean) momentum, or (None, None, None) when unavailable."""
        values = [p.momentum for p in self if p.momentum is not None]
        if not values:
            return (None, None, None)
        return (min(values), max(values), sum(values) / len(values))

    @property
    def places(self):
        """Number of top-three placings."""
        return self.wins + self.seconds + self.thirds

    @property
    def place_pct(self):
        """Fraction of starts that finished in the top three."""
        return self.calculate_percentage(self.places)

    @property
    def result_potential(self):
        """1 - (sum of results / sum of starters), or None when unknown."""
        results = [p['result'] for p in self if p['result'] is not None]
        starters = [p['starters'] for p in self if p['starters'] is not None]
        if results and starters:
            pct = self.calculate_percentage(sum(results), sum(starters))
            if pct is not None:
                return 1.0 - pct
        return None

    @property
    def roi(self):
        """Total profit divided by the number of starts (None for an empty list)."""
        if not self:
            return None
        return self.calculate_percentage(sum(p.profit for p in self))

    @property
    def seconds(self):
        """Number of second placings."""
        return self.count_results(2)

    @property
    def second_pct(self):
        """Fraction of starts that finished second."""
        return self.calculate_percentage(self.seconds)

    @property
    def starting_prices(self):
        """(min, max, mean) starting price, or (None, None, None) when unavailable."""
        prices = [p['starting_price'] for p in self if p['starting_price'] is not None]
        if not prices:
            return (None, None, None)
        return (min(prices), max(prices), sum(prices) / len(prices))

    @property
    def starts(self):
        """Number of performances recorded."""
        return len(self)

    @property
    def thirds(self):
        """Number of third placings."""
        return self.count_results(3)

    @property
    def third_pct(self):
        """Fraction of starts that finished third."""
        return self.calculate_percentage(self.thirds)

    @property
    def wins(self):
        """Number of winning performances."""
        return self.count_results(1)

    @property
    def win_pct(self):
        """Fraction of starts that won."""
        return self.calculate_percentage(self.wins)

    def calculate_percentage(self, numerator, denominator=None, divide_by_zero=None):
        """Return numerator / denominator (denominator defaults to the number
        of starts); return divide_by_zero instead when the denominator is 0."""
        denom = self.starts if denominator is None else denominator
        return divide_by_zero if denom == 0 else numerator / denom

    def count_results(self, result):
        """Count the performances whose recorded result equals *result*."""
        return sum(1 for p in self
                   if p['result'] is not None and p['result'] == result)
| 4,975 | 1,357 |
class Solution:
    def minDistance(self, word1: str, word2: str) -> int:
        """Levenshtein edit distance between word1 and word2
        (minimum number of insert / delete / replace operations)."""
        m, n = len(word1), len(word2)
        # prev[j] holds the distance between word1[:i-1] and word2[:j].
        prev = list(range(n + 1))
        for i in range(1, m + 1):
            cur = [i] + [0] * n
            for j in range(1, n + 1):
                if word1[i - 1] == word2[j - 1]:
                    cur[j] = prev[j - 1]          # characters match: no edit
                else:
                    # delete, replace, or insert — whichever is cheapest.
                    cur[j] = 1 + min(prev[j], prev[j - 1], cur[j - 1])
            prev = cur
        return prev[n]
| 343 | 160 |
import discord
import requests
from discord.ext import commands
from discord.ext.commands import BucketType, cooldown
class Lyrics(commands.Cog):
    """Cog that looks up song lyrics via some-random-api.ml and posts them."""

    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_ready(self):
        print("Lyrics cog loaded successfully")

    @commands.command(aliases=["lyrics"], description="Shows the lyrics of given song")
    @cooldown(1, 30, BucketType.user)
    async def ly(self, ctx, *, lyrics):
        """Fetch the lyrics for *lyrics* (a song title) and send them as an embed.

        API failures are reported in-channel rather than raised.
        """
        if lyrics is None:
            # Fixed typo in the user-facing message ("lyrcis" -> "lyrics").
            await ctx.send("You forgot lyrics")
        else:
            words = "+".join(lyrics.split(" "))
            print(words)
            URL = f"https://some-random-api.ml/lyrics?title={words}"

            def check_valid_status_code(request):
                # Only a 200 response carries a usable JSON payload.
                if request.status_code == 200:
                    return request.json()
                return False

            def get_song():
                # NOTE(review): requests is blocking inside an async command;
                # consider aiohttp so the event loop is not stalled.
                request = requests.get(URL)
                data = check_valid_status_code(request)
                return data

            song = get_song()
            if not song:
                await ctx.channel.send("Couldn't get lyrics from API. Try again later.")
            else:
                music = song["lyrics"]
                ti = song["title"]
                au = song["author"]
                embed = discord.Embed(
                    timestamp=ctx.message.created_at,
                    # Fixed kwarg: discord.Embed silently ignored "Title=".
                    title="Title: Song",
                    color=0xFF0000,
                )
                embed.add_field(name=f"Title: {ti}", value=f"Author: {au}")
                # Embed field values are capped at 1024 characters, so step by
                # 1024 too — the original stepped by 2000 and silently dropped
                # the text between offsets 1024 and 2000 of every stride.
                chunks = [music[i : i + 1024] for i in range(0, len(music), 1024)]
                for chunk in chunks:
                    embed.add_field(name="\u200b", value=chunk, inline=False)
                embed.set_footer(
                    text=f"Requested By: {ctx.author.name}",
                    icon_url=f"{ctx.author.avatar_url}",
                )
                await ctx.send(embed=embed)
def setup(client):
    # Extension entry point: discord.py calls this when the module is loaded
    # via client.load_extension, registering the Lyrics cog on the bot.
    client.add_cog(Lyrics(client))
| 2,130 | 626 |
import time
import pickle
import numpy as np
from copy import deepcopy
class ModelTraining:
    """Drives the epoch loop for a model over a data loader.

    The first ~80% of batches per epoch are used for training and the rest
    for validation; per-epoch statistics are accumulated in ``train_stats``
    and pickled next to the checkpoint at the end.
    """

    def __init__(self, model, data_loader, batch_size=10, epochs=20, model_ckpt_file='model/PCamNet'):
        self.model = model
        self.data_loader = data_loader  # iterable of (batch_data, batch_labels) pairs
        self.batch_size = batch_size
        self.num_epochs = epochs
        self.num_batch = len(self.data_loader)
        self.model_ckpt_file = model_ckpt_file
        # One entry per epoch: [epoch, train loss, train batches, val loss,
        # val batches, val predictions, true labels, wall time].
        self.train_stats = []

    @staticmethod
    def _to_scalar(value):
        # Losses may be framework tensors (exposing .numpy()) or plain floats
        # when a split received no batches; handle both without crashing.
        return value.numpy() if hasattr(value, 'numpy') else value

    def train_model(self):
        """Run the full training loop, save the model and stats, return 0."""
        print('Training Model: %s ... ' % self.model.get_name())
        train_batch_limit = 0.8 * self.num_batch  # 80/20 train/validation split
        for epoch in range(self.num_epochs):
            training_loss = 0.0
            training_count = 0
            validation_loss = 0.0
            validation_count = 0
            validation_predict = []
            true_labels = []  # (was assigned twice in the original)
            t1 = time.time()
            for batch_data, batch_labels in self.data_loader:
                if training_count <= train_batch_limit:
                    training_loss += self.model.train_batch(batch_data, batch_labels)
                    training_count += 1
                else:
                    validation_loss += self.model.compute_loss(batch_data, batch_labels)
                    # validation_predict.append(self.model.forward_pass(batch_data).numpy())
                    validation_count += 1
                    # true_labels.append(deepcopy(batch_labels).numpy())
            t2 = time.time()
            self.train_stats.append([epoch, self._to_scalar(training_loss), training_count,
                                     self._to_scalar(validation_loss), validation_count,
                                     validation_predict, true_labels, t2 - t1])
            print()
            # max(count, 1) guards against division by zero when a split is empty.
            print('epoch: %4d train loss: %20.6f val loss: %20.6f' %
                  (epoch, training_loss / max(training_count, 1),
                   validation_loss / max(validation_count, 1)))
            print('epoch time:', np.round(t2 - t1, 2), 's')
            print('time for completion:', np.round((t2 - t1) * (self.num_epochs - epoch - 1) / 60, 2), 'm')
            print('')
        self.model.save_model(self.model_ckpt_file + '.pth')
        # Close the stats file deterministically instead of leaking the handle.
        with open(self.model_ckpt_file + '_stats.pkl', 'wb') as stats_file:
            pickle.dump(self.train_stats, stats_file)
        print('Training Model: %s ... Complete' % self.model.get_name())
        print('Saving stats into model/stats.pkl')
        return 0

    def get_model(self):
        """Return the wrapped model object."""
        return self.model

    def set_model_save(self, filename):
        """Change the checkpoint/stats file stem used by train_model."""
        self.model_ckpt_file = filename
| 2,761 | 829 |
import numpy as np
from scipy.fft import ifft
def generate_waveforms(data: np.ndarray) -> np.ndarray:
    """
    Reconstruct a time-domain waveform from a stacked frequency-domain vector.

    :param data: 1-D array whose first half holds the real parts and whose
        second half holds the imaginary parts of the spectrum
    :return: complex waveform obtained via the (unscaled) inverse FFT
    """
    half = int(data.shape[0] // 2)
    # Recombine the two halves into one complex spectrum.
    spectrum = data[:half] + 1j * data[half:]
    # norm='forward' puts the 1/n factor on the forward transform, so the
    # inverse applies no scaling at all.
    return ifft(spectrum, norm='forward')
| 695 | 223 |
"""
Utility to extract extra OAS description from docstring.
"""
import re
import textwrap
from typing import Dict, List
class LinesIterator:
    """Iterate over the lines of a string, with the ability to step back one line."""

    def __init__(self, lines: str):
        self._buffer = lines.splitlines()
        self._cursor = -1  # index of the most recently yielded line

    def next_line(self) -> str:
        """Advance to and return the next line; StopIteration at the end."""
        if self._cursor + 1 >= len(self._buffer):
            raise StopIteration from None
        self._cursor += 1
        return self._buffer[self._cursor]

    def rewind(self) -> str:
        """Step back one line; StopIteration if nothing has been read yet."""
        if self._cursor == -1:
            raise StopIteration from None
        self._cursor -= 1
        return self._buffer[self._cursor]

    def __iter__(self):
        return self

    def __next__(self):
        return self.next_line()
def _i_extract_block(lines: LinesIterator):
    """
    Iter the line within an indented block and dedent them.

    Yields the lines of the first indented block found, each stripped of the
    block's leading indentation.  On exit the iterator is rewound so the line
    that terminated the block can be re-read by the caller.
    """
    # Go to the first not empty or not white space line.
    try:
        line = next(lines)
    except StopIteration:
        return  # No block to extract.
    while line.strip() == "":
        try:
            line = next(lines)
        except StopIteration:
            return
    # The first line's indentation defines the block: any later line indented
    # at most this much starts a different block.
    indent = re.fullmatch("( *).*", line).groups()[0]
    indentation = len(indent)
    start_of_other_block = re.compile(f" {{0,{indentation}}}[^ ].*")
    yield line[indentation:]
    # Yield lines until the indentation is the same or is greater than
    # the first block line.
    try:
        line = next(lines)
    except StopIteration:
        return
    while not start_of_other_block.fullmatch(line):
        yield line[indentation:]
        try:
            line = next(lines)
        except StopIteration:
            return
    # Put the terminating line back for the caller to consume.
    lines.rewind()
def _dedent_under_first_line(text: str) -> str:
"""
Apply textwrap.dedent ignoring the first line.
"""
lines = text.splitlines()
other_lines = "\n".join(lines[1:])
if other_lines:
return f"{lines[0]}\n{textwrap.dedent(other_lines)}"
return text
def status_code(docstring: str) -> Dict[int, str]:
    """
    Extract the "Status Code:" block of the docstring.

    Returns a mapping of HTTP status code -> description, parsed from the
    indented block under a "status code(s):" heading.  Each entry starts with
    a three-digit code followed by a colon; lines up to the next code belong
    to the same description.
    """
    iterator = LinesIterator(docstring)
    for line in iterator:
        if re.fullmatch("status\\s+codes?\\s*:", line, re.IGNORECASE):
            # Step back so _i_extract_block sees the heading as the block start.
            iterator.rewind()
            blocks = []
            lines = []
            i_block = _i_extract_block(iterator)
            # Skip the "status code:" heading itself.
            next(i_block)
            for line_of_block in i_block:
                if re.search("^\\s*\\d{3}\\s*:", line_of_block):
                    # A new "<code>:" entry begins; flush the previous one.
                    if lines:
                        blocks.append("\n".join(lines))
                        lines = []
                lines.append(line_of_block)
            if lines:
                blocks.append("\n".join(lines))
            return {
                int(status.strip()): _dedent_under_first_line(desc.strip())
                for status, desc in (block.split(":", 1) for block in blocks)
            }
    return {}
def tags(docstring: str) -> List[str]:
    """
    Extract the "Tags:" block of the docstring.

    Tags may be separated by commas or semicolons; surrounding whitespace is
    normalised to single spaces.
    """
    iterator = LinesIterator(docstring)
    for line in iterator:
        if not re.fullmatch("tags\\s*:.*", line, re.IGNORECASE):
            continue
        # Step back so the heading line is part of the extracted block.
        iterator.rewind()
        block = " ".join(_i_extract_block(iterator))
        after_colon = block.split(":")[1]
        return [" ".join(tag.split()) for tag in re.split("[,;]", after_colon)]
    return []
def operation(docstring: str) -> str:
    """
    Extract all docstring except the "Status Code:" block.

    "Status code(s):" and "Tags:" headings and their indented blocks are
    skipped; every other line is kept verbatim.
    """
    lines = LinesIterator(docstring)
    ret = []
    for line in lines:
        if re.fullmatch("status\\s+codes?\\s*:|tags\\s*:.*", line, re.IGNORECASE):
            # Step back onto the heading, then drain its whole indented block.
            lines.rewind()
            for _ in _i_extract_block(lines):
                pass
        else:
            ret.append(line)
    return ("\n".join(ret)).strip()
| 3,745 | 1,156 |
# Tuple exercise: slicing and sorting the Brazilian championship standings.
print('=' * 8, 'Tuplas com Times de Futebol', '=' * 8)
cf = ('Palmeiras', 'Santos', 'Vasco da Gama', 'Grêmio', 'Flamengo', 'Corinthians', 'Internacional', 'Cruzeiro', 'São Paulo', 'Atlético Mineiro', 'Botafogo', 'Fluminense', 'Coritiba', 'Bahia', 'Goiás', 'Guarani', 'Sport', 'Portuguesa', 'Atlético Paranaense', 'Vitória')
separador = '-=' * 40  # visual divider reused between every line of output
print('De acordo com os dados do Campeonato Brasileiro de Futebol temos que:')
print(separador)
print(f'A lista dos 20 primeiros colocados é: {cf}.')
print(separador)
print(f'Os cinco primeiros colocados são: {cf[:5]}.')
print(separador)
print(f'Os quatro últimos colocados são: {cf[-4:]}.')
print(separador)
print(f'A lista dos times em ordem alfabética fica: {sorted(cf)}.')
print(separador)
print('E o time Chapecoense não está entre os vinte primeiros da CBF.')
print(separador)
| 802 | 326 |
from setuptools import setup
# Read the long description up front so the file handle is closed promptly
# (the original left an unclosed open() inside the setup() call).
with open('README.md', encoding='utf-8') as _readme:
    _long_description = _readme.read()

setup(
    name='visual_regression_tracker',
    version='4.0.0',
    description='Open source, self hosted solution for visual testing '
                'and managing results of visual testing.',
    long_description=_long_description,
    long_description_content_type='text/markdown',
    author='',
    license='APACHE',
    url='https://github.com/Visual-Regression-Tracker/'
        'Visual-Regression-Tracker',
    packages=['visual_regression_tracker'],
    # `requires` is legacy PEP 314 metadata and does not install anything;
    # `install_requires` is the keyword pip actually honours.
    install_requires=['requests'],
)
| 527 | 154 |
print("-"*14)
print("Km's Calculation")
print("-"*14)
kms = int(input("How many kilometers did you drive ? "))
if kms<=200:
price = kms*0.50
print("Price to be paied out equivalent\nto quantity of the kilometers that you have driven: R$ {}".format(price))
else:
price = kms*0.45
print("Price to be paied out equivalent\nto quantity of the kilometers that you have driven:: R$ {}".format(price)) | 423 | 155 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import os, sys
# Make the project root importable when the tests are run directly.
file_path = os.path.dirname(os.path.abspath(__file__))
root_path = file_path.replace('/phanpy/tests', '')
# A plain `if` statement instead of the original conditional expression,
# which was used only for its side effect.
if root_path not in sys.path:
    sys.path.append(root_path)
import numpy as np
from phanpy.core.objects import Status, Item, Move, Pokemon, Trainer
class TestItems():
    """Tests for phanpy Item construction, fling metadata and flag lookups."""
    def test_no_item(self):
        # Item id 0 is the "no-item" sentinel with empty flags.
        item = Item(0)
        assert item.name == 'no-item'
        assert item.category_id == 23
        assert item.fling.power == 0
        assert item.fling.effect_id == 0
        assert item.fling.effect_name == 'no-effect'
        assert list(item.flags.id.values) == []
    def test_flags_id_and_name_map(self):
        item = Item(1)
        assert sorted(item.flags.id.values) == sorted([1, 2, 4, 5])
        assert sorted(item.flags.name.values) == sorted(['countable', 'consumable',
                                                         'usable-in-battle',
                                                         'holdable'])
    def test_instantiate_item_with_undefined_fling_effects(self):
        # Items without fling data fall back to power/effect 0.
        item = Item(1)
        assert item.name == 'master-ball'
        assert item.category_id == 34
        assert item.fling.power == 0
        assert item.fling.effect_id == 0
        assert item.fling.effect_name == 'no-effect'
    def test_instantiate_item_with_defined_fling_effects(self):
        item = Item(126)
        assert item.name == 'cheri-berry'
        assert item.category_id == 3
        assert item.fling.power == 10
        assert item.fling.effect_id == 3
        assert item.fling.effect_name == 'berry-effect'
        assert item.flags.id.values == [7]
class TestStatusInstantiation():
    """Tests for the different ways a Status can be constructed."""
    def test_declare_a_status_by_id_from_the_table(self):
        assert Status(5).name[0] == 'poison'
    def test_declare_a_status_with_a_timer(self):
        # Second positional argument is the duration (in turns).
        assert Status(5, 5).duration[0] == 5
    def test_declare_a_status_by_name_from_the_table(self):
        assert Status('poison').id[0] == 5
    def test_declare_a_custom_status(self):
        # Names not in the table get a synthetic id >= 100000 and are volatile.
        trick_room = Status('trick-room', 5)
        assert trick_room.id[0] >= 100000
        assert trick_room.duration[0] == 5
        assert trick_room.volatile[0] == True
    def test_status_volatility(self):
        assert Status(20).volatile[0] == True
        assert Status(0).volatile[0] == False
@pytest.fixture(scope='function')
def setUpStatus():
    # Two non-volatile statuses (poison, burn) and two timed volatile ones.
    poison = Status(5)
    burn = Status('burn')
    confused = Status('confused', 5)
    disabled = Status('disabled', 4)
    yield poison, burn, confused, disabled
class TestStatusAddition():
    """Tests for combining statuses with the + operator."""
    def test_add_two_non_volatile(self, setUpStatus):
        poison, burn, __, __ = setUpStatus
        non_volatile = poison + burn # burn should override poison
        assert non_volatile.name == ['burn']
        # Check if the original ones are mutated or not.
        assert poison.name == ['poison']
        assert burn.name == ['burn']
    def test_add_a_volatile_to_non_volatile(self, setUpStatus):
        # Volatile and non-volatile statuses coexist after addition.
        poison, __, confused, __ = setUpStatus
        mixed = poison + confused
        assert sorted(mixed.name) == sorted(['poison', 'confused'])
        assert sorted(mixed.duration) == sorted([float('inf'), 5])
    def test_add_two_volatile_statuses(self, setUpStatus):
        __, __, confused, disabled = setUpStatus
        volatile = confused + disabled
        assert sorted(volatile.name) == sorted(['confused', 'disabled'])
        assert sorted(volatile.duration) == sorted([5, 4])
    def test_add_a_status_to_a_mixed(self, setUpStatus):
        poison, __, confused, disabled = setUpStatus
        volatile = confused + disabled
        mixed2 = volatile + poison
        assert sorted(mixed2.name) == sorted(['confused', 'disabled', 'poison'])
        assert sorted(mixed2.volatile) == sorted([True, True, False])
    def test_add_multiple_in_one_line(self, setUpStatus):
        # Addition chains left-to-right.
        __, burn, confused, disabled = setUpStatus
        mixed = burn + confused + disabled
        assert sorted(mixed.name) == sorted(['burn', 'confused', 'disabled'])
class TestStatusMethods():
    """Tests for Status.remove and Status.reduce behaviour."""
    def test_remove_an_existing_status_by_name(self, setUpStatus):
        poison, __, confused, disabled = setUpStatus
        combined = poison + confused + disabled
        combined.remove('poison')
        assert sorted(combined.name) == sorted(['confused', 'disabled'])
        assert sorted(combined.duration) == sorted([5, 4])
        assert sorted(combined.volatile) == sorted([True, True])
    def test_remove_an_existing_status_by_id(self, setUpStatus):
        # remove() accepts either a name or a numeric id.
        poison, __, confused, disabled = setUpStatus
        combined = poison + confused + disabled
        combined.remove(5)
        assert sorted(combined.name) == sorted(['confused', 'disabled'])
        assert sorted(combined.duration) == sorted([5, 4])
        assert sorted(combined.volatile) == sorted([True, True])
    def test_remove_the_only_status(self):
        # Removing the last status resets to the 'normal' placeholder.
        poison = Status(5)
        poison.remove('poison')
        assert poison.id == np.array([0])
        assert poison.name == np.array(['normal'])
        assert poison.duration == np.array([float('inf')])
        assert poison.volatile == np.array([False])
    def test_remove_a_non_existing_status(self):
        poison = Status(5)
        with pytest.raises(KeyError):
            poison.remove('burn')
    def test_reduce_duration_by_1(self, setUpStatus):
        # reduce() ticks every finite duration down by one turn.
        __, burn, confused, disabled = setUpStatus
        mixed = burn + confused + disabled
        mixed.reduce()
        assert sorted(mixed.duration) == sorted([float('inf'), 4, 3])
    def test_reduce_the_duration_by_1_where_the_duration_was_1(self):
        # A status whose timer reaches zero is dropped entirely.
        burn = Status('burn', 1)
        confused = Status('confused', 5)
        mixed = burn + confused
        mixed.reduce()
        assert mixed.name == ['confused']
        assert mixed.duration == [4]
        assert mixed.volatile == [True]
@pytest.fixture(scope='function')
def setUpPokemon():
    # Fresh deoxys-attack (id 10001) per test.
    p = Pokemon(10001)
    yield p
class TestPokemon():
    """Tests for Pokemon stats, natures, moves, items and Trainer parties."""
    def test_id_over_10000(self, setUpPokemon):
        # Ids above 10000 address alternate forms.
        p = setUpPokemon
        assert p.name == 'deoxys-attack'
    def test_types_single(self, setUpPokemon):
        p = setUpPokemon
        assert p.types == [14]
    def test_types_double(self):
        p = Pokemon(10004)
        assert p.types == [7, 5]
    def test_effort_values_sum(self, setUpPokemon):
        # Total EVs are capped at 510.
        p = setUpPokemon
        assert sum(p.ev.values) == 510
    def test_nature_id_assignment(self, setUpPokemon):
        p = setUpPokemon
        p.set_nature(18) # lax nature, decrease 5, increase 3.
        assert p.nature.id == 18
    def test_set_nature_by_name(self, setUpPokemon):
        p = setUpPokemon
        p.set_nature('lax') # 18
        assert p.nature.id == 18
    def test_nature_modifier(self, setUpPokemon):
        # Lax: +10% defense, -10% special defense.
        p = setUpPokemon
        p.set_nature(18)
        assert p.nature_modifier.defense == 1.1
        assert p.nature_modifier.specialDefense == 0.9
    def test_set_iv(self, setUpPokemon):
        p = setUpPokemon
        p.set_iv([31. for x in range(6)])
        assert p.iv.defense == 31.
    def test_set_ev(self, setUpPokemon):
        p = setUpPokemon
        p.set_ev([31. for x in range(6)])
        assert p.ev.defense == 31.
    def test_calculated_stats(self):
        """Using the example on
        https://bulbapedia.bulbagarden.net/wiki/Statistic#Determination_of_stats
        """
        p = Pokemon('garchomp', 78)
        p.set_nature('adamant')
        p.set_iv([24, 12, 30, 16, 23, 5])
        p.set_ev([74, 190, 91, 48, 84, 23])
        expected = [289, 278, 193, 135, 171, 171]
        for i in range(6):
            assert p.stats[i] == expected[i]
    def test_stage_factor_changes_when_stage_changes(self, setUpPokemon):
        p = setUpPokemon
        p.stage.attack += 3 # the factor should be multiplied by 2.5
        assert p.stage_factor.attack == 2.5
    def test_current_stats_change_when_factors_change(self, setUpPokemon):
        p = setUpPokemon
        p.stage.attack += 3
        assert p.current.attack == np.floor(p.stats.attack * 2.5)
    def test_add_received_damage(self, setUpPokemon):
        # Damage history is a deque with the newest hit at the front.
        p = setUpPokemon
        p.history.damage.appendleft(288)
        p.history.damage.appendleft(199)
        assert p.history.damage[0] == 199
    def test_add_stage(self, setUpPokemon):
        p = setUpPokemon
        p.history.stage += 5
        assert p.history.stage == 5
    def test_set_moves(self, setUpPokemon):
        p = setUpPokemon
        first_4_moves = [Move(x) for x in range(1, 5)]
        p.moves = first_4_moves
        for i in range(4):
            assert p.moves[i].name == first_4_moves[i].name
    def test_set_pp_and_power(self, setUpPokemon):
        # Mutating a held Move must not affect freshly-constructed Moves.
        p = setUpPokemon
        p.moves[0] = Move(33) # tackle, power 40, pp 35, accuracy 100.
        p.moves[0].pp -= 4
        p.moves[0].power *= 2
        assert p.moves[0].pp == 31
        assert p.moves[0].power == 80
        assert p.moves[0].power != Move(33).power
    def test_holding_item303_changes_critical_stage(self, setUpPokemon):
        p = setUpPokemon
        p.item = Item(303)
        assert p.stage.critical == 1.
    def test_dual_abilities_successfully_initiated(self):
        # Species with two possible abilities pick one of them.
        p = Pokemon(19)
        assert p.ability in [50, 62]
    def test_single_ability_successfully_initiated(self):
        p = Pokemon(1)
        assert p.ability == 65
    def test_reset_current_stats(self, setUpPokemon):
        p = setUpPokemon
        p.stage += 3
        p.reset_current()
        assert p.current.attack == p.stats.attack
    def test_two_pokemons_are_equal(self, setUpPokemon):
        # Equality requires matching IVs and unique_id, not just species.
        p = setUpPokemon
        q = Pokemon(10001)
        assert p != q
        q.set_iv(p.iv.values)
        q.unique_id = p.unique_id
        assert p == q
    def test_trainer_set_pokemon(self):
        t = Trainer('Satoshi')
        t.set_pokemon(3, Pokemon(10005))
        assert t.party(3).name == 'wormadam-trash'
    def test_set_trainers_pokemons_moves(self):
        # NOTE(review): the pokemon is set at slot 3 but read from slot 1 —
        # confirm party() indexing semantics; this looks accidental.
        t = Trainer('Satoshi')
        t.set_pokemon(3, Pokemon(10001))
        t.party(1).moves[1] = Move(33)
        assert t.party(1).moves[1].name == 'tackle'
| 10,224 | 3,549 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 16:08:19 2017
@author: uqytu1
"""
import math
import numpy as np
import urllib.request
import json
import base64
import struct
import datetime
import pytz
def GetLonLat(Metadata):
    """Parse 'GPSPosition' (e.g. ``27 deg 30' 36.00" S, 153 deg 1' 12.00" E``)
    into signed decimal degrees.

    Returns (longitude, latitude); south and west are negative.

    Security/robustness: numeric tokens are parsed with float() instead of
    eval(), which executed arbitrary text taken from file metadata.
    """
    lat_part, lon_part = Metadata['GPSPosition'].split(',')

    def _to_decimal(tokens):
        # tokens: [degrees, 'deg', "MM'", 'SS.ss"', hemisphere]
        degrees = float(tokens[0])
        minutes = float(tokens[2].strip("'"))
        seconds = float(tokens[3].strip('"'))
        return degrees + minutes / 60.0 + seconds / 3600.0

    lat_tokens = lat_part.split()
    Lat = _to_decimal(lat_tokens)
    if lat_tokens[4] == 'S':
        Lat = -Lat
    lon_tokens = lon_part.split()
    Lon = _to_decimal(lon_tokens)
    if lon_tokens[4] == 'W':
        Lon = -Lon
    return Lon, Lat
def GetAltitude(Metadata):
    """Return the GPS altitude as a float from a string like '120.5 m'.

    Uses float() instead of eval() — the token comes from untrusted metadata.
    """
    return float(Metadata['GPSAltitude'].split()[0])
def GetRollPitchYaw(Metadata):
    """Return (roll, pitch, yaw) in degrees; yaw is normalised to [0, 360).

    Uses float() instead of eval() on the untrusted metadata strings.
    """
    roll = float(Metadata['Roll'])
    pitch = float(Metadata['Pitch'])
    yaw = float(Metadata['Yaw'])
    if yaw < 0:
        yaw = yaw + 360
    return roll, pitch, yaw
def GetTime(Metadata):
    """Return the capture time ('SubSecDateTimeOriginal') as a UTC-aware datetime.

    Uses the stdlib datetime.timezone.utc instead of pytz.localize: for a
    fixed-UTC stamp they are equivalent, and it drops the third-party
    dependency (pytz is discouraged for new code).
    """
    naive = datetime.datetime.strptime(Metadata['SubSecDateTimeOriginal'],
                                       "%Y:%m:%d %H:%M:%S.%f")
    return naive.replace(tzinfo=datetime.timezone.utc)
def GetGPSTime(Metadata):
    """Return the 'GPSTimeStamp' as a datetime.time.

    The original localised to UTC with pytz and then called .time(), which
    discards the tzinfo anyway, so the localisation was a no-op; parse and
    return the time directly.
    """
    parsed = datetime.datetime.strptime(Metadata['GPSTimeStamp'], "%H:%M:%S.%f")
    return parsed.time()
def GetTimefromStart(Metadata):
    """Return the time-of-day of 'SubSecCreateDate' as a timedelta since midnight.

    The pytz localisation in the original had no effect on the resulting
    timedelta, so the stamp is parsed directly with the stdlib only.
    """
    stamp = datetime.datetime.strptime(Metadata['SubSecCreateDate'],
                                       "%Y:%m:%d %H:%M:%S.%f")
    return datetime.timedelta(hours=stamp.hour,
                              minutes=stamp.minute,
                              seconds=stamp.second,
                              microseconds=stamp.microsecond)
def GetTimeOffset(Metadata):
    """Return the difference (GPS time - image time) as a timedelta.

    Components may be individually negative; timedelta normalises them.
    """
    gps = GetGPSTime(Metadata)
    img = GetTime(Metadata).time()
    return datetime.timedelta(hours=gps.hour - img.hour,
                              minutes=gps.minute - img.minute,
                              seconds=gps.second - img.second,
                              microseconds=gps.microsecond - img.microsecond)
def GetPrincipalPoint(Metadata, sensor_size):
    """Return the principal point in pixel units as a 2x1 numpy array.

    'PrincipalPoint' holds "cx,cy" in the same physical units as
    sensor_size = (width, height).

    BUG FIX: the original called .strip() on the *list* returned by
    split(',') (AttributeError) and eval()'d metadata; the values are
    now parsed with float() per element.
    """
    cx, cy = (float(v.strip()) for v in Metadata['PrincipalPoint'].split(','))
    w = float(Metadata['ImageWidth'])
    h = float(Metadata['ImageHeight'])
    # Note that Sequoia's origin is at lower left instead of top left
    CP = np.array([[w*cx/sensor_size[0]], [h*(cy/sensor_size[1])]])
    return CP
def GetFisheyeAffineMatrix(Metadata):
    """Return the 2x2 fisheye affine matrix [[C, D], [E, F]].

    BUG FIX: the original passed the *list* from split(',') to eval(),
    which raises TypeError; the comma-separated values are parsed as
    floats instead.
    """
    C, D, E, F = (float(v) for v in Metadata['FisheyeAffineMatrix'].split(','))
    FisheyeAffineMatrix = np.array([[C, D], [E, F]])
    return FisheyeAffineMatrix
def GetFisheyePolynomial(Metadata):
    """Return the fisheye polynomial coefficients as a list of floats.

    BUG FIX: the original passed the *list* from split(',') to eval(),
    which raises TypeError; each coefficient is now parsed with float().
    """
    return [float(v) for v in Metadata['FisheyePolynomial'].split(',')]
def GetElevation(Metadata):
    """Look up the ground elevation (metres) at the image's GPS position
    using the Google Maps Elevation web API.

    Note: the free tier allows 2500 queries per day.
    """
    Lon, Lat = GetLonLat(Metadata)
    # Retrieve Elevation from Google Map API
    url = ('http://maps.googleapis.com/maps/api/elevation/json?'
           + 'locations={Lat},{Lon}&sensor={Bool}'.format(Lat=Lat, Lon=Lon, Bool='false'))
    with urllib.request.urlopen(url) as f:
        response = json.loads(f.read().decode())
    return response['results'][0]['elevation']
def GetSunIrradiance(Metadata):
    """Return the sunshine-sensor irradiance for the measurement closest
    in time to the image's creation time.

    'IrradianceList' is base64-encoded packed records of 28 bytes each:
    little-endian uint64 timestamp (microseconds), 4 x uint16
    (count, ?, gain, exposure time), 3 x float32.

    Fix: the original reused the name `count` for both the number of
    records and the sensor count value; the two are now distinct.
    """
    encoded = Metadata['IrradianceList']
    # decode the string
    data = base64.standard_b64decode(encoded)
    # ensure that there's enough data: each record is exactly 28 bytes
    assert len(data) % 28 == 0
    nrecords = len(data) // 28
    # unpack each record as uint64, 4 x uint16, 3 x float32
    records = [struct.unpack_from('<QHHHHfff', data, 28 * i)
               for i in range(nrecords)]
    # Pick the record whose timestamp is nearest the image creation time.
    CreateTime = GetTimefromStart(Metadata)

    def _distance(rec):
        q, r = divmod(rec[0], 1000000)
        return abs(datetime.timedelta(seconds=q, microseconds=r) - CreateTime)

    best = min(records, key=_distance)
    count = best[1]
    gain = best[3]
    exposuretime = best[4]
    Irradiance = count / (gain * exposuretime)
    return Irradiance
def GetPowerCoefficients(Metadata):
    """Return [(x_power, y_power, coefficient), ...].

    'VignettingPolynomial2DName' lists exponent pairs and
    'VignettingPolynomial2D' lists one coefficient per pair.
    """
    exponent_items = Metadata['VignettingPolynomial2DName'].split(',')
    coeff_items = Metadata['VignettingPolynomial2D'].split(',')
    terms = []
    for idx in range(0, len(exponent_items), 2):
        terms.append((int(exponent_items[idx]),
                      int(exponent_items[idx + 1]),
                      float(coeff_items[idx // 2])))
    return terms
def GetSensorModelCoef(Metadata):
    """Return the three 'SensorModel' coefficients as floats.

    Stray leading/trailing periods are stripped before conversion.
    """
    parts = Metadata['SensorModel'].split(',')
    return (float(parts[0].strip('.')),
            float(parts[1].strip('.')),
            float(parts[2].strip('.')))
def GetExposureTime(Metadata):
    """Return the exposure time in seconds.

    'ExposureTime' may be a fraction like '1/250' or a plain number.
    """
    parts = Metadata['ExposureTime'].split('/')
    if len(parts) > 1:
        return float(parts[0]) / float(parts[1])
    return float(parts[0])
def GetISO(Metadata):
    """Return the ISO speed from the 'ISO' tag as an int."""
    return int(Metadata['ISO'])
def GetFNumber(Metadata):
    """Return the lens f-number from the 'FNumber' tag as a float."""
    return float(Metadata['FNumber'])
# Demo: draw three overlapping filled circles into separate single-channel
# images, display each channel in grayscale, then compose them into one
# RGB image.
# NOTE(review): `plt` and `np` are not imported in this chunk — presumably
# matplotlib.pyplot/numpy are imported earlier in the file; confirm.
# NOTE(review): skimage.draw.circle was removed in scikit-image 0.19 in
# favour of skimage.draw.disk — this code requires an older scikit-image.
from skimage import draw
red = np.zeros((300, 300))
green = np.zeros((300, 300))
blue = np.zeros((300, 300))
# r/c are the row/column indices of pixels inside circle(center_r, center_c, radius)
r, c = draw.circle(100, 100, 100)
red[r, c] = 1
r, c = draw.circle(100, 200, 100)
green[r, c] = 1
r, c = draw.circle(200, 150, 100)
blue[r, c] = 1
f, axes = plt.subplots(1, 3)
for (ax, channel) in zip(axes, [red, green, blue]):
    ax.imshow(channel, cmap='gray')
    ax.axis('off')
# Stack channels along the last axis -> (300, 300, 3) RGB image.
plt.imshow(np.stack([red, green, blue], axis=2));
| 453 | 245 |
from .training.ReviewClassifier import ReviewClassifier
from .data_managing.Dataset import ReviewDataset
from .training.hyperparameters import args
from .testing import compute_loss_acc as loss_acc
from .testing import predict_rating as predict
from .testing import analizing as analyze
if __name__ == '__main__':
    # creating dataset
    if args.reload_from_files:
        # training from a checkpoint: reuse the previously saved vectorizer
        print("Loading dataset and vectorizer")
        dataset = ReviewDataset.load_dataset_and_load_vectorizer(args.review_csv,
                                                                 args.vectorizer_file)
    else:
        print("Loading dataset and creating vectorizer")
        # create dataset and vectorizer
        dataset = ReviewDataset.load_dataset_and_make_vectorizer(args.review_csv)
        dataset.save_vectorizer(args.vectorizer_file)
    vectorizer = dataset.get_vectorizer()
    # one input feature per vocabulary token
    classifier = ReviewClassifier(num_features=len(vectorizer.review_vocab))
    # computing loss and accuracy of model
    loss_acc.compute(classifier, args, dataset)
    # testing model on real data (a Russian-language review)
    test_review = "не бери грех на душу"
    # move model to CPU for single-example inference
    classifier = classifier.cpu()
    prediction = predict.predict_rating(test_review, classifier, vectorizer, decision_threshold=0.5)
    print("{} -> {}".format(test_review, prediction))
    # report the most influential vocabulary words per class
    analyze.influencial_words(classifier, vectorizer)
| 1,398 | 377 |
from setuptools import setup, find_packages, Extension
# To use a consistent encoding
from codecs import open
# Other stuff
import sys, os, fileinput
import versioneer
# Absolute path of the directory containing this setup.py.
here = os.path.dirname(os.path.realpath(__file__))
def main():
    """Read the long description from README.md and run the setuptools
    installation for the ai4materials package."""
    # Get the long description from the README file
    with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = f.read()
    setup(
        # template at https://github.com/pypa/sampleproject/blob/master/setup.py
        name='ai4materials',
        # Versions should comply with PEP440. For a discussion on single-sourcing
        # the version across setup.py and the project code, see
        # https://packaging.python.org/en/latest/single_source_version.html
        version="0.1",
        description='Data-analytics modeling of materials science data',
        long_description=long_description,
        zip_safe=True,
        # The project's main homepage.
        # BUG FIX: the scheme was duplicated ('https://https://github.com/...')
        url='https://github.com/angeloziletti/ai4materials',
        # Author details
        author='Ziletti, Angelo and Leitherer, Andreas',
        author_email='angelo.ziletti@gmail.com, andreas.leitherer@gmail.com',
        # Choose your license
        license='Apache License 2.0',
        # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
        classifiers=[
            # How mature is this project? 3 - Alpha, 4 - Beta, 5 - Production/Stable
            'Development Status :: 3 - Alpha',
            # Indicate who your project is intended for
            'Intended Audience :: Science/Research',
            'Topic :: Physics :: Materials science :: Machine learning :: Deep learning :: Data analytics',
            # BUG FIX: 'License :: Apache Licence 2.0' is not a valid trove
            # classifier (and 'Licence' was misspelled); use the canonical one.
            'License :: OSI Approved :: Apache Software License',
        ],
        # What does your project relate to?
        keywords='Data analytics of materials science data.',
        # Packages are listed manually (find_packages() would also work).
        packages=['ai4materials', 'ai4materials.dataprocessing', 'ai4materials.descriptors',
                  'ai4materials.interpretation', 'ai4materials.visualization',
                  'ai4materials.models', 'ai4materials.utils', 'ai4materials.external'],
        package_dir={'ai4materials': 'ai4materials'},
        # Run-time dependencies; installed by pip with the package. See
        # https://packaging.python.org/en/latest/requirements.html
        install_requires=[
            'ase>=3.19.0', 'tensorflow==1.13.1', 'keras==2.2.4',
            'scikit-learn>=0.17.1', 'pint', 'future',
            'pandas>=0.25.0', 'enum34', 'pymatgen>=2020.3.13',
            'keras-tqdm', 'seaborn', 'paramiko',
            'scipy', 'nose>=1.0', 'numpy', 'h5py<=2.9.0',
            'cython>=0.19', 'Jinja2', 'progressbar'],
        # Optional dependency groups, installable via: pip install -e .[test]
        extras_require={
            'test': ['pytest', 'coverage'],
        },
        # Git dependencies (QUIP, condor); see
        # https://mike.zwobble.org/2013/05/adding-git-or-hg-or-svn-dependencies-in-setup-py/
        dependency_links=['https://github.com/libAtoms/QUIP',
                          'https://github.com/FXIhub/condor.git'],
        # Data files shipped inside the package (must also appear in
        # MANIFEST.in for Python <= 2.6).
        package_data={
            'ai4materials': ['descriptors/descriptors.nomadmetainfo.json',
                             'data/nn_models/*.h5', 'data/nn_models/*.json',
                             'utils/units.txt', 'utils/constants.txt',
                             'data/PROTOTYPES/*/*/*.in', 'data/training_data/*.pkl',
                             'data/training_data/*.json'
                             ]},
        project_urls={  # Optional
            'Bug Reports': 'https://gitlab.com/ai4materials/issues',
            'Source': 'https://gitlab.com/ai4materials/',
        },
    )
# Run main function by default when executed as a script
# (e.g. `python setup.py install`).
if __name__ == "__main__":
    main()
| 6,573 | 2,034 |
from zeekofile.cache import zf
import re
# The blog controller configured for this zeekofile site.
blog = zf.config.controllers.blog
def run():
    """Controller entry point: write all blog-post permapages."""
    write_permapages()
def write_permapages():
    "Write blog posts to their permalink locations"
    site_re = re.compile(zf.config.site.url, re.IGNORECASE)
    total = len(blog.posts)
    for idx, post in enumerate(blog.posts):
        if not post.permalink:
            # Permalinks MUST be specified. No permalink, no page.
            blog.logger.info("Post has no permalink: {0}".format(post.title))
            continue
        # Site-relative output path: permalink with the site URL removed.
        path = site_re.sub("", post.permalink)
        blog.logger.info("Writing permapage for post: {0}".format(path))
        context = {
            "post": post,
            "posts": blog.posts
        }
        # Posts are ordered newest-first, so the chronologically previous
        # post is the next list element and vice versa.
        if idx < total - 1:
            context['prev_post'] = blog.posts[idx + 1]
        if idx > 0:
            context['next_post'] = blog.posts[idx - 1]
        zf.writer.materialize_template(
            "/blog/permapage.mako", zf.util.path_join(path, "index.html"), context)
| 1,074 | 355 |
n = float(input("Digite o número: "))
b=2
dif=3.14
while dif >= 0.0001:
p=(b+(n/b))/2
print("p é",p)
b=p
pq = p*p
dif=abs(n-pq) | 132 | 84 |
# Requirements
import subprocess
# Console print initial
print('######################################################################')
print('')
print('######## ## ## ######## ### ######## ## ## ##')
print('## ## ## ## ## ## ## ## ## ## ## ## ##')
print('## ## #### ## ## ## ## ## ## ## ####')
print('######## ## ## ## ## ## ## ## ## ##')
print('## ## ## ## ######### ## ## ######### ##')
print('## ## ## ## ## ## ## ## ## ##')
print('######## ## ######## ## ## ######## ## ##')
print('')
print('######################################################################')
print('')
# Initial: list all Wi-Fi profiles saved on this Windows machine
data = subprocess.check_output(['netsh', 'wlan', 'show', 'profiles']).decode('utf-8').split('\n')
# BUG FIX: the profile name follows the first ':' on each "All User Profile"
# line; the original split on ':0', which never matches and raised IndexError.
# [1:-1] strips the leading space and trailing '\r' (consistent with the
# "Key Content" parse below).
wifis = [line.split(':')[1][1:-1] for line in data if "All User Profile" in line]
print('> Process operating...')
print('> Getting Password.')
print('')
print('Results:')
print('Wi-Fi connection:')
for wifi in wifis:
    # Ask netsh for the profile including the cleartext key.
    results = subprocess.check_output(['netsh', 'wlan', 'show', 'profiles', wifi, 'key=clear']).decode('utf-8').split('\n')
    results = [line.split(':')[1][1:-1] for line in results if "Key Content" in line]
    try:
        print(f'Name:{wifi}, Password: {results[0]}')
    except IndexError:
        # Open networks (or insufficient privileges) have no "Key Content" line.
        print(f'Name:{wifi}, Password: Cannot be read, try again')
print('')
print('######################################################################')
print('')
print('Close this window to exit.')
print('')
# Metadate
print('Repository: https://github.com/ZAD4YTV/excract-wifi-passwords-from-windows/')
print('BY ZAD4Y')
print('')
print('Title: Extract Wi-Fi passwords from windows.')
print('Description: Extractor of Wi-Fi passwords from windows. I am not responsible for any illegal activities that you perform with the content of this repository')
print('License: MIT License')
print('More information in the repository.')
| 2,045 | 604 |
#!/usr/bin/python
import os
def download_model(model_name):
    '''
    Downloads a given model binary from the OpenNLP SourceForge mirror
    into the current working directory using wget.
    '''
    # BUG FIX: the original used a Python-2-only print statement, which is
    # a SyntaxError under Python 3; the call form below works on both.
    print("Downloading %s" % model_name)
    cmd = "wget http://opennlp.sourceforge.net/models-1.5/%s" % model_name
    # NOTE(review): shells out to wget; requires wget on PATH.
    os.system(cmd)
if __name__ == "__main__":
    # OpenNLP 1.5 English models to fetch.
    model_names = ["en-ner-dates.bin", "en-ner-locations.bin", "en-ner-money.bin",
                   "en-ner-organization.bin", "en-ner-percentage.bin",
                   "en-ner-persons.bin", "en-ner-time.bin",
                   "en-sent.bin", "en-token.bin"]
    for model_name in model_names:
        # Only download models not already present in the working directory
        # (the original compared the result with `is False`, an anti-idiom).
        if not os.path.isfile("./%s" % model_name):
            download_model(model_name)
| 633 | 235 |
#!/usr/bin/env python
#
# NSC_INSTCAL_SEXDAOPHOT.PY -- Run SExtractor and DAOPHOT on an exposure
#
from __future__ import print_function
__authors__ = 'David Nidever <dnidever@noao.edu>'
__version__ = '20180819' # yyyymmdd
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, Column
import time
import shutil
import re
import subprocess
import glob
import logging
import socket
#from scipy.signal import convolve2d
from scipy.ndimage.filters import convolve
import astropy.stats
import struct
from utils import *
from phot import *
# Ignore these warnings, it's a bug
# (NOTE(review): these messages are typically emitted when a compiled
# extension was built against a different numpy ABI — harmless here.)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# Get NSC directories
def getnscdirs(version=None, host=None):
    """Return (basedir, tmproot) for the NSC InstCal reduction.

    Parameters
    ----------
    version : str, optional
        NSC version subdirectory; a trailing slash is appended if missing.
    host : str, optional
        Short host name.  Defaults to this machine's hostname (first
        dotted component).  Added (backward-compatibly) so the mapping
        can be exercised independently of the running machine.

    Raises
    ------
    ValueError
        For an unrecognized host.  (BUG FIX: the original fell through
        with basedir/tmproot unassigned and raised a confusing NameError.)
    """
    if host is None:
        # Host: short name of this machine
        host = socket.gethostname().split('.')[0]
    # Version subdirectory
    verdir = ""
    if version is not None:
        verdir = version if version.endswith('/') else version + "/"
    # on thing/hulk use the local dl1 mounts
    if host in ("thing", "hulk"):
        return ("/dl1/users/dnidever/nsc/instcal/" + verdir,
                "/d0/dnidever/nsc/instcal/" + verdir + "tmp/")
    # on gp05-gp09 use the /net mounts
    if host in ("gp09", "gp08", "gp07", "gp06", "gp05"):
        return ("/net/dl1/users/dnidever/nsc/instcal/" + verdir,
                "/data0/dnidever/nsc/instcal/" + verdir + "tmp/")
    raise ValueError("Unknown host '%s': cannot determine NSC directories" % host)
# Class to represent an exposure to process
class Exposure:
    """One InstCal exposure (flux + weight + mask FITS files).

    Workflow (see run()): copy the images into a unique scratch
    directory, process every extension/chip with the Chip class, then
    move the log to the output tree and delete the scratch directory.
    """

    # Initialize Exposure object
    def __init__(self,fluxfile,wtfile,maskfile,nscversion="t3a"):
        """Record file paths and read basic exposure metadata.

        NOTE(review): on missing files or an unknown instrument this
        returns early, leaving a partially-initialized object rather
        than raising — callers must check attributes before use.
        """
        # Check that the files exist
        if os.path.exists(fluxfile) is False:
            print(fluxfile+" NOT found")
            return
        if os.path.exists(wtfile) is False:
            print(wtfile+" NOT found")
            return
        if os.path.exists(maskfile) is False:
            print(maskfile+" NOT found")
            return
        # Setting up the object properties
        self.origfluxfile = fluxfile
        self.origwtfile = wtfile
        self.origmaskfile = maskfile
        self.fluxfile = None   # working files in temp dir
        self.wtfile = None     # working files in temp dir
        self.maskfile = None   # working files in temp dir
        base = os.path.basename(fluxfile)
        # strip two extensions, e.g. "name.fits.fz" -> "name"
        base = os.path.splitext(os.path.splitext(base)[0])[0]
        self.base = base
        self.nscversion = nscversion
        self.logfile = base+".log"
        self.logger = None
        self.origdir = None
        self.wdir = None       # the temporary working directory
        self.outdir = None
        self.chip = None
        # Get instrument from the primary header's DTINSTRU keyword
        head0 = fits.getheader(fluxfile,0)
        if head0["DTINSTRU"] == 'mosaic3':
            self.instrument = 'k4m'
        elif head0["DTINSTRU"] == '90prime':
            self.instrument = 'ksb'
        elif head0["DTINSTRU"] == 'decam':
            self.instrument = 'c4d'
        else:
            print("Cannot determine instrument type")
            return
        # Get number of extensions
        hdulist = fits.open(fluxfile)
        nhdu = len(hdulist)
        hdulist.close()
        self.nexten = nhdu
        # Get night as YYYYMMDD from DATE-OBS
        dateobs = head0.get("DATE-OBS")
        night = dateobs[0:4]+dateobs[5:7]+dateobs[8:10]
        self.night = night
        # Output directory: <basedir>/<instrument>/<night>/<base>/
        basedir,tmpdir = getnscdirs(nscversion)
        self.outdir = basedir+self.instrument+"/"+self.night+"/"+self.base+"/"

    # Setup
    def setup(self):
        """Create a unique temp working directory, cd into it, start
        file+console logging, and copy/symlink the InstCal images there."""
        basedir,tmproot = getnscdirs(self.nscversion)
        # Prepare temporary directory with a numeric suffix to make it unique
        tmpcntr = 1
        tmpdir = tmproot+self.base+"."+str(tmpcntr)
        while (os.path.exists(tmpdir)):
            tmpcntr = tmpcntr+1
            tmpdir = tmproot+self.base+"."+str(tmpcntr)
            if tmpcntr > 20:
                print("Temporary Directory counter getting too high. Exiting")
                sys.exit()
        os.mkdir(tmpdir)
        origdir = os.getcwd()
        self.origdir = origdir
        os.chdir(tmpdir)
        self.wdir = tmpdir
        # Set up logging to screen and logfile
        logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
        rootLogger = logging.getLogger()
        # file handler
        fileHandler = logging.FileHandler(self.logfile)
        fileHandler.setFormatter(logFormatter)
        rootLogger.addHandler(fileHandler)
        # console/screen handler
        consoleHandler = logging.StreamHandler()
        consoleHandler.setFormatter(logFormatter)
        rootLogger.addHandler(consoleHandler)
        rootLogger.setLevel(logging.NOTSET)
        self.logger = rootLogger
        self.logger.info("Setting up in temporary directory "+tmpdir)
        self.logger.info("Starting logfile at "+self.logfile)
        # Copy over images from zeus1:/mss, then symlink them to fixed
        # local names ("bigflux"/"bigwt"/"bigmask") used by later steps.
        fluxfile = "bigflux.fits.fz"
        wtfile = "bigwt.fits.fz"
        maskfile = "bigmask.fits.fz"
        self.logger.info("Copying InstCal images from mass store archive")
        shutil.copyfile(basedir+os.path.basename(self.origfluxfile),tmpdir+"/"+os.path.basename(self.origfluxfile))
        self.logger.info(" "+self.origfluxfile)
        if (os.path.basename(self.origfluxfile) != fluxfile):
            os.symlink(os.path.basename(self.origfluxfile),fluxfile)
        shutil.copyfile(basedir+os.path.basename(self.origwtfile),tmpdir+"/"+os.path.basename(self.origwtfile))
        self.logger.info(" "+self.origwtfile)
        if (os.path.basename(self.origwtfile) != wtfile):
            os.symlink(os.path.basename(self.origwtfile),wtfile)
        shutil.copyfile(basedir+os.path.basename(self.origmaskfile),tmpdir+"/"+os.path.basename(self.origmaskfile))
        self.logger.info(" "+self.origmaskfile)
        if (os.path.basename(self.origmaskfile) != maskfile):
            os.symlink(os.path.basename(self.origmaskfile),maskfile)
        # Set local working filenames
        self.fluxfile = fluxfile
        self.wtfile = wtfile
        self.maskfile = maskfile
        # Make final output directory
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)   # will make multiple levels of directories if necessary
            self.logger.info("Making output directory: "+self.outdir)

    # Load chip
    def loadchip(self,extension,fluxfile="flux.fits",wtfile="wt.fits",maskfile="mask.fits"):
        """Extract one extension from the big flux/wt/mask files into
        single-chip FITS files and create the corresponding Chip object."""
        # Load the data
        self.logger.info(" Loading chip "+str(extension))
        # Check that the working files set by "setup"
        if (self.fluxfile is None) | (self.wtfile is None) | (self.maskfile is None):
            self.logger.warning("Local working filenames not set. Make sure to run setup() first")
            return
        try:
            flux,fhead = fits.getdata(self.fluxfile,extension,header=True)
            fhead0 = fits.getheader(self.fluxfile,0)  # add PDU info
            fhead.extend(fhead0,unique=True)
            wt,whead = fits.getdata(self.wtfile,extension,header=True)
            mask,mhead = fits.getdata(self.maskfile,extension,header=True)
        except:
            self.logger.error("No extension "+str(extension))
            return
        # Write the data to the appropriate files (overwrite any leftovers)
        if os.path.exists(fluxfile):
            os.remove(fluxfile)
        fits.writeto(fluxfile,flux,header=fhead,output_verify='warn')
        if os.path.exists(wtfile):
            os.remove(wtfile)
        fits.writeto(wtfile,wt,header=whead,output_verify='warn')
        if os.path.exists(maskfile):
            os.remove(maskfile)
        fits.writeto(maskfile,mask,header=mhead,output_verify='warn')
        # Create the chip object
        self.chip = Chip(fluxfile,wtfile,maskfile,self.base)
        self.chip.bigextension = extension
        self.chip.nscversion = self.nscversion
        self.chip.outdir = self.outdir
        # Add logger information
        self.chip.logger = self.logger

    # Process all chips
    def process(self):
        """Loop over all extensions (1..nexten-1), load and process each chip."""
        self.logger.info("-------------------------------------------------")
        self.logger.info("Processing ALL extension images")
        self.logger.info("-------------------------------------------------")
        # LOOP through the HDUs/chips
        #----------------------------
        for i in range(1,self.nexten):
            t0 = time.time()
            self.logger.info(" ")
            self.logger.info("=== Processing subimage "+str(i)+" ===")
            # Load the chip
            self.loadchip(i)
            self.logger.info("CCDNUM = "+str(self.chip.ccdnum))
            # Process it
            self.chip.process()
            # Clean up
            self.chip.cleanup()
            self.logger.info("dt = "+str(time.time()-t0)+" seconds")

    # Teardown
    def teardown(self):
        """Move the log to the output tree, delete the scratch directory,
        and cd back to the original working directory."""
        # Delete files and temporary directory
        self.logger.info("Deleting files and temporary directory.")
        # Move the final log file
        shutil.move(self.logfile,self.outdir+self.base+".log")
        # Delete temporary files and directory
        tmpfiles = glob.glob("*")
        for f in tmpfiles: os.remove(f)
        os.rmdir(self.wdir)
        # CD back to original directory
        os.chdir(self.origdir)

    # RUN all steps to process this exposure
    def run(self):
        """Convenience driver: setup -> process -> teardown."""
        self.setup()
        self.process()
        self.teardown()
# Class to represent a single chip of an exposure
class Chip:
def __init__(self,fluxfile,wtfile,maskfile,bigbase):
self.fluxfile = fluxfile
self.wtfile = wtfile
self.maskfile = maskfile
self.bigbase = bigbase
self.bigextension = None
base = os.path.basename(fluxfile)
base = os.path.splitext(os.path.splitext(base)[0])[0]
self.dir = os.path.abspath(os.path.dirname(fluxfile))
self.base = base
self.meta = makemeta(header=fits.getheader(fluxfile,0))
self.sexfile = self.dir+"/"+self.base+"_sex.fits"
self.daofile = self.dir+"/"+self.base+"_dao.fits"
self.sexcatfile = None
self.sexcat = None
self.seeing = None
self.apcorr = None
# Internal hidden variables
self._rdnoise = None
self._gain = None
self._ccdnum = None
self._pixscale = None
self._saturate = None
self._wcs = None
self._exptime = None
self._instrument = None
self._plver = None
self._cpfwhm = None
self._daomaglim = None # set by daoaperphot()
self._sexmaglim = None # set by runsex()
# Logger
self.logger = None
def __repr__(self):
return "Chip object"
@property
def rdnoise(self):
# We have it already, just return it
if self._rdnoise is not None:
return self._rdnoise
# Can't get rdnoise, no header yet
if self.meta is None:
self.logger.warning("Cannot get RDNOISE, no header yet")
return None
# Check DECam style rdnoise
if "RDNOISEA" in self.meta.keys():
rdnoisea = self.meta["RDNOISEA"]
rdnoiseb = self.meta["RDNOISEB"]
rdnoise = (rdnoisea+rdnoiseb)*0.5
self._rdnoise = rdnoise
return self._rdnoise
# Get rdnoise from the header
for name in ['RDNOISE','READNOIS','ENOISE']:
# We have this key, set _rndoise and return
if name in self.meta.keys():
self._rdnoise = self.meta[name]
return self._rdnoise
self.logger.warning('No RDNOISE found')
return None
@property
def gain(self):
# We have it already, just return it
if self._gain is not None:
return self._gain
try:
gainmap = { 'c4d': lambda x: 0.5*(x.get('gaina')+x.get('gainb')),
'k4m': lambda x: x.get('gain'),
'ksb': lambda x: [1.3,1.5,1.4,1.4][ccdnum-1] } # bok gain in HDU0, use list here
gain = gainmap[self.instrument](self.meta)
except:
gainmap_avg = { 'c4d': 3.9845419, 'k4m': 1.8575, 'ksb': 1.4}
gain = gainmap_avg[self.instrument]
self._gain = gain
return self._gain
## Can't get gain, no header yet
#if self.meta is None:
# print("Cannot get GAIN, no header yet")
## Get rdnoise from the header
#for name in ['GAIN','EGAIN']:
# # We have this key, set _gain and return
# if self.meta.has_key(name):
# self._gain = self.meta[name]
# return self._gain
#print('No GAIN found')
#return None
@property
def ccdnum(self):
# We have it already, just return it
if self._ccdnum is not None:
return self._ccdnum
# Can't get ccdnum, no header yet
if self.meta is None:
self.logger.warning("Cannot get CCDNUM, no header yet")
return None
# Get ccdnum from the header
# We have this key, set _rndoise and return
if 'CCDNUM' in self.meta.keys():
self._ccdnum = self.meta['CCDNUM']
return self._ccdnum
self.logger.warning('No CCDNUM found')
return None
@property
def pixscale(self):
# We have it already, just return it
if self._pixscale is not None:
return self._pixscale
pixmap = { 'c4d': 0.27, 'k4m': 0.258, 'ksb': 0.45 }
try:
pixscale = pixmap[self.instrument]
self._pixscale = pixscale
return self._pixscale
except:
self._pixscale = np.max(np.abs(self.wcs.pixel_scale_matrix))
return self._pixscale
@property
def saturate(self):
# We have it already, just return it
if self._saturate is not None:
return self._saturate
# Can't get saturate, no header yet
if self.meta is None:
self.logger.warning("Cannot get SATURATE, no header yet")
return None
# Get saturate from the header
# We have this key, set _saturate and return
if 'SATURATE' in self.meta.keys():
self._saturate = self.meta['SATURATE']
return self._saturate
self.logger.warning('No SATURATE found')
return None
@property
def wcs(self):
# We have it already, just return it
if self._wcs is not None:
return self._wcs
# Can't get wcs, no header yet
if self.meta is None:
self.logger.warning("Cannot get WCS, no header yet")
return None
try:
self._wcs = WCS(self.meta)
return self._wcs
except:
self.logger.warning("Problem with WCS")
return None
@property
def exptime(self):
# We have it already, just return it
if self._exptime is not None:
return self._exptime
# Can't get exptime, no header yet
if self.meta is None:
self.logger.warning("Cannot get EXPTIME, no header yet")
return None
# Get rdnoise from the header
# We have this key, set _rndoise and return
if 'EXPTIME' in self.meta.keys():
self._exptime = self.meta['EXPTIME']
return self._exptime
print('No EXPTIME found')
return None
@property
def instrument(self):
# We have it already, just return it
if self._instrument is not None:
return self._instrument
# Can't get instrument, no header yet
if self.meta is None:
self.logger.warning("Cannot get INSTRUMENT, no header yet")
return None
# instrument, c4d, k4m or ksb
# DTINSTRU = 'mosaic3 '
# DTTELESC = 'kp4m '
# Bok 90Prime data has
if self.meta.get("DTINSTRU") == 'mosaic3':
self._instrument = 'k4m'
return self._instrument
elif self.meta.get("DTINSTRU") == '90prime':
self._instrument = 'ksb'
return self._instrument
else:
self._instrument = 'c4d'
return self._instrument
@property
def plver(self):
# We have it already, just return it
if self._plver is not None:
return self._plver
# Can't get plver, no header yet
if self.meta is None:
self.logger.warning("Cannot get PLVER, no header yet")
return None
plver = self.meta.get('PLVER')
if plver is None:
self._plver = 'V1.0'
self._plver = plver
return self._plver
@property
def cpfwhm(self):
# We have it already, just return it
if self._cpfwhm is not None:
return self._cpfwhm
# Can't get fwhm, no header yet
if self.meta is None:
self.logger.warning("Cannot get CPFWHM, no header yet")
return None
# FWHM values are ONLY in the extension headers
cpfwhm_map = { 'c4d': 1.5 if self.meta.get('FWHM') is None else self.meta.get('FWHM')*0.27,
'k4m': 1.5 if self.meta.get('SEEING1') is None else self.meta.get('SEEING1'),
'ksb': 1.5 if self.meta.get('SEEING1') is None else self.meta.get('SEEING1') }
cpfwhm = cpfwhm_map[self.instrument]
self._cpfwhm = cpfwhm
return self._cpfwhm
@property
def maglim(self):
# We have it already, just return it
if self._daomaglim is not None:
return self._daomaglim
if self._sexmaglim is not None:
return self._sexmaglim
self.logger.warning('Maglim not set yet')
return None
# Write SE catalog in DAO format
def sextodao(self,cat=None,outfile=None,format="coo"):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
if outfile is None: outfile=daobase+".coo"
if cat is None: cat=self.sexcat
sextodao(self.sexcat,self.meta,outfile=outfile,format=format,logger=self.logger)
# Run Source Extractor
#---------------------
def runsex(self,outfile=None):
basedir, tmpdir = getnscdirs(self.nscversion)
configdir = basedir+"config/"
sexcatfile = "flux_sex.cat.fits"
sexcat, maglim = runsex(self.fluxfile,self.wtfile,self.maskfile,self.meta,sexcatfile,configdir,logger=self.logger)
self.sexcat = sexcatfile
self.sexcat = sexcat
self._sexmaglim = maglim
# Set the FWHM as well
fwhm = sexfwhm(sexcat,logger=self.logger)
self.meta['FWHM'] = fwhm
# Determine FWHM using SE catalog
#--------------------------------
def sexfwhm(self):
self.seeing = sexfwhm(self.sexcat)
return self.seeing
# Pick PSF candidates using SE catalog
#-------------------------------------
def sexpickpsf(self,nstars=100):
base = os.path.basename(self.sexfile)
base = os.path.splitext(os.path.splitext(base)[0])[0]
fwhm = self.sexfwhm() if self.seeing is None else self.seeing
psfcat = sexpickpsf(self.sexcat,fwhm,self.meta,base+".lst",nstars=nstars,logger=self.logger)
# Make DAOPHOT option files
#--------------------------
#def mkopt(self,**kwargs):
def mkopt(self):
base = os.path.basename(self.daofile)
base = os.path.splitext(os.path.splitext(base)[0])[0]
#mkopt(base,self.meta,logger=self.logger,**kwargs)
mkopt(base,self.meta,logger=self.logger)
# Make image ready for DAOPHOT
def mkdaoim(self):
mkdaoim(self.fluxfile,self.wtfile,self.maskfile,self.meta,self.daofile,logger=self.logger)
# DAOPHOT detection
#----------------------
def daofind(self):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
cat = daofind(self.daofile,outfile=daobase+".coo",logger=self.logger)
# DAOPHOT aperture photometry
#----------------------------
def daoaperphot(self):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
apcat, maglim = daoaperphot(self.daofile,daobase+".coo",outfile=daobase+".ap",logger=self.logger)
self._daomaglim = maglim
# Pick PSF stars using DAOPHOT
#-----------------------------
def daopickpsf(self,maglim=None,nstars=100):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
if maglim is None: maglim=self.maglim
psfcat = daopickpsf(self.daofile,daobase+".ap",maglim,daobase+".lst",nstars,logger=self.logger)
# Run DAOPHOT PSF
#-------------------
def daopsf(self,verbose=False):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
psfcat = daopsf(self.daofile,daobase+".lst",outfile=daobase+".psf",verbose=verbose,logger=self.logger)
# Subtract neighbors of PSF stars
#--------------------------------
def subpsfnei(self):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
psfcat = subpsfnei(self.daofile,daobase+".lst",daobase+".nei",daobase+"a.fits",logger=self.logger)
# Create DAOPHOT PSF
#-------------------
def createpsf(self,listfile=None,apfile=None,doiter=True,maxiter=5,minstars=6,subneighbors=True,verbose=False):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
createpsf(daobase+".fits",daobase+".ap",daobase+".lst",meta=self.meta,logger=self.logger)
# Run ALLSTAR
#-------------
def allstar(self,psffile=None,apfile=None,subfile=None):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
alscat = allstar(daobase+".fits",daobase+".psf",daobase+".ap",outfile=daobase+".als",meta=self.meta,logger=self.logger)
# Get aperture correction
#------------------------
def getapcor(self):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
apcorr = apcor(daobase+"a.fits",daobase+".lst",daobase+".psf",self.meta,optfile=daobase+'.opt',alsoptfile=daobase+".als.opt",logger=self.logger)
self.apcorr = apcorr
self.meta['apcor'] = (apcorr,"Aperture correction in mags")
# Combine SE and DAOPHOT catalogs
#--------------------------------
def finalcat(self,outfile=None,both=True,sexdetect=True):
    """Combine the Source Extractor and ALLSTAR catalogs into one FITS file.

    Parameters
    ----------
    outfile : str, optional
        Output catalog name; defaults to self.base+".cat.fits".
    both : bool, optional
        Only keep sources that have BOTH SE and ALLSTAR information.
    sexdetect : bool, optional
        SE catalog was used for the DAOPHOT detection list, so sources
        can be matched by ID instead of by coordinates.
    """
    self.logger.info("-- Creating final combined catalog --")
    daobase = os.path.basename(self.daofile)
    daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
    if outfile is None: outfile=self.base+".cat.fits"
    # Check that we have the SE and ALS information.
    # BUGFIX: os.path.exists() returns a bool, never None, so the old
    # "is None" comparison could never detect a missing ALS file.
    if (self.sexcat is None) or (not os.path.exists(daobase+".als")):
        self.logger.warning("SE catalog or ALS catalog NOT found")
        return
    # Load ALS catalog
    als = Table(daoread(daobase+".als"))
    nals = len(als)
    # Apply aperture correction
    if self.apcorr is None:
        self.logger.error("No aperture correction available")
        return
    als['MAG'] -= self.apcorr
    # Just add columns to the SE catalog
    ncat = len(self.sexcat)
    newcat = self.sexcat.copy()
    alsnames = ['X','Y','MAG','ERR','SKY','ITER','CHI','SHARP']
    newnames = ['XPSF','YPSF','MAGPSF','ERRPSF','SKY','ITER','CHI','SHARP','RAPSF','DECPSF']
    newtypes = ['float64','float64','float','float','float','float','float','float','float64','float64']
    nan = float('nan')
    newvals = [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan]
    # DAOPHOT detection list used, need ALS ID
    if not sexdetect:
        alsnames = ['ID']+alsnames
        newnames = ['ALSID']+newnames
        newtypes = ['int32']+newtypes
        newvals = [-1]+newvals
    newcols = []
    for n,t,v in zip(newnames,newtypes,newvals):
        col = Column(name=n,length=ncat,dtype=t)
        col[:] = v
        newcols.append(col)
    newcat.add_columns(newcols)
    # Match up with IDs if SE list used by DAOPHOT
    if sexdetect:
        mid, ind1, ind2 = np.intersect1d(newcat["NUMBER"],als["ID"],return_indices=True)
        for id1,id2 in zip(newnames,alsnames):
            newcat[id1][ind1] = als[id2][ind2]
        # Only keep sources that have SE+ALLSTAR information
        # trim out ones that don't have ALS
        if (both is True) & (nals<ncat): newcat = newcat[ind1]
    # Match up with coordinates, DAOPHOT detection list used
    else:
        print("Need to match up with coordinates")
        # BUGFIX: the old code reused "ind1" here, which is only defined
        # in the sexdetect branch and raised NameError; coordinate-based
        # matching is not implemented yet, so no trimming is done here.
    # Add RA, DEC from the PSF (x,y) positions (1-indexed pixels)
    r,d = self.wcs.all_pix2world(newcat["XPSF"],newcat["YPSF"],1)
    newcat['RAPSF'] = r
    newcat['DECPSF'] = d
    # Write to file
    self.logger.info("Final catalog = "+outfile)
    fits.writeto(outfile,None,self.meta,overwrite=True) # meta in PDU header
    # append the table in extension 1
    hdulist = fits.open(outfile)
    hdu = fits.table_to_hdu(newcat)
    hdulist.append(hdu)
    hdulist.writeto(outfile,overwrite=True)
    hdulist.close()
# Process a single chip
#----------------------
def process(self):
    """Run the full SE + DAOPHOT pipeline for one chip:
    SE detection, DAOPHOT setup, aperture photometry, PSF creation,
    ALLSTAR, aperture correction, and the final combined catalog."""
    self.runsex()
    self.logger.info("-- Getting ready to run DAOPHOT --")
    self.mkopt()
    self.mkdaoim()
    #self.daodetect()
    # Create DAOPHOT-style coo file
    # Need to use SE positions
    self.sextodao(outfile="flux_dao.coo")
    self.daoaperphot()
    self.daopickpsf()
    self.createpsf()
    self.allstar()
    self.getapcor()
    self.finalcat()
    # Do I need to rerun daoaperphot to get aperture
    # photometry at the FINAL allstar positions??
    # Is there a way to reduce the number of iterations needed to create the PSF?
    # what do the ?, * mean anyway?
    # maybe just remove the worse 10% of stars or something
    # Put all of the daophot-running into separate function (maybe separate module)
    # same for sextractor
    # Maybe make my own xmatch function that does one-to-one matching
# Clean up the files
#--------------------
def cleanup(self):
    """Copy the final products to the output directory, concatenate the
    per-step log files into one, and delete the temporary working files."""
    self.logger.info("Copying final files to output directory "+self.outdir)
    base = os.path.basename(self.fluxfile)
    base = os.path.splitext(os.path.splitext(base)[0])[0]
    daobase = os.path.basename(self.daofile)
    daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
    # Common prefix for all output file names.
    outbase = self.outdir+self.bigbase+"_"+str(self.ccdnum)

    def install(src, dst):
        # Copy src to dst, removing any pre-existing destination first.
        if os.path.exists(dst): os.remove(dst)
        shutil.copyfile(src, dst)

    # final combined catalog
    install("flux.cat.fits", outbase+".fits")
    # DAOPHOT opt files
    install(daobase+".opt", outbase+".opt")
    install(daobase+".als.opt", outbase+".als.opt")
    # DAOPHOT PSF star list
    install(daobase+".lst", outbase+".psf.lst")
    # DAOPHOT PSF file
    install(daobase+".psf", outbase+".psf")
    # Copy DAOPHOT .apers file??
    # Allstar PSF-subtracted image
    install(daobase+"s.fits", outbase+"s.fits")
    # SE config file
    install("default.config", outbase+".sex.config")
    # Combine all the log files (head-style "==> name <==" separators)
    loglines = []
    for logfil in glob.glob(base+"*.log"):
        loglines += ["==> "+logfil+" <==\n"]
        with open(logfil,'r') as f:
            loglines += f.readlines()
        loglines += ["\n"]
    with open(base+".logs","w") as f:
        f.writelines("".join(loglines))
    install(base+".logs", outbase+".logs")
    # Delete temporary directory/files
    self.logger.info(" Cleaning up")
    files = glob.glob("flux*") + glob.glob("default*") + \
            ["flux.fits","wt.fits","mask.fits","daophot.opt","allstar.opt"]
    for f in files:
        if os.path.exists(f): os.remove(f)
# Main command-line program
if __name__ == "__main__":
    # Optional 4th argument is the NSC version string.
    verdir = ""
    if len(sys.argv) > 4:
        version = sys.argv[4]
        verdir = version if version.endswith('/') else version+"/"
    else:
        version = None
    # Get NSC directories and make sure they exist
    basedir, tmpdir = getnscdirs(version)
    if not os.path.exists(basedir):
        os.makedirs(basedir)
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    t0 = time.time()
    print(sys.argv)
    # Not enough inputs
    n = len(sys.argv)
    if n < 4:
        print("Syntax - nsc_instcal_sexdaophot.py fluxfile wtfile maskfile version")
        sys.exit()
    # File names
    fluxfile = sys.argv[1]
    wtfile = sys.argv[2]
    maskfile = sys.argv[3]
    # Check that the input files exist.
    # BUGFIX: the mask-file branch printed the undefined name "maskile",
    # which would raise NameError instead of the intended message.
    for fil in (fluxfile, wtfile, maskfile):
        if not os.path.exists(fil):
            print(fil+" file NOT FOUND")
            sys.exit()
    # Create the Exposure object and run the pipeline
    exp = Exposure(fluxfile,wtfile,maskfile,nscversion=version)
    exp.run()
    print("Total time = "+str(time.time()-t0)+" seconds")
| 31,643 | 10,394 |
import math
# Constants
ELEMENTARY_CHARGE = 1.602176634e-19 # Coulombs (exact value, 2019 SI definition)
# Conversions
ATM_TO_PA = 101325  # standard atmospheres -> pascals
MBAR_TO_PA = 100  # millibar -> pascals
# Gaussian standard deviation -> full width at half maximum:
# FWHM = 2*sqrt(2*ln 2) * sigma  (~2.3548)
STD_TO_FWHM = 2*math.sqrt(2*math.log(2))
| 165 | 97 |
import addict
class Dict(addict.Dict):
    """addict.Dict whose repr is a bulleted list of top-level keys."""
    def __init__(self, *args, **kwargs):
        # BUGFIX: the old code called super().__init__(self, *args, **kwargs),
        # passing self as the first positional mapping for addict to copy
        # from; self must not be forwarded.
        super().__init__(*args, **kwargs)
    def __repr__(self):
        # One "* key" line per top-level entry (values are omitted).
        overview = ['* ' + str(key) for key, value in self.items()]
        return '\n'.join(overview)
def sum_ignore_none(*items):
    """Sum *items* while skipping None entries.

    Returns None when every item is None (or no items were given).
    """
    values = [item for item in items if item is not None]
    return sum(values) if values else None
| 486 | 168 |
import subprocess
from glob import glob
import re
from collections import defaultdict
import shutil
import os
import hashlib
# Group binaries in enlightenment_dist/ by the set of shared libraries
# they link against (per ldd), then move each group into a directory
# named after the MD5 of its concatenated library list.
files = glob('enlightenment_dist/*')
libraries = defaultdict(list)
ldds = {}
for f in files:
    try:
        ldd_out = subprocess.check_output(['ldd', f])
    except subprocess.CalledProcessError:
        # not a dynamic executable (or ldd failed) -- skip it
        continue
    libz = ""
    # BUGFIX: check_output() returns bytes on Python 3; decode before
    # matching with a str regex and before hashing.
    for line in ldd_out.decode(errors='replace').splitlines():
        match = re.match(r'\t(.*) =>', line)
        if match:
            libz += match.group(1)
    # BUGFIX: hashlib.md5 requires bytes on Python 3.
    k = hashlib.md5(libz.encode()).hexdigest()
    libraries[k].append(f)
    ldds[k] = ldd_out
# BUGFIX: converted Python 2 print statements to Python 3 calls.
print(libraries.keys())
print(ldds)
for k in libraries.keys():
    try:
        os.mkdir(k)
    except FileExistsError:
        # directory already created on a previous run
        pass
    for f in libraries[k]:
        try:
            shutil.move(f, k+'/')
        except (shutil.Error, OSError):
            # file already moved or destination exists -- best effort
            pass
| 1,007 | 375 |
#!/usr/bin/env python3
import glob
# Build README.md with one entry (name + linked thumbnail) per PNG logo.
header = '''
# Logos for Jutge.org
'''
entries = [
    '''- %s\n\n  <a href='%s'><img src='%s' height='200'></a>\n\n''' % (png, png, png)
    for png in sorted(glob.glob('*.png'))
]
with open('README.md', 'w') as file:
    file.write(header + ''.join(entries))
| 270 | 123 |
import json
import numpy as np
import torch
from pathlib import Path
import pandas as pd
import pydicom
from ast import literal_eval
from adpkd_segmentation.data.data_utils import (
get_labeled,
get_y_Path,
int16_to_uint8,
make_dcmdicts,
path_2dcm_int16,
path_2label,
TKV_update,
)
from adpkd_segmentation.data.data_utils import (
KIDNEY_PIXELS,
STUDY_TKV,
VOXEL_VOLUME,
)
from adpkd_segmentation.datasets.filters import PatientFiltering
class SegmentationDataset(torch.utils.data.Dataset):
    """Labeled DICOM segmentation dataset.

    Yields (image, mask) pairs -- plus the item index when output_idx is
    True.  image is a float32 (3, H, W) array and mask a float32 one-hot
    (C, H, W) array.

    Parameters
    ----------
    label2mask : callable
        Converts a label array into a one-hot mask.
    dcm2attribs : dict
        Maps dcm path -> attribute dict (refreshed by TKV_update below).
    patient2dcm : dict
        Maps patient ID -> list of dcm paths.
    patient_IDS : list, optional
        Explicit patient ordering (kept for compatibility with previous
        experiments); defaults to patient2dcm key order.
    augmentation : callable, optional
        Transform taking image=/mask= keyword arguments (albumentations
        style -- TODO confirm).
    smp_preprocessing : callable, optional
        Preprocessing applied to an (H, W, 3) image.
    normalization : callable, optional
        Custom normalization; default is per-image int16 -> uint8 scaling.
    output_idx : bool, optional
        If True, __getitem__ also returns the item index.
    attrib_types : dict, optional
        Attributes stored as per-item tensors; defaults to TKV-related ones.
    """

    def __init__(
        self,
        label2mask,
        dcm2attribs,
        patient2dcm,
        patient_IDS=None,
        augmentation=None,
        smp_preprocessing=None,
        normalization=None,
        output_idx=False,
        attrib_types=None,
    ):
        super().__init__()
        self.label2mask = label2mask
        self.dcm2attribs = dcm2attribs
        self.pt2dcm = patient2dcm
        self.patient_IDS = patient_IDS
        self.augmentation = augmentation
        self.smp_preprocessing = smp_preprocessing
        self.normalization = normalization
        self.output_idx = output_idx
        self.attrib_types = attrib_types
        # store some attributes as PyTorch tensors
        if self.attrib_types is None:
            self.attrib_types = {
                STUDY_TKV: "float32",
                KIDNEY_PIXELS: "float32",
                VOXEL_VOLUME: "float32",
            }
        self.patients = list(patient2dcm.keys())
        # kept for compatibility with previous experiments
        # following patient order in patient_IDS
        if patient_IDS is not None:
            self.patients = patient_IDS
        self.dcm_paths = []
        for p in self.patients:
            self.dcm_paths.extend(patient2dcm[p])
        self.label_paths = [get_y_Path(dcm) for dcm in self.dcm_paths]
        # study_id to TKV and TKV for each dcm
        self.studies, self.dcm2attribs = TKV_update(dcm2attribs)
        # storing attrib types as tensors
        self.tensor_dict = self.prepare_tensor_dict(self.attrib_types)

    def __getitem__(self, index):
        """Return (image, mask[, index]) for one slice; slices of the
        dataset return a list of such tuples."""
        if isinstance(index, slice):
            return [self[ii] for ii in range(*index.indices(len(self)))]
        # numpy int16, (H, W)
        im_path = self.dcm_paths[index]
        image = path_2dcm_int16(im_path)
        # image local scaling by default to convert to uint8
        if self.normalization is None:
            image = int16_to_uint8(image)
        else:
            image = self.normalization(image, self.dcm2attribs[im_path])
        label = path_2label(self.label_paths[index])
        # numpy uint8, one hot encoded (C, H, W)
        mask = self.label2mask(label[np.newaxis, ...])
        if self.augmentation is not None:
            # augmentation requires (H, W, C) or (H, W)
            mask = mask.transpose(1, 2, 0)
            sample = self.augmentation(image=image, mask=mask)
            image, mask = sample["image"], sample["mask"]
            # get back to (C, H, W)
            mask = mask.transpose(2, 0, 1)
        # convert to float
        image = (image / 255).astype(np.float32)
        mask = mask.astype(np.float32)
        # smp preprocessing requires (H, W, 3)
        if self.smp_preprocessing is not None:
            image = np.repeat(image[..., np.newaxis], 3, axis=-1)
            image = self.smp_preprocessing(image).astype(np.float32)
            # get back to (3, H, W)
            image = image.transpose(2, 0, 1)
        else:
            # stack image to (3, H, W)
            image = np.repeat(image[np.newaxis, ...], 3, axis=0)
        if self.output_idx:
            return image, mask, index
        return image, mask

    def __len__(self):
        return len(self.dcm_paths)

    def get_verbose(self, index):
        """returns more details than __getitem__()

        Args:
            index (int): index in dataset

        Returns:
            tuple: sample, dcm_path, attributes dict
        """
        sample = self[index]
        dcm_path = self.dcm_paths[index]
        attribs = self.dcm2attribs[dcm_path]
        return sample, dcm_path, attribs

    def get_extra_dict(self, batch_of_idx):
        """Index the precomputed attribute tensors with a batch of indices."""
        return {k: v[batch_of_idx] for k, v in self.tensor_dict.items()}

    def prepare_tensor_dict(self, attrib_types):
        """Precompute {attrib_name: per-item tensor} for get_extra_dict()."""
        tensor_dict = {}
        for k, v in attrib_types.items():
            tensor_dict[k] = torch.zeros(
                self.__len__(), dtype=getattr(torch, v)
            )
        # PERF FIX: iterate indices directly; the previous
        # "for idx, _ in enumerate(self)" called __getitem__ for every
        # item, loading and transforming every image just to get its
        # index, even though only dcm_paths/attribs are needed here.
        for idx in range(len(self)):
            dcm_path = self.dcm_paths[idx]
            attribs = self.dcm2attribs[dcm_path]
            for k, v in tensor_dict.items():
                v[idx] = attribs[k]
        return tensor_dict
class DatasetGetter:
    """Create SegmentationDataset"""

    def __init__(
        self,
        splitter,
        splitter_key,
        label2mask,
        augmentation=None,
        smp_preprocessing=None,
        filters=None,
        normalization=None,
        output_idx=False,
        attrib_types=None,
    ):
        """Collect labeled dcm paths, filter them, and split by patient.

        splitter : callable mapping the full patient-ID list to a dict of
            splits; splitter_key selects one split (e.g. "train"/"val").
        filters : optional callable applied to (dcm2attribs, patient2dcm)
            before splitting.
        The remaining arguments are stored and forwarded unchanged to
        SegmentationDataset in __call__().
        """
        super().__init__()
        self.splitter = splitter
        self.splitter_key = splitter_key
        self.label2mask = label2mask
        self.augmentation = augmentation
        self.smp_preprocessing = smp_preprocessing
        self.filters = filters
        self.normalization = normalization
        self.output_idx = output_idx
        self.attrib_types = attrib_types
        dcms_paths = sorted(get_labeled())
        print(
            "The number of images before splitting and filtering: {}".format(
                len(dcms_paths)
            )
        )
        dcm2attribs, patient2dcm = make_dcmdicts(tuple(dcms_paths))
        if filters is not None:
            dcm2attribs, patient2dcm = filters(dcm2attribs, patient2dcm)
        self.all_patient_IDS = list(patient2dcm.keys())
        # train, val, or test
        self.patient_IDS = self.splitter(self.all_patient_IDS)[
            self.splitter_key
        ]
        # keep only the dcms/attribs belonging to the selected patients
        patient_filter = PatientFiltering(self.patient_IDS)
        self.dcm2attribs, self.patient2dcm = patient_filter(
            dcm2attribs, patient2dcm
        )
        if self.normalization is not None:
            self.normalization.update_dcm2attribs(self.dcm2attribs)

    def __call__(self):
        """Build the SegmentationDataset for the selected split."""
        return SegmentationDataset(
            label2mask=self.label2mask,
            dcm2attribs=self.dcm2attribs,
            patient2dcm=self.patient2dcm,
            patient_IDS=self.patient_IDS,
            augmentation=self.augmentation,
            smp_preprocessing=self.smp_preprocessing,
            normalization=self.normalization,
            output_idx=self.output_idx,
            attrib_types=self.attrib_types,
        )
class JsonDatasetGetter:
    """Get the dataset from a prepared patient ID split"""

    def __init__(
        self,
        json_path,
        splitter_key,
        label2mask,
        augmentation=None,
        smp_preprocessing=None,
        normalization=None,
        output_idx=False,
        attrib_types=None,
    ):
        """Load a saved patient-ID split from JSON and build the dicts.

        json_path : path to a JSON file mapping split names to patient-ID
            lists; splitter_key selects one split.
        The remaining arguments are stored and forwarded unchanged to
        SegmentationDataset in __call__().
        """
        super().__init__()
        self.label2mask = label2mask
        self.augmentation = augmentation
        self.smp_preprocessing = smp_preprocessing
        self.normalization = normalization
        self.output_idx = output_idx
        self.attrib_types = attrib_types
        dcms_paths = sorted(get_labeled())
        print(
            "The number of images before splitting and filtering: {}".format(
                len(dcms_paths)
            )
        )
        dcm2attribs, patient2dcm = make_dcmdicts(tuple(dcms_paths))
        print("Loading ", json_path)
        with open(json_path, "r") as f:
            dataset_split = json.load(f)
        self.patient_IDS = dataset_split[splitter_key]
        # filter info dicts to correspond to patient_IDS
        patient_filter = PatientFiltering(self.patient_IDS)
        self.dcm2attribs, self.patient2dcm = patient_filter(
            dcm2attribs, patient2dcm
        )
        if self.normalization is not None:
            self.normalization.update_dcm2attribs(self.dcm2attribs)

    def __call__(self):
        """Build the SegmentationDataset for the selected split."""
        return SegmentationDataset(
            label2mask=self.label2mask,
            dcm2attribs=self.dcm2attribs,
            patient2dcm=self.patient2dcm,
            patient_IDS=self.patient_IDS,
            augmentation=self.augmentation,
            smp_preprocessing=self.smp_preprocessing,
            normalization=self.normalization,
            output_idx=self.output_idx,
            attrib_types=self.attrib_types,
        )
class InferenceDataset(torch.utils.data.Dataset):
    """Unlabeled DICOM dataset for inference.

    Items are float32 (3, H, W) images -- plus the item index when
    output_idx is True -- grouped by series/study and assigned a slice
    position along the Z axis.
    """

    def __init__(
        self,
        dcm2attribs,
        patient2dcm,
        augmentation=None,
        smp_preprocessing=None,
        normalization=None,
        output_idx=False,
        attrib_types=None,
    ):
        super().__init__()
        self.dcm2attribs = dcm2attribs
        self.pt2dcm = patient2dcm
        self.augmentation = augmentation
        self.smp_preprocessing = smp_preprocessing
        self.normalization = normalization
        self.output_idx = output_idx
        self.attrib_types = attrib_types
        self.patients = list(patient2dcm.keys())
        self.dcm_paths = []
        for p in self.patients:
            self.dcm_paths.extend(patient2dcm[p])
        # Sorts studies by Z axis.
        # PERF FIX: read each DICOM file once; the previous code called
        # pydicom.dcmread() separately for every extracted attribute
        # (six full reads per file).
        dcms = [pydicom.dcmread(path) for path in self.dcm_paths]
        studies = [d.SeriesDescription for d in dcms]
        folders = [path.parent.name for path in self.dcm_paths]
        patients = [d.PatientID for d in dcms]
        x_dims = [d.Rows for d in dcms]
        y_dims = [d.Columns for d in dcms]
        z_pos = [
            literal_eval(str(d.ImagePositionPatient))[2] for d in dcms
        ]
        acc_nums = [d.AccessionNumber for d in dcms]
        ser_nums = [d.SeriesNumber for d in dcms]
        data = {
            "dcm_paths": self.dcm_paths,
            "folders": folders,
            "studies": studies,
            "patients": patients,
            "x_dims": x_dims,
            "y_dims": y_dims,
            "z_pos": z_pos,
            "acc_nums": acc_nums,
            "ser_nums": ser_nums,
        }
        group_keys = [
            "folders",
            "studies",
            "patients",
            "x_dims",
            "y_dims",
            "acc_nums",
            "ser_nums",
        ]
        dataset = pd.DataFrame.from_dict(data)
        dataset["slice_pos"] = ""
        grouped_dataset = dataset.groupby(group_keys)
        for (name, group) in grouped_dataset:
            sort_key = "z_pos"
            # handle missing slice position with filename
            if group[sort_key].isna().any():
                sort_key = "dcm_paths"
            zs = list(group[sort_key])
            sorted_idxs = np.argsort(zs)
            # rank of each sort value within its group
            slice_map = {
                zs[idx]: pos for idx, pos in zip(sorted_idxs, range(len(zs)))
            }
            zs_slice_pos = group[sort_key].map(slice_map)
            for i in group.index:
                dataset.at[i, "slice_pos"] = zs_slice_pos.get(i)
        grouped_dataset = dataset.groupby(group_keys)
        for (name, group) in grouped_dataset:
            # NOTE(review): sort_values here acts on a copy of each group
            # and never modifies `dataset`; kept for behavioral parity --
            # confirm whether self.df was actually meant to be re-ordered.
            group.sort_values(by="slice_pos", inplace=True)
        self.df = dataset
        self.dcm_paths = list(dataset["dcm_paths"])

    def __getitem__(self, index):
        """Return the preprocessed image (and the index if output_idx)."""
        if isinstance(index, slice):
            return [self[ii] for ii in range(*index.indices(len(self)))]
        # numpy int16, (H, W)
        im_path = self.dcm_paths[index]
        image = path_2dcm_int16(im_path)
        # image local scaling by default to convert to uint8
        if self.normalization is None:
            image = int16_to_uint8(image)
        else:
            image = self.normalization(image, self.dcm2attribs[im_path])
        if self.augmentation is not None:
            sample = self.augmentation(image=image)
            image = sample["image"]
        # convert to float
        image = (image / 255).astype(np.float32)
        # smp preprocessing requires (H, W, 3)
        if self.smp_preprocessing is not None:
            image = np.repeat(image[..., np.newaxis], 3, axis=-1)
            image = self.smp_preprocessing(image).astype(np.float32)
            # get back to (3, H, W)
            image = image.transpose(2, 0, 1)
        else:
            # stack image to (3, H, W)
            image = np.repeat(image[np.newaxis, ...], 3, axis=0)
        if self.output_idx:
            return image, index
        return image

    def __len__(self):
        return len(self.dcm_paths)

    def get_verbose(self, index):
        """returns more details than __getitem__()

        Args:
            index (int): index in dataset

        Returns:
            tuple: sample, dcm_path, attributes dict
        """
        sample = self[index]
        dcm_path = self.dcm_paths[index]
        attribs = self.dcm2attribs[dcm_path]
        return sample, dcm_path, attribs
class InferenceDatasetGetter:
    """Get the dataset from a prepared patient ID split"""

    def __init__(
        self,
        inference_path,
        augmentation=None,
        smp_preprocessing=None,
        normalization=None,
        output_idx=False,
        attrib_types=None,
    ):
        """Scan inference_path recursively for DICOM files and build the
        attribute/patient dicts needed by InferenceDataset."""
        super().__init__()
        self.augmentation = augmentation
        self.smp_preprocessing = smp_preprocessing
        self.normalization = normalization
        self.output_idx = output_idx
        self.attrib_types = attrib_types
        self.inference_path = Path(inference_path)

        def is_dicom(path):
            # A file counts as DICOM when pydicom can parse its header.
            if not path.is_file():
                return False
            try:
                pydicom.filereader.dcmread(path, stop_before_pixels=True)
            except pydicom.errors.InvalidDicomError:
                return False
            return True

        candidates = set(self.inference_path.glob("**/*"))
        dcms_paths = [p for p in candidates if is_dicom(p)]
        self.dcm2attribs, self.patient2dcm = make_dcmdicts(
            tuple(dcms_paths), label_status=False, WCM=False
        )
        if self.normalization is not None:
            self.normalization.update_dcm2attribs(self.dcm2attribs)

    def __call__(self):
        """Build the InferenceDataset over the discovered DICOM files."""
        return InferenceDataset(
            dcm2attribs=self.dcm2attribs,
            patient2dcm=self.patient2dcm,
            augmentation=self.augmentation,
            smp_preprocessing=self.smp_preprocessing,
            normalization=self.normalization,
            output_idx=self.output_idx,
            attrib_types=self.attrib_types,
        )
| 14,950 | 4,810 |
import numpy as np
def check_ts_X_y(X, y):
    """Placeholder input validation for a time-series (X, y) pair.

    Currently a no-op pass-through: neither the data nor the datatypes
    are checked yet.
    """
    # TODO: add proper checks (e.g. check if input stuff is pandas full of objects)
    # return check_X_y(X, y, dtype=None, ensure_2d=False)
    return (X, y)
def check_ts_array(X):
    """Placeholder input validation for a time-series array.

    Currently a no-op pass-through: neither the data nor the datatype
    are checked yet.
    """
    # TODO: add proper checks (e.g. check if input stuff is pandas full of objects)
    # return check_array(X, dtype=None, ensure_2d=False)
    return X
def check_equal_index(X):
    """
    Check that all time series within each column of a nested pandas
    DataFrame share the same index.

    Parameters
    ----------
    X : nested pandas DataFrame
        Input dataframe with time-series in cells.

    Returns
    -------
    indexes : list of indexes
        One shared index per column.

    Raises
    ------
    ValueError
        If a series has fewer than 2 observations, or two series in the
        same column have different indexes.
    """
    # TODO handle 1d series, not only 2d dataframes
    # TODO assumes columns are typed (i.e. all rows for a given column have the same type)
    # TODO only handles series columns, raises error for columns with primitives
    indexes = []
    # Check index for each column separately.
    for c, col in enumerate(X.columns):
        # Get index from first row, can be either pd.Series or np.array.
        # BUGFIX: the np.array fallback previously read X.iloc[c, 0]
        # (row/column transposed); it must address (row 0, column c).
        first_cell = X.iloc[0, c]
        first_index = (first_cell.index if hasattr(first_cell, 'index')
                       else np.arange(first_cell.shape[0]))
        # Series must contain at least 2 observations, otherwise should be primitive.
        if len(first_index) < 2:
            raise ValueError(f'Time series must contain at least 2 observations, '
                             f'found time series in column {col} with less than 2 observations')
        # Check index for all rows.
        for i in range(1, X.shape[0]):
            # BUGFIX: fallback previously used X.iloc[c, 0] here as well;
            # it must address (row i, column c).
            cell = X.iloc[i, c]
            index = (cell.index if hasattr(cell, 'index')
                     else np.arange(cell.shape[0]))
            if not np.array_equal(first_index, index):
                raise ValueError(f'Found time series with unequal index in column {col}. '
                                 f'Input time-series must have the same index.')
        indexes.append(first_index)
    return indexes
| 2,286 | 664 |
import os
import sys
from halo import Halo
spinner = Halo(text='Please wait...', spinner='dots')
def main():
    """Main program: pull the Kali image, start a container, attach a shell."""
    portsToExpose = str(input('Ports to expose [Default: 22 for SSH]: '))
    # SECURITY FIX: the user-supplied value is interpolated into a shell
    # command line; accept only plain digits to prevent shell injection
    # (e.g. "22; rm -rf ~"). Anything else falls back to the default.
    if portsToExpose != "" and not portsToExpose.isdigit():
        print("Invalid port, using default (22)")
        portsToExpose = ""
    print("Installing...\n")
    spinner.start()
    os.system("docker pull kalilinux/kali-rolling")
    if portsToExpose != "":
        os.system(
            f"docker run -dt --name kali-vm -p {portsToExpose}:{portsToExpose} -i kalilinux/kali-rolling")
    else:
        os.system(
            "docker run -dt --name kali-vm -p 22:22 -i kalilinux/kali-rolling")
    spinner.stop()
    # Attach an interactive shell inside the new container.
    os.system("docker exec -it kali-vm bash")
def run():
    """Program entry point: run main() and exit with status 1 on Ctrl-C."""
    try:
        main()
    except KeyboardInterrupt:
        print("\n\nExiting...")
        try:
            # NOTE(review): os._exit() terminates the process immediately
            # and never raises, so the SystemExit handler below is
            # effectively dead code -- confirm whether sys.exit was meant.
            os._exit(1)
        except SystemExit:
            sys.exit(1)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    run()
| 947 | 350 |
# Generated by Django 2.1.1 on 2018-09-26 15:25
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the 'image' field from the mysite Blog model."""

    dependencies = [
        ('mysite', '0002_auto_20180912_1246'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='blog',
            name='image',
        ),
    ]
| 324 | 124 |
#!/usr/bin/env python3
import os
import shutil
import subprocess
import sys
# ARGS
fileDirname = sys.argv[1]  # directory containing the target script
fileBasename = sys.argv[2]  # script file name (e.g. "app.py")
workspaceFolder = sys.argv[3]  # workspace root that the output tree mirrors
# TRANSFORMATION
# Path of the script's directory relative to the workspace root.
relativeFileDirname = fileDirname[len(workspaceFolder)+1:]
# Script name with the trailing ".py" removed (rightmost occurrence only).
fileBasenameNoExtension = "".join(fileBasename.rsplit(".py", 1))
# PyInstaller output goes to <workspace>/output/<relative dir>.
distpath = os.path.join(workspaceFolder, "output", relativeFileDirname)
# COMMAND GENERATOR
def construct():
    """Assemble the pyinstaller command line.

    Adds the first *.ico found in the script's directory (in os.listdir
    order) as the executable icon, if any.
    """
    command = [
        "pyinstaller",
        "--onefile",
        "--clean",
        "--noconsole",
        "--distpath",
        distpath,
    ]
    icon = next((f for f in os.listdir(fileDirname) if f.endswith(".ico")), None)
    if icon is not None:
        command += ["--icon", os.path.join(fileDirname, icon)]
    command.append(fileBasename)
    return command
# CLEANER
def clean():
    """Remove PyInstaller's temporary __pycache__ and build directories.

    ROBUSTNESS FIX: uses ignore_errors so a missing directory (e.g. no
    bytecode cache was produced) no longer raises FileNotFoundError and
    aborts before the remaining directory is removed.
    """
    for name in ("__pycache__", "build"):
        shutil.rmtree(os.path.join(fileDirname, name), ignore_errors=True)
if __name__ == "__main__":
    # Build first, then remove the temporary build artifacts.
    COMMAND = construct()
    subprocess.check_call(COMMAND, cwd=fileDirname)
    clean()
| 1,098 | 387 |
from hostNameHandle import hostNameHandle
from gethostList import get_ips
from getwechat import getProxyInfo
from gethostNameIp import getIps
from getFrequency import getFrequency
from getDownloadAccount import getInfo
from sendMessage import sendMessage
hostName=input("主机名:")  # prompt: host name
# 1 = "frequent disconnects", 2 = "interface offline"
type = str(input("1 \"频繁掉线\" or 2 \"接口不在线\": "))
nodeName = hostNameHandle(hostName)
#print(nodeName)
hostList =get_ips(nodeName)
#print(hostList)
hoststr = " \n ".join(hostList)  # host-name string, newline separated
#print(hoststr)
hostipList = getIps(hoststr)  # list of host IPs
hostipStr= "\n".join(hostipList)
proxyInfoDic = getProxyInfo(nodeName)
wecharGroup = proxyInfoDic["wechat_group_name"]  # WeChat group
#print(proxyInfoDic)
nameNode=proxyInfoDic['name']
wxgroup=proxyInfoDic['wechat_group_name']
#print(hostipList)
def show(messageHead,messageBody):
    """Print a summary (node name / WeChat group) and send the combined
    message to the node's WeChat group via sendMessage()."""
    print("\n\t 节点名:\n\t\t"+messageHead.strip("\n"))
    print("\t 微信群:"+wxgroup)
    message = messageHead+"\n"+messageBody
    #print(message)
    sendMessage(message,wxgroup)
def getres():
    """Return the contents of the disconnect-statistics result file."""
    with open("/home/jwf/pfdx/result.txt",'r') as result_file:
        return result_file.read()
if type == "1":
    # "frequent disconnects": print stats, then notify the WeChat group
    getFrequency(hostipStr)  # writes the statistics result file
    messageHead = nameNode+" 频繁掉线,麻烦处理一下!\n"
    messageBody = getres()
    show(messageHead,messageBody)
elif type == "2":
    # "interface offline": gather download-account info and notify
    data = getInfo(hostipStr)
    messageHead = nameNode+" 账号拨不上,麻烦处理一下!\n"
    messageBody = data
    show(messageHead,messageBody)
else:
    print("input is what fuck")
| 1,430 | 602 |
"""
Build unstable products
"""
from phydat import instab_fgrps
import automol.graph
from automol.reac._util import rxn_objs_from_zmatrix
import automol.geom
import automol.inchi
import automol.zmat
from automol.graph import radical_dissociation_prods
from automol.graph import radical_group_dct
def instability_product_zmas(zma):
    """Z-matrices of the dissociation products for an unstable species.

    Returns an empty tuple when no instability-causing functional group
    is detected.
    """
    disconn_zmas = ()
    for prd_gra in instability_product_graphs(automol.zmat.graph(zma)):
        prd_ich = automol.graph.inchi(prd_gra)
        prd_geo = automol.inchi.geometry(prd_ich)
        disconn_zmas += (automol.geom.zmatrix(prd_geo),)
    return disconn_zmas
def instability_product_graphs(gra):
    """Determine if the species has functional group attachments that
    could cause molecule instabilities; return the product graphs.

    Scans each radical site's attached groups against the known
    instability-causing groups in phydat's instab_fgrps.DCT and, on the
    first match, builds the dissociation product graphs.
    """
    # Build graphs for the detection scheme
    rad_grp_dct = radical_group_dct(gra)
    # Check for instability causing functional groups
    prd_gras = ()
    for atm, grps in rad_grp_dct.items():
        if atm in instab_fgrps.DCT:
            fgrps, prds = instab_fgrps.DCT[atm]
            for grp in grps:
                grp_ich = automol.graph.inchi(grp)
                if grp_ich in fgrps:
                    # If instability found, determine prod of the instability
                    prd_ich = prds[fgrps.index(grp_ich)]
                    prd_geo = automol.inchi.geometry(prd_ich)
                    prd_gra = automol.geom.graph(prd_geo)
                    prd_gras = radical_dissociation_prods(
                        gra, prd_gra)
                    # stop after the first matching group for this atom
                    break
    return prd_gras
def instability_transformation(conn_zma, disconn_zmas):
    """Build the reaction object for an instability.

    Returns (zrxn, zma), or (None, None) when no reaction object could
    be constructed.
    """
    zrxn_objs = rxn_objs_from_zmatrix(
        [conn_zma], disconn_zmas, indexing='zma')
    if not zrxn_objs:
        return None, None
    zrxn, zma, _, _ = zrxn_objs[0]
    return zrxn, zma
| 2,041 | 688 |
# MINLP written by GAMS Convert at 04/21/18 13:53:48
#
# Equation counts
# Total E G L N X C B
# 104 52 0 52 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 151 101 50 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 2851 2801 50 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x14 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x35 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x36 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b106 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b107 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b108 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b109 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b110 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b111 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b112 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b113 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b114 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b115 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b116 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b117 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b118 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b119 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b120 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b121 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b122 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b123 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b124 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b125 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b126 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b127 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b128 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b129 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b130 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b131 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b132 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b133 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b134 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b135 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b136 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b137 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b138 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b139 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b140 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b141 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b142 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b143 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b144 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b145 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b146 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b147 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b148 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b149 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b150 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b151 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= - 0.0399775*m.x52 - 0.0629738*m.x53 - 0.027838*m.x54 - 0.00361039*m.x55 - 0.0761837*m.x56
- 0.135299*m.x57 - 0.0122123*m.x58 - 0.0399709*m.x59 - 0.0256571*m.x60 - 0.0991766*m.x61
- 0.0210495*m.x62 - 0.044223*m.x63 - 0.0128715*m.x64 - 0.00399952*m.x65 - 0.0501755*m.x66
- 0.149247*m.x67 - 0.0613428*m.x68 - 0.041802*m.x69 - 0.0754226*m.x70 - 0.0434943*m.x71
- 0.10135*m.x72 - 0.15397*m.x73 - 0.0576577*m.x74 - 0.0340755*m.x75 - 0.0426673*m.x76
- 0.0298566*m.x77 - 0.0952893*m.x78 - 0.169485*m.x79 - 0.0440279*m.x80 - 0.0470473*m.x81
- 0.00699576*m.x82 - 0.127417*m.x83 - 0.126305*m.x84 - 0.0486665*m.x85 - 0.153319*m.x86
- 0.0202574*m.x87 - 0.0272516*m.x88 - 0.0695536*m.x89 - 0.030744*m.x90 - 0.0325349*m.x91
- 0.0163484*m.x92 - 0.0753619*m.x93 - 0.0271795*m.x94 - 0.0113752*m.x95 - 0.0394797*m.x96
- 0.123927*m.x97 - 0.00514876*m.x98 - 0.0380825*m.x99 - 0.142836*m.x100 - 0.0540865*m.x101
, sense=minimize)
m.c2 = Constraint(expr=m.x2*m.x2 + m.x3*m.x3 + m.x4*m.x4 + m.x5*m.x5 + m.x6*m.x6 + m.x7*m.x7 + m.x8*m.x8 + m.x9*m.x9 +
m.x10*m.x10 + m.x11*m.x11 + m.x12*m.x12 + m.x13*m.x13 + m.x14*m.x14 + m.x15*m.x15 + m.x16*m.x16
+ m.x17*m.x17 + m.x18*m.x18 + m.x19*m.x19 + m.x20*m.x20 + m.x21*m.x21 + m.x22*m.x22 + m.x23*
m.x23 + m.x24*m.x24 + m.x25*m.x25 + m.x26*m.x26 + m.x27*m.x27 + m.x28*m.x28 + m.x29*m.x29 + m.x30
*m.x30 + m.x31*m.x31 + m.x32*m.x32 + m.x33*m.x33 + m.x34*m.x34 + m.x35*m.x35 + m.x36*m.x36 +
m.x37*m.x37 + m.x38*m.x38 + m.x39*m.x39 + m.x40*m.x40 + m.x41*m.x41 + m.x42*m.x42 + m.x43*m.x43
+ m.x44*m.x44 + m.x45*m.x45 + m.x46*m.x46 + m.x47*m.x47 + m.x48*m.x48 + m.x49*m.x49 + m.x50*
m.x50 + m.x51*m.x51 <= 0.04)
m.c3 = Constraint(expr= m.x52 - m.b102 <= 0)
m.c4 = Constraint(expr= m.x53 - m.b103 <= 0)
m.c5 = Constraint(expr= m.x54 - m.b104 <= 0)
m.c6 = Constraint(expr= m.x55 - m.b105 <= 0)
m.c7 = Constraint(expr= m.x56 - m.b106 <= 0)
m.c8 = Constraint(expr= m.x57 - m.b107 <= 0)
m.c9 = Constraint(expr= m.x58 - m.b108 <= 0)
m.c10 = Constraint(expr= m.x59 - m.b109 <= 0)
m.c11 = Constraint(expr= m.x60 - m.b110 <= 0)
m.c12 = Constraint(expr= m.x61 - m.b111 <= 0)
m.c13 = Constraint(expr= m.x62 - m.b112 <= 0)
m.c14 = Constraint(expr= m.x63 - m.b113 <= 0)
m.c15 = Constraint(expr= m.x64 - m.b114 <= 0)
m.c16 = Constraint(expr= m.x65 - m.b115 <= 0)
m.c17 = Constraint(expr= m.x66 - m.b116 <= 0)
m.c18 = Constraint(expr= m.x67 - m.b117 <= 0)
m.c19 = Constraint(expr= m.x68 - m.b118 <= 0)
m.c20 = Constraint(expr= m.x69 - m.b119 <= 0)
m.c21 = Constraint(expr= m.x70 - m.b120 <= 0)
m.c22 = Constraint(expr= m.x71 - m.b121 <= 0)
m.c23 = Constraint(expr= m.x72 - m.b122 <= 0)
m.c24 = Constraint(expr= m.x73 - m.b123 <= 0)
m.c25 = Constraint(expr= m.x74 - m.b124 <= 0)
m.c26 = Constraint(expr= m.x75 - m.b125 <= 0)
m.c27 = Constraint(expr= m.x76 - m.b126 <= 0)
m.c28 = Constraint(expr= m.x77 - m.b127 <= 0)
m.c29 = Constraint(expr= m.x78 - m.b128 <= 0)
m.c30 = Constraint(expr= m.x79 - m.b129 <= 0)
m.c31 = Constraint(expr= m.x80 - m.b130 <= 0)
m.c32 = Constraint(expr= m.x81 - m.b131 <= 0)
m.c33 = Constraint(expr= m.x82 - m.b132 <= 0)
m.c34 = Constraint(expr= m.x83 - m.b133 <= 0)
m.c35 = Constraint(expr= m.x84 - m.b134 <= 0)
m.c36 = Constraint(expr= m.x85 - m.b135 <= 0)
m.c37 = Constraint(expr= m.x86 - m.b136 <= 0)
m.c38 = Constraint(expr= m.x87 - m.b137 <= 0)
m.c39 = Constraint(expr= m.x88 - m.b138 <= 0)
m.c40 = Constraint(expr= m.x89 - m.b139 <= 0)
m.c41 = Constraint(expr= m.x90 - m.b140 <= 0)
m.c42 = Constraint(expr= m.x91 - m.b141 <= 0)
m.c43 = Constraint(expr= m.x92 - m.b142 <= 0)
m.c44 = Constraint(expr= m.x93 - m.b143 <= 0)
m.c45 = Constraint(expr= m.x94 - m.b144 <= 0)
m.c46 = Constraint(expr= m.x95 - m.b145 <= 0)
m.c47 = Constraint(expr= m.x96 - m.b146 <= 0)
m.c48 = Constraint(expr= m.x97 - m.b147 <= 0)
m.c49 = Constraint(expr= m.x98 - m.b148 <= 0)
m.c50 = Constraint(expr= m.x99 - m.b149 <= 0)
m.c51 = Constraint(expr= m.x100 - m.b150 <= 0)
m.c52 = Constraint(expr= m.x101 - m.b151 <= 0)
m.c53 = Constraint(expr= m.x52 + m.x53 + m.x54 + m.x55 + m.x56 + m.x57 + m.x58 + m.x59 + m.x60 + m.x61 + m.x62 + m.x63
+ m.x64 + m.x65 + m.x66 + m.x67 + m.x68 + m.x69 + m.x70 + m.x71 + m.x72 + m.x73 + m.x74 + m.x75
+ m.x76 + m.x77 + m.x78 + m.x79 + m.x80 + m.x81 + m.x82 + m.x83 + m.x84 + m.x85 + m.x86 + m.x87
+ m.x88 + m.x89 + m.x90 + m.x91 + m.x92 + m.x93 + m.x94 + m.x95 + m.x96 + m.x97 + m.x98 + m.x99
+ m.x100 + m.x101 == 1)
m.c54 = Constraint(expr= m.b102 + m.b103 + m.b104 + m.b105 + m.b106 + m.b107 + m.b108 + m.b109 + m.b110 + m.b111
+ m.b112 + m.b113 + m.b114 + m.b115 + m.b116 + m.b117 + m.b118 + m.b119 + m.b120 + m.b121
+ m.b122 + m.b123 + m.b124 + m.b125 + m.b126 + m.b127 + m.b128 + m.b129 + m.b130 + m.b131
+ m.b132 + m.b133 + m.b134 + m.b135 + m.b136 + m.b137 + m.b138 + m.b139 + m.b140 + m.b141
+ m.b142 + m.b143 + m.b144 + m.b145 + m.b146 + m.b147 + m.b148 + m.b149 + m.b150 + m.b151
<= 10)
m.c55 = Constraint(expr= - m.x2 + 0.437623*m.x52 + 0.00776152*m.x53 + 0.00831088*m.x54 - 0.00522971*m.x55
+ 0.015015*m.x56 - 0.0107741*m.x57 - 0.00662896*m.x58 - 0.00824877*m.x59 + 0.00953726*m.x60
- 0.0162102*m.x61 + 0.06876*m.x62 + 0.0307553*m.x63 + 0.00493869*m.x64 + 0.00905031*m.x65
+ 0.00428006*m.x66 + 0.0159505*m.x67 + 0.0372772*m.x68 + 0.00356282*m.x69 + 0.0102555*m.x70
- 0.0161653*m.x71 - 0.00678775*m.x72 - 0.000991393*m.x73 + 0.0104307*m.x74 - 0.00554627*m.x75
+ 0.000275614*m.x76 + 0.00146767*m.x77 - 0.0219202*m.x78 - 0.0152471*m.x79 - 0.0133041*m.x80
+ 0.00532027*m.x81 + 0.0190296*m.x82 + 9.52152E-5*m.x83 - 0.0180784*m.x84 + 0.00127079*m.x85
- 0.00331643*m.x86 - 0.0107273*m.x87 - 6.72321E-5*m.x88 + 0.0019753*m.x89 - 0.00561942*m.x90
- 0.0137411*m.x91 + 0.0266953*m.x92 + 0.0039322*m.x93 + 0.0312023*m.x94 + 0.00475029*m.x95
+ 0.00458043*m.x96 - 0.0111713*m.x97 + 0.00233202*m.x98 + 0.00279105*m.x99 + 0.00588268*m.x100
+ 0.0171354*m.x101 == 0)
m.c56 = Constraint(expr= - m.x3 + 0.00776152*m.x52 + 0.305432*m.x53 + 0.0022503*m.x54 + 0.0131826*m.x55 + 0.013322*m.x56
+ 0.0622902*m.x57 + 0.00612167*m.x58 + 0.00797614*m.x59 + 0.00886071*m.x60 - 0.0285042*m.x61
+ 0.003025*m.x62 + 0.0159085*m.x63 - 0.00357187*m.x64 + 0.0016128*m.x65 + 0.012642*m.x66
+ 0.119815*m.x67 + 0.00505566*m.x68 + 0.0131274*m.x69 + 0.00269972*m.x70 + 0.00899326*m.x71
+ 0.0193615*m.x72 + 0.114117*m.x73 + 0.0118212*m.x74 + 0.00695719*m.x75 - 0.00146012*m.x76
- 0.00455327*m.x77 - 0.00233478*m.x78 - 0.00354018*m.x79 - 0.0108257*m.x80 + 0.00548427*m.x81
+ 0.00843954*m.x82 + 0.0957415*m.x83 + 0.0724208*m.x84 + 0.00920314*m.x85 - 0.00921773*m.x86
+ 0.0112775*m.x87 + 0.010577*m.x88 - 0.00268772*m.x89 + 0.0104329*m.x90 - 0.00184253*m.x91
+ 0.0230614*m.x92 + 0.0797692*m.x93 - 0.00718849*m.x94 + 0.00668562*m.x95 - 0.00479877*m.x96
+ 0.037467*m.x97 - 0.000833339*m.x98 - 0.00287641*m.x99 - 0.00540049*m.x100 + 0.0133618*m.x101
== 0)
m.c57 = Constraint(expr= - m.x4 + 0.00831088*m.x52 + 0.0022503*m.x53 + 0.179315*m.x54 + 0.0238256*m.x55
- 0.00566425*m.x56 - 0.0137602*m.x57 + 0.00878864*m.x58 + 0.0166554*m.x59 + 0.0152274*m.x60
- 0.0193213*m.x61 + 0.0171146*m.x62 + 0.0117301*m.x63 + 0.0108599*m.x64 + 0.011655*m.x65
- 0.00502711*m.x66 + 0.011192*m.x67 + 0.0247138*m.x68 + 0.00188025*m.x69 + 0.00635281*m.x70
+ 0.0217042*m.x71 + 0.0189843*m.x72 - 0.00893642*m.x73 + 0.020493*m.x74 + 0.0060982*m.x75
+ 0.00709161*m.x76 + 0.0192029*m.x77 + 0.00489188*m.x78 + 0.0141398*m.x79 + 0.0183881*m.x80
+ 0.0132555*m.x81 + 0.0089825*m.x82 - 0.00433095*m.x83 + 0.000368443*m.x84 + 0.00845006*m.x85
+ 0.0106863*m.x86 + 0.0165343*m.x87 + 0.0182906*m.x88 + 0.000474699*m.x89 + 0.0125524*m.x90
+ 0.00998269*m.x91 + 0.00663781*m.x92 - 0.00941355*m.x93 + 0.0166904*m.x94 + 0.00602889*m.x95
+ 0.00224387*m.x96 - 0.00806098*m.x97 + 0.0151626*m.x98 - 0.000965771*m.x99 + 0.0157379*m.x100
+ 0.0187837*m.x101 == 0)
m.c58 = Constraint(expr= - m.x5 - 0.00522971*m.x52 + 0.0131826*m.x53 + 0.0238256*m.x54 + 0.220297*m.x55
+ 0.0243861*m.x56 - 0.00430317*m.x57 + 0.0174604*m.x58 + 0.00681665*m.x59 + 0.0242063*m.x60
+ 0.00144938*m.x61 + 0.015222*m.x62 + 0.014716*m.x63 + 0.00177302*m.x64 + 0.0176392*m.x65
+ 0.021276*m.x66 + 0.00889693*m.x67 + 0.00407666*m.x68 + 0.00949954*m.x69 + 0.00937267*m.x70
+ 0.0242093*m.x71 + 0.00460206*m.x72 - 0.00745268*m.x73 + 0.0160821*m.x74 + 0.00240536*m.x75
+ 0.0042418*m.x76 + 0.00264811*m.x77 + 0.00832847*m.x78 + 0.0040175*m.x79 + 0.0153818*m.x80
+ 0.0182359*m.x81 + 0.00961571*m.x82 + 0.0122098*m.x83 - 0.000558226*m.x84 + 0.0179991*m.x85
+ 0.0126379*m.x86 + 0.0175827*m.x87 + 0.00566779*m.x88 - 0.000955585*m.x89 + 0.0234718*m.x90
- 0.00128625*m.x91 + 0.00397589*m.x92 + 0.00253364*m.x93 + 0.0161477*m.x94 + 0.0163612*m.x95
+ 0.012804*m.x96 + 0.0254602*m.x97 + 0.0164285*m.x98 + 0.0113336*m.x99 + 0.00992279*m.x100
+ 0.00909239*m.x101 == 0)
m.c59 = Constraint(expr= - m.x6 + 0.015015*m.x52 + 0.013322*m.x53 - 0.00566425*m.x54 + 0.0243861*m.x55 + 0.404084*m.x56
+ 0.058688*m.x57 + 0.0144003*m.x58 + 0.0371145*m.x59 + 0.0227472*m.x60 + 0.0120821*m.x61
+ 0.00730434*m.x62 + 0.0238735*m.x63 + 0.00933373*m.x64 + 0.0051169*m.x65 + 0.0488881*m.x66
+ 0.0227134*m.x67 + 0.00590284*m.x68 + 0.0335068*m.x69 + 0.0167733*m.x70 + 0.044455*m.x71
+ 0.069787*m.x72 + 0.040347*m.x73 + 0.039664*m.x74 + 0.0102778*m.x75 + 0.0172657*m.x76
+ 0.00473961*m.x77 + 0.0132399*m.x78 - 0.0118559*m.x79 + 0.0329745*m.x80 + 0.00776731*m.x81
+ 0.00146596*m.x82 + 0.0398038*m.x83 + 0.0268424*m.x84 + 0.0120171*m.x85 + 0.0145295*m.x86
+ 0.0354297*m.x87 - 0.00170776*m.x88 + 0.0255113*m.x89 + 0.0115797*m.x90 + 0.0340249*m.x91
+ 0.00175196*m.x92 + 0.0214384*m.x93 + 0.0113414*m.x94 + 0.039091*m.x95 + 0.00619763*m.x96
+ 0.0133319*m.x97 + 0.0121082*m.x98 + 0.0357203*m.x99 + 0.0381607*m.x100 + 0.0203578*m.x101
== 0)
m.c60 = Constraint(expr= - m.x7 - 0.0107741*m.x52 + 0.0622902*m.x53 - 0.0137602*m.x54 - 0.00430317*m.x55
+ 0.058688*m.x56 + 0.452644*m.x57 + 0.0193845*m.x58 + 0.0341649*m.x59 + 0.00602161*m.x60
+ 0.0583255*m.x61 - 0.00423459*m.x62 + 0.016241*m.x63 + 0.0157118*m.x64 - 0.00370551*m.x65
+ 0.0511023*m.x66 + 0.148921*m.x67 + 0.0156037*m.x68 + 0.0155171*m.x69 + 0.0112086*m.x70
+ 0.030702*m.x71 + 0.0216234*m.x72 + 0.105953*m.x73 + 0.0128583*m.x74 + 0.00399753*m.x75
+ 0.0184167*m.x76 + 0.010492*m.x77 + 0.0244629*m.x78 + 0.047228*m.x79 + 0.00547127*m.x80
+ 0.0133769*m.x81 + 0.0119332*m.x82 + 0.161483*m.x83 + 0.187982*m.x84 + 0.00916881*m.x85
+ 0.0209491*m.x86 + 0.0327261*m.x87 + 0.028455*m.x88 + 0.0105724*m.x89 + 0.0238296*m.x90
- 0.00223337*m.x91 + 0.0230382*m.x92 + 0.112083*m.x93 + 0.00257709*m.x94 - 0.0088657*m.x95
+ 0.0101284*m.x96 + 0.0087194*m.x97 + 0.016345*m.x98 + 0.0145296*m.x99 + 0.00606395*m.x100
+ 0.00747571*m.x101 == 0)
m.c61 = Constraint(expr= - m.x8 - 0.00662896*m.x52 + 0.00612167*m.x53 + 0.00878864*m.x54 + 0.0174604*m.x55
+ 0.0144003*m.x56 + 0.0193845*m.x57 + 0.28381*m.x58 + 0.0129912*m.x59 + 0.00711013*m.x60
+ 0.023726*m.x61 + 0.0135222*m.x62 + 0.00245137*m.x63 + 0.0139941*m.x64 + 0.0146659*m.x65
- 0.000316803*m.x66 + 0.0195659*m.x67 + 0.0130298*m.x68 + 0.0143949*m.x69 - 0.0152357*m.x70
+ 0.0229109*m.x71 + 0.0178969*m.x72 + 0.00747729*m.x73 + 0.0262*m.x74 + 0.0176229*m.x75
+ 0.0184672*m.x76 + 0.00333289*m.x77 + 0.0125282*m.x78 + 0.0160426*m.x79 - 0.00910903*m.x80
+ 0.0168617*m.x81 + 0.00649361*m.x82 + 0.000720061*m.x83 + 0.0015496*m.x84 + 0.0120757*m.x85
+ 0.0231367*m.x86 + 0.0160891*m.x87 + 0.000127307*m.x88 + 0.00590674*m.x89 + 0.0251974*m.x90
+ 0.0109883*m.x91 + 0.0197048*m.x92 + 0.00281047*m.x93 + 0.0113665*m.x94 + 0.0128475*m.x95
+ 0.00622782*m.x96 + 0.0245605*m.x97 + 0.00706149*m.x98 + 0.00272192*m.x99 + 0.00300911*m.x100
+ 0.0133916*m.x101 == 0)
m.c62 = Constraint(expr= - m.x9 - 0.00824877*m.x52 + 0.00797614*m.x53 + 0.0166554*m.x54 + 0.00681665*m.x55
+ 0.0371145*m.x56 + 0.0341649*m.x57 + 0.0129912*m.x58 + 0.189607*m.x59 + 0.0210316*m.x60
+ 0.00633527*m.x61 + 0.00869335*m.x62 + 0.031581*m.x63 - 0.00230763*m.x64 + 0.00682721*m.x65
+ 0.0158862*m.x66 + 0.016982*m.x67 + 0.0111502*m.x68 + 0.0375819*m.x69 + 0.0223572*m.x70
+ 0.0434772*m.x71 + 0.0304477*m.x72 + 0.00554913*m.x73 + 0.0268377*m.x74 + 0.00229807*m.x75
+ 0.01809*m.x76 + 0.0114054*m.x77 + 0.0148192*m.x78 + 0.0286969*m.x79 + 0.0156643*m.x80
+ 0.0214673*m.x81 + 0.00423722*m.x82 + 0.0101393*m.x83 + 0.00438509*m.x84 + 0.0186319*m.x85
+ 0.046181*m.x86 + 0.0332107*m.x87 + 0.0160758*m.x88 + 0.00541803*m.x89 + 0.0243196*m.x90
+ 0.0145438*m.x91 + 0.00473001*m.x92 + 0.00681241*m.x93 + 0.00988793*m.x94 + 0.0149668*m.x95
+ 0.023562*m.x96 + 0.0173729*m.x97 + 0.016267*m.x98 + 0.0121424*m.x99 - 0.00299957*m.x100
+ 0.00907044*m.x101 == 0)
m.c63 = Constraint(expr= - m.x10 + 0.00953726*m.x52 + 0.00886071*m.x53 + 0.0152274*m.x54 + 0.0242063*m.x55
+ 0.0227472*m.x56 + 0.00602161*m.x57 + 0.00711013*m.x58 + 0.0210316*m.x59 + 0.186866*m.x60
+ 0.00832283*m.x61 + 0.0180258*m.x62 + 0.0154265*m.x63 + 0.0114402*m.x64 + 0.0209618*m.x65
+ 0.0173064*m.x66 - 0.000705565*m.x67 + 0.0143527*m.x68 + 0.0248206*m.x69 + 0.0181781*m.x70
+ 0.0279005*m.x71 + 0.0285813*m.x72 + 0.00289351*m.x73 + 0.0153119*m.x74 + 0.00890117*m.x75
+ 0.0222796*m.x76 + 0.0442301*m.x77 + 0.0119004*m.x78 + 0.00720201*m.x79 + 0.0201433*m.x80
+ 0.0169933*m.x81 + 0.019457*m.x82 + 0.0111733*m.x83 + 0.00689119*m.x84 + 0.00669496*m.x85
+ 0.0331297*m.x86 + 0.0197397*m.x87 + 0.0120744*m.x88 + 0.0127905*m.x89 + 0.0406861*m.x90
+ 0.0323148*m.x91 + 0.0200869*m.x92 + 0.00172542*m.x93 + 0.0311244*m.x94 + 0.00519737*m.x95
+ 0.0142684*m.x96 + 0.0178041*m.x97 + 0.00992985*m.x98 + 0.0146222*m.x99 + 0.00920343*m.x100
+ 0.0199828*m.x101 == 0)
m.c64 = Constraint(expr= - m.x11 - 0.0162102*m.x52 - 0.0285042*m.x53 - 0.0193213*m.x54 + 0.00144938*m.x55
+ 0.0120821*m.x56 + 0.0583255*m.x57 + 0.023726*m.x58 + 0.00633527*m.x59 + 0.00832283*m.x60
+ 0.63428*m.x61 - 0.00280448*m.x62 - 0.00545788*m.x63 - 0.00396523*m.x64 - 0.0183861*m.x65
+ 0.0180971*m.x66 + 0.00513145*m.x67 + 0.00613144*m.x68 - 0.0110514*m.x69 + 0.0194917*m.x70
+ 0.00495793*m.x71 + 0.0244718*m.x72 + 0.00915034*m.x73 - 0.000197643*m.x74 - 0.00657968*m.x75
- 0.00738206*m.x76 + 0.0105229*m.x77 - 0.0124412*m.x78 - 0.00440667*m.x79 + 0.0123441*m.x80
+ 0.00670955*m.x81 + 0.000975768*m.x82 + 0.0409171*m.x83 - 0.0110323*m.x84 - 0.00482281*m.x85
- 0.00546107*m.x86 - 0.0142879*m.x87 + 0.018699*m.x88 + 0.0440906*m.x89 - 0.00363253*m.x90
+ 0.00273765*m.x91 + 0.00673168*m.x92 + 0.0033605*m.x93 + 0.0241296*m.x94 - 0.00441557*m.x95
- 0.00703875*m.x96 + 0.016325*m.x97 + 0.00222896*m.x98 - 0.0077883*m.x99 - 0.00313691*m.x100
+ 0.0264584*m.x101 == 0)
m.c65 = Constraint(expr= - m.x12 + 0.06876*m.x52 + 0.003025*m.x53 + 0.0171146*m.x54 + 0.015222*m.x55 + 0.00730434*m.x56
- 0.00423459*m.x57 + 0.0135222*m.x58 + 0.00869335*m.x59 + 0.0180258*m.x60 - 0.00280448*m.x61
+ 0.316413*m.x62 + 0.0323352*m.x63 - 0.00236891*m.x64 + 0.00787061*m.x65 + 0.0149546*m.x66
+ 0.0036316*m.x67 - 0.0116267*m.x68 + 0.032345*m.x69 - 0.000144027*m.x70 - 0.00218381*m.x71
+ 0.00530167*m.x72 + 0.000497945*m.x73 + 0.0156557*m.x74 + 0.0127479*m.x75 + 0.0111445*m.x76
+ 0.0085222*m.x77 - 0.00157042*m.x78 + 0.00905753*m.x79 - 0.00402737*m.x80 + 0.00937755*m.x81
+ 0.00827346*m.x82 + 0.00543371*m.x83 + 0.0230998*m.x84 + 0.0238731*m.x85 + 0.0199311*m.x86
+ 0.0174054*m.x87 + 0.00185204*m.x88 + 0.0156839*m.x89 + 0.00443354*m.x90 + 0.0202129*m.x91
+ 0.0114171*m.x92 + 0.00122747*m.x93 + 0.0118384*m.x94 + 0.0228483*m.x95 + 0.0131884*m.x96
- 0.0151598*m.x97 + 0.00844519*m.x98 + 0.0198609*m.x99 + 0.0242712*m.x100 + 0.0138048*m.x101
== 0)
m.c66 = Constraint(expr= - m.x13 + 0.0307553*m.x52 + 0.0159085*m.x53 + 0.0117301*m.x54 + 0.014716*m.x55
+ 0.0238735*m.x56 + 0.016241*m.x57 + 0.00245137*m.x58 + 0.031581*m.x59 + 0.0154265*m.x60
- 0.00545788*m.x61 + 0.0323352*m.x62 + 0.187022*m.x63 + 0.00222855*m.x64 + 0.00747903*m.x65
+ 0.0223879*m.x66 + 0.0408618*m.x67 + 0.00998685*m.x68 + 0.0255*m.x69 + 0.0234902*m.x70
+ 0.0410056*m.x71 + 0.0457515*m.x72 + 0.0404933*m.x73 + 0.0173727*m.x74 + 0.0186957*m.x75
+ 0.0206278*m.x76 + 0.0197312*m.x77 + 0.0258626*m.x78 + 0.0281149*m.x79 + 0.020796*m.x80
+ 0.0154147*m.x81 + 0.00821687*m.x82 + 0.0277493*m.x83 + 0.0231334*m.x84 + 0.0242186*m.x85
+ 0.0562299*m.x86 + 0.0315629*m.x87 + 0.0122553*m.x88 + 0.0146058*m.x89 + 0.0225422*m.x90
+ 0.0126094*m.x91 + 0.0195556*m.x92 + 0.0148528*m.x93 + 0.016949*m.x94 + 0.0309886*m.x95
+ 0.0111695*m.x96 + 0.023004*m.x97 + 0.00865625*m.x98 + 0.0218181*m.x99 + 0.0268327*m.x100
+ 0.0203605*m.x101 == 0)
m.c67 = Constraint(expr= - m.x14 + 0.00493869*m.x52 - 0.00357187*m.x53 + 0.0108599*m.x54 + 0.00177302*m.x55
+ 0.00933373*m.x56 + 0.0157118*m.x57 + 0.0139941*m.x58 - 0.00230763*m.x59 + 0.0114402*m.x60
- 0.00396523*m.x61 - 0.00236891*m.x62 + 0.00222855*m.x63 + 0.221194*m.x64 + 0.0104987*m.x65
+ 0.0399316*m.x66 - 0.000811365*m.x67 + 0.00762929*m.x68 - 0.0044099*m.x69 + 0.0198057*m.x70
+ 0.00234582*m.x71 - 0.0069834*m.x72 + 0.00152018*m.x73 - 0.00484524*m.x74 + 0.0034154*m.x75
- 0.0060451*m.x76 + 0.0102102*m.x77 + 0.019147*m.x78 + 0.00861968*m.x79 - 0.0013634*m.x80
+ 0.00686903*m.x81 + 0.0133687*m.x82 + 0.00136495*m.x83 + 0.00888952*m.x84 + 0.00809492*m.x85
+ 0.00573295*m.x86 + 0.00828577*m.x87 + 0.0152408*m.x88 + 0.0110413*m.x89 + 0.0069969*m.x90
+ 0.0053944*m.x91 + 0.0104813*m.x92 - 0.00694263*m.x93 + 0.0141714*m.x94 - 0.00184581*m.x95
+ 0.0147295*m.x96 - 0.00369236*m.x97 + 0.00526228*m.x98 + 0.00828497*m.x99 - 0.0189632*m.x100
+ 0.0101028*m.x101 == 0)
m.c68 = Constraint(expr= - m.x15 + 0.00905031*m.x52 + 0.0016128*m.x53 + 0.011655*m.x54 + 0.0176392*m.x55
+ 0.0051169*m.x56 - 0.00370551*m.x57 + 0.0146659*m.x58 + 0.00682721*m.x59 + 0.0209618*m.x60
- 0.0183861*m.x61 + 0.00787061*m.x62 + 0.00747903*m.x63 + 0.0104987*m.x64 + 0.172607*m.x65
+ 0.010781*m.x66 + 0.0114342*m.x67 + 0.00907137*m.x68 + 0.0104462*m.x69 + 0.0151955*m.x70
+ 0.00458498*m.x71 + 0.0183508*m.x72 - 0.0158535*m.x73 + 0.0070277*m.x74 + 0.00809957*m.x75
+ 0.0120566*m.x76 + 0.0156797*m.x77 + 0.019146*m.x78 + 0.0230557*m.x79 + 0.00625971*m.x80
+ 0.0154784*m.x81 + 0.0113709*m.x82 - 0.00207874*m.x83 - 0.00747722*m.x84 + 0.00726553*m.x85
+ 0.037832*m.x86 + 0.0123555*m.x87 - 0.000156492*m.x88 + 0.0119264*m.x89 + 0.0124128*m.x90
+ 0.0206051*m.x91 + 0.0182519*m.x92 - 0.0063393*m.x93 + 0.0162264*m.x94 + 0.0114734*m.x95
+ 0.0298746*m.x96 + 0.00393739*m.x97 + 0.0153743*m.x98 + 0.00989917*m.x99 + 0.0228823*m.x100
+ 0.017772*m.x101 == 0)
m.c69 = Constraint(expr= - m.x16 + 0.00428006*m.x52 + 0.012642*m.x53 - 0.00502711*m.x54 + 0.021276*m.x55
+ 0.0488881*m.x56 + 0.0511023*m.x57 - 0.000316803*m.x58 + 0.0158862*m.x59 + 0.0173064*m.x60
+ 0.0180971*m.x61 + 0.0149546*m.x62 + 0.0223879*m.x63 + 0.0399316*m.x64 + 0.010781*m.x65
+ 0.30953*m.x66 + 0.0123346*m.x67 - 0.00454343*m.x68 + 0.00554417*m.x69 + 0.0322368*m.x70
+ 0.0122026*m.x71 + 0.0154661*m.x72 + 0.0109601*m.x73 + 0.0128077*m.x74 + 0.00710322*m.x75
+ 0.0100525*m.x76 + 0.0141544*m.x77 - 0.00302889*m.x78 + 0.0202446*m.x79 + 0.0273331*m.x80
+ 0.0142628*m.x81 + 0.0130754*m.x82 + 0.00886564*m.x83 + 0.0125267*m.x84 + 0.00167144*m.x85
+ 0.0368131*m.x86 + 0.0135909*m.x87 - 0.000550234*m.x88 + 0.0369853*m.x89 + 0.00970355*m.x90
+ 0.0253109*m.x91 + 0.01371*m.x92 + 0.0151066*m.x93 + 0.0201164*m.x94 + 0.0193544*m.x95
+ 0.0166079*m.x96 + 0.0113423*m.x97 + 0.0488179*m.x98 + 0.016393*m.x99 - 0.00100315*m.x100
+ 0.0101386*m.x101 == 0)
m.c70 = Constraint(expr= - m.x17 + 0.0159505*m.x52 + 0.119815*m.x53 + 0.011192*m.x54 + 0.00889693*m.x55
+ 0.0227134*m.x56 + 0.148921*m.x57 + 0.0195659*m.x58 + 0.016982*m.x59 - 0.000705565*m.x60
+ 0.00513145*m.x61 + 0.0036316*m.x62 + 0.0408618*m.x63 - 0.000811365*m.x64 + 0.0114342*m.x65
+ 0.0123346*m.x66 + 0.506241*m.x67 + 0.025301*m.x68 + 0.0356088*m.x69 + 0.0108864*m.x70
+ 0.0190276*m.x71 + 0.0288312*m.x72 + 0.12559*m.x73 + 0.0213959*m.x74 + 0.0275661*m.x75
+ 0.0260354*m.x76 + 0.00490195*m.x77 - 8.95127E-5*m.x78 + 0.0278101*m.x79 + 0.0154943*m.x80
+ 0.0110009*m.x81 + 0.0209885*m.x82 + 0.129895*m.x83 + 0.104593*m.x84 + 0.0164835*m.x85
+ 0.0238469*m.x86 + 0.0319592*m.x87 + 0.016159*m.x88 - 0.00048612*m.x89 + 0.0206697*m.x90
- 0.0044719*m.x91 + 0.0412523*m.x92 + 0.150222*m.x93 + 0.0060731*m.x94 + 0.00469106*m.x95
+ 0.032667*m.x96 + 0.00513266*m.x97 + 0.00884207*m.x98 + 0.0125003*m.x99 - 0.00578404*m.x100
+ 0.0225237*m.x101 == 0)
m.c71 = Constraint(expr= - m.x18 + 0.0372772*m.x52 + 0.00505566*m.x53 + 0.0247138*m.x54 + 0.00407666*m.x55
+ 0.00590284*m.x56 + 0.0156037*m.x57 + 0.0130298*m.x58 + 0.0111502*m.x59 + 0.0143527*m.x60
+ 0.00613144*m.x61 - 0.0116267*m.x62 + 0.00998685*m.x63 + 0.00762929*m.x64 + 0.00907137*m.x65
- 0.00454343*m.x66 + 0.025301*m.x67 + 0.272867*m.x68 + 0.013367*m.x69 + 0.0153675*m.x70
+ 0.0202051*m.x71 + 0.0334085*m.x72 + 0.0195246*m.x73 + 0.0119803*m.x74 + 0.0131243*m.x75
+ 0.009587*m.x76 + 0.00326145*m.x77 + 0.0055836*m.x78 + 0.0160137*m.x79 - 0.00700837*m.x80
+ 0.00816694*m.x81 + 0.0133907*m.x82 + 0.00598212*m.x83 - 0.00201041*m.x84 + 0.0153712*m.x85
+ 0.00839091*m.x86 + 0.00597115*m.x87 - 0.000508298*m.x88 - 0.00265155*m.x89 + 0.0148232*m.x90
- 0.000660928*m.x91 + 0.0219128*m.x92 + 0.0200429*m.x93 + 0.00803816*m.x94 + 0.0174527*m.x95
+ 0.00328568*m.x96 + 0.00580133*m.x97 - 0.000537323*m.x98 + 0.0127107*m.x99 + 0.0134156*m.x100
+ 0.00882735*m.x101 == 0)
m.c72 = Constraint(expr= - m.x19 + 0.00356282*m.x52 + 0.0131274*m.x53 + 0.00188025*m.x54 + 0.00949954*m.x55
+ 0.0335068*m.x56 + 0.0155171*m.x57 + 0.0143949*m.x58 + 0.0375819*m.x59 + 0.0248206*m.x60
- 0.0110514*m.x61 + 0.032345*m.x62 + 0.0255*m.x63 - 0.0044099*m.x64 + 0.0104462*m.x65
+ 0.00554417*m.x66 + 0.0356088*m.x67 + 0.013367*m.x68 + 0.243112*m.x69 + 0.00434594*m.x70
+ 0.057792*m.x71 + 0.0294945*m.x72 + 0.030868*m.x73 + 0.0219596*m.x74 + 0.00928365*m.x75
+ 0.0279232*m.x76 + 0.0138525*m.x77 + 0.0582128*m.x78 + 0.0225874*m.x79 + 0.0216165*m.x80
+ 0.0188341*m.x81 + 0.0113276*m.x82 + 0.0272881*m.x83 + 0.0118425*m.x84 + 0.0244022*m.x85
+ 0.0305204*m.x86 + 0.0378227*m.x87 + 0.00150342*m.x88 + 0.000336096*m.x89 + 0.0330899*m.x90
+ 0.0189859*m.x91 + 0.0161305*m.x92 + 0.00657093*m.x93 + 0.0118269*m.x94 + 0.0262376*m.x95
+ 0.0229703*m.x96 + 0.0245122*m.x97 + 0.00497315*m.x98 + 0.0222552*m.x99 + 0.00180371*m.x100
+ 0.00323067*m.x101 == 0)
m.c73 = Constraint(expr= - m.x20 + 0.0102555*m.x52 + 0.00269972*m.x53 + 0.00635281*m.x54 + 0.00937267*m.x55
+ 0.0167733*m.x56 + 0.0112086*m.x57 - 0.0152357*m.x58 + 0.0223572*m.x59 + 0.0181781*m.x60
+ 0.0194917*m.x61 - 0.000144027*m.x62 + 0.0234902*m.x63 + 0.0198057*m.x64 + 0.0151955*m.x65
+ 0.0322368*m.x66 + 0.0108864*m.x67 + 0.0153675*m.x68 + 0.00434594*m.x69 + 0.486402*m.x70
+ 0.0205735*m.x71 + 0.0176842*m.x72 + 0.016224*m.x73 + 0.029091*m.x74 + 0.0174387*m.x75
+ 0.0237535*m.x76 + 0.0139083*m.x77 + 0.0112918*m.x78 + 0.0315031*m.x79 + 0.0104372*m.x80
+ 0.0253639*m.x81 + 0.00237959*m.x82 - 0.00567431*m.x83 + 0.0125939*m.x84 + 0.0195843*m.x85
+ 0.0768331*m.x86 + 0.0267106*m.x87 + 0.00312045*m.x88 + 0.00720686*m.x89 + 0.0261195*m.x90
+ 0.0295481*m.x91 - 0.00121588*m.x92 + 0.00174197*m.x93 + 0.000971523*m.x94 + 0.016521*m.x95
+ 0.0242338*m.x96 + 0.0387835*m.x97 + 0.0249114*m.x98 + 0.0106646*m.x99 - 0.0157855*m.x100
+ 0.0165385*m.x101 == 0)
m.c74 = Constraint(expr= - m.x21 - 0.0161653*m.x52 + 0.00899326*m.x53 + 0.0217042*m.x54 + 0.0242093*m.x55
+ 0.044455*m.x56 + 0.030702*m.x57 + 0.0229109*m.x58 + 0.0434772*m.x59 + 0.0279005*m.x60
+ 0.00495793*m.x61 - 0.00218381*m.x62 + 0.0410056*m.x63 + 0.00234582*m.x64 + 0.00458498*m.x65
+ 0.0122026*m.x66 + 0.0190276*m.x67 + 0.0202051*m.x68 + 0.057792*m.x69 + 0.0205735*m.x70
+ 0.30309*m.x71 + 0.0477266*m.x72 + 0.0307124*m.x73 + 0.0320937*m.x74 + 0.00895684*m.x75
+ 0.0261585*m.x76 + 0.0224334*m.x77 + 0.0281506*m.x78 + 0.0324489*m.x79 + 0.0266137*m.x80
+ 0.0183526*m.x81 - 0.0016676*m.x82 + 0.0194921*m.x83 + 0.0366494*m.x84 + 0.0166731*m.x85
+ 0.0415684*m.x86 + 0.0425512*m.x87 + 0.0185632*m.x88 + 0.0150068*m.x89 + 0.0206301*m.x90
+ 0.00808519*m.x91 - 0.00805047*m.x92 + 0.0108192*m.x93 + 0.01367*m.x94 + 0.0348135*m.x95
+ 0.0320515*m.x96 + 0.0132639*m.x97 - 0.00327629*m.x98 + 0.0267494*m.x99 + 0.0178498*m.x100
+ 0.0295494*m.x101 == 0)
m.c75 = Constraint(expr= - m.x22 - 0.00678775*m.x52 + 0.0193615*m.x53 + 0.0189843*m.x54 + 0.00460206*m.x55
+ 0.069787*m.x56 + 0.0216234*m.x57 + 0.0178969*m.x58 + 0.0304477*m.x59 + 0.0285813*m.x60
+ 0.0244718*m.x61 + 0.00530167*m.x62 + 0.0457515*m.x63 - 0.0069834*m.x64 + 0.0183508*m.x65
+ 0.0154661*m.x66 + 0.0288312*m.x67 + 0.0334085*m.x68 + 0.0294945*m.x69 + 0.0176842*m.x70
+ 0.0477266*m.x71 + 0.574196*m.x72 + 0.0396485*m.x73 + 0.0302363*m.x74 + 0.0130538*m.x75
+ 0.02932*m.x76 + 0.0266188*m.x77 + 0.0279647*m.x78 + 0.0180419*m.x79 + 0.0293269*m.x80
+ 0.02223*m.x81 + 0.00413185*m.x82 + 0.0241439*m.x83 + 0.0263683*m.x84 - 0.0132754*m.x85
+ 0.0388595*m.x86 + 0.0578838*m.x87 + 0.00722557*m.x88 + 0.0210916*m.x89 + 0.0335768*m.x90
- 0.00914657*m.x91 + 0.0153621*m.x92 + 0.0170669*m.x93 + 0.00771841*m.x94 + 0.0161467*m.x95
+ 0.0470226*m.x96 + 0.0696792*m.x97 + 0.00688465*m.x98 + 0.0406248*m.x99 - 0.00265226*m.x100
+ 0.0216914*m.x101 == 0)
m.c76 = Constraint(expr= - m.x23 - 0.000991393*m.x52 + 0.114117*m.x53 - 0.00893642*m.x54 - 0.00745268*m.x55
+ 0.040347*m.x56 + 0.105953*m.x57 + 0.00747729*m.x58 + 0.00554913*m.x59 + 0.00289351*m.x60
+ 0.00915034*m.x61 + 0.000497945*m.x62 + 0.0404933*m.x63 + 0.00152018*m.x64 - 0.0158535*m.x65
+ 0.0109601*m.x66 + 0.12559*m.x67 + 0.0195246*m.x68 + 0.030868*m.x69 + 0.016224*m.x70
+ 0.0307124*m.x71 + 0.0396485*m.x72 + 0.567664*m.x73 + 0.0167088*m.x74 + 0.00851376*m.x75
+ 0.0194063*m.x76 - 0.00258911*m.x77 + 0.000352563*m.x78 + 0.0170447*m.x79 + 0.00326757*m.x80
+ 0.0111415*m.x81 + 0.0158008*m.x82 + 0.10889*m.x83 + 0.116075*m.x84 + 0.0169971*m.x85
+ 0.0341233*m.x86 + 0.0267429*m.x87 - 0.0114268*m.x88 - 0.00234199*m.x89 + 0.0350183*m.x90
- 0.00327782*m.x91 + 0.0234788*m.x92 + 0.0976326*m.x93 + 0.000202835*m.x94 + 0.00567421*m.x95
+ 0.0334415*m.x96 + 0.0182382*m.x97 - 0.00355687*m.x98 + 0.0188454*m.x99 + 0.0261119*m.x100
+ 0.0236217*m.x101 == 0)
m.c77 = Constraint(expr= - m.x24 + 0.0104307*m.x52 + 0.0118212*m.x53 + 0.020493*m.x54 + 0.0160821*m.x55 + 0.039664*m.x56
+ 0.0128583*m.x57 + 0.0262*m.x58 + 0.0268377*m.x59 + 0.0153119*m.x60 - 0.000197643*m.x61
+ 0.0156557*m.x62 + 0.0173727*m.x63 - 0.00484524*m.x64 + 0.0070277*m.x65 + 0.0128077*m.x66
+ 0.0213959*m.x67 + 0.0119803*m.x68 + 0.0219596*m.x69 + 0.029091*m.x70 + 0.0320937*m.x71
+ 0.0302363*m.x72 + 0.0167088*m.x73 + 0.227104*m.x74 + 0.0110539*m.x75 + 0.0685123*m.x76
+ 0.0166982*m.x77 + 0.00939654*m.x78 + 0.00636519*m.x79 + 0.0242445*m.x80 + 0.0724648*m.x81
+ 0.0194513*m.x82 + 0.00366476*m.x83 + 0.0134866*m.x84 + 0.00878361*m.x85 + 0.0269894*m.x86
+ 0.0281086*m.x87 + 0.00493919*m.x88 + 0.0265072*m.x89 + 0.0495917*m.x90 + 0.00899853*m.x91
+ 0.0191737*m.x92 + 0.0112022*m.x93 + 0.0106917*m.x94 + 0.0282436*m.x95 + 0.0119814*m.x96
+ 0.00852934*m.x97 + 0.0132486*m.x98 - 0.00483593*m.x99 + 0.00268557*m.x100 + 0.0264927*m.x101
== 0)
m.c78 = Constraint(expr= - m.x25 - 0.00554627*m.x52 + 0.00695719*m.x53 + 0.0060982*m.x54 + 0.00240536*m.x55
+ 0.0102778*m.x56 + 0.00399753*m.x57 + 0.0176229*m.x58 + 0.00229807*m.x59 + 0.00890117*m.x60
- 0.00657968*m.x61 + 0.0127479*m.x62 + 0.0186957*m.x63 + 0.0034154*m.x64 + 0.00809957*m.x65
+ 0.00710322*m.x66 + 0.0275661*m.x67 + 0.0131243*m.x68 + 0.00928365*m.x69 + 0.0174387*m.x70
+ 0.00895684*m.x71 + 0.0130538*m.x72 + 0.00851376*m.x73 + 0.0110539*m.x74 + 0.183511*m.x75
+ 0.00968069*m.x76 + 0.00777885*m.x77 + 0.00484151*m.x78 + 0.0120339*m.x79 + 0.0182045*m.x80
+ 0.0142639*m.x81 + 0.014134*m.x82 + 0.0123093*m.x83 + 0.00543117*m.x84 + 0.0065975*m.x85
+ 0.016776*m.x86 + 0.00170557*m.x87 + 0.0026933*m.x88 + 0.00792354*m.x89 + 0.00735961*m.x90
- 0.000614984*m.x91 + 0.0118767*m.x92 + 0.00947244*m.x93 + 0.00574257*m.x94 + 0.0110814*m.x95
+ 0.00174348*m.x96 + 0.00448876*m.x97 + 0.0220952*m.x98 + 0.0063483*m.x99 + 0.000150809*m.x100
+ 6.68242E-5*m.x101 == 0)
m.c79 = Constraint(expr= - m.x26 + 0.000275614*m.x52 - 0.00146012*m.x53 + 0.00709161*m.x54 + 0.0042418*m.x55
+ 0.0172657*m.x56 + 0.0184167*m.x57 + 0.0184672*m.x58 + 0.01809*m.x59 + 0.0222796*m.x60
- 0.00738206*m.x61 + 0.0111445*m.x62 + 0.0206278*m.x63 - 0.0060451*m.x64 + 0.0120566*m.x65
+ 0.0100525*m.x66 + 0.0260354*m.x67 + 0.009587*m.x68 + 0.0279232*m.x69 + 0.0237535*m.x70
+ 0.0261585*m.x71 + 0.02932*m.x72 + 0.0194063*m.x73 + 0.0685123*m.x74 + 0.00968069*m.x75
+ 0.190498*m.x76 + 0.0273631*m.x77 + 0.0144043*m.x78 + 0.00276303*m.x79 + 0.00422846*m.x80
+ 0.0638216*m.x81 + 0.017823*m.x82 + 0.0135183*m.x83 + 0.00365697*m.x84 - 0.000986928*m.x85
+ 0.0169049*m.x86 + 0.0266562*m.x87 + 0.00523559*m.x88 + 0.014168*m.x89 + 0.0413952*m.x90
+ 0.00776725*m.x91 + 0.0326211*m.x92 + 0.0119027*m.x93 + 0.011424*m.x94 + 0.015665*m.x95
+ 0.0129933*m.x96 + 0.0057329*m.x97 + 0.00863731*m.x98 + 0.00782909*m.x99 + 0.0385547*m.x100
+ 0.0147477*m.x101 == 0)
m.c80 = Constraint(expr= - m.x27 + 0.00146767*m.x52 - 0.00455327*m.x53 + 0.0192029*m.x54 + 0.00264811*m.x55
+ 0.00473961*m.x56 + 0.010492*m.x57 + 0.00333289*m.x58 + 0.0114054*m.x59 + 0.0442301*m.x60
+ 0.0105229*m.x61 + 0.0085222*m.x62 + 0.0197312*m.x63 + 0.0102102*m.x64 + 0.0156797*m.x65
+ 0.0141544*m.x66 + 0.00490195*m.x67 + 0.00326145*m.x68 + 0.0138525*m.x69 + 0.0139083*m.x70
+ 0.0224334*m.x71 + 0.0266188*m.x72 - 0.00258911*m.x73 + 0.0166982*m.x74 + 0.00777885*m.x75
+ 0.0273631*m.x76 + 0.14242*m.x77 + 0.0237243*m.x78 + 0.00294961*m.x79 + 0.0200953*m.x80
+ 0.0206276*m.x81 + 0.0230949*m.x82 + 0.00859757*m.x83 + 0.0169*m.x84 + 0.0129568*m.x85
+ 0.0262844*m.x86 + 0.0202602*m.x87 + 0.0135266*m.x88 + 0.0134485*m.x89 + 0.0259415*m.x90
+ 0.0189386*m.x91 + 0.0167553*m.x92 + 0.012156*m.x93 + 0.0312321*m.x94 + 0.0133677*m.x95
+ 0.0168904*m.x96 + 0.021903*m.x97 + 0.00904192*m.x98 + 0.00640522*m.x99 + 0.000393756*m.x100
+ 0.0123718*m.x101 == 0)
m.c81 = Constraint(expr= - m.x28 - 0.0219202*m.x52 - 0.00233478*m.x53 + 0.00489188*m.x54 + 0.00832847*m.x55
+ 0.0132399*m.x56 + 0.0244629*m.x57 + 0.0125282*m.x58 + 0.0148192*m.x59 + 0.0119004*m.x60
- 0.0124412*m.x61 - 0.00157042*m.x62 + 0.0258626*m.x63 + 0.019147*m.x64 + 0.019146*m.x65
- 0.00302889*m.x66 - 8.95127E-5*m.x67 + 0.0055836*m.x68 + 0.0582128*m.x69 + 0.0112918*m.x70
+ 0.0281506*m.x71 + 0.0279647*m.x72 + 0.000352563*m.x73 + 0.00939654*m.x74 + 0.00484151*m.x75
+ 0.0144043*m.x76 + 0.0237243*m.x77 + 0.507964*m.x78 + 0.0151067*m.x79 + 0.0166188*m.x80
+ 0.010503*m.x81 + 0.006312*m.x82 + 0.00351795*m.x83 + 0.0068205*m.x84 + 0.00479431*m.x85
+ 0.0145654*m.x86 + 0.033506*m.x87 + 0.00559812*m.x88 + 0.0126415*m.x89 + 0.0123446*m.x90
+ 0.028821*m.x91 + 0.00981253*m.x92 + 0.0284364*m.x93 + 0.0179957*m.x94 + 0.0240785*m.x95
+ 0.0203486*m.x96 + 0.0246958*m.x97 + 0.0301721*m.x98 + 0.00697773*m.x99 + 0.00248209*m.x100
- 0.00975878*m.x101 == 0)
m.c82 = Constraint(expr= - m.x29 - 0.0152471*m.x52 - 0.00354018*m.x53 + 0.0141398*m.x54 + 0.0040175*m.x55
- 0.0118559*m.x56 + 0.047228*m.x57 + 0.0160426*m.x58 + 0.0286969*m.x59 + 0.00720201*m.x60
- 0.00440667*m.x61 + 0.00905753*m.x62 + 0.0281149*m.x63 + 0.00861968*m.x64 + 0.0230557*m.x65
+ 0.0202446*m.x66 + 0.0278101*m.x67 + 0.0160137*m.x68 + 0.0225874*m.x69 + 0.0315031*m.x70
+ 0.0324489*m.x71 + 0.0180419*m.x72 + 0.0170447*m.x73 + 0.00636519*m.x74 + 0.0120339*m.x75
+ 0.00276303*m.x76 + 0.00294961*m.x77 + 0.0151067*m.x78 + 0.670433*m.x79 + 0.0205952*m.x80
+ 0.00444933*m.x81 + 0.0225512*m.x82 + 0.0465233*m.x83 + 0.0608492*m.x84 + 0.0358653*m.x85
+ 0.0417635*m.x86 - 0.00291679*m.x87 - 0.000317393*m.x88 + 0.0125595*m.x89 - 0.00116156*m.x90
- 0.00192373*m.x91 + 0.0114605*m.x92 + 0.0425365*m.x93 - 0.000808147*m.x94 + 0.00295518*m.x95
+ 0.0242798*m.x96 + 0.0107554*m.x97 + 0.0120875*m.x98 + 0.0292966*m.x99 - 0.00126318*m.x100
- 0.0099048*m.x101 == 0)
m.c83 = Constraint(expr= - m.x30 - 0.0133041*m.x52 - 0.0108257*m.x53 + 0.0183881*m.x54 + 0.0153818*m.x55
+ 0.0329745*m.x56 + 0.00547127*m.x57 - 0.00910903*m.x58 + 0.0156643*m.x59 + 0.0201433*m.x60
+ 0.0123441*m.x61 - 0.00402737*m.x62 + 0.020796*m.x63 - 0.0013634*m.x64 + 0.00625971*m.x65
+ 0.0273331*m.x66 + 0.0154943*m.x67 - 0.00700837*m.x68 + 0.0216165*m.x69 + 0.0104372*m.x70
+ 0.0266137*m.x71 + 0.0293269*m.x72 + 0.00326757*m.x73 + 0.0242445*m.x74 + 0.0182045*m.x75
+ 0.00422846*m.x76 + 0.0200953*m.x77 + 0.0166188*m.x78 + 0.0205952*m.x79 + 0.229224*m.x80
+ 0.0223216*m.x81 + 0.0206237*m.x82 + 0.0101265*m.x83 + 0.0015088*m.x84 + 0.0223314*m.x85
+ 0.0273206*m.x86 + 0.00161461*m.x87 + 0.00487681*m.x88 + 0.0183379*m.x89 + 0.0275921*m.x90
+ 0.0159442*m.x91 + 0.0134875*m.x92 + 0.0270417*m.x93 + 0.00200928*m.x94 + 0.0218467*m.x95
+ 0.00352069*m.x96 + 0.00446644*m.x97 + 0.0176237*m.x98 + 0.0279531*m.x99 + 0.0110346*m.x100
+ 0.00696769*m.x101 == 0)
m.c84 = Constraint(expr= - m.x31 + 0.00532027*m.x52 + 0.00548427*m.x53 + 0.0132555*m.x54 + 0.0182359*m.x55
+ 0.00776731*m.x56 + 0.0133769*m.x57 + 0.0168617*m.x58 + 0.0214673*m.x59 + 0.0169933*m.x60
+ 0.00670955*m.x61 + 0.00937755*m.x62 + 0.0154147*m.x63 + 0.00686903*m.x64 + 0.0154784*m.x65
+ 0.0142628*m.x66 + 0.0110009*m.x67 + 0.00816694*m.x68 + 0.0188341*m.x69 + 0.0253639*m.x70
+ 0.0183526*m.x71 + 0.02223*m.x72 + 0.0111415*m.x73 + 0.0724648*m.x74 + 0.0142639*m.x75
+ 0.0638216*m.x76 + 0.0206276*m.x77 + 0.010503*m.x78 + 0.00444933*m.x79 + 0.0223216*m.x80
+ 0.185075*m.x81 + 0.0205911*m.x82 + 0.0145088*m.x83 + 0.00876387*m.x84 + 0.0107778*m.x85
+ 0.014933*m.x86 + 0.0186524*m.x87 + 0.0106153*m.x88 + 0.044217*m.x89 + 0.0463482*m.x90
+ 0.019405*m.x91 + 0.0233399*m.x92 + 0.0136317*m.x93 + 0.0110294*m.x94 + 0.0119847*m.x95
+ 0.0293732*m.x96 - 0.00785039*m.x97 + 0.0195485*m.x98 + 0.00530393*m.x99 - 0.00585743*m.x100
+ 0.0197286*m.x101 == 0)
m.c85 = Constraint(expr= - m.x32 + 0.0190296*m.x52 + 0.00843954*m.x53 + 0.0089825*m.x54 + 0.00961571*m.x55
+ 0.00146596*m.x56 + 0.0119332*m.x57 + 0.00649361*m.x58 + 0.00423722*m.x59 + 0.019457*m.x60
+ 0.000975768*m.x61 + 0.00827346*m.x62 + 0.00821687*m.x63 + 0.0133687*m.x64 + 0.0113709*m.x65
+ 0.0130754*m.x66 + 0.0209885*m.x67 + 0.0133907*m.x68 + 0.0113276*m.x69 + 0.00237959*m.x70
- 0.0016676*m.x71 + 0.00413185*m.x72 + 0.0158008*m.x73 + 0.0194513*m.x74 + 0.014134*m.x75
+ 0.017823*m.x76 + 0.0230949*m.x77 + 0.006312*m.x78 + 0.0225512*m.x79 + 0.0206237*m.x80
+ 0.0205911*m.x81 + 0.147147*m.x82 + 0.0105685*m.x83 + 0.00474516*m.x84 + 0.0149866*m.x85
- 0.00374475*m.x86 + 0.0147657*m.x87 + 0.00370161*m.x88 - 0.00382518*m.x89 + 0.0112733*m.x90
+ 0.00898559*m.x91 + 0.047951*m.x92 + 0.00269973*m.x93 + 0.00305288*m.x94 + 0.00998711*m.x95
- 0.00599198*m.x96 + 0.00378519*m.x97 + 0.00228262*m.x98 + 0.000223223*m.x99 + 0.0131328*m.x100
+ 0.0100911*m.x101 == 0)
m.c86 = Constraint(expr= - m.x33 + 9.52152E-5*m.x52 + 0.0957415*m.x53 - 0.00433095*m.x54 + 0.0122098*m.x55
+ 0.0398038*m.x56 + 0.161483*m.x57 + 0.000720061*m.x58 + 0.0101393*m.x59 + 0.0111733*m.x60
+ 0.0409171*m.x61 + 0.00543371*m.x62 + 0.0277493*m.x63 + 0.00136495*m.x64 - 0.00207874*m.x65
+ 0.00886564*m.x66 + 0.129895*m.x67 + 0.00598212*m.x68 + 0.0272881*m.x69 - 0.00567431*m.x70
+ 0.0194921*m.x71 + 0.0241439*m.x72 + 0.10889*m.x73 + 0.00366476*m.x74 + 0.0123093*m.x75
+ 0.0135183*m.x76 + 0.00859757*m.x77 + 0.00351795*m.x78 + 0.0465233*m.x79 + 0.0101265*m.x80
+ 0.0145088*m.x81 + 0.0105685*m.x82 + 0.389649*m.x83 + 0.138762*m.x84 + 0.00825629*m.x85
+ 0.0181004*m.x86 + 0.0167077*m.x87 + 0.00722734*m.x88 - 0.00583878*m.x89 + 0.0232216*m.x90
+ 0.0168437*m.x91 + 0.0278419*m.x92 + 0.117531*m.x93 + 0.00545108*m.x94 + 0.007432*m.x95
+ 0.0161894*m.x96 + 0.0203409*m.x97 - 0.00640225*m.x98 + 0.00363753*m.x99 + 0.00102053*m.x100
+ 0.0252622*m.x101 == 0)
m.c87 = Constraint(expr= - m.x34 - 0.0180784*m.x52 + 0.0724208*m.x53 + 0.000368443*m.x54 - 0.000558226*m.x55
+ 0.0268424*m.x56 + 0.187982*m.x57 + 0.0015496*m.x58 + 0.00438509*m.x59 + 0.00689119*m.x60
- 0.0110323*m.x61 + 0.0230998*m.x62 + 0.0231334*m.x63 + 0.00888952*m.x64 - 0.00747722*m.x65
+ 0.0125267*m.x66 + 0.104593*m.x67 - 0.00201041*m.x68 + 0.0118425*m.x69 + 0.0125939*m.x70
+ 0.0366494*m.x71 + 0.0263683*m.x72 + 0.116075*m.x73 + 0.0134866*m.x74 + 0.00543117*m.x75
+ 0.00365697*m.x76 + 0.0169*m.x77 + 0.0068205*m.x78 + 0.0608492*m.x79 + 0.0015088*m.x80
+ 0.00876387*m.x81 + 0.00474516*m.x82 + 0.138762*m.x83 + 0.397419*m.x84 + 0.0108491*m.x85
- 0.00298466*m.x86 + 0.0247715*m.x87 + 0.0157939*m.x88 + 0.00640654*m.x89 + 0.0102405*m.x90
+ 0.0051056*m.x91 + 0.0145699*m.x92 + 0.0756527*m.x93 + 0.00684049*m.x94 - 0.000862575*m.x95
+ 0.00996209*m.x96 + 0.0282548*m.x97 + 0.0055526*m.x98 + 0.00924268*m.x99 + 0.00369864*m.x100
- 0.00445725*m.x101 == 0)
m.c88 = Constraint(expr= - m.x35 + 0.00127079*m.x52 + 0.00920314*m.x53 + 0.00845006*m.x54 + 0.0179991*m.x55
+ 0.0120171*m.x56 + 0.00916881*m.x57 + 0.0120757*m.x58 + 0.0186319*m.x59 + 0.00669496*m.x60
- 0.00482281*m.x61 + 0.0238731*m.x62 + 0.0242186*m.x63 + 0.00809492*m.x64 + 0.00726553*m.x65
+ 0.00167144*m.x66 + 0.0164835*m.x67 + 0.0153712*m.x68 + 0.0244022*m.x69 + 0.0195843*m.x70
+ 0.0166731*m.x71 - 0.0132754*m.x72 + 0.0169971*m.x73 + 0.00878361*m.x74 + 0.0065975*m.x75
- 0.000986928*m.x76 + 0.0129568*m.x77 + 0.00479431*m.x78 + 0.0358653*m.x79 + 0.0223314*m.x80
+ 0.0107778*m.x81 + 0.0149866*m.x82 + 0.00825629*m.x83 + 0.0108491*m.x84 + 0.312298*m.x85
+ 0.0120296*m.x86 + 0.0106859*m.x87 + 0.0204397*m.x88 + 0.0119026*m.x89 + 0.0319466*m.x90
+ 0.00664877*m.x91 + 0.00548571*m.x92 + 0.0048078*m.x93 + 0.0331056*m.x94 + 0.0274019*m.x95
+ 0.00104681*m.x96 + 0.011411*m.x97 - 0.00331677*m.x98 - 0.00425863*m.x99 + 0.0100274*m.x100
+ 0.00728145*m.x101 == 0)
m.c89 = Constraint(expr= - m.x36 - 0.00331643*m.x52 - 0.00921773*m.x53 + 0.0106863*m.x54 + 0.0126379*m.x55
+ 0.0145295*m.x56 + 0.0209491*m.x57 + 0.0231367*m.x58 + 0.046181*m.x59 + 0.0331297*m.x60
- 0.00546107*m.x61 + 0.0199311*m.x62 + 0.0562299*m.x63 + 0.00573295*m.x64 + 0.037832*m.x65
+ 0.0368131*m.x66 + 0.0238469*m.x67 + 0.00839091*m.x68 + 0.0305204*m.x69 + 0.0768331*m.x70
+ 0.0415684*m.x71 + 0.0388595*m.x72 + 0.0341233*m.x73 + 0.0269894*m.x74 + 0.016776*m.x75
+ 0.0169049*m.x76 + 0.0262844*m.x77 + 0.0145654*m.x78 + 0.0417635*m.x79 + 0.0273206*m.x80
+ 0.014933*m.x81 - 0.00374475*m.x82 + 0.0181004*m.x83 - 0.00298466*m.x84 + 0.0120296*m.x85
+ 0.618581*m.x86 + 0.0289636*m.x87 - 0.00446781*m.x88 + 0.0224213*m.x89 + 0.0380495*m.x90
+ 0.0386705*m.x91 + 0.0297938*m.x92 + 0.0058598*m.x93 + 0.0252835*m.x94 + 0.0145417*m.x95
+ 0.0665246*m.x96 + 0.00798604*m.x97 + 0.00560573*m.x98 + 0.0328297*m.x99 + 0.0235991*m.x100
+ 0.0470289*m.x101 == 0)
m.c90 = Constraint(expr= - m.x37 - 0.0107273*m.x52 + 0.0112775*m.x53 + 0.0165343*m.x54 + 0.0175827*m.x55
+ 0.0354297*m.x56 + 0.0327261*m.x57 + 0.0160891*m.x58 + 0.0332107*m.x59 + 0.0197397*m.x60
- 0.0142879*m.x61 + 0.0174054*m.x62 + 0.0315629*m.x63 + 0.00828577*m.x64 + 0.0123555*m.x65
+ 0.0135909*m.x66 + 0.0319592*m.x67 + 0.00597115*m.x68 + 0.0378227*m.x69 + 0.0267106*m.x70
+ 0.0425512*m.x71 + 0.0578838*m.x72 + 0.0267429*m.x73 + 0.0281086*m.x74 + 0.00170557*m.x75
+ 0.0266562*m.x76 + 0.0202602*m.x77 + 0.033506*m.x78 - 0.00291679*m.x79 + 0.00161461*m.x80
+ 0.0186524*m.x81 + 0.0147657*m.x82 + 0.0167077*m.x83 + 0.0247715*m.x84 + 0.0106859*m.x85
+ 0.0289636*m.x86 + 0.270232*m.x87 + 0.0400357*m.x88 + 0.00621348*m.x89 + 0.0404134*m.x90
+ 0.00592392*m.x91 + 0.00614247*m.x92 + 0.00530712*m.x93 + 0.00684822*m.x94 + 0.0187153*m.x95
+ 0.0225813*m.x96 + 0.0289411*m.x97 + 0.00901397*m.x98 + 0.0166774*m.x99 + 0.0332544*m.x100
+ 0.0151416*m.x101 == 0)
m.c91 = Constraint(expr= - m.x38 - 6.72321E-5*m.x52 + 0.010577*m.x53 + 0.0182906*m.x54 + 0.00566779*m.x55
- 0.00170776*m.x56 + 0.028455*m.x57 + 0.000127307*m.x58 + 0.0160758*m.x59 + 0.0120744*m.x60
+ 0.018699*m.x61 + 0.00185204*m.x62 + 0.0122553*m.x63 + 0.0152408*m.x64 - 0.000156492*m.x65
- 0.000550234*m.x66 + 0.016159*m.x67 - 0.000508298*m.x68 + 0.00150342*m.x69 + 0.00312045*m.x70
+ 0.0185632*m.x71 + 0.00722557*m.x72 - 0.0114268*m.x73 + 0.00493919*m.x74 + 0.0026933*m.x75
+ 0.00523559*m.x76 + 0.0135266*m.x77 + 0.00559812*m.x78 - 0.000317393*m.x79 + 0.00487681*m.x80
+ 0.0106153*m.x81 + 0.00370161*m.x82 + 0.00722734*m.x83 + 0.0157939*m.x84 + 0.0204397*m.x85
- 0.00446781*m.x86 + 0.0400357*m.x87 + 0.222166*m.x88 + 0.00907574*m.x89 + 0.0281441*m.x90
+ 0.0265542*m.x91 + 0.00608259*m.x92 + 0.0066023*m.x93 + 0.00659999*m.x94 + 0.0224381*m.x95
+ 0.00149053*m.x96 + 0.000405727*m.x97 - 0.0104234*m.x98 + 0.000189871*m.x99
+ 0.00118145*m.x100 + 0.00362186*m.x101 == 0)
m.c92 = Constraint(expr= - m.x39 + 0.0019753*m.x52 - 0.00268772*m.x53 + 0.000474699*m.x54 - 0.000955585*m.x55
+ 0.0255113*m.x56 + 0.0105724*m.x57 + 0.00590674*m.x58 + 0.00541803*m.x59 + 0.0127905*m.x60
+ 0.0440906*m.x61 + 0.0156839*m.x62 + 0.0146058*m.x63 + 0.0110413*m.x64 + 0.0119264*m.x65
+ 0.0369853*m.x66 - 0.00048612*m.x67 - 0.00265155*m.x68 + 0.000336096*m.x69 + 0.00720686*m.x70
+ 0.0150068*m.x71 + 0.0210916*m.x72 - 0.00234199*m.x73 + 0.0265072*m.x74 + 0.00792354*m.x75
+ 0.014168*m.x76 + 0.0134485*m.x77 + 0.0126415*m.x78 + 0.0125595*m.x79 + 0.0183379*m.x80
+ 0.044217*m.x81 - 0.00382518*m.x82 - 0.00583878*m.x83 + 0.00640654*m.x84 + 0.0119026*m.x85
+ 0.0224213*m.x86 + 0.00621348*m.x87 + 0.00907574*m.x88 + 0.394267*m.x89 + 0.0165051*m.x90
+ 0.00980853*m.x91 - 0.00226117*m.x92 - 0.00984533*m.x93 + 0.00565748*m.x94 + 0.00895692*m.x95
+ 0.00919195*m.x96 + 0.00900527*m.x97 + 0.0181986*m.x98 + 0.0249229*m.x99 - 0.000623048*m.x100
+ 0.0135896*m.x101 == 0)
m.c93 = Constraint(expr= - m.x40 - 0.00561942*m.x52 + 0.0104329*m.x53 + 0.0125524*m.x54 + 0.0234718*m.x55
+ 0.0115797*m.x56 + 0.0238296*m.x57 + 0.0251974*m.x58 + 0.0243196*m.x59 + 0.0406861*m.x60
- 0.00363253*m.x61 + 0.00443354*m.x62 + 0.0225422*m.x63 + 0.0069969*m.x64 + 0.0124128*m.x65
+ 0.00970355*m.x66 + 0.0206697*m.x67 + 0.0148232*m.x68 + 0.0330899*m.x69 + 0.0261195*m.x70
+ 0.0206301*m.x71 + 0.0335768*m.x72 + 0.0350183*m.x73 + 0.0495917*m.x74 + 0.00735961*m.x75
+ 0.0413952*m.x76 + 0.0259415*m.x77 + 0.0123446*m.x78 - 0.00116156*m.x79 + 0.0275921*m.x80
+ 0.0463482*m.x81 + 0.0112733*m.x82 + 0.0232216*m.x83 + 0.0102405*m.x84 + 0.0319466*m.x85
+ 0.0380495*m.x86 + 0.0404134*m.x87 + 0.0281441*m.x88 + 0.0165051*m.x89 + 0.226153*m.x90
+ 0.00565646*m.x91 + 0.0239442*m.x92 + 0.00622955*m.x93 + 0.014515*m.x94 + 0.0227247*m.x95
+ 0.026331*m.x96 + 0.0188097*m.x97 + 0.00284125*m.x98 + 0.00673929*m.x99 + 0.00450472*m.x100
+ 0.0152845*m.x101 == 0)
m.c94 = Constraint(expr= - m.x41 - 0.0137411*m.x52 - 0.00184253*m.x53 + 0.00998269*m.x54 - 0.00128625*m.x55
+ 0.0340249*m.x56 - 0.00223337*m.x57 + 0.0109883*m.x58 + 0.0145438*m.x59 + 0.0323148*m.x60
+ 0.00273765*m.x61 + 0.0202129*m.x62 + 0.0126094*m.x63 + 0.0053944*m.x64 + 0.0206051*m.x65
+ 0.0253109*m.x66 - 0.0044719*m.x67 - 0.000660928*m.x68 + 0.0189859*m.x69 + 0.0295481*m.x70
+ 0.00808519*m.x71 - 0.00914657*m.x72 - 0.00327782*m.x73 + 0.00899853*m.x74 - 0.000614984*m.x75
+ 0.00776725*m.x76 + 0.0189386*m.x77 + 0.028821*m.x78 - 0.00192373*m.x79 + 0.0159442*m.x80
+ 0.019405*m.x81 + 0.00898559*m.x82 + 0.0168437*m.x83 + 0.0051056*m.x84 + 0.00664877*m.x85
+ 0.0386705*m.x86 + 0.00592392*m.x87 + 0.0265542*m.x88 + 0.00980853*m.x89 + 0.00565646*m.x90
+ 0.290035*m.x91 + 0.0156774*m.x92 - 0.00869674*m.x93 + 0.00461003*m.x94 - 0.000555319*m.x95
+ 0.016294*m.x96 + 0.0016488*m.x97 + 0.0137582*m.x98 + 0.0245795*m.x99 - 0.00658672*m.x100
+ 0.00527527*m.x101 == 0)
m.c95 = Constraint(expr= - m.x42 + 0.0266953*m.x52 + 0.0230614*m.x53 + 0.00663781*m.x54 + 0.00397589*m.x55
+ 0.00175196*m.x56 + 0.0230382*m.x57 + 0.0197048*m.x58 + 0.00473001*m.x59 + 0.0200869*m.x60
+ 0.00673168*m.x61 + 0.0114171*m.x62 + 0.0195556*m.x63 + 0.0104813*m.x64 + 0.0182519*m.x65
+ 0.01371*m.x66 + 0.0412523*m.x67 + 0.0219128*m.x68 + 0.0161305*m.x69 - 0.00121588*m.x70
- 0.00805047*m.x71 + 0.0153621*m.x72 + 0.0234788*m.x73 + 0.0191737*m.x74 + 0.0118767*m.x75
+ 0.0326211*m.x76 + 0.0167553*m.x77 + 0.00981253*m.x78 + 0.0114605*m.x79 + 0.0134875*m.x80
+ 0.0233399*m.x81 + 0.047951*m.x82 + 0.0278419*m.x83 + 0.0145699*m.x84 + 0.00548571*m.x85
+ 0.0297938*m.x86 + 0.00614247*m.x87 + 0.00608259*m.x88 - 0.00226117*m.x89 + 0.0239442*m.x90
+ 0.0156774*m.x91 + 0.195197*m.x92 + 0.0167141*m.x93 - 0.00108078*m.x94 + 0.0154638*m.x95
+ 0.00879495*m.x96 + 0.0251912*m.x97 + 0.00951858*m.x98 + 0.0145509*m.x99 + 0.0109233*m.x100
+ 0.00930651*m.x101 == 0)
m.c96 = Constraint(expr= - m.x43 + 0.0039322*m.x52 + 0.0797692*m.x53 - 0.00941355*m.x54 + 0.00253364*m.x55
+ 0.0214384*m.x56 + 0.112083*m.x57 + 0.00281047*m.x58 + 0.00681241*m.x59 + 0.00172542*m.x60
+ 0.0033605*m.x61 + 0.00122747*m.x62 + 0.0148528*m.x63 - 0.00694263*m.x64 - 0.0063393*m.x65
+ 0.0151066*m.x66 + 0.150222*m.x67 + 0.0200429*m.x68 + 0.00657093*m.x69 + 0.00174197*m.x70
+ 0.0108192*m.x71 + 0.0170669*m.x72 + 0.0976326*m.x73 + 0.0112022*m.x74 + 0.00947244*m.x75
+ 0.0119027*m.x76 + 0.012156*m.x77 + 0.0284364*m.x78 + 0.0425365*m.x79 + 0.0270417*m.x80
+ 0.0136317*m.x81 + 0.00269973*m.x82 + 0.117531*m.x83 + 0.0756527*m.x84 + 0.0048078*m.x85
+ 0.0058598*m.x86 + 0.00530712*m.x87 + 0.0066023*m.x88 - 0.00984533*m.x89 + 0.00622955*m.x90
- 0.00869674*m.x91 + 0.0167141*m.x92 + 0.306057*m.x93 + 0.018202*m.x94 + 0.0064207*m.x95
+ 0.007465*m.x96 + 0.0209936*m.x97 + 0.00813794*m.x98 + 0.0137895*m.x99 + 0.00376129*m.x100
+ 0.00807619*m.x101 == 0)
m.c97 = Constraint(expr= - m.x44 + 0.0312023*m.x52 - 0.00718849*m.x53 + 0.0166904*m.x54 + 0.0161477*m.x55
+ 0.0113414*m.x56 + 0.00257709*m.x57 + 0.0113665*m.x58 + 0.00988793*m.x59 + 0.0311244*m.x60
+ 0.0241296*m.x61 + 0.0118384*m.x62 + 0.016949*m.x63 + 0.0141714*m.x64 + 0.0162264*m.x65
+ 0.0201164*m.x66 + 0.0060731*m.x67 + 0.00803816*m.x68 + 0.0118269*m.x69 + 0.000971523*m.x70
+ 0.01367*m.x71 + 0.00771841*m.x72 + 0.000202835*m.x73 + 0.0106917*m.x74 + 0.00574257*m.x75
+ 0.011424*m.x76 + 0.0312321*m.x77 + 0.0179957*m.x78 - 0.000808147*m.x79 + 0.00200928*m.x80
+ 0.0110294*m.x81 + 0.00305288*m.x82 + 0.00545108*m.x83 + 0.00684049*m.x84 + 0.0331056*m.x85
+ 0.0252835*m.x86 + 0.00684822*m.x87 + 0.00659999*m.x88 + 0.00565748*m.x89 + 0.014515*m.x90
+ 0.00461003*m.x91 - 0.00108078*m.x92 + 0.018202*m.x93 + 0.2295*m.x94 + 0.0263474*m.x95
+ 0.0158978*m.x96 - 0.00338835*m.x97 + 0.0116215*m.x98 + 0.0102735*m.x99 - 0.0164264*m.x100
+ 0.0105885*m.x101 == 0)
m.c98 = Constraint(expr= - m.x45 + 0.00475029*m.x52 + 0.00668562*m.x53 + 0.00602889*m.x54 + 0.0163612*m.x55
+ 0.039091*m.x56 - 0.0088657*m.x57 + 0.0128475*m.x58 + 0.0149668*m.x59 + 0.00519737*m.x60
- 0.00441557*m.x61 + 0.0228483*m.x62 + 0.0309886*m.x63 - 0.00184581*m.x64 + 0.0114734*m.x65
+ 0.0193544*m.x66 + 0.00469106*m.x67 + 0.0174527*m.x68 + 0.0262376*m.x69 + 0.016521*m.x70
+ 0.0348135*m.x71 + 0.0161467*m.x72 + 0.00567421*m.x73 + 0.0282436*m.x74 + 0.0110814*m.x75
+ 0.015665*m.x76 + 0.0133677*m.x77 + 0.0240785*m.x78 + 0.00295518*m.x79 + 0.0218467*m.x80
+ 0.0119847*m.x81 + 0.00998711*m.x82 + 0.007432*m.x83 - 0.000862575*m.x84 + 0.0274019*m.x85
+ 0.0145417*m.x86 + 0.0187153*m.x87 + 0.0224381*m.x88 + 0.00895692*m.x89 + 0.0227247*m.x90
- 0.000555319*m.x91 + 0.0154638*m.x92 + 0.0064207*m.x93 + 0.0263474*m.x94 + 0.219232*m.x95
+ 0.0233015*m.x96 - 0.00971973*m.x97 + 0.0161499*m.x98 + 0.0121398*m.x99 - 0.000692501*m.x100
+ 0.00371111*m.x101 == 0)
m.c99 = Constraint(expr= - m.x46 + 0.00458043*m.x52 - 0.00479877*m.x53 + 0.00224387*m.x54 + 0.012804*m.x55
+ 0.00619763*m.x56 + 0.0101284*m.x57 + 0.00622782*m.x58 + 0.023562*m.x59 + 0.0142684*m.x60
- 0.00703875*m.x61 + 0.0131884*m.x62 + 0.0111695*m.x63 + 0.0147295*m.x64 + 0.0298746*m.x65
+ 0.0166079*m.x66 + 0.032667*m.x67 + 0.00328568*m.x68 + 0.0229703*m.x69 + 0.0242338*m.x70
+ 0.0320515*m.x71 + 0.0470226*m.x72 + 0.0334415*m.x73 + 0.0119814*m.x74 + 0.00174348*m.x75
+ 0.0129933*m.x76 + 0.0168904*m.x77 + 0.0203486*m.x78 + 0.0242798*m.x79 + 0.00352069*m.x80
+ 0.0293732*m.x81 - 0.00599198*m.x82 + 0.0161894*m.x83 + 0.00996209*m.x84 + 0.00104681*m.x85
+ 0.0665246*m.x86 + 0.0225813*m.x87 + 0.00149053*m.x88 + 0.00919195*m.x89 + 0.026331*m.x90
+ 0.016294*m.x91 + 0.00879495*m.x92 + 0.007465*m.x93 + 0.0158978*m.x94 + 0.0233015*m.x95
+ 0.325248*m.x96 + 0.0152129*m.x97 + 0.0136663*m.x98 + 0.0127301*m.x99 - 0.00399355*m.x100
+ 0.00993756*m.x101 == 0)
m.c100 = Constraint(expr= - m.x47 - 0.0111713*m.x52 + 0.037467*m.x53 - 0.00806098*m.x54 + 0.0254602*m.x55
+ 0.0133319*m.x56 + 0.0087194*m.x57 + 0.0245605*m.x58 + 0.0173729*m.x59 + 0.0178041*m.x60
+ 0.016325*m.x61 - 0.0151598*m.x62 + 0.023004*m.x63 - 0.00369236*m.x64 + 0.00393739*m.x65
+ 0.0113423*m.x66 + 0.00513266*m.x67 + 0.00580133*m.x68 + 0.0245122*m.x69 + 0.0387835*m.x70
+ 0.0132639*m.x71 + 0.0696792*m.x72 + 0.0182382*m.x73 + 0.00852934*m.x74 + 0.00448876*m.x75
+ 0.0057329*m.x76 + 0.021903*m.x77 + 0.0246958*m.x78 + 0.0107554*m.x79 + 0.00446644*m.x80
- 0.00785039*m.x81 + 0.00378519*m.x82 + 0.0203409*m.x83 + 0.0282548*m.x84 + 0.011411*m.x85
+ 0.00798604*m.x86 + 0.0289411*m.x87 + 0.000405727*m.x88 + 0.00900527*m.x89 + 0.0188097*m.x90
+ 0.0016488*m.x91 + 0.0251912*m.x92 + 0.0209936*m.x93 - 0.00338835*m.x94 - 0.00971973*m.x95
+ 0.0152129*m.x96 + 0.903924*m.x97 - 0.0108291*m.x98 + 0.0425572*m.x99 - 0.0154741*m.x100
+ 0.0155463*m.x101 == 0)
m.c101 = Constraint(expr= - m.x48 + 0.00233202*m.x52 - 0.000833339*m.x53 + 0.0151626*m.x54 + 0.0164285*m.x55
+ 0.0121082*m.x56 + 0.016345*m.x57 + 0.00706149*m.x58 + 0.016267*m.x59 + 0.00992985*m.x60
+ 0.00222896*m.x61 + 0.00844519*m.x62 + 0.00865625*m.x63 + 0.00526228*m.x64 + 0.0153743*m.x65
+ 0.0488179*m.x66 + 0.00884207*m.x67 - 0.000537323*m.x68 + 0.00497315*m.x69 + 0.0249114*m.x70
- 0.00327629*m.x71 + 0.00688465*m.x72 - 0.00355687*m.x73 + 0.0132486*m.x74 + 0.0220952*m.x75
+ 0.00863731*m.x76 + 0.00904192*m.x77 + 0.0301721*m.x78 + 0.0120875*m.x79 + 0.0176237*m.x80
+ 0.0195485*m.x81 + 0.00228262*m.x82 - 0.00640225*m.x83 + 0.0055526*m.x84 - 0.00331677*m.x85
+ 0.00560573*m.x86 + 0.00901397*m.x87 - 0.0104234*m.x88 + 0.0181986*m.x89 + 0.00284125*m.x90
+ 0.0137582*m.x91 + 0.00951858*m.x92 + 0.00813794*m.x93 + 0.0116215*m.x94 + 0.0161499*m.x95
+ 0.0136663*m.x96 - 0.0108291*m.x97 + 0.224056*m.x98 + 0.00641426*m.x99 + 0.0200771*m.x100
- 0.0157458*m.x101 == 0)
m.c102 = Constraint(expr= - m.x49 + 0.00279105*m.x52 - 0.00287641*m.x53 - 0.000965771*m.x54 + 0.0113336*m.x55
+ 0.0357203*m.x56 + 0.0145296*m.x57 + 0.00272192*m.x58 + 0.0121424*m.x59 + 0.0146222*m.x60
- 0.0077883*m.x61 + 0.0198609*m.x62 + 0.0218181*m.x63 + 0.00828497*m.x64 + 0.00989917*m.x65
+ 0.016393*m.x66 + 0.0125003*m.x67 + 0.0127107*m.x68 + 0.0222552*m.x69 + 0.0106646*m.x70
+ 0.0267494*m.x71 + 0.0406248*m.x72 + 0.0188454*m.x73 - 0.00483593*m.x74 + 0.0063483*m.x75
+ 0.00782909*m.x76 + 0.00640522*m.x77 + 0.00697773*m.x78 + 0.0292966*m.x79 + 0.0279531*m.x80
+ 0.00530393*m.x81 + 0.000223223*m.x82 + 0.00363753*m.x83 + 0.00924268*m.x84
- 0.00425863*m.x85 + 0.0328297*m.x86 + 0.0166774*m.x87 + 0.000189871*m.x88 + 0.0249229*m.x89
+ 0.00673929*m.x90 + 0.0245795*m.x91 + 0.0145509*m.x92 + 0.0137895*m.x93 + 0.0102735*m.x94
+ 0.0121398*m.x95 + 0.0127301*m.x96 + 0.0425572*m.x97 + 0.00641426*m.x98 + 0.246306*m.x99
+ 0.00353612*m.x100 - 0.00520827*m.x101 == 0)
m.c103 = Constraint(expr= - m.x50 + 0.00588268*m.x52 - 0.00540049*m.x53 + 0.0157379*m.x54 + 0.00992279*m.x55
+ 0.0381607*m.x56 + 0.00606395*m.x57 + 0.00300911*m.x58 - 0.00299957*m.x59 + 0.00920343*m.x60
- 0.00313691*m.x61 + 0.0242712*m.x62 + 0.0268327*m.x63 - 0.0189632*m.x64 + 0.0228823*m.x65
- 0.00100315*m.x66 - 0.00578404*m.x67 + 0.0134156*m.x68 + 0.00180371*m.x69 - 0.0157855*m.x70
+ 0.0178498*m.x71 - 0.00265226*m.x72 + 0.0261119*m.x73 + 0.00268557*m.x74 + 0.000150809*m.x75
+ 0.0385547*m.x76 + 0.000393756*m.x77 + 0.00248209*m.x78 - 0.00126318*m.x79 + 0.0110346*m.x80
- 0.00585743*m.x81 + 0.0131328*m.x82 + 0.00102053*m.x83 + 0.00369864*m.x84 + 0.0100274*m.x85
+ 0.0235991*m.x86 + 0.0332544*m.x87 + 0.00118145*m.x88 - 0.000623048*m.x89 + 0.00450472*m.x90
- 0.00658672*m.x91 + 0.0109233*m.x92 + 0.00376129*m.x93 - 0.0164264*m.x94 - 0.000692501*m.x95
- 0.00399355*m.x96 - 0.0154741*m.x97 + 0.0200771*m.x98 + 0.00353612*m.x99 + 1.25224*m.x100
+ 0.0259038*m.x101 == 0)
m.c104 = Constraint(expr= - m.x51 + 0.0171354*m.x52 + 0.0133618*m.x53 + 0.0187837*m.x54 + 0.00909239*m.x55
+ 0.0203578*m.x56 + 0.00747571*m.x57 + 0.0133916*m.x58 + 0.00907044*m.x59 + 0.0199828*m.x60
+ 0.0264584*m.x61 + 0.0138048*m.x62 + 0.0203605*m.x63 + 0.0101028*m.x64 + 0.017772*m.x65
+ 0.0101386*m.x66 + 0.0225237*m.x67 + 0.00882735*m.x68 + 0.00323067*m.x69 + 0.0165385*m.x70
+ 0.0295494*m.x71 + 0.0216914*m.x72 + 0.0236217*m.x73 + 0.0264927*m.x74 + 6.68242E-5*m.x75
+ 0.0147477*m.x76 + 0.0123718*m.x77 - 0.00975878*m.x78 - 0.0099048*m.x79 + 0.00696769*m.x80
+ 0.0197286*m.x81 + 0.0100911*m.x82 + 0.0252622*m.x83 - 0.00445725*m.x84 + 0.00728145*m.x85
+ 0.0470289*m.x86 + 0.0151416*m.x87 + 0.00362186*m.x88 + 0.0135896*m.x89 + 0.0152845*m.x90
+ 0.00527527*m.x91 + 0.00930651*m.x92 + 0.00807619*m.x93 + 0.0105885*m.x94 + 0.00371111*m.x95
+ 0.00993756*m.x96 + 0.0155463*m.x97 - 0.0157458*m.x98 - 0.00520827*m.x99 + 0.0259038*m.x100
+ 0.389181*m.x101 == 0)
| 74,910 | 53,719 |
from . import game

# Launch the game only when this module is executed as the program entry
# point (e.g. ``python -m <package>``).  Previously ``game.main()`` ran
# unconditionally, so merely importing this module started the game.
if __name__ == "__main__":
    game.main()
| 32 | 12 |
from __future__ import absolute_import, unicode_literals
import unittest
from mason import Param, ANY, SELECT, COUNT, SUM, AND, OR, Table, NUMERIC, DATE, COALESCE, CASE
class TheSelectClass(unittest.TestCase):
def test_returns_string_for_select_query(self):
purchases = Table('purchases')
users = Table('users')
user_id = Param('user_id')
start = Param('start')
end = Param('end')
query = str(
SELECT(purchases.id, purchases.product_name, NUMERIC(purchases.product_price, 10, 2),
DATE(purchases.datetime_purchased))
.FROM(purchases)
.INNER_JOIN(users.ON(purchases.purchaser_id == users.user_id))
.WHERE(AND(purchases.datetime_purchased.BETWEEN(start).AND(end),
OR(purchases.purchaser_id == user_id,
purchases.purchaser_id.IS_NULL)))
.ORDER_BY(purchases.datetime_purchased.ASC)
.LIMIT(10)
.OFFSET(10)
)
expected_query = '\n'.join([
"SELECT purchases.id, purchases.product_name, "
"(purchases.product_price)::NUMERIC(10, 2), (purchases.datetime_purchased)::DATE",
"FROM purchases",
"INNER JOIN users ON purchases.purchaser_id = users.user_id",
"WHERE purchases.datetime_purchased BETWEEN %(start)s AND %(end)s "
"AND (purchases.purchaser_id = %(user_id)s OR purchases.purchaser_id IS NULL)",
"ORDER BY purchases.datetime_purchased ASC",
"LIMIT 10",
"OFFSET 10",
])
self.assertEqual(query, expected_query)
def test_returns_string_for_select_query_grouping(self):
purchases = Table('purchases')
start = Param('start')
end = Param('end')
min_category_sum = Param('min_category_sum')
num_purchases = COUNT(purchases).AS('num_purchases')
category_percent = (SUM(
CASE.WHEN(purchases.is_valid)
.THEN(COALESCE(purchases.product_price, 0))
.ELSE(0).END
) / 100.0).AS('category_percent')
category_sum = SUM(COALESCE(purchases.product_price, 0)).AS('category_sum')
query = str(
SELECT(purchases.category, category_percent, num_purchases)
.FROM(purchases)
.WHERE(purchases.datetime_purchased.BETWEEN(start).AND(end))
.GROUP_BY(purchases.category)
.HAVING(category_sum > min_category_sum)
)
expected_query = '\n'.join([
("SELECT purchases.category, "
"(SUM(CASE WHEN purchases.is_valid "
"THEN COALESCE(purchases.product_price, 0) ELSE 0 END) / 100.0) AS category_percent, "
"COUNT(*) AS num_purchases"),
"FROM purchases",
"WHERE purchases.datetime_purchased BETWEEN %(start)s AND %(end)s",
"GROUP BY purchases.category",
"HAVING category_sum > %(min_category_sum)s",
])
self.assertEqual(query, expected_query)
def test_returns_string_for_select_query_with_subqueries(self):
purchases = Table('purchases')
num_purchases = COUNT(purchases).AS('num_purchases')
grouped_purchases = (
SELECT(purchases.category.AS('category'), num_purchases)
.FROM(purchases)
.GROUP_BY(purchases.category)
.AS('grouped_purchases')
)
products = Table('products')
num_products = COUNT(products).AS('num_products')
grouped_products = (
SELECT(products.category.AS('category'), num_products)
.FROM(products)
.GROUP_BY(products.category)
.AS('grouped_products')
)
categories_param = Param('categories')
categories_table = Table('categories')
query = str(
SELECT(grouped_purchases.category, grouped_purchases.num_purchases, grouped_products.num_products)
.FROM(grouped_purchases)
.INNER_JOIN(grouped_products.ON(grouped_purchases.category == grouped_products.category))
.WHERE(AND(grouped_purchases.category == ANY(categories_param),
grouped_purchases.category.IN(SELECT(categories_table.category).FROM(categories_table))))
)
expected_query = '\n'.join([
"SELECT grouped_purchases.category, grouped_purchases.num_purchases, grouped_products.num_products",
"FROM (",
"\tSELECT purchases.category AS category, COUNT(*) AS num_purchases",
"\tFROM purchases",
"\tGROUP BY purchases.category",
") AS grouped_purchases",
"INNER JOIN (",
"\tSELECT products.category AS category, COUNT(*) AS num_products",
"\tFROM products",
"\tGROUP BY products.category",
") AS grouped_products ON grouped_purchases.category = grouped_products.category",
"WHERE grouped_purchases.category = ANY(%(categories)s) "
"AND grouped_purchases.category IN (",
"\tSELECT categories.category",
"\tFROM categories",
")",
])
self.assertEqual(query, expected_query)
def test_returns_string_for_select_query_with_joins(self):
    """Every join flavour should render with its full SQL keyword, one per line."""
    tbl = Table('table')
    rendered = str(
        SELECT('*')
        .FROM(tbl)
        .LEFT_OUTER_JOIN(tbl)
        .RIGHT_OUTER_JOIN(tbl)
        .FULL_OUTER_JOIN(tbl)
        .OUTER_JOIN(tbl)
        .LIMIT(10)
    )
    expected_lines = (
        "SELECT *",
        "FROM table",
        "LEFT OUTER JOIN table",
        "RIGHT OUTER JOIN table",
        "FULL OUTER JOIN table",
        "OUTER JOIN table",
        "LIMIT 10",
    )
    self.assertEqual(rendered, '\n'.join(expected_lines))
| 6,012 | 1,791 |
class Departments(object):
    """Ordered, index-addressable collection of departments."""

    def __init__(self):
        # Bug fix: ``_departments`` used to be a CLASS attribute, so every
        # instance shared (and mutated) one global list. Each instance now
        # gets its own list.
        self._departments = []

    def add_department(self, department):
        """Append *department* to the end of the collection."""
        self._departments.append(department)

    def get_department(self, i):
        """Return the department stored at index *i* (raises IndexError if out of range)."""
        return self._departments[i]

    @property
    def departments_range(self):
        """Tuple ``(0, len - 1)`` of valid indices; ``(0, -1)`` when empty."""
        return (0, len(self._departments) - 1)
| 302 | 99 |
# -*- coding: utf-8 -*-
"""
Calculate monthly bias ratios of variables from climate station
to overlapping gridMET (or other gridded dataset) cells.
Input file for this module must first be created by running
:mod:`gridwxcomp.prep_input` followed by :mod:`gridwxcomp.download_gridmet_opendap`.
Attributes:
GRIDMET_STATION_VARS (:obj:`dict`): mapping dictionary with gridMET
variable names as keys and station variable names as values.
Used to determine which station variable to calculate bias
ratios according to the given gridMET variable.
Default values::
GRIDMET_STATION_VARS = {
'u2_ms' : 'ws_2m (m/s)',
'tmin_c' : 'TMin (C)',
'tmax_c' : 'TMax (C)',
'srad_wm2' : 'Rs (w/m2)',
'ea_kpa' : 'Vapor Pres (kPa)',
'prcp_mm' : 'Precip (mm)',
'etr_mm' : 'ETr (mm)',
'eto_mm' : 'ETo (mm)'
}
Note: The module attribute ``GRIDMET_STATION_VARS`` can be manually adjusted,
if ``gridwxcomp`` is installed in editable mode or used as scripts from the
root directory. New pairs of station-to-grid variables can then be made
or removed to efficiently use :mod:`gridwxcomp` on station data that was
**not** created by `PyWeatherQAQC
<https://github.com/WSWUP/pyWeatherQAQC>`_. Otherwise, the same can be
achieved by the ``var_dict`` or ``grid_var`` and ``station_var`` arguments
to :func:`calc_bias_ratios`.
"""
import os
import calendar
import argparse
import warnings
import pandas as pd
import numpy as np
# allows for CL script usage if gridwxcomp not installed
try:
from .util import parse_yr_filter
except:
from util import parse_yr_filter
# keys = gridMET variable name
# values = climate station variable name
# Default mapping between gridded (gridMET) variable names and the column
# names produced by PyWeatherQAQC station output. Callers can override it
# via the ``var_dict`` argument to calc_bias_ratios().
GRIDMET_STATION_VARS = {
    'u2_ms' : 'ws_2m (m/s)',
    'tmin_c' : 'TMin (C)',
    'tmax_c' : 'TMax (C)',
    'srad_wm2' : 'Rs (w/m2)',
    'ea_kpa' : 'Vapor Pres (kPa)',
    'prcp_mm' : 'Precip (mm)',
    'etr_mm' : 'ETr (mm)',
    'eto_mm' : 'ETo (mm)'
}

# shorthand for building file paths throughout this module
OPJ = os.path.join
def main(input_file_path, out_dir, method='long_term_mean',
        grid_id_name='GRIDMET_ID', grid_var='etr_mm', station_var=None,
        station_date_name='date', grid_date_name='date', grid_ID=None,
        day_limit=10, years='all', comp=True):
    """
    Calculate monthly bias ratios between station climate and gridMET
    cells that correspond with each other geographically. Saves data
    to CSV files in the given output directory. If run later with
    new station data, bias ratios for new stations will be appended
    to existing output summary CSV.

    Arguments:
        input_file_path (str): path to input CSV file containing
            paired station/gridMET metadata, created by running
            :mod:`gridwxcomp.prep_input` followed by
            :mod:`gridwxcomp.download_gridmet_opendap`.
        out_dir (str): path to directory to save CSV files with
            monthly bias ratios.

    Keyword Arguments:
        method (str): default 'long_term_mean'. 'long_term_mean' uses the
            ratio of long-term means for each period; 'mean_of_annual'
            averages each year's period ratio instead.
        grid_id_name (str): default 'GRIDMET_ID'. Name of the gridcell
            identifier column.
        grid_var (str): default 'etr_mm'. Grid climate variable to use.
        station_var (str): default None. Station variable; if None it is
            looked up from :attr:`GRIDMET_STATION_VARS` using ``grid_var``.
        station_date_name (str): default 'date'. Date column in station files.
        grid_date_name (str): default 'date'. Date column in grid files.
        grid_ID (int): default None. If given, only process that gridcell.
        day_limit (int): default 10. Minimum days of data per month.
        years (int or str): default 'all'. Year filter, e.g. 2000-2005.
        comp (bool): default True. Also save a "comprehensive" summary CSV
            with station metadata and extra statistics.

    Returns:
        None

    Note:
        This is the command-line entry point; see :func:`calc_bias_ratios`
        for use within Python and for the full documentation of options.
    """
    # calculate monthly bias ratios and save to CSV files.
    # Bug fix: ``years`` was previously accepted but never forwarded, so
    # the [-y, --years] CLI option silently had no effect.
    calc_bias_ratios(
        input_file_path,
        out_dir,
        method=method,
        grid_id_name=grid_id_name,
        grid_var=grid_var,
        station_var=station_var,
        station_date_name=station_date_name,
        grid_date_name=grid_date_name,
        grid_ID=grid_ID,
        day_limit=day_limit,
        years=years,
        comp=comp
    )
def _save_output(out_df, comp_out_df, out_dir, grid_ID, var_name, yrs):
    """
    Save short summary file or overwrite existing data for a single
    climate station.

    Arguments:
        out_df (:class:`pandas.DataFrame`): data containing short
            summary info, mainly mean monthly bias ratios for a
            single climate station to save.
        comp_out_df (:class:`pandas.DataFrame`, bool): either a
            single row dataframe with comprehensive summary data
            or False (default). depends on ``comp`` argument to
            :func:`calc_bias_ratios`. If :class:`pandas.DataFrame`
            is passed then save or update existing file.
        out_dir (str): path to directory to save or update summary data
            for monthly bias ratios.
        grid_ID (int, optional): depends on ``grid_ID`` argument
            passed to :func:`calc_bias_ratios`. If not None (default)
            then save summary files for stations that correspond with
            the given gridMET ID with the suffix "_X" where X is the
            gridMET ID value.
        var_name (str): name of gridMET variable that is being processed.
        yrs (str): years used to calc ratios, save to out files as suffix.

    Returns:
        None
    """
    def __save_update(out_df, out_file):
        """
        Helper function that is reused to save or update both short and
        long summary files one station or row at a time. Saves station ratio
        data by appending to existing file or overwriting data for a station if
        it was previously calculated. `out_df` is a single row from the ratio
        results table representing data for a single climate station-gridcell
        pair.
        """
        # if short file exists add/overwrite row for station
        if os.path.isfile(out_file):
            existing_df = pd.read_csv(out_file, index_col='STATION_ID')
            # new station: append its row to the existing table
            if not out_df.index.values[0] in existing_df.index.values:
                out_df = pd.concat([existing_df, out_df], sort=False)
                out_df.to_csv(out_file, na_rep=-999, index=True)
            # overwrite if station is in existing, could change to
            # allow for duplicates if values are different
            else:
                existing_df.loc[out_df.index.values[0], :] =\
                    out_df.loc[out_df.index.values[0]]
                existing_df.to_csv(out_file, na_rep=-999, index=True)
        # no file yet: write a fresh one (missing values encoded as -999)
        else:
            out_df.to_csv(out_file, na_rep=-999, index=True)
    # save or update short and comprehensive summary files
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    # save/update short summary file or update existing with new station;
    # file name gains a "_grid_<ID>" suffix when run for a single gridcell
    if not grid_ID:
        out_file = OPJ(
            out_dir,
            '{v}_summary_{y}.csv'.format(v=var_name, y=yrs)
        )
    else:
        out_file = OPJ(
            out_dir,
            '{v}_summary_grid_{g}_{y}.csv'.format(
                v=var_name, g=grid_ID, y=yrs)
        )
    __save_update(out_df, out_file)
    # if comprehensive summary is requested save/update
    if isinstance(comp_out_df, pd.DataFrame):
        if not grid_ID:
            comp_out_file = OPJ(
                out_dir,
                '{v}_summary_comp_{y}.csv'.format(v=var_name, y=yrs)
            )
        else:
            comp_out_file = OPJ(
                out_dir,
                '{v}_summary_comp_{g}_{y}.csv'.format(
                    v=var_name, g=grid_ID, y=yrs)
            )
        __save_update(comp_out_df, comp_out_file)
def calc_bias_ratios(input_path, out_dir, method='long_term_mean',
        grid_id_name='GRIDMET_ID', grid_var='etr_mm', station_var=None,
        var_dict=None, station_date_name='date', grid_date_name='date',
        grid_ID=None, day_limit=10, years='all', comp=True):
    """
    Read input CSV file and calculate mean monthly bias ratios between
    station to corresponding grid cells for all station and grid
    pairs, optionally calculate ratios for a single gridcell.

    Arguments:
        input_path (str): path to input CSV file with matching
            station climate and grid metadata, created by running
            :func:`gridwxcomp.prep_input` followed by
            :func:`gridwxcomp.download_gridmet_opendap`.
        out_dir (str): path to directory to save CSV files with
            monthly bias ratios.

    Keyword Arguments:
        method (str): default 'long_term_mean'. 'long_term_mean' uses the
            ratio of long-term period means; 'mean_of_annual' averages each
            year's period ratio. Standard deviation and coefficient of
            variation are always computed from the annual ratios.
        grid_id_name (str): default 'GRIDMET_ID'. Name of index/cell
            identifier for the gridded dataset.
        grid_var (str): default 'etr_mm'. Grid climate variable to use.
        station_var (str): default None. Station variable; if None it is
            looked up from ``var_dict`` (or :attr:`GRIDMET_STATION_VARS`)
            using ``grid_var`` as the key.
        var_dict (dict): default None. Mapping of grid variable names to
            station variable names overriding :attr:`GRIDMET_STATION_VARS`.
        station_date_name (str): default 'date'. Date column in station files.
        grid_date_name (str): default 'date'. Date column in grid files.
        grid_ID (int): default None. If given, only process that gridcell.
        day_limit (int): default 10. Minimum days of data per month;
            months with fewer paired days are excluded.
        years (int or str): default 'all'. Year filter, e.g. 2000-2005.
        comp (bool): default True. Also save a "comprehensive" summary CSV
            with station metadata and extra statistics.

    Returns:
        None

    Raises:
        FileNotFoundError: if input file is invalid or not found.
        KeyError: if the input file lacks station/grid time series paths,
            or ``grid_var``/``station_var``/``var_dict`` values are invalid.
        ValueError: if the ``method`` kwarg is invalid.

    Note:
        Growing season and summer periods are April-October and
        June-August respectively. Temperature variables ('tmin_c',
        'tmax_c') use *differences* of means instead of ratios. Existing
        summary rows for a reprocessed station are overwritten.
    """
    # ignore np runtime warnings due to calcs with nans, div by 0
    np.seterr(divide='ignore', invalid='ignore')
    # specific for standard deviation of nans
    std_warning = "Degrees of freedom <= 0 for slice"
    warnings.filterwarnings("ignore", message=std_warning)
    method_options = ('long_term_mean', 'mean_of_annual')
    if method not in method_options:
        raise ValueError('{} is not a valid method, use one of: {}'.format(
            method, method_options)
        )
    if var_dict is None:
        var_dict = GRIDMET_STATION_VARS
    if not var_dict.get(grid_var, None):
        print(
            'Valid grid variable names:\n',
            '\n'.join([i for i in var_dict.keys()]),
            '\n'
        )
        err_msg = 'Invalid grid variable name {}'.format(grid_var)
        raise KeyError(err_msg)
    if not os.path.isdir(out_dir):
        print('{} does not exist, creating directory'.format(out_dir))
        os.mkdir(out_dir)
    if not os.path.isfile(input_path):
        raise FileNotFoundError('Input CSV file given was invalid or not found')
    input_df = pd.read_csv(input_path)
    # get matching station variable name
    if not station_var:
        station_var = var_dict.get(grid_var)
    # If only calculating ratios for a single cell, change console message
    if grid_ID:
        single_grid_cell_msg = f'For grid cell ID: {grid_ID}.'
    else:
        single_grid_cell_msg = ''
    print(
        f'Calculating ratios between climate station variable: {station_var}'
        f'\nand grid variable: {grid_var} using the "{method.replace("_"," ")}"'
        f' method. {single_grid_cell_msg}'
    )
    # loop through each station and calculate monthly ratio
    for index, row in input_df.iterrows():
        if not 'STATION_FILE_PATH' in row or not 'GRID_FILE_PATH' in row:
            raise KeyError('Missing station and/or grid file paths in '
                           'input file. Run prep_input.py followed '
                           'by download_gridmet_opendap.py first.')
        # if only doing a single grid cell check for matching ID
        if grid_ID and int(grid_ID) != row[grid_id_name]:
            continue
        # load station and grid time series files
        try:
            # if time series not from PyWeatherQaQc, CSV with 'date' column
            if not row.STATION_FILE_PATH.endswith('.xlsx'):
                station_df = pd.read_csv(
                    row.STATION_FILE_PATH, parse_dates=True,
                    index_col=station_date_name
                )
                station_df.index = station_df.index.date  # for joining
            # if excel file, assume PyWeatherQaQc format
            else:
                station_df = pd.read_excel(
                    row.STATION_FILE_PATH,
                    sheet_name='Corrected Data', parse_dates=True,
                    index_col=0
                )
        # deliberately best-effort: skip stations whose files cannot be
        # read (previously a bare ``except:``, narrowed to Exception so
        # KeyboardInterrupt/SystemExit are not swallowed)
        except Exception:
            print('Time series file for station: ', row.STATION_ID,
                  'was not found, skipping.')
            continue
        if not station_var in station_df.columns:
            err_msg = '{v} not found in the station file: {p}'.\
                format(v=station_var, p=row.STATION_FILE_PATH)
            raise KeyError(err_msg)
        print(
            '\nCalculating {v} bias ratios for station:'.format(v=grid_var),
            row.STATION_ID
        )
        grid_df = pd.read_csv(row.GRID_FILE_PATH, parse_dates=True,
                              index_col=grid_date_name)
        # merge both datasets drop missing days
        result = pd.concat(
            [
                station_df[station_var],
                grid_df[grid_var]
            ],
            axis=1
        )
        result = result.reindex(grid_df.index)
        result.dropna(inplace=True)
        # make datetime index
        result.index = pd.to_datetime(result.index)
        # apply year filter
        result, years_str = parse_yr_filter(result, years, row.STATION_ID)
        # for calculating ratios with long-term means later
        orig = result.copy()
        # monthly sums, means, and day counts for each year
        result = result.groupby([result.index.year, result.index.month])\
            .agg(['sum', 'mean', 'count'])
        result.index.set_names(['year', 'month'], inplace=True)
        # remove month totals with fewer than day_limit paired days
        result = result[result[grid_var, 'count'] >= day_limit]
        # calc mean growing season and June to August ratios with month sums;
        # temperature vars use differences of means rather than ratios
        if grid_var in ('tmin_c', 'tmax_c'):
            grow_season = result.loc[
                result.index.get_level_values('month').isin([4,5,6,7,8,9,10]),
                (station_var)]['mean'].mean() - result.loc[
                result.index.get_level_values('month').isin(
                    [4,5,6,7,8,9,10]), (grid_var)]['mean'].mean()
            june_to_aug = result.loc[
                result.index.get_level_values('month').isin(
                    [6,7,8]), (station_var)]['mean'].mean()\
                - result.loc[result.index.get_level_values('month')
                .isin([6,7,8]), (grid_var)]['mean'].mean()
            ann_months = list(range(1, 13))
            annual = result.loc[
                result.index.get_level_values('month').isin(ann_months),
                (station_var)]['mean'].mean() - result.loc[
                result.index.get_level_values('month').isin(ann_months),
                (grid_var)]['mean'].mean()
        else:
            grow_season = result.loc[
                result.index.get_level_values('month').isin([4,5,6,7,8,9,10]),
                (station_var)]['sum'].sum() / result.loc[
                result.index.get_level_values('month').isin(
                    [4,5,6,7,8,9,10]), (grid_var)]['sum'].sum()
            june_to_aug = result.loc[
                result.index.get_level_values('month').isin(
                    [6,7,8]), (station_var)]['sum'].sum()\
                / result.loc[result.index.get_level_values('month')
                .isin([6,7,8]), (grid_var)]['sum'].sum()
            ann_months = list(range(1, 13))
            annual = result.loc[
                result.index.get_level_values('month').isin(ann_months),
                (station_var)]['sum'].sum() / result.loc[
                result.index.get_level_values('month').isin(ann_months),
                (grid_var)]['sum'].sum()
        ratio = pd.DataFrame(columns=['ratio', 'count'])
        # ratio (or difference for temperature) of monthly sums for each year
        if grid_var in ('tmin_c', 'tmax_c'):
            ratio['ratio'] =\
                (result[station_var, 'mean']) - (result[grid_var, 'mean'])
        else:
            ratio['ratio'] = (result[station_var, 'sum']) / (result[grid_var, 'sum'])
        # monthly counts and stddev
        ratio['count'] = result.loc[:, (grid_var, 'count')]
        if result.empty:
            print(f'WARNING: no data for site: {row.STATION_ID}, skipping')
            continue
        # rebuild Index DateTime (mid-month dates, the 15th)
        ratio['year'] = ratio.index.get_level_values('year').values.astype(int)
        ratio['month'] = ratio.index.get_level_values('month').values.astype(int)
        ratio.index = pd.to_datetime(
            ratio.year*10000 + ratio.month*100 + 15, format='%Y%m%d'
        )
        # useful to know how many years were used in addition to day counts
        start_year = ratio.year.min()
        end_year = ratio.year.max()
        counts = ratio.groupby(ratio.index.month).sum()['count']
        # get standard deviation of each years' monthly mean ratio
        stdev = {
            month: np.std(
                ratio.loc[ratio.month.isin([month]), 'ratio'].values
            ) for month in ann_months
        }
        stdev = pd.Series(stdev, name='stdev')
        # mean of monthly means of all years, can change to median or other meth
        final_ratio = ratio.groupby(ratio.index.month).mean()
        final_ratio.drop(['year', 'month'], axis=1, inplace=True)
        final_ratio['count'] = counts
        final_ratio['stdev'] = stdev
        final_ratio['cv'] = stdev / final_ratio['ratio']
        # calc mean growing season, June through August, ann stdev
        grow_season_std = np.std(
            ratio.loc[ratio.month.isin([4,5,6,7,8,9,10]), 'ratio'].values
        )
        june_to_aug_std = np.std(
            ratio.loc[ratio.month.isin([6,7,8]), 'ratio'].values
        )
        annual_std = np.std(
            ratio.loc[ratio.month.isin(ann_months), 'ratio'].values
        )
        # get month abbreviations in a column and drop index values
        for m in final_ratio.index:
            final_ratio.loc[m, 'month'] = calendar.month_abbr[m]
        # restructure as a row with station index
        months = final_ratio.month.values
        final_ratio = final_ratio.T
        final_ratio.columns = months
        final_ratio.drop('month', inplace=True)
        # add monthly means and counts into single row dataframe
        ratio_cols = [c + '_mean' for c in final_ratio.columns]
        count_cols = [c + '_count' for c in final_ratio.columns]
        stddev_cols = [c + '_stdev' for c in final_ratio.columns]
        coef_var_cols = [c + '_cv' for c in final_ratio.columns]
        # combine all monthly stats
        out_cols = ratio_cols + count_cols + stddev_cols + coef_var_cols
        final_ratio = pd.concat([
            final_ratio.loc['ratio'],
            final_ratio.loc['count'],
            final_ratio.loc['stdev'],
            final_ratio.loc['cv']
        ])
        final_ratio.index = out_cols
        # transpose so that each station is one row in final output
        final_ratio = final_ratio.to_frame().T
        # assign non-monthly stats, growing season, annual, june-aug
        final_ratio['growseason_mean'] = grow_season
        final_ratio['summer_mean'] = june_to_aug
        final_ratio['annual_mean'] = annual
        # day counts for all years in non monthly periods
        final_ratio['growseason_count'] =\
            counts.loc[counts.index.isin([4,5,6,7,8,9,10])].sum()
        final_ratio['summer_count'] =\
            counts.loc[counts.index.isin([6,7,8])].sum()
        final_ratio['annual_count'] =\
            counts.loc[counts.index.isin(ann_months)].sum()
        # assign stdev, coef. var.
        final_ratio['growseason_stdev'] = grow_season_std
        final_ratio['summer_stdev'] = june_to_aug_std
        final_ratio['annual_stdev'] = annual_std
        # coefficient of variation
        final_ratio['growseason_cv'] = grow_season_std / grow_season
        final_ratio['summer_cv'] = june_to_aug_std / june_to_aug
        final_ratio['annual_cv'] = annual_std / annual
        # start and end years for interpreting annual CV, stdev...
        final_ratio['start_year'] = start_year
        final_ratio['end_year'] = end_year
        # round numerical data before adding string metadata.
        # Bug fix: the original condition was
        #   ``if '_mean' or '_stdev' or '_cv' in v:``
        # which is always truthy (the non-empty literal '_mean' is True), so
        # every column was rounded to 3 decimals. Test each substring so
        # count/year columns are rounded to whole numbers as intended.
        for v in final_ratio:
            if any(sub in v for sub in ('_mean', '_stdev', '_cv')):
                final_ratio[v] = final_ratio[v].astype(float).round(3)
            else:
                final_ratio[v] = final_ratio[v].astype(float).round(0)
        # set station ID as index
        final_ratio['STATION_ID'] = row.STATION_ID
        final_ratio.set_index('STATION_ID', inplace=True)
        out = final_ratio.copy()
        out.drop(count_cols + stddev_cols + coef_var_cols, axis=1, inplace=True)
        # save grid ID for merging with input table, merge other metadata
        final_ratio[grid_id_name] = row[grid_id_name]
        final_ratio = final_ratio.merge(input_df, on=grid_id_name)
        # if more than one site in same gridcell- will have multiple rows
        # after merge, select the one for the current station
        if final_ratio.shape[0] > 1:
            final_ratio = final_ratio.loc[final_ratio.STATION_ID == row.STATION_ID]
        final_ratio.reset_index(inplace=True)  # for slicing with .at[0]
        # long term mean station to mean grid ratio calc as opposed to mean of
        # annual ratios- default less bias potential
        if method == 'long_term_mean':
            month_means = orig.groupby(orig.index.month).mean()
            month_means['month'] = month_means.index
            for m in month_means.index:
                month_means.loc[m, 'month'] = f'{calendar.month_abbr[m]}_mean'
            month_means.set_index('month', inplace=True)
            if grid_var in ('tmin_c', 'tmax_c'):
                month_means['ratios'] =\
                    month_means[station_var] - month_means[grid_var]
            else:
                month_means['ratios'] =\
                    month_means[station_var] / month_means[grid_var]
            # fix: pass axis by keyword, the positional form was deprecated
            # and removed in pandas 2.0
            long_term = month_means.drop([station_var, grid_var], axis=1).T
            # non-monthly periods long-term mean to mean ratios
            grow_season = orig.loc[orig.index.month.isin([4,5,6,7,8,9,10])]
            summer_season = orig.loc[orig.index.month.isin([6,7,8])]
            if grid_var in ('tmin_c', 'tmax_c'):
                long_term['growseason_mean'] =\
                    grow_season[station_var].mean() - grow_season[grid_var].mean()
                long_term['summer_mean'] =\
                    summer_season[station_var].mean() - summer_season[grid_var].mean()
                long_term['annual_mean'] =\
                    orig[station_var].mean() - orig[grid_var].mean()
            else:
                long_term['growseason_mean'] =\
                    grow_season[station_var].mean() / grow_season[grid_var].mean()
                long_term['summer_mean'] =\
                    summer_season[station_var].mean() / summer_season[grid_var].mean()
                long_term['annual_mean'] =\
                    orig[station_var].mean() / orig[grid_var].mean()
            # overwrite only mean ratios (keep stats from mean of annual ratios)
            overwrite = long_term.columns.intersection(final_ratio.columns)
            final_ratio[overwrite] = long_term[overwrite].values
            out[overwrite] = long_term[overwrite].values
        final_ratio['ratio_method'] = method
        # round numeric metadata columns
        final_ratio = final_ratio.round({
            'LAT': 10,
            'LON': 10,
            'ELEV_M': 0,
            'ELEV_FT': 0,
            'STATION_LAT': 10,
            'STATION_LON': 10,
            'STATION_ELEV_M': 0
        })
        # check if day counts for non-monthly periods are too low, if assign na
        grow_thresh = 65
        sum_thresh = 35
        ann_thresh = 125
        if final_ratio.at[0, 'summer_count'] < sum_thresh:
            print('WARNING: less than:', sum_thresh, 'days in summer period',
                  '\nfor station:', row.STATION_ID, 'assigning -999 for all stats')
            cols = [col for col in final_ratio.columns if
                    'summer_' in col and '_count' not in col]
            final_ratio.loc[:, cols] = np.nan
        if final_ratio.at[0, 'growseason_count'] < grow_thresh:
            print('WARNING: less than:', grow_thresh, 'days in growing season',
                  '\nfor station:', row.STATION_ID, 'assigning -999 for all stats')
            cols = [col for col in final_ratio.columns if
                    'growseason_' in col and '_count' not in col]
            final_ratio.loc[:, cols] = np.nan
        if final_ratio.at[0, 'annual_count'] < ann_thresh:
            print('WARNING: less than:', ann_thresh, 'days in annual period',
                  '\nfor station:', row.STATION_ID, 'assigning -999 for all stats')
            cols = [col for col in final_ratio.columns if
                    'annual_' in col and '_count' not in col]
            final_ratio.loc[:, cols] = np.nan
        if comp:
            # (removed a redundant assignment that was immediately overwritten)
            out[grid_id_name] = final_ratio[grid_id_name].unique()
            # build comprehensive output summary
            comp_out = final_ratio
            comp_out.set_index('STATION_ID', inplace=True)
            # no longer need grid ID in short summary
            out.drop(columns=grid_id_name, inplace=True)
        # if comp False
        else:
            comp_out = comp
        # save output depending on options
        _save_output(out, comp_out, out_dir, grid_ID, grid_var, years_str)
    print(
        '\nSummary file(s) for bias ratios saved to: \n',
        os.path.abspath(out_dir)
    )
def arg_parse():
    """
    Command line usage of calc_bias_ratios.py which calculates monthly bias
    ratios between station climate and grid cells that correspond with
    each other geographically. Saves data to CSV files in the given output
    directory. If run later with new station data, bias ratios for new
    stations will be appended to existing output summary CSV.
    """
    parser = argparse.ArgumentParser(
        description=arg_parse.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    optional = parser._action_groups.pop()  # optionals listed second
    required = parser.add_argument_group('required arguments')
    required.add_argument(
        '-i', '--input', metavar='PATH', required=True,
        help='Input CSV file of merged climate/grid data that '+\
            'was created by running prep_input.py and '+\
            'download_gridmet_opendap.py')
    required.add_argument(
        '-o', '--out', metavar='PATH', required=True,
        help='Output directory to save CSV files containing bias ratios')
    optional.add_argument('-meth', '--method', metavar='', required=False,
        default='long_term_mean', help='ratio calc method "long_term_mean" or'+\
        '"mean_of_annual"')
    optional.add_argument('-gin', '--grid-id-name', metavar='', required=False,
        default='GRIDMET_ID', help='Name of gridcell identifier if not using '+\
        'gridMET grid')
    optional.add_argument(
        '-y', '--years', metavar='', required=False, default='all',
        help='Years to use, single or range e.g. 2018 or 1995-2010')
    optional.add_argument(
        '-gv', '--grid-var', metavar='', required=False, default='etr_mm',
        help='Grid variable name for bias ratio calculation')
    optional.add_argument(
        '-sv', '--station-var', metavar='', required=False, default=None,
        help='Station variable name for bias ratio calculation')
    optional.add_argument(
        '-sdn', '--station-date-name', metavar='', required=False, default='date',
        help='Date column name in station time series files if not using '+\
        'gridMET.')
    optional.add_argument(
        '-gdn', '--grid-date-name', metavar='', required=False, default='date',
        help='Date column name in grid time series files if not using gridMET.')
    optional.add_argument(
        '-id', '--grid-id', metavar='', required=False, default=None,
        help='Optional grid ID to calculate bias ratios for a single '+\
        'gridcell')
    # Bug fix: ``type=int`` added. Without it a CLI-supplied value arrived
    # as a string (only the default 10 was an int), which broke the
    # ``count >= day_limit`` comparison in calc_bias_ratios.
    optional.add_argument('-d', '--day-limit', metavar='', required=False,
        type=int, default=10, help='Number of days of valid data per month to '+\
        'include it in bias correction calculation.')
    optional.add_argument('-c', '--comprehensive', required=False,
        default=True, action='store_false', dest='comprehensive',
        help='Flag, if given, to NOT save comprehensive summary file with '+\
        'extra metadata and statistics with the suffix "_comp"')
    # parser.add_argument(
    #    '--debug', default=logging.INFO, const=logging.DEBUG,
    #    help='Debug level logging', action="store_const", dest="loglevel")
    parser._action_groups.append(optional)  # to avoid optionals listed first
    args = parser.parse_args()
    return args
# CLI entry point: parse command-line options then run the bias-ratio
# calculation with them via main().
if __name__ == '__main__':
    args = arg_parse()
    main(
        input_file_path=args.input,
        out_dir=args.out,
        method=args.method,
        grid_id_name=args.grid_id_name,
        grid_var=args.grid_var,
        station_var=args.station_var,
        station_date_name=args.station_date_name,
        grid_date_name=args.grid_date_name,
        grid_ID=args.grid_id,
        day_limit=args.day_limit,
        years=args.years,
        comp=args.comprehensive
    )
| 37,250 | 11,217 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Attachment) on 2019-01-22.
# 2019, SMART Health IT.
from . import element
class Attachment(element.Element):
    """ Content in a format defined elsewhere.

    For referring to data content defined in other formats.
    """

    resource_type = "Attachment"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.contentType = None
        """ Mime type of the content, with charset etc..
        Type `str`. """

        self.creation = None
        """ Date attachment was first created.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.data = None
        """ Data inline, base64ed.
        Type `str`. """

        self.hash = None
        """ Hash of the data (sha-1, base64ed).
        Type `str`. """

        self.language = None
        """ Human language of the content (BCP-47).
        Type `str`. """

        self.size = None
        """ Number of bytes of content (if url provided).
        Type `int`. """

        self.title = None
        """ Label to display in place of the data.
        Type `str`. """

        self.url = None
        """ Uri where the data can be found.
        Type `str`. """

        super(Attachment, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Extend the base element's property list with this resource's
        # fields: (name, json_name, type, is_list, of_many, not_optional).
        js = super(Attachment, self).elementProperties()
        js.extend([
            ("contentType", "contentType", str, False, None, False),
            ("creation", "creation", fhirdate.FHIRDate, False, None, False),
            ("data", "data", str, False, None, False),
            ("hash", "hash", str, False, None, False),
            ("language", "language", str, False, None, False),
            ("size", "size", int, False, None, False),
            ("title", "title", str, False, None, False),
            ("url", "url", str, False, None, False),
        ])
        return js
import sys
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
| 5,073 | 1,624 |
import asyncio
import os
import struct
import zlib
from os import PathLike
from typing import List
WIILOAD_VERSION_MAJOR = 0
WIILOAD_VERSION_MINOR = 5


async def upload_bytes(dol: bytes, argv: List[str], host: str, port: int = 4299):
    """
    Uploads a file it to a Wii.
    :param dol: The bytes of a file to upload to wii.
    :param argv: Arguments to send to wii. The first value is usually the name of the file.
    :param host: The Wii's hostname/ip
    :param port: The port that Homebrew Channel is listening at.
    :return:
    """
    compressed = zlib.compress(dol, 6)
    # argv is sent as a NUL-separated, NUL-terminated blob of UTF-8 strings.
    arg_blob = b"\x00".join(item.encode("utf-8") for item in argv) + b"\x00"

    reader, writer = await asyncio.open_connection(host, port)

    # Wiiload handshake header: magic, version bytes, then big-endian sizes.
    header = b"HAXX"
    header += struct.pack("B", WIILOAD_VERSION_MAJOR)   # one byte, unsigned
    header += struct.pack("B", WIILOAD_VERSION_MINOR)   # one byte, unsigned
    header += struct.pack(">H", len(arg_blob))          # bigendian, 2 bytes, unsigned
    header += struct.pack(">L", len(compressed))        # bigendian, 4 bytes, unsigned
    header += struct.pack(">L", len(dol))               # bigendian, 4 bytes, unsigned

    writer.write(header)
    writer.write(compressed)
    writer.write(arg_blob)
    await writer.drain()

    writer.close()
    await writer.wait_closed()
async def upload_file(path: PathLike, argv: List[str], host: str, port: int = 4299):
    """
    Reads a file from disk and uploads it to a Wii.
    :param path: Path to a file to be uploaded.
    :param argv: Extra arguments to send to wii, after the name of the file.
    :param host: The Wii's hostname/ip
    :param port: The port that Homebrew Channel is listening at.
    :return:
    """
    with open(path, "rb") as fh:
        payload = fh.read()
    # The file's base name becomes argv[0]; caller-supplied args follow.
    full_argv = [os.path.basename(path), *argv]
    return await upload_bytes(payload, full_argv, host, port)
| 1,843 | 652 |
# Project Euler 81: minimal path sum through a grid moving only right or down.
# Reads a comma-separated grid from matrix.txt and folds a DP table in place
# from the bottom-right corner back to the top-left; the answer ends up at
# matrix[0][0].
with open('matrix.txt', 'r') as m:
    raw_rows = m.read().split('\n')
matrix = [row.split(',') for row in raw_rows]
del matrix[-1]  # trailing newline produces an empty final row

for r, row in enumerate(matrix):
    for c, cell in enumerate(row):
        matrix[r][c] = int(cell)

# Each cell accumulates the cheaper of its right/down continuations.
# Explicit bounds checks replace the old bare-except + 10**7 sentinel,
# which silently swallowed all errors and would misbehave for cell
# values >= 10**7.
num_rows = len(matrix)
for r in range(num_rows - 1, -1, -1):
    row_len = len(matrix[r])
    for c in range(row_len - 1, -1, -1):
        choices = []
        if r + 1 < num_rows:
            choices.append(matrix[r + 1][c])
        if c + 1 < row_len:
            choices.append(matrix[r][c + 1])
        if choices:  # bottom-right corner has no continuation
            matrix[r][c] += min(choices)

print(matrix[0][0])  # 427337
"""
Same concept as the previous pathway problems.
Although I realized that 0,0 will always just be the answer,
no need to walk back down the path.
""" | 1,010 | 364 |
import pandas as pd
from QUANTAXIS.QAUtil import (
DATABASE
)
# Collection of per-day pankou snapshots.
_table = DATABASE.stock_pankou_day
# Probe the last day: inserts are batched, so a value on this date proves the batch exists.
date = '2021-11-30'
def exists(code, field='turn'):
    """Return True when the pankou record for *code* on the probe date exists
    and carries a non-None value for *field*."""
    record = _table.find_one({'code': code, 'date': date})
    return record is not None and record.get(field) is not None
def exists_shizhi(code):
    """Shortcut: check the 'shiZhi' field for *code*."""
    return exists(code, field='shiZhi')
def query_fundamentals(codes, date):
    """Fetch pankou rows for *codes* on *date* as a DataFrame.

    :param codes: list of stock code strings, matched with Mongo $in
    :param date: trade-date string, e.g. '2018-01-10' (shadows the
                 module-level ``date`` constant inside this function)
    :return: pandas.DataFrame without the Mongo '_id' column; empty
             DataFrame when nothing matches
    """
    query_condition = {
        'date': date,
        'code': {
            '$in': codes
        }
    }
    records = list(_table.find(query_condition))
    if not records:
        # Bug fix: pd.DataFrame([]).drop(['_id'], axis=1) raises KeyError
        # because the column doesn't exist; return an empty frame instead.
        return pd.DataFrame()
    return pd.DataFrame(records).drop(['_id'], axis=1)
if __name__ == "__main__":
#print(exists_shizhi('300522'))
#print(exists('300522'))
print(query_fundamentals(['603501','603986'],'2018-01-10')) | 899 | 359 |
# Generated by Django 2.2 on 2019-04-26 15:52
from django.db import migrations, models
import django.db.models.deletion
import findance.abstract
class Migration(migrations.Migration):
    """Auto-generated initial migration for the entity app.

    Creates OwningEntity (a uniquely named owner with an M2M to assets
    through assets.AssetOwnership) and EntityControl (a single-letter
    permission level tied to an OwningEntity).
    """

    initial = True

    dependencies = [
        # assets must migrate first: the M2M below goes through
        # assets.AssetOwnership and targets assets.Asset.
        ('assets', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='OwningEntity',
            fields=[
                # Primary key default comes from FindanceIdField._build_id.
                ('id', findance.abstract.FindanceIdField(default=findance.abstract.FindanceIdField._build_id, primary_key=True, serialize=False)),
                ('name', models.TextField(unique=True)),
                ('assets', models.ManyToManyField(through='assets.AssetOwnership', to='assets.Asset')),
            ],
            options={
                'ordering': ('id',),
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EntityControl',
            fields=[
                ('id', findance.abstract.FindanceIdField(default=findance.abstract.FindanceIdField._build_id, primary_key=True, serialize=False)),
                # One-letter permission code; defaults to 'r' (Read All).
                ('permission', models.CharField(choices=[('o', 'Read Owned'), ('r', 'Read All'), ('w', 'Write'), ('a', 'Admin')], default='r', max_length=1)),
                ('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='entity.OwningEntity')),
            ],
            options={
                'ordering': ('id',),
                'abstract': False,
            },
        ),
    ]
| 1,472 | 420 |
print("\033[1m=-=" * 15)
print("\033[1;32mLojas Tabajara\033[m". center(51))
print("=-=\033[m" * 15)
cont = 1
somaP = 0
while True:
p = float(input(f"Produto {cont}: R$ "))
cont += 1
somaP += p
if p == 0:
break
print(f"\033[1;32mTotal: R${somaP:.2f}\033[m")
pagamento = float(input("\033[1mDinheiro: R$ \033[m"))
while pagamento < somaP:
pagamento = float(input("\033[1mDinheiro: R$ \033[m"))
troco = pagamento - somaP
if pagamento != somaP:
print(f"\033[1;34mTroco: R$ {troco:.2f}\033[m")
elif troco == 0:
print("\033[1;31mNão há volta!")
| 576 | 303 |
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
import json
from datetime import datetime
from influxdb import InfluxDBClient
import secrets
import settings
# InfluxDB connection shared by all request handlers; credentials come from
# the (untracked) secrets module, database name from settings.
influx_client = InfluxDBClient(secrets.influx_database_server,
                               secrets.influx_database_port,
                               secrets.influx_username,
                               secrets.influx_password,
                               database=settings.influx_database_name)

# Flask setup
app = Flask(__name__)
Bootstrap(app)
@app.route("/")
def main():
return render_template("graph_base.html",
title="Plots",
plots=[
{
'content_title': "Temperature (deg C)",
'measurement_type': "temperature"
},
{
'content_title': "Humidity (%)",
'measurement_type': "humidity"
},
{
'content_title': "Pressure (hPa)",
'measurement_type': "pressure"
},
{
'content_title': "CO2 (ppm)",
'measurement_type': "co2"
}
]
)
@app.route("/data")
def data():
mtype = str(request.args.get('type'))
start_utc = datetime.fromtimestamp(int(request.args.get('start')) / 1000.)
end_utc = datetime.fromtimestamp(int(request.args.get('end')) / 1000.)
try:
query = "SELECT value FROM {} \
WHERE time >= \'{}\' AND time <= \'{}\' \
tz('America/Los_Angeles');".format(mtype, start_utc, end_utc)
print(query)
measurements = influx_client.query(query, epoch='u')
except Exception as e:
print("Influx fetch error: {}".format(e))
measurements = []
json_data = {
'cols': [{
'id': 'Timestamp',
'label': 'Timestamp',
'type': 'date',
},
{
'id': mtype,
'label': "{} ({})".format(mtype, settings.units[mtype] if mtype in settings.units.keys() else "unitless"),
'type': 'number',
}],
'rows': [],
}
for m in measurements.get_points():
ts = datetime.fromtimestamp(m['time'] / 1000000)
time_str = "Date({},{},{},{},{},{},{})".format(
ts.year,
ts.month - 1,
ts.day,
ts.hour,
ts.minute,
ts.second,
int(ts.microsecond / 1000.),
)
row = {
'c': [
{'v': time_str},
{'v': m['value']},
]
}
json_data['rows'].append(row)
json_data = json.dumps(json_data)
return json_data
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=False)
| 3,196 | 872 |