| content (stringlengths 0-1.05M) | origin (stringclasses 2 values) | type (stringclasses 2 values) |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
"""
from setuptools import setup, find_packages
from Cython.Build import cythonize
import pysam
setup(
name='svtk',
version='0.1',
description='Structural variation toolkit',
author='Matthew Stone',
author_email='mstone5@mgh.harvard.edu',
packages=find_packages(),
package_data={'svtk': ['data/*_template.vcf']},
scripts=['scripts/svtk'],
ext_modules=cythonize('svtk/utils/helpers.pyx'),
include_dirs=pysam.get_include(),
install_requires=[
'numpy',
'scipy',
'pysam>=0.11.2.2',
'pybedtools',
'cython',
'natsort',
'pandas',
]
)
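# Typical build/install invocation for this setup script (a sketch; assumes
# Cython and pysam are already installed so the .pyx extension can compile):
#   pip install .
#   # or, for an in-place development build:
#   python setup.py build_ext --inplace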
|
nilq/baby-python
|
python
|
from colour import Color
from utils.animation_maker import *
import numpy as np
from time import time
song = 1
in_file = f'./Songs/Song0{song}.wav'
c1, c2 = Color('#fceaa8'), Color('#00e5ff')
#c1, c2 = Color('#000000'), Color('#00fffb')
c3, c4 = Color('#000000'), Color('#000000')
gradient_mode = 'hsl'
n_points = 100
anim_mode = 'fft 2000 8192 1'
# anim_mode = 'volume_bar sim bounce'
fps = 30
out_file = f'./npys/song0{song}.npy'  # animation point values saved as .npy; video rendering happens elsewhere
t1 = time()
point_values = make_animation(in_file, anim_mode, n_points, fps, c1, c2, c3, c4, gradient_mode)
np.save(out_file, point_values)
print(f"Took {time() - t1} secs")
|
nilq/baby-python
|
python
|
import argparse
import asyncio
import rlp
from typing import Any, cast, Dict
from quarkchain.p2p import auth
from quarkchain.p2p import ecies
from quarkchain.p2p.exceptions import (
HandshakeFailure,
HandshakeDisconnectedFailure,
UnreachablePeer,
MalformedMessage,
)
from quarkchain.p2p.kademlia import Node
from quarkchain.p2p.p2p_manager import QuarkServer
from quarkchain.p2p.p2p_proto import Disconnect, DisconnectReason, Hello
from quarkchain.p2p.peer import PeerConnection
def get_quark_peer_factory():
privkey = ecies.generate_privkey()
server = QuarkServer(privkey=privkey, port=38291, network_id=0)
return server.peer_pool.get_peer_factory()
async def handshake_for_version(remote: Node, factory):
"""Perform the auth and P2P handshakes (without sub-protocol handshake) with the given remote.
Disconnect after initial hello message exchange, and return version id
"""
try:
(
aes_secret,
mac_secret,
egress_mac,
ingress_mac,
reader,
writer,
) = await auth.handshake(remote, factory.privkey, factory.cancel_token)
except (ConnectionRefusedError, OSError) as e:
raise UnreachablePeer() from e
connection = PeerConnection(
reader=reader,
writer=writer,
aes_secret=aes_secret,
mac_secret=mac_secret,
egress_mac=egress_mac,
ingress_mac=ingress_mac,
)
peer = factory.create_peer(remote=remote, connection=connection, inbound=False)
# see await peer.do_p2p_handshake()
peer.base_protocol.send_handshake()
try:
cmd, msg = await peer.read_msg(timeout=peer.conn_idle_timeout)
    except rlp.DecodingError as e:
        raise HandshakeFailure("Got invalid rlp data during handshake") from e
except MalformedMessage as e:
raise HandshakeFailure("Got malformed message during handshake") from e
if isinstance(cmd, Disconnect):
msg = cast(Dict[str, Any], msg)
raise HandshakeDisconnectedFailure(
"disconnected before completing sub-proto handshake: {}".format(
msg["reason_name"]
)
)
msg = cast(Dict[str, Any], msg)
if not isinstance(cmd, Hello):
await peer.disconnect(DisconnectReason.bad_protocol)
raise HandshakeFailure(
"Expected a Hello msg, got {}, disconnecting".format(cmd)
)
return msg["client_version_string"]
# to test, run local quarkchain cluster with:
# python cluster.py --p2p --p2p_port=38291 --privkey=9e88b123b2200d6d78bf288a2dd7e3b2f31c77c3b119f8222d5a2d510b4c8d94
async def main():
parser = argparse.ArgumentParser()
# do not use "localhost", use the private ip if you run this from EC2
parser.add_argument(
"--remote",
default="enode://28698cd33c5c78514ce1d8a7228e0071f341d75509dc48f12e26f9e22584740a5b6bf8a447eab8679e8744d283dd4173ddbdc52f44a7cb5ff508ecbd04b500f0@127.0.0.1:38291",
type=str,
)
args = parser.parse_args()
factory = get_quark_peer_factory()
remote = Node.from_uri(args.remote)
version = await handshake_for_version(remote, factory)
print(version)
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(main())
|
nilq/baby-python
|
python
|
"""redux_field_creator URL Configuration"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^fields/', include('fields.urls')),
]
|
nilq/baby-python
|
python
|
# echo_04_io_multiplexing.py
import socket
import selectors
sel = selectors.DefaultSelector()
def setup_listening_socket(host='127.0.0.1', port=55555):
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen()
sel.register(sock, selectors.EVENT_READ, accept)
def accept(sock):
client_sock, addr = sock.accept()
print('Connection from', addr)
sel.register(client_sock, selectors.EVENT_READ, recv_and_send)
def recv_and_send(sock):
received_data = sock.recv(4096)
if received_data:
# assume sendall won't block
sock.sendall(received_data)
else:
print('Client disconnected:', sock.getpeername())
sel.unregister(sock)
sock.close()
def run_event_loop():
while True:
for key, _ in sel.select():
callback = key.data
sock = key.fileobj
callback(sock)
if __name__ == '__main__':
setup_listening_socket()
run_event_loop()
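# Minimal client sketch for manual testing (an assumption, not part of the
# original file; run it from a second interpreter while the server is up):
#   import socket
#   with socket.create_connection(('127.0.0.1', 55555)) as s:
#       s.sendall(b'ping')
#       print(s.recv(4096))  # -> b'ping'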
|
nilq/baby-python
|
python
|
import json
import sys
sys.path.insert(0, '/requests')
import requests
def getBridgeIP():
r = requests.get('http://www.meethue.com/api/nupnp')
bridges = r.json()
if not bridges:
bridge_ip = 0
else:
bridge_ip = bridges[0]['internalipaddress']
return bridge_ip
def createUser(bridge_ip):
r = requests.post(
'http://{bridge_ip}/api'.format(bridge_ip=bridge_ip),
data=json.dumps({'devicetype': 'Smart Alarm'}))
resp = r.json()[0]
if resp.get('error'):
username = 'Setup Error: %s' % resp['error'].get('description')
else:
username = resp['success']['username']
return username
def main(argv):
    try:
        bridge_ip = argv[0]
        return_value = createUser(bridge_ip)
    except IndexError:
        return_value = "Error: Please provide an IP address"
    print(return_value)
    return return_value
if __name__ == "__main__":
main(sys.argv[1:])
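# Example invocation (IP is illustrative; the Hue bridge's link button must
# usually be pressed shortly before createUser, otherwise the API responds
# with a "link button not pressed" error):
#   python this_script.py 192.168.1.2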
|
nilq/baby-python
|
python
|
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@bazel_skylib//lib:structs.bzl", "structs")
load("//ocaml/_functions:utils.bzl", "capitalize_initial_char")
load("//ocaml/_functions:module_naming.bzl",
"normalize_module_label",
"normalize_module_name")
load("//ocaml/_rules:impl_common.bzl", "module_sep")
#######################################
def print_config_state(settings, attr):
print(" rule name: %s" % attr.name)
print(" ns_resolver ws: %s" % attr._ns_resolver.workspace_name)
print(" @rules_ocaml//cfg/ns:prefixes: %s" % settings["@rules_ocaml//cfg/ns:prefixes"])
print(" @rules_ocaml//cfg/ns:submodules: %s" % settings["@rules_ocaml//cfg/ns:submodules"])
if hasattr(attr, "submodules"):
print(" attr.submodules: %s" % attr.submodules)
##################################################
def _executable_in_transition_impl(settings, attr):
## FIXME: ppx_executable uses @ppx//mode to set @rules_ocaml//cfg/mode
return {
# "@rules_ocaml//cfg/mode" : settings["@rules_ocaml//cfg/mode:mode"],
# "@ppx//mode" : settings["@rules_ocaml//cfg/mode:mode"], ## Why?
"@rules_ocaml//cfg/ns:prefixes" : ["foo"],
"@rules_ocaml//cfg/ns:submodules" : [],
}
#######################
executable_in_transition = transition(
implementation = _executable_in_transition_impl,
inputs = [
# "@rules_ocaml//cfg/mode:mode",
# "@ppx//mode:mode",
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
# "@rules_ocaml//cfg/mode",
# "@ppx//mode",
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
#####################################################
def _ocaml_executable_deps_out_transition_impl(settings, attr):
# print(">>> OCAML_EXECUTABLE_DEPS_OUT_TRANSITION: %s" % attr.name)
# if attr.mode:
# mode = attr.mode
# else:
# mode = settings["@rules_ocaml//cfg/mode:mode"]
return {
# "@ppx//mode": mode,
"@rules_ocaml//cfg/ns:prefixes": [],
"@rules_ocaml//cfg/ns:submodules": []
}
################
ocaml_executable_deps_out_transition = transition(
implementation = _ocaml_executable_deps_out_transition_impl,
inputs = [
# "@rules_ocaml//cfg/mode:mode",
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules"
],
outputs = [
# "@ppx//mode",
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules"
]
)
################################################################
def _module_in_transition_impl(settings, attr):
# print("_module_in_transition_impl %s" % attr.name)
debug = False
# if attr.name in ["_Util"]:
# debug = True
if debug:
print(">>> ocaml_module_in_transition")
print_config_state(settings, attr)
module = None
## if struct uses select() it will not be resolved yet, so we need to test
if hasattr(attr, "struct"):
if attr.struct:
structfile = attr.struct.name
(basename, ext) = paths.split_extension(structfile)
module = capitalize_initial_char(basename)
submodules = []
for submodule_label in settings["@rules_ocaml//cfg/ns:submodules"]:
submodule = normalize_module_label(submodule_label)
submodules.append(submodule)
if module in settings["@rules_ocaml//cfg/ns:prefixes"]:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
elif module in submodules:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
else:
# reset to default values
prefixes = []
submodules = []
if debug:
print("OUT STATE:")
print(" ns:prefixes: %s" % prefixes)
print(" ns:submodules: %s" % submodules)
return {
"@rules_ocaml//cfg/ns:prefixes" : prefixes,
"@rules_ocaml//cfg/ns:submodules" : submodules,
}
####################
module_in_transition = transition(
implementation = _module_in_transition_impl,
inputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
################################################################
################################################################
################################################################
## we need to reset the submods list to null on the inbound transition so
## that each module is built only once. Example: half-diamond dep, where X
## is a dep of both a namespaced module and a non-namespaced module,
## and X itself is non-namespaced. we need X to have the same config
## state in all cases so it is only built once.
def _bootstrap_module_in_transition_impl(settings, attr):
# print("_bootstrap_module_in_transition_impl %s" % attr.name)
debug = False
# if attr.name in ["Stdlib", "Stdlib_cmi", "Uchar"]:
# debug = True
if debug:
print(">>> bootstrap_ocaml_module_in_transition")
print_config_state(settings, attr)
print(" resolver: %s" % settings["@rules_ocaml//cfg/bootstrap/ns:resolver"])
print(" t: %s" % type(settings["@rules_ocaml//cfg/bootstrap/ns:resolver"]))
module = None
## if struct uses select() it will not be resolved yet, so we need to test
if hasattr(attr, "struct"):
if attr.struct:
structfile = attr.struct.name
(basename, ext) = paths.split_extension(structfile)
module = capitalize_initial_char(basename)
submodules = []
for submodule_label in settings["@rules_ocaml//cfg/ns:submodules"]:
submodule = normalize_module_label(submodule_label)
submodules.append(submodule)
## We decide whether or not this module is namespaced, and whether
## it needs to be renamed.
if module in settings["@rules_ocaml//cfg/ns:prefixes"]:
# true if this module is user-provided resolver?
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
elif module in submodules:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
else:
# reset to default values
prefixes = []
submodules = []
if debug:
print("OUT STATE:")
print(" ns:prefixes: %s" % prefixes)
print(" ns:submodules: %s" % submodules)
if prefixes:
# no change
resolver = settings["@rules_ocaml//cfg/bootstrap/ns:resolver"]
else:
# reset to default
resolver = Label("@rules_ocaml//cfg/bootstrap/ns:ns_bootstrap")
return {
"@rules_ocaml//cfg/bootstrap/ns:resolver": resolver,
"@rules_ocaml//cfg/ns:prefixes" : prefixes,
"@rules_ocaml//cfg/ns:submodules" : submodules,
}
##############################
bootstrap_module_in_transition = transition(
implementation = _bootstrap_module_in_transition_impl,
inputs = [
"@rules_ocaml//cfg/bootstrap/ns:resolver",
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
"@rules_ocaml//cfg/bootstrap/ns:resolver",
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
##############################################
# if this nslib in ns:submodules list
# pass on prefix but not ns:submodules
# else
# reset ConfigState
def _nslib_in_transition_impl(settings, attr):
# print("_nslib_in_transition_impl %s" % attr.name)
debug = False
# if attr.name in ["color"]:
# debug = True
if debug:
print("")
print(">>> nslib_in_transition")
print_config_state(settings, attr)
print(attr)
module = normalize_module_name(attr.name)
submodules = []
for submodule_label in settings["@rules_ocaml//cfg/ns:submodules"]:
submodule = normalize_module_label(submodule_label)
submodules.append(submodule)
if module in settings["@rules_ocaml//cfg/ns:prefixes"]:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
elif module in submodules:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
else:
# reset to default values
prefixes = []
submodules = []
if debug:
print("OUT STATE:")
print(" ns:prefixes: %s" % prefixes)
print(" ns:submodules: %s" % submodules)
return {
"@rules_ocaml//cfg/ns:prefixes" : prefixes,
"@rules_ocaml//cfg/ns:submodules" : submodules,
}
###################
nslib_in_transition = transition(
implementation = _nslib_in_transition_impl,
inputs = [
# "@rules_ocaml//cfg/ns:transitivity",
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
#####################################################
def _ocaml_module_deps_out_transition_impl(settings, attr):
# print("_ocaml_module_deps_out_transition_impl %s" % attr.name)
debug = False
if attr.name == "_Grammar":
debug = True
if debug:
print(">>> ocaml_module_deps_out_transition")
print_config_state(settings, attr)
srcfile = attr.struct.name if hasattr(attr, "struct") else attr.src.name
(basename, ext) = paths.split_extension(srcfile)
module = capitalize_initial_char(basename)
submodules = []
for submodule_label in settings["@rules_ocaml//cfg/ns:submodules"]:
submodule = normalize_module_label(submodule_label)
submodules.append(submodule)
if module in submodules:
## this is an nslib submodule; we need to propagate
## configstate set by nslib, in case we depend on a sibling.
# print("OUT_T mod: %s" % module)
# print("OUT_T pfx: %s" % settings["@rules_ocaml//cfg/ns:prefixes"])
# if module == settings["@rules_ocaml//cfg/ns:prefixes"][-1]:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
else:
## we're not in an nslib context; reset to defaults
prefixes = []
submodules = []
if debug:
print("OUT STATE:")
print(" ns:prefixes: %s" % prefixes)
print(" ns:submodules: %s" % submodules)
return {
"@rules_ocaml//cfg/ns:prefixes" : prefixes,
"@rules_ocaml//cfg/ns:submodules": submodules
}
#####################
ocaml_module_deps_out_transition = transition(
implementation = _ocaml_module_deps_out_transition_impl,
inputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
#####################
ocaml_module_sig_out_transition = transition(
implementation = _ocaml_module_deps_out_transition_impl,
inputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
################################################################
################################################################
################################################################
def _subsignature_in_transition_impl(settings, attr):
# print("_subsignature_in_transition_impl %s" % attr.name)
debug = False
if attr.name in ["_Feedback"]:
debug = True
if debug:
print(">>> ocaml_subsignature_in_transition")
print_config_state(settings, attr)
module = None
## if struct uses select() it will not be resolved yet, so we need to test
if hasattr(attr, "struct"):
structfile = attr.struct.name
(basename, ext) = paths.split_extension(structfile)
module = capitalize_initial_char(basename)
submodules = []
for submodule_label in settings["@rules_ocaml//cfg/ns:submodules"]:
submodule = normalize_module_label(submodule_label)
submodules.append(submodule)
if module in settings["@rules_ocaml//cfg/ns:prefixes"]:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
elif module in submodules:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
else:
prefixes = []
submodules = []
if debug:
print("OUT STATE:")
print(" ns:prefixes: %s" % prefixes)
print(" ns:submodules: %s" % submodules)
return {
"@rules_ocaml//cfg/ns:prefixes" : prefixes,
"@rules_ocaml//cfg/ns:submodules" : submodules,
}
####################
subsignature_in_transition = transition(
implementation = _subsignature_in_transition_impl,
inputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
################################################################
def _ocaml_signature_deps_out_transition_impl(settings, attr):
# print("_ocaml_signature_deps_out_transition_impl %s" % attr.name)
debug = False # True
if attr.name == "":
debug = True
if debug:
print(">>> ocaml_signature_deps_out_transition")
print_config_state(settings, attr)
srcfile = attr.src.name
(basename, ext) = paths.split_extension(srcfile)
module = capitalize_initial_char(basename)
submodules = []
for submodule_label in settings["@rules_ocaml//cfg/ns:submodules"]:
submodule = normalize_module_label(submodule_label)
submodules.append(submodule)
if module in submodules:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
else:
prefix = ""
prefixes = []
submodules = []
if debug:
print("OUT STATE:")
print(" ns:prefixes: %s" % prefixes)
print(" ns:submodules: %s" % submodules)
return {
"@rules_ocaml//cfg/ns:prefixes" : prefixes,
"@rules_ocaml//cfg/ns:submodules": submodules
}
################
ocaml_signature_deps_out_transition = transition(
implementation = _ocaml_signature_deps_out_transition_impl,
# implementation = _ocaml_module_deps_out_transition_impl,
inputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
###########################################################
def _ocaml_subsignature_deps_out_transition_impl(settings, attr):
# print("_ocaml_subsignature_deps_out_transition_impl %s" % attr.name)
debug = False
# if attr.name == ":_Plexing.cmi":
# debug = True
if debug:
print(">>> ocaml_subsignature_deps_out_transition")
print_config_state(settings, attr)
srcfile = attr.src.name
(basename, ext) = paths.split_extension(srcfile)
module = capitalize_initial_char(basename)
submodules = []
for submodule_label in settings["@rules_ocaml//cfg/ns:submodules"]:
submodule = normalize_module_label(submodule_label)
submodules.append(submodule)
if module in submodules:
prefixes = settings["@rules_ocaml//cfg/ns:prefixes"]
submodules = settings["@rules_ocaml//cfg/ns:submodules"]
else:
prefixes = []
submodules = []
if attr.name == "_Plexing.cmi":
prefixes = []
submodules = []
if debug:
print("OUT STATE: %s" % attr.name)
print(" ns:prefixes: %s" % prefixes)
print(" ns:submodules: %s" % submodules)
return {
"@rules_ocaml//cfg/ns:prefixes" : prefixes,
"@rules_ocaml//cfg/ns:submodules": submodules
}
################
ocaml_subsignature_deps_out_transition = transition(
implementation = _ocaml_subsignature_deps_out_transition_impl,
# implementation = _ocaml_module_deps_out_transition_impl,
inputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
##############################################
def _reset_in_transition_impl(settings, attr):
return {
"@rules_ocaml//cfg/ns:prefixes" : [],
"@rules_ocaml//cfg/ns:submodules" : [],
}
reset_in_transition = transition(
implementation = _reset_in_transition_impl,
inputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
],
outputs = [
"@rules_ocaml//cfg/ns:prefixes",
"@rules_ocaml//cfg/ns:submodules",
]
)
##############################################
def _ppx_mode_transition_impl(settings, attr):
ppx_mode_val = settings["@ppx//mode:mode"]
return {
"@rules_ocaml//cfg/mode": ppx_mode_val,
}
ppx_mode_transition = transition(
implementation = _ppx_mode_transition_impl,
inputs = [
"@ppx//mode:mode",
],
outputs = [
"@rules_ocaml//cfg/mode",
]
)
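# Usage sketch (illustrative, not from this file): a transition defined above
# is attached to a rule or attribute via `cfg`, and the rule must carry the
# standard transition allowlist attribute, e.g.:
#   my_module = rule(
#       implementation = _my_module_impl,
#       cfg = module_in_transition,
#       attrs = {
#           "_allowlist_function_transition": attr.label(
#               default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
#           ),
#       },
#   )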
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
#
# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
#
# OpenArkCompiler is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#
from maple_test import main
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from os import listdir
from os.path import isfile, join
import pandas as pd
import pickle
def get_latest_file(filename_str_contains, datapath='../data/', filetype='parquet'):
"""
    Get the most recent file whose name contains a given string.
    Parameters
    ----------
    filename_str_contains : str
        Substring of the filename to search for
    datapath : str
        (relative) path to the files. Default '../data/'
    filetype : str
        Type of file to read ('parquet' or 'pickle'). Default 'parquet'
Returns
-------
pd.DataFrame or model object
"""
# Get list with files
onlyfiles = sorted([f for f in listdir(datapath) if isfile(join(datapath, f))])
# Get last file
filename = [s for s in onlyfiles if filename_str_contains in s][-1]
if filetype == 'parquet':
df = pd.read_parquet(datapath + filename)
return df
if filetype == 'pickle':
model = pickle.load(open(datapath + filename, 'rb'))
return model
def list_filenames(path, filename_str_contains):
"""
Method to get a list of filenames with a certain string value in the name.
Parameters
----------
path : str
Path to search in for files
filename_str_contains : str
String value to search for in the name
Returns
-------
List with string values of all the matched filenames
"""
all_files = sorted([f for f in listdir(path) if isfile(join(path, f))])
filenames = [s for s in all_files if filename_str_contains in s]
return filenames
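# Usage sketch (filenames and paths below are illustrative only):
#   df = get_latest_file('transactions', datapath='../data/', filetype='parquet')
#   model = get_latest_file('model', datapath='../models/', filetype='pickle')
#   names = list_filenames('../data/', 'transactions')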
|
nilq/baby-python
|
python
|
class SymbolTableError(Exception):
"""Exception raised for errors with the symbol table.
Attributes:
message -- explanation of the error
"""
    def __init__(self, message):
        super().__init__(message)
        self.message = message
|
nilq/baby-python
|
python
|
# Created by woochanghwang at 12/07/2021
import toolbox.drugbank_handler_postgres as drug_pg
import pandas as pd
def get_DDI_of_a_drug(drug):
# drug_a = row['DrugA_ID']
# drug_b = row['DrugB_ID']
sql = "SELECT * from structured_drug_interactions where subject_drug_drugbank_id = \'{}\' " \
"or affected_drug_drugbank_id = \'{}\'".format(drug, drug)
print(sql)
sql_result= drug_pg.query_postgres_get_DF(sql,**drug_pg.DRUGBANK_DB_INFO)
return sql_result
def get_CT_of_a_condition(condition):
# drug_a = row['DrugA_ID']
# drug_b = row['DrugB_ID']
sql = f"SELECT * from drug_clinicaltrial_2 where condition_id = \'{condition}\' "
print(sql)
sql_result= drug_pg.query_postgres_get_DF(sql,**drug_pg.DRUGBANK_DB_INFO)
return sql_result
def add_CT_on_simulation_result():
candidate_drugs_CT_df = pd.read_excel("../result/SARS-CoV-2/Drug/SARS-CoV-2_candidate_drug_info_CT_v2.xlsx",sheet_name="CT" )
print(candidate_drugs_CT_df)
CT_drugs = candidate_drugs_CT_df['drugbank_id'].tolist()
# drug_info_df = pd.read_excel("../result/SARS-CoV-2/Drug/SARS-CoV-2_candidate_drug_info_target.SIP.xlsx")
drug_info_df = pd.read_excel("../result/SARS-CoV/Drug/SARS-CoV_candidate_drug_info_target.SIP.xlsx")
    # drug_info_df['Clinical_Trial'] = drug_info_df
    drug_info_df['Clinical_Trial'] = drug_info_df['source'].isin(CT_drugs)
print(drug_info_df)
# drug_info_df.to_excel("../result/SARS-CoV-2/Drug/SARS-CoV-2_candidate_drug_info_target.SIP.CT.v2.xlsx", index=False)
drug_info_df.to_excel("../result/SARS-CoV/Drug/SARS-CoV_candidate_drug_info_target.SIP.CT.v2.xlsx", index=False)
def main():
# condition = "DBCOND0129755"
#
# CT_result = get_CT_of_a_condition(condition)
# # print(CT_result)
# # CT_result.to_excel("../Data/SARS-CoV-2_CT_drugs_v2.xlsx", index=False)
#
# candidate_drugs_df = pd.read_excel("../result/SARS-CoV-2/Drug/SARS-CoV-2_candidate_drug_info_drugbank.xlsx")
# # candidate_drugs_df = pd.read_excel("../result/SARS-CoV-2/Drug/")
# print(candidate_drugs_df)
#
# candidate_drugs_CT_df = pd.merge(left=candidate_drugs_df,
# right = CT_result,
# how='left',
# left_on='source',
# right_on='drugbank_id')
# print(candidate_drugs_CT_df)
# # candidate_drugs_CT_df.to_excel("../result/SARS-CoV-2/Drug/SARS-CoV-2_candidate_drug_info_CT.xlsx", index=False)
# candidate_drugs_CT_df.to_excel("../result/SARS-CoV-2/Drug/SARS-CoV-2_candidate_drug_info_CT_v2.xlsx", index=False)
add_CT_on_simulation_result()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import unittest
from machinetranslation.translator import englishToFrench, frenchToEnglish
class TestTranslator(unittest.TestCase):
# test function
    def test_englishToFrench(self):
        self.assertIsNotNone(englishToFrench('Hello'))
        self.assertEqual(englishToFrench("Hello"), "Bonjour")
    def test_frenchToEnglish(self):
        self.assertIsNotNone(frenchToEnglish('Bonjour'))
        self.assertEqual(frenchToEnglish("Bonjour"), "Hello")
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import tkinter as tk
import tkinter.ttk as ttk
from ..elements import TreeList
from ...head.database import Database as db
from ...globals import SURVEY_TYPES
from .templates import ListFrameTemplate
class SurveysListFrame(ListFrameTemplate):
def __init__(self, top):
super().__init__(top)
self.data = []
self.fuel_name: str
self.survey_type: str
def create_head_section(self, top):
title = self.create_title(self, "LISTA POMIARÓW")
option_container = tk.Frame(self)
cboxs_container, self.cboxes =\
self.create_cboxes_container(option_container)
btns_container, self.buttons =\
self.create_btns_container(option_container)
cboxs_container.pack(side="left")
btns_container.pack(side="right", padx=15)
self.buttons[0].configure(command=lambda: top.change_frame(3))
self.cboxes[0].configure(values=db.get_fuels_list())
self.cboxes[1].configure(values=tuple(SURVEY_TYPES.keys()))
for cbox in self.cboxes:
cbox.configure(state='readonly')
cbox.bind("<<ComboboxSelected>>", self.refresh_list)
title.pack(side="top", fill="x")
option_container.pack(side="top", fill="both")
def create_body_section(self, top):
self.tree_list = TreeList(self)
comment_container, self.comment_elements =\
self.create_comment_container(self)
self.tree_list.pack(fill="both", expand=1)
comment_container.pack(side="bottom", fill="x")
columns = {"Lp.": 0,
"Śr. kryt. dyszy [mm]": 0.25,
"Czas próbkowania [ms]": 0.25,
"Masa paliwa [g]": 0.25,
"Data dodania": 0.25}
self.set_list(top, self.tree_list, columns)
def refresh_list(self, event):
self.fuel_name = self.cboxes[0].get()
self.survey_type = self.cboxes[1].get()
if self.survey_type and self.fuel_name:
self.data = self.load_data()
self.reload_list()
def load_data(self):
return db.load_surveys(
self.fuel_name, SURVEY_TYPES[self.survey_type])
@staticmethod
def fill_list(tree_frame, data):
if not data:
return
surveys_data = []
for number, survey in enumerate(data):
surveys_data.append((number+1, survey.jet_diameter, survey.sampling_time,
survey.fuel_mass, survey.save_date))
tree_frame.set_data(surveys_data)
def reload_list(self):
self.tree_list.clean()
self.fill_list(self.tree_list, self.data)
@staticmethod
def create_cboxes_container(top):
container = tk.Frame(top)
left = tk.Frame(container)
fuel_label = tk.Label(left)
fuel_label.configure(text="Paliwo")
fuel_cbox = ttk.Combobox(left)
right = tk.Frame(container)
type_label = tk.Label(right)
type_label.configure(text="Rodzaj pomiaru")
type_cbox = ttk.Combobox(right)
fuel_label.pack(side="left")
fuel_cbox.pack(side="left")
type_label.pack(side="left")
type_cbox.pack(side="left")
left.pack(side="left")
right.pack(side="left", padx=10)
return container, (fuel_cbox, type_cbox)
|
nilq/baby-python
|
python
|
import unittest
import os
import plexcleaner.database as database
from plexcleaner.media import Library, Movie
__author__ = 'Jean-Bernard Ratte - jean.bernard.ratte@unary.ca'
# flake8: noqa
class TestMediaLibrary(unittest.TestCase):
_nb_movie = 98
_effective_size = 100275991932
def test_init(self):
with database.Database(database_override='./tests/database/com.plexapp.plugins.library.db') as db:
library = Library(db)
self.assertEqual(len(library), self._nb_movie)
def test_update_library(self):
with database.Database(database_override='./tests/database/com.plexapp.plugins.library.db') as db:
library = Library(db)
movie = Movie(1, u"a", 'b', 1, 2, 2.2, 'd', 1, 'e', '/test')
library._update_library(movie)
self.assertEqual(len(library), self._nb_movie + 1)
def test_effective_size(self):
with database.Database(database_override='./tests/database/com.plexapp.plugins.library.db') as db:
library = Library(db)
movie = Movie(2, u"a", 'b', 1, 2, 2.2, 'd', 1, 'e', '/test')
movie.exist = False
library._update_library(movie)
self.assertEqual(library.effective_size, self._effective_size)
movie.exist = True
movie.matched = True
library._update_library(movie)
self.assertEqual(library.effective_size, self._effective_size + 2)
def test_iter(self):
with database.Database(database_override='./tests/database/com.plexapp.plugins.library.db') as db:
library = Library(db)
self.assertEqual(type(library.__iter__()).__name__, 'generator')
            m = next(iter(library))
self.assertEqual(m.__class__.__name__, 'Movie')
def test_has_missing_file(self):
with database.Database(database_override='./tests/database/com.plexapp.plugins.library.db') as db:
library = Library(db)
self.assertFalse(library.has_missing_file)
os.rename('tests/library/2 Guns.avi', 'tests/library/Two Guns.avi')
with database.Database(database_override='./tests/database/com.plexapp.plugins.library.db') as db:
library = Library(db)
self.assertTrue(library.has_missing_file)
os.rename('tests/library/Two Guns.avi', 'tests/library/2 Guns.avi')
|
nilq/baby-python
|
python
|
#coding=utf-8
import os
import jieba
import shutil
dirs = []
for d in os.listdir(os.getcwd()):
if os.path.isfile(d):
continue
dirs.append(d)
imgs = []
with open("data.txt",'w') as fw , open("imgs.txt","w") as fi:
for d in dirs:
        for f in os.listdir(d):
fi.write("%s\n" % f)
shutil.move(os.path.join(d,f),f)
fw.write("%s#0\t%s\n" % (f," ".join(jieba.cut(d))) )
|
nilq/baby-python
|
python
|
from urllib.parse import parse_qsl
from establishment.socialaccount.providers.base import Provider
class OAuth2Provider(Provider):
def get_auth_params(self, request, action):
settings = self.get_settings()
ret = settings.get("AUTH_PARAMS", {})
dynamic_auth_params = request.GET.get("auth_params", None)
if dynamic_auth_params:
ret.update(dict(parse_qsl(dynamic_auth_params)))
return ret
def get_scope(self, request):
settings = self.get_settings()
scope = settings.get("SCOPE")
if scope is None:
scope = self.get_default_scope()
dynamic_scope = request.GET.get("scope", None)
if dynamic_scope:
scope.extend(dynamic_scope.split(","))
return scope
def get_default_scope(self):
return []
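# Behaviour sketch (illustrative request, not from this file): a query string
# like ?scope=email,profile extends the configured SCOPE list, and
# ?auth_params=prompt%3Dconsent is parsed by parse_qsl into
# {'prompt': 'consent'} and merged into AUTH_PARAMS.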
|
nilq/baby-python
|
python
|
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
from acapy_wrapper.models.credential_offer import CredentialOffer
from acapy_wrapper.models.credential_proposal import CredentialProposal
from acapy_wrapper.models.indy_cred_abstract import IndyCredAbstract
from acapy_wrapper.models.indy_cred_info import IndyCredInfo
from acapy_wrapper.models.indy_cred_request import IndyCredRequest
from acapy_wrapper.models.indy_credential import IndyCredential
class V10CredentialExchange(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
V10CredentialExchange - a model defined in OpenAPI
auto_issue: The auto_issue of this V10CredentialExchange [Optional].
auto_offer: The auto_offer of this V10CredentialExchange [Optional].
auto_remove: The auto_remove of this V10CredentialExchange [Optional].
connection_id: The connection_id of this V10CredentialExchange [Optional].
created_at: The created_at of this V10CredentialExchange [Optional].
credential: The credential of this V10CredentialExchange [Optional].
credential_definition_id: The credential_definition_id of this V10CredentialExchange [Optional].
credential_exchange_id: The credential_exchange_id of this V10CredentialExchange [Optional].
credential_id: The credential_id of this V10CredentialExchange [Optional].
credential_offer: The credential_offer of this V10CredentialExchange [Optional].
credential_offer_dict: The credential_offer_dict of this V10CredentialExchange [Optional].
credential_proposal_dict: The credential_proposal_dict of this V10CredentialExchange [Optional].
credential_request: The credential_request of this V10CredentialExchange [Optional].
credential_request_metadata: The credential_request_metadata of this V10CredentialExchange [Optional].
error_msg: The error_msg of this V10CredentialExchange [Optional].
initiator: The initiator of this V10CredentialExchange [Optional].
parent_thread_id: The parent_thread_id of this V10CredentialExchange [Optional].
raw_credential: The raw_credential of this V10CredentialExchange [Optional].
revoc_reg_id: The revoc_reg_id of this V10CredentialExchange [Optional].
revocation_id: The revocation_id of this V10CredentialExchange [Optional].
role: The role of this V10CredentialExchange [Optional].
schema_id: The schema_id of this V10CredentialExchange [Optional].
state: The state of this V10CredentialExchange [Optional].
thread_id: The thread_id of this V10CredentialExchange [Optional].
trace: The trace of this V10CredentialExchange [Optional].
updated_at: The updated_at of this V10CredentialExchange [Optional].
"""
auto_issue: Optional[bool] = None
auto_offer: Optional[bool] = None
auto_remove: Optional[bool] = None
connection_id: Optional[str] = None
created_at: Optional[str] = None
credential: Optional[IndyCredInfo] = None
credential_definition_id: Optional[str] = None
credential_exchange_id: Optional[str] = None
credential_id: Optional[str] = None
credential_offer: Optional[IndyCredAbstract] = None
credential_offer_dict: Optional[CredentialOffer] = None
credential_proposal_dict: Optional[CredentialProposal] = None
credential_request: Optional[IndyCredRequest] = None
credential_request_metadata: Optional[Dict[str, Any]] = None
error_msg: Optional[str] = None
initiator: Optional[str] = None
parent_thread_id: Optional[str] = None
raw_credential: Optional[IndyCredential] = None
revoc_reg_id: Optional[str] = None
revocation_id: Optional[str] = None
role: Optional[str] = None
schema_id: Optional[str] = None
state: Optional[str] = None
thread_id: Optional[str] = None
trace: Optional[bool] = None
updated_at: Optional[str] = None
@validator("created_at")
def created_at_pattern(cls, value):
assert value is not None and re.match(
r"^\d{4}-\d\d-\d\d[T ]\d\d:\d\d(?:\:(?:\d\d(?:\.\d{1,6})?))?(?:[+-]\d\d:?\d\d|Z|)$",
value,
)
return value
@validator("credential_definition_id")
def credential_definition_id_pattern(cls, value):
assert value is not None and re.match(
r"^([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}):3:CL:(([1-9][0-9]*)|([123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+)):(.+)?$",
value,
)
return value
@validator("schema_id")
def schema_id_pattern(cls, value):
assert value is not None and re.match(
r"^[123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz]{21,22}:2:.+:[0-9.]+$",
value,
)
return value
@validator("updated_at")
def updated_at_pattern(cls, value):
assert value is not None and re.match(
r"^\d{4}-\d\d-\d\d[T ]\d\d:\d\d(?:\:(?:\d\d(?:\.\d{1,6})?))?(?:[+-]\d\d:?\d\d|Z|)$",
value,
)
return value
V10CredentialExchange.update_forward_refs()
|
nilq/baby-python
|
python
|
from pyilluminate import Illuminate
import numpy as np
import pytest
pytestmark = pytest.mark.device
@pytest.fixture(scope='module')
def light():
with Illuminate() as light:
yield light
def test_single_led_command(light):
light.color = 1
# setting led with an integer
light.led = 30
def test_short_led_command(light):
light.color = 1
light.led = range(30, 60)
def test_short_led_command_list(light):
light.color = 1
light.led = list(range(30, 60))
def test_short_led_command_array(light):
light.color = 1
light.led = np.arange(30, 60, dtype=int)
def test_long_led_command(light):
light.color = 1
    # This command is often longer than the internal buffer of the system.
    # Therefore, without the ability to split up long commands, the Teensy
    # may hang.
light.led = range(609)
|
nilq/baby-python
|
python
|
"""Tests for deletion of assemblies."""
import os
import unittest
from hicognition.test_helpers import LoginTestCase, TempDirTestCase
# add path to import app
# import sys
# sys.path.append("./")
from app import db
from app.models import (
Dataset,
Assembly,
)
class TestDeleteAssembly(LoginTestCase, TempDirTestCase):
"""Tests for deletion of assemblies."""
def _create_empty_file_in_tempdir(self, file_name):
file_path = os.path.join(self.TEMP_PATH, file_name)
open(file_path, "w").close()
return file_path
def setUp(self):
super().setUp()
# define Assemblies
self.assembly_user_1 = Assembly(
id=1,
user_id=1,
name="test",
chrom_sizes=self._create_empty_file_in_tempdir("hg19.txt"),
chrom_arms=self._create_empty_file_in_tempdir("arms.txt"),
)
self.assembly_user_1_w_datasets = Assembly(
id=2,
user_id=1,
name="test",
chrom_sizes=self._create_empty_file_in_tempdir("hg1.txt"),
chrom_arms=self._create_empty_file_in_tempdir("arms2.txt"),
)
self.assembly_user_2 = Assembly(
id=3,
user_id=2,
name="test2",
chrom_sizes=self._create_empty_file_in_tempdir("hg38.txt"),
chrom_arms=self._create_empty_file_in_tempdir("arms38.txt"),
)
        # define associated datasets
        self.dataset1 = Dataset(id=1, assembly=2)
        # authenticate
token = self.add_and_authenticate("test", "asdf")
# create token_headers
self.token_headers = self.get_token_header(token)
def test_no_auth(self):
"""No authentication provided, response should be 401"""
# protected route
response = self.client.delete(
"/api/assemblies/1/", content_type="application/json"
)
self.assertEqual(response.status_code, 401)
def test_delete_wo_id(self):
"""Should return 405 since delete is not allowed for /api/assemblies"""
response = self.client.delete(
"/api/assemblies/", content_type="application/json"
)
self.assertEqual(response.status_code, 405)
def test_delete_assembly_does_not_exist(self):
"""test deletion of collection that does not exist."""
        # request an assembly id that does not exist
response = self.client.delete(
"/api/assemblies/500/",
headers=self.token_headers,
content_type="application/json",
)
self.assertEqual(response.status_code, 404)
def test_delete_assembly_wo_permission(self):
"""Should return 403 since assembly is not owned."""
# add session
db.session.add(self.assembly_user_2)
db.session.commit()
        # user 1 attempts to delete an assembly owned by user 2
response = self.client.delete(
"/api/assemblies/3/",
headers=self.token_headers,
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
def test_delete_owned_assembly(self):
"""Check whether owned assembly is deleted correctly."""
# add session
db.session.add_all([self.assembly_user_1])
db.session.commit()
        # user 1 deletes their own assembly
response = self.client.delete(
"/api/assemblies/1/",
headers=self.token_headers,
content_type="application/json",
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(Assembly.query.all()), 0)
self.assertEqual(len(os.listdir(self.TEMP_PATH)), 4)
def test_delete_assembly_w_associated_datasets_forbidden(self):
"""Tests whether deletion of assembly is forbidden if there are associated datasets."""
# add session
db.session.add_all([self.assembly_user_1_w_datasets, self.dataset1])
db.session.commit()
# make call
response = self.client.delete(
"/api/assemblies/2/",
headers=self.token_headers,
content_type="application/json",
)
self.assertEqual(response.status_code, 403)
if __name__ == "__main__":
res = unittest.main(verbosity=3, exit=False)
|
nilq/baby-python
|
python
|
import ccxt
def build_exchange(name):
exchange = getattr(ccxt, name)
return exchange({
'enableRateLimit': True
})
def timestamps_to_seconds(data):
for datum in data:
datum[0] = datum[0] // 1000
def secs_to_millis(datum):
return datum * 1000
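# Usage sketch (exchange id and symbol are assumptions; fetch_ohlcv rows are
# [timestamp_ms, open, high, low, close, volume]):
#   exchange = build_exchange('binance')
#   candles = exchange.fetch_ohlcv('BTC/USDT', timeframe='1h', limit=10)
#   timestamps_to_seconds(candles)  # converts the timestamps to seconds in place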
|
nilq/baby-python
|
python
|
# TODO: add more filetypes
paste_file_types = [
["All supprted files", ""],
("All files", "*"),
("Plain text", "*.txt"),
("Bash script", "*.sh"),
("Batch script", "*.bat"),
("C file", "*.c"),
("C++ file", "*.cpp"),
("Python file", "*.py"),
("R script file", "*.R")
]
for _, extension in paste_file_types[2::]:
paste_file_types[0][1] += extension + " "
# TODO: add more syntax
paste_syntax = {
"None": "text",
"Bash" : "bash",
"Batch" : "dos",
"C" : "c",
"C++" : "cpp",
"Python" : "python",
"R" : "rsplus"
}
paste_expiry = {
"Never" : "N",
"10 minutes" : "10M",
"1 hour" : "1H",
"1 day" : "1D",
"1 week" : "1W",
"2 weeks" : "2W",
"1 month" : "1M",
"6 months" : "6M",
"1 year" : "1Y"
}
paste_privacy = {
"Unlisted" : "1",
"Public" : "0"
}
|
nilq/baby-python
|
python
|
'Write a menu script that displays the title "Menu Principal" plus three options:'
# 1. FIM (end)
# 2. CADASTRO (registration)
# 3. CONSULTA (lookup)
"The program reads the chosen option from the keyboard and prints a message confirming it."
# If the chosen option is invalid, print the error message "Opção Inválida".
print("-=-=- MENU PRINCIPAL -=-=-")
print("\nDigite a opção desejada para prosseguir:\n")
print("Digite 1 -> FIM")
print("Digite 2 -> CADASTRO")
print("Digite 3 -> CONSULTA")
fim = "FIM"
cadastro = "CADASTRO"
consulta = "CONSULTA"
option = int(input("\nDigite a opção desejada: "))
if option == 1:
print(f"\nOpção 1: {fim}")
elif option == 2:
print(f"\nOpção 2: {cadastro}")
elif option == 3:
print(f"\nOpção 3: {consulta}")
else:
print("Opção Inválida")
|
nilq/baby-python
|
python
|
from django.http import HttpResponse, JsonResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from tasks.models import Task
from tasks.serializers.tasksCreateSerializer import TasksCreateSerializer
from tasks.serializers.taskUpdateSerializer import TasksUpdateSerializer
from tasks.serializers.tasksListSerializer import TasksListSerializer
from tasks.serializers.taskUpdateStatusSerializer import TasksUpdateStatusSerializer
from rest_framework import generics
from datetime import datetime
class TaskList(generics.ListAPIView):
"""
List all tasks, or create a new tasks.
"""
def get(self, request, format=None):
filters = {
'title__icontains': request.query_params.get('search', ''),
'created_at__lte': request.query_params.get('end_at', datetime.now())
}
if request.query_params.get("start_at") is not None:
start_at_obj = datetime.strptime(request.query_params.get("start_at"), '%Y-%m-%d')
filters['created_at__gte'] = datetime.combine(start_at_obj.date(), datetime.min.time())
tasks = Task.objects.filter(**filters)
serializer = TasksListSerializer(tasks, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = TasksCreateSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TaskStatus(generics.ListAPIView):
"""
Update status task
"""
def get_object(self, pk):
try:
return Task.objects.get(pk=pk)
except Task.DoesNotExist:
raise Http404
def put(self, request, pk, format=None):
task = self.get_object(pk)
serializer = TasksUpdateStatusSerializer(task, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TaskDetail(generics.ListAPIView):
"""
Retrieve, update or delete a task
"""
def get_object(self, pk):
try:
return Task.objects.get(pk=pk)
except Task.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
task = self.get_object(pk)
serializer = TasksListSerializer(task)
return Response(serializer.data)
def put(self, request, pk, format=None):
task = self.get_object(pk)
serializer = TasksUpdateSerializer(task, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
task = self.get_object(pk)
task.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
nilq/baby-python
|
python
|
from math import sqrt
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils import data as data
from torchvision import transforms as transforms
from data.datasets import MNIST_truncated, FEMNIST, FashionMNIST_truncated, SVHN_custom, CIFAR10_truncated, Generated
class AddGaussianNoise(object):
def __init__(self, mean=0., std=1., net_id=None, total=0):
self.std = std
self.mean = mean
self.net_id = net_id
self.num = int(sqrt(total))
if self.num * self.num < total:
self.num = self.num + 1
def __call__(self, tensor):
if self.net_id is None:
return tensor + torch.randn(tensor.size()) * self.std + self.mean
else:
tmp = torch.randn(tensor.size())
filt = torch.zeros(tensor.size())
size = int(28 / self.num)
row = int(self.net_id / size)
col = self.net_id % size
for i in range(size):
for j in range(size):
filt[:, row * size + i, col * size + j] = 1
tmp = tmp * filt
return tensor + tmp * self.std + self.mean
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
def get_loaderargs(dataset, datadir, train_bs, test_bs, dataidxs=None, noise_level=0, net_id=None, total=0,
augment=True, num_workers=(0, 0)):
if dataset in ('mnist', 'femnist', 'fmnist', 'cifar10', 'svhn', 'generated', 'covtype', 'a9a', 'rcv1', 'SUSY'):
if dataset == 'mnist':
dl_obj = MNIST_truncated
transform_train = transforms.Compose([
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)])
transform_test = transforms.Compose([
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)])
elif dataset == 'femnist':
dl_obj = FEMNIST
transform_train = transforms.Compose([
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)])
transform_test = transforms.Compose([
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)])
elif dataset == 'fmnist':
dl_obj = FashionMNIST_truncated
transform_train = transforms.Compose([
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)])
transform_test = transforms.Compose([
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)])
elif dataset == 'svhn':
dl_obj = SVHN_custom
transform_train = transforms.Compose([
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)])
transform_test = transforms.Compose([
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)])
elif dataset == 'cifar10':
dl_obj = CIFAR10_truncated
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(
Variable(x.unsqueeze(0), requires_grad=False),
(4, 4, 4, 4), mode='reflect').data.squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)
])
# data prep for test set
transform_test = transforms.Compose([
transforms.ToTensor(),
AddGaussianNoise(0., noise_level, net_id, total)])
else:
dl_obj = Generated
transform_train = None
transform_test = None
train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True,
transform=transform_train if augment else transform_test)
test_ds = dl_obj(datadir, train=False, transform=transform_test)
    # persistent_workers is only valid when num_workers > 0, so gate it
    train_args = {
        'dataset': train_ds, 'batch_size': train_bs, 'shuffle': True, 'drop_last': False, 'pin_memory': True,
        'num_workers': num_workers[0], 'persistent_workers': num_workers[0] > 0
    }
    test_args = {
        'dataset': test_ds, 'batch_size': test_bs, 'shuffle': True, 'drop_last': False, 'pin_memory': True,
        'num_workers': num_workers[1], 'persistent_workers': num_workers[1] > 0
    }
# train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False,
# pin_memory=True, num_workers=num_workers[0], persistent_workers=True)
# test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False,
# pin_memory=True, num_workers=num_workers[1], persistent_workers=True)
return train_args, test_args, train_ds, test_ds
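# Usage sketch (paths and batch sizes are illustrative): the returned dicts
# plug straight into torch DataLoader:
#   train_args, test_args, train_ds, test_ds = get_loaderargs(
#       'cifar10', './data', train_bs=64, test_bs=128, num_workers=(2, 2))
#   train_dl = data.DataLoader(**train_args)
#   test_dl = data.DataLoader(**test_args)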
|
nilq/baby-python
|
python
|
class Solution(object):
def maxDistance(self, nums):
"""
:type arrays: List[List[int]]
:rtype: int
"""
low = float('inf')
high = float('-inf')
res = 0
for num in nums:
            # We can use num[0] and num[-1] only because these lists are sorted
res = max(res, max(high - num[0], num[-1] - low))
low = min(low, min(num))
high = max(high, max(num))
return res
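# Worked example (illustrative): for [[1, 2, 3], [4, 5], [1, 2, 3]] the best
# pair is 5 from the second list and 1 from another list, so the answer is 4:
#   Solution().maxDistance([[1, 2, 3], [4, 5], [1, 2, 3]])  # -> 4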
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from mailviews.previews import autodiscover, site
autodiscover()
app_name = 'mailviews'
urlpatterns = [
url(regex=r'', view=site.urls)
]
|
nilq/baby-python
|
python
|
# THIS FILE IS SAFE TO EDIT. It will not be overwritten when rerunning go-raml.
from flask import jsonify, request
from ..models import FarmerRegistration, FarmerNotFoundError
def GetFarmerHandler(iyo_organization):
try:
farmer = FarmerRegistration.get(iyo_organization)
except FarmerNotFoundError:
return jsonify(), 404
return farmer.to_json(use_db_field=False), 200, {'Content-type': 'application/json'}
|
nilq/baby-python
|
python
|
import web
from web import form
import socket
import sensor
from threading import Thread
localhost = "http://" + socket.gethostbyname(socket.gethostname()) + ":8080"
print(localhost)
urls = (
'/','Login',
'/control','Page_one',
'/left', 'Left',
'/right', 'Right',
'/forward', 'Forward',
'/start', 'Start',
'/stop', 'Stop',
'/backward', 'Backward',
'/about', 'About',
'/setting','Setting',
'/updatelog', 'Updatelog',
'/source_code', 'Sourcecode',
'/logoff', 'Logoff',
'/stopserver', 'Stopserver',
'/result_sensor_ultrasonic', 'Result_sensor_ultrasonic',
)
app = web.application(urls, globals())
loginform = form.Form(
form.Textbox("USERNAME",
form.notnull,
form.Validator('wrong', lambda x: x == "martin")),
form.Textbox("PASSWORD",
form.notnull,
form.Validator('wrong', lambda x: x == "12341234")),
form.Checkbox('I am not a robot'))
render = web.template.render('templates/')
class Login:
def GET(self):
return render.login(loginform)
def POST(self):
if not loginform.validates():
return render.login(loginform)
else:
sensor.gpio_startup()
return web.seeother('/control')
class Page_one:
def GET(self):
return render.page_one()
def POST(self):
return render.page_one()
class Left:
def GET(self):
sensor.move_left = True
sensor.move_right = False
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
print("left")
return "left"
class Right:
def GET(self):
sensor.move_left = False
sensor.move_right = True
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
print("right")
return "right"
class Forward:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = True
sensor.move_forward = True
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
print("forward")
return "forward"
class Start:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
print("start")
return "start"
class Backward:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = True
sensor.move_forward = False
sensor.move_backward = True
sensor.move_stop = False
sensor.autostop = False
print("backward")
return "backward"
class Stop:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
print("stop")
return "stop"
class About:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
return render.about()
class Setting:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
return render.setting()
class Updatelog:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
return render.updatelog()
class Sourcecode:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
return render.source_code()
class Logoff:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
sensor.gpio_end()
        sensor.gpiodidstartup = False
return render.logoff()
class Stopserver:
def GET(self):
sensor.move_left = False
sensor.move_right = False
sensor.move_netural = False
sensor.move_forward = False
sensor.move_backward = False
sensor.move_stop = False
sensor.autostop = False
return exit()
class Result_sensor_ultrasonic:
def GET(self):
return sensor.sensor_ultrasonic()
def motormovement():
while True:
while sensor.gpiodidstartup:
t1 = Thread(target=sensor.motor_turn_left)
t2 = Thread(target=sensor.motor_turn_right)
t3 = Thread(target=sensor.motor_netural)
t4 = Thread(target=sensor.motor_stop)
t5 = Thread(target=sensor.motor_forward)
t6 = Thread(target=sensor.motor_backward)
t7 = Thread(target=sensor.collision_prevention_system)
t1.daemon = True
t2.daemon = True
t3.daemon = True
t4.daemon = True
t5.daemon = True
t6.daemon = True
t7.daemon = True
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
t6.start()
t7.start()
return
if __name__ == "__main__" :
motor = Thread(target=motormovement)
motor.daemon = True
motor.start()
app.run()
|
nilq/baby-python
|
python
|
import os
import sys
command = " ".join(sys.argv[1:])
os.system(command)
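# Note (a sketch, not a required change): " ".join() discards the original
# argument quoting, so arguments containing spaces are re-split by the shell.
# subprocess preserves them:
#   import subprocess
#   subprocess.run(sys.argv[1:])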
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals, print_function
from pprint import pprint
import xmltodict
with open("show_security_zones.xml") as infile:
show_security_zones = xmltodict.parse(infile.read())
print("\n\n")
print("Print the new variable and its type")
print("-" * 20)
pprint(show_security_zones)
print(type(show_security_zones))
print("\n\n")
print("Print out index and name of the security zones")
print("-" * 20)
zones = show_security_zones["zones-information"]["zones-security"]
for index, zone in enumerate(zones):
print(f"Security Zone #{index + 1}: {zone['zones-security-zonename']}")
print("\n\n")
|
nilq/baby-python
|
python
|
# https://www.urionlinejudge.com.br/judge/en/problems/view/1012
entrada = input().split()
a = float(entrada[0])
b = float(entrada[1])
c = float(entrada[2])
print(f"TRIANGULO: {a*c/2:.3f}")
print(f"CIRCULO: {3.14159*c**2:.3f}")
print(f"TRAPEZIO: {(a+b)*c/2:.3f}")
print(f"QUADRADO: {b**2:.3f}")
print(f"RETANGULO: {a*b:.3f}")
|
nilq/baby-python
|
python
|
import torch
tau = 6.28318530718
class FractalPerlin2D(object):
def __init__(self, shape, resolutions, factors, generator=torch.random.default_generator):
shape = shape if len(shape)==3 else (None,)+shape
self.shape = shape
self.factors = factors
self.generator = generator
self.device = generator.device
self.resolutions = resolutions
self.grid_shapes = [(shape[1]//res[0], shape[2]//res[1]) for res in resolutions]
#precomputed tensors
self.linxs = [torch.linspace(0,1,gs[1],device=self.device) for gs in self.grid_shapes]
self.linys = [torch.linspace(0,1,gs[0],device=self.device) for gs in self.grid_shapes]
self.tl_masks = [self.fade(lx)[None,:]*self.fade(ly)[:,None] for lx, ly in zip(self.linxs, self.linys)]
self.tr_masks = [torch.flip(tl_mask,dims=[1]) for tl_mask in self.tl_masks]
self.bl_masks = [torch.flip(tl_mask,dims=[0]) for tl_mask in self.tl_masks]
self.br_masks = [torch.flip(tl_mask,dims=[0,1]) for tl_mask in self.tl_masks]
def fade(self, t):
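# Perlin's quintic fade curve 6t^5 - 15t^4 + 10t^3: it has zero first and
# second derivatives at t=0 and t=1, so adjacent grid cells blend smoothly.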
return 6 * t**5 - 15 * t**4 + 10 * t**3
def perlin_noise(self, octave, batch_size):
res = self.resolutions[octave]
angles = torch.zeros((batch_size, res[0]+2, res[1]+2), device=self.device)
angles.uniform_(0, tau, generator=self.generator)
rx = torch.cos(angles)[:,:,:,None]*self.linxs[octave]
ry = torch.sin(angles)[:,:,:,None]*self.linys[octave]
prx, pry = rx[:,:,:,None,:], ry[:,:,:,:,None]
nrx, nry = -torch.flip(prx, dims=[4]), -torch.flip(pry, dims=[3])
br = prx[:,:-1,:-1] + pry[:,:-1,:-1]
bl = nrx[:,:-1,1:] + pry[:,:-1,1:]
tr = prx[:,1:,:-1] + nry[:,1:,:-1]
tl = nrx[:,1:,1:] + nry[:,1:,1:]
grid_shape = self.grid_shapes[octave]
grids = self.br_masks[octave]*br + self.bl_masks[octave]*bl + self.tr_masks[octave]*tr + self.tl_masks[octave]*tl
noise = grids.permute(0,1,3,2,4).reshape((batch_size, self.shape[1]+grid_shape[0], self.shape[2]+grid_shape[1]))
A = torch.randint(0,grid_shape[0],(batch_size,), device=self.device, generator=self.generator)
B = torch.randint(0,grid_shape[1],(batch_size,), device=self.device, generator=self.generator)
noise = torch.stack([noise[n,a:a-grid_shape[0], b:b-grid_shape[1]] for n,(a,b) in enumerate(zip(A,B))])
return noise
def __call__(self, batch_size=None):
batch_size = self.shape[0] if batch_size is None else batch_size
shape = (batch_size,) + self.shape[1:]
noise = torch.zeros(shape, device=self.device)
for octave, factor in enumerate(self.factors):
noise += factor*self.perlin_noise(octave, batch_size=batch_size)
return noise
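# Illustrative usage (shapes assumed, not from the original source): combine two
# octaves of Perlin noise into a batch of 2 images of size 64x64. Requires a
# PyTorch version whose Tensor.uniform_ accepts a `generator` keyword.
if __name__ == '__main__':
    noise_gen = FractalPerlin2D(shape=(2, 64, 64),
                                resolutions=[(4, 4), (8, 8)],
                                factors=[1.0, 0.5])
    batch = noise_gen()
    print(batch.shape)  # torch.Size([2, 64, 64])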
|
nilq/baby-python
|
python
|
import json
from unittest.mock import Mock
import graphene
from django.conf import settings
from django.shortcuts import reverse
from django_countries import countries
from django_prices_vatlayer.models import VAT
from tests.utils import get_graphql_content
from saleor.core.permissions import MODELS_PERMISSIONS
from saleor.graphql.core.utils import clean_seo_fields, snake_to_camel_case
from saleor.graphql.product import types as product_types
from saleor.graphql.utils import get_database_id
from .utils import assert_no_permission
def test_query_authorization_keys(authorization_key, admin_api_client, user_api_client):
query = """
query {
shop {
authorizationKeys {
name
key
}
}
}
"""
response = admin_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['shop']
assert data['authorizationKeys'][0]['name'] == authorization_key.name
assert data['authorizationKeys'][0]['key'] == authorization_key.key
response = user_api_client.post(reverse('api'), {'query': query})
assert_no_permission(response)
def test_query_countries(user_api_client):
query = """
query {
shop {
countries {
code
country
}
}
}
"""
response = user_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['shop']
assert len(data['countries']) == len(countries)
def test_query_currencies(user_api_client):
query = """
query {
shop {
currencies
defaultCurrency
}
}
"""
response = user_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['shop']
assert len(data['currencies']) == len(settings.AVAILABLE_CURRENCIES)
assert data['defaultCurrency'] == settings.DEFAULT_CURRENCY
def test_query_name(user_api_client, site_settings):
query = """
query {
shop {
name
description
}
}
"""
response = user_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['shop']
assert data['description'] == site_settings.description
assert data['name'] == site_settings.site.name
def test_query_domain(user_api_client, site_settings):
query = """
query {
shop {
domain {
host
sslEnabled
url
}
}
}
"""
response = user_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['shop']
assert data['domain']['host'] == site_settings.site.domain
assert data['domain']['sslEnabled'] == settings.ENABLE_SSL
assert data['domain']['url']
def test_query_tax_rates(admin_api_client, user_api_client, vatlayer):
vat = VAT.objects.order_by('country_code').first()
query = """
query {
shop {
taxRates {
countryCode
standardRate
reducedRates {
rate
rateType
}
}
}
}
"""
response = admin_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['shop']
assert data['taxRates'][0]['countryCode'] == vat.country_code
assert data['taxRates'][0]['standardRate'] == vat.data['standard_rate']
assert len(data['taxRates'][0]['reducedRates']) == len(vat.data['reduced_rates'])
response = user_api_client.post(reverse('api'), {'query': query})
assert_no_permission(response)
def test_query_tax_rate(user_api_client, admin_api_client, vatlayer):
vat = VAT.objects.order_by('country_code').first()
query = """
query taxRate($countryCode: String!) {
shop {
taxRate(countryCode: $countryCode) {
countryCode
}
}
}
"""
variables = json.dumps({'countryCode': vat.country_code})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['shop']
assert data['taxRate']['countryCode'] == vat.country_code
response = user_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
assert_no_permission(response)
def test_query_languages(settings, user_api_client):
query = """
query {
shop {
languages {
code
language
}
}
}
"""
response = user_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['shop']
assert len(data['languages']) == len(settings.LANGUAGES)
def test_query_permissions(admin_api_client, user_api_client):
query = """
query {
shop {
permissions {
code
name
}
}
}
"""
response = admin_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['shop']
permissions = data['permissions']
permissions_codes = {permission.get('code') for permission in permissions}
assert len(permissions_codes) == len(MODELS_PERMISSIONS)
for code in permissions_codes:
assert code in MODELS_PERMISSIONS
response = user_api_client.post(reverse('api'), {'query': query})
assert_no_permission(response)
def test_query_navigation(user_api_client, site_settings):
query = """
query {
shop {
navigation {
main {
name
}
secondary {
name
}
}
}
}
"""
response = user_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
navigation_data = content['data']['shop']['navigation']
assert navigation_data['main']['name'] == site_settings.top_menu.name
assert navigation_data['secondary']['name'] == site_settings.bottom_menu.name
def test_clean_seo_fields():
title = 'lady title'
description = 'fantasy description'
data = {'seo':
{'title': title,
'description': description}}
clean_seo_fields(data)
assert data['seo_title'] == title
assert data['seo_description'] == description
def test_user_error_field_name_for_related_object(admin_api_client):
query = """
mutation {
categoryCreate(input: {name: "Test", parent: "123456"}) {
errors {
field
message
}
category {
id
}
}
}
"""
response = admin_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
data = content['data']['categoryCreate']['category']
assert data is None
error = content['data']['categoryCreate']['errors'][0]
assert error['field'] == 'parent'
def test_get_database_id(product):
info = Mock(
schema=Mock(
get_type=Mock(
return_value=Mock(graphene_type=product_types.Product))))
node_id = graphene.Node.to_global_id('Product', product.pk)
pk = get_database_id(info, node_id, product_types.Product)
assert int(pk) == product.pk
def test_snake_to_camel_case():
assert snake_to_camel_case('test_camel_case') == 'testCamelCase'
assert snake_to_camel_case('testCamel_case') == 'testCamelCase'
assert snake_to_camel_case(123) == 123
def test_mutation_returns_error_field_in_camel_case(admin_api_client, variant):
# costPrice is snake case variable (cost_price) in the backend
query = """
mutation testCamel($id: ID!, $cost: Decimal) {
productVariantUpdate(id: $id,
input: {costPrice: $cost, trackInventory: false}) {
errors {
field
message
}
productVariant {
id
}
}
}
"""
variables = json.dumps({
'id': graphene.Node.to_global_id('ProductVariant', variant.id),
'cost': '12.1234'})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
error = content['data']['productVariantUpdate']['errors'][0]
assert error['field'] == 'costPrice'
|
nilq/baby-python
|
python
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import lstm_rnn
from tests.common.gen_random import random_gaussian
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def lstmcell_run(shape, dtype, kernel_name="lstmcell", attrs={}):
# shape: batch_size, input_size, hidden_size
batch_size = shape[0]
input_size = shape[1]
hidden_size = shape[2]
W_ih_shape = (4 * hidden_size, input_size,)
W_hh_shape = (4 * hidden_size, hidden_size,)
b_shape = (4 * hidden_size,)
c_prev_shape = (batch_size, hidden_size,)
h_prev_shape = (batch_size, hidden_size,)
x_shape = (batch_size, input_size,)
print("lstmcell - ")
op_attrs = []
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(lstm_rnn.lstmcell,
[x_shape, h_prev_shape, c_prev_shape, W_ih_shape, W_hh_shape, b_shape, b_shape, ],
[dtype, dtype, dtype, dtype, dtype, dtype, dtype],
op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
W_hh, W_ih, b_hh, b_ih, c, c_prev, expect_c, expect_h, h, h_prev, x = gen_lstmcell_data(W_hh_shape,
W_ih_shape,
b_shape,
c_prev_shape,
dtype, h_prev_shape,
hidden_size,
x_shape)
return mod, (expect_c, expect_h), {"args": (x, h_prev, c_prev, W_ih, W_hh, b_ih, b_hh, h, c), 'outputs': (-2, -1), 'tuning': False}
else:
return mod
else:
mod = utils.op_build_test(lstm_rnn.lstmcell,
[x_shape, h_prev_shape, c_prev_shape, W_ih_shape, W_hh_shape, b_shape, b_shape, ],
[dtype, dtype, dtype, dtype, dtype, dtype, dtype],
op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs)
W_hh, W_ih, b_hh, b_ih, c, c_prev, expect_c, expect_h, h, h_prev, x = gen_lstmcell_data(W_hh_shape, W_ih_shape,
b_shape, c_prev_shape,
dtype, h_prev_shape,
hidden_size, x_shape)
h, c = utils.mod_launch(mod, (x, h_prev, c_prev, W_ih, W_hh, b_ih, b_hh, h, c), outputs=(-2, -1),
expect=(expect_h, expect_c))
assert_res = True
assert_res &= compare_tensor(h, expect_h, rtol=5e-02, atol=5e-02, equal_nan=True)
print("act_h_output = ", h)
print("expect_h_output = ", expect_h)
print("LSTM_cell assert_res_h = ", assert_res)
assert_res &= compare_tensor(c, expect_c, rtol=5e-02, atol=5e-02, equal_nan=True)
print("act_c_output = ", c)
print("expect_c_output = ", expect_c)
print("LSTM_cell assert_res = ", assert_res)
# input("Press ENTER...")
return (x, h_prev, c_prev, W_ih, W_hh, b_ih, b_hh), (h, c), (expect_h, expect_c), assert_res
def gen_lstmcell_data(W_hh_shape, W_ih_shape, b_shape, c_prev_shape, dtype, h_prev_shape, hidden_size, x_shape):
W_ih = random_gaussian(W_ih_shape, miu=0.1, sigma=0.1).astype(dtype)
W_hh = random_gaussian(W_hh_shape, miu=0.1, sigma=0.1).astype(dtype)
b_ih = random_gaussian(b_shape, miu=0.1, sigma=0.1).astype(dtype)
b_hh = random_gaussian(b_shape, miu=0.1, sigma=0.1).astype(dtype)
c_prev = random_gaussian(c_prev_shape, miu=0.1, sigma=0.1).astype(dtype)
h_prev = random_gaussian(h_prev_shape, miu=0.1, sigma=0.1).astype(dtype)
x = random_gaussian(x_shape, miu=0.1, sigma=0.1).astype(dtype)
np_hx = np.concatenate((x, h_prev), axis=1)
np_wx = np.concatenate((W_ih, W_hh), axis=1)
np_t = np.dot(np_hx, np_wx.transpose(1, 0)) + b_ih + b_hh
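# np_t packs the four gates along the hidden axis in the order
# input (i), forget (f), candidate cell (c_), output (o), hidden_size each.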
np_i = sigmoid(np_t[:, 0: hidden_size])
np_f = sigmoid(np_t[:, hidden_size: 2 * hidden_size])
np_c_ = np.tanh(np_t[:, 2 * hidden_size: 3 * hidden_size])
np_o = sigmoid(np_t[:, 3 * hidden_size: 4 * hidden_size])
expect_c = np.add(np.multiply(np_f, c_prev), np.multiply(np_i, np_c_))
expect_h = np.multiply(np_o, np.tanh(expect_c))
h = np.full(h_prev.shape, np.nan, dtype)
c = np.full(c_prev.shape, np.nan, dtype)
return W_hh, W_ih, b_hh, b_ih, c, c_prev, expect_c, expect_h, h, h_prev, x
def rnn_tanh_cell_run(shape, dtype, kernel_name="rnn_tanh_cell", attrs={}):
# shape: batch_size, input_size, hidden_size
batch_size = shape[0]
input_size = shape[1]
hidden_size = shape[2]
W_ih_shape = (hidden_size, input_size,)
W_hh_shape = (hidden_size, hidden_size,)
b_shape = (hidden_size,)
h_prev_shape = (batch_size, hidden_size,)
x_shape = (batch_size, input_size,)
print("rnn_tanh_cell - ")
op_attrs = []
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(lstm_rnn.rnn_tanh_cell,
[x_shape, h_prev_shape, W_ih_shape, W_hh_shape, b_shape, b_shape, ],
[dtype, dtype, dtype, dtype, dtype, dtype],
op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
W_hh, W_ih, b_hh, b_ih, expect, h, h_prev, x = gen_rnn_tanh_cell_data(W_hh_shape, W_ih_shape, b_shape,
dtype, h_prev_shape, x_shape)
return mod, expect, (x, h_prev, W_ih, W_hh, b_ih, b_hh, h)
else:
return mod
else:
mod = utils.op_build_test(lstm_rnn.rnn_tanh_cell,
[x_shape, h_prev_shape, W_ih_shape, W_hh_shape, b_shape, b_shape, ],
[dtype, dtype, dtype, dtype, dtype, dtype],
op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs)
W_hh, W_ih, b_hh, b_ih, expect, h, h_prev, x = gen_rnn_tanh_cell_data(W_hh_shape, W_ih_shape, b_shape, dtype,
h_prev_shape, x_shape)
h = utils.mod_launch(mod, (x, h_prev, W_ih, W_hh, b_ih, b_hh, h))
assert_res = compare_tensor(h, expect, rtol=5e-02, atol=5e-02, equal_nan=True)
# print("act_h_output = ", h)
# print("expect_h_output = ", expect)
print("RNN_tanh_cell assert_res = ", assert_res)
return (x, h_prev, W_ih, W_hh, b_ih, b_hh), h, expect, assert_res
def gen_rnn_tanh_cell_data(W_hh_shape, W_ih_shape, b_shape, dtype, h_prev_shape, x_shape):
x = random_gaussian(x_shape, miu=0.1, sigma=0.1).astype(dtype)
h_prev = random_gaussian(h_prev_shape, miu=0.1, sigma=0.1).astype(dtype)
W_ih = random_gaussian(W_ih_shape, miu=0.1, sigma=0.1).astype(dtype)
W_hh = random_gaussian(W_hh_shape, miu=0.1, sigma=0.1).astype(dtype)
b_ih = random_gaussian(b_shape, miu=0.1, sigma=0.1).astype(dtype)
b_hh = random_gaussian(b_shape, miu=0.1, sigma=0.1).astype(dtype)
np_igates = np.dot(x, W_ih.transpose(1, 0)) + b_ih
np_hgates = np.dot(h_prev, W_hh.transpose(1, 0)) + b_hh
expect = np.tanh(np_igates + np_hgates)
h = np.full(h_prev.shape, np.nan, dtype)
return W_hh, W_ih, b_hh, b_ih, expect, h, h_prev, x
def rnn_relu_cell_run(shape, dtype, kernel_name="rnn_relu_cell", attrs={}):
# shape: batch_size, input_size, hidden_size
batch_size = shape[0]
input_size = shape[1]
hidden_size = shape[2]
W_ih_shape = (hidden_size, input_size,)
W_hh_shape = (hidden_size, hidden_size,)
b_shape = (hidden_size,)
h_prev_shape = (batch_size, hidden_size,)
x_shape = (batch_size, input_size,)
print("rnn_relu6_cell - ")
op_attrs = []
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(lstm_rnn.rnn_relu_cell,
[x_shape, h_prev_shape, W_ih_shape, W_hh_shape, b_shape, b_shape, ],
[dtype, dtype, dtype, dtype, dtype, dtype],
op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
W_hh, W_ih, b_hh, b_ih, expect, h, h_prev, x = gen_rnn_relu_cell_data(W_hh_shape, W_ih_shape, b_shape,
dtype, h_prev_shape, x_shape)
return mod, expect, (x, h_prev, W_ih, W_hh, b_ih, b_hh, h)
else:
return mod
else:
mod = utils.op_build_test(lstm_rnn.rnn_relu_cell,
[x_shape, h_prev_shape, W_ih_shape, W_hh_shape, b_shape, b_shape, ],
[dtype, dtype, dtype, dtype, dtype, dtype],
op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs)
W_hh, W_ih, b_hh, b_ih, expect, h, h_prev, x = gen_rnn_relu_cell_data(W_hh_shape, W_ih_shape, b_shape, dtype,
h_prev_shape, x_shape)
h = utils.mod_launch(mod, (x, h_prev, W_ih, W_hh, b_ih, b_hh, h))
assert_res = compare_tensor(h, expect, rtol=5e-02, atol=5e-02, equal_nan=True)
# print("act_h_output = ", h)
# print("expect_h_output = ", expect)
print("RNN_relu6_cell assert_res = ", assert_res)
return (x, h_prev, W_ih, W_hh, b_ih, b_hh), h, expect, assert_res
def gen_rnn_relu_cell_data(W_hh_shape, W_ih_shape, b_shape, dtype, h_prev_shape, x_shape):
x = random_gaussian(x_shape, miu=0.1, sigma=0.1).astype(dtype)
h_prev = random_gaussian(h_prev_shape, miu=0.1, sigma=0.1).astype(dtype)
W_ih = random_gaussian(W_ih_shape, miu=0.1, sigma=0.1).astype(dtype)
W_hh = random_gaussian(W_hh_shape, miu=0.1, sigma=0.1).astype(dtype)
b_ih = random_gaussian(b_shape, miu=0.1, sigma=0.1).astype(dtype)
b_hh = random_gaussian(b_shape, miu=0.1, sigma=0.1).astype(dtype)
np_igates = np.dot(x, W_ih.transpose(1, 0)) + b_ih
np_hgates = np.dot(h_prev, W_hh.transpose(1, 0)) + b_hh
np_t = np_igates + np_hgates
# relu6
zero = np.full(np_t.shape, 0, dtype)
six = np.full(np_t.shape, 6, dtype)
max = np.maximum(np_t, zero)
expect = np.minimum(max, six)
h = np.full(h_prev.shape, np.nan, dtype)
return W_hh, W_ih, b_hh, b_ih, expect, h, h_prev, x
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
# scripts/manifest/lib/verify_input.py
# (c) 2020 Sam Caldwell. See LICENSE.txt.
#
# input verification functions.
#
"""
from .hasher import SUPPORTED_HASH_ALGORITHMS
def url_string(s: str) -> bool:
"""
Verify the given string (s) is a URL.
:param s: string
:return: bool
"""
# ToDo: Verify url pattern
return type(s) is str
def hash_string(s: str) -> bool:
"""
Verify the manifest hash string for a given artifact.
:param s: str
:return: bool
"""
# ToDo: verify hash pattern
return type(s) is str
def alg_string(s: str) -> bool:
"""
Verify the manifest alg parameter (which
will determine which hashing algorithm
is used).
:param s: str
:return: bool
"""
return type(s) is str and s in SUPPORTED_HASH_ALGORITHMS
def verify_hash_bool(b: bool) -> bool:
"""
Verify the manifest verify_hash flag (boolean)
:param b:
:return:
"""
return type(b) is bool
def download_bool(b: bool) -> bool:
"""
Verify the manifest download flag (boolean)
:param b: bool
:return: bool
"""
return type(b) is bool
|
nilq/baby-python
|
python
|
from flask import Flask, request, jsonify
import customs.orchestrator as orch
import customs.validator as valid
app = Flask(__name__)
@app.route('/customs', methods=['POST'])
def customs():
content = request.get_json(silent=True)  # silent=True returns None instead of raising on malformed JSON
check = valid.is_json_correct(content)
if check is True:
response = orch.process(content)
else:
response = check
return response
if __name__ == '__main__':
app.run(debug=True)
|
nilq/baby-python
|
python
|
a = float(input("Zadej velikost polomeru kruhu v cm: "))
obsah = a*a*3.14
obvod = 2*3.14*a
print("Obsah je", "", obsah, "cm")
print("Obvod je", "", obvod, "cm")
|
nilq/baby-python
|
python
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
import os
print(os.getcwd())
import sys
from local_methods import parse_job_err
# +
#| - Import Modules
import os
import sys
import time
import pickle
import subprocess
from pathlib import Path
# from contextlib import contextmanager
import numpy as np
from ase import io
# #########################################################
from vasp.vasp_methods import parse_incar, read_incar
from vasp.parse_oszicar import parse_oszicar
# from vasp.vasp_methods import
# #########################################################
from dft_job_automat.compute_env import ComputerCluster
# #########################################################
from proj_data import compenv
from methods import temp_job_test, cwd
#__|
# -
path_full_i = "/home/raulf2012/rclone_temp/PROJ_irox_oer/dft_workflow/run_slabs/run_o_covered/out_data/dft_jobs/slac/v2blxebixh/100/01_attempt/_01"
compenv_i = "slac"
# +
path = path_full_i
compenv = compenv_i
# def parse_job_err(path, compenv=None):
# """
# """
#| - parse_job_err
# print(path)
status_dict = {
"timed_out": None,
"error": None,
"error_type": None,
"brmix_issue": None,
}
if compenv is None:
compenv = os.environ["COMPENV"]
# | - Parsing SLAC job
print("TEMP 00")
if compenv == "slac":
job_out_file_path = os.path.join(path, "job.out")
my_file = Path(job_out_file_path)
if my_file.is_file():
with open(job_out_file_path, 'r') as f:
lines = f.readlines()
# print("This spot here now 0")
for line in lines:
if "job killed after reaching LSF run time limit" in line:
# print("Found following line in job.err")
# print("job killed after reaching LSF run time limit")
status_dict["timed_out"] = True
break
#__|
# | - Parsing error file
job_err_file_path = os.path.join(path, "job.err")
my_file = Path(job_err_file_path)
if my_file.is_file():
with open(job_err_file_path, 'r') as f:
lines = f.readlines()
# else:
for line in lines:
if "DUE TO TIME LIMIT" in line:
status_dict["timed_out"] = True
if "Traceback (most recent call last):" in line:
status_dict["error"] = True
if "ValueError: could not convert string to float" in line:
status_dict["error"] = True
status_dict["error_type"] = "calculation blown up"
#__|
# | - Parsing out file
#| - old parser here, keeping for now
if status_dict["error"] is True:
job_out_file_path = os.path.join(path, "job.out")
my_file = Path(job_out_file_path)
if my_file.is_file():
with open(job_out_file_path, 'r') as f:
lines = f.readlines()
for line in lines:
err_i = "VERY BAD NEWS! internal error in subroutine SGRCON:"
if err_i in line:
status_dict["error_type"] = "Error in SGRCON (symm error)"
break
#__|
my_file_0 = Path(os.path.join(path, "job.out"))
my_file_1 = Path(os.path.join(path, "job.out.short"))
if my_file_0.is_file():
job_out_file = my_file_0
elif my_file_1.is_file():
job_out_file = my_file_1
else:
job_out_file = None
if job_out_file is not None:
with open(job_out_file, 'r') as f:
lines = f.readlines()
#| - Checking for BRMIX error
for line in lines:
err_i = "BRMIX: very serious problems"
if err_i in line:
status_dict["brmix_issue"] = True
status_dict["error"] = True
# break
#__|
#__|
# return(status_dict)
#__|
# -
status_dict
# +
# parse_job_err(path_full_i, compenv=compenv_i)
# -
assert False
# + active=""
#
#
#
#
#
#
#
# + jupyter={"source_hidden": true}
from local_methods import temp_job_test
temp_job_test()
|
nilq/baby-python
|
python
|
#import random package
import random
#get random number between 0 and 3 (inclusive)
#print(random.randint(0, 1))
choice = random.randint(0,3)
if choice == 0:
print("Tails")
elif choice == 1:
print("Heads")
else:
print("Your quarter fell in the gutter.")
#get random number between 0 and 10000
# print(random.randint(0, 10000))
|
nilq/baby-python
|
python
|
# Copyright (c) 2018, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''Internal utilities'''
import mmap
def map_file(file_name, new_size=None):
'''Map an existing file into memory. If new_size is specified the
file is truncated or extended to that size.
Returns a Python mmap object.
'''
with open(file_name, 'rb+') as f:
if new_size is not None:
f.truncate(new_size)
return mmap.mmap(f.fileno(), 0)
# Method decorator. To be used for calculations that will always deliver the same result.
# The method cannot take any arguments and should be accessed as an attribute.
class cachedproperty(object):
def __init__(self, f):
self.f = f
def __get__(self, obj, type_):
obj = obj or type_
value = self.f(obj)
setattr(obj, self.f.__name__, value)
return value
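# A small illustrative demo (not part of the original module): map_file on a
# throwaway file, and cachedproperty computing a value exactly once.
if __name__ == '__main__':
    import os
    import tempfile

    fd, name = tempfile.mkstemp()
    os.close(fd)
    m = map_file(name, new_size=16)
    m[:4] = b'test'
    assert m[:4] == b'test'
    m.close()
    os.unlink(name)

    class Demo(object):
        calls = 0

        @cachedproperty
        def value(self):
            Demo.calls += 1
            return 42

    d = Demo()
    assert d.value == 42 and d.value == 42
    assert Demo.calls == 1  # the second access hit the cached attribute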
|
nilq/baby-python
|
python
|
from mopidy import backend
class KitchenPlaybackProvider(backend.PlaybackProvider):
def translate_uri(self, uri):
return self.backend.library.get_playback_uri(uri)
|
nilq/baby-python
|
python
|
url = "https://lenta.ru/parts/news/"
user_agent = "text"
|
nilq/baby-python
|
python
|
# Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
# For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
# Bonus: Can you do this in one pass?
def k_sum_in_list(k, nums):  # single pass; `nums` avoids shadowing the builtin list
needed = {}
for n in nums:
if needed.get(n, False):
return True
else:
needed[k-n] = True
return False
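# Quick illustrative check against the example in the prompt:
if __name__ == "__main__":
    assert k_sum_in_list(17, [10, 15, 3, 7]) is True
    assert k_sum_in_list(19, [10, 15, 3, 7]) is False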
|
nilq/baby-python
|
python
|
"""Test suits for Anytown Mapper."""
import math
import os
import unittest
import sqlite3
import tempfile
from main import app
from main import get_db
from main import init_db
from anytownlib.kavrayskiy import coords_to_kavrayskiy
from anytownlib.kavrayskiy import make_global_level_image
from anytownlib.map_cache import fetch_from_map_cache
from anytownlib.map_cache import insert_into_map_cache
from anytownlib.map_cache import update_map_cache
from anytownlib.mapmaker import format_coords
from anytownlib.user_profiles import get_user_info
from anytownlib.user_profiles import get_user_location_history
from anytownlib.user_profiles import update_user
from anytownlib.user_profiles import update_user_location_history
class TestAnytownLibKavrayskiy(unittest.TestCase):
"""Test anytownlib kavrayskiy functions."""
def setUp(self):
"""Setup method."""
self.resize_factor = 0.05
def test_coords_to_kavrayskiy(self):
"""Test coords_to_kavrayskiy method."""
x, y = coords_to_kavrayskiy((0, 0))
self.assertEquals(x, 0)
self.assertEquals(y, 0)
x, y = coords_to_kavrayskiy((90, 0))
self.assertEquals(x, 0)
self.assertAlmostEquals(y, math.pi / 2)
x, y = coords_to_kavrayskiy((-90, 90))
self.assertAlmostEquals(x, math.pi * math.sqrt(3) / 8)
self.assertAlmostEquals(y, -math.pi / 2)
x, y = coords_to_kavrayskiy((0, -45))
self.assertAlmostEquals(x, -math.pi * math.sqrt(3) / 8)
self.assertEquals(y, 0)
def test_make_global_level_image(self):
"""Test make_global_level_image method."""
map_0_0_im = make_global_level_image(
(0, 0), resize_factor=self.resize_factor)
self.assertEquals(map_0_0_im.getpixel(
(int(1732 * self.resize_factor), int(1000 * self.resize_factor))
), (255, 0, 0, 255))
map_south_pole = make_global_level_image(
(-90, 0), resize_factor=self.resize_factor)
self.assertEquals(map_south_pole.getpixel(
(int(1732 * self.resize_factor), int(1999 * self.resize_factor))
), (255, 0, 0, 255))
map_cu = make_global_level_image(
(40.1164, -88.2434), resize_factor=self.resize_factor)
self.assertEquals(map_cu.getpixel(
(int(948.72 * self.resize_factor),
int(554.26 * self.resize_factor))
), (255, 0, 0, 255))
class TestAnytownLibMapCacheAndUserProfiles(unittest.TestCase):
"""Test anytownlib map_cache and user_profiles functions."""
def setUp(self):
"""Setup method."""
self.db_fd, app.config['DATABASE'] = tempfile.mkstemp()
app.config['TESTING'] = True
self.app = app.test_client()
with app.app_context():
init_db()
self.place1 = {
'place_id': u'PID1',
'coords': (-12, 34),
'city': u'Kaningina Reserve',
'region': u'Nkhata Bay',
'country_name': u'Malawi',
'country_code': u'MW'
}
self.place1_updated_city = u'Village'
self.place1_updated_region = u'Deforested Area'
self.place2 = {
'place_id': u'PID2',
'coords': (56, -78.9),
'city': u'Flaherty Island',
'region': u'Nunavut',
'country_name': u'Canada',
'country_code': u'CA'
}
self.user1 = {
'user_id': '1',
'name': 'Al Bert',
'email': 'albert@big.al'
}
self.user2 = {
'user_id': '2',
'name': 'Can Dee',
'email': 'ilove@candee.ee'
}
def tearDown(self):
"""Teardown method."""
os.close(self.db_fd)
os.unlink(app.config['DATABASE'])
def test_functions(self):
"""Test functions in the map_cache and user_profiles modules.
First test fetch, insert, update methods from map_cache.
Then test get_user_info, update_user, and update_user_location_history
from user_profiles.
"""
with app.app_context():
# START map_cache testing
self.assertIsNone(
fetch_from_map_cache(get_db(), self.place1['place_id'],
test=True))
self.assertIsNone(
fetch_from_map_cache(get_db(), self.place2['place_id'],
test=True))
insert_into_map_cache(get_db(), test=True, **self.place1)
place1 = fetch_from_map_cache(get_db(), self.place1['place_id'],
test=True)
self.assertIsNotNone(place1)
self.assertEquals(place1['city'], self.place1['city'])
self.assertEquals(place1['region'], self.place1['region'])
self.assertIsNone(
fetch_from_map_cache(get_db(), self.place2['place_id'],
test=True))
insert_into_map_cache(get_db(), test=True, **self.place2)
self.assertIsNotNone(
fetch_from_map_cache(get_db(), self.place1['place_id'],
test=True))
place2 = fetch_from_map_cache(get_db(), self.place2['place_id'],
test=True)
self.assertIsNotNone(place2)
self.assertEquals(place2['city'], self.place2['city'])
self.assertEquals(place2['region'], self.place2['region'])
self.place1['city'] = self.place1_updated_city
self.place1['region'] = self.place1_updated_region
update_map_cache(get_db(), test=True, **self.place1)
place1 = fetch_from_map_cache(get_db(), self.place1['place_id'],
test=True)
self.assertIsNotNone(place1)
self.assertEquals(place1['city'], self.place1_updated_city)
self.assertEquals(place1['region'], self.place1_updated_region)
self.assertIsNotNone(
fetch_from_map_cache(get_db(), self.place2['place_id'],
test=True))
self.assertRaises(
sqlite3.IntegrityError,
lambda: insert_into_map_cache(
get_db(), test=True, **self.place2))
# START user_profile testing
self.assertIsNone(get_user_info(get_db(), self.user1['user_id'],
test=True))
self.assertIsNone(get_user_info(get_db(), self.user2['user_id'],
test=True))
update_user(get_db(), test=True, **self.user1)
self.assertIsNotNone(get_user_info(get_db(), '1', test=True))
self.assertIsNone(get_user_info(get_db(), '2', test=True))
update_user_location_history(
get_db(), self.user1['user_id'], self.place1['place_id'],
test=True)
self.assertRaises(
sqlite3.IntegrityError,
lambda: update_user_location_history(
get_db(), self.user1['user_id'], 'THIS DOESNT EXIST',
test=True))
self.assertRaises(
sqlite3.IntegrityError,
lambda: update_user_location_history(
get_db(), self.user2['user_id'], self.place1['place_id'],
test=True))
self.assertEquals(
len(get_user_location_history(
get_db(), self.user1['user_id'], 0, test=True)), 1)
update_user(get_db(), test=True, **self.user2)
self.assertIsNotNone(get_user_info(get_db(), '1', test=True))
self.assertIsNotNone(get_user_info(get_db(), '2', test=True))
update_user_location_history(
get_db(), self.user1['user_id'], self.place2['place_id'],
test=True)
update_user_location_history(
get_db(), self.user2['user_id'], self.place2['place_id'],
test=True)
self.assertRaises(
sqlite3.IntegrityError,
lambda: update_user_location_history(
get_db(), self.user1['user_id'], 'THIS DOESNT EXIST',
test=True))
self.assertRaises(
sqlite3.IntegrityError,
lambda: update_user_location_history(
get_db(), self.user2['user_id'], 'THIS DOESNT EXIST',
test=True))
self.assertEquals(
len(get_user_location_history(
get_db(), self.user1['user_id'], 0, test=True)), 2)
self.assertEquals(
len(get_user_location_history(
get_db(), self.user2['user_id'], 0, test=True)), 1)
class TestAnytownLibMapmaker(unittest.TestCase):
"""Test anytownlib mapmaker functions."""
def test_format_coords(self):
"""Test format_coords method."""
self.assertEquals(format_coords((0, 0)), u'0.0\u00b0 N, 0.0\u00b0 E')
self.assertEquals(format_coords(
(44.9, -93.5)), u'44.9\u00b0 N, 93.5\u00b0 W')
self.assertEquals(format_coords(
(-37.8, 145)), u'37.8\u00b0 S, 145.0\u00b0 E')
|
nilq/baby-python
|
python
|
# flake8: noqa
# Import all APIs into this package.
# If you have many APIs here with many many models used in each API this may
# raise a `RecursionError`.
# In order to avoid this, import only the API that you directly need like:
#
# from .api.chamber_schedule_api import ChamberScheduleApi
#
# or import this package, but before doing it, use:
#
# import sys
# sys.setrecursionlimit(n)
# Import APIs into API package:
from GrowCabApi.api.chamber_schedule_api import ChamberScheduleApi
from GrowCabApi.api.chambers_api import ChambersApi
from GrowCabApi.api.configurations_api import ConfigurationsApi
from GrowCabApi.api.measure_group_api import MeasureGroupApi
from GrowCabApi.api.sensor_api import SensorApi
from GrowCabApi.api.sensors_api import SensorsApi
|
nilq/baby-python
|
python
|
from flask import Flask,render_template,request,redirect,url_for
import os, git
def styleSheet():
stylesheet = "/static/styles/light_theme.css"
if request.cookies.get('darktheme') == 'True':
stylesheet = "/static/styles/dark_theme.css"
return stylesheet
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'data.sqlite'),
)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# On every reload ... do this
@app.before_request
def before_request():
global stylesheet
stylesheet = styleSheet()
# Allows to render the favicon in all the website
@app.route('/favicon.ico')
def favicon():
return redirect(url_for('static',filename='img/logo.ico'))
# Principal link
@app.route('/', endpoint='index')
def index():
return render_template('index.html',style=stylesheet)
# Auto reload for pythonanywhere
@app.route('/update_server', methods=['POST'])
def webhook():
if request.method == 'POST':
repo = git.Repo('/home/JulienDv/Projet2-Site-Web')
origin = repo.remotes.origin
origin.pull()
return 'Updated PythonAnywhere successfully', 200
else:
return 'Wrong event type', 400
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html',style=stylesheet), 404
from webSite import db_init
db_init.init_app(app)
from . import pages
app.register_blueprint(pages.bp)
return app
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
try:
from scapy.all import *
#from scapy.layers.dot11 import Dot11, Dot11Elt, Dot11Auth, Dot11Beacon, Dot11ProbeReq, Dot11ProbeResp, RadioTap
from gui.tabulate_scan_results import Gui
from multiprocessing import Process
import time, threading, random, Queue
import signal, os
import scapy_ex
from Tkinter import Toplevel
except ImportError, e:
pass
class WifiScanner:
def __init__(self, q, iface, target):
self.iface = iface
self.queue = q
global networks
self.count = 0
if target == "AP":
sniff(iface=iface, lfilter = lambda x: (x.haslayer(Dot11Beacon) or x.haslayer(Dot11ProbeResp)), timeout=0.2, prn=lambda x: self.add_networks(x) )
elif target == "Client":
sniff(iface=iface, lfilter= lambda x: (x.haslayer(Dot11ProbeReq) or x.haslayer(Dot11ProbeResp) or x.haslayer(Dot11Auth)), timeout=0.2, prn=lambda x: self.add_stations(x))
elif target == "position":
sniff(iface=iface, lfilter = lambda x: x.haslayer(RadioTap), timeout=0.2, prn=lambda x: self.get_positions(x))
def get_positions(self, pkt):
station_mgmt_types = (0,2,4)
signal = str(pkt[RadioTap].dBm_AntSignal) + " dBm"
if pkt.type == 0 and pkt.subtype == 8:
if pkt.addr2 not in networks :
self.add_networks(pkt)
elif pkt.type == 0 and pkt.subtype in station_mgmt_types:
if pkt.addr1 not in networks :
self.add_stations(pkt)
else:
pass
# Function to handle frames related to Access Points and handle their scanned results.
def add_networks(self, pkt):
try:
essid = pkt[Dot11Elt].info if "\x00" not in pkt[Dot11Elt].info and pkt[Dot11Elt].info != '' else 'Hidden SSID'
channel = int(ord(pkt[Dot11Elt:3].info))
except IndexError, e:
print "Error:", e
return
self.enc=''
self.cipher = ''
self.auth = ''
bssid = pkt[Dot11].addr3
signal = str(pkt[RadioTap].dBm_AntSignal) + " dBm"
if bssid not in networks:
#known_networks[bssid] = (essid, channel)
networks.append(bssid)
enctype = self.getEncType(pkt)
print "{0:2}\t{1:20}\t{2:20}\t{3:8}\t{4:8}".format(channel, essid, bssid, signal, enctype)
#msg = "%d %s %s %s" % (channel, essid, bssid, signal)
msg = ["ap", channel, essid, bssid, signal, enctype, self.iface]
self.queue.put(msg)
#self.gui.add_row(channel, signal, bssid, essid)
def getEncType(self, packet):
if packet.hasflag('cap', 'privacy'):
elt_rsn = packet[Dot11].rsn()
if elt_rsn:
self.enc = elt_rsn.enc
self.cipher = elt_rsn.cipher
self.auth = elt_rsn.auth
return self.enc + "/" + self.cipher + "-" + self.auth
else:
self.enc = 'WEP'
self.cipher = 'WEP'
self.auth = ''
return self.enc + "/" + self.cipher
else:
self.enc = 'OPN'
self.cipher = ''
self.auth = ''
return self.enc
def add_stations(self, pkt):
self.count += 1
signal = str(pkt[RadioTap].dBm_AntSignal) + " dBm"
if pkt.haslayer(Dot11ProbeReq):
dstmac = pkt.addr1
mac = pkt.addr2
if mac not in networks:
networks.append(mac)
if pkt.info == "": ssid = "BROADCAST"
else: ssid = pkt.info
print "%s is probing %s %s: %s" % (mac,dstmac,ssid, signal)
msg = ["sta", 0, ssid, mac, signal, "None", self.iface]
self.queue.put(msg)
'''
if pkt.haslayer(Dot11ProbeResp):
dstmac = pkt.addr1
bssid = pkt.addr2
ssid = pkt.info
print "%s (%s) Probe Response to %s: %s" % (ssid,bssid,dstmac, signal)
essid = ''
ap_bssid = ''
mode = ''
try:
essid = pkt[Dot11].essid()
ap_bssid = pkt[Dot11].ap_bssid()
except e:
print e
if pkt[Dot11].hasflag('FCfield', 'to-DS'):
mode = pkt[Dot11].hasflag('FCfield', 'pw-mgt')
print self.count, "\t", essid, "\t", ap_bssid, "\t", mode'''
class ThreadedClient(object):
#def __init__(self, parent, iface, canvas, target):
def main(self, parent, iface, canvas, target):
self.parent = parent
# assign Wireless Interface
self.iface = iface
# assign scan target
self.target = target
# initiate a global variable networks to store obtained networks
global networks
networks = []
# Making sure that our wireless interface isn't down.
try:
os.system("sudo ifconfig %s up" % self.iface)
except OSError:
pass
# Create the queue
self.queue = Queue.Queue()
# Set up Gui part
self.gui = Gui(parent, self.queue, self.endApplication, canvas)
self.running = True
self.multiThreader()
signal.signal(signal.SIGINT, self.stop_channel_hop)
# Start the periodic call in the GUI to check the queue
self.periodicCall()
def multiThreader(self):
# start a thread to run sniffer
self.thread1 = threading.Thread(target=self.workerThread1)
self.thread1.start()
# start a thread to run channel hopper
self.thread2 = threading.Thread(target=self.channel_hopper, args=(self.iface,))
self.thread2.start()
def periodicCall(self):
""" Check every 200 ms if there is something new in the queue. """
self.parent.after(200, self.periodicCall)
self.gui.processIncoming()
if not self.running:
try:
self.thread1.join()
self.thread2.join()
except Exception, e: print e
def workerThread1(self):
while self.running:
t = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#print "Sniffer started at: %s" %t
scan = WifiScanner(self.queue, self.iface, self.target)
def endApplication(self):
self.running = False
t = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print "Sniffer stopped at: %s" %t
def channel_hopper(self, iface):
while self.running:
try:
channel = random.randrange(1,13)
os.system("sudo iwconfig %s channel %d" % (iface, channel))
time.sleep(1.5)
except KeyboardInterrupt:
break
def stop_channel_hop(self, signal, frame):
# calls endApplication function
self.endApplication()
|
nilq/baby-python
|
python
|
from pyibex import Interval, tan, bwd_imod, atan
import math
def Cmod(x,y):
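# bwd_imod contracts xx and yy jointly so that xx = yy + k*2*pi for some
# integer k; Cmod returns the contracted yy.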
xx = Interval(x)
yy = Interval(y)
bwd_imod(xx,yy,2*math.pi)
return yy
def Catan2(x,y,th):
iZeroPi2 = Interval(0)| Interval.PI/2.
iPi2Pi = Interval.PI/2. | Interval.PI
if x.is_empty() or y.is_empty() or th.is_empty():
return Interval.EMPTY_SET, Interval.EMPTY_SET, Interval.EMPTY_SET
if x.is_subset(Interval.POS_REALS) and y.is_subset(Interval.POS_REALS) and th.is_subset(iZeroPi2):
th_tmp = Cmod(th, iZeroPi2)
tan_lb = tan(Interval(th_tmp.lb()))
tan_ub = Interval(1e10) if th_tmp.ub() == Interval.HALF_PI.ub() else tan(Interval(th_tmp.ub()))
tanTh = tan_lb | tan_ub
# xx, yy = Interval(x), Interval(y)
# bwd_mul(yy, tan(th_tmp), xx)
xx = x & y/tanTh#tan(th_tmp)
yy = y & x*tanTh#tan(th_tmp)
thr = th_tmp & atan(y/x)
# print("RESULT : ",x, y, th_tmp, tan(th_tmp), tan_lb, tan_ub, y/tanTh)
if xx.is_empty() or yy.is_empty() or thr.is_empty():
return Interval.EMPTY_SET, Interval.EMPTY_SET, Interval.EMPTY_SET
# print(yy,xx,thr)
bwd_imod(th, thr, 2*Interval.PI.ub())
return xx, yy, thr
else:
# th_tmp = Interval(th)
# bwd_imod( th_tmp, th ,2*math.pi)
x1 = x & Interval.POS_REALS
y1 = y & Interval.POS_REALS
th1 = Cmod(th,iZeroPi2)
x1, y1, th1 = Catan2(x1, y1, th1)
th11 = Interval(th)
bwd_imod(th11, th1, 2*Interval.PI.ub())
x2 = x & Interval.POS_REALS
y2 = y & Interval.NEG_REALS
th2 = -Cmod(th, -iZeroPi2)
x2, y2, th2 = Catan2(x2, -y2, th2)
th22 = Interval(th)
bwd_imod(th22, -th2, 2*Interval.PI.ub())
x3 = x & Interval.NEG_REALS
y3 = y & Interval.NEG_REALS
th3 = Interval.PI + Cmod(th, -iPi2Pi)
x3, y3, th3 = Catan2(-x3, -y3, (th3 & iZeroPi2) )
th33 = Interval(th)
bwd_imod(th33, th3 - Interval.PI, 2*Interval.PI.ub())
#
x4 = x & Interval.NEG_REALS
y4 = y & Interval.POS_REALS
th4 = Interval.PI - Cmod( th,iPi2Pi)
x4, y4, th4 = Catan2(-x4, y4, ( th4 & iZeroPi2) )
th44 = Interval(th)
bwd_imod(th44, Interval.PI - th4, 2*Interval.PI.ub())
xx = ( x1 | x2 | (-x3) | (-x4) )
yy = ( y1 | (-y2) | (-y3) | (y4) )
thh = ( th11 | (th22) | ( th33 ) | ( th44 ) )
return xx,yy,thh
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import maccleanmessages
import sys
from setuptools import setup
requires=[]
if sys.version_info[:2] == (2, 6):
requires.append('argparse>=1.1')
setup(
name='maccleanmessages',
version=maccleanmessages.__version__,
description='Remove macOS Messages app history (chats / texts / iMessages)',
long_description=open('README.rst').read(),
url='https://github.com/asbhat/mac-clean-messages',
author='Aditya Bhat',
author_email='aditya.s.bhat@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: MacOS X',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Communications :: Chat',
'Topic :: Utilities'
],
keywords='mac clean messages',
packages=['maccleanmessages'],
install_requires=requires,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['pytest'],
'test': ['pytest'],
},
package_data={
'maccleanmessages': ['config/*.cfg'],
},
entry_points={
'console_scripts': [
'clean-messages=maccleanmessages.driver:main',
],
},
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from pyparsing import lineno, col
"""
Base classes for implementing decoder parsers.
A decoder parser receives a data structure and returns an instance of a class
from the domain model.
For example, it may receive a JSON and return a CWRFile instance.
All decoders are expected to implement the Decoder interface. This offers a
single method which receives the data to decode, and returns an equivalent
model class instance.
Additionally, a decoder for handling grammar rules is included:
- GrammarDecoder, which decodes a string applying a Pyparsing grammar rule.
Note that in the context of the project these parsers are expected to be
lossy. Not all the data in the CWR files is useful: some rows contain
information which can be safely ignored, and the parsers may also consume
sources, such as JSON structures, where additional unneeded data has been added.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class Decoder(object, metaclass=ABCMeta):
"""
Interface for implementing decoder parsers. These parsers receive a data
structure and return an instance of a class from the domain model.
"""
def __init__(self):
pass
@abstractmethod
def decode(self, data):
"""
Decodes the data, creating an instance of a class from the domain model.
:param data: the data to decode
:return: a class representing the data
"""
raise NotImplementedError('The decode method must be implemented')
class GrammarDecoder(Decoder):
"""
Decodes a string applying grammar rules to it, which are meant to be
Pyparsing grammar rules.
The actual parsing work will be done by this rule, and it is expected to
include any action required to transform it into the returned value.
Note that the decoder expects an string, but it can contain multiple
lines, separated by the new line indicator.
"""
def __init__(self, grammar):
super(GrammarDecoder, self).__init__()
self._grammar = grammar
@property
def grammar(self):
"""
Grammar for decoding the string.
This is meant to be a Pyparsing grammar rule, and it is expected to
include an action for transforming the text into the expected result.
:return: the grammar used for decoding the string
"""
return self._grammar
def decode(self, text):
"""
Decodes the string, creating an instance of a class from the domain
model.
For this a Pyparsing grammar rule will be applied to the string.
:param text: the data to decode
:return: a class representing the data
"""
return self._grammar.parseString(text)
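# A minimal usage sketch (illustrative, not part of the module): a Pyparsing
# rule with a parse action plays the role of the domain-model transformation.
if __name__ == '__main__':
    from pyparsing import Word, nums
    integer = Word(nums).setParseAction(lambda tokens: int(tokens[0]))
    decoder = GrammarDecoder(integer)
    assert decoder.decode('42')[0] == 42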
|
nilq/baby-python
|
python
|
# MIT License
#
# Copyright (c) 2020 Adam Dodd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os.path
import sys
import types
import numpy as np
import pyopencl as cl
from pyopencl import mem_flags as mf
from . import fft
from . import wav
################################################################################
def ft_freqs(bins, startFreq, endFreq):
if endFreq <= startFreq:
raise ValueError("`endFreq <= startFreq`")
assert isinstance(startFreq, float)
assert isinstance(endFreq, float)
freqRange = endFreq - startFreq
freqStep = freqRange / float(bins - 1)
print("freqRange =", freqRange)
print("freqStep =", freqStep)
return [startFreq + (float(i) * freqStep) for i in range(0, bins)]
################################################################################
def wav2bmp_ocl(fs, wav, size, bins, startFreq, endFreq, overlapDec=0.0):
freqs = ft_freqs(bins, startFreq, endFreq)
freqCount = len(freqs)
n = len(wav)
start, step, iters = fft.get_fft_stats(n, size, overlapDec)
platforms = cl.get_platforms()
if len(platforms) > 1:
p = -1
for i in range(0, len(platforms)):
if "NVIDIA" in platforms[i].vendor:
p = i
break
if p == -1:
for i in range(0, len(platforms)):
if "AMD" in platforms[i].vendor:
p = i
break
if p == -1:
print("Unable to intelligently choose a platform; " +
"defaulting to platform [0]")
p = 0
elif len(platforms) == 1:
p = 0
else:
raise RuntimeError("No OpenCL platforms found")
print(("\nSelected OpenCL platform [{}]\n" +
"> name: \"{}\"\n" +
"> version: \"{}\"\n").format(
p, platforms[p].name, platforms[p].version))
ctx = cl.Context(
dev_type=cl.device_type.ALL,
properties=[(cl.context_properties.PLATFORM, platforms[p])])
prog = cl.Program(
ctx, open(os.path.join(
os.path.dirname(sys.argv[0]), "w2b", "ft_kernel.cl")) \
.read()).build()
print("bins:", bins)
print("iters:", iters)
inSamp = np.ascontiguousarray(np.ndarray((size, iters), dtype="float32"))
inFreqs = np.ascontiguousarray(np.ndarray(freqCount, dtype="float32"))
outAbs = np.ascontiguousarray(np.ndarray((bins, iters), dtype="float32"))
outAng = np.ascontiguousarray(np.ndarray((bins, iters), dtype="float32"))
inFreqs[:] = freqs
c = 0
wnd = np.hanning(size)
for i in range(start, n, step):
# Fill window column c from wav[i : i+size], zero-padding where the window overhangs either end of the signal
if i < 0:
bufStart = size - (size + i)
bufEnd = size
wavEnd = i + size
inSamp[0:bufStart, c] = 0.0
inSamp[bufStart:bufEnd, c] = wav[0:wavEnd]
elif (i + size) >= n:
bufEnd = n - i
wavStart = i
inSamp[0:bufEnd, c] = wav[wavStart:n]
inSamp[bufEnd:size, c] = 0.0
else:
wavStart = i
wavEnd = wavStart + size
inSamp[:, c] = wav[wavStart:wavEnd]
if wnd is not None:
inSamp[:, c] *= wnd
c += 1
assert c == iters
assert not np.any(np.isnan(inSamp))
assert not np.any(np.isnan(inFreqs))
print("inSamp.nbytes =", inSamp.nbytes)
print("inFreqs.nbytes =", inFreqs.nbytes)
print("outAbs.nbytes =", outAbs.nbytes)
print("outAng.nbytes =", outAng.nbytes)
inSampBuf = cl.Buffer(ctx, mf.READ_ONLY, inSamp.nbytes)
inFreqsBuf = cl.Buffer(ctx, mf.READ_ONLY, inFreqs.nbytes)
outAbsBuf = cl.Buffer(ctx, mf.WRITE_ONLY, outAbs.nbytes)
outAngBuf = cl.Buffer(ctx, mf.WRITE_ONLY, outAng.nbytes)
queue = cl.CommandQueue(ctx)
cl.enqueue_copy(queue, inSampBuf, inSamp)
cl.enqueue_copy(queue, inFreqsBuf, inFreqs)
queue.finish()
prog.slowft(queue, (iters, bins), None,
np.uint32(fs), inSampBuf, np.uint32(size), np.uint32(bins),
np.uint32(iters), inFreqsBuf, np.uint32(freqCount),
outAbsBuf, outAngBuf)
queue.finish()
cl.enqueue_copy(queue, outAbs, outAbsBuf)
cl.enqueue_copy(queue, outAng, outAngBuf)
queue.finish()
assert not np.any(np.isnan(outAbs))
assert not np.any(np.isnan(outAng))
print("\nminmax outAbs: {} , {}".format(np.amin(outAbs), np.amax(outAbs)))
print("minmax outAng: {} , {}".format(np.amin(outAng), np.amax(outAng)))
return outAbs, outAng
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
def categorical_accuracy(preds, y):
"""Return accuracy given a batch of label distributions and true labels"""
max_preds = preds.argmax(dim=1, keepdim=True)
correct = max_preds.squeeze(1).eq(y)
return correct.sum().float() / torch.FloatTensor([y.shape[0]])
class LR(nn.Module):
"""Logistic regressor over mean input embedding"""
def __init__(self, vocab_size, embedding_dim, n_classes):
super(LR, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.classifier = nn.Sequential(
nn.Linear(embedding_dim, n_classes)
)
def forward(self, data, probs=False):
text, length = data
text_embed = self.embeddings(text)
mean_embed = text_embed.sum(0)
mean_embed /= (length.float().unsqueeze(1) + 1)
logits = self.classifier(mean_embed)
return logits
class DAN(nn.Module):
"""Deep Averaging Network (Iyyer et al., 2015)."""
def __init__(self, vocab_size, embedding_dim, n_classes, n_layers=3,
hidden_dim=300, dropout=0.0):
super(DAN, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
layers = [nn.Linear(embedding_dim, hidden_dim), nn.ReLU(),
nn.Dropout(dropout)]
for _ in range(n_layers - 1):
layers.extend([
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Dropout(dropout)
])
layers.append(nn.Linear(hidden_dim, n_classes))
self.classifier = nn.Sequential(*layers)
self._softmax = nn.LogSoftmax(dim=1)
def forward(self, data, probs=False):
# data is (text, length) tuple
# text.shape == (sent len, batch size)
text, length = data
# text_embed.shape == (sent len, batch size, emb dim)
text_embed = self.embeddings(text)
# mean_embed.shape == (batch size, emb dim)
mean_embed = text_embed.sum(0)
mean_embed /= (length.float().unsqueeze(1) + 1)
logits = self.classifier(mean_embed)
if probs:
return self._softmax(logits)
else:
return logits
class CNN(nn.Module):
"""Convolutional neural networks from (Kim, 2014)."""
def __init__(self, vocab_size, embedding_dim, n_classes,
n_filters=100, filter_sizes=(3, 4, 5), dropout=.0):
super(CNN, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.convs = nn.ModuleList([nn.Conv2d(in_channels=1,
out_channels=n_filters,
kernel_size=(fs, embedding_dim))
for fs in filter_sizes])
self.classifier = nn.Linear(len(filter_sizes)*n_filters, n_classes)
self.dropout = nn.Dropout(dropout)
def forward(self, batch):
# text.shape == [sent len, batch size]
text, length = batch
x = text.permute(1, 0)
# x.shape = [batch size, sent len]
embedded = self.embeddings(x)
# embedded.shape = [batch size, sent len, emb dim]
embedded = embedded.unsqueeze(1)
# embedded.shape == [batch size, 1, sent len, emb dim]
conved = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]
# conved[n].shape = =[batch size, n_filters, sent len - filter_sizes[n]]
pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]
# pooled.shape == [batch size, n_filters]
cat = self.dropout(torch.cat(pooled, dim=1))
return self.classifier(cat)
def get_model(model_type, vocab_size, emb_dim, n_classes, dropout=.0):
if model_type == 'lr':
if dropout > 0:
logging.warning("Logistic Regression doesn't support dropout.")
return LR(vocab_size, emb_dim, n_classes)
elif model_type == 'dan':
return DAN(vocab_size, emb_dim, n_classes, dropout=dropout)
elif model_type == 'cnn':
return CNN(vocab_size, emb_dim, n_classes, dropout=dropout)
else:
raise ValueError('Model type not implemented')
def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
predictions = model(batch.text)
loss = criterion(predictions, batch.label)
acc = categorical_accuracy(predictions, batch.label)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
predictions = model(batch.text)
loss = criterion(predictions, batch.label)
acc = categorical_accuracy(predictions, batch.label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate_rank(model, iterator, criterion):
    # Like evaluate(), but backpropagates so that gradients accumulate on the
    # embedding weights (useful for ranking word importance afterwards).
    model.embeddings.weight.requires_grad = True
epoch_loss = 0
epoch_acc = 0
model.eval()
for batch in iterator:
predictions = model(batch.text)
loss = criterion(predictions, batch.label)
acc = categorical_accuracy(predictions, batch.label)
loss.backward()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
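# A minimal usage sketch (not part of the original module). It assumes a
# torchtext-style iterator whose batches expose `.text` as a
# (token_ids, lengths) tuple and `.label` as class indices.
if __name__ == '__main__':
    model = get_model('dan', vocab_size=10000, emb_dim=100, n_classes=2, dropout=0.2)
    optimizer = torch.optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss()
    # train_iter and valid_iter are hypothetical batch iterators:
    # train_loss, train_acc = train(model, train_iter, optimizer, criterion)
    # valid_loss, valid_acc = evaluate(model, valid_iter, criterion)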
|
nilq/baby-python
|
python
|
from pathlib import Path
import pandas as pd
DATA_DIR = Path('data')
DIR = DATA_DIR / 'raw' / 'jigsaw-unintended-bias-in-toxicity-classification'
def load_original_dataset():
df = pd.read_csv(DIR / 'train.csv', index_col=0)
df['label'] = (df.target >= 0.5)
df = df[['comment_text', 'label']]
df.columns = ['text', 'label']
df.index = 'ji' + df.index.astype(str)
return df
def load_dataset():
df = pd.read_csv(DATA_DIR / 'jigsaw.csv', index_col=0)
return df
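# Hypothetical usage (the CSV paths above are the module's own assumptions):
if __name__ == '__main__':
    df = load_original_dataset()
    print(df.label.value_counts())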
|
nilq/baby-python
|
python
|
import yunionclient
from yunionclient.common import utils
import json
@utils.arg('--limit', metavar='<NUMBER>', default=20, help='Page limit')
@utils.arg('--offset', metavar='<OFFSET>', help='Page offset')
@utils.arg('--order-by', metavar='<ORDER_BY>', help='Name of fields order by')
@utils.arg('--order', metavar='<ORDER>', choices=['desc', 'asc'], help='order')
@utils.arg('--details', action='store_true', help='More detailed list')
@utils.arg('--search', metavar='<KEYWORD>', help='Filter result by simple keyword search')
@utils.arg('--admin', action='store_true', help='Is admin call?')
@utils.arg('--tenant', metavar='<TENANT>', help='Tenant ID or Name')
@utils.arg('--filter', metavar='<FILTER>', action='append', help='Filters')
@utils.arg('--filter-any', action='store_true', help='If true, match if any of the filters matches; otherwise, match if all of the filters match')
@utils.arg('--field', metavar='<FIELD>', action='append', help='Show only specified fields')
@utils.arg('--type', metavar='<TYPE>', choices=['guest', 'baremetal'], help='Server type of the networks. List all if not specify')
def do_network_list(client, args):
""" List all virtual networks """
page_info = utils.get_paging_info(args)
if args.type:
page_info['server_type'] = args.type
nets = client.networks.list(**page_info)
utils.print_list(nets, client.networks.columns)
@utils.arg('id', metavar='<WIRE_ID>', help='Substrate id')
@utils.arg('--name', metavar='<NETWORK_NAME>', required=True, help='Name of network to create')
@utils.arg('--start-ip', metavar='<NETWORK_START_IP>', required=True, help='Start ip of network')
@utils.arg('--end-ip', metavar='<NETWORK_END_IP>', required=True, help='End ip of network')
@utils.arg('--netmask', metavar='<NETWORK_MASK>', required=True, help='Network mask length')
@utils.arg('--desc', metavar='<DESCRIPTION>', help='Description')
@utils.arg('--gateway', metavar='<GATEWAY>', help='Default gateway')
@utils.arg('--dns', metavar='<DNS>', help='DNS server')
@utils.arg('--domain', metavar='<DOMAIN>', help='Default domain')
@utils.arg('--dhcp', metavar='<DHCP>', help='DHCP server')
#@utils.arg('--vlan', metavar='<VLAN_ID>', help='vlan ID')
@utils.arg('--type', metavar='<TYPE>', choices=['guest', 'baremetal'], help='Server type of the networks')
def do_wire_create_network(client, args):
""" Create a virtual network over a wire """
kwargs = {}
kwargs['name'] = args.name
kwargs['guest_ip_start'] = args.start_ip
kwargs['guest_ip_end'] = args.end_ip
kwargs['guest_ip_mask'] = args.netmask
#kwargs['wire_id'] = args.id
if args.desc is not None:
kwargs['description'] = args.desc
if args.gateway is not None:
kwargs['guest_gateway'] = args.gateway
if args.dns is not None:
kwargs['guest_dns'] = args.dns
if args.dhcp is not None:
kwargs['guest_dhcp'] = args.dhcp
if args.domain is not None:
kwargs['guest_domain'] = args.domain
if args.type:
kwargs['server_type'] = args.type
#if args.vlan is not None:
# kwargs['vlan_id'] = args.vlan
net = client.wires.create_descendent(args.id,
yunionclient.api.networks.NetworkManager, **kwargs)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to show')
def do_network_show(client, args):
""" Show details of a virtual network """
net = client.networks.get(args.id)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of virtual network to get metadata info')
@utils.arg('--field', metavar='<METADATA_FIELD>', help='Field name of metadata')
def do_network_metadata(client, args):
""" Show metadata info of a virtual network """
kwargs = {}
if args.field is not None:
kwargs['field'] = args.field
meta = client.networks.get_metadata(args.id, **kwargs)
if isinstance(meta, dict):
utils.print_dict(meta)
else:
print(meta)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of virtual network to get metadata info')
@utils.arg('--key', metavar='<KEYNAME>', help='Key name of secret')
@utils.arg('--secret', metavar='<SECRET>', help='Key secret')
@utils.arg('--server', metavar='<SERVER>', help='Alternate DNS update server')
def do_network_set_dns_update_key(client, args):
""" Set DNS update key info for a virtual network """
kwargs = {}
kwargs['dns_update_key_name'] = args.key
kwargs['dns_update_key_secret'] = args.secret
kwargs['dns_update_server'] = args.server
net = client.networks.set_metadata(args.id, **kwargs)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of virtual network to get metadata info')
def do_network_remove_dns_update_key(client, args):
""" Set DNS update key info for a virtual network """
kwargs = {}
kwargs['dns_update_key_name'] = 'None'
kwargs['dns_update_key_secret'] = 'None'
kwargs['dns_update_server'] = 'None'
net = client.networks.set_metadata(args.id, **kwargs)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to show')
@utils.arg('--name', metavar='<NETWORK_NAME>', help='Name of network to create')
@utils.arg('--start-ip', metavar='<NETWORK_START_IP>', help='Start ip of network')
@utils.arg('--end-ip', metavar='<NETWORK_END_IP>', help='End ip of network')
@utils.arg('--netmask', metavar='<NETWORK_MASK>', help='Network mask length')
@utils.arg('--desc', metavar='<DESCRIPTION>', help='Description')
@utils.arg('--gateway', metavar='<GATEWAY>', help='Default gateway')
@utils.arg('--dns', metavar='<DNS>', help='DNS server')
@utils.arg('--domain', metavar='<DOMAIN>', help='Default domain')
@utils.arg('--dhcp', metavar='<DHCP>', help='DHCP server')
#@utils.arg('--vlan', metavar='<VLAN_ID>', help='vlan ID')
def do_network_update(client, args):
""" Update a virtual network """
kwargs = {}
if args.name is not None:
kwargs['name'] = args.name
if args.start_ip is not None:
kwargs['guest_ip_start'] = args.start_ip
if args.end_ip is not None:
kwargs['guest_ip_end'] = args.end_ip
if args.netmask is not None:
kwargs['guest_ip_mask'] = args.netmask
if args.desc is not None:
kwargs['description'] = args.desc
if args.gateway is not None:
kwargs['guest_gateway'] = args.gateway
if args.dns is not None:
kwargs['guest_dns'] = args.dns
if args.dhcp is not None:
kwargs['guest_dhcp'] = args.dhcp
if args.domain is not None:
kwargs['guest_domain'] = args.domain
#if args.vlan is not None:
# kwargs['vlan_id'] = args.vlan
if len(kwargs) == 0:
raise Exception("Empty data", "empty data")
net = client.networks.update(args.id, **kwargs)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to delete')
def do_network_delete(client, args):
""" delete a virtual network """
net = client.networks.delete(args.id)
utils.print_dict(net)
#@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to synchronize config')
#def do_network_sync(client, args):
# """ Synchronize the configuration of a virtual network """
# try:
# net = client.networks.perform_action(args.id, 'sync')
# utils.print_dict(net)
# except Exception as e:
# utils.show_exception_and_exit(e)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to make public')
def do_network_public(client, args):
""" Make a virtual network public """
net = client.networks.perform_action(args.id, 'public')
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to make private')
def do_network_private(client, args):
""" Make a virtual network private """
net = client.networks.perform_action(args.id, 'private')
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to set static routes on')
@utils.arg('--net', metavar='<DESTINATION>', action='append', help='Route destination prefix')
@utils.arg('--gw', metavar='<GATEWAY>', action='append', help='Route gateway')
def do_network_set_static_routes(client, args):
""" Make a virtual network private """
kwargs = {}
if args.net and args.gw:
if len(args.net) != len(args.gw):
raise Exception('Inconsistent network and gateway pairs')
routes = {}
for i in range(len(args.net)):
routes[args.net[i]] = args.gw[i]
kwargs['static_routes'] = json.dumps(routes)
else:
kwargs['static_routes'] = None
net = client.networks.set_metadata(args.id, **kwargs)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to swap IP address')
@utils.arg('node1', metavar='<NODE_ID>', help='ID of a VM or group')
@utils.arg('node2', metavar='<NODE_ID>', help='ID of a VM or group')
@utils.arg('--node1-type', metavar='<NODE_TYPE>', choices=['server', 'group'], help='Type of node1, either server or group')
@utils.arg('--node2-type', metavar='<NODE_TYPE>', choices=['server', 'group'], help='Type of node2, either server or group')
def do_network_swap_address(client, args):
""" Swap IP address between virtual server or group """
kwargs = {'node1': args.node1,
'node2': args.node2}
if args.node1_type:
kwargs['node1_type'] = args.node1_type
if args.node2_type:
kwargs['node2_type'] = args.node2_type
net = client.networks.perform_action(args.id, 'swap-address', **kwargs)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to update')
@utils.arg('dns', metavar='<DNS_SERVER>', help='DNS server address')
@utils.arg('key', metavar='<DNS_UPDATE_KEY>', help='DNS update key name')
@utils.arg('secret', metavar='<DNS_UPDATE_SECRET>', help='DNS update key secret')
def do_network_add_dns_update_target(client, args):
""" Add a dns update target to a network """
kwargs = {'server': args.dns}
kwargs['key'] = args.key
kwargs['secret'] = args.secret
net = client.networks.perform_action(args.id, 'add-dns-update-target',
**kwargs)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to update')
@utils.arg('dns', metavar='<DNS_SERVER>', help='DNS server address')
@utils.arg('key', metavar='<DNS_UPDATE_KEY>', help='DNS update key name')
def do_network_remove_dns_update_target(client, args):
""" Remove a dns update target from a network """
kwargs = {'server': args.dns, 'key': args.key}
net = client.networks.perform_action(args.id, 'remove-dns-update-target',
**kwargs)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to update')
@utils.arg('ip', metavar='<IPADDR>', help='IP address to reserve')
@utils.arg('--notes', metavar='<NOTES>', help='Notes')
def do_network_reserve_ip(client, args):
""" Reserve an IP address from pool """
kwargs = {'ip': args.ip}
if args.notes:
kwargs['notes'] = args.notes
net = client.networks.perform_action(args.id, 'reserve-ip', **kwargs)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to update')
@utils.arg('ip', metavar='<IPADDR>', help='IP address to reserve')
def do_network_release_reserved_ip(client, args):
""" Release a reserved IP into pool """
net = client.networks.perform_action(args.id, 'release-reserved-ip',
ip=args.ip)
utils.print_dict(net)
@utils.arg('id', metavar='<NETWORK_ID>', help='ID of network to update')
def do_network_reserved_ips(client, args):
""" Show all reserved IPs """
net = client.networks.get_specific(args.id, 'reserved-ips')
utils.print_dict(net)
@utils.arg('--limit', metavar='<NUMBER>', default=20, help='Page limit')
@utils.arg('--offset', metavar='<OFFSET>', help='Page offset')
@utils.arg('--order-by', metavar='<ORDER_BY>', help='Name of fields order by')
@utils.arg('--order', metavar='<ORDER>', choices=['desc', 'asc'], help='order')
@utils.arg('--details', action='store_true', help='More detailed list')
@utils.arg('--search', metavar='<KEYWORD>', help='Filter result by simple keyword search')
@utils.arg('--filter', metavar='<FILTER>', action='append', help='Filters')
@utils.arg('--filter-any', action='store_true', help='If true, match if any of the filters matches; otherwise, match if all of the filters match')
@utils.arg('--field', metavar='<FIELD>', action='append', help='Show only specified fields')
def do_reserved_ip_list(client, args):
""" Show all reserved IPs for any network """
page_info = utils.get_paging_info(args)
ips = client.reservedips.list(**page_info)
utils.print_list(ips, client.reservedips.columns)
|
nilq/baby-python
|
python
|
# Order-preserving intersection: keep the elements of `a` that also occur in
# `b`; dict.fromkeys() drops duplicates while preserving first-seen order.
inter = dict.fromkeys([x for x in a if x in b])
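# Illustrative values (hypothetical; `a` and `b` are assumed to be defined
# elsewhere in the original snippet):
# a = ['a', 'b', 'c', 'b']; b = ['b', 'c', 'd']
# list(dict.fromkeys([x for x in a if x in b])) -> ['b', 'c']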
|
nilq/baby-python
|
python
|
n1 = int(input('Enter a value: '))
n2 = int(input('Enter another value: '))
s = n1 + n2
m = n1 * n2
d = n1 / n2
di = n1 // n2
e = n1 ** n2
print(f'The sum is: {s}, the product is: {m}, the division is: {d:.3f},', end=' ')
print(f'the integer division is: {di} and the power is: {e}')
|
nilq/baby-python
|
python
|
"""
Utility functions
=======================
"""
import time
from . import RUNNING_ON_PI
if RUNNING_ON_PI: # pragma: no cover
import RPi.GPIO as GPIO
def digital_write(pin, value): # pragma: no cover
GPIO.output(pin, value)
def digital_read(pin): # pragma: no cover
return GPIO.input(pin)
def delay_ms(delaytime): # pragma: no cover
time.sleep(delaytime / 1000.0)
def set_bit(value, bit_index):
    """Return `value` with the bit at `bit_index` set to 1."""
    # TODO: Verify value
    if not isinstance(bit_index, int):
        raise TypeError("bit_index needs to be type int")
    return value | (1 << bit_index)
def unset_bit(value, bit_index):
    """Return `value` with the bit at `bit_index` cleared to 0."""
    # TODO: Verify value
    if not isinstance(bit_index, int):
        raise TypeError("bit_index needs to be type int")
    return value & ~(1 << bit_index)
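# A small usage sketch (not in the original module), assuming plain ints:
if __name__ == '__main__':
    assert set_bit(0b0100, 0) == 0b0101
    assert unset_bit(0b0101, 2) == 0b0001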
|
nilq/baby-python
|
python
|
from unittest.mock import patch
from django.test import TestCase
from vimage.core.base import VimageKey, VimageValue, VimageEntry
from .const import dotted_path
class VimageEntryTestCase(TestCase):
def test_entry(self):
ve = VimageEntry('app', {})
self.assertIsInstance(ve.key, VimageKey)
self.assertEqual(ve.key.key, VimageKey('app').key)
self.assertIsInstance(ve.value, VimageValue)
self.assertEqual(ve.value.value, VimageValue({}).value)
def test_str(self):
ve = VimageEntry('app', {})
self.assertEqual(str(ve), 'app: {}')
def test_repr(self):
ve = VimageEntry('app', {})
self.assertEqual(
repr(ve),
"VimageEntry('app', {})"
)
self.assertIsInstance(eval(repr(ve)), VimageEntry)
def test_is_valid(self):
m_path = dotted_path('base', 'VimageKey', 'is_valid')
with patch(m_path) as m:
ve = VimageEntry('myapp.models', {'SIZE': 10})
ve.is_valid()
self.assertTrue(m.called)
m_path = dotted_path('base', 'VimageValue', 'is_valid')
with patch(m_path) as m:
ve = VimageEntry('myapp.models', {'SIZE': 10})
ve.is_valid()
self.assertTrue(m.called)
def test_app_label(self):
m_path = dotted_path('base', 'VimageKey', 'get_app_label')
with patch(m_path) as m:
ve = VimageEntry('myapp.models', {'SIZE': 10})
label = ve.app_label
self.assertTrue(m.called)
def test_fields(self):
m_path = dotted_path('base', 'VimageKey', 'get_fields')
with patch(m_path) as m:
ve = VimageEntry('myapp.models', {'SIZE': 10})
sp = ve.fields
self.assertTrue(m.called)
def test_specificity(self):
m_path = dotted_path('base', 'VimageKey', 'get_specificity')
with patch(m_path) as m:
ve = VimageEntry('myapp.models', {'SIZE': 10})
sp = ve.specificity
self.assertTrue(m.called)
def test_mapping(self):
m_path = dotted_path('base', 'VimageValue', 'type_validator_mapping')
with patch(m_path) as m:
ve = VimageEntry('myapp.models', {'SIZE': 10})
sp = ve.mapping
self.assertTrue(m.called)
def test_entry_info(self):
ve = VimageEntry('myapp.models', {'SIZE': 10})
self.assertEqual(
sorted(ve.entry_info.keys()),
['app_label', 'fields', 'mapping', 'specificity']
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Utilities related to reading and generating indexable search content."""
from __future__ import absolute_import
import os
import fnmatch
import re
import codecs
import logging
import json
from builtins import next, range
from pyquery import PyQuery
log = logging.getLogger(__name__)
def process_mkdocs_json(version, build_dir=True):
"""Given a version object, return a list of page dicts from disk content."""
if build_dir:
full_path = version.project.full_json_path(version.slug)
else:
full_path = version.project.get_production_media_path(
type_='json', version_slug=version.slug, include_file=False)
html_files = []
for root, _, files in os.walk(full_path):
for filename in fnmatch.filter(files, '*.json'):
html_files.append(os.path.join(root, filename))
page_list = []
for filename in html_files:
if not valid_mkdocs_json(file_path=filename):
continue
relative_path = parse_path_from_file(file_path=filename)
html = parse_content_from_file(file_path=filename)
headers = parse_headers_from_file(documentation_type='mkdocs', file_path=filename)
sections = parse_sections_from_file(documentation_type='mkdocs', file_path=filename)
try:
title = sections[0]['title']
except IndexError:
title = relative_path
page_list.append({
'content': html,
'path': relative_path,
'title': title,
'headers': headers,
'sections': sections,
})
return page_list
def recurse_while_none(element):
if element.text is None:
return recurse_while_none(element.getchildren()[0])
return element.text
def valid_mkdocs_json(file_path):
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
log.warning('(Search Index) Unable to index file: %s, error: %s', file_path, e)
return None
page_json = json.loads(content)
for to_check in ['url', 'content']:
if to_check not in page_json:
log.warning('(Search Index) Unable to index file: %s error: Invalid JSON', file_path)
return None
return True
def parse_path_from_file(file_path):
"""Retrieve path information from a json-encoded file on disk."""
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
log.warning('(Search Index) Unable to index file: %s, error: %s', file_path, e)
return ''
page_json = json.loads(content)
path = page_json['url']
# The URLs here should be of the form "path/index". So we need to
# convert:
# "path/" => "path/index"
# "path/index.html" => "path/index"
# "/path/index" => "path/index"
    path = re.sub(r'/$', '/index', path)
    path = re.sub(r'\.html$', '', path)
    path = re.sub(r'^/', '', path)
return path
def parse_content_from_file(file_path):
"""Retrieve content from a json-encoded file on disk."""
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
        log.info('(Search Index) Unable to index file: %s, error: %s', file_path, e)
return ''
page_json = json.loads(content)
page_content = page_json['content']
content = parse_content(page_content)
if not content:
log.info('(Search Index) Unable to index file: %s, empty file', file_path)
else:
log.debug('(Search Index) %s length: %s', file_path, len(content))
return content
def parse_content(content):
"""
Prepare the text of the html file.
Returns the body text of a document
"""
try:
to_index = PyQuery(content).text()
except ValueError:
return ''
return to_index
def parse_headers_from_file(documentation_type, file_path):
log.debug('(Search Index) Parsing headers for %s', file_path)
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
        log.info('(Search Index) Unable to index file: %s, error: %s',
                 file_path, e)
return ''
page_json = json.loads(content)
page_content = page_json['content']
headers = parse_headers(documentation_type, page_content)
if not headers:
log.error('Unable to index file headers for: %s', file_path)
return headers
def parse_headers(documentation_type, content):
headers = []
if documentation_type == 'mkdocs':
for element in PyQuery(content)('h2'):
headers.append(recurse_while_none(element))
return headers
def parse_sections_from_file(documentation_type, file_path):
log.debug('(Search Index) Parsing sections for %s', file_path)
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
        log.info('(Search Index) Unable to index file: %s, error: %s', file_path, e)
return ''
page_json = json.loads(content)
page_content = page_json['content']
sections = parse_sections(documentation_type, page_content)
if not sections:
log.error('Unable to index file sections for: %s', file_path)
return sections
def parse_sphinx_sections(content):
"""Generate a list of sections from sphinx-style html."""
body = PyQuery(content)
h1_section = body('.section > h1')
if h1_section:
div = h1_section.parent()
h1_title = h1_section.text().replace(u'¶', '').strip()
h1_id = div.attr('id')
h1_content = ""
next_p = next(body('h1'))
while next_p:
if next_p[0].tag == 'div' and 'class' in next_p[0].attrib:
if 'section' in next_p[0].attrib['class']:
break
h1_content += "\n%s\n" % next_p.html()
next_p = next(next_p)
if h1_content:
yield {
'id': h1_id,
'title': h1_title,
'content': h1_content,
}
# Capture text inside h2's
section_list = body('.section > h2')
for num in range(len(section_list)):
div = section_list.eq(num).parent()
header = section_list.eq(num)
title = header.text().replace(u'¶', '').strip()
section_id = div.attr('id')
content = div.html()
yield {
'id': section_id,
'title': title,
'content': content,
}
def parse_mkdocs_sections(content):
"""
Generate a list of sections from mkdocs-style html.
May raise a ValueError
"""
body = PyQuery(content)
try:
# H1 content
h1 = body('h1')
h1_id = h1.attr('id')
h1_title = h1.text().strip()
h1_content = ""
next_p = next(body('h1'))
while next_p:
if next_p[0].tag == 'h2':
break
h1_html = next_p.html()
if h1_html:
h1_content += "\n%s\n" % h1_html
next_p = next(next_p)
if h1_content:
yield {
'id': h1_id,
'title': h1_title,
'content': h1_content,
}
# H2 content
section_list = body('h2')
for num in range(len(section_list)):
h2 = section_list.eq(num)
h2_title = h2.text().strip()
section_id = h2.attr('id')
h2_content = ""
next_p = next(body('h2'))
while next_p:
if next_p[0].tag == 'h2':
break
h2_html = next_p.html()
if h2_html:
h2_content += "\n%s\n" % h2_html
next_p = next(next_p)
if h2_content:
yield {
'id': section_id,
'title': h2_title,
'content': h2_content,
}
# we're unsure which exceptions can be raised
# pylint: disable=bare-except
except:
log.exception('Failed indexing')
def parse_sections(documentation_type, content):
"""Retrieve a list of section dicts from a string of html."""
sections = []
if 'sphinx' in documentation_type:
sections.extend(parse_sphinx_sections(content))
if 'mkdocs' in documentation_type:
try:
sections.extend(parse_mkdocs_sections(content))
except ValueError:
return ''
return sections
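# Illustrative check (not in the original module); parse_headers() only
# inspects <h2> elements for mkdocs content. The sample html is hypothetical.
if __name__ == '__main__':
    sample = '<div><h2>Install</h2><h2>Usage</h2></div>'
    print(parse_headers('mkdocs', sample))  # ['Install', 'Usage']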
|
nilq/baby-python
|
python
|
from django.test import TestCase
from wagtailimportexport import functions
class TestNullPKs(TestCase):
"""
Test cases for null_pks method in functions.py
"""
def test_null(self):
pass
class TestNullFKs(TestCase):
"""
Test cases for null_fks method in functions.py
"""
def test_null(self):
pass
class TestZipContents(TestCase):
"""
Test cases for zip_contents method in functions.py
"""
def test_null(self):
pass
class TestUnZipContents(TestCase):
"""
Test cases for unzip_contents method in functions.py
"""
def test_null(self):
pass
|
nilq/baby-python
|
python
|
from django import forms
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django.test.utils import isolate_apps
from .models import (
Animal, ForProxyModelModel, Gecko, Mineral, ProxyRelatedModel, TaggedItem,
)
class CustomWidget(forms.TextInput):
pass
class TaggedItemForm(forms.ModelForm):
class Meta:
model = TaggedItem
fields = '__all__'
widgets = {'tag': CustomWidget}
class GenericInlineFormsetTests(TestCase):
def test_output(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet()
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>"""
)
formset = GenericFormSet(instance=Animal())
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag"
type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE"><input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>"""
)
platypus = Animal.objects.create(
common_name='Platypus', latin_name='Ornithorhynchus anatinus',
)
platypus.tags.create(tag='shiny')
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet(instance=platypus)
tagged_item_id = TaggedItem.objects.get(tag='shiny', object_id=platypus.id).id
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id"
id="id_generic_relations-taggeditem-content_type-object_id-1-id"></p>""" % tagged_item_id
)
lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo')
formset = GenericFormSet(instance=lion, prefix='x')
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_x-0-tag">Tag:</label>
<input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50"></p>
<p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE">
<input type="hidden" name="x-0-id" id="id_x-0-id"></p>"""
)
def test_options(self):
TaggedItemFormSet = generic_inlineformset_factory(
TaggedItem,
can_delete=False,
exclude=['tag'],
extra=3,
)
platypus = Animal.objects.create(common_name='Platypus', latin_name='Ornithorhynchus anatinus')
harmless = platypus.tags.create(tag='harmless')
mammal = platypus.tags.create(tag='mammal')
# Works without a queryset.
formset = TaggedItemFormSet(instance=platypus)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" value="%s" '
'id="id_generic_relations-taggeditem-content_type-object_id-0-id">' % harmless.pk
)
self.assertEqual(formset.forms[0].instance, harmless)
self.assertEqual(formset.forms[1].instance, mammal)
self.assertIsNone(formset.forms[2].instance.pk)
# A queryset can be used to alter display ordering.
formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.order_by('-tag'))
self.assertEqual(len(formset.forms), 5)
self.assertEqual(formset.forms[0].instance, mammal)
self.assertEqual(formset.forms[1].instance, harmless)
self.assertIsNone(formset.forms[2].instance.pk)
# A queryset that omits items.
formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.filter(tag__startswith='harm'))
self.assertEqual(len(formset.forms), 4)
self.assertEqual(formset.forms[0].instance, harmless)
self.assertIsNone(formset.forms[1].instance.pk)
def test_get_queryset_ordering(self):
"""
BaseGenericInlineFormSet.get_queryset() adds default ordering, if
needed.
"""
inline_formset = generic_inlineformset_factory(TaggedItem, exclude=('tag',))
formset = inline_formset(instance=Gecko.objects.create())
self.assertIs(formset.get_queryset().ordered, True)
def test_initial(self):
quartz = Mineral.objects.create(name='Quartz', hardness=7)
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
ctype = ContentType.objects.get_for_model(quartz)
initial_data = [{
'tag': 'lizard',
'content_type': ctype.pk,
'object_id': quartz.pk,
}]
formset = GenericFormSet(initial=initial_data)
self.assertEqual(formset.forms[0].initial, initial_data[0])
def test_meta_widgets(self):
"""TaggedItemForm has a widget defined in Meta."""
Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
form = Formset().forms[0]
self.assertIsInstance(form['tag'].field.widget, CustomWidget)
@isolate_apps('generic_relations')
def test_incorrect_content_type(self):
class BadModel(models.Model):
content_type = models.PositiveIntegerField()
msg = "fk_name 'generic_relations.BadModel.content_type' is not a ForeignKey to ContentType"
with self.assertRaisesMessage(Exception, msg):
generic_inlineformset_factory(BadModel, TaggedItemForm)
def test_save_new_uses_form_save(self):
class SaveTestForm(forms.ModelForm):
def save(self, *args, **kwargs):
self.instance.saved_by = 'custom method'
return super().save(*args, **kwargs)
Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj = formset.save()[0]
self.assertEqual(new_obj.saved_by, 'custom method')
def test_save_new_for_proxy(self):
Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertEqual(new_obj.obj, instance)
def test_save_new_for_concrete(self):
Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)
def test_initial_count(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem)
data = {
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '3',
'form-MAX_NUM_FORMS': '',
}
formset = GenericFormSet(data=data, prefix='form')
self.assertEqual(formset.initial_form_count(), 3)
formset = GenericFormSet(data=data, prefix='form', save_as_new=True)
self.assertEqual(formset.initial_form_count(), 0)
def test_save_as_new(self):
"""
The save_as_new parameter creates new items that are associated with
the object.
"""
lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo')
yellow = lion.tags.create(tag='yellow')
hairy = lion.tags.create(tag='hairy')
GenericFormSet = generic_inlineformset_factory(TaggedItem)
data = {
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '2',
'form-MAX_NUM_FORMS': '',
'form-0-id': str(yellow.pk),
'form-0-tag': 'hunts',
'form-1-id': str(hairy.pk),
'form-1-tag': 'roars',
}
formset = GenericFormSet(data, instance=lion, prefix='form', save_as_new=True)
self.assertTrue(formset.is_valid())
tags = formset.save()
self.assertEqual([tag.tag for tag in tags], ['hunts', 'roars'])
hunts, roars = tags
self.assertSequenceEqual(lion.tags.order_by('tag'), [hairy, hunts, roars, yellow])
|
nilq/baby-python
|
python
|
from django.db import models
class Core(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(max_length=100)
back_story = models.TextField()
height = models.CharField(max_length=50)
weight = models.FloatField()
strength = models.IntegerField()
wisdom = models.IntegerField()
constitution = models.IntegerField()
dexterity = models.IntegerField()
intelligence = models.IntegerField()
charisma = models.IntegerField()
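# Hypothetical usage from a Django shell (illustrative only; the field values
# below are assumptions, not part of the original file):
# Core.objects.create(
#     name='Aria', description='Wandering bard', back_story='...',
#     height='170cm', weight=60.5, strength=12, wisdom=10, constitution=11,
#     dexterity=14, intelligence=13, charisma=9)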
|
nilq/baby-python
|
python
|
# high-level interface for interacting with Brick graphs using rdflib
# setup our query environment
from rdflib import RDFS, RDF, Namespace, Graph, URIRef
class BrickGraph(object):
def __init__(self, filename='metadata/sample_building.ttl'):
        self.bldg = Graph()
        self.bldg.parse("Brick/dist/Brick.ttl", format='turtle')
self.bldg.parse(filename, format='turtle')
BRICK = Namespace('https://brickschema.org/schema/1.0.1/Brick#')
BF = Namespace('https://brickschema.org/schema/1.0.1/BrickFrame#')
EX = Namespace('http://example.com#')
self.bldg.bind('ex', EX)
self.bldg.bind('brick', BRICK)
self.bldg.bind('bf', BF)
self.m = {
'https://brickschema.org/schema/1.0.1/Brick': 'brick',
'http://www.w3.org/1999/02/22-rdf-syntax-ns': 'rdf',
'http://www.w3.org/2000/01/rdf-schema': 'rdfs',
'https://brickschema.org/schema/1.0.1/BrickFrame': 'bf',
'http://www.w3.org/2002/07/owl': 'owl',
'http://www.w3.org/2004/02/skos/core': 'skos',
'http://example.com': 'ex',
}
def query(self, query, fullURI=False):
query = """PREFIX brick: <https://brickschema.org/schema/1.0.1/Brick#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX bf: <https://brickschema.org/schema/1.0.1/BrickFrame#>
PREFIX ex: <http://example.com#>
"""+query
rows = self.bldg.query(query)
        if not fullURI:
            # Abbreviate URIRefs like <ns#term> to 'prefix:term' using self.m
            return [
                [self.m[r.split('#')[0]] + ':' + r.split('#')[1]
                 if isinstance(r, URIRef) and '#' in r else r
                 for r in row]
                for row in rows
            ]
        return list(rows)
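# A minimal usage sketch (not part of the original file); the file path and
# the query below are illustrative assumptions.
if __name__ == '__main__':
    g = BrickGraph(filename='metadata/sample_building.ttl')
    for row in g.query("SELECT ?ahu WHERE { ?ahu rdf:type brick:AHU . }"):
        print(row)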
|
nilq/baby-python
|
python
|
#!/usr/local/bin/python3
from random import randint
def sortea_numero():
    return randint(1, 6)
def eh_impar(numero: int):
    return numero % 2 != 0
def acertou(numero_sorteado: int, numero: int):
    return numero_sorteado == numero
if __name__ == '__main__':
    numero_sorteado = sortea_numero()
    # Try only the even numbers; the for-else runs if no guess matched.
    for it in range(1, 7):
        if eh_impar(it):
            continue
        if acertou(numero_sorteado, it):
            print('GOT IT!', numero_sorteado)
            break
    else:
        print('DID NOT GUESS THE NUMBER!')
|
nilq/baby-python
|
python
|
import pytest
from datetime import datetime, timedelta
import xarray as xr
import cftime
import numpy as np
import vcm
from vcm.cubedsphere.constants import TIME_FMT
from vcm.convenience import (
cast_to_datetime,
parse_current_date_from_str,
parse_timestep_str_from_path,
round_time,
parse_datetime_from_str,
shift_timestamp,
)
@pytest.mark.parametrize(
"date, shift, expected",
[
("20160801.000000", 15, "20160801.000015"),
("20160801.000000", 90, "20160801.000130"),
("20160801.000000", -5, "20160731.235955"),
],
)
def test_shift_timestamp(date, shift, expected):
assert shift_timestamp(date, shift) == expected
def test_extract_timestep_from_path():
timestep = "20160801.001500"
good_path = f"gs://path/to/timestep/{timestep}/"
assert parse_timestep_str_from_path(good_path) == timestep
def test_extract_timestep_from_path_with_no_timestep_in_path():
with pytest.raises(ValueError):
bad_path = "gs://path/to/not/a/timestep/"
parse_timestep_str_from_path(bad_path)
def test_datetime_from_string():
current_time = datetime.now()
time_str = current_time.strftime(TIME_FMT)
parsed_datetime = parse_datetime_from_str(time_str)
assert parsed_datetime.year == current_time.year
assert parsed_datetime.month == current_time.month
assert parsed_datetime.day == current_time.day
assert parsed_datetime.hour == current_time.hour
assert parsed_datetime.minute == current_time.minute
assert parsed_datetime.second == current_time.second
def test_current_date_from_string():
timestamp = "20160801.001500"
expected_current_date = [2016, 8, 1, 0, 15, 0]
assert expected_current_date == parse_current_date_from_str(timestamp)
def test_convert_timestamps():
arr = xr.DataArray(["20190101.000000", "20160604.011500"], attrs={"foo": "bar"})
out = vcm.convert_timestamps(arr)
assert isinstance(out[0].item(), cftime.DatetimeJulian)
assert out.attrs == arr.attrs
@pytest.mark.parametrize(
"input_time, expected",
[
("20160101.000000", datetime(2016, 1, 1, 0, 0, 0)),
("HDHF/20160101.000000", datetime(2016, 1, 1, 0, 0, 0)),
(datetime(2016, 1, 1, 1, 1, 1, 1), datetime(2016, 1, 1, 1, 1, 1, 1)),
(
cftime.DatetimeJulian(2016, 1, 1, 1, 1, 1, 1),
datetime(2016, 1, 1, 1, 1, 1, 1),
),
(cftime.DatetimeJulian(2016, 1, 1), datetime(2016, 1, 1)),
(
np.datetime64(datetime(2016, 1, 1, 1, 1, 1, 1)),
datetime(2016, 1, 1, 1, 1, 1, 1),
),
],
)
def test__cast_to_datetime(input_time, expected):
casted_input_time = cast_to_datetime(input_time)
assert casted_input_time == expected
assert isinstance(casted_input_time, datetime)
def _example_with_second(**kwargs):
default = dict(year=2016, month=8, day=5, hour=23, minute=7, second=0)
default.update(kwargs)
return cftime.DatetimeJulian(**default)
second = timedelta(seconds=1)
minute = timedelta(minutes=1)
@pytest.mark.parametrize(
"input_,expected,tol",
[
(
_example_with_second(second=29, microsecond=986267),
_example_with_second(second=30),
second,
),
(
_example_with_second(second=59, minute=7, microsecond=986267),
_example_with_second(second=0, minute=8),
second,
),
# cftime arithmetic is not associative so need to check this case
(
_example_with_second(second=59, minute=7, microsecond=986368),
_example_with_second(second=59, minute=7, microsecond=986368),
timedelta(microseconds=1),
),
(
_example_with_second(minute=7, microsecond=186267),
_example_with_second(minute=7),
second,
),
(_example_with_second(minute=7), _example_with_second(minute=7), second,),
(
_example_with_second(minute=10),
_example_with_second(minute=15),
15 * minute,
),
],
)
def test_round_time(input_, expected, tol):
assert round_time(input_, to=tol) == expected
def test_round_time_datetime_julian():
assert isinstance(
round_time(cftime.DatetimeJulian(2016, 1, 1)), cftime.DatetimeJulian
)
def test_round_time_data_array():
time = xr.DataArray([cftime.DatetimeJulian(2016, 1, 1)], dims=["time"])
xr.testing.assert_equal(time, round_time(time))
assert isinstance(round_time(time), xr.DataArray)
def test_round_time_numpy():
time = np.array([cftime.DatetimeJulian(2016, 1, 1)])
ans = round_time(time)
assert isinstance(ans, np.ndarray)
|
nilq/baby-python
|
python
|
# This code is based off the DUET algorithm presented in:
# O. Yilmaz and S. Rickard, "Blind separation of speech mixtures via time-frequency masking."
# S. Rickard, "The DUET Blind Source Separation Algorithm"
#
# At this time, the algorithm is not working when returning to the time domain
# and, to be honest, I haven't yet figured out why. At a later time I'll try
# and add code implementing the below algorithms
#
# A. S. Master, "Bayesian two source modeling for separation of N sources from stereo signals."
# J. Woodruff and B. Pardo, "Active source estimation for improved signal estimation"
#
#
import duet as duet
import matplotlibwrapper as mplw
import numpy as np
import stft as stft
import soundfile as sf
import sounddevice as sd
#Input values are defined here
window_length = 1024 #In the DUET paper this was found to be the optimal window length for the STFT
delay_size = 512 #In the DUET paper this was chosen as the delay spacing
output_plots = True #Defines whether this will just be for use or analysis
output_sounds = True #Defines whether the separated sources are output
p = 0.5 #Both of these can be tuned depending on the MLE
q = 0 #Both of these can be tuned depending on the MLE
alpha_cutoff = 1
number_alpha_bins = 50
delta_cutoff = 4
number_delta_bins = 50
number_of_sources = 3
data, fs = sf.read('../stereomixtures/a4bb4b4.aiff') #Read in the sample music
#Get the relevant left and right channel data
l_data = data[:,0]
r_data = data[:,1]
#Get l and r stft for each dataset
l_stft_data = stft.short_time_fourier_transform(l_data, window_length, delay_size)
r_stft_data = stft.short_time_fourier_transform(r_data, window_length, delay_size)
#Remove the dc component of the data since this causes problems for when w = 0 for 1/w
l_stft_data = np.delete(l_stft_data, 0, 1)
r_stft_data = np.delete(r_stft_data, 0, 1)
#Estimate mixing parameters
symmetric_attenuation_estimation, delay_estimation = duet.estimate_parameters(l_stft_data, r_stft_data, window_length)
weighted_data = duet.generate_weighted_data(l_stft_data, r_stft_data, window_length, p, q)
filtered_weighted_data, filtered_attenuation_data, filtered_delay_data = duet.filter_data(weighted_data, symmetric_attenuation_estimation, delay_estimation, alpha_cutoff, delta_cutoff)
histogram_data = duet.generate_histogram(filtered_weighted_data, filtered_attenuation_data, filtered_delay_data, number_alpha_bins, number_delta_bins)
smoothed_histogram_data = duet.smooth_histogram(histogram_data, 4)
if (output_plots):
mplw.simple_surface_plot(histogram_data, number_alpha_bins, number_delta_bins, alpha_cutoff, delta_cutoff)
mplw.simple_surface_plot(smoothed_histogram_data, number_alpha_bins, number_delta_bins, alpha_cutoff, delta_cutoff)
#Ideally, we'd have an effective peak-picking algorithm here
#Alas, for now, we do not
peak_alphas = np.array([-0.478, -0.2, 0.123])
peak_deltas = np.array([-0.600, -1.18, 1.32])
stft_sources = duet.separate_sources(l_stft_data, r_stft_data, duet.compute_w(window_length), duet.compute_a(peak_alphas), peak_deltas, number_of_sources)
#Lastly, convert back to the time domain
td_sources = []
for i in range(len(stft_sources)):
td_sources.append(stft.inverse_short_time_fourier_transform(stft_sources[i], len(l_data), window_length, delay_size))
#Finally, save the separated sources
if (output_sounds):
for i in range(0, len(td_sources)):
#mplw.simple_line_plot(td_sources[i])
sf.write("Sourcenumber" + str(i) + ".aiff", td_sources[i], fs)
|
nilq/baby-python
|
python
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the BSD License.
"""The way that Flask is designed, we have to read our configuration and
initialize many things on module import, which is a bit lame. There are
probably ways to work around that but things work well enough as is.
"""
import logging
import sys
from pkg_resources import get_distribution, DistributionNotFound, parse_version
try:
# version information is saved under hera_librarian package
__version__ = get_distribution("hera_librarian").version
except DistributionNotFound:
# package is not installed
pass
_log_level_names = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
def _initialize():
import json
import os.path
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
if "LIBRARIAN_CONFIG_PATH" not in os.environ:
raise ValueError(
"The `LIBRARIAN_CONFIG_PATH` environment variable must be set "
"before starting the librarian server. Run `export "
"LIBRARIAN_CONFIG_PATH=/path/to/config.json` and try again."
)
config_path = os.environ["LIBRARIAN_CONFIG_PATH"]
try:
with open(config_path) as f:
config = json.load(f)
except FileNotFoundError:
raise ValueError(f"Librarian configuration file {config_path} not found.")
    if 'SECRET_KEY' not in config:
        print('cannot start server: must define the Flask "secret key" as the item '
              '"SECRET_KEY" in the config file at $LIBRARIAN_CONFIG_PATH', file=sys.stderr)
        sys.exit(1)
# TODO: configurable logging parameters will likely be helpful. We use UTC
# for timestamps using standard ISO-8601 formatting. The Python docs claim
# that 8601 is the default format but this does not appear to be true.
loglevel_cfg = config.get('log_level', 'info')
loglevel = _log_level_names.get(loglevel_cfg)
warn_loglevel = (loglevel is None)
if warn_loglevel:
loglevel = logging.INFO
logging.basicConfig(
level=loglevel,
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%dT%H:%M:%SZ'
)
import time
logging.getLogger('').handlers[0].formatter.converter = time.gmtime
logger = logging.getLogger('librarian')
if warn_loglevel:
        logger.warning('unrecognized value %r for "log_level" config item', loglevel_cfg)
tf = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask('librarian', template_folder=tf)
app.config.update(config)
db = SQLAlchemy(app)
return logger, app, db
logger, app, db = _initialize()
def is_primary_server():
"""Ugh, need to figure out new model to deal with all of this.
"""
if app.config.get('server', 'flask') != 'tornado':
return True
import tornado.process
return tornado.process.task_id() == 0
# We have to manually import the modules that implement services. It's not
# crazy to worry about circular dependency issues, but everything will be all
# right.
from . import webutil
from . import observation
from . import store
from . import file
from . import bgtasks
from . import search
from . import misc
# Finally ...
def get_version_info():
"""
Extract version info from version tag.
We're using setuptools_scm, so the git information is in the version tag.
The one exception is when we're running from a tagged release. In that case,
we get the git hash of the corresponding release from GitHub directly.
Parameters
----------
None
Returns
-------
tag : str
The semantic version of the installed librarian server.
git_hash : str
The git hash of the installed librarian server.
"""
parsed_version = parse_version(__version__)
tag = parsed_version.base_version
local = parsed_version.local
if local is None:
# we're running from a "clean" (tagged/released) repo
# get the git info from GitHub directly
from subprocess import CalledProcessError, check_output
gitcmd = [
"git",
"ls-remote",
"https://github.com/HERA-Team/librarian.git",
f"v{tag}",
]
try:
output = check_output(gitcmd).decode("utf-8")
git_hash = output.split()[0]
except CalledProcessError:
git_hash = "???"
else:
# check if version has "dirty" tag
split_local = local.split(".")
if len(split_local) > 1:
logger.warn("running from a codebase with uncommited changes")
# get git info from the tag--the hash has a leading "g" we ignore
git_hash = split_local[0][1:]
return tag, git_hash
def commandline(argv):
from . import bgtasks
version_string, git_hash = get_version_info()
logger.info('starting up Librarian %s (%s)', version_string, git_hash)
app.config['_version_string'] = version_string
app.config['_git_hash'] = git_hash
server = app.config.get('server', 'flask')
host = app.config.get('host', None)
port = app.config.get('port', 21106)
debug = app.config.get('flask_debug', False)
n_server_processes = app.config.get('n_server_processes', 1)
if host is None:
print('note: no "host" set in configuration; server will not be remotely accessible',
file=sys.stderr)
maybe_add_stores()
if n_server_processes > 1:
if server != 'tornado':
print('error: can only use multiple processes with Tornado server', file=sys.stderr)
sys.exit(1)
if server == 'tornado':
# Need to set up HTTP server and fork subprocesses before doing
# anything with the IOLoop.
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado import web
from .webutil import StreamFile
flask_app = WSGIContainer(app)
tornado_app = web.Application([
(r'/stream/.*', StreamFile),
(r'.*', web.FallbackHandler, {'fallback': flask_app}),
])
http_server = HTTPServer(tornado_app)
http_server.bind(port, address=host)
http_server.start(n_server_processes)
db.engine.dispose() # force new connection after potentially forking
do_mandc = app.config.get('report_to_mandc', False)
if do_mandc:
from . import mc_integration
mc_integration.register_callbacks(version_string, git_hash)
if server == 'tornado':
# Set up periodic report on background task status; also reminds us
# that the server is alive.
bgtasks.register_background_task_reporter()
if is_primary_server():
# Primary server is also in charge of checking out whether there's
# anything to do with our standing orders.
from tornado.ioloop import IOLoop
from . import search
IOLoop.current().add_callback(search.queue_standing_order_copies)
search.register_standing_order_checkin()
# Hack the logger to indicate which server we are.
import tornado.process
taskid = tornado.process.task_id()
if taskid is not None:
fmtr = logging.getLogger('').handlers[0].formatter
fmtr._fmt = fmtr._fmt.replace(': ', ' #%d: ' % taskid)
if server == 'flask':
print('note: using "flask" server, so background operations will not work',
file=sys.stderr)
app.run(host=host, port=port, debug=debug)
elif server == 'tornado':
from tornado.ioloop import IOLoop
IOLoop.current().start()
else:
print('error: unknown server type %r' % server, file=sys.stderr)
sys.exit(1)
use_globus = app.config.get("use_globus", False)
if use_globus:
have_all_info = True
# make sure we have the other information that we need
if "globus_client_id" not in app.config.keys():
print(
"error: globus_client_id must be in the config file to use "
"globus.",
file=sys.stderr,
)
have_all_info = False
if "globus_transfer_token" not in app.config.keys():
print(
"error: globus_transfer_token must be in the config file to use "
"globus.",
file=sys.stderr,
)
have_all_info = False
if not have_all_info:
app.config["use_globus"] = False
else:
# add the key just in case it wasn't there
app.config["use_globus"] = False
bgtasks.maybe_wait_for_threads_to_finish()
def maybe_add_stores():
"""Add any stores specified in the configuration file that we didn't already
know about.
"""
from .dbutil import SQLAlchemyError
from .store import Store
for name, cfg in app.config.get('add-stores', {}).items():
prev = Store.query.filter(Store.name == name).first()
if prev is None:
store = Store(name, cfg['path_prefix'], cfg['ssh_host'])
store.http_prefix = cfg.get('http_prefix')
store.available = cfg.get('available', True)
db.session.add(store)
try:
db.session.commit()
except SQLAlchemyError:
                db.session.rollback()
raise # this only happens on startup, so just refuse to start
|
nilq/baby-python
|
python
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from datetime import datetime
import pandas as pd
class Funcion_salida:
def Retirar_desplegar_teclado(self):
MOV = -100
        # reposition the widgets
self.salida_nombre.setGeometry(self.width/3.6, (self.height/2.7)+MOV,
self.width/4.2, self.height/12)
self.salida_cedula.setGeometry(self.width/3.6, (self.height/2.7)+(self.height/8.5)+MOV,
self.width/4.2, self.height/12)
self.salida_salida.setGeometry(self.width/1.8, (self.height/2.7)+MOV,
self.width/6, self.height/4.9)
self.Teclado()
self.NotTecladoNumerico()
self.campo = 'retirar-nombre'
def Retirar_guardar_teclado(self):
        # reposition the widgets
self.salida_nombre.setGeometry(self.width/3.6, self.height/2.7,
self.width/4.2, self.height/12)
self.salida_cedula.setGeometry(self.width/3.6, (self.height/2.7)+self.height/8.5,
self.width/4.2, self.height/12)
self.salida_salida.setGeometry(self.width/1.8, self.height/2.7,
self.width/6, self.height/4.9)
self.NotTeclado()
def Retirar_desplegar_teclado_numerico_cedula(self):
MOV = -100
        # reposition the widgets
self.salida_nombre.setGeometry(self.width/3.6, (self.height/2.7)+MOV,
self.width/4.2, self.height/12)
self.salida_cedula.setGeometry(self.width/3.6, (self.height/2.7)+(self.height/8.5)+MOV,
self.width/4.2, self.height/12)
self.salida_salida.setGeometry(self.width/1.8, (self.height/2.7)+MOV,
self.width/6, self.height/4.9)
self.NotTeclado()
self.TecladoNumerico()
self.campo = 'retirar-cedula'
    def restar_deltas(self, HoraOut, HoraIn):
        '''
        Return the difference between two "HH:MM" timestamps, in minutes,
        e.g. restar_deltas("10:30", "09:45") -> "45".
        '''
        Fecha_Hoy = datetime.today().strftime('%d-%m-%Y')
        HoraOut = HoraOut.split(':')
        HoraIn = HoraIn.split(':')
        # Total time in minutes
        if not Fecha_Hoy == "aaa":
            NumOut = int(HoraOut[0]) * 60 + int(HoraOut[1])
            NumIn = int(HoraIn[0]) * 60 + int(HoraIn[1])
            delta = NumOut - NumIn
            return str(delta)
        else:
            self.dialogo_mensaje = "An error occurred while verifying \nthe dates; if it persists, contact \nthe manufacturer \n "
            self.dialogo.setInformativeText(self.dialogo_mensaje)
            self.dialogo.show()
    def SalidaSalida(self):
        # local variables
        nombre = self.salida_nombre.text()
        cedula = self.salida_cedula.text()
        HoraOut = datetime.today().strftime('%H:%M')
        try:
            df = pd.read_csv('src/models/data/DB.csv')
            if nombre != "" and cedula != "":  # make sure no field is empty
                if not nombre.isdigit() and not cedula.isalpha():  # reject digits/letters in the wrong field
                    persona = df[(df['Cedula'] == str(cedula)) & (df['IsIn'])].index.tolist()
                    if persona:
                        ced = persona[0]
                        with open("src/models/data/DB.csv", "r") as f:
                            lineas = f.readlines()
                        lineas[ced + 1] = lineas[ced + 1].replace('HO*', HoraOut).replace('D*', self.restar_deltas(
                            HoraOut, df['HoraIn'][ced])).replace('True', 'False')
                        with open("src/models/data/DB.csv", "w") as f:
                            f.writelines(lineas)
                        # Confirmation
                        self.dialogo_mensaje = "Checked out successfully\n "
                        self.dialogo.setInformativeText(self.dialogo_mensaje)
                        self.dialogo.show()
                        self.HomeWindow()
                    else:
                        self.dialogo_mensaje = "Error, that user was not found\n "
                        self.dialogo.setInformativeText(self.dialogo_mensaje)
                        self.dialogo.show()
                else:
                    self.dialogo_mensaje = "Error, check the data entered\n "
                    self.dialogo.setInformativeText(self.dialogo_mensaje)
                    self.dialogo.show()
            else:
                self.dialogo_mensaje = "All fields must be filled in\nbefore continuing"
                self.dialogo.setInformativeText(self.dialogo_mensaje)
                self.dialogo.show()
        except Exception as e:
            print(e)
            self.dialogo_mensaje = "Error, try again\n\nIf the error persists, contact the manufacturer"
            self.dialogo.setInformativeText(self.dialogo_mensaje)
            self.dialogo.show()
|
nilq/baby-python
|
python
|
class Command:
    """Bind a name and help text to a callable so it can be dispatched later."""
    def __init__(self, name, obj, method, help_text):
        assert callable(method)
self._name = str(name)
self._obj = obj
self._method = method
self._help = str(help_text)
@property
def name(self):
return self._name
@property
def help(self):
return self._help
    def __call__(self, *args):
        return self._method(*args)
def __str__(self):
return self.name
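# A minimal usage sketch (not part of the original file); `greet` and its
# registration are illustrative assumptions.
if __name__ == '__main__':
    def greet(who):
        return 'hello, %s' % who
    cmd = Command('greet', None, greet, 'Greet somebody by name')
    print(cmd('world'))        # -> hello, world
    print(cmd.name, '-', cmd.help)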
|
nilq/baby-python
|
python
|
from __future__ import print_function
from hawc_hal import HAL
import matplotlib.pyplot as plt
from threeML import *
import pytest
from conftest import point_source_model
@pytest.fixture(scope='module')
def test_fit(roi, maptree, response, point_source_model):
pts_model = point_source_model
hawc = HAL("HAWC",
maptree,
response,
roi)
# Use from bin 1 to bin 9
hawc.set_active_measurements(1, 9)
# Display information about the data loaded and the ROI
hawc.display()
# Get the likelihood value for the saturated model
hawc.get_saturated_model_likelihood()
data = DataList(hawc)
jl = JointLikelihood(pts_model, data, verbose=True)
param_df, like_df = jl.fit()
return jl, hawc, pts_model, param_df, like_df, data
def test_simulation(test_fit):
jl, hawc, pts_model, param_df, like_df, data = test_fit
sim = hawc.get_simulated_dataset("HAWCsim")
sim.write("sim_resp.hd5", "sim_maptree.hd5")
def test_plots(test_fit):
jl, hawc, pts_model, param_df, like_df, data = test_fit
# See the model in counts space and the residuals
fig = hawc.display_spectrum()
# Save it to file
fig.savefig("hal_src_residuals.png")
# Look at the data
fig = hawc.display_stacked_image(smoothing_kernel_sigma=0.17)
# Save to file
fig.savefig("hal_src_stacked_image.png")
# Look at the different energy planes (the columns are model, data, residuals)
fig = hawc.display_fit(smoothing_kernel_sigma=0.3)
fig.savefig("hal_src_fit_planes.png")
fig = hawc.display_fit(smoothing_kernel_sigma=0.3, display_colorbar=True)
fig.savefig("hal_src_fit_planes_colorbar.png")
def test_compute_TS(test_fit):
jl, hawc, pts_model, param_df, like_df, data = test_fit
# Compute TS
src_name = pts_model.pts.name
jl.compute_TS(src_name, like_df)
def test_goodness(test_fit):
jl, hawc, pts_model, param_df, like_df, data = test_fit
# Compute goodness of fit with Monte Carlo
gf = GoodnessOfFit(jl)
gof, param, likes = gf.by_mc(10)
print("Prob. of obtaining -log(like) >= observed by chance if null hypothesis is true: %.2f" % gof['HAWC'])
# it is a good idea to inspect the results of the simulations with some plots
# Histogram of likelihood values
fig, sub = plt.subplots()
likes.hist(ax=sub)
# Overplot a vertical dashed line on the observed value
plt.axvline(jl.results.get_statistic_frame().loc['HAWC', '-log(likelihood)'],
color='black',
linestyle='--')
fig.savefig("hal_sim_all_likes.png")
# Plot the value of beta for all simulations (for example)
fig, sub = plt.subplots()
param.loc[(slice(None), ['pts.spectrum.main.Cutoff_powerlaw.index']), 'value'].plot()
fig.savefig("hal_sim_all_index.png")
def test_fit_with_free_position(test_fit):
jl, hawc, pts_model, param_df, like_df, data = test_fit
hawc.psf_integration_method = 'fast'
# Free the position of the source
pts_model.pts.position.ra.free = True
pts_model.pts.position.dec.free = True
# Set boundaries (no need to go further than this)
ra = pts_model.pts.position.ra.value
dec = pts_model.pts.position.dec.value
pts_model.pts.position.ra.bounds = (ra - 0.5, ra + 0.5)
pts_model.pts.position.dec.bounds = (dec - 0.5, dec + 0.5)
# Fit with position free
param_df, like_df = jl.fit()
# Make localization contour
    # Expected localization (for reference):
    #   pts.position.ra  = (8.362 +/- 0.00028) x 10 deg
    #   pts.position.dec = (2.214 +/- 0.00025) x 10 deg
a, b, cc, fig = jl.get_contours(pts_model.pts.position.dec, 22.13, 22.1525, 5,
pts_model.pts.position.ra, 83.615, 83.635, 5)
plt.plot([ra], [dec], 'x')
fig.savefig("hal_src_localization.png")
hawc.psf_integration_method = 'exact'
pts_model.pts.position.ra.free = False
pts_model.pts.position.dec.free = False
def test_bayesian_analysis(test_fit):
jl, hawc, pts_model, param_df, like_df, data = test_fit
# Of course we can also do a Bayesian analysis the usual way
pts_model.pts.position.ra.free = False
pts_model.pts.position.dec.free = False
# For this quick example, let's use a uniform prior for all parameters
for parameter in list(pts_model.parameters.values()):
if parameter.fix:
continue
if parameter.is_normalization:
parameter.set_uninformative_prior(Log_uniform_prior)
else:
parameter.set_uninformative_prior(Uniform_prior)
# Let's execute our bayes analysis
bs = BayesianAnalysis(pts_model, data)
bs.set_sampler("emcee")
bs.sampler.setup(n_iterations=20, n_burn_in=20, n_walkers=30)
_ = bs.sample()
fig = bs.results.corner_plot()
fig.savefig("hal_corner_plot.png")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import sys
sys.path.insert(0,os.environ['HOME']+'/P3D-PLASMA-PIC/p3dpy/')
import numpy as np
from mpi4py import MPI
from subs import create_object
import AnalysisFunctions as af
#
# MPI INITIALIZATION
#
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
status = MPI.Status()
#
# CREATE P3D OBJECT & OPEN OUTPUT FILE
#
rc=create_object()
rc.vars2load(['bx','by'])
rc.loadenergies()
#
# DECIDE THE NUMBER OF SLICES
#
extslc=rc.numslices%size
nt=(rc.numslices-extslc)//size  # integer division: nt is used as a slice count
if rank==0:
bs=0; fs=nt+extslc
else:
bs=extslc+rank*nt; fs=bs+nt
print(rank, bs, fs, nt)
print()
print()
#
# CREATE OUTPUT ARRAYS
#
if rank==0:
sclmx=np.zeros(rc.numslices); krtmx=np.zeros(rc.numslices); tsris=np.zeros(rc.numslices)
else:
snddata=np.zeros((3,nt))
#
# MAIN LOOP
#
bl=0.2; fl=5.; stepl=2
if fl > 0.5*rc.lx:
fl=0.5*rc.lx
if bl < rc.dx:
bl=rc.dx
bsl=int(bl/rc.dx); fsl=int(fl/rc.dx)
if rank == 0:
t_start=MPI.Wtime()
comm.Barrier()
for i in range(bs,fs):
rc.loadslice(i)
    idx = i - bs  # slot in this rank's send buffer; receiver reads the same ascending order
#Assign the appropriate nonlinear time to the timeseries
if rank == 0:
tsris[i] = rc.ta[np.argmin(np.abs(rc.t-rc.time))]
else:
snddata[0,idx] = rc.ta[np.argmin(np.abs(rc.t-rc.time))]
#Compute the scale dependent kurtosis between bl,fl
rx,sdx=af.sdk(rc.bx,bs=bsl,fs=fsl,step=stepl,dx=rc.dx,ax=0)
ry,sdy=af.sdk(rc.by,bs=bsl,fs=fsl,step=stepl,dx=rc.dx,ax=1)
#Find the location of maximum value of kurtosis between sdx,sdy
tsdk=np.array([sdx,sdy])
rr =np.array([rx,ry])
xx=np.unravel_index(tsdk.argmax(),tsdk.shape)
if rank == 0:
sclmx[i]=rr[xx]
krtmx[i]=tsdk[xx]
print('rank,t,sclmx,mxkurt\t',rank,tsris[i],sclmx[i],krtmx[i])
else:
snddata[1,idx]=rr[xx]
snddata[2,idx]=tsdk[xx]
print('rank,t,sclmx,mxkurt\t',rank,snddata[0,idx],snddata[1,idx],snddata[2,idx])
#
# PROC 0 COLLECTS DATA AND WRITES THE FILE
#
if rank > 0:
comm.send(snddata, dest=0, tag=13)
else:
for src in range(1,comm.size):
tbs=extslc+src*nt; tfs=extslc+(src+1)*nt
rcvdata=comm.recv(source=src,tag=13,status=status)
for j in range(tbs,tfs):
tsris[j] =rcvdata[0,j-tbs]
sclmx[j]=rcvdata[1,j-tbs]
krtmx[j]=rcvdata[2,j-tbs]
#
comm.Barrier()
#
if rank == 0:
print('Done Computing. Writing the file now')
outf=open('mxkurtdi.'+rc.dirname+'.dat','w')
    print('#', 't,\t sclmx,\t krtmx', file=outf)
for i in range(rc.numslices):
print(tsris[i],sclmx[i],krtmx[i], file=outf)
outf.close()
t_fin = MPI.Wtime()-t_start
print('Total time taken %0.3f'%t_fin)
#
|
nilq/baby-python
|
python
|
import unittest
from sharepp import SharePP, Coin
class SharePPTest(unittest.TestCase):
def test_valid_input(self):
price = SharePP.get_etf_price("LU1781541179")
        self.assertIsInstance(price, float)
def test_invalid_input(self):
try:
SharePP.get_etf_price("invalid_isin")
self.fail("Expected exception not thrown!")
except ValueError as e:
self.assertEqual(
"You must provide a string object representing a valid ISIN!",
str(e))
def test_get_coin_price(self):
for coin in Coin:
price = SharePP.get_coin_price(coin)
            self.assertIsInstance(price, float)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-10-19 16:23:55
from __future__ import unicode_literals
from flask import render_template, request, json
from flask import Response
from .app import app
from pyspider.libs import result_dump
@app.route('/results')
def result():
resultdb = app.config['resultdb']
project = request.args.get('project')
group = request.args.get('group')
taskid = request.args.get('taskid')
url = request.args.get('url')
upload_status = request.args.get('upload_status')
offset = int(request.args.get('offset', 0))
limit = int(request.args.get('limit', 20))
count = resultdb.count(project, group, taskid=taskid, url=url, upload_status=upload_status)
results = list(resultdb.select(project, group, taskid=taskid, url=url, upload_status=upload_status, offset=offset, limit=limit))
return render_template(
"result.html", count=count, results=results, group=group,
result_formater=result_dump.result_formater,
project=project, offset=offset, limit=limit, json=json, taskid='' if taskid is None else taskid,
url='' if url is None else url, upload_status='' if upload_status is None else upload_status
)
@app.route('/results/dump/<project>-<group>.<_format>')
def dump_result(project, group, _format):
resultdb = app.config['resultdb']
offset = int(request.args.get('offset', 0)) or None
limit = int(request.args.get('limit', 0)) or None
results = resultdb.select(project, group, offset=offset, limit=limit)
if _format == 'json':
valid = request.args.get('style', 'rows') == 'full'
return Response(result_dump.dump_as_json(results, valid),
mimetype='application/json')
elif _format == 'txt':
return Response(result_dump.dump_as_txt(results),
mimetype='text/plain')
elif _format == 'csv':
return Response(result_dump.dump_as_csv(results),
mimetype='text/csv')
|
nilq/baby-python
|
python
|
import os
import shutil
import subprocess
def create_videos(video_metadata, relevant_directories, frame_name_format, delete_source_imagery):
stylized_frames_path = relevant_directories['stylized_frames_path']
dump_path_bkg_masked = relevant_directories['dump_path_bkg_masked']
dump_path_person_masked = relevant_directories['dump_path_person_masked']
combined_img_bkg_pattern = os.path.join(dump_path_bkg_masked, frame_name_format)
combined_img_person_pattern = os.path.join(dump_path_person_masked, frame_name_format)
stylized_frame_pattern = os.path.join(stylized_frames_path, frame_name_format)
dump_path = os.path.join(stylized_frames_path, os.path.pardir)
combined_img_bkg_video_path = os.path.join(dump_path, f'{os.path.basename(dump_path_bkg_masked)}.mp4')
combined_img_person_video_path = os.path.join(dump_path, f'{os.path.basename(dump_path_person_masked)}.mp4')
stylized_frame_video_path = os.path.join(dump_path, 'stylized.mp4')
ffmpeg = 'ffmpeg'
if shutil.which(ffmpeg): # if ffmpeg is in system path
audio_path = relevant_directories['audio_path']
def build_ffmpeg_call(img_pattern, audio_path, out_video_path):
input_options = ['-r', str(video_metadata['fps']), '-i', img_pattern, '-i', audio_path]
encoding_options = ['-c:v', 'libx264', '-crf', '25', '-pix_fmt', 'yuv420p', '-c:a', 'copy']
pad_options = ['-vf', 'pad=ceil(iw/2)*2:ceil(ih/2)*2'] # libx264 won't work for odd dimensions
return [ffmpeg] + input_options + encoding_options + pad_options + [out_video_path]
subprocess.call(build_ffmpeg_call(combined_img_bkg_pattern, audio_path, combined_img_bkg_video_path))
subprocess.call(build_ffmpeg_call(combined_img_person_pattern, audio_path, combined_img_person_video_path))
subprocess.call(build_ffmpeg_call(stylized_frame_pattern, audio_path, stylized_frame_video_path))
else:
raise Exception(f'{ffmpeg} not found in the system path, aborting.')
if delete_source_imagery:
shutil.rmtree(dump_path_bkg_masked)
shutil.rmtree(dump_path_person_masked)
shutil.rmtree(stylized_frames_path)
print('Deleting stylized/combined source images done.')
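# Hedged usage sketch: every value below (fps, directory names, frame pattern)
# is an illustrative assumption, not taken from the original project.
if __name__ == '__main__':
    create_videos(
        video_metadata={'fps': 30},
        relevant_directories={
            'stylized_frames_path': 'out/stylized',
            'dump_path_bkg_masked': 'out/bkg_masked',
            'dump_path_person_masked': 'out/person_masked',
            'audio_path': 'out/audio.aac',
        },
        frame_name_format='%06d.jpg',
        delete_source_imagery=False,
    )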
|
nilq/baby-python
|
python
|
from django.urls import path, include
from mighty.functions import setting
from mighty.applications.user import views
urlpatterns = [
path('user/', include([
path('create/', views.CreateUserView.as_view(), name="use-create"),
])),
]
api_urlpatterns = [
path('user/', include([
path('form/', include([
path('create/', views.CreatUserFormView.as_view(), name="api-user-form-create"),
])),
path('check/', include([
path('email/', views.UserEmailCheckView.as_view(), name="api-user-check-email"),
path('phone/', views.UserPhoneCheckView.as_view(), name="api-user-check-phone"),
])),
path('', views.CreateUserView.as_view(), name="api-user-profile"),
path('profile/', views.ProfileView.as_view(), name="api-user-profile"),
path('invitation/', include([
path('<uuid:uid>/', views.InvitationDetailView.as_view(), name="api-user-invitation"),
path('<uuid:uid>/<str:action>/', views.InvitationDetailView.as_view(), name="api-user-invitation-action"),
]))
]))
]
|
nilq/baby-python
|
python
|
from django.core.management.base import BaseCommand, CommandError
from openfood.models import Product, Category, Position
from django.db import models
from datetime import datetime
import sys
import requests
class Collector:
"""
Get products from Open Food Facts database.
Register fields for 'Products' & 'Categories'.
The many to many connection table 'Position' contains 'rank' field,
according to the position of each category in the product hierarchy.
"""
def __init__(self, url="https://fr.openfoodfacts.org/cgi/search.pl",
number_by_grade=[
('a', 150), ('b', 150), ('c', 150), ('d', 150), ('e', 150)
],
categories=[
"Salty snacks", "Cheeses", "Beverage", "Sauces",
"Biscuits", "Frozen foods", "pizzas", "chocolats",
"Candies", "Snacks sucrés",]
):
self.url = url
self.grades = number_by_grade
self.categories = categories
self.products = []
def fetch(self, category="Cheese", grade="a", products_number=50,
product_keys = [ 'product_name', 'nutrition_grades',
'url', 'code', 'brands', 'stores', 'categories_hierarchy',
'image_url', ]):
"""
        Get [products_number] products in [category] & grade [grade],
        keeping only the fields listed in [product_keys].
"""
args = {
'action': "process",
'tagtype_0': "categories",
'tag_contains_0': "contains",
'tag_0': category,
'nutrition_grades': grade,
'json': 1,
'page_size': 1000,
}
response = requests.get(self.url, params=args)
products = response.json()["products"]
products_to_store = []
for product in products:
product_to_store = {}
try:
for key in product_keys:
product_to_store[key] = product[key]
products_to_store.append(product_to_store)
except KeyError:
# print("Key Error on {}.".format(key))
pass
if len(products_to_store) == products_number:
print("Number reached !!!")
break
self.products.extend(products_to_store)
def register(self):
for product in self.products:
new_product = Product()
new_product.product_name = product['product_name']
new_product.grade = product['nutrition_grades']
new_product.url = product['url']
new_product.barcode = product['code']
new_product.brand = product['brands']
new_product.store = product['stores']
new_product.product_img_url = product['image_url']
new_product.save()
for i, category in enumerate(product['categories_hierarchy'][::-1]):
new_category = Category.objects.get_or_create(
category_name=category,
)
new_position = Position()
new_position.product = new_product
new_position.category = new_category[0]
new_position.rank = i
new_position.save()
def populate(self):
for category in self.categories:
for grade in self.grades:
self.fetch(category=category, grade=grade[0],
products_number=grade[1])
print("Products:", len(self.products))
print("Registering products in database...")
self.register()
print("{} products registered in database.".format(len(self.products)))
def empty(self):
products_to_delete = Product.objects.filter(favorized=0)
products_to_delete_number = len(products_to_delete)
total_products = len(Product.objects.all())
products_to_delete.delete()
print("-\n{} deleted on a total of {}.-\n".format(
products_to_delete_number,
total_products,
)
)
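# Hedged usage sketch (normally driven by the management command below):
#
#     collector = Collector()
#     collector.empty()      # delete products with favorized == 0
#     collector.populate()   # fetch from Open Food Facts, then register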
class Command(BaseCommand):
"""
Django command to refresh data.
"""
def handle(self, *args, **options):
collector = Collector()
orig_stdout = sys.stdout
if 'win' in sys.platform:
filename = 'refresh_logs/refresh-{}.txt'.format(datetime.strftime(datetime.now(), "%d-%m-%Y@%H-%M-%S"))
else:
filename = '/home/gil/oc-projet-10/refresh_logs/refresh-{}.txt'.format(datetime.strftime(datetime.now(), "%d-%m-%Y@%H-%M-%S"))
log = open(filename, 'w')
sys.stdout = log
print("Operation started at {}.\n-".format(datetime.strftime(datetime.now(), "%H:%M:%S")))
collector.empty()
collector.populate()
print("-\nOperation ended at {}.".format(datetime.strftime(datetime.now(), "%H:%M:%S")))
sys.stdout = orig_stdout
|
nilq/baby-python
|
python
|
import altair as alt
from model import data_wrangle
def return_fatality_bar_chart(value=None):
"""
creates an altair chart object for the plot of the fatality barchart.
Parameters
----------
value: the value passed in from the radio buttons.
"0", "1", and "2" for "first-world", "non-first-world", and "both" respectively.
Returns
-------
Altair.Chart object of the bar chart.
"""
value = '0' if value is None else value
plot2 = alt.Chart(data_wrangle.chart_2_data,
title="Number of fatalities for airlines that had an incident between 1985 and 2014"
).mark_bar().encode(
alt.Y("airline:N",
title="Airline (* includes regional subsidiaries)",
sort=alt.EncodingSortField(
field="total_fatalities_per_b_avail_seat_km",
order="ascending")),
alt.X("total_fatalities_per_b_avail_seat_km:Q",
axis=alt.Axis(
title="Rate of fatalities per billion available seat kilometers")),
tooltip=[alt.Tooltip(shorthand="total_fatalities_per_b_avail_seat_km:Q",
title="count of fatalities")]
).configure_mark(color="blue"
).configure_title(fontSize=18
).configure_legend(labelFontSize=13
).configure_axis(labelFontSize=11,
titleFontSize=14
).properties(width=800,
height=600)
if value != "2":
if value == "0":
color_range = ["blue", "grey"]
else: # To remove 'Non First World', we do not need to do anything here
color_range = ["gray", "blue"]
plot2 = alt.Chart(data_wrangle.chart_2_data,
title="Rate of fatal incidents for airlines between 1985 and 2014"
).mark_bar().encode(
alt.Y("airline:N",
title="Airline (* includes regional subsidiaries)",
sort=alt.EncodingSortField(
field="total_fatalities_per_b_avail_seat_km",
order="ascending")),
alt.X("total_fatalities_per_b_avail_seat_km:Q",
axis=alt.Axis(
title="Normalized Rate of fatal incidents (incident/billion km/seat)")),
alt.Color("first_world",
title=None,
scale=alt.Scale(domain=["First World", "Other"],
range=color_range)),
tooltip=[alt.Tooltip(shorthand="total_fatalities_per_b_avail_seat_km:Q",
title="count of fatalities")]
).configure_title(fontSize=18
).configure_legend(labelFontSize=13
).configure_axis(labelFontSize=11,
titleFontSize=14
).properties(width=800, height=600)
return plot2
return_fatality_bar_chart("0")  # the radio-button values are strings ("0", "1", "2")
|
nilq/baby-python
|
python
|
import sys
from collections import OrderedDict
import calplot
import pandas as pd
def loadData(filename: str):
df = pd.read_csv(filename, usecols=['DateTime', 'Open', 'High', 'Low', 'Close', 'Volume'], na_values=['nan'])
df['DateTime'] = pd.to_datetime(df['DateTime'], utc=True).dt.tz_convert('US/Eastern')
df = df.set_index('DateTime')
return df
def resample(df):
return df.resample('1min').agg(
OrderedDict([
('Open', 'first'),
('High', 'max'),
('Low', 'min'),
('Close', 'last'),
('Volume', 'sum'),
])
).dropna()
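# Hedged self-check of the aggregation above (the sample ticks are made up):
# within each 1-minute bucket, Open keeps the first tick, High/Low the
# extremes, Close the last tick, and Volume the sum.
def _resample_demo():
    idx = pd.date_range('2021-01-04 09:30:00', periods=4, freq='15s', tz='US/Eastern')
    val = [10., 11., 9., 12.]
    ticks = pd.DataFrame({'Open': val, 'High': val, 'Low': val,
                          'Close': val, 'Volume': [1, 2, 3, 4]}, index=idx)
    bar = resample(ticks).iloc[0]
    assert (bar['Open'], bar['High'], bar['Low'], bar['Close'], bar['Volume']) == \
        (10., 12., 9., 12., 10)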
if __name__ == '__main__':
file_path = sys.argv[1]
data_frame = loadData(file_path)
# data_frame = resample(data_frame)
data_frame['hasDay'] = 1
fig, _ = calplot.calplot(data_frame['hasDay'], cmap='Blues', colorbar=False)
print(f"Calendar hitmap has been saved to {file_path}_hitmap.png")
fig.savefig(f"{file_path}_hitmap.png")
|
nilq/baby-python
|
python
|
from aiohttp import web, test_utils
import typing
import asyncio
import functools
from .const import InputQuery
import attr
@attr.s
class AppContainer:
host: typing.Optional[str] = attr.ib(default=None)
port: typing.Optional[int] = attr.ib(default=None)
_app: web.Application = attr.ib(factory=web.Application)
_route: web.RouteTableDef = attr.ib(factory=web.RouteTableDef, init=False)
appRunner = attr.ib(type=web.AppRunner)
@appRunner.default
def app_runner_def(self):
return web.AppRunner(self._app)
site = attr.ib(type=web.TCPSite, default=None)
def get(self, path, **kwargs):
return self._route.get(path, **kwargs)
def put(self, path, **kwargs):
return self._route.put(path, **kwargs)
async def start(self):
self._app.add_routes(self._route)
await self.appRunner.setup()
self.site = web.TCPSite(self.appRunner, self.host, self.port)
await self.site.start()
def get_app(self):
self._app.add_routes(self._route)
return self._app
async def stop(self):
await self.site.stop()
def test_client(self) -> test_utils.TestClient:
return test_utils.TestClient(test_utils.TestServer(self.get_app()), loop=asyncio.get_event_loop())
async def make_server(handle: typing.Callable[[web.Request], typing.Awaitable[web.Response]], port: int) -> typing.Tuple[web.Server, web.TCPSite]:
"""
Make server and start it immidiatly
:param handle: handler coroutinefunction
:param port: port on wich server will be started
:return:
"""
    assert asyncio.iscoroutinefunction(handle), 'handle must be a coroutine function'
server = web.Server(handle)
runner = web.ServerRunner(server)
await runner.setup()
site = web.TCPSite(runner, 'localhost', port)
await site.start()
return server, site
def cancelok(foo):
"""
Deco foo not to raise on cancelation
:param foo:
:return:
"""
@functools.wraps(foo)
async def wrapper(*args, **kwargs):
try:
return await foo(*args, **kwargs)
except asyncio.CancelledError:
return
return wrapper
def make_query(query: dict):
if isinstance(query, InputQuery):
query = query._asdict()
return '&'.join([f'{x}={y}' for x, y in query.items()])
|
nilq/baby-python
|
python
|
'''OpenGL extension SGIS.texture_color_mask
Overview (from the spec)
This extension implements the same functionality for texture
updates that glColorMask implements for color buffer updates.
Masks for updating textures with indexed internal formats
(the analog for glIndexMask) should be supported by a separate extension.
The extension allows an application to update a subset of
components in an existing texture. The masks are applied after
all pixel transfer operations have been performed, immediately
prior to writing the texel value into texture memory. They
apply to all texture updates.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/SGIS/texture_color_mask.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_SGIS_texture_color_mask'
GL_TEXTURE_COLOR_WRITEMASK_SGIS = constant.Constant( 'GL_TEXTURE_COLOR_WRITEMASK_SGIS', 0x81EF )
glTextureColorMaskSGIS = platform.createExtensionFunction(
'glTextureColorMaskSGIS', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLboolean, constants.GLboolean, constants.GLboolean, constants.GLboolean,),
doc = 'glTextureColorMaskSGIS( GLboolean(red), GLboolean(green), GLboolean(blue), GLboolean(alpha) ) -> None',
argNames = ('red', 'green', 'blue', 'alpha',),
)
def glInitTextureColorMaskSGIS():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
|
nilq/baby-python
|
python
|
from .api import AppSyncAPI
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import time
from typing import List, Dict, Any
from chaosaws import aws_client
from chaoslib.exceptions import FailedActivity
from chaosaws.types import AWSResponse
from chaoslib.types import Configuration, Secrets
from logzero import logger
from .constants import OS_LINUX, OS_WINDOWS, GREP_PROCESS
from chaosaws.ec2_os import construct_script_content
__all__ = ["describe_os_type", "describe_instance",
"ensure_tc_installed", "ensure_tc_uninstalled",
"grep_process_exist"]
def describe_os_type(instance_id, configuration, secrets):
res = describe_instance(instance_id, configuration, secrets)
os = "linux"
try:
os = res['Reservations'][0]['Instances'][0]['Platform']
except KeyError:
logger.warning("No Platform key, so it is Linux")
return os
def describe_instance(instance_id: str,
configuration: Configuration = None,
secrets: Secrets = None) -> AWSResponse:
client = aws_client('ec2', configuration, secrets)
return client.describe_instances(InstanceIds=[
instance_id,
])
def ensure_tc_installed(instance_ids: List[str] = None,
configuration: Configuration = None,
secrets: Secrets = None) -> List[AWSResponse]:
response = []
for instance_id in instance_ids:
response.append(
__simple_ssm_helper(
instance_id=instance_id,
configuration=configuration,
secrets=secrets,
default_timeout=30,
action="ensure_tc_installed",
failure_matcher="Install iproute-tc package failed."
)
)
return response
def ensure_tc_uninstalled(instance_ids: List[str] = None,
configuration: Configuration = None,
secrets: Secrets = None) -> List[AWSResponse]:
response = []
for instance_id in instance_ids:
response.append(
__simple_ssm_helper(
instance_id=instance_id,
configuration=configuration,
secrets=secrets,
default_timeout=30,
action="ensure_tc_uninstalled",
failure_matcher="Remove iproute-tc package failed."
)
)
return response
def grep_process_exist(instance_ids: List[str] = None,
process_name: str = None,
configuration: Configuration = None,
secrets: Secrets = None) -> List[AWSResponse]:
"""
Grep pid of process name
Parameters
----------
    instance_ids : List[str]
        The EC2 instances to run the check on.
    process_name : str
        Name of the process to grep for
configuration : Configuration
Chaostoolkit Configuration
secrets : Secrets
Chaostoolkit Secrets
"""
logger.debug(
"Start network_latency: configuration='{}', instance_ids='{}'".format(
configuration, instance_ids))
response = []
try:
for instance in instance_ids:
param = dict()
param["duration"] = "1"
param["instance_id"] = instance
param["process_name"] = process_name
response.append(
__simple_ssm_helper(instance_id=instance,
configuration=configuration,
secrets=secrets,
action=GREP_PROCESS,
parameters=param)
)
return response
except Exception as x:
raise FailedActivity(
"failed issuing a execute of shell script via AWS SSM {}".format(
str(x)
))
###############################################################################
# Private helper functions
###############################################################################
def __simple_ssm_helper(instance_id: str,
configuration: Configuration = None,
secrets: Secrets = None,
default_timeout: int = 30,
action: str = None,
parameters: Dict[str, Any] = None,
failure_matcher: str = "failed") -> AWSResponse:
client = aws_client("ssm", configuration, secrets)
if not instance_id:
raise FailedActivity(
"you must specify the instance_id"
)
try:
if describe_os_type(instance_id, configuration, secrets) == "windows":
os_type = OS_WINDOWS
# TODO with PowerShell
cmd = ""
document_name = ""
else:
os_type = OS_LINUX
document_name = "AWS-RunShellScript"
res_send_command = client.send_command(
InstanceIds=[instance_id],
DocumentName=document_name,
Parameters={
'commands':
[construct_script_content(action, os_type, parameters)]
},
)
cmd_id = res_send_command["Command"]["CommandId"]
logger.info("ssm run command is sent, id {}".format(cmd_id))
totalwait = 0
interval = 1
while True:
res_list = client.list_command_invocations(
CommandId=cmd_id,
Details=True
)
try:
cp = res_list['CommandInvocations'][0]['CommandPlugins'][0]
status = cp['Status']
if status == "InProgress":
time.sleep(interval)
totalwait += interval
if totalwait > default_timeout:
raise FailedActivity(
"Script exceeded default timeout {}".format(
default_timeout
)
)
continue
elif status == "Failed":
break
elif status == "Success":
break
else:
break
except IndexError:
time.sleep(1)
continue
for command_invocation in res_list['CommandInvocations']:
for invocation in command_invocation['CommandPlugins']:
if invocation['Name'] == 'aws:runShellScript':
if failure_matcher in invocation['Output']:
raise FailedActivity(
"The result of command failed as:\n{}".format(
failure_matcher
)
)
logger.info("ssm run command status {}"
.format(invocation['Status']))
logger.info("ssm rum command result \n{}"
.format(invocation['Output'].rstrip('\n')))
return invocation['Output'].rstrip('\n')
except Exception as x:
raise FailedActivity(
"failed issuing a execute of shell script:\n{}".format(x))
|
nilq/baby-python
|
python
|
import torch as T
import dataclasses as dc
from typing import Optional, Callable
def vanilla_gradient(
        output, input,
        filter_outliers_quantiles: tuple[float, float] = (.005, .995)):
    # grad_outputs lets this handle batched (non-scalar) outputs
    map = T.autograd.grad(output, input, grad_outputs=T.ones_like(output))
assert isinstance(map, tuple) and len(map) == 1, 'sanity check'
map = map[0]
# --> filter the bottom 0.5% and top 0.5% of gradient values since
# SmoothGrad paper suggests they are outliers
low, hi = filter_outliers_quantiles
map.clamp_(map.quantile(low), map.quantile(hi))
return map
@dc.dataclass
class SmoothGrad:
"""Wrap a model. Instead of outputting a prediction, generate SmoothGrad
saliency maps for given image and an output class index to explain.
>>> sg = SmoothGrad(model)
>>> explanation = sg(x, index_of_class_to_explain=0)
"""
model: T.nn.Module
layer: Optional[T.nn.Module] = None # defaults to a saliency map w.r.t. input
saliency_method:str = 'vanilla_gradient'
# smoothgrad hyperparameters
nsamples:int = 30 # paper suggests less than 50
std_spread:float = .15 # paper suggests values satisfying std / (max-min intensities) in [.1,.2], so std = .15*(max-min)
apply_before_mean:Callable = lambda x: x**2 # apply a function (like absolute value or magnitude or clip extreme values) before computing mean over samples.
def __call__(self, x: T.Tensor, index_of_class_to_explain:T.Tensor):
explanations = []
B,C = x.shape[:2]
# --> compute the standard deviation per image and color channel.
_intensity_range = x.reshape(B,C,-1).max(-1).values - x.reshape(B,C,-1).min(-1).values
std = self.std_spread * _intensity_range
# --> smoothgrad. just an average of saliency maps perturbed by noise
for i in range(self.nsamples):
self.model.zero_grad()
_noise = T.randn_like(x) * std.reshape(B,C,*(1 for _ in x.shape[2:]))
x_plus_noise = (x.detach() + _noise).requires_grad_()
yhat = self.model(x_plus_noise)
if self.saliency_method == 'vanilla_gradient':
map = vanilla_gradient(
input=self.layer if self.layer is not None else x_plus_noise,
output=yhat[:, index_of_class_to_explain],
)
else:
raise NotImplementedError()
map = self.apply_before_mean(map)
explanations.append(map)
return T.stack(explanations).mean(0)
# notes from paper
# maybe take absolute value
# consider .99 percentile of gradient values, because extreme values throw off input color and result in black map.
# noise, N(0, sigma^2): 10 to 20% noise?
if __name__ == "__main__":
# cfg = ...
sg = SmoothGrad(cfg.model.cpu())
x,y = cfg.train_dset[0]
x = x.unsqueeze_(0).to(cfg.device, non_blocking=True)
# explanations = [sg(x, i) for i in range(y.shape[0])]
# e = explanations[0]
e = sg(x,6)
|
nilq/baby-python
|
python
|
import argparse
import torch
import wandb
wandb.login()
from dataloader import get_dataloaders
from utils import get_model
from train import Trainer
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, choices=['c10', 'c100', 'svhn'])
parser.add_argument('--model', required=True, choices=['mlp_mixer'])
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--eval-batch-size', type=int, default=1024)
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--seed', type=int, default=3407)
parser.add_argument('--epochs', type=int, default=300)
# parser.add_argument('--precision', type=int, default=16)
parser.add_argument('--patch-size', type=int, default=4)
parser.add_argument('--hidden-size', type=int, default=128)
parser.add_argument('--hidden-c', type=int, default=512)
parser.add_argument('--hidden-s', type=int, default=64)
parser.add_argument('--num-layers', type=int, default=8)
parser.add_argument('--drop-p', type=float, default=0.)
parser.add_argument('--off-act', action='store_true', help='Disable activation function')
parser.add_argument('--is-cls-token', action='store_true', help='Introduce a class token.')
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--min-lr', type=float, default=1e-6)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--optimizer', default='adam', choices=['adam', 'sgd'])
parser.add_argument('--scheduler', default='cosine', choices=['step', 'cosine'])
parser.add_argument('--beta1', type=float, default=0.9)
parser.add_argument('--beta2', type=float, default=0.99)
parser.add_argument('--weight-decay', type=float, default=5e-5)
parser.add_argument('--off-nesterov', action='store_true')
parser.add_argument('--label-smoothing', type=float, default=0.1)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--warmup-epoch', type=int, default=5)
parser.add_argument('--autoaugment', action='store_true')
parser.add_argument('--clip-grad', type=float, default=0, help="0 means disabling clip-grad")
parser.add_argument('--cutmix-beta', type=float, default=1.0)
parser.add_argument('--cutmix-prob', type=float, default=0.)
args = parser.parse_args()
args.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
args.nesterov = not args.off_nesterov
torch.random.manual_seed(args.seed)
experiment_name = f"{args.model}_{args.dataset}_{args.optimizer}_{args.scheduler}"
if args.autoaugment:
experiment_name += "_aa"
if args.clip_grad:
experiment_name += f"_cg{args.clip_grad}"
if args.off_act:
experiment_name += f"_noact"
if args.cutmix_prob>0.:
experiment_name += f'_cm'
if args.is_cls_token:
experiment_name += f"_cls"
if __name__=='__main__':
with wandb.init(project='mlp_mixer', config=args, name=experiment_name):
train_dl, test_dl = get_dataloaders(args)
model = get_model(args)
trainer = Trainer(model, args)
trainer.fit(train_dl, test_dl)
|
nilq/baby-python
|
python
|
def divide(num):
try:
return 42 / num
except ZeroDivisionError:
print('Error: Invalid argument')
print(divide(2))
print(divide(12))
print(divide(0))
print(divide(1))
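# For reference, the four calls above print:
#
#     21.0
#     3.5
#     Error: Invalid argument
#     None
#     42.0
#
# divide(0) hits the except branch, which prints the message and implicitly
# returns None; that None is then printed by the caller.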
|
nilq/baby-python
|
python
|
# coding=utf-8
#
# created by kpe on 28.Mar.2019 at 15:56
#
from __future__ import absolute_import, division, print_function
|
nilq/baby-python
|
python
|
import graphviz
dot = graphviz.Digraph(comment='GIADog system overview')
dot.render('output/system.gv', view=True)
|
nilq/baby-python
|
python
|
# The MIT License (MIT)
# Copyright (c) 2020 Mike Teachman
# https://opensource.org/licenses/MIT
# Platform-independent MicroPython code for the rotary encoder module
# Documentation:
# https://github.com/MikeTeachman/micropython-rotary
import micropython
from micropython import const  # const() must be imported explicitly
_DIR_CW = const(0x10) # Clockwise step
_DIR_CCW = const(0x20) # Counter-clockwise step
# Rotary Encoder States
_R_START = const(0x0)
_R_CW_1 = const(0x1)
_R_CW_2 = const(0x2)
_R_CW_3 = const(0x3)
_R_CCW_1 = const(0x4)
_R_CCW_2 = const(0x5)
_R_CCW_3 = const(0x6)
_R_ILLEGAL = const(0x7)
_transition_table = [
# |------------- NEXT STATE -------------| |CURRENT STATE|
# CLK/DT CLK/DT CLK/DT CLK/DT
# 00 01 10 11
[_R_START, _R_CCW_1, _R_CW_1, _R_START], # _R_START
[_R_CW_2, _R_START, _R_CW_1, _R_START], # _R_CW_1
[_R_CW_2, _R_CW_3, _R_CW_1, _R_START], # _R_CW_2
[_R_CW_2, _R_CW_3, _R_START, _R_START | _DIR_CW], # _R_CW_3
[_R_CCW_2, _R_CCW_1, _R_START, _R_START], # _R_CCW_1
[_R_CCW_2, _R_CCW_1, _R_CCW_3, _R_START], # _R_CCW_2
[_R_CCW_2, _R_START, _R_CCW_3, _R_START | _DIR_CCW], # _R_CCW_3
[_R_START, _R_START, _R_START, _R_START]] # _R_ILLEGAL
_transition_table_half_step = [
[_R_CW_3, _R_CW_2, _R_CW_1, _R_START],
[_R_CW_3 | _DIR_CCW, _R_START, _R_CW_1, _R_START],
[_R_CW_3 | _DIR_CW, _R_CW_2, _R_START, _R_START],
[_R_CW_3, _R_CCW_2, _R_CCW_1, _R_START],
[_R_CW_3, _R_CW_2, _R_CCW_1, _R_START | _DIR_CW],
[_R_CW_3, _R_CCW_2, _R_CW_3, _R_START | _DIR_CCW]]
_STATE_MASK = const(0x07)
_DIR_MASK = const(0x30)
def _wrap(value, incr, lower_bound, upper_bound):
    span = upper_bound - lower_bound + 1  # 'span' avoids shadowing the builtin 'range'
    value = value + incr
    if value < lower_bound:
        value += span * ((lower_bound - value) // span + 1)
    return lower_bound + (value - lower_bound) % span
def _bound(value, incr, lower_bound, upper_bound):
return min(upper_bound, max(lower_bound, value + incr))
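# Illustrative self-checks of the two range policies: _wrap treats
# [lower, upper] as circular, _bound clamps at the endpoints.
assert _wrap(10, 1, 0, 10) == 0    # 10 + 1 wraps around to the lower bound
assert _wrap(0, -1, 0, 10) == 10   # 0 - 1 wraps around to the upper bound
assert _bound(10, 1, 0, 10) == 10  # clamped at the upper bound
assert _bound(0, -1, 0, 10) == 0   # clamped at the lower bound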
def _trigger(rotary_instance):
for listener in rotary_instance._listener:
listener()
class Rotary(object):
RANGE_UNBOUNDED = const(1)
RANGE_WRAP = const(2)
RANGE_BOUNDED = const(3)
def __init__(self, min_val, max_val, reverse, range_mode, half_step):
self._min_val = min_val
self._max_val = max_val
self._reverse = -1 if reverse else 1
self._range_mode = range_mode
self._value = min_val
self._state = _R_START
self._half_step = half_step
self._listener = []
def set(self, value=None, min_val=None,
max_val=None, reverse=None, range_mode=None):
# disable DT and CLK pin interrupts
self._hal_disable_irq()
if value is not None:
self._value = value
if min_val is not None:
self._min_val = min_val
if max_val is not None:
self._max_val = max_val
if reverse is not None:
self._reverse = -1 if reverse else 1
if range_mode is not None:
self._range_mode = range_mode
self._state = _R_START
# enable DT and CLK pin interrupts
self._hal_enable_irq()
def value(self):
return self._value
def reset(self):
self._value = 0
def close(self):
self._hal_close()
def add_listener(self, l):
self._listener.append(l)
def remove_listener(self, l):
if l not in self._listener:
raise ValueError('{} is not an installed listener'.format(l))
self._listener.remove(l)
def _process_rotary_pins(self, pin):
old_value = self._value
clk_dt_pins = (self._hal_get_clk_value() <<
1) | self._hal_get_dt_value()
# Determine next state
if self._half_step:
self._state = _transition_table_half_step[self._state &
_STATE_MASK][clk_dt_pins]
else:
self._state = _transition_table[self._state &
_STATE_MASK][clk_dt_pins]
direction = self._state & _DIR_MASK
incr = 0
if direction == _DIR_CW:
incr = 1
elif direction == _DIR_CCW:
incr = -1
incr *= self._reverse
if self._range_mode == self.RANGE_WRAP:
self._value = _wrap(
self._value,
incr,
self._min_val,
self._max_val)
elif self._range_mode == self.RANGE_BOUNDED:
self._value = _bound(
self._value,
incr,
self._min_val,
self._max_val)
else:
self._value = self._value + incr
try:
if old_value != self._value and len(self._listener) != 0:
micropython.schedule(_trigger, self)
        except RuntimeError:
            # micropython.schedule() raises RuntimeError when its queue is
            # full; drop this notification rather than crash inside the IRQ.
            pass
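# Hedged usage sketch: Rotary is the platform-independent base class; a
# platform module from this package (e.g. rotary_irq_esp.RotaryIRQ) supplies
# the _hal_* pin methods. The pin numbers below are illustrative.
#
#     from rotary_irq_esp import RotaryIRQ
#     r = RotaryIRQ(pin_num_clk=18, pin_num_dt=19,
#                   min_val=0, max_val=10,
#                   range_mode=Rotary.RANGE_BOUNDED)
#     r.add_listener(lambda: print('value =', r.value()))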
|
nilq/baby-python
|
python
|
from flask import Flask
app = Flask(__name__)
import sqreen
sqreen.start()
from app import routes
if __name__ == '__main__':
app.run(debug=True)
|
nilq/baby-python
|
python
|
"""Module to generate datasets for FROCC
"""
import os
import numpy as np
import sklearn.datasets as skds
import scipy.sparse as sp
def himoon(n_samples=1000, n_dims=1000, sparsity=0.01, dist=5):
x, y = skds.make_moons(n_samples=n_samples * 2)
x = np.hstack(
(x, dist * np.ones((n_samples * 2, int(n_dims * sparsity - x.shape[1]))))
)
x_p = x[y == 1]
x_pos = sp.csr_matrix((n_samples, n_dims))
x_pos[:, : x.shape[1]] = x_p
x_n = x[y == 0]
x_neg = sp.csr_matrix((int(n_samples * 0.3), n_dims))
x_neg[:, : x.shape[1]] = x_n[: int(n_samples * 0.3)]
x_train = x_pos[: int(n_samples * 0.7)]
x_val = sp.vstack(
(
x_pos[int(n_samples * 0.7) : int(n_samples * 0.9)],
x_neg[: int(n_samples * 0.2)],
),
)
x_test = sp.vstack((x_pos[int(n_samples * 0.9) :], x_neg[int(n_samples * 0.2) :]))
y_train = np.ones(int(n_samples * 0.7))
y_val = np.concatenate(
((np.ones(int(n_samples * 0.2)), np.zeros(int(n_samples * 0.2))))
)
y_test = np.concatenate(
((np.ones(int(n_samples * 0.1)), np.zeros(int(n_samples * 0.1))))
)
return x_train, y_train, x_val, y_val, x_test, y_test
def mmgauss(n_samples=1000, n_dims=1000, modes=5, sparsity=0.01, dist=5):
pos_means = [(i + dist) * np.ones(int(n_dims * sparsity)) for i in range(modes)]
    # one negative-class center at the origin, matching pos_means' dimensionality
    neg_means = np.zeros((1, int(n_dims * sparsity)))
x_p, _ = skds.make_blobs(n_samples=n_samples, centers=pos_means)
x_pos = sp.csr_matrix((n_samples, n_dims))
x_pos[:, : int(n_dims * sparsity)] = x_p
x_n, _ = skds.make_blobs(n_samples=int(n_samples * 0.3), centers=neg_means)
x_neg = sp.csr_matrix((int(n_samples * 0.3), n_dims))
x_neg[:, : int(n_dims * sparsity)] = x_n
x_train = x_pos[: int(n_samples * 0.7)]
x_val = sp.vstack(
(
x_pos[int(n_samples * 0.7) : int(n_samples * 0.9)],
x_neg[: int(n_samples * 0.2)],
),
)
x_test = sp.vstack((x_pos[int(n_samples * 0.9) :], x_neg[int(n_samples * 0.2) :]))
y_train = np.ones(int(n_samples * 0.7))
y_val = np.concatenate(
((np.ones(int(n_samples * 0.2)), np.zeros(int(n_samples * 0.2))))
)
y_test = np.concatenate(
((np.ones(int(n_samples * 0.1)), np.zeros(int(n_samples * 0.1))))
)
return x_train, y_train, x_val, y_val, x_test, y_test
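# Hedged self-check of the split both generators produce: 70% of the positives
# train; validation and test each mix positives with half of the negatives.
if __name__ == "__main__":
    x_train, y_train, x_val, y_val, x_test, y_test = himoon(
        n_samples=1000, n_dims=1000)
    assert x_train.shape == (700, 1000) and y_train.shape == (700,)
    assert x_val.shape == (400, 1000) and y_val.shape == (400,)
    assert x_test.shape == (200, 1000) and y_test.shape == (200,)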
|
nilq/baby-python
|
python
|
import pytest
from smpa.models.address import Address, SiteAddress
@pytest.fixture
def address():
obj = Address()
obj.number = "42"
obj.property_name = "property name"
obj.address_line_1 = "address line 1"
obj.address_line_2 = "address line 2"
obj.address_line_3 = "address line 3"
obj.town_city = "town city"
obj.postcode = "postcode"
obj.validate()
return obj
@pytest.fixture
def site_address():
obj = SiteAddress()
obj.number = "42"
obj.property_name = "property name"
obj.address_line_1 = "address line 1"
obj.address_line_2 = "address line 2"
obj.address_line_3 = "address line 3"
obj.town_city = "town city"
obj.postcode = "postcode"
obj.validate()
return obj
|
nilq/baby-python
|
python
|
# Russell RIchardson
# Homework 2, problem 1
"""This reads from a text file and returns a string of the text"""
def read_from_a_file(the_file):
    with open(the_file, 'r') as in_file:
        the_string = in_file.read()
    return the_string
"""This takes in a string and writes that string to a text file"""
def write_to_a_file(message, the_file):
    with open(the_file, "w") as out_file:
        out_file.write(message)
"""Call main to run the main program"""
def main():
the_file = r"message.txt"
message = read_from_a_file(the_file)
print(message)
key = input("Enter a key for the cipher: ")
encrypted_message = encrypt(key,message)
print(encrypted_message)
new_file = the_file[:-4]
new_file = new_file + "-cipher.txt"
write_to_a_file(encrypted_message,new_file)
"""This encrypts the message, given a key"""
def encrypt(key,message):
encrypted_message = ""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
key_index = 0
key = key.lower()
for symbol in message:
        encrypted_index = alphabet.find(symbol.lower())  # case-insensitive lookup so uppercase letters get encrypted too
if encrypted_index != -1:
encrypted_index += alphabet.find(key[key_index])
encrypted_index %= len(alphabet)
if symbol.islower():
encrypted_message += alphabet[encrypted_index]
elif symbol.isupper():
encrypted_message += alphabet[encrypted_index].upper()
key_index += 1
if key_index == len(key):
key_index = 0
else:
encrypted_message += symbol
return encrypted_message
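"""Hedged companion sketch (not part of the original homework): decryption
reverses the cipher by subtracting each key letter's index instead of adding it."""
def decrypt(key, message):
    decrypted_message = ""
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    key_index = 0
    key = key.lower()
    for symbol in message:
        index = alphabet.find(symbol.lower())
        if index != -1:
            index -= alphabet.find(key[key_index])
            index %= len(alphabet)
            if symbol.islower():
                decrypted_message += alphabet[index]
            elif symbol.isupper():
                decrypted_message += alphabet[index].upper()
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            decrypted_message += symbol
    return decrypted_message

if __name__ == "__main__":
    main()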
|
nilq/baby-python
|
python
|
"""
Perform inference on inputted text.
"""
import utils
import torch
from termcolor import cprint, colored as c
import model
import data
import re
import sys
source = sys.argv[1]
# build and load the punctuation model (an Engadget wrapper around a GRU)
def get_edgt():
input_chars = list(" \nabcdefghijklmnopqrstuvwxyz01234567890")
output_chars = ["<nop>", "<cap>"] + list(".,;:?!\"'$")
# torch.set_num_threads(8)
batch_size = 128
char2vec = utils.Char2Vec(chars=input_chars, add_unknown=True)
output_char2vec = utils.Char2Vec(chars = output_chars)
input_size = char2vec.size
output_size = output_char2vec.size
hidden_size = input_size
layers = 1
rnn = model.GruRNN(input_size, hidden_size, output_size, batch_size=batch_size, layers=layers, bi=True)
egdt = model.Engadget(rnn, char2vec, output_char2vec)
egdt.load('./data/Gru_Engadget_epch-24.tar')
return egdt
def predict_next(source, in_edgt, gen_length=None, temperature=0.05):
input_chars = list(" \nabcdefghijklmnopqrstuvwxyz01234567890")
output_chars = ["<nop>", "<cap>"] + list(".,;:?!\"'$")
input_text, punc_target = data.extract_punc(source, input_chars, output_chars)
in_edgt.model.batch_size = 1
in_edgt.init_hidden_()
in_edgt.next_([input_text])
punc_output = in_edgt.output_chars(temperature=temperature)[0]
result = data.apply_punc(input_text, punc_output)
    # capitalize the first letter that follows each period
    chars = list(result)
    for i, ch in enumerate(chars):
        if ch == '.':
            j = i + 1
            while j < len(chars) and chars[j] == ' ':
                j += 1
            if j < len(chars):
                chars[j] = chars[j].upper()
    result = ''.join(chars)
print(result)
predict_next(source, get_edgt())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''checkplot.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Jan 2017
License: MIT.
Contains functions to make checkplots: quick views for determining periodic
variability for light curves and sanity-checking results from period-finding
functions (e.g., from periodbase).
The checkplot_png function makes the following 3 x 3 grid and writes to a PNG:
[LSP plot + objectinfo] [ unphased LC ] [ period 1 phased LC ]
[period 1 phased LC /2] [period 1 phased LC x2] [ period 2 phased LC ]
[ period 3 phased LC ] [period 4 phased LC ] [ period 5 phased LC ]
The twolsp_checkplot_png function makes a similar plot for two independent
period-finding routines and writes to a PNG:
[ pgram1 + objectinfo ] [ pgram2 ] [ unphased LC ]
[ pgram1 P1 phased LC ] [ pgram1 P2 phased LC ] [ pgram1 P3 phased LC ]
[ pgram2 P1 phased LC ] [ pgram2 P2 phased LC ] [ pgram2 P3 phased LC ]
where:
pgram1 is the plot for the periodogram in the lspinfo1 dict
pgram1 P1, P2, and P3 are the best three periods from lspinfo1
pgram2 is the plot for the periodogram in the lspinfo2 dict
pgram2 P1, P2, and P3 are the best three periods from lspinfo2
The checkplot_pickle function takes, for a single object, an arbitrary number of
results from independent period-finding functions (e.g. BLS, PDM, AoV, GLS) in
periodbase, and generates a gzipped pickle file that contains object and
variability information, finder chart, mag series plot, and for each
period-finding result: a periodogram and phased mag series plots for up to
arbitrary number of 'best periods'. This is intended for use with an external
checkplot viewer: the Tornado webapp checkplotserver.py, but you can also use
the checkplot_pickle_to_png function to render this to a PNG similar to those
above. In this case, the PNG will look something like:
[ finder ] [ objectinfo ] [ variableinfo ] [ unphased LC ]
[ periodogram1 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
[ periodogram2 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
.
.
[ periodogramN ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
for N independent period-finding methods producing:
- periodogram1,2,3...N: the periodograms from each method
- phased LC P1,P2,P3: the phased lightcurves using the best 3 peaks in each
periodogram
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None
LOGMOD = __name__
DEBUG = False
def set_logger_parent(parent_name):
globals()['LOGGER'] = logging.getLogger('%s.%s' % (parent_name, LOGMOD))
def LOGDEBUG(message):
if LOGGER:
LOGGER.debug(message)
elif DEBUG:
print('[%s - DBUG] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGINFO(message):
if LOGGER:
LOGGER.info(message)
else:
print('[%s - INFO] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGERROR(message):
if LOGGER:
LOGGER.error(message)
else:
print('[%s - ERR!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGWARNING(message):
if LOGGER:
LOGGER.warning(message)
else:
print('[%s - WRN!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGEXCEPTION(message):
if LOGGER:
LOGGER.exception(message)
else:
print(
'[%s - EXC!] %s\nexception was: %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message, format_exc()
)
)
#############
## IMPORTS ##
#############
import os
import os.path
import gzip
import base64
import sys
import hashlib
import json
try:
import cPickle as pickle
from cStringIO import StringIO as strio
except:
import pickle
from io import BytesIO as strio
import numpy as np
from numpy import nan as npnan, median as npmedian, \
isfinite as npisfinite, min as npmin, max as npmax, abs as npabs, \
ravel as npravel
# we're going to plot using Agg only
import matplotlib
MPLVERSION = tuple([int(x) for x in matplotlib.__version__.split('.')])
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# import this to check if stimes, smags, serrs are Column objects
from astropy.table import Column as astcolumn
# import this to get neighbors and their x,y coords from the Skyview FITS
from astropy.wcs import WCS
# import from Pillow to generate pngs from checkplot dicts
from PIL import Image, ImageDraw, ImageFont
# import sps.cKDTree for external catalog xmatches
from scipy.spatial import cKDTree
###################
## LOCAL IMPORTS ##
###################
from .lcmath import phase_magseries, phase_bin_magseries, \
normalize_magseries, sigclip_magseries
from .varbase.lcfit import spline_fit_magseries
from .varclass.varfeatures import all_nonperiodic_features
from .varclass.starfeatures import coord_features, color_features, \
color_classification, neighbor_gaia_features
from .plotbase import skyview_stamp, \
PLOTYLABELS, METHODLABELS, METHODSHORTLABELS
from .coordutils import total_proper_motion, reduced_proper_motion
#######################
## UTILITY FUNCTIONS ##
#######################
def _make_periodogram(axes,
lspinfo,
objectinfo,
findercmap,
finderconvolve,
verbose=True,
findercachedir='~/.astrobase/stamp-cache'):
'''makes the periodogram, objectinfo, and finder tile.
'''
# get the appropriate plot ylabel
pgramylabel = PLOTYLABELS[lspinfo['method']]
# get the periods and lspvals from lspinfo
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
# make the LSP plot on the first subplot
axes.plot(periods,lspvals)
axes.set_xscale('log',basex=10)
axes.set_xlabel('Period [days]')
axes.set_ylabel(pgramylabel)
plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']],
bestperiod)
axes.set_title(plottitle)
# show the best five peaks on the plot
for bestperiod, bestpeak in zip(nbestperiods,
nbestlspvals):
axes.annotate('%.6f' % bestperiod,
xy=(bestperiod, bestpeak), xycoords='data',
xytext=(0.0,25.0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize='14.0')
# make a grid
axes.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# if objectinfo is present, get things from it
if (objectinfo and isinstance(objectinfo, dict) and
('objectid' in objectinfo or 'hatid' in objectinfo)
and 'ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] and objectinfo['decl']):
if 'objectid' not in objectinfo:
objectid = objectinfo['hatid']
else:
objectid = objectinfo['objectid']
if verbose:
LOGINFO('adding in object information and '
'finder chart for %s at RA: %.3f, DEC: %.3f' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# FIXME: get mag info from astroquery or HATDS if needed
# calculate colors
if ('bmag' in objectinfo and 'vmag' in objectinfo and
'jmag' in objectinfo and 'kmag' in objectinfo and
'sdssi' in objectinfo and
objectinfo['bmag'] and objectinfo['vmag'] and
objectinfo['jmag'] and objectinfo['kmag'] and
objectinfo['sdssi']):
bvcolor = objectinfo['bmag'] - objectinfo['vmag']
jkcolor = objectinfo['jmag'] - objectinfo['kmag']
ijcolor = objectinfo['sdssi'] - objectinfo['jmag']
else:
bvcolor = None
jkcolor = None
ijcolor = None
# bump the ylim of the LSP plot so that the overplotted finder and
# objectinfo can fit in this axes plot
lspylim = axes.get_ylim()
axes.set_ylim(lspylim[0], lspylim[1]+0.75*(lspylim[1]-lspylim[0]))
# get the stamp
try:
dss, dssheader = skyview_stamp(objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
cachedir=findercachedir,
verbose=verbose)
stamp = dss
# inset plot it on the current axes
inset = inset_axes(axes, width="40%", height="40%", loc=1)
inset.imshow(stamp,cmap=findercmap)
inset.set_xticks([])
inset.set_yticks([])
inset.set_frame_on(False)
# grid lines pointing to the center of the frame
inset.axvline(x=150,ymin=0.2,ymax=0.4,linewidth=2.0,color='k')
inset.axhline(y=150,xmin=0.2,xmax=0.4,linewidth=2.0,color='k')
except Exception as e:
LOGEXCEPTION('could not fetch a DSS stamp for this '
'object %s using coords (%.3f,%.3f)' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# annotate with objectinfo
axes.text(
0.05,0.95,
'%s' % objectid,
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0
)
axes.text(
0.05,0.91,
'RA = %.3f, DEC = %.3f' % (objectinfo['ra'], objectinfo['decl']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0
)
if bvcolor:
axes.text(0.05,0.87,
'$B - V$ = %.3f, $V$ = %.3f' % (bvcolor,
objectinfo['vmag']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
elif 'vmag' in objectinfo and objectinfo['vmag']:
axes.text(0.05,0.87,
'$V$ = %.3f' % (objectinfo['vmag'],),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if ijcolor:
axes.text(0.05,0.83,
'$i - J$ = %.3f, $J$ = %.3f' % (ijcolor,
objectinfo['jmag']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
elif 'jmag' in objectinfo and objectinfo['jmag']:
axes.text(0.05,0.83,
'$J$ = %.3f' % (objectinfo['jmag'],),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if jkcolor:
axes.text(0.05,0.79,
'$J - K$ = %.3f, $K$ = %.3f' % (jkcolor,
objectinfo['kmag']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
elif 'kmag' in objectinfo and objectinfo['kmag']:
axes.text(0.05,0.79,
'$K$ = %.3f' % (objectinfo['kmag'],),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if 'sdssr' in objectinfo and objectinfo['sdssr']:
axes.text(0.05,0.75,'SDSS $r$ = %.3f' % objectinfo['sdssr'],
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
# add in proper motion stuff if available in objectinfo
if ('pmra' in objectinfo and objectinfo['pmra'] and
'pmdecl' in objectinfo and objectinfo['pmdecl']):
pm = total_proper_motion(objectinfo['pmra'],
objectinfo['pmdecl'],
objectinfo['decl'])
            axes.text(0.05,0.67,r'$\mu$ = %.2f mas yr$^{-1}$' % pm,
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if 'jmag' in objectinfo and objectinfo['jmag']:
rpm = reduced_proper_motion(objectinfo['jmag'],pm)
axes.text(0.05,0.63,'$H_J$ = %.2f' % rpm,
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
def _make_magseries_plot(axes,
stimes,
smags,
serrs,
magsarefluxes=False):
'''makes the magseries plot tile.
'''
scaledplottime = stimes - npmin(stimes)
axes.plot(scaledplottime,
smags,
marker='o',
ms=2.0, ls='None',mew=0,
color='green',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = axes.get_ylim()
axes.set_ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
plot_xlim = axes.get_xlim()
axes.set_xlim((npmin(scaledplottime)-1.0,
npmax(scaledplottime)+1.0))
# make a grid
axes.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'JD - %.3f' % npmin(stimes)
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
axes.set_xlabel(plot_xlabel)
axes.set_ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
axes.get_xaxis().get_major_formatter().set_useOffset(False)
def _make_phased_magseries_plot(axes,
periodind,
stimes, smags,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim,
lspmethod,
xliminsetmode=False,
twolspmode=False,
magsarefluxes=False):
'''makes the phased magseries plot tile.
if xliminsetmode = True, then makes a zoomed-in plot with the provided
plotxlim as the main x limits, and the full plot as an inset.
'''
# phase the magseries
phasedlc = phase_magseries(stimes,
smags,
varperiod,
varepoch,
wrap=phasewrap,
sort=phasesort)
plotphase = phasedlc['phase']
plotmags = phasedlc['mags']
# if we're supposed to bin the phases, do so
if phasebin:
binphasedlc = phase_bin_magseries(plotphase,
plotmags,
binsize=phasebin,
minbinelems=minbinelems)
binplotphase = binphasedlc['binnedphases']
binplotmags = binphasedlc['binnedmags']
# finally, make the phased LC plot
axes.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
axes.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = axes.get_ylim()
axes.set_ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
if not plotxlim:
plot_xlim = axes.get_xlim()
axes.set_xlim((npmin(plotphase)-0.1,
npmax(plotphase)+0.1))
else:
axes.set_xlim((plotxlim[0],plotxlim[1]))
# make a grid
axes.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'phase'
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
axes.set_xlabel(plot_xlabel)
axes.set_ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
axes.get_xaxis().get_major_formatter().set_useOffset(False)
# make the plot title
if periodind == 0:
plottitle = '%s best period: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
varperiod,
varepoch
)
elif periodind == 1 and not twolspmode:
plottitle = '%s best period x 0.5: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
varperiod,
varepoch
)
elif periodind == 2 and not twolspmode:
plottitle = '%s best period x 2: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
varperiod,
varepoch
)
elif periodind > 2 and not twolspmode:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
periodind-1,
varperiod,
varepoch
)
elif periodind > 0:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
periodind+1,
varperiod,
varepoch
)
axes.set_title(plottitle)
# if we're making an inset plot showing the full range
if (plotxlim and isinstance(plotxlim, list) and
len(plotxlim) == 2 and xliminsetmode is True):
# bump the ylim of the plot so that the inset can fit in this axes plot
axesylim = axes.get_ylim()
if magsarefluxes:
axes.set_ylim(axesylim[0],
axesylim[1] + 0.5*npabs(axesylim[1]-axesylim[0]))
else:
axes.set_ylim(axesylim[0],
axesylim[1] - 0.5*npabs(axesylim[1]-axesylim[0]))
# put the inset axes in
inset = inset_axes(axes, width="40%", height="40%", loc=1)
# make the scatter plot for the phased LC plot
inset.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
inset.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
        # show the full phase coverage
if phasewrap:
inset.set_xlim(-0.2,0.8)
else:
inset.set_xlim(-0.1,1.1)
# flip y axis for mags
if not magsarefluxes:
inset_ylim = inset.get_ylim()
inset.set_ylim((inset_ylim[1], inset_ylim[0]))
# set the plot title
inset.text(0.5,0.1,'full phased light curve',
ha='center',va='center',transform=inset.transAxes)
# don't show axes labels or ticks
inset.set_xticks([])
inset.set_yticks([])
############################################
## CHECKPLOT FUNCTIONS THAT WRITE TO PNGS ##
############################################
def checkplot_png(lspinfo,
times,
mags,
errs,
magsarefluxes=False,
objectinfo=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
outfile=None,
sigclip=4.0,
varepoch='min',
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
xliminsetmode=False,
plotdpi=100,
bestperiodhighlight=None,
verbose=True):
'''This makes a checkplot for an info dict from a period-finding routine.
A checkplot is a 3 x 3 grid of plots like so:
[LSP plot + objectinfo] [ unphased LC ] [ period 1 phased LC ]
[period 1 phased LC /2] [period 1 phased LC x2] [ period 2 phased LC ]
[ period 3 phased LC ] [period 4 phased LC ] [ period 5 phased LC ]
This is used to sanity check the five best periods obtained from an LSP
function in periodbase.
lspinfo is either a dict or a Python pickle filename containing a dict that
should look something like the dict below, containing the output from your
period search routine. The key 'lspvals' is the spectral power or SNR
obtained from Lomb-Scargle, PDM, AoV, or BLS. The keys 'nbestperiods' and
'nbestlspvals' contain the best five periods and their respective peaks
chosen by your period search routine (usually the highest SNR or highest
power peaks in the spectrum).
{'bestperiod':7.7375425564838061,
'lspvals':array([ 0.00892461, 0.0091704 , 0.00913682,...]),
'periods':array([ 8. , 7.999936, 7.999872, ...]),
'nbestperiods':[7.7375425564838061,
7.6370856881010738,
7.837604827964415,
7.5367037472486667,
7.9377048920074627],
'nbestlspvals':[0.071409790831114872,
0.055157963469682415,
0.055126754408175715,
0.023441268126990749,
0.023239128705778048],
'method':'gls'}
The 'method' key-val pair decides what kind of period finding method was
run. This is used to label the periodogram plot correctly. The following
values are recognized.
'gls' -> generalized Lomb-Scargle (e.g., from periodbase.pgen_lsp)
'pdm' -> Stellingwerf PDM (e.g., from periodbase.stellingwerf_pdm)
'aov' -> Schwarzenberg-Czerny AoV (e.g., from periodbase.aov_periodfind)
    'bls' -> Box Least Squares search (e.g., from periodbase.bls_parallel_pfind)
'sls' -> Lomb-Scargle from Scipy (e.g., from periodbase.scipylsp_parallel)
magsarefluxes = True means the values provided in the mags input array are
actually fluxes; this affects the sigma-clipping and plotting of light
curves.
If a dict is passed to objectinfo, this function will use it to figure out
where in the sky the checkplotted object is, and put the finding chart plus
some basic info into the checkplot. The objectinfo dict should look
something like those produced for HAT light curves using the reader
functions in the astrobase.hatlc module, e.g.:
{'bmag': 17.669,
'decl': -63.933598,
'hatid': 'HAT-786-0021445',
'objectid': 'HAT-786-0021445',
'hmag': 13.414,
'jmag': 14.086,
'kmag': 13.255,
'ndet': 10850,
'network': 'HS',
'pmdecl': -19.4,
'pmdecl_err': 5.1,
'pmra': 29.3,
'pmra_err': 4.1,
'ra': 23.172678,
'sdssg': 17.093,
'sdssi': 15.382,
'sdssr': 15.956,
'stations': 'HS02,HS04,HS06',
'twomassid': '01324144-6356009 ',
'ucac4id': '12566701',
'vmag': 16.368}
At a minimum, you must have the following fields: 'objectid', 'ra',
'decl'. If 'jmag', 'kmag', 'bmag', 'vmag', 'sdssr', and 'sdssi' are also
present, the following quantities will be calculated: B-V, J-K, and i-J. If
'pmra' and 'pmdecl' are present as well, the total proper motion and reduced
J magnitude proper motion will be calculated.
findercmap sets the matplotlib colormap of the downloaded finder chart:
http://matplotlib.org/examples/color/colormaps_reference.html
finderconvolve convolves the finder FITS image with the given
astropy.convolution kernel:
http://docs.astropy.org/en/stable/convolution/kernels.html
This can be useful to see effects of wide-field telescopes with large pixel
sizes (like HAT) on the blending of sources.
findercachedir is the directory where the downloaded stamp FITS files
go. Repeated calls to this function will then use the cached version of the
stamp if the finder coordinates don't change.
    bestperiodhighlight sets whether the user wants a background color on the
    phased light curve from each periodogram type to distinguish it from the
    others. this should be an HTML hex color specification. If this is None,
    no highlight will be added.
xliminsetmode = True sets up the phased mag series plot to show a zoomed-in
portion (set by plotxlim) as the main plot and an inset version of the full
phased light curve from phase 0.0 to 1.0. This can be useful if searching
for small dips near phase 0.0 caused by planetary transits for example.
verbose = False turns off many of the informational messages. Useful for
when an external function is driving lots of checkplot calls.
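    A minimal usage sketch (the variable names here are hypothetical; `gls` is
    assumed to be the result dict from a periodbase period-finder run on the
    same times, mags, errs arrays):

        gls = periodbase.pgen_lsp(times, mags, errs)
        cpfpath = checkplot_png(gls, times, mags, errs,
                                objectinfo=objectinfo,
                                outfile='gls-checkplot.png')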
'''
if not outfile and isinstance(lspinfo,str):
# generate the plot filename
plotfpath = os.path.join(
os.path.dirname(lspinfo),
'checkplot-%s.png' % (
os.path.basename(lspinfo),
)
)
elif outfile:
plotfpath = outfile
else:
plotfpath = 'checkplot.png'
# get the lspinfo from a pickle file transparently
if isinstance(lspinfo,str) and os.path.exists(lspinfo):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo)
if '.gz' in lspinfo:
with gzip.open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
else:
with open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
# get the things to plot out of the data
if ('periods' in lspinfo and
'lspvals' in lspinfo and
'bestperiod' in lspinfo):
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
lspmethod = lspinfo['method']
else:
LOGERROR('could not understand lspinfo for this object, skipping...')
return None
if not npisfinite(bestperiod):
LOGWARNING('no best period found for this object, skipping...')
return None
# initialize the plot
fig, axes = plt.subplots(3,3)
axes = npravel(axes)
# this is a full page plot
fig.set_size_inches(30,24)
#######################
## PLOT 1 is the LSP ##
#######################
_make_periodogram(axes[0],lspinfo,objectinfo,
findercmap, finderconvolve,
verbose=verbose,
findercachedir=findercachedir)
######################################
## NOW MAKE THE PHASED LIGHT CURVES ##
######################################
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# take care of the normalization
if normto is not False:
stimes, smags = normalize_magseries(stimes, smags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
# make sure we have some lightcurve points to plot after sigclip
if len(stimes) >= 50:
##############################
## PLOT 2 is an unphased LC ##
##############################
_make_magseries_plot(axes[1], stimes, smags, serrs,
magsarefluxes=magsarefluxes)
###########################
### NOW PLOT PHASED LCS ###
###########################
# make the plot for each best period
lspbestperiods = nbestperiods[::]
lspperiodone = lspbestperiods[0]
lspbestperiods.insert(1,lspperiodone*2.0)
lspbestperiods.insert(1,lspperiodone*0.5)
for periodind, varperiod in enumerate(lspbestperiods):
# figure out the epoch, if it's None, use the min of the time
if varepoch is None:
varepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
sigclip=None,
magsarefluxes=magsarefluxes,
verbose=verbose)
varepoch = spfit['fitinfo']['fitepoch']
if len(varepoch) != 1:
varepoch = varepoch[0]
except Exception as e:
LOGEXCEPTION('spline fit failed, using min(times) as epoch')
varepoch = npmin(stimes)
if verbose:
LOGINFO('plotting phased LC with period %.6f, epoch %.5f' %
(varperiod, varepoch))
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
axes[periodind+2].set_facecolor(bestperiodhighlight)
else:
axes[periodind+2].set_axis_bgcolor(bestperiodhighlight)
_make_phased_magseries_plot(axes[periodind+2],
periodind,
stimes, smags,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim, lspmethod,
xliminsetmode=xliminsetmode,
magsarefluxes=magsarefluxes)
# end of plotting for each ax
# save the plot to disk
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath,dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close('all')
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
# otherwise, there's no valid data for this plot
else:
LOGWARNING('no good data')
        for periodind in range(7):
            axes[periodind+2].text(
                0.5,0.5,
                ('no best aperture light curve available'),
                horizontalalignment='center',
                verticalalignment='center',
                transform=axes[periodind+2].transAxes
            )
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath, dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close('all')
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
def twolsp_checkplot_png(lspinfo1,
lspinfo2,
times,
mags,
errs,
magsarefluxes=False,
objectinfo=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
outfile=None,
sigclip=4.0,
varepoch='min',
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
xliminsetmode=False,
plotdpi=100,
bestperiodhighlight=None,
verbose=True):
'''This makes a checkplot using results from two independent period-finders.
Adapted from Luke Bouma's implementation of the same. This makes a special
checkplot that uses two lspinfo dictionaries, from two independent
period-finding methods. For EBs, it's probably best to use Stellingwerf PDM
    or Schwarzenberg-Czerny AoV as one of these, and the Box Least Squares
    method as the other.
The checkplot layout in this case is:
[ pgram1 + objectinfo ] [ pgram2 ] [ unphased LC ]
[ pgram1 P1 phased LC ] [ pgram1 P2 phased LC ] [ pgram1 P3 phased LC ]
[ pgram2 P1 phased LC ] [ pgram2 P2 phased LC ] [ pgram2 P3 phased LC ]
where:
pgram1 is the plot for the periodogram in the lspinfo1 dict
pgram1 P1, P2, and P3 are the best three periods from lspinfo1
pgram2 is the plot for the periodogram in the lspinfo2 dict
pgram2 P1, P2, and P3 are the best three periods from lspinfo2
All other args and kwargs are the same as checkplot_png. Note that we take
the output file name from lspinfo1 if lspinfo1 is a string filename pointing
to a (gzipped) pickle containing the results dict from a period-finding
routine similar to those in periodbase.
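    A usage sketch (hypothetical names; `pdm` and `bls` are assumed to be
    result dicts from periodbase.stellingwerf_pdm and
    periodbase.bls_parallel_pfind run on the same light curve):

        cpfpath = twolsp_checkplot_png(pdm, bls, times, mags, errs,
                                       objectinfo=objectinfo,
                                       outfile='pdm-bls-checkplot.png')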
'''
# generate the plot filename
if not outfile and isinstance(lspinfo1,str):
        plotfpath = os.path.join(
            os.path.dirname(lspinfo1),
            'twolsp-checkplot-%s.png' % (
                os.path.basename(lspinfo1),
            )
        )
elif outfile:
plotfpath = outfile
else:
plotfpath = 'twolsp-checkplot.png'
# get the first LSP from a pickle file transparently
if isinstance(lspinfo1,str) and os.path.exists(lspinfo1):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo1)
if '.gz' in lspinfo1:
with gzip.open(lspinfo1,'rb') as infd:
lspinfo1 = pickle.load(infd)
else:
with open(lspinfo1,'rb') as infd:
lspinfo1 = pickle.load(infd)
# get the second LSP from a pickle file transparently
if isinstance(lspinfo2,str) and os.path.exists(lspinfo2):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo2)
if '.gz' in lspinfo2:
with gzip.open(lspinfo2,'rb') as infd:
lspinfo2 = pickle.load(infd)
else:
with open(lspinfo2,'rb') as infd:
lspinfo2 = pickle.load(infd)
# get the things to plot out of the data
if ('periods' in lspinfo1 and 'periods' in lspinfo2 and
'lspvals' in lspinfo1 and 'lspvals' in lspinfo2 and
'bestperiod' in lspinfo1 and 'bestperiod' in lspinfo2):
periods1 = lspinfo1['periods']
lspvals1 = lspinfo1['lspvals']
bestperiod1 = lspinfo1['bestperiod']
nbestperiods1 = lspinfo1['nbestperiods']
nbestlspvals1 = lspinfo1['nbestlspvals']
lspmethod1 = lspinfo1['method']
periods2 = lspinfo2['periods']
lspvals2 = lspinfo2['lspvals']
bestperiod2 = lspinfo2['bestperiod']
nbestperiods2 = lspinfo2['nbestperiods']
nbestlspvals2 = lspinfo2['nbestlspvals']
lspmethod2 = lspinfo2['method']
else:
LOGERROR('could not understand lspinfo1 or lspinfo2 '
'for this object, skipping...')
return None
if (not npisfinite(bestperiod1)) or (not npisfinite(bestperiod2)):
LOGWARNING('no best period found for this object, skipping...')
return None
# initialize the plot
fig, axes = plt.subplots(3,3)
axes = npravel(axes)
# this is a full page plot
fig.set_size_inches(30,24)
######################################################################
## PLOT 1 is the LSP from lspinfo1, including objectinfo and finder ##
######################################################################
_make_periodogram(axes[0], lspinfo1, objectinfo,
findercmap, finderconvolve,
verbose=verbose,
findercachedir=findercachedir)
#####################################
## PLOT 2 is the LSP from lspinfo2 ##
#####################################
_make_periodogram(axes[1], lspinfo2, None,
findercmap, finderconvolve)
##########################################
## FIX UP THE MAGS AND REMOVE BAD STUFF ##
##########################################
# sigclip first
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# take care of the normalization
if normto is not False:
stimes, smags = normalize_magseries(stimes, smags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
# make sure we have some lightcurve points to plot after sigclip
if len(stimes) >= 50:
##############################
## PLOT 3 is an unphased LC ##
##############################
_make_magseries_plot(axes[2], stimes, smags, serrs,
magsarefluxes=magsarefluxes)
# make the plot for each best period
lspbestperiods1 = nbestperiods1[::]
lspbestperiods2 = nbestperiods2[::]
##########################################################
### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO1 ###
##########################################################
for periodind, varperiod, plotaxes in zip([0,1,2],
lspbestperiods1[:3],
[axes[3], axes[4], axes[5]]):
# figure out the epoch, if it's None, use the min of the time
if varepoch is None:
varepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
sigclip=None,
magsarefluxes=magsarefluxes,
verbose=verbose)
varepoch = spfit['fitinfo']['fitepoch']
if len(varepoch) != 1:
varepoch = varepoch[0]
except Exception as e:
LOGEXCEPTION('spline fit failed, using min(times) as epoch')
varepoch = npmin(stimes)
if verbose:
LOGINFO('plotting phased LC with period %.6f, epoch %.5f' %
(varperiod, varepoch))
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plotaxes.set_facecolor(bestperiodhighlight)
else:
plotaxes.set_axis_bgcolor(bestperiodhighlight)
_make_phased_magseries_plot(plotaxes,
periodind,
stimes, smags,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim, lspmethod1,
twolspmode=True,
magsarefluxes=magsarefluxes,
xliminsetmode=xliminsetmode)
##########################################################
### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO2 ###
##########################################################
for periodind, varperiod, plotaxes in zip([0,1,2],
lspbestperiods2[:3],
[axes[6], axes[7], axes[8]]):
# figure out the epoch, if it's None, use the min of the time
if varepoch is None:
varepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
magsarefluxes=magsarefluxes,
sigclip=None,
verbose=verbose)
varepoch = spfit['fitinfo']['fitepoch']
if len(varepoch) != 1:
varepoch = varepoch[0]
except Exception as e:
LOGEXCEPTION('spline fit failed, using min(times) as epoch')
varepoch = npmin(stimes)
if verbose:
LOGINFO('plotting phased LC with period %.6f, epoch %.5f' %
(varperiod, varepoch))
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plotaxes.set_facecolor(bestperiodhighlight)
else:
plotaxes.set_axis_bgcolor(bestperiodhighlight)
_make_phased_magseries_plot(plotaxes,
periodind,
stimes, smags,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim, lspmethod2,
twolspmode=True,
magsarefluxes=magsarefluxes,
xliminsetmode=xliminsetmode)
# end of plotting for each ax
# save the plot to disk
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath,dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close()
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
# otherwise, there's no valid data for this plot
else:
LOGWARNING('no good data')
        for periodind in range(6):
            axes[periodind+3].text(
                0.5,0.5,
                ('no best aperture light curve available'),
                horizontalalignment='center',
                verticalalignment='center',
                transform=axes[periodind+3].transAxes
            )
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath, dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close()
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
#########################################
## PICKLE CHECKPLOT UTILITY FUNCTIONS ##
#########################################
def _xyzdist_to_distarcsec(xyzdist):
'''
This just inverts the xyz unit vector distance -> angular distance relation.
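    For two unit vectors separated by an angle theta, the chord length is
    xyzdist = 2*sin(theta/2), so theta = 2*arcsin(xyzdist/2); the result is
    converted from degrees to arcseconds.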
'''
return np.degrees(2.0*np.arcsin(xyzdist/2.0))*3600.0
def _base64_to_file(b64str, outfpath, writetostrio=False):
'''
This converts the base64 encoded string to a file.
'''
try:
filebytes = base64.b64decode(b64str)
# if we're writing back to a stringio object
if writetostrio:
outobj = strio(filebytes)
return outobj
# otherwise, we're writing to an actual file
else:
with open(outfpath,'wb') as outfd:
outfd.write(filebytes)
if os.path.exists(outfpath):
return outfpath
else:
LOGERROR('could not write output file: %s' % outfpath)
return None
except Exception as e:
LOGEXCEPTION('failed while trying to convert '
'b64 string to file %s' % outfpath)
return None
def _pkl_finder_objectinfo(objectinfo,
varinfo,
findercmap,
finderconvolve,
sigclip,
normto,
normmingap,
lclistpkl=None,
nbrradiusarcsec=30.0,
plotdpi=100,
findercachedir='~/.astrobase/stamp-cache',
verbose=True):
'''This returns the finder chart and object information as a dict.
'''
if (isinstance(objectinfo, dict) and
('objectid' in objectinfo or 'hatid' in objectinfo) and
'ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] and objectinfo['decl']):
if 'objectid' not in objectinfo:
objectid = objectinfo['hatid']
else:
objectid = objectinfo['objectid']
if verbose:
LOGINFO('adding in object information and '
'finder chart for %s at RA: %.3f, DEC: %.3f' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# get the finder chart
try:
# generate the finder chart
finder, finderheader = skyview_stamp(objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
verbose=verbose,
cachedir=findercachedir)
finderfig = plt.figure(figsize=(3,3),dpi=plotdpi,frameon=False)
plt.imshow(finder, cmap=findercmap)
# skip down to after nbr stuff for the rest of the finderchart...
# search around the target's location and get its neighbors if
# lclistpkl is provided and it exists
if (lclistpkl is not None and
os.path.exists(lclistpkl) and
nbrradiusarcsec is not None and
nbrradiusarcsec > 0.0):
if lclistpkl.endswith('.gz'):
infd = gzip.open(lclistpkl,'rb')
else:
infd = open(lclistpkl,'rb')
lclist = pickle.load(infd)
infd.close()
                if 'kdtree' not in lclist:
                    LOGERROR('neighbors within %.1f arcsec for %s could '
                             'not be found, no kdtree in lclistpkl: %s' %
                             (nbrradiusarcsec, objectid, lclistpkl))
neighbors = None
kdt = None
else:
kdt = lclist['kdtree']
obj_cosdecl = np.cos(np.radians(objectinfo['decl']))
obj_sindecl = np.sin(np.radians(objectinfo['decl']))
obj_cosra = np.cos(np.radians(objectinfo['ra']))
obj_sinra = np.sin(np.radians(objectinfo['ra']))
obj_xyz = np.column_stack((obj_cosra*obj_cosdecl,
obj_sinra*obj_cosdecl,
obj_sindecl))
match_xyzdist = (
2.0 * np.sin(np.radians(nbrradiusarcsec/3600.0)/2.0)
)
matchdists, matchinds = kdt.query(
obj_xyz,
k=6, # get closest 5 neighbors + tgt
distance_upper_bound=match_xyzdist
)
# sort by matchdist
mdsorted = np.argsort(matchdists[0])
matchdists = matchdists[0][mdsorted]
matchinds = matchinds[0][mdsorted]
# luckily, the indices to the kdtree are the same as that
# for the objects (I think)
neighbors = []
# initialize the finder WCS
finderwcs = WCS(finderheader)
nbrind = 0
for md, mi in zip(matchdists, matchinds):
if np.isfinite(md) and md > 0.0:
# generate the xy for the finder we'll use a HTML5
# canvas and these pixcoords to highlight each
# neighbor when we mouse over its row in the
# neighbors tab
pixcoords = finderwcs.all_world2pix(
np.array([[lclist['objects']['ra'][mi],
lclist['objects']['decl'][mi]]]),
1
)
# each elem is {'objectid',
# 'ra','decl',
# 'xpix','ypix',
# 'dist','lcfpath'}
thisnbr = {
'objectid':lclist['objects']['objectid'][mi],
'ra':lclist['objects']['ra'][mi],
'decl':lclist['objects']['decl'][mi],
'xpix':pixcoords[0,0],
'ypix':300.0 - pixcoords[0,1],
'dist':_xyzdist_to_distarcsec(md),
'lcfpath': lclist['objects']['lcfname'][mi]
}
neighbors.append(thisnbr)
nbrind = nbrind+1
# put in a nice marker for this neighbor into the
# overall finder chart
annotatex = pixcoords[0,0]
annotatey = 300.0 - pixcoords[0,1]
if ((300.0 - annotatex) > 50.0):
offx = annotatex + 30.0
xha = 'center'
else:
offx = annotatex - 30.0
xha = 'center'
if ((300.0 - annotatey) > 50.0):
offy = annotatey - 30.0
yha = 'center'
else:
offy = annotatey + 30.0
yha = 'center'
plt.annotate('N%s' % nbrind,
(annotatex, annotatey),
xytext=(offx, offy),
arrowprops={'facecolor':'blue',
'edgecolor':'blue',
'width':1.0,
'headwidth':1.0,
'headlength':0.1,
'shrink':0.0},
color='blue',
horizontalalignment=xha,
verticalalignment=yha)
# if there are no neighbors, set the 'neighbors' key to None
else:
neighbors = None
kdt = None
#
# finish up the finder chart after neighbors are processed
#
plt.xticks([])
plt.yticks([])
# grid lines pointing to the center of the frame
plt.axvline(x=150,ymin=0.2,ymax=0.4,linewidth=2.0,color='b')
plt.axhline(y=149,xmin=0.2,xmax=0.4,linewidth=2.0,color='b')
plt.gca().set_frame_on(False)
# this is the output instance
finderpng = strio()
finderfig.savefig(finderpng,
bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the finderpng instance to base64
finderpng.seek(0)
finderb64 = base64.b64encode(finderpng.read())
# close the stringio buffer
finderpng.close()
except Exception as e:
LOGEXCEPTION('could not fetch a DSS stamp for this '
'object %s using coords (%.3f,%.3f)' %
(objectid, objectinfo['ra'], objectinfo['decl']))
finderb64 = None
neighbors = None
kdt = None
# now that we have the finder chart, get the rest of the object
# information
# first, the color features
colorfeat = color_features(objectinfo)
# next, get the coord features
coordfeat = coord_features(objectinfo)
# next, get the color classification
colorclass = color_classification(colorfeat, coordfeat)
# get the neighbor features and GAIA info
nbrfeat = neighbor_gaia_features(objectinfo, kdt, nbrradiusarcsec,
verbose=False)
# update the objectinfo dict with everything
objectinfo.update(colorfeat)
objectinfo.update(coordfeat)
objectinfo.update(colorclass)
objectinfo.update(nbrfeat)
# update GAIA info so it's available at the first level
if 'ok' in objectinfo['gaia_status']:
objectinfo['gaiamag'] = objectinfo['gaia_mags'][0]
objectinfo['gaia_absmag'] = objectinfo['gaia_absolute_mags'][0]
objectinfo['gaia_parallax'] = objectinfo['gaia_parallaxes'][0]
objectinfo['gaia_parallax_err'] = objectinfo['gaia_parallax_errs'][0]
else:
objectinfo['gaiamag'] = np.nan
objectinfo['gaia_absmag'] = np.nan
objectinfo['gaia_parallax'] = np.nan
objectinfo['gaia_parallax_err'] = np.nan
# put together the initial checkplot pickle dictionary
# this will be updated by the functions below as appropriate
        # and will be written out as a gzipped pickle at the end of processing
checkplotdict = {'objectid':objectid,
'neighbors':neighbors,
'objectinfo':objectinfo,
'finderchart':finderb64,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# add the objecttags key to objectinfo
checkplotdict['objectinfo']['objecttags'] = None
# if there's no objectinfo, we can't do anything.
else:
# put together the initial checkplot pickle dictionary
# this will be updated by the functions below as appropriate
        # and will be written out as a gzipped pickle at the end of processing
checkplotdict = {'objectid':None,
'neighbors':None,
'objectinfo':{'bmag':None,
'bvcolor':None,
'decl':None,
'hatid':None,
'hmag':None,
'ijcolor':None,
'jkcolor':None,
'jmag':None,
'kmag':None,
'ndet':None,
'network':None,
'objecttags':None,
'pmdecl':None,
'pmdecl_err':None,
'pmra':None,
'pmra_err':None,
'propermotion':None,
'ra':None,
'rpmj':None,
'sdssg':None,
'sdssi':None,
'sdssr':None,
'stations':None,
'twomassid':None,
'ucac4id':None,
'vmag':None},
'finderchart':None,
'sigclip':sigclip,
'normto':normto,
'normmingap':normmingap}
# end of objectinfo processing
# add the varinfo dict
if isinstance(varinfo, dict):
checkplotdict['varinfo'] = varinfo
else:
checkplotdict['varinfo'] = {
'objectisvar':None,
'vartags':None,
'varisperiodic':None,
'varperiod':None,
'varepoch':None,
}
return checkplotdict
def _pkl_periodogram(lspinfo,
plotdpi=100,
override_pfmethod=None):
'''This returns the periodogram plot PNG as base64, plus info as a dict.
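    A usage sketch (assuming `gls` is a periodbase result dict and `cpd` is
    the checkplotdict being assembled):

        cpd.update(_pkl_periodogram(gls, plotdpi=100))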
'''
# get the appropriate plot ylabel
pgramylabel = PLOTYLABELS[lspinfo['method']]
# get the periods and lspvals from lspinfo
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
# open the figure instance
pgramfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
# make the plot
plt.plot(periods,lspvals)
plt.xscale('log',basex=10)
plt.xlabel('Period [days]')
plt.ylabel(pgramylabel)
plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']],
bestperiod)
plt.title(plottitle)
# show the best five peaks on the plot
for xbestperiod, xbestpeak in zip(nbestperiods,
nbestlspvals):
plt.annotate('%.6f' % xbestperiod,
xy=(xbestperiod, xbestpeak), xycoords='data',
xytext=(0.0,25.0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize='14.0')
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# this is the output instance
pgrampng = strio()
pgramfig.savefig(pgrampng,
# bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the finderpng instance to base64
pgrampng.seek(0)
pgramb64 = base64.b64encode(pgrampng.read())
# close the stringio buffer
pgrampng.close()
if not override_pfmethod:
# this is the dict to return
checkplotdict = {
lspinfo['method']:{
'periods':periods,
'lspvals':lspvals,
'bestperiod':bestperiod,
'nbestperiods':nbestperiods,
'nbestlspvals':nbestlspvals,
'periodogram':pgramb64,
}
}
else:
# this is the dict to return
checkplotdict = {
override_pfmethod:{
'periods':periods,
'lspvals':lspvals,
'bestperiod':bestperiod,
'nbestperiods':nbestperiods,
'nbestlspvals':nbestlspvals,
'periodogram':pgramb64,
}
}
return checkplotdict
def _pkl_magseries_plot(stimes, smags, serrs,
plotdpi=100,
magsarefluxes=False):
'''This returns the magseries plot PNG as base64, plus arrays as dict.
'''
scaledplottime = stimes - npmin(stimes)
# open the figure instance
magseriesfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
plt.plot(scaledplottime,
smags,
marker='o',
ms=2.0, ls='None',mew=0,
color='green',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = plt.ylim()
plt.ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
plot_xlim = plt.xlim()
plt.xlim((npmin(scaledplottime)-2.0,
npmax(scaledplottime)+2.0))
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'JD - %.3f' % npmin(stimes)
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
# this is the output instance
magseriespng = strio()
magseriesfig.savefig(magseriespng,
# bbox_inches='tight',
pad_inches=0.05, format='png')
plt.close()
# encode the finderpng instance to base64
magseriespng.seek(0)
magseriesb64 = base64.b64encode(magseriespng.read())
# close the stringio buffer
magseriespng.close()
checkplotdict = {
'magseries':{
'plot':magseriesb64,
'times':stimes,
'mags':smags,
'errs':serrs
}
}
return checkplotdict
def _pkl_phased_magseries_plot(checkplotdict, lspmethod, periodind,
stimes, smags, serrs,
varperiod, varepoch,
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
plotdpi=100,
bestperiodhighlight=None,
xgridlines=None,
xliminsetmode=False,
magsarefluxes=False,
directreturn=False,
overplotfit=None,
verbose=True,
override_pfmethod=None):
'''This returns the phased magseries plot PNG as base64 plus info as a dict.
checkplotdict is an existing checkplotdict to update. If it's None or
directreturn = True, then the generated dict result for this magseries plot
will be returned directly.
lspmethod is a string indicating the type of period-finding algorithm that
produced the period. If this is not in METHODSHORTLABELS, it will be used
verbatim.
periodind is the index of the period.
If == 0 -> best period and bestperiodhighlight is applied if not None
If > 0 -> some other peak of the periodogram
If == -1 -> special mode w/ no periodogram labels and enabled highlight
overplotfit is a result dict returned from one of the XXXX_fit_magseries
functions in astrobase.varbase.lcfit. If this is not None, then the fit will
be overplotted on the phased light curve plot.
overplotfit must have the following structure and at least the keys below if
not originally from one of these functions:
{'fittype':<str: name of fit method>,
'fitchisq':<float: the chi-squared value of the fit>,
'fitredchisq':<float: the reduced chi-squared value of the fit>,
'fitinfo':{'fitmags':<ndarray: model mags or fluxes from fit function>},
'magseries':{'times':<ndarray: times at which the fitmags are evaluated>}}
fitmags and times should all be of the same size. overplotfit is copied over
to the checkplot dict for each specific phased LC plot to save all of this
information.
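    A direct-return sketch (hypothetical values; stimes, smags, serrs are
    sigma-clipped arrays and 6.2857 is a period from a period-finder):

        pdict = _pkl_phased_magseries_plot(None, 'gls', 0,
                                           stimes, smags, serrs,
                                           6.2857, 'min',
                                           directreturn=True)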
'''
# open the figure instance
phasedseriesfig = plt.figure(figsize=(7.5,4.8),dpi=plotdpi)
# figure out the epoch, if it's None, use the min of the time
if varepoch is None:
varepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch,str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
magsarefluxes=magsarefluxes,
sigclip=None,
verbose=verbose)
varepoch = spfit['fitinfo']['fitepoch']
if len(varepoch) != 1:
varepoch = varepoch[0]
except Exception as e:
LOGEXCEPTION('spline fit failed, using min(times) as epoch')
varepoch = npmin(stimes)
if verbose:
LOGINFO('plotting %s phased LC with period %s: %.6f, epoch: %.5f' %
(lspmethod, periodind, varperiod, varepoch))
# make the plot title based on the lspmethod
if periodind == 0:
plottitle = '%s best period: %.6f d - epoch: %.5f' % (
(METHODSHORTLABELS[lspmethod] if lspmethod in METHODSHORTLABELS
else lspmethod),
varperiod,
varepoch
)
elif periodind > 0:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
(METHODSHORTLABELS[lspmethod] if lspmethod in METHODSHORTLABELS
else lspmethod),
periodind+1,
varperiod,
varepoch
)
elif periodind == -1:
plottitle = '%s period: %.6f d - epoch: %.5f' % (
lspmethod,
varperiod,
varepoch
)
# phase the magseries
phasedlc = phase_magseries(stimes,
smags,
varperiod,
varepoch,
wrap=phasewrap,
sort=phasesort)
plotphase = phasedlc['phase']
plotmags = phasedlc['mags']
# if we're supposed to bin the phases, do so
if phasebin:
binphasedlc = phase_bin_magseries(plotphase,
plotmags,
binsize=phasebin,
minbinelems=minbinelems)
binplotphase = binphasedlc['binnedphases']
binplotmags = binphasedlc['binnedmags']
else:
binplotphase = None
binplotmags = None
# finally, make the phased LC plot
plt.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
plt.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# if we're making a overplotfit, then plot the fit over the other stuff
if overplotfit and isinstance(overplotfit, dict):
fitmethod = overplotfit['fittype']
fitchisq = overplotfit['fitchisq']
fitredchisq = overplotfit['fitredchisq']
plotfitmags = overplotfit['fitinfo']['fitmags']
plotfittimes = overplotfit['magseries']['times']
# phase the fit magseries
fitphasedlc = phase_magseries(plotfittimes,
plotfitmags,
varperiod,
varepoch,
wrap=phasewrap,
sort=phasesort)
plotfitphase = fitphasedlc['phase']
plotfitmags = fitphasedlc['mags']
        plotfitlabel = (r'%s fit ${\chi}^2/{\mathrm{dof}} = %.3f$' %
                        (fitmethod, fitredchisq))
# plot the fit phase and mags
plt.plot(plotfitphase, plotfitmags,'k-',
linewidth=3, rasterized=True,label=plotfitlabel)
plt.legend(loc='upper left', frameon=False)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = plt.ylim()
plt.ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
if not plotxlim:
plot_xlim = plt.xlim()
plt.xlim((npmin(plotphase)-0.1,
npmax(plotphase)+0.1))
else:
plt.xlim((plotxlim[0],plotxlim[1]))
# make a grid
ax = plt.gca()
if isinstance(xgridlines,list):
ax.set_xticks(xgridlines, minor=False)
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'phase'
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)
plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
# set the plot title
plt.title(plottitle)
# make sure the best period phased LC plot stands out
if (periodind == 0 or periodind == -1) and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plt.gca().set_facecolor(bestperiodhighlight)
else:
plt.gca().set_axis_bgcolor(bestperiodhighlight)
# if we're making an inset plot showing the full range
if (plotxlim and isinstance(plotxlim, list) and
len(plotxlim) == 2 and xliminsetmode is True):
# bump the ylim of the plot so that the inset can fit in this axes plot
axesylim = plt.gca().get_ylim()
if magsarefluxes:
plt.gca().set_ylim(
axesylim[0],
axesylim[1] + 0.5*npabs(axesylim[1]-axesylim[0])
)
else:
plt.gca().set_ylim(
axesylim[0],
axesylim[1] - 0.5*npabs(axesylim[1]-axesylim[0])
)
# put the inset axes in
inset = inset_axes(plt.gca(), width="40%", height="40%", loc=1)
# make the scatter plot for the phased LC plot
inset.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
if phasebin:
# make the scatter plot for the phased LC plot
inset.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# show the full phase coverage
if phasewrap:
inset.set_xlim(-0.2,0.8)
else:
inset.set_xlim(-0.1,1.1)
# flip y axis for mags
if not magsarefluxes:
inset_ylim = inset.get_ylim()
inset.set_ylim((inset_ylim[1], inset_ylim[0]))
# set the plot title
inset.text(0.5,0.9,'full phased light curve',
ha='center',va='center',transform=inset.transAxes)
# don't show axes labels or ticks
inset.set_xticks([])
inset.set_yticks([])
# this is the output instance
phasedseriespng = strio()
phasedseriesfig.savefig(phasedseriespng,
# bbox_inches='tight',
pad_inches=0.0, format='png')
plt.close()
# encode the finderpng instance to base64
phasedseriespng.seek(0)
phasedseriesb64 = base64.b64encode(phasedseriespng.read())
# close the stringio buffer
phasedseriespng.close()
# this includes a fitinfo dict if one is provided in overplotfit
retdict = {
'plot':phasedseriesb64,
'period':varperiod,
'epoch':varepoch,
'phase':plotphase,
'phasedmags':plotmags,
'binphase':binplotphase,
'binphasedmags':binplotmags,
'phasewrap':phasewrap,
'phasesort':phasesort,
'phasebin':phasebin,
'minbinelems':minbinelems,
'plotxlim':plotxlim,
'lcfit':overplotfit,
}
# if we're returning stuff directly, i.e. not being used embedded within
# the checkplot_dict function
if directreturn or checkplotdict is None:
return retdict
# this requires the checkplotdict to be present already, we'll just update
# it at the appropriate lspmethod and periodind
else:
if override_pfmethod:
checkplotdict[override_pfmethod][periodind] = retdict
else:
checkplotdict[lspmethod][periodind] = retdict
return checkplotdict
#########################################
## XMATCHING AGAINST EXTERNAL CATALOGS ##
#########################################
def _parse_xmatch_catalog_header(xc, xk):
'''
This parses the header for a catalog file.
'''
catdef = []
# read in this catalog and transparently handle gzipped files
if xc.endswith('.gz'):
infd = gzip.open(xc,'rb')
else:
infd = open(xc,'rb')
# read in the defs
for line in infd:
if line.decode().startswith('#'):
catdef.append(
line.decode().replace('#','').strip().rstrip('\n')
)
if not line.decode().startswith('#'):
break
    if len(catdef) == 0:
LOGERROR("catalog definition not parseable "
"for catalog: %s, skipping..." % xc)
return None
catdef = ' '.join(catdef)
catdefdict = json.loads(catdef)
catdefkeys = [x['key'] for x in catdefdict['columns']]
catdefdtypes = [x['dtype'] for x in catdefdict['columns']]
catdefnames = [x['name'] for x in catdefdict['columns']]
catdefunits = [x['unit'] for x in catdefdict['columns']]
# get the correct column indices and dtypes for the requested columns
# from the catdefdict
catcolinds = []
catcoldtypes = []
catcolnames = []
catcolunits = []
for xkcol in xk:
if xkcol in catdefkeys:
xkcolind = catdefkeys.index(xkcol)
catcolinds.append(xkcolind)
catcoldtypes.append(catdefdtypes[xkcolind])
catcolnames.append(catdefnames[xkcolind])
catcolunits.append(catdefunits[xkcolind])
return (infd, catdefdict,
catcolinds, catcoldtypes, catcolnames, catcolunits)
def load_xmatch_external_catalogs(xmatchto, xmatchkeys, outfile=None):
'''This loads the external xmatch catalogs into a dict for use here.
xmatchto is a list of text files that contain each catalog.
the text files must be 'CSVs' that use the '|' character as the separator
    between columns. These files should all begin with a header in JSON format on
lines starting with the '#' character. this header will define the catalog
and contains the name of the catalog and the column definitions. Column
definitions must have the column name and the numpy dtype of the columns (in
the same format as that expected for the numpy.genfromtxt function). Any
line that does not begin with '#' is assumed to be part of the columns in
the catalog. An example is shown below.
# {"name":"NSVS catalog of variable stars",
# "columns":[
# {"key":"objectid", "dtype":"U20", "name":"Object ID", "unit": null},
# {"key":"ra", "dtype":"f8", "name":"RA", "unit":"deg"},
# {"key":"decl","dtype":"f8", "name": "Declination", "unit":"deg"},
# {"key":"sdssr","dtype":"f8","name":"SDSS r", "unit":"mag"},
# {"key":"vartype","dtype":"U20","name":"Variable type", "unit":null}
# ],
# "colra":"ra",
# "coldec":"decl",
# "description":"Contains variable stars from the NSVS catalog"}
objectid1 | 45.0 | -20.0 | 12.0 | detached EB
objectid2 | 145.0 | 23.0 | 10.0 | RRab
objectid3 | 12.0 | 11.0 | 14.0 | Cepheid
.
.
.
xmatchkeys is the list of lists of columns to get out of each xmatchto
catalog. this should be the same length as xmatchto and each element here
will apply to the respective file in xmatchto.
    if outfile is not None, set this to the name of the pickle to write the
    collected xmatch catalogs to. this pickle can then be loaded transparently
    by the checkplot_dict, checkplot_pickle functions to provide xmatch info
    to the xmatch_external_catalogs function below.
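    An example call (file names and column keys here are hypothetical):

        xmd = load_xmatch_external_catalogs(
            ['nsvs-vars.csv', 'gcvs-vars.csv'],
            [['objectid','ra','decl','sdssr','vartype'],
             ['objectid','ra','decl','vartype']],
            outfile='xmatch-catalogs.pkl'
        )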
'''
outdict = {}
for xc, xk in zip(xmatchto, xmatchkeys):
parsed_catdef = _parse_xmatch_catalog_header(xc, xk)
if not parsed_catdef:
continue
(infd, catdefdict,
catcolinds, catcoldtypes,
catcolnames, catcolunits) = parsed_catdef
# get the specified columns out of the catalog
catarr = np.genfromtxt(infd,
usecols=catcolinds,
names=xk,
dtype=','.join(catcoldtypes),
comments='#',
delimiter='|',
autostrip=True)
infd.close()
catshortname = os.path.splitext(os.path.basename(xc))[0]
catshortname = catshortname.replace('.csv','')
#
# make a kdtree for this catalog
#
# get the ra and decl columns
objra, objdecl = (catarr[catdefdict['colra']],
catarr[catdefdict['coldec']])
# get the xyz unit vectors from ra,decl
cosdecl = np.cos(np.radians(objdecl))
sindecl = np.sin(np.radians(objdecl))
cosra = np.cos(np.radians(objra))
sinra = np.sin(np.radians(objra))
xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))
# generate the kdtree
kdt = cKDTree(xyz,copy_data=True)
# generate the outdict element for this catalog
catoutdict = {'kdtree':kdt,
'data':catarr,
'columns':xk,
'colnames':catcolnames,
'colunits':catcolunits,
'name':catdefdict['name'],
'desc':catdefdict['description']}
outdict[catshortname] = catoutdict
if outfile is not None:
# if we're on OSX, we apparently need to save the file in chunks smaller
# than 2 GB to make it work right. can't load pickles larger than 4 GB
# either, but 3 GB < total size < 4 GB appears to be OK when loading.
# also see: https://bugs.python.org/issue24658.
# fix adopted from: https://stackoverflow.com/a/38003910
if sys.platform == 'darwin':
            dumpbytes = pickle.dumps(outdict, protocol=pickle.HIGHEST_PROTOCOL)
            max_bytes = 2**31 - 1
with open(outfile, 'wb') as outfd:
for idx in range(0, len(dumpbytes), max_bytes):
outfd.write(dumpbytes[idx:idx+max_bytes])
else:
with open(outfile, 'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outfile
else:
return outdict
def xmatch_external_catalogs(checkplotdict,
xmatchinfo,
xmatchradiusarcsec=2.0,
returndirect=False,
updatexmatch=True,
savepickle=None):
'''This matches the current object to the external match catalogs in
xmatchdict.
checkplotdict is the usual checkplot dict. this must contain at least
'objectid', and in the 'objectinfo' subdict: 'ra', and 'decl'. an 'xmatch'
key will be added to this dict, with something like the following dict as
the value:
{'xmatchradiusarcsec':xmatchradiusarcsec,
'catalog1':{'name':'Catalog of interesting things',
'found':True,
'distarcsec':0.7,
'info':{'objectid':...,'ra':...,'decl':...,'desc':...}},
'catalog2':{'name':'Catalog of more interesting things',
'found':False,
'distarcsec':nan,
'info':None},
.
.
.
....}
    xmatchinfo is either the dict produced by load_xmatch_external_catalogs or
    the pickle produced by the same function.
xmatchradiusarcsec is the xmatch radius in arcseconds.
NOTE: this modifies checkplotdict IN PLACE if returndirect is False. If it
is True, then just returns the xmatch results as a dict.
If updatexmatch is True, any previous 'xmatch' elements in the checkplotdict
will be added on to instead of being overwritten.
If savepickle is not None, it should be the name of a checkplot pickle file
to write the pickle back to.
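    A usage sketch (assuming `cpd` is an existing checkplotdict and the pickle
    was written by load_xmatch_external_catalogs above):

        cpd = xmatch_external_catalogs(cpd, 'xmatch-catalogs.pkl',
                                       xmatchradiusarcsec=3.0)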
'''
# load the xmatch info
if isinstance(xmatchinfo, str) and os.path.exists(xmatchinfo):
with open(xmatchinfo,'rb') as infd:
xmatchdict = pickle.load(infd)
elif isinstance(xmatchinfo, dict):
xmatchdict = xmatchinfo
else:
LOGERROR("can't figure out xmatch info, can't xmatch, skipping...")
return checkplotdict
#
# generate the xmatch spec
#
# get our ra, decl
objra = checkplotdict['objectinfo']['ra']
objdecl = checkplotdict['objectinfo']['decl']
cosdecl = np.cos(np.radians(objdecl))
sindecl = np.sin(np.radians(objdecl))
cosra = np.cos(np.radians(objra))
sinra = np.sin(np.radians(objra))
objxyz = np.column_stack((cosra*cosdecl,
sinra*cosdecl,
sindecl))
# this is the search distance in xyz unit vectors
xyzdist = 2.0 * np.sin(np.radians(xmatchradiusarcsec/3600.0)/2.0)
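    # (this is the chord length 2*sin(theta/2) corresponding to an angular
    # separation of xmatchradiusarcsec; see _xyzdist_to_distarcsec above for
    # the inverse relation)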
#
# now search in each external catalog
#
xmatchresults = {}
extcats = sorted(list(xmatchdict.keys()))
for ecat in extcats:
# get the kdtree
kdt = xmatchdict[ecat]['kdtree']
# look up the coordinates
kdt_dist, kdt_ind = kdt.query(objxyz,
k=1,
distance_upper_bound=xyzdist)
# sort by matchdist
mdsorted = np.argsort(kdt_dist)
matchdists = kdt_dist[mdsorted]
matchinds = kdt_ind[mdsorted]
if matchdists[np.isfinite(matchdists)].size == 0:
xmatchresults[ecat] = {'name':xmatchdict[ecat]['name'],
'desc':xmatchdict[ecat]['desc'],
'found':False,
'distarcsec':None,
'info':None}
else:
for md, mi in zip(matchdists, matchinds):
if np.isfinite(md) and md < xyzdist:
infodict = {}
distarcsec = _xyzdist_to_distarcsec(md)
for col in xmatchdict[ecat]['columns']:
coldata = xmatchdict[ecat]['data'][col][mi]
if isinstance(coldata, str):
coldata = coldata.strip()
infodict[col] = coldata
xmatchresults[ecat] = {
'name':xmatchdict[ecat]['name'],
'desc':xmatchdict[ecat]['desc'],
'found':True,
'distarcsec':distarcsec,
'info':infodict,
'colkeys':xmatchdict[ecat]['columns'],
'colnames':xmatchdict[ecat]['colnames'],
'colunit':xmatchdict[ecat]['colunits'],
}
break
#
# should now have match results for all external catalogs
#
if returndirect:
return xmatchresults
else:
if updatexmatch and 'xmatch' in checkplotdict:
checkplotdict['xmatch'].update(xmatchresults)
else:
checkplotdict['xmatch'] = xmatchresults
if savepickle:
cpf = _write_checkplot_picklefile(checkplotdict,
outfile=savepickle,
protocol=4)
return cpf
else:
return checkplotdict
########################
## READ/WRITE PICKLES ##
########################
def _write_checkplot_picklefile(checkplotdict,
outfile=None,
protocol=2,
outgzip=False):
'''This writes the checkplotdict to a (gzipped) pickle file.
If outfile is None, writes a (gzipped) pickle file of the form:
checkplot-{objectid}.pkl(.gz)
to the current directory.
protocol sets the pickle protocol:
3 -> default in Python 3 - way faster but incompatible with Python 2
2 -> default in Python 2 - very slow, but compatible with Python 2 and 3
the default protocol is 2 so that pickle files generated by newer Pythons
can still be read by older ones. if this isn't a concern, set protocol to 3.
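    A usage sketch (assuming `cpd` is a finished checkplotdict):

        cpfpath = _write_checkplot_picklefile(
            cpd, outfile='checkplot-%s.pkl' % cpd['objectid']
        )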
'''
if outgzip:
if not outfile:
outfile = (
'checkplot-{objectid}.pkl.gz'.format(
objectid=checkplotdict['objectid']
)
)
with gzip.open(outfile,'wb') as outfd:
pickle.dump(checkplotdict,outfd,protocol=protocol)
else:
if not outfile:
outfile = (
'checkplot-{objectid}.pkl'.format(
objectid=checkplotdict['objectid']
)
)
# make sure to do the right thing if '.gz' is in the filename but
# outgzip was False
if outfile.endswith('.gz'):
LOGWARNING('output filename ends with .gz but kwarg outgzip=False. '
'will use gzip to compress the output pickle')
with gzip.open(outfile,'wb') as outfd:
pickle.dump(checkplotdict,outfd,protocol=protocol)
else:
with open(outfile,'wb') as outfd:
pickle.dump(checkplotdict,outfd,protocol=protocol)
return os.path.abspath(outfile)
def _read_checkplot_picklefile(checkplotpickle):
'''This reads a checkplot gzipped pickle file back into a dict.
NOTE: the try-except is for Python 2 pickles that have numpy arrays in
them. Apparently, these aren't compatible with Python 3. See here:
http://stackoverflow.com/q/11305790
The workaround is noted in this answer:
http://stackoverflow.com/a/41366785
But not sure how robust this is. We should probably move to another format
for these checkplots.
'''
if checkplotpickle.endswith('.gz'):
try:
with gzip.open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd)
except UnicodeDecodeError:
with gzip.open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd, encoding='latin1')
LOGWARNING('pickle %s was probably from Python 2 '
'and failed to load without using "latin1" encoding. '
'This is probably a numpy issue: '
'http://stackoverflow.com/q/11305790' % checkplotpickle)
else:
try:
with open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd)
except UnicodeDecodeError:
with open(checkplotpickle,'rb') as infd:
cpdict = pickle.load(infd, encoding='latin1')
LOGWARNING('pickle %s was probably from Python 2 '
'and failed to load without using "latin1" encoding. '
'This is probably a numpy issue: '
'http://stackoverflow.com/q/11305790' % checkplotpickle)
return cpdict
#############################
## CHECKPLOT DICT FUNCTION ##
#############################
def checkplot_dict(lspinfolist,
times,
mags,
errs,
magsarefluxes=False,
nperiodstouse=3,
objectinfo=None,
varinfo=None,
getvarfeatures=True,
lclistpkl=None,
nbrradiusarcsec=60.0,
xmatchinfo=None,
xmatchradiusarcsec=3.0,
lcfitfunc=None,
lcfitparams={},
externalplots=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
sigclip=4.0,
varepoch='min',
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
xliminsetmode=False,
plotdpi=100,
bestperiodhighlight=None,
xgridlines=None,
mindet=1000,
verbose=True):
'''This writes a multiple lspinfo checkplot to a dict.
This function can take input from multiple lspinfo dicts (e.g. a list of
output dicts or gzipped pickles of dicts from independent runs of BLS, PDM,
AoV, or GLS period-finders in periodbase).
NOTE: if lspinfolist contains more than one lspinfo object with the same
lspmethod ('pdm','gls','sls','aov','bls'), the latest one in the list will
overwrite the earlier ones.
The output dict contains all the plots (magseries and phased
magseries), periodograms, object information, variability information, light
curves, and phased light curves. This can be written to:
- a pickle with checkplot.checkplot_pickle below
- a PNG with checkplot.checkplot_dict_png below
All kwargs are the same as for checkplot_png, except for the following:
nperiodstouse controls how many 'best' periods to make phased LC plots
for. By default, this is the 3 best. If this is set to None, all 'best'
periods present in each lspinfo dict's 'nbestperiods' key will be plotted
(this is 5 according to periodbase functions' defaults).
varinfo is a dictionary with the following keys:
{'objectisvar': True if object is time-variable,
'vartags': list of variable type tags (strings),
'varisperiodic': True if object is a periodic variable,
'varperiod': variability period of the object,
'varepoch': epoch of variability in JD}
if varinfo is None, an initial empty dictionary of this form will be created
and written to the output pickle. This can be later updated using
checkplotviewer.py, etc.
If getvarfeatures is True, will use the function
varbase.features.all_nonperiodic_features to calculate several light curve
features such as the median, MAD, Stetson J index, CDPP, percentiles, etc.
lcfitfunc is a Python function that is used to fit a model to the light
curve. This is then overplotted for each phased light curve in the
checkplot. This function should have the following signature:
def lcfitfunc(times, mags, errs, period, **lcfitparams)
where lcfitparams encapsulates all external parameters (i.e. number of knots
for a spline function, the degree of a Legendre polynomial fit, etc.) This
function should return a Python dict with the following structure (similar
to the functions in astrobase.varbase.lcfit) and at least the keys below:
{'fittype':<str: name of fit method>,
'fitchisq':<float: the chi-squared value of the fit>,
'fitredchisq':<float: the reduced chi-squared value of the fit>,
'fitinfo':{'fitmags':<ndarray: model mags or fluxes from fit function>},
'magseries':{'times':<ndarray: times at which the fitmags are evaluated>}}
additional keys can include ['fitinfo']['finalparams'] for the final model
fit parameters (this will be used by the checkplotserver if present),
['fitinfo']['fitepoch'] for the minimum light epoch returned by the model
fit, among others. in any case, the output dict of lcfitfunc will be copied
to the output checkplot pickle's ['lcfit'][<fittype>] key:val dict for each
phased light curve.
externalplots is a list of 4-element tuples containing:
1. path to PNG of periodogram from a external period-finding method
2. path to PNG of best period phased light curve from external period-finder
3. path to PNG of 2nd-best phased light curve from external period-finder
4. path to PNG of 3rd-best phased light curve from external period-finder
This can be used to incorporate external period-finding method results into
the output checkplot pickle or exported PNG to allow for comparison with
astrobase results.
example of externalplots:
extrarows = [('/path/to/external/bls-periodogram.png',
'/path/to/external/bls-phasedlc-plot-bestpeak.png',
'/path/to/external/bls-phasedlc-plot-peak2.png',
'/path/to/external/bls-phasedlc-plot-peak3.png'),
('/path/to/external/pdm-periodogram.png',
'/path/to/external/pdm-phasedlc-plot-bestpeak.png',
'/path/to/external/pdm-phasedlc-plot-peak2.png',
'/path/to/external/pdm-phasedlc-plot-peak3.png'),
...]
If externalplots is provided, the checkplot_pickle_to_png function below
will automatically retrieve these plot PNGs and put them into the exported
checkplot PNG.
sigclip is either a single float or a list of two floats. in the first case,
the sigclip is applied symmetrically. in the second case, the first sigclip
in the list is applied to +ve magnitude deviations (fainter) and the second
sigclip in the list is applied to -ve magnitude deviations (brighter).
An example list would be `[10.,3.]` (for 10 sigma dimmings, 3 sigma
brightenings).
    bestperiodhighlight sets the background highlight color for the phased
    light curve plotted at the best period of each periodogram type, to
    distinguish it from the others. this is an HTML hex color
    specification. If this is None, no highlight will be added.
xgridlines (default None) can be a list, e.g., [-0.5,0.,0.5] that sets the
x-axis grid lines on plotted phased LCs for easy visual identification of
important features.
xliminsetmode = True sets up the phased mag series plot to show a zoomed-in
portion (set by plotxlim) as the main plot and an inset version of the full
phased light curve from phase 0.0 to 1.0. This can be useful if searching
for small dips near phase 0.0 caused by planetary transits for example.
'''
# 0. get the objectinfo and finder chart and initialize the checkplotdict
checkplotdict = _pkl_finder_objectinfo(objectinfo,
varinfo,
findercmap,
finderconvolve,
sigclip,
normto,
normmingap,
lclistpkl=lclistpkl,
nbrradiusarcsec=nbrradiusarcsec,
plotdpi=plotdpi,
verbose=verbose,
findercachedir=findercachedir)
# if an objectinfo dict is absent, we'll generate a fake objectid based on
# the second five time and mag array values. this should be OK to ID the
# object across repeated runs of this function with the same times, mags,
    # errs, and should provide enough uniqueness otherwise (across different
# times/mags array inputs). this is all done so we can still save checkplots
# correctly to pickles after reviewing them using checkplotserver
# try again to get the right objectid
if (objectinfo and isinstance(objectinfo, dict) and
'objectid' in objectinfo and objectinfo['objectid']):
checkplotdict['objectid'] = objectinfo['objectid']
# if this doesn't work, generate a random one
if checkplotdict['objectid'] is None:
try:
            objuuid = hashlib.sha512(times[5:10].tobytes() +
                                     mags[5:10].tobytes()).hexdigest()[:5]
except Exception as e:
LOGWARNING('times, mags, and errs may have too few items')
            objuuid = hashlib.sha512(times.tobytes() +
                                     mags.tobytes()).hexdigest()[:5]
LOGWARNING('no objectid provided in objectinfo keyword arg, '
'generated from times[5:10] + mags[5:10]: %s' % objuuid)
checkplotdict['objectid'] = objuuid
# filter the input times, mags, errs; do sigclipping and normalization
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# fail early if not enough light curve points
if ((stimes is None) or (smags is None) or (serrs is None) or
(stimes.size < 49) or (smags.size < 49) or (serrs.size < 49)):
LOGERROR("one or more of times, mags, errs appear to be None "
"after sig-clipping. are the measurements all nan? "
"can't make a checkplot for this objectid: %s" %
checkplotdict['objectid'])
checkplotdict['magseries'] = None
checkplotdict['status'] = 'failed: LC points appear to be all nan'
return checkplotdict
# this may fix some unpickling issues for astropy.table.Column objects
# we convert them back to ndarrays
if isinstance(stimes, astcolumn):
stimes = stimes.data
LOGWARNING('times is an astropy.table.Column object, '
'changing to numpy array because of '
'potential unpickling issues')
if isinstance(smags, astcolumn):
smags = smags.data
LOGWARNING('mags is an astropy.table.Column object, '
'changing to numpy array because of '
'potential unpickling issues')
if isinstance(serrs, astcolumn):
serrs = serrs.data
LOGWARNING('errs is an astropy.table.Column object, '
'changing to numpy array because of '
'potential unpickling issues')
# report on how sigclip went
if verbose:
LOGINFO('sigclip = %s: before = %s observations, '
'after = %s observations' %
(sigclip, len(times), len(stimes)))
# take care of the normalization
if normto is not False:
stimes, smags = normalize_magseries(stimes, smags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
# make sure we have some lightcurve points to plot after sigclip
if len(stimes) > mindet:
# 1. get the mag series plot using these filtered stimes, smags, serrs
magseriesdict = _pkl_magseries_plot(stimes, smags, serrs,
plotdpi=plotdpi,
magsarefluxes=magsarefluxes)
# update the checkplotdict
checkplotdict.update(magseriesdict)
# 2. for each lspinfo in lspinfolist, read it in (from pkl or pkl.gz
# if necessary), make the periodogram, make the phased mag series plots
# for each of the nbestperiods in each lspinfo dict
checkplot_pfmethods = []
for lspind, lspinfo in enumerate(lspinfolist):
# get the LSP from a pickle file transparently
if isinstance(lspinfo,str) and os.path.exists(lspinfo):
LOGINFO('loading LSP info from pickle %s' % lspinfo)
                if lspinfo.endswith('.gz'):
with gzip.open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
else:
with open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
# make the periodogram first
# we'll prepend the lspmethod index to allow for multiple same
# lspmethods
override_pfmethod = '%s-%s' % (lspind, lspinfo['method'])
periodogramdict = _pkl_periodogram(
lspinfo,
plotdpi=plotdpi,
override_pfmethod=override_pfmethod
)
# update the checkplotdict.
checkplotdict.update(periodogramdict)
# now, make the phased light curve plots for each of the
# nbestperiods from this periodogram
for nbpind, nbperiod in enumerate(
lspinfo['nbestperiods'][:nperiodstouse]
):
# if there's a function to use for fitting, do the fit
if lcfitfunc:
try:
overplotfit = lcfitfunc(stimes,
smags,
serrs,
nbperiod,
**lcfitparams)
except Exception as e:
LOGEXCEPTION('the light curve fitting function '
'failed, not plotting a fit over the '
'phased light curve')
overplotfit = None
else:
overplotfit = None
# this updates things as it runs
checkplotdict = _pkl_phased_magseries_plot(
checkplotdict,
lspinfo['method'],
nbpind,
stimes, smags, serrs,
nbperiod, varepoch,
phasewrap=phasewrap,
phasesort=phasesort,
phasebin=phasebin,
minbinelems=minbinelems,
plotxlim=plotxlim,
overplotfit=overplotfit,
plotdpi=plotdpi,
bestperiodhighlight=bestperiodhighlight,
magsarefluxes=magsarefluxes,
xliminsetmode=xliminsetmode,
xgridlines=xgridlines,
verbose=verbose,
override_pfmethod=override_pfmethod,
)
# if there's an snr key for this lspmethod, add the info in it to
# the checkplotdict as well
            if 'snr' in lspinfo:
                checkplotdict[override_pfmethod]['snr'] = (
                    lspinfo['snr']
                )
            if 'altsnr' in lspinfo:
                checkplotdict[override_pfmethod]['altsnr'] = (
                    lspinfo['altsnr']
                )
            if 'transitdepth' in lspinfo:
                checkplotdict[override_pfmethod]['transitdepth'] = (
                    lspinfo['transitdepth']
                )
            if 'transitduration' in lspinfo:
                checkplotdict[override_pfmethod]['transitduration'] = (
                    lspinfo['transitduration']
                )
checkplot_pfmethods.append(override_pfmethod)
#
# end of processing each pfmethod
#
## update the checkplot dict with some other stuff that's needed by
## checkplotserver
# 3. add a comments key:val
checkplotdict['comments'] = None
# 4. calculate some variability features
if getvarfeatures is True:
checkplotdict['varinfo']['features'] = all_nonperiodic_features(
stimes,
smags,
serrs,
magsarefluxes=magsarefluxes,
)
# 5. add a signals key:val. this will be used by checkplotserver's
# pre-whitening and masking functions. these will write to
# checkplotdict['signals']['whiten'] and
# checkplotdict['signals']['mask'] respectively.
checkplotdict['signals'] = {}
# 6. add any externalplots if we have them
checkplotdict['externalplots'] = []
if (externalplots and
isinstance(externalplots, list) and
len(externalplots) > 0):
for externalrow in externalplots:
if all(os.path.exists(erowfile) for erowfile in externalrow):
if verbose:
LOGINFO('adding external plots: %s to checkplot dict' %
repr(externalrow))
checkplotdict['externalplots'].append(externalrow)
else:
LOGWARNING('could not add some external '
'plots in: %s to checkplot dict'
% repr(externalrow))
# 7. do any xmatches required
if xmatchinfo is not None:
checkplotdict = xmatch_external_catalogs(
checkplotdict,
xmatchinfo,
xmatchradiusarcsec=xmatchradiusarcsec
)
# the checkplotdict now contains everything we need
contents = sorted(list(checkplotdict.keys()))
checkplotdict['status'] = 'ok: contents are %s' % contents
if verbose:
LOGINFO('checkplot dict complete for %s' %
checkplotdict['objectid'])
LOGINFO('checkplot dict contents: %s' % contents)
# 8. update the pfmethods key
checkplotdict['pfmethods'] = checkplot_pfmethods
# otherwise, we don't have enough LC points, return nothing
else:
LOGERROR('not enough light curve points for %s, have %s, need %s' %
(checkplotdict['objectid'],len(stimes),mindet))
checkplotdict['magseries'] = None
checkplotdict['status'] = 'failed: not enough LC points'
# at the end, return the dict
return checkplotdict
################################
## CHECKPLOT PICKLE FUNCTIONS ##
################################
def checkplot_pickle(lspinfolist,
times,
mags,
errs,
magsarefluxes=False,
nperiodstouse=3,
objectinfo=None,
lcfitfunc=None,
lcfitparams={},
varinfo=None,
getvarfeatures=True,
lclistpkl=None,
nbrradiusarcsec=60.0,
xmatchinfo=None,
xmatchradiusarcsec=3.0,
externalplots=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
outfile=None,
outgzip=False,
sigclip=4.0,
varepoch='min',
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=[-0.8,0.8],
xliminsetmode=False,
plotdpi=100,
returndict=False,
pickleprotocol=None,
bestperiodhighlight=None,
xgridlines=None,
mindet=1000,
verbose=True):
'''This writes a multiple lspinfo checkplot to a (gzipped) pickle file.
This function can take input from multiple lspinfo dicts (e.g. a list of
output dicts or gzipped pickles of dicts from independent runs of BLS, PDM,
AoV, or GLS period-finders in periodbase).
    NOTE: lspinfolist can contain multiple lspinfo dicts with the same
    lspmethod ('pdm','gls','sls','aov','bls'); each periodogram key in the
    output is prefixed with its index in lspinfolist, so earlier entries are
    not overwritten.
The output pickle contains all the plots (magseries and phased magseries),
periodograms, object information, variability information, light curves, and
phased light curves. The pickle produced by this function can be used with
an external viewer app (e.g. checkplotserver.py), or by using the
checkplot_pickle_to_png function below.
All kwargs are the same as for checkplot_png, except for the following:
nperiodstouse controls how many 'best' periods to make phased LC plots
for. By default, this is the 3 best. If this is set to None, all 'best'
periods present in each lspinfo dict's 'nbestperiods' key will be plotted
(this is 5 according to periodbase functions' defaults).
varinfo is a dictionary with the following keys:
{'objectisvar': True if object is time-variable,
'vartags': list of variable type tags (strings),
'varisperiodic': True if object is a periodic variable,
'varperiod': variability period of the object,
'varepoch': epoch of variability in JD}
if varinfo is None, an initial empty dictionary of this form will be created
and written to the output pickle. This can be later updated using
checkplotviewer.py, etc.
If getvarfeatures is True, will use the function
varbase.features.all_nonperiodic_features to calculate several light curve
features such as the median, MAD, Stetson J index, CDPP, percentiles, etc.
lcfitfunc is a Python function that is used to fit a model to the light
curve. This is then overplotted for each phased light curve in the
checkplot. This function should have the following signature:
def lcfitfunc(times, mags, errs, period, **lcfitparams)
    where lcfitparams encapsulates all external parameters (e.g. the number of
    knots for a spline fit, the degree of a Legendre polynomial fit, etc.) This
function should return a Python dict with the following structure (similar
to the functions in astrobase.varbase.lcfit) and at least the keys below:
{'fittype':<str: name of fit method>,
'fitchisq':<float: the chi-squared value of the fit>,
'fitredchisq':<float: the reduced chi-squared value of the fit>,
'fitinfo':{'fitmags':<ndarray: model mags or fluxes from fit function>},
'magseries':{'times':<ndarray: times at which the fitmags are evaluated>}}
additional keys can include ['fitinfo']['finalparams'] for the final model
fit parameters, ['fitinfo']['fitepoch'] for the minimum light epoch returned
by the model fit, among others. the output dict of lcfitfunc will be copied
to the output checkplot dict's ['fitinfo'][<fittype>] key:val dict.
externalplots is a list of 4-element tuples containing:
    1. path to PNG of periodogram from an external period-finding method
2. path to PNG of best period phased light curve from external period-finder
3. path to PNG of 2nd-best phased light curve from external period-finder
4. path to PNG of 3rd-best phased light curve from external period-finder
This can be used to incorporate external period-finding method results into
the output checkplot pickle or exported PNG to allow for comparison with
astrobase results.
example of externalplots:
extrarows = [('/path/to/external/bls-periodogram.png',
'/path/to/external/bls-phasedlc-plot-bestpeak.png',
'/path/to/external/bls-phasedlc-plot-peak2.png',
'/path/to/external/bls-phasedlc-plot-peak3.png'),
('/path/to/external/pdm-periodogram.png',
'/path/to/external/pdm-phasedlc-plot-bestpeak.png',
'/path/to/external/pdm-phasedlc-plot-peak2.png',
'/path/to/external/pdm-phasedlc-plot-peak3.png'),
...]
If externalplots is provided, the checkplot_pickle_to_png function below
will automatically retrieve these plot PNGs and put them into the exported
checkplot PNG.
sigclip is either a single float or a list of two floats. in the first case,
the sigclip is applied symmetrically. in the second case, the first sigclip
in the list is applied to +ve magnitude deviations (fainter) and the second
sigclip in the list is applied to -ve magnitude deviations (brighter).
An example list would be `[10.,3.]` (for 10 sigma dimmings, 3 sigma
brightenings).
    bestperiodhighlight sets the background highlight color for the phased
    light curve plotted at the best period of each periodogram type, to
    distinguish it from the others. this is an HTML hex color
    specification. If this is None, no highlight will be added.
xgridlines (default None) can be a list, e.g., [-0.5,0.,0.5] that sets the
x-axis grid lines on plotted phased LCs for easy visual identification of
important features.
xliminsetmode = True sets up the phased mag series plot to show a zoomed-in
portion (set by plotxlim) as the main plot and an inset version of the full
phased light curve from phase 0.0 to 1.0. This can be useful if searching
for small dips near phase 0.0 caused by planetary transits for example.
    outgzip controls whether to gzip the output pickle. gzipping turns out to
    be the slowest part of the output process, so if you're after speed, it's
    best not to use it. this is False by default, since gzip doesn't actually
    save much space (29 MB vs. 35 MB for the average checkplot pickle).
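    A minimal usage sketch (gls_results and bls_results are hypothetical
    stand-ins for period-finder output dicts, e.g. from the periodbase
    functions):

        cpf = checkplot_pickle([gls_results, bls_results],
                               times, mags, errs,
                               objectinfo={'objectid':'OBJ-0001',
                                           'ra':270.0, 'decl':45.0},
                               outfile='checkplot-OBJ-0001.pkl')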
'''
if outgzip:
# generate the outfile filename
if not outfile and isinstance(lspinfolist[0],str):
plotfpath = os.path.join(
os.path.dirname(lspinfolist[0]),
'checkplot-%s.pkl.gz' % (
os.path.basename(
lspinfolist[0].replace('.pkl','').replace('.gz','')
)
)
)
elif outfile:
plotfpath = outfile
else:
plotfpath = 'checkplot.pkl.gz'
else:
# generate the outfile filename
if not outfile and isinstance(lspinfolist[0],str):
plotfpath = os.path.join(
os.path.dirname(lspinfolist[0]),
'checkplot-%s.pkl' % (
os.path.basename(
lspinfolist[0].replace('.pkl','').replace('.gz','')
)
)
)
elif outfile:
plotfpath = outfile
else:
plotfpath = 'checkplot.pkl'
# call checkplot_dict for most of the work
checkplotdict = checkplot_dict(
lspinfolist,
times,
mags,
errs,
magsarefluxes=magsarefluxes,
nperiodstouse=nperiodstouse,
objectinfo=objectinfo,
varinfo=varinfo,
getvarfeatures=getvarfeatures,
lclistpkl=lclistpkl,
nbrradiusarcsec=nbrradiusarcsec,
xmatchinfo=xmatchinfo,
xmatchradiusarcsec=xmatchradiusarcsec,
lcfitfunc=lcfitfunc,
lcfitparams=lcfitparams,
externalplots=externalplots,
findercmap=findercmap,
finderconvolve=finderconvolve,
findercachedir=findercachedir,
normto=normto,
normmingap=normmingap,
sigclip=sigclip,
varepoch=varepoch,
phasewrap=phasewrap,
phasesort=phasesort,
phasebin=phasebin,
minbinelems=minbinelems,
plotxlim=plotxlim,
xliminsetmode=xliminsetmode,
plotdpi=plotdpi,
bestperiodhighlight=bestperiodhighlight,
xgridlines=xgridlines,
mindet=mindet,
verbose=verbose
)
# figure out which protocol to use
# for Python >= 3.4; use v3
if ((sys.version_info[0:2] >= (3,4) and not pickleprotocol) or
(pickleprotocol == 3)):
pickleprotocol = 3
if verbose:
LOGWARNING('the output pickle uses protocol v3 '
'which IS NOT backwards compatible with Python 2.7')
# for Python == 2.7; use v2
elif sys.version_info[0:2] == (2,7) and not pickleprotocol:
pickleprotocol = 2
# otherwise, if left unspecified, use the slowest but most compatible
# protocol. this will be readable by all (most?) Pythons
elif not pickleprotocol:
pickleprotocol = 0
# write the completed checkplotdict to a gzipped pickle
picklefname = _write_checkplot_picklefile(checkplotdict,
outfile=plotfpath,
protocol=pickleprotocol,
outgzip=outgzip)
# at the end, return the dict and filename if asked for
if returndict:
if verbose:
LOGINFO('checkplot done -> %s' % picklefname)
return checkplotdict, picklefname
# otherwise, just return the filename
else:
# just to make sure: free up space
del checkplotdict
if verbose:
LOGINFO('checkplot done -> %s' % picklefname)
return picklefname
def checkplot_pickle_update(currentcp, updatedcp,
outfile=None,
outgzip=False,
pickleprotocol=None,
verbose=True):
'''This updates the current checkplot dict with updated values provided.
    currentcp is either a checkplot dict produced by checkplot_pickle above or
    a (gzipped) pickle file produced by the same function. updatedcp is a dict
    or pickle file with the same format as currentcp.
    Writes out the new checkplot (gzipped) pickle file to outfile. If
    currentcp is a file, updates it in place when outfile is None. Mostly only
    useful for checkplotserver.py.
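    A usage sketch (hypothetical file name; cpd is a full checkplot dict
    loaded or produced earlier, with its 'comments' key modified):

        cpd['comments'] = 'possible eclipsing binary'
        updated_fpath = checkplot_pickle_update('checkplot-OBJ-0001.pkl', cpd)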
'''
# generate the outfile filename
if not outfile and isinstance(currentcp,str):
plotfpath = currentcp
elif outfile:
plotfpath = outfile
elif isinstance(currentcp, dict) and currentcp['objectid']:
if outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % currentcp['objectid']
else:
plotfpath = 'checkplot-%s.pkl' % currentcp['objectid']
else:
# we'll get this later below
plotfpath = None
# break out python 2.7 and > 3 nonsense
if sys.version_info[:2] > (3,2):
if (isinstance(currentcp, str) and os.path.exists(currentcp)):
cp_current = _read_checkplot_picklefile(currentcp)
elif isinstance(currentcp, dict):
cp_current = currentcp
else:
LOGERROR('currentcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(currentcp), type(currentcp)))
return None
if (isinstance(updatedcp, str) and os.path.exists(updatedcp)):
cp_updated = _read_checkplot_picklefile(updatedcp)
elif isinstance(updatedcp, dict):
cp_updated = updatedcp
else:
LOGERROR('updatedcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(updatedcp), type(updatedcp)))
return None
# check for unicode in python 2.7
else:
# get the current checkplotdict
if ((isinstance(currentcp, str) or isinstance(currentcp, unicode))
and os.path.exists(currentcp)):
cp_current = _read_checkplot_picklefile(currentcp)
elif isinstance(currentcp,dict):
cp_current = currentcp
else:
LOGERROR('currentcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(currentcp), type(currentcp)))
return None
# get the updated checkplotdict
if ((isinstance(updatedcp, str) or isinstance(updatedcp, unicode))
and os.path.exists(updatedcp)):
cp_updated = _read_checkplot_picklefile(updatedcp)
elif isinstance(updatedcp, dict):
cp_updated = updatedcp
else:
LOGERROR('updatedcp: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(updatedcp), type(updatedcp)))
return None
# do the update using python's dict update mechanism
# this requires updated to be in the same checkplotdict format as current
# all keys in current will now be from updated
cp_current.update(cp_updated)
# figure out the plotfpath if we haven't by now
if not plotfpath and outgzip:
plotfpath = 'checkplot-%s.pkl.gz' % cp_current['objectid']
elif (not plotfpath) and (not outgzip):
plotfpath = 'checkplot-%s.pkl' % cp_current['objectid']
# make sure we write the correct postfix
if plotfpath.endswith('.gz'):
outgzip = True
# figure out which protocol to use
    # for Python >= 3.4; use v3 by default
    if ((sys.version_info[0:2] >= (3,4) and not pickleprotocol) or
        (pickleprotocol is not None and pickleprotocol > 2)):
        pickleprotocol = 3
if verbose:
LOGWARNING('the output pickle uses protocol v3 '
'which IS NOT backwards compatible with Python 2.7')
# for Python == 2.7; use v2
elif sys.version_info[0:2] == (2,7) and not pickleprotocol:
pickleprotocol = 2
# otherwise, if left unspecified, use the slowest but most compatible
# protocol. this will be readable by all (most?) Pythons
elif not pickleprotocol:
pickleprotocol = 0
# write the new checkplotdict
return _write_checkplot_picklefile(cp_current,
outfile=plotfpath,
outgzip=outgzip,
protocol=pickleprotocol)
def checkplot_pickle_to_png(checkplotin,
outfile,
extrarows=None):
'''This reads the pickle provided, and writes out a PNG.
checkplotin is either a checkplot dict produced by checkplot_pickle above or
a pickle file produced by the same function.
The PNG has 4 x N tiles, as below:
[ finder ] [ objectinfo ] [ varinfo/comments ] [ unphased LC ]
[ periodogram1 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
[ periodogram2 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
.
.
[ periodogramN ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
for N independent period-finding methods producing:
- periodogram1,2,3...N: the periodograms from each method
- phased LC P1,P2,P3: the phased lightcurves using the best 3 peaks in each
periodogram
outfile is the output PNG file to generate.
extrarows is a list of 4-element tuples containing paths to PNG files that
will be added to the end of the rows generated from the checkplotin
pickle/dict. Each tuple represents a row in the final output PNG file. If
    there are fewer than 4 elements per tuple, the missing elements will be
filled in with white-space. If there are more than 4 elements per tuple,
only the first four will be used.
The purpose of this kwarg is to incorporate periodograms and phased LC plots
(in the form of PNGs) generated from an external period-finding function or
program (like vartools) to allow for comparison with astrobase results.
Each external PNG will be resized to 750 x 480 pixels to fit into an output
image cell.
By convention, each 4-element tuple should contain:
    a periodogram PNG
phased LC PNG with 1st best peak period from periodogram
phased LC PNG with 2nd best peak period from periodogram
phased LC PNG with 3rd best peak period from periodogram
example of extrarows:
extrarows = [('/path/to/external/bls-periodogram.png',
'/path/to/external/bls-phasedlc-plot-bestpeak.png',
'/path/to/external/bls-phasedlc-plot-peak2.png',
'/path/to/external/bls-phasedlc-plot-peak3.png'),
('/path/to/external/pdm-periodogram.png',
'/path/to/external/pdm-phasedlc-plot-bestpeak.png',
'/path/to/external/pdm-phasedlc-plot-peak2.png',
'/path/to/external/pdm-phasedlc-plot-peak3.png'),
...]
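    A basic call (with hypothetical file names) looks like:

        checkplot_pickle_to_png('checkplot-OBJ-0001.pkl',
                                'checkplot-OBJ-0001.png')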
'''
# figure out if the checkplotpickle is a filename
# python 3
if sys.version_info[:2] > (3,2):
if (isinstance(checkplotin, str) and os.path.exists(checkplotin)):
cpd = _read_checkplot_picklefile(checkplotin)
elif isinstance(checkplotin, dict):
cpd = checkplotin
else:
LOGERROR('checkplotin: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(checkplotin), type(checkplotin)))
return None
# check for unicode in python 2.7
else:
# get the current checkplotdict
if ((isinstance(checkplotin, str) or isinstance(checkplotin, unicode))
and os.path.exists(checkplotin)):
cpd = _read_checkplot_picklefile(checkplotin)
elif isinstance(checkplotin,dict):
cpd = checkplotin
else:
LOGERROR('checkplotin: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(checkplotin), type(checkplotin)))
return None
# figure out the dimensions of the output png
# each cell is 750 x 480 pixels
# a row is made of four cells
# - the first row is for object info
# - the rest are for periodograms and phased LCs, one row per method
# if there are more than three phased LC plots per method, we'll only plot 3
cplspmethods = cpd['pfmethods']
cprows = len(cplspmethods)
# add in any extra rows from neighbors
if 'neighbors' in cpd and cpd['neighbors'] and len(cpd['neighbors']) > 0:
nbrrows = len(cpd['neighbors'])
else:
nbrrows = 0
# add in any extra rows from keyword arguments
if extrarows and len(extrarows) > 0:
erows = len(extrarows)
else:
erows = 0
# add in any extra rows from the checkplot dict
if ('externalplots' in cpd and
cpd['externalplots'] and
len(cpd['externalplots']) > 0):
cpderows = len(cpd['externalplots'])
else:
cpderows = 0
totalwidth = 3000
totalheight = 480 + (cprows + erows + nbrrows + cpderows)*480
# this is the output PNG
outimg = Image.new('RGBA',(totalwidth, totalheight),(255,255,255,255))
# now fill in the rows of the output png. we'll use Pillow to build up the
# output image from the already stored plots and stuff in the checkplot
# dict.
###############################
# row 1, cell 1: finder chart #
###############################
if cpd['finderchart']:
finder = Image.open(
_base64_to_file(cpd['finderchart'], None, writetostrio=True)
)
bigfinder = finder.resize((450,450), Image.ANTIALIAS)
outimg.paste(bigfinder,(150,20))
#####################################
# row 1, cell 2: object information #
#####################################
# find the font we need from the package data
fontpath = os.path.join(os.path.dirname(__file__),
'cpserver',
'cps-assets',
'DejaVuSans.ttf')
# load the font
if os.path.exists(fontpath):
cpfontnormal = ImageFont.truetype(fontpath, 20)
cpfontlarge = ImageFont.truetype(fontpath, 28)
else:
LOGWARNING('could not find bundled '
'DejaVu Sans font in the astrobase package '
'data, using ugly defaults...')
cpfontnormal = ImageFont.load_default()
cpfontlarge = ImageFont.load_default()
# the image draw object
objinfodraw = ImageDraw.Draw(outimg)
# write out the object information
# objectid
objinfodraw.text(
(875, 25),
cpd['objectid'] if cpd['objectid'] else 'no objectid',
font=cpfontlarge,
fill=(0,0,255,255)
)
# twomass id
if 'twomassid' in cpd['objectinfo']:
objinfodraw.text(
(875, 60),
('2MASS J%s' % cpd['objectinfo']['twomassid']
if cpd['objectinfo']['twomassid']
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
# ndet
if 'ndet' in cpd['objectinfo']:
objinfodraw.text(
(875, 85),
('LC points: %s' % cpd['objectinfo']['ndet']
if cpd['objectinfo']['ndet'] is not None
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(875, 85),
('LC points: %s' % cpd['magseries']['times'].size),
font=cpfontnormal,
fill=(0,0,0,255)
)
# coords and PM
objinfodraw.text(
(875, 125),
('Coords and PM'),
font=cpfontnormal,
fill=(0,0,0,255)
)
if 'ra' in cpd['objectinfo'] and 'decl' in cpd['objectinfo']:
objinfodraw.text(
(1125, 125),
(('RA, Dec: %.3f, %.3f' %
(cpd['objectinfo']['ra'], cpd['objectinfo']['decl']))
if (cpd['objectinfo']['ra'] is not None and
cpd['objectinfo']['decl'] is not None)
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(1125, 125),
'RA, Dec: nan, nan',
font=cpfontnormal,
fill=(0,0,0,255)
)
if 'propermotion' in cpd['objectinfo']:
objinfodraw.text(
(1125, 150),
(('Total PM: %.5f mas/yr' % cpd['objectinfo']['propermotion'])
if (cpd['objectinfo']['propermotion'] is not None)
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(1125, 150),
'Total PM: nan',
font=cpfontnormal,
fill=(0,0,0,255)
)
if 'rpmj' in cpd['objectinfo']:
objinfodraw.text(
(1125, 175),
(('Reduced PM [Jmag]: %.3f' % cpd['objectinfo']['rpmj'])
if (cpd['objectinfo']['rpmj'] is not None)
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(1125, 175),
'Reduced PM [Jmag]: nan',
font=cpfontnormal,
fill=(0,0,0,255)
)
# magnitudes
objinfodraw.text(
(875, 200),
('Magnitudes'),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 200),
('gri: %.3f, %.3f, %.3f' %
((cpd['objectinfo']['sdssg'] if
('sdssg' in cpd['objectinfo'] and
cpd['objectinfo']['sdssg'] is not None)
else npnan),
(cpd['objectinfo']['sdssr'] if
('sdssr' in cpd['objectinfo'] and
cpd['objectinfo']['sdssr'] is not None)
else npnan),
(cpd['objectinfo']['sdssi'] if
('sdssi' in cpd['objectinfo'] and
cpd['objectinfo']['sdssi'] is not None)
else npnan))),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 225),
('JHK: %.3f, %.3f, %.3f' %
((cpd['objectinfo']['jmag'] if
('jmag' in cpd['objectinfo'] and
cpd['objectinfo']['jmag'] is not None)
else npnan),
(cpd['objectinfo']['hmag'] if
('hmag' in cpd['objectinfo'] and
cpd['objectinfo']['hmag'] is not None)
else npnan),
(cpd['objectinfo']['kmag'] if
('kmag' in cpd['objectinfo'] and
cpd['objectinfo']['kmag'] is not None)
else npnan))),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 250),
('BV: %.3f, %.3f' %
((cpd['objectinfo']['bmag'] if
('bmag' in cpd['objectinfo'] and
cpd['objectinfo']['bmag'] is not None)
else npnan),
(cpd['objectinfo']['vmag'] if
('vmag' in cpd['objectinfo'] and
cpd['objectinfo']['vmag'] is not None)
else npnan))),
font=cpfontnormal,
fill=(0,0,0,255)
)
# colors
if ('dereddened' in cpd['objectinfo'] and
cpd['objectinfo']['dereddened'] == True):
deredlabel = "(dereddened)"
else:
deredlabel = ""
objinfodraw.text(
(875, 275),
'Colors %s' % deredlabel,
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 275),
('B - V: %.3f, V - K: %.3f' %
( (cpd['objectinfo']['bvcolor'] if
('bvcolor' in cpd['objectinfo'] and
cpd['objectinfo']['bvcolor'] is not None)
else npnan),
(cpd['objectinfo']['vkcolor'] if
('vkcolor' in cpd['objectinfo'] and
cpd['objectinfo']['vkcolor'] is not None)
else npnan) )),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 300),
('i - J: %.3f, g - K: %.3f' %
( (cpd['objectinfo']['ijcolor'] if
('ijcolor' in cpd['objectinfo'] and
cpd['objectinfo']['ijcolor'] is not None)
else npnan),
(cpd['objectinfo']['gkcolor'] if
('gkcolor' in cpd['objectinfo'] and
cpd['objectinfo']['gkcolor'] is not None)
else npnan) )),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(1125, 325),
('J - K: %.3f' %
( (cpd['objectinfo']['jkcolor'] if
('jkcolor' in cpd['objectinfo'] and
cpd['objectinfo']['jkcolor'] is not None)
else npnan),) ),
font=cpfontnormal,
fill=(0,0,0,255)
)
# color classification
if ('color_classes' in cpd['objectinfo'] and
cpd['objectinfo']['color_classes']):
objinfodraw.text(
(875, 350),
('star classification by color: %s' %
(', '.join(cpd['objectinfo']['color_classes']))),
font=cpfontnormal,
fill=(0,0,0,255)
)
# GAIA neighbors
if ( ('gaia_neighbors' in cpd['objectinfo']) and
(cpd['objectinfo']['gaia_neighbors'] is not None) and
(np.isfinite(cpd['objectinfo']['gaia_neighbors'])) and
('searchradarcsec' in cpd['objectinfo']) and
(cpd['objectinfo']['searchradarcsec']) ):
objinfodraw.text(
(875, 375),
('%s GAIA close neighbors within %.1f arcsec' %
(cpd['objectinfo']['gaia_neighbors'],
cpd['objectinfo']['searchradarcsec'])),
font=cpfontnormal,
fill=(0,0,0,255)
)
# closest GAIA neighbor
if ( ('gaia_closest_distarcsec' in cpd['objectinfo']) and
(cpd['objectinfo']['gaia_closest_distarcsec'] is not None) and
(np.isfinite(cpd['objectinfo']['gaia_closest_distarcsec'])) and
('gaia_closest_gmagdiff' in cpd['objectinfo']) and
(cpd['objectinfo']['gaia_closest_gmagdiff'] is not None) and
(np.isfinite(cpd['objectinfo']['gaia_closest_gmagdiff'])) ):
objinfodraw.text(
(875, 400),
('closest GAIA neighbor is %.1f arcsec away, '
'GAIA mag (obj-nbr): %.3f' %
(cpd['objectinfo']['gaia_closest_distarcsec'],
cpd['objectinfo']['gaia_closest_gmagdiff'])),
font=cpfontnormal,
fill=(0,0,0,255)
)
# object tags
if 'objecttags' in cpd['objectinfo'] and cpd['objectinfo']['objecttags']:
objtagsplit = cpd['objectinfo']['objecttags'].split(',')
# write three tags per line
nobjtaglines = int(np.ceil(len(objtagsplit)/3.0))
for objtagline in range(nobjtaglines):
objtagslice = ','.join(objtagsplit[objtagline*3:objtagline*3+3])
objinfodraw.text(
(875, 450+objtagline*25),
objtagslice,
font=cpfontnormal,
fill=(135, 54, 0, 255)
)
################################################
# row 1, cell 3: variability info and comments #
################################################
# objectisvar
objisvar = cpd['varinfo']['objectisvar']
if objisvar == '0':
objvarflag = 'Variable star flag not set'
elif objisvar == '1':
objvarflag = 'Object is probably a variable star'
elif objisvar == '2':
objvarflag = 'Object is probably not a variable star'
elif objisvar == '3':
objvarflag = 'Not sure if this object is a variable star'
elif objisvar is None:
objvarflag = 'Variable star flag not set'
elif objisvar is True:
objvarflag = 'Object is probably a variable star'
elif objisvar is False:
objvarflag = 'Object is probably not a variable star'
else:
objvarflag = 'Variable star flag: %s' % objisvar
objinfodraw.text(
(1600, 125),
objvarflag,
font=cpfontnormal,
fill=(0,0,0,255)
)
# period
objinfodraw.text(
(1600, 150),
('Period [days]: %.6f' %
(cpd['varinfo']['varperiod']
if cpd['varinfo']['varperiod'] is not None
else np.nan)),
font=cpfontnormal,
fill=(0,0,0,255)
)
# epoch
objinfodraw.text(
(1600, 175),
('Epoch [JD]: %.6f' %
(cpd['varinfo']['varepoch']
if cpd['varinfo']['varepoch'] is not None
else np.nan)),
font=cpfontnormal,
fill=(0,0,0,255)
)
# variability tags
if cpd['varinfo']['vartags']:
vartagsplit = cpd['varinfo']['vartags'].split(',')
# write three tags per line
nvartaglines = int(np.ceil(len(vartagsplit)/3.0))
for vartagline in range(nvartaglines):
vartagslice = ','.join(vartagsplit[vartagline*3:vartagline*3+3])
objinfodraw.text(
(1600, 225+vartagline*25),
vartagslice,
font=cpfontnormal,
fill=(135, 54, 0, 255)
)
# object comments
if cpd['comments']:
commentsplit = cpd['comments'].split(' ')
# write 10 words per line
ncommentlines = int(np.ceil(len(commentsplit)/10.0))
for commentline in range(ncommentlines):
commentslice = ' '.join(
commentsplit[commentline*10:commentline*10+10]
)
objinfodraw.text(
(1600, 325+commentline*25),
commentslice,
font=cpfontnormal,
fill=(0,0,0,255)
)
#######################################
# row 1, cell 4: unphased light curve #
#######################################
if (cpd['magseries'] and
'plot' in cpd['magseries'] and
cpd['magseries']['plot']):
magseries = Image.open(
_base64_to_file(cpd['magseries']['plot'], None, writetostrio=True)
)
outimg.paste(magseries,(750*3,0))
###############################
# the rest of the rows in cpd #
###############################
for lspmethodind, lspmethod in enumerate(cplspmethods):
###############################
# the periodogram comes first #
###############################
if (cpd[lspmethod] and cpd[lspmethod]['periodogram']):
pgram = Image.open(
_base64_to_file(cpd[lspmethod]['periodogram'], None,
writetostrio=True)
)
outimg.paste(pgram,(0,480 + 480*lspmethodind))
#############################
# best phased LC comes next #
#############################
if (cpd[lspmethod] and 0 in cpd[lspmethod] and cpd[lspmethod][0]):
plc1 = Image.open(
_base64_to_file(cpd[lspmethod][0]['plot'], None, writetostrio=True)
)
outimg.paste(plc1,(750,480 + 480*lspmethodind))
#################################
# 2nd best phased LC comes next #
#################################
if (cpd[lspmethod] and 1 in cpd[lspmethod] and cpd[lspmethod][1]):
plc2 = Image.open(
_base64_to_file(cpd[lspmethod][1]['plot'], None, writetostrio=True)
)
outimg.paste(plc2,(750*2,480 + 480*lspmethodind))
#################################
# 3rd best phased LC comes next #
#################################
if (cpd[lspmethod] and 2 in cpd[lspmethod] and cpd[lspmethod][2]):
plc3 = Image.open(
_base64_to_file(cpd[lspmethod][2]['plot'], None, writetostrio=True)
)
outimg.paste(plc3,(750*3,480 + 480*lspmethodind))
################################
## ALL DONE WITH BUILDING PNG ##
################################
#########################
# add in any extra rows #
#########################
# from the keyword arguments
if erows > 0:
for erowind, erow in enumerate(extrarows):
# make sure we never go above 4 plots in a row
for ecolind, ecol in enumerate(erow[:4]):
eplot = Image.open(ecol)
eplotresized = eplot.resize((750,480), Image.ANTIALIAS)
outimg.paste(eplotresized,
(750*ecolind,
(cprows+1)*480 + 480*erowind))
# from the checkplotdict
if cpderows > 0:
for cpderowind, cpderow in enumerate(cpd['externalplots']):
# make sure we never go above 4 plots in a row
for cpdecolind, cpdecol in enumerate(cpderow[:4]):
cpdeplot = Image.open(cpdecol)
cpdeplotresized = cpdeplot.resize((750,480), Image.ANTIALIAS)
outimg.paste(cpdeplotresized,
(750*cpdecolind,
(cprows+1)*480 + (erows*480) + 480*cpderowind))
# from neighbors:
if nbrrows > 0:
# we have four tiles
# tile 1: neighbor objectid, ra, decl, distance, unphased LC
# tile 2: phased LC for gls
# tile 3: phased LC for pdm
# tile 4: phased LC for any other period finding method
# the priority is like so: ['bls','mav','aov','win']
for nbrind, nbr in enumerate(cpd['neighbors']):
# figure out which period finding methods are available for this
# neighbor. make sure to match the ones from the actual object in
# order of priority: 'gls','pdm','bls','aov','mav','acf','win'
nbrlspmethods = []
for lspmethod in cpd['pfmethods']:
if lspmethod in nbr:
nbrlspmethods.append(lspmethod)
# restrict to top three in priority
nbrlspmethods = nbrlspmethods[:3]
try:
# first panel: neighbor objectid, ra, decl, distance, unphased
# LC
nbrlc = Image.open(
_base64_to_file(
nbr['magseries']['plot'], None, writetostrio=True
)
)
outimg.paste(nbrlc,
(750*0,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 15),
('N%s: %s' % (nbrind + 1, nbr['objectid'])),
font=cpfontlarge,
fill=(0,0,255,255)
)
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 50),
('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' %
(nbr['ra'], nbr['decl'], nbr['dist'])),
font=cpfontnormal,
fill=(0,0,255,255)
)
                # second panel: phased LC for the first available period-finder
lsp1lc = Image.open(
_base64_to_file(
nbr[nbrlspmethods[0]][0]['plot'], None,
writetostrio=True
)
)
outimg.paste(lsp1lc,
(750*1,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
                # third panel: phased LC for the second available period-finder
lsp2lc = Image.open(
_base64_to_file(
nbr[nbrlspmethods[1]][0]['plot'], None,
writetostrio=True
)
)
outimg.paste(lsp2lc,
(750*2,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
                # fourth panel: phased LC for the third available period-finder
lsp3lc = Image.open(
_base64_to_file(
nbr[nbrlspmethods[2]][0]['plot'], None,
writetostrio=True
)
)
outimg.paste(lsp3lc,
(750*3,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
except Exception as e:
LOGERROR('neighbor %s does not have a magseries plot, '
'measurements are probably all nan' % nbr['objectid'])
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 15),
('N%s: %s, no light curve!' %
(nbrind + 1, nbr['objectid'])),
font=cpfontlarge,
fill=(0,0,255,255)
)
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 50),
('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' %
(nbr['ra'], nbr['decl'], nbr['dist'])),
font=cpfontnormal,
fill=(0,0,255,255)
)
#####################
## WRITE FINAL PNG ##
#####################
# check if we've stupidly copied over the same filename as the input pickle
# to expected output file
if outfile.endswith('pkl'):
LOGWARNING('expected output PNG filename ends with .pkl, '
'changed to .png')
outfile = outfile.replace('.pkl','.png')
outimg.save(outfile)
if os.path.exists(outfile):
LOGINFO('checkplot pickle -> checkplot PNG: %s OK' % outfile)
return outfile
else:
LOGERROR('failed to write checkplot PNG')
return None
def cp2png(checkplotin, extrarows=None):
'''
This is just a shortened form of the function above for convenience.
This only handles pickle files.
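    For example (hypothetical file name), cp2png('checkplot-OBJ-0001.pkl.gz')
    writes checkplot-OBJ-0001.png next to the input pickle.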
'''
if checkplotin.endswith('.gz'):
outfile = checkplotin.replace('.pkl.gz','.png')
else:
outfile = checkplotin.replace('.pkl','.png')
return checkplot_pickle_to_png(checkplotin, outfile, extrarows=extrarows)
################################
## POST-PROCESSING CHECKPLOTS ##
################################
def finalize_checkplot(cpx,
outdir,
all_lclistpkl,
objfits=None):
'''This is used to prevent any further changes to the checkplot.
cpx is the checkplot dict or pickle to process.
    outdir is the directory where the final pickle will be written. If this is
    set to the same dir as cpx and cpx is a pickle, the function will fail.
    This is meant to keep the in-process checkplots separate from the
    finalized versions.
all_lclistpkl is a pickle created by lcproc.make_lclist above with no
restrictions on the number of observations (so ALL light curves in the
collection).
objfits if not None should be a file path to a FITS file containing a WCS
header and this object. This will be used to make a stamp cutout of the
object using the actual image it was detected on. This will be a useful
comparison to the usual DSS POSS-RED2 image used by the checkplots.
Use this function after all variable classification, period-finding, and
object xmatches are done. This function will add a 'final' key to the
checkplot, which will contain:
- a phased LC plot with the period and epoch set after review using the
times, mags, errs after any appropriate filtering and sigclip was done in
the checkplotserver UI
- The unphased LC using the times, mags, errs after any appropriate
filtering and sigclip was done in the checkplotserver UI
- the same plots for any LC collection neighbors
- the survey cutout for the object if objfits is provided and checks out
- a redone neighbor search using GAIA and all light curves in the collection
even if they don't have at least 1000 observations.
These items will be shown in a special 'Final' tab in the checkplotserver
webapp (this should be run in readonly mode as well). The final tab will
also contain downloadable links for the checkplot pickle in pkl and PNG
format, as well as the final times, mags, errs as a gzipped CSV with a
header containing all of this info (will be readable by the usual
astrobase.hatsurveys.hatlc module).
'''
def parallel_finalize_cplist(cplist,
outdir,
objfits=None):
    '''This is a parallel driver for the function above, operating on a list of
checkplots.
'''
def parallel_finalize_cpdir(cpdir,
outdir,
cpfileglob='checkplot-*.pkl*',
objfits=None):
'''This is a parallel driver for the function above, operating on a
directory of checkplots.
'''
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lazy_read.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lazy_read.proto',
package='mindinsight.summary',
syntax='proto2',
serialized_options=b'\370\001\001',
serialized_pb=b'\n\x0flazy_read.proto\x12\x13mindinsight.summary\"t\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x02(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x11\n\x07version\x18\x03 \x01(\tH\x00\x12/\n\x07summary\x18\x05 \x01(\x0b\x32\x1c.mindinsight.summary.SummaryH\x00\x42\x06\n\x04what\"\xc8\x01\n\x07Summary\x12\x31\n\x05value\x18\x01 \x03(\x0b\x32\".mindinsight.summary.Summary.Value\x1a\x1e\n\x05Image\x12\x15\n\rencoded_image\x18\x04 \x02(\x0c\x1aj\n\x05Value\x12\x0b\n\x03tag\x18\x01 \x02(\t\x12\x16\n\x0cscalar_value\x18\x03 \x01(\x02H\x00\x12\x33\n\x05image\x18\x04 \x01(\x0b\x32\".mindinsight.summary.Summary.ImageH\x00\x42\x07\n\x05valueB\x03\xf8\x01\x01'
)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='mindinsight.summary.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='wall_time', full_name='mindinsight.summary.Event.wall_time', index=0,
number=1, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='mindinsight.summary.Event.step', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='mindinsight.summary.Event.version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='summary', full_name='mindinsight.summary.Event.summary', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='what', full_name='mindinsight.summary.Event.what',
index=0, containing_type=None, fields=[]),
],
serialized_start=40,
serialized_end=156,
)
_SUMMARY_IMAGE = _descriptor.Descriptor(
name='Image',
full_name='mindinsight.summary.Summary.Image',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='encoded_image', full_name='mindinsight.summary.Summary.Image.encoded_image', index=0,
number=4, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=221,
serialized_end=251,
)
_SUMMARY_VALUE = _descriptor.Descriptor(
name='Value',
full_name='mindinsight.summary.Summary.Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tag', full_name='mindinsight.summary.Summary.Value.tag', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scalar_value', full_name='mindinsight.summary.Summary.Value.scalar_value', index=1,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image', full_name='mindinsight.summary.Summary.Value.image', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='mindinsight.summary.Summary.Value.value',
index=0, containing_type=None, fields=[]),
],
serialized_start=253,
serialized_end=359,
)
_SUMMARY = _descriptor.Descriptor(
name='Summary',
full_name='mindinsight.summary.Summary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='mindinsight.summary.Summary.value', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SUMMARY_IMAGE, _SUMMARY_VALUE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=359,
)
_EVENT.fields_by_name['summary'].message_type = _SUMMARY
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['version'])
_EVENT.fields_by_name['version'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['summary'])
_EVENT.fields_by_name['summary'].containing_oneof = _EVENT.oneofs_by_name['what']
_SUMMARY_IMAGE.containing_type = _SUMMARY
_SUMMARY_VALUE.fields_by_name['image'].message_type = _SUMMARY_IMAGE
_SUMMARY_VALUE.containing_type = _SUMMARY
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['scalar_value'])
_SUMMARY_VALUE.fields_by_name['scalar_value'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['image'])
_SUMMARY_VALUE.fields_by_name['image'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY.fields_by_name['value'].message_type = _SUMMARY_VALUE
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), {
'DESCRIPTOR' : _EVENT,
'__module__' : 'lazy_read_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.summary.Event)
})
_sym_db.RegisterMessage(Event)
Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), {
'Image' : _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), {
'DESCRIPTOR' : _SUMMARY_IMAGE,
'__module__' : 'lazy_read_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.summary.Summary.Image)
})
,
'Value' : _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), {
'DESCRIPTOR' : _SUMMARY_VALUE,
'__module__' : 'lazy_read_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.summary.Summary.Value)
})
,
'DESCRIPTOR' : _SUMMARY,
'__module__' : 'lazy_read_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.summary.Summary)
})
_sym_db.RegisterMessage(Summary)
_sym_db.RegisterMessage(Summary.Image)
_sym_db.RegisterMessage(Summary.Value)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
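# A minimal usage sketch (not part of the generated module; the field values
# below are illustrative only):
#
#     event = Event(wall_time=1590000000.0, step=1)
#     event.summary.value.add(tag='loss', scalar_value=0.25)
#     data = event.SerializeToString()
#     roundtrip = Event()
#     roundtrip.ParseFromString(data)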
import os
import sys
import socket
import time
from multiprocessing import Process
from pathlib import Path
from typing import Tuple, Union
import torch
from torch.utils.tensorboard import SummaryWriter
from super_gradients.training.exceptions.dataset_exceptions import UnsupportedBatchItemsFormat
# TODO: These utils should move to sg_model package as internal (private) helper functions
def try_port(port):
"""
try_port - Helper method for tensorboard port binding
    :param port: port number to test
    :return: True if the port could be bound (i.e. it is free), False otherwise
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
is_port_available = False
try:
sock.bind(("localhost", port))
is_port_available = True
except Exception as ex:
print('Port ' + str(port) + ' is in use' + str(ex))
sock.close()
return is_port_available
def launch_tensorboard_process(checkpoints_dir_path: str, sleep_postpone: bool = True, port: int = None) -> Tuple[Process, int]:
"""
    launch_tensorboard_process - Default behavior is to scan all free ports from 6006-6015 and try using them
                                 unless port is defined by the user
    :param checkpoints_dir_path: path to the checkpoints directory; tensorboard is launched on its parent directory
    :param sleep_postpone: if True, sleep for a few seconds after launching so the tensorboard process can start
    :param port: explicit port to use instead of scanning the default range
:return: tuple of tb process, port
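    Example (sketch; the checkpoints path is hypothetical):
        tb_process, tb_port = launch_tensorboard_process('/tmp/ckpts/experiment_1')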
"""
logdir_path = str(Path(checkpoints_dir_path).parent.absolute())
tb_cmd = 'tensorboard --logdir=' + logdir_path + ' --bind_all'
if port is not None:
tb_ports = [port]
else:
tb_ports = range(6006, 6016)
for tb_port in tb_ports:
if not try_port(tb_port):
continue
else:
print('Starting Tensor-Board process on port: ' + str(tb_port))
tensor_board_process = Process(target=os.system, args=([tb_cmd + ' --port=' + str(tb_port)]))
tensor_board_process.daemon = True
tensor_board_process.start()
# LET THE TENSORBOARD PROCESS START
if sleep_postpone:
time.sleep(3)
return tensor_board_process, tb_port
# RETURNING IRRELEVANT VALUES
    print('Failed to initialize Tensor-Board process on ports: ' + ', '.join(map(str, tb_ports)))
return None, -1
def init_summary_writer(tb_dir, checkpoint_loaded, user_prompt=False):
"""Remove previous tensorboard files from directory and launch a tensor board process"""
# If the training is from scratch, Walk through destination folder and delete existing tensorboard logs
user = ''
if not checkpoint_loaded:
for filename in os.listdir(tb_dir):
if 'events' in filename:
if not user_prompt:
print('"{}" will not be deleted'.format(filename))
continue
while True:
# Verify with user before deleting old tensorboard files
                user = input('\nOLDER TENSORBOARD FILES EXIST IN EXPERIMENT FOLDER:\n"{}"\n'
                             'DO YOU WANT TO DELETE THEM? [y/n]'
                             .format(filename)) if (user != 'n' and user != 'y') else user
if user == 'y':
os.remove('{}/{}'.format(tb_dir, filename))
print('DELETED: {}!'.format(filename))
break
elif user == 'n':
print('"{}" will not be deleted'.format(filename))
break
print('Unknown answer...')
    # Create the tensorboard summary writer and return it
return SummaryWriter(tb_dir)
def add_log_to_file(filename, results_titles_list, results_values_list, epoch, max_epochs):
"""Add a message to the log file"""
    # Note: opening and closing the file every time is inefficient; it is done for experimental purposes
with open(filename, 'a') as f:
f.write('\nEpoch (%d/%d) - ' % (epoch, max_epochs))
for result_title, result_value in zip(results_titles_list, results_values_list):
if isinstance(result_value, torch.Tensor):
result_value = result_value.item()
f.write(result_title + ': ' + str(result_value) + '\t')
def write_training_results(writer, results_titles_list, results_values_list, epoch):
"""Stores the training and validation loss and accuracy for current epoch in a tensorboard file"""
for res_key, res_val in zip(results_titles_list, results_values_list):
# USE ONLY LOWER-CASE LETTERS AND REPLACE SPACES WITH '_' TO AVOID MANY TITLES FOR THE SAME KEY
corrected_res_key = res_key.lower().replace(' ', '_')
writer.add_scalar(corrected_res_key, res_val, epoch)
writer.flush()
def write_hpms(writer, hpmstructs=[], special_conf={}):
"""Stores the training and dataset hyper params in the tensorboard file"""
hpm_string = ""
for hpm in hpmstructs:
for key, val in hpm.__dict__.items():
hpm_string += '{}: {} \n '.format(key, val)
for key, val in special_conf.items():
hpm_string += '{}: {} \n '.format(key, val)
writer.add_text("Hyper_parameters", hpm_string)
writer.flush()
# TODO: This should probably move into datasets/datasets_utils.py?
def unpack_batch_items(batch_items: Union[tuple, torch.Tensor]):
"""
Adds support for unpacking batch items in train/validation loop.
@param batch_items: (Union[tuple, torch.Tensor]) returned by the data loader, which is expected to be in one of
the following formats:
1. torch.Tensor or tuple, s.t inputs = batch_items[0], targets = batch_items[1] and len(batch_items) = 2
2. tuple: (inputs, targets, additional_batch_items)
where inputs are fed to the network, targets are their corresponding labels and additional_batch_items is a
dictionary (format {additional_batch_item_i_name: additional_batch_item_i ...}) which can be accessed through
the phase context under the attribute additional_batch_item_i_name, using a phase callback.
@return: inputs, target, additional_batch_items
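    Example (a sketch with dummy tensors):
        inputs, target, extras = unpack_batch_items((torch.zeros(4, 3),
                                                     torch.zeros(4)))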
"""
additional_batch_items = {}
if len(batch_items) == 2:
inputs, target = batch_items
elif len(batch_items) == 3:
inputs, target, additional_batch_items = batch_items
else:
raise UnsupportedBatchItemsFormat()
return inputs, target, additional_batch_items
def log_uncaught_exceptions(logger):
"""
Makes logger log uncaught exceptions
@param logger: logging.Logger
@return: None
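    Example (sketch):
        import logging
        logger = logging.getLogger(__name__)
        log_uncaught_exceptions(logger)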
"""
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2016-2017 Stella/AboodXD
# Supported formats:
# -RGBA8
# -RGB10A2
# -RGB565
# -RGB5A1
# -RGBA4
# -L8/R8
# -L8A8/RG8
# -BC1
# -BC2
# -BC3
# -BC4U
# -BC4S
# -BC5U
# -BC5S
# Feel free to include this in your own program if you want, just give credits. :)
"""dds.py: DDS reader and header generator."""
import struct
try:
import form_conv_cy as form_conv
except ImportError:
import form_conv
def readDDS(f, SRGB):
with open(f, "rb") as inf:
inb = inf.read()
if len(inb) < 0x80 or inb[:4] != b'DDS ':
print("")
print(f + " is not a valid DDS file!")
return 0, 0, 0, b'', 0, [], 0, []
width = struct.unpack("<I", inb[16:20])[0]
height = struct.unpack("<I", inb[12:16])[0]
fourcc = inb[84:88]
if fourcc == b'DX10':
print("")
print("DX10 DDS files are not supported.")
return 0, 0, 0, b'', 0, [], 0, []
pflags = struct.unpack("<I", inb[80:84])[0]
bpp = struct.unpack("<I", inb[88:92])[0] >> 3
channel0 = struct.unpack("<I", inb[92:96])[0]
channel1 = struct.unpack("<I", inb[96:100])[0]
channel2 = struct.unpack("<I", inb[100:104])[0]
channel3 = struct.unpack("<I", inb[104:108])[0]
caps = struct.unpack("<I", inb[108:112])[0]
if caps not in [0x1000, 0x401008]:
print("")
print("Invalid texture.")
return 0, 0, 0, b'', 0, [], 0, []
abgr8_masks = {0xff: 0, 0xff00: 1, 0xff0000: 2, 0xff000000: 3, 0: 5}
bgr8_masks = {0xff: 0, 0xff00: 1, 0xff0000: 2, 0: 5}
a2rgb10_masks = {0x3ff00000: 0, 0xffc00: 1, 0x3ff: 2, 0xc0000000: 3, 0: 5}
bgr565_masks = {0x1f: 0, 0x7e0: 1, 0xf800: 2, 0: 5}
a1bgr5_masks = {0x1f: 0, 0x3e0: 1, 0x7c00: 2, 0x8000: 3, 0: 5}
abgr4_masks = {0xf: 0, 0xf0: 1, 0xf00: 2, 0xf000: 3, 0: 5}
l8_masks = {0xff: 0, 0: 5}
a8l8_masks = {0xff: 0, 0xff00: 1, 0: 5}
compressed = False
luminance = False
rgb = False
has_alpha = False
if pflags == 4:
compressed = True
elif pflags == 0x20000 or pflags == 2:
luminance = True
elif pflags == 0x20001:
luminance = True
has_alpha = True
elif pflags == 0x40:
rgb = True
elif pflags == 0x41:
rgb = True
has_alpha = True
else:
print("")
print("Invalid texture.")
return 0, 0, 0, b'', 0, [], 0, []
format_ = 0
if compressed:
compSel = [0, 1, 2, 3]
if fourcc == b'DXT1':
format_ = 0x42
bpp = 8
elif fourcc == b'DXT3':
format_ = 0x43
bpp = 16
elif fourcc == b'DXT5':
format_ = 0x44
bpp = 16
elif fourcc in [b'BC4U', b'ATI1']:
format_ = 0x49
bpp = 8
elif fourcc == b'BC4S':
format_ = 0x4a
bpp = 8
elif fourcc in [b'BC5U', b'ATI2']:
format_ = 0x4b
bpp = 16
elif fourcc == b'BC5S':
format_ = 0x4c
bpp = 16
size = ((width + 3) >> 2) * ((height + 3) >> 2) * bpp
else:
if luminance:
if has_alpha:
if channel0 in a8l8_masks and channel1 in a8l8_masks and channel2 in a8l8_masks and channel3 in a8l8_masks and bpp == 2:
format_ = 0xd
compSel = [a8l8_masks[channel0], a8l8_masks[channel1], a8l8_masks[channel2], a8l8_masks[channel3]]
else:
if channel0 in l8_masks and channel1 in l8_masks and channel2 in l8_masks and channel3 in l8_masks and bpp == 1:
format_ = 1
compSel = [l8_masks[channel0], l8_masks[channel1], l8_masks[channel2], l8_masks[channel3]]
elif rgb:
if has_alpha:
if bpp == 4:
if channel0 in abgr8_masks and channel1 in abgr8_masks and channel2 in abgr8_masks and channel3 in abgr8_masks:
format_ = 0x38 if SRGB else 0x25
compSel = [abgr8_masks[channel0], abgr8_masks[channel1], abgr8_masks[channel2], abgr8_masks[channel3]]
elif channel0 in a2rgb10_masks and channel1 in a2rgb10_masks and channel2 in a2rgb10_masks and channel3 in a2rgb10_masks:
format_ = 0x3d
compSel = [a2rgb10_masks[channel0], a2rgb10_masks[channel1], a2rgb10_masks[channel2], a2rgb10_masks[channel3]]
elif bpp == 2:
if channel0 in a1bgr5_masks and channel1 in a1bgr5_masks and channel2 in a1bgr5_masks and channel3 in a1bgr5_masks:
format_ = 0x3b
compSel = [a1bgr5_masks[channel0], a1bgr5_masks[channel1], a1bgr5_masks[channel2], a1bgr5_masks[channel3]]
elif channel0 in abgr4_masks and channel1 in abgr4_masks and channel2 in abgr4_masks and channel3 in abgr4_masks:
format_ = 0x39
compSel = [abgr4_masks[channel0], abgr4_masks[channel1], abgr4_masks[channel2], abgr4_masks[channel3]]
else:
if channel0 in bgr8_masks and channel1 in bgr8_masks and channel2 in bgr8_masks and channel3 == 0 and bpp == 3: # Kinda not looking good if you ask me
format_ = 0x38 if SRGB else 0x25
compSel = [bgr8_masks[channel0], bgr8_masks[channel1], bgr8_masks[channel2], 3]
                elif channel0 in bgr565_masks and channel1 in bgr565_masks and channel2 in bgr565_masks and channel3 in bgr565_masks and bpp == 2:
format_ = 0x3c
compSel = [bgr565_masks[channel0], bgr565_masks[channel1], bgr565_masks[channel2], bgr565_masks[channel3]]
size = width * height * bpp
if caps == 0x401008:
numMips = struct.unpack("<I", inb[28:32])[0] - 1
mipSize = get_mipSize(width, height, bpp, numMips, compressed)
else:
numMips = 0
mipSize = 0
if len(inb) < 0x80+size+mipSize:
print("")
print(f + " is not a valid DDS file!")
return 0, 0, 0, b'', 0, [], 0, []
if format_ == 0:
print("")
print("Unsupported DDS format!")
return 0, 0, 0, b'', 0, [], 0, []
data = inb[0x80:0x80+size+mipSize]
if format_ in [0x25, 0x38] and bpp == 3:
data = form_conv.rgb8torgbx8(data)
bpp += 1
size = width * height * bpp
return width, height, format_, fourcc, size, compSel, numMips, data
def get_mipSize(width, height, bpp, numMips, compressed):
size = 0
for i in range(numMips):
level = i + 1
if compressed:
size += ((max(1, width >> level) + 3) >> 2) * ((max(1, height >> level) + 3) >> 2) * bpp
else:
size += max(1, width >> level) * max(1, height >> level) * bpp
return size
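# Worked example: a 64x64 BC1 texture (bpp == 8 bytes per 4x4 block) with 2 extra mips:
# level 1 -> ((32+3)>>2) * ((32+3)>>2) * 8 = 8*8*8 = 512 bytes
# level 2 -> ((16+3)>>2) * ((16+3)>>2) * 8 = 4*4*8 = 128 bytes
# get_mipSize(64, 64, 8, 2, True) == 640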
def generateHeader(num_mipmaps, w, h, format_, compSel, size, compressed):
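    # Note: format_ is an integer (DXGI-style, e.g. 28 = ABGR8) for uncompressed
    # textures and a fourcc-like string (e.g. "BC1") for block-compressed ones;
    # the two branches below rely on that dual convention.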
hdr = bytearray(128)
luminance = False
RGB = False
has_alpha = True
if format_ == 28: # ABGR8
RGB = True
compSels = {0: 0x000000ff, 1: 0x0000ff00, 2: 0x00ff0000, 3: 0xff000000, 5: 0}
fmtbpp = 4
elif format_ == 24: # A2RGB10
RGB = True
compSels = {0: 0x3ff00000, 1: 0x000ffc00, 2: 0x000003ff, 3: 0xc0000000, 5: 0}
fmtbpp = 4
elif format_ == 85: # BGR565
RGB = True
compSels = {0: 0x0000001f, 1: 0x000007e0, 2: 0x0000f800, 3: 0, 5: 0}
fmtbpp = 2
has_alpha = False
elif format_ == 86: # A1BGR5
RGB = True
compSels = {0: 0x0000001f, 1: 0x000003e0, 2: 0x00007c00, 3: 0x00008000, 5: 0}
fmtbpp = 2
elif format_ == 115: # ABGR4
RGB = True
compSels = {0: 0x0000000f, 1: 0x000000f0, 2: 0x00000f00, 3: 0x0000f000, 5: 0}
fmtbpp = 2
elif format_ == 61: # L8
luminance = True
compSels = {0: 0x000000ff, 1: 0, 2: 0, 3: 0, 5: 0}
fmtbpp = 1
if compSel[3] != 0:
has_alpha = False
elif format_ == 49: # A8L8
luminance = True
compSels = {0: 0x000000ff, 1: 0x0000ff00, 2: 0, 3: 0, 5: 0}
fmtbpp = 2
flags = 0x00000001 | 0x00001000 | 0x00000004 | 0x00000002
caps = 0x00001000
if num_mipmaps == 0:
num_mipmaps = 1
elif num_mipmaps != 1:
flags |= 0x00020000
caps |= 0x00000008 | 0x00400000
if not compressed:
flags |= 0x00000008
a = False
if compSel[0] != 0 and compSel[1] != 0 and compSel[2] != 0 and compSel[3] == 0: # ALPHA
a = True
pflags = 0x00000002
elif luminance: # LUMINANCE
pflags = 0x00020000
elif RGB: # RGB
pflags = 0x00000040
else: # Not possible...
return b''
if has_alpha and not a:
pflags |= 0x00000001
size = w * fmtbpp
else:
flags |= 0x00080000
pflags = 0x00000004
if format_ == "BC1":
fourcc = b'DXT1'
elif format_ == "BC2":
fourcc = b'DXT3'
elif format_ == "BC3":
fourcc = b'DXT5'
elif format_ == "BC4U":
fourcc = b'ATI1'
elif format_ == "BC4S":
fourcc = b'BC4S'
elif format_ == "BC5U":
fourcc = b'ATI2'
elif format_ == "BC5S":
fourcc = b'BC5S'
hdr[0:0 + 4] = b'DDS '
    hdr[4:4 + 4] = (124).to_bytes(4, 'little')
hdr[8:8 + 4] = flags.to_bytes(4, 'little')
hdr[12:12 + 4] = h.to_bytes(4, 'little')
hdr[16:16 + 4] = w.to_bytes(4, 'little')
hdr[20:20 + 4] = size.to_bytes(4, 'little')
hdr[28:28 + 4] = num_mipmaps.to_bytes(4, 'little')
    hdr[76:76 + 4] = (32).to_bytes(4, 'little')
hdr[80:80 + 4] = pflags.to_bytes(4, 'little')
if compressed:
hdr[84:84 + 4] = fourcc
else:
hdr[88:88 + 4] = (fmtbpp << 3).to_bytes(4, 'little')
hdr[92:92 + 4] = compSels[compSel[0]].to_bytes(4, 'little')
hdr[96:96 + 4] = compSels[compSel[1]].to_bytes(4, 'little')
hdr[100:100 + 4] = compSels[compSel[2]].to_bytes(4, 'little')
hdr[104:104 + 4] = compSels[compSel[3]].to_bytes(4, 'little')
hdr[108:108 + 4] = caps.to_bytes(4, 'little')
return hdr
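# Minimal round-trip sketch (the file name is hypothetical). Note that
# generateHeader uses its own format ids (e.g. 28 = ABGR8), not the ones
# returned by readDDS:
# width, height, format_, fourcc, size, compSel, numMips, data = readDDS("texture.dds", SRGB=False)
# if size:  # readDDS signals failure by returning zeros/empty values
#     hdr = generateHeader(numMips, width, height, 28, compSel, size, compressed=False)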
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from polyaxon.env_vars.keys import (
POLYAXON_KEYS_ARCHIVE_ROOT,
POLYAXON_KEYS_ARTIFACTS_ROOT,
POLYAXON_KEYS_CONTEXT_ROOT,
POLYAXON_KEYS_OFFLINE_ROOT,
)
def polyaxon_user_path():
base_path = os.path.expanduser("~")
if not os.access(base_path, os.W_OK):
base_path = "/tmp"
return os.path.join(base_path, ".polyaxon")
CONTEXT_ROOT = os.environ.get(POLYAXON_KEYS_CONTEXT_ROOT, "/plx-context")
CONTEXT_MOUNT_GC = "{}/.gc/gc-secret.json".format(CONTEXT_ROOT)
CONTEXT_MOUNT_CONFIGS = "{}/.configs".format(CONTEXT_ROOT)
CONTEXT_MOUNT_AUTH = "{}/.auth".format(CONTEXT_MOUNT_CONFIGS)
CONTEXT_MOUNT_ARTIFACTS = "{}/artifacts".format(CONTEXT_ROOT)
CONTEXT_MOUNT_ARTIFACTS_FORMAT = "{}/{{}}".format(CONTEXT_MOUNT_ARTIFACTS)
CONTEXT_MOUNT_ARTIFACTS_RELATED = CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("_related_runs")
CONTEXT_MOUNT_ARTIFACTS_RELATED_FORMAT = "{}/{{}}".format(
CONTEXT_MOUNT_ARTIFACTS_RELATED
)
CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT = "{}/outputs".format(CONTEXT_MOUNT_ARTIFACTS_FORMAT)
CONTEXT_MOUNT_RUN_EVENTS_FORMAT = "{}/events".format(CONTEXT_MOUNT_ARTIFACTS_FORMAT)
CONTEXT_MOUNT_SHM = "/dev/shm"
CONTEXT_MOUNT_DOCKER = "/var/run/docker.sock"
CONTEXT_TMP_POLYAXON_PATH = "/tmp/.polyaxon/"
CONTEXT_USER_POLYAXON_PATH = polyaxon_user_path()
CONTEXT_ARCHIVE_ROOT = os.environ.get(POLYAXON_KEYS_ARCHIVE_ROOT, "/tmp/plx/archives")
CONTEXT_ARTIFACTS_ROOT = os.environ.get(
POLYAXON_KEYS_ARTIFACTS_ROOT, "/tmp/plx/artifacts"
)
CONTEXT_OFFLINE_ROOT = os.environ.get(POLYAXON_KEYS_OFFLINE_ROOT, "/tmp/plx/offline")
CONTEXT_OFFLINE_FORMAT = "{}/{{}}".format(CONTEXT_OFFLINE_ROOT)
CONTEXT_ARTIFACTS_FORMAT = "{}/{{}}".format(CONTEXT_ARTIFACTS_ROOT)
CONTEXTS_OUTPUTS_SUBPATH_FORMAT = "{}/outputs"
CONTEXTS_EVENTS_SUBPATH_FORMAT = "{}/events"
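# Usage sketch (the run uuid is a made-up example; paths assume the default env vars):
# CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("0c4f")    # '/plx-context/artifacts/0c4f'
# CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format("0c4f")  # '/plx-context/artifacts/0c4f/outputs'
# CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format("base")   # 'base/outputs'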
|
nilq/baby-python
|
python
|
# Marcelo Campos de Medeiros
# ADS UNIFIP
# PYTHON REVIEW
# CLASS 10 - CONDITIONALS - GUSTAVO GUANABARA
'''
Write a program that asks for the employee's salary and calculates the raise.
* For salaries above R$ 1,250.00, apply a 10% raise.
* For salaries at or below that, the raise is 15%.
'''
print('='*30)
print('{:$^30}'.format(' SALARY RAISE '))
print('='*30)
print()
salario = float(input('What is your salary: '))
print()
if salario <= 1250:
    reajuste = salario + (salario * 0.15)
    print('Your salary was R$ %.2f.\nWith the raise, your new salary is R$ %.2f.' % (salario, reajuste))
else:
    reajuste = salario + (salario * 0.10)
    print('Your salary was R$ %.2f.\nWith the raise, your new salary is R$ %.2f.' % (salario, reajuste))
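# Worked check: R$ 1000.00 -> 15% raise -> R$ 1150.00; R$ 2000.00 -> 10% raise -> R$ 2200.00.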
print()
|
nilq/baby-python
|
python
|
# global imports
from dash.dependencies import Input, Output, ALL
from dash import html
# local imports
from ..dash_app import app
import pemfc_gui.input as gui_input
from .. import dash_layout as dl
tab_layout = html.Div(dl.frame(gui_input.main_frame_dicts[1]))
@app.callback(
Output({'type': ALL, 'id': ALL, 'specifier': 'disabled_manifolds'},
'disabled'),
Input({'type': ALL, 'id': ALL,
'specifier': 'checklist_activate_calculation'}, 'value'),
Input({'type': ALL, 'id': ALL, 'specifier': 'disabled_manifolds'}, 'value'),
)
def activate_column(input1, input2):
len_state = len(input2)
    list_state = [True] * len_state  # disable=True for all inputs
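    # The hard-coded indices below assume the manifold widgets arrive as flat
    # pattern-matched lists with a stride of 3 (one entry per row), mirroring
    # the layout built from gui_input.main_frame_dicts[1]; this mapping is an
    # inference from that layout, not documented behavior.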
for num, val in enumerate(input1): # 3 inputs in input1 for 3 rows
if val == [1]:
list_state[0 + num] = list_state[3 + num] = list_state[15 + num] = \
list_state[18 + num] = list_state[30 + num] = False
if input2[3+num] == 'circular':
list_state[6+num], list_state[9+num], list_state[12+num] = \
False, True, True
else:
list_state[6+num], list_state[9+num], list_state[12+num] = \
True, False, False
if input2[18+num] == 'circular':
list_state[21+num], list_state[24+num], list_state[27+num] = \
False, True, True
else:
list_state[21+num], list_state[24+num], list_state[27+num] = \
True, False, False
return list_state
|
nilq/baby-python
|
python
|