code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from collections import namedtuple
import json, logging, socket, re, struct, time
from typing import Tuple, Iterator
from urllib.parse import urlparse, parse_qs
from backend import Backend, Change
from protocol import PacketType, recvall, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet
# Total number of reconnection tries
RECONNECT_TRIES=5
# Delay in seconds between reconnections (initial)
RECONNECT_DELAY=5
# Scale delay factor after each failure
RECONNECT_DELAY_BACKOFF=1.5
# Parsed host/port pair plus its address family (one of the AddrType constants).
HostPortInfo = namedtuple('HostPortInfo', ['host', 'port', 'addrtype'])
# Parsed socket URL: target HostPortInfo plus proxy type and proxy HostPortInfo (or None).
SocketURLInfo = namedtuple('SocketURLInfo', ['target', 'proxytype', 'proxytarget'])
# Network address type.
class AddrType:
    """Network address type constants used in HostPortInfo.addrtype."""
    IPv4 = 0
    IPv6 = 1
    NAME = 2  # host given as a name rather than a literal address
# Proxy type. Only SOCKS5 supported at the moment as this is sufficient for Tor.
class ProxyType:
    """Proxy type constants. Only SOCKS5 is supported at the moment, as this is sufficient for Tor."""
    DIRECT = 0
    SOCKS5 = 1
def parse_host_port(path: str) -> HostPortInfo:
'''Parse a host:port pair.'''
if path.startswith('['): # bracketed IPv6 address
eidx = path.find(']')
if eidx == -1:
raise ValueError('Unterminated bracketed host address.')
host = path[1:eidx]
addrtype = AddrType.IPv6
eidx += 1
if eidx >= len(path) or path[eidx] != ':':
raise ValueError('Port number missing.')
eidx += 1
else:
eidx = path.find(':')
if eidx == -1:
raise ValueError('Port number missing.')
host = path[0:eidx]
if re.match('\d+\.\d+\.\d+\.\d+$', host): # matches IPv4 address format
addrtype = AddrType.IPv4
else:
addrtype = AddrType.NAME
eidx += 1
try:
port = int(path[eidx:])
except ValueError:
raise ValueError('Invalid port number')
return HostPortInfo(host=host, port=port, addrtype=addrtype)
def parse_socket_url(destination: str) -> SocketURLInfo:
'''Parse a socket: URL to extract the information contained in it.'''
url = urlparse(destination)
if url.scheme != 'socket':
raise ValueError('Scheme for socket backend must be socket:...')
target = parse_host_port(url.path)
proxytype = ProxyType.DIRECT
proxytarget = None
# parse query parameters
# reject unknown parameters (currently all of them)
qs = parse_qs(url.query)
for (key, values) in qs.items():
if key == 'proxy': # proxy=socks5:127.0.0.1:9050
if len(values) != 1:
raise ValueError('Proxy can only have one value')
(ptype, ptarget) = values[0].split(':', 1)
if ptype != 'socks5':
raise ValueError('Unknown proxy type ' + ptype)
proxytype = ProxyType.SOCKS5
proxytarget = parse_host_port(ptarget)
else:
raise ValueError('Unknown query string parameter ' + key)
return SocketURLInfo(target=target, proxytype=proxytype, proxytarget=proxytarget)
class SocketBackend(Backend):
    """Backup backend that talks to a remote server over TCP, optionally through a SOCKS5 proxy."""
    def __init__(self, destination: str, create: bool):
        # Current and previous backup version as last reported by the server.
        self.version = None
        self.prev_version = None
        self.destination = destination
        self.url = parse_socket_url(destination)
        self.connect()
    def connect(self):
        """Open the TCP connection, either directly or through the configured SOCKS5 proxy."""
        if self.url.proxytype == ProxyType.DIRECT:
            if self.url.target.addrtype == AddrType.IPv6:
                self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            else: # TODO NAME is assumed to be IPv4 for now
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            assert(self.url.proxytype == ProxyType.SOCKS5)
            # Imported lazily so the dependency is only required when a proxy is configured.
            import socks
            self.sock = socks.socksocket()
            self.sock.set_proxy(socks.SOCKS5, self.url.proxytarget.host, self.url.proxytarget.port)
        logging.info('Connecting to {}:{} (addrtype {}, proxytype {}, proxytarget {})...'.format(
            self.url.target.host, self.url.target.port, self.url.target.addrtype,
            self.url.proxytype, self.url.proxytarget))
        self.sock.connect((self.url.target.host, self.url.target.port))
        logging.info('Connected to {}'.format(self.destination))
    def _send_packet(self, typ: int, payload: bytes) -> None:
        """Send one framed packet of the given type over the socket."""
        send_packet(self.sock, typ, payload)
    def _recv_packet(self) -> Tuple[int, bytes]:
        """Receive one framed packet; returns (type, payload)."""
        return recv_packet(self.sock)
    def initialize(self) -> bool:
        '''
        Initialize socket backend by request current metadata from server.
        '''
        logging.info('Initializing backend')
        self._request_metadata()
        logging.info('Initialized SocketBackend: protocol={}, version={}, prev_version={}, version_count={}'.format(
            self.protocol, self.version, self.prev_version, self.version_count
        ))
        return True
    def _request_metadata(self) -> None:
        """Request the server's metadata and cache protocol/version information locally."""
        self._send_packet(PacketType.REQ_METADATA, b'')
        (typ, payload) = self._recv_packet()
        assert(typ == PacketType.METADATA)
        # Network byte order: three 32-bit fields followed by a 64-bit version count.
        self.protocol, self.version, self.prev_version, self.version_count = struct.unpack("!IIIQ", payload)
    def add_change(self, entry: Change) -> bool:
        """Send one change to the server, reconnecting and retrying (with backoff) on connection loss."""
        typ, payload = packet_from_change(entry)
        # Version we expect the server to be at before this change is applied.
        base_version = self.version
        retry = 0
        retry_delay = RECONNECT_DELAY
        need_connect = False
        while True: # Retry loop
            try:
                if need_connect:
                    self.connect()
                    # Request metadata, to know where we stand
                    self._request_metadata()
                    if self.version == entry.version:
                        # If the current version at the server side matches the version of the
                        # entry, the packet was succesfully sent and processed and the error
                        # happened afterward. Nothing left to do.
                        return True
                    elif base_version == self.version:
                        # The other acceptable option is that the current version still matches
                        # that on the server side. Then we retry.
                        pass
                    else:
                        raise Exception('Unexpected backup version {} after reconnect'.format(self.version))
                self._send_packet(typ, payload)
                # Wait for change to be acknowledged before continuing.
                (typ, _) = self._recv_packet()
                assert(typ == PacketType.ACK)
            except (BrokenPipeError, OSError):
                # Connection problem: fall through to the retry/backoff handling below.
                pass
            else:
                break
            if retry == RECONNECT_TRIES:
                logging.error('Connection was lost while sending change (giving up after {} retries)'.format(retry))
                raise IOError('Connection was lost while sending change')
            retry += 1
            logging.warning('Connection was lost while sending change (retry {} of {}, will try again after {} seconds)'.format(retry, RECONNECT_TRIES, retry_delay))
            time.sleep(retry_delay)
            # Exponential backoff between reconnection attempts.
            retry_delay *= RECONNECT_DELAY_BACKOFF
            need_connect = True
        self.prev_version = self.version
        self.version = entry.version
        return True
    def rewind(self) -> bool:
        '''Rewind to previous version.'''
        version = struct.pack("!I", self.prev_version)
        self._send_packet(PacketType.REWIND, version)
        # Wait for change to be acknowledged before continuing.
        (typ, _) = self._recv_packet()
        assert(typ == PacketType.ACK)
        return True
    def stream_changes(self) -> Iterator[Change]:
        """Yield every change stored on the server, in order, until the server sends DONE."""
        self._send_packet(PacketType.RESTORE, b'')
        version = -1
        while True:
            (typ, payload) = self._recv_packet()
            if typ in PKT_CHANGE_TYPES:
                change = change_from_packet(typ, payload)
                version = change.version
                yield change
            elif typ == PacketType.DONE:
                break
            else:
                raise ValueError("Unknown entry type {}".format(typ))
        # The last restored version must match the server-reported current version.
        if version != self.version:
            raise ValueError("Versions do not match up: restored version {}, backend version {}".format(version, self.version))
        assert(version == self.version)
    def compact(self):
        """Ask the server to compact its storage; returns the server's JSON result decoded to a Python object."""
        self._send_packet(PacketType.COMPACT, b'')
        (typ, payload) = self._recv_packet()
        assert(typ == PacketType.COMPACT_RES)
        return json.loads(payload.decode())
| [
"socks.socksocket",
"collections.namedtuple",
"urllib.parse.urlparse",
"socket.socket",
"protocol.change_from_packet",
"protocol.recv_packet",
"protocol.send_packet",
"re.match",
"struct.pack",
"time.sleep",
"urllib.parse.parse_qs",
"struct.unpack",
"logging.info",
"protocol.packet_from_ch... | [((534, 590), 'collections.namedtuple', 'namedtuple', (['"""HostPortInfo"""', "['host', 'port', 'addrtype']"], {}), "('HostPortInfo', ['host', 'port', 'addrtype'])\n", (544, 590), False, 'from collections import namedtuple\n'), ((607, 674), 'collections.namedtuple', 'namedtuple', (['"""SocketURLInfo"""', "['target', 'proxytype', 'proxytarget']"], {}), "('SocketURLInfo', ['target', 'proxytype', 'proxytarget'])\n", (617, 674), False, 'from collections import namedtuple\n'), ((1995, 2016), 'urllib.parse.urlparse', 'urlparse', (['destination'], {}), '(destination)\n', (2003, 2016), False, 'from urllib.parse import urlparse, parse_qs\n'), ((2312, 2331), 'urllib.parse.parse_qs', 'parse_qs', (['url.query'], {}), '(url.query)\n', (2320, 2331), False, 'from urllib.parse import urlparse, parse_qs\n'), ((1499, 1543), 're.match', 're.match', (['"""\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+$"""', 'host'], {}), "('\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+$', host)\n", (1507, 1543), False, 'import json, logging, socket, re, struct, time\n'), ((4245, 4281), 'protocol.send_packet', 'send_packet', (['self.sock', 'typ', 'payload'], {}), '(self.sock, typ, payload)\n', (4256, 4281), False, 'from protocol import PacketType, recvall, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet\n'), ((4347, 4369), 'protocol.recv_packet', 'recv_packet', (['self.sock'], {}), '(self.sock)\n', (4358, 4369), False, 'from protocol import PacketType, recvall, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet\n'), ((4512, 4548), 'logging.info', 'logging.info', (['"""Initializing backend"""'], {}), "('Initializing backend')\n", (4524, 4548), False, 'import json, logging, socket, re, struct, time\n'), ((5072, 5103), 'struct.unpack', 'struct.unpack', (['"""!IIIQ"""', 'payload'], {}), "('!IIIQ', payload)\n", (5085, 5103), False, 'import json, logging, socket, re, struct, time\n'), ((5177, 5202), 
'protocol.packet_from_change', 'packet_from_change', (['entry'], {}), '(entry)\n', (5195, 5202), False, 'from protocol import PacketType, recvall, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet\n'), ((7330, 7366), 'struct.pack', 'struct.pack', (['"""!I"""', 'self.prev_version'], {}), "('!I', self.prev_version)\n", (7341, 7366), False, 'import json, logging, socket, re, struct, time\n'), ((3674, 3692), 'socks.socksocket', 'socks.socksocket', ([], {}), '()\n', (3690, 3692), False, 'import socks\n'), ((7033, 7056), 'time.sleep', 'time.sleep', (['retry_delay'], {}), '(retry_delay)\n', (7043, 7056), False, 'import json, logging, socket, re, struct, time\n'), ((3363, 3413), 'socket.socket', 'socket.socket', (['socket.AF_INET6', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET6, socket.SOCK_STREAM)\n', (3376, 3413), False, 'import json, logging, socket, re, struct, time\n'), ((3502, 3551), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (3515, 3551), False, 'import json, logging, socket, re, struct, time\n'), ((7839, 7871), 'protocol.change_from_packet', 'change_from_packet', (['typ', 'payload'], {}), '(typ, payload)\n', (7857, 7871), False, 'from protocol import PacketType, recvall, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet\n')] |
from typing import List, Tuple
from pathlib import Path
def get_ot_defs() -> List[Tuple[str, int]]:
    """Return (labware load name, definition version) pairs for every bundled
    labware definition JSON file.

    Files live at paths like
    shared-data/labware/definitions/2/opentrons_96_tiprack_300ul/1.json,
    so the parent directory name is the load name and the file stem is the
    integer version.
    """
    root = (
        Path(__file__).parent / ".." / ".." / ".." / "labware" / "definitions" / "2"
    )
    pairs = []
    for def_file in root.glob("**/*.json"):
        pairs.append((def_file.parent.name, int(def_file.stem)))
    return pairs
| [
"pathlib.Path"
] | [((129, 143), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (133, 143), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 23:19:58 2021
@author: qiang
"""
import tensorflow.keras.backend as K
import tensorflow as tf
import time
from rl_symbol_env_continous import symbol_env_continous
from DDPG_keras import ActorCritic
from env_base import UR5_env
# Observation/action dimensions of the environment (tuple and scalar forms).
obs_dim = (10,)
obs_dimss = 10
act_dim = 4
act_dimension = (4,)
# TF1-style session shared between Keras and the DDPG actor/critic networks.
sess = tf.Session()
K.set_session(sess)
env = symbol_env_continous()
actor_critic = ActorCritic(sess)
# Training episode whose saved weight files are restored below.
episodee = 2500
actor_critic.actor_model.load_weights(str(episodee)+"_"+"actor"+".h5")
actor_critic.target_actor_model.load_weights(str(episodee)+"_"+"actor_target"+".h5")
actor_critic.critic_model.load_weights(str(episodee)+"_"+"critic"+".h5")
actor_critic.target_critic_model.load_weights(str(episodee)+"_"+"critic_target"+".h5")
#%%
# Input how many times you want to test the robot.
times = 100
# Separate environments: one for the RL state/reward logic, one for the simulator view.
env_rl = symbol_env_continous()
env_sim = UR5_env()
env_sim.sim_start()
for _ in range(times):
    done = False
    cur_state = env_rl.reset()
    # Step counter; each episode is capped at 100 steps.
    el = 0
    # Observation entries 7..9 are forwarded to the simulator as the target position.
    target_pos = [cur_state[7],cur_state[8],cur_state[9]]
    env_sim.set_target_pos(target_pos)
    env_sim.movej(env_rl.current_joint_pos)
    cur_state = cur_state.reshape((1, obs_dimss))
    while not done and el<=100:
        el += 1
        action = actor_critic.act(cur_state)
        action = action.reshape((1, act_dim))
        next_state, reward, done = env_rl.step(action)
        # Mirror the RL step in the simulator and pace it for visualization.
        env_sim.movej(env_rl.current_joint_pos)
        time.sleep(0.05)
        next_state = next_state.reshape((1,obs_dimss))
        cur_state = next_state
| [
"rl_symbol_env_continous.symbol_env_continous",
"env_base.UR5_env",
"tensorflow.Session",
"time.sleep",
"DDPG_keras.ActorCritic",
"tensorflow.keras.backend.set_session"
] | [((377, 389), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (387, 389), True, 'import tensorflow as tf\n'), ((390, 409), 'tensorflow.keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (403, 409), True, 'import tensorflow.keras.backend as K\n'), ((416, 438), 'rl_symbol_env_continous.symbol_env_continous', 'symbol_env_continous', ([], {}), '()\n', (436, 438), False, 'from rl_symbol_env_continous import symbol_env_continous\n'), ((454, 471), 'DDPG_keras.ActorCritic', 'ActorCritic', (['sess'], {}), '(sess)\n', (465, 471), False, 'from DDPG_keras import ActorCritic\n'), ((883, 905), 'rl_symbol_env_continous.symbol_env_continous', 'symbol_env_continous', ([], {}), '()\n', (903, 905), False, 'from rl_symbol_env_continous import symbol_env_continous\n'), ((916, 925), 'env_base.UR5_env', 'UR5_env', ([], {}), '()\n', (923, 925), False, 'from env_base import UR5_env\n'), ((1477, 1493), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1487, 1493), False, 'import time\n')] |
#!/usr/bin/env python
# encoding: utf-8
from flask import Flask, request, jsonify
import base64
import numpy as np
from util.args_help import fill_from_args
import os
import logging
from dpr.simple_mmap_dataset import Corpus
from dpr.faiss_index import ANNIndex
logger = logging.getLogger(__name__)
class Options():
    """Command-line options for the retrieval REST service."""
    def __init__(self):
        # Port the Flask app listens on.
        self.port = 5001
        # Directory holding the passage corpus and its FAISS index.
        self.corpus_dir = ''
        self.model_name = 'facebook/rag-token-nq'
        # Wire format for vectors: 16 or 32 bit floats.
        self.rest_dtype = 16
        self.local_only = False  # only accessible on same machine
        self.debug = False
        self.log_info = False
        self.__required_args__ = ['corpus_dir']
    def get_rest_dtype(self):
        """Return the numpy dtype matching the configured rest_dtype."""
        return {32: np.float32}.get(self.rest_dtype, np.float16)
def run(opts: Options):
    """Configure logging, build the Flask app around the corpus and FAISS index, and serve it."""
    # Replace any pre-existing logging configuration.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(format='%(filename)s:%(lineno)d - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if opts.log_info else logging.WARNING)
    app = Flask(__name__)
    if not opts.log_info:
        # Silence Flask/werkzeug request logging unless info logging was requested.
        log = logging.getLogger('werkzeug')
        log.disabled = True
        app.logger.disabled = True
        app.logger.setLevel(logging.WARNING)
    # Memory-mapped passage corpus and its FAISS approximate-nearest-neighbor index.
    passages = Corpus(os.path.join(opts.corpus_dir))
    index = ANNIndex(os.path.join(opts.corpus_dir, "index.faiss"))
    dim = index.dim()
    print(dim)
    @app.route('/config', methods=['GET'])
    def get_config():
        """Report service configuration: vector dtype, dimension and corpus directory."""
        return jsonify({'dtype': opts.rest_dtype, 'dim': dim, 'corpus': opts.corpus_dir})
    @app.route('/retrieve', methods=['POST'])
    def retrieve_docs():
        """Nearest-neighbor retrieval: decode the query vectors, search the index, return docs."""
        rest_dtype = opts.get_rest_dtype()
        query = request.get_json()
        # input is three parts:
        #  the base64 encoded fp16 numpy matrix
        #  k (the number of records per document)
        #  return-vectors flag
        query_vectors = np.frombuffer(base64.decodebytes(query['query_vectors'].encode('ascii')), dtype=rest_dtype).reshape(-1, dim)
        k = query['k']
        include_vectors = 'include_vectors' in query and query['include_vectors']
        # FAISS search runs in float32 regardless of the wire dtype.
        query_vectors = query_vectors.astype(np.float32)
        scores, indexes = index.search(query_vectors, k)
        docs = [[passages[ndx] for ndx in ndxs] for ndxs in indexes]
        # Include pids only when the corpus provides them.
        if 'pid' in docs[0][0]:
            doc_dicts = [{'pid': [dqk['pid'] for dqk in dq],
                          'title': [dqk['title'] for dqk in dq],
                          'text': [dqk['text'] for dqk in dq]} for dq in docs]
        else:
            doc_dicts = [{'title': [dqk['title'] for dqk in dq],
                          'text': [dqk['text'] for dqk in dq]} for dq in docs]
        retval = {'docs': doc_dicts}
        if include_vectors:
            # Re-encode the retrieved passage vectors in the wire dtype.
            doc_vectors = np.zeros([query_vectors.shape[0], k, query_vectors.shape[1]], dtype=rest_dtype)
            for qi, docs_qi in enumerate(docs):
                for ki, doc_qi_ki in enumerate(docs_qi):
                    doc_vectors[qi, ki] = doc_qi_ki['vector']
            retval['doc_vectors'] = base64.b64encode(doc_vectors).decode('ascii')
        # print(retval)
        # output
        #   list of docs: len(docs) == query_vectors.shape[0]; len(docs[i].title) == len(docs[i].text) == k
        #   doc_vectors: query_vectors.shape[0] x k x query_vectors.shape[1]
        return jsonify(retval)
    app.run(host='127.0.0.1' if opts.local_only else '0.0.0.0', debug=opts.debug, port=opts.port)
if __name__ == '__main__':
    # Fill the option defaults from the command line, then start serving.
    opts = Options()
    fill_from_args(opts)
    run(opts)
| [
"logging.getLogger",
"logging.basicConfig",
"util.args_help.fill_from_args",
"flask.Flask",
"base64.b64encode",
"os.path.join",
"flask.request.get_json",
"numpy.zeros",
"logging.root.removeHandler",
"flask.jsonify"
] | [((272, 299), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'import logging\n'), ((865, 1029), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(filename)s:%(lineno)d - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if opts.log_info else logging.WARNING)'}), "(format='%(filename)s:%(lineno)d - %(message)s', datefmt\n ='%m/%d/%Y %H:%M:%S', level=logging.INFO if opts.log_info else logging.\n WARNING)\n", (884, 1029), False, 'import logging\n'), ((1078, 1093), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1083, 1093), False, 'from flask import Flask, request, jsonify\n'), ((3548, 3568), 'util.args_help.fill_from_args', 'fill_from_args', (['opts'], {}), '(opts)\n', (3562, 3568), False, 'from util.args_help import fill_from_args\n'), ((825, 860), 'logging.root.removeHandler', 'logging.root.removeHandler', (['handler'], {}), '(handler)\n', (851, 860), False, 'import logging\n'), ((1134, 1163), 'logging.getLogger', 'logging.getLogger', (['"""werkzeug"""'], {}), "('werkzeug')\n", (1151, 1163), False, 'import logging\n'), ((1294, 1323), 'os.path.join', 'os.path.join', (['opts.corpus_dir'], {}), '(opts.corpus_dir)\n', (1306, 1323), False, 'import os\n'), ((1346, 1390), 'os.path.join', 'os.path.join', (['opts.corpus_dir', '"""index.faiss"""'], {}), "(opts.corpus_dir, 'index.faiss')\n", (1358, 1390), False, 'import os\n'), ((1510, 1584), 'flask.jsonify', 'jsonify', (["{'dtype': opts.rest_dtype, 'dim': dim, 'corpus': opts.corpus_dir}"], {}), "({'dtype': opts.rest_dtype, 'dim': dim, 'corpus': opts.corpus_dir})\n", (1517, 1584), False, 'from flask import Flask, request, jsonify\n'), ((1716, 1734), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1732, 1734), False, 'from flask import Flask, request, jsonify\n'), ((3379, 3394), 'flask.jsonify', 'jsonify', (['retval'], {}), '(retval)\n', (3386, 3394), False, 'from flask import Flask, request, 
jsonify\n'), ((2808, 2887), 'numpy.zeros', 'np.zeros', (['[query_vectors.shape[0], k, query_vectors.shape[1]]'], {'dtype': 'rest_dtype'}), '([query_vectors.shape[0], k, query_vectors.shape[1]], dtype=rest_dtype)\n', (2816, 2887), True, 'import numpy as np\n'), ((3091, 3120), 'base64.b64encode', 'base64.b64encode', (['doc_vectors'], {}), '(doc_vectors)\n', (3107, 3120), False, 'import base64\n')] |
import os
import shutil
import pandas as pd
from pathlib import Path
from b2btool.blob.storage import *
# Mapping between drawing numbers and their JDE equivalents. If the
# spreadsheet cannot be read, fall back to an empty mapping so the helper
# functions below degrade gracefully instead of failing later with a
# NameError on the unbound `conversion`.
try:
    conversion = pd.read_excel(
        r"J:\PTCR\Users\RECS\.b2btool\drawings_to_jde.xlsx",
        dtype={"ID": str, "JDE": str},
    )
except FileNotFoundError as ex:
    print(ex.args)
    conversion = pd.DataFrame(columns=["drawing_number", "JDE"])
def find_jde(drawing_name, conversion):
    """Return the list of JDE codes mapped to *drawing_name* in the conversion table."""
    matches = conversion.loc[conversion["drawing_number"] == drawing_name]
    return matches["JDE"].tolist()  # list of jde equivalent.
def copy_file(image, image_renamed, push=False):
    """Copy *image* to *image_renamed*, or park an existing *image_renamed* in 'duplicate'.

    When *push* is True the freshly copied file is uploaded to blob storage.
    NOTE(review): when the destination already exists, the existing file is
    moved to 'duplicate' and the new image is NOT copied afterwards, so the
    destination ends up missing -- confirm this is intended.
    """
    if os.path.exists(image_renamed):
        # Destination name already taken: move the existing file aside.
        shutil.move(image_renamed, f"duplicate/{image_renamed}")
        print(f"[!]: {image_renamed} -> duplicate")
    else:
        shutil.copyfile(image, image_renamed)
        print(f"[Copy] {image} \t->\t {image_renamed}")
        if push:
            push_to_blob_unique_image(image_renamed)
def rename_to_drawing_number(image_jpg, push):
    """Rename drawings-name >>> jde-name

    Looks up the file stem of *image_jpg* in the drawing->JDE conversion
    table and copies the file once per JDE equivalent. Files whose stem is
    not a known drawing number are treated as already JDE-named and are
    only pushed to blob storage when *push* is True.
    """
    image_name = Path(image_jpg).stem
    # example of renaming pictures names: PT0019300.jpg -> 340247.jpg
    # create a folder for duplicate jde pictures.
    os.makedirs("duplicate", exist_ok=True)
    if (
        os.path.exists(image_jpg) and image_name in conversion["drawing_number"].tolist()
    ):
        jde_equivalences = find_jde(image_name, conversion)
        if len(jde_equivalences) == 1:
            # single jde for one drawing
            jde_equivalent_jpg = f"{jde_equivalences[0]}.jpg"
            copy_file(image_jpg, jde_equivalent_jpg, push)
        elif len(jde_equivalences) > 1:
            # Multiple jde's for one drawing
            for jde in jde_equivalences:
                jde_jpg = f"{jde}.jpg"
                copy_file(image_jpg, jde_jpg, push)
            # The original drawing-named file is removed once all copies exist.
            os.remove(image_jpg)
            print(f"[Deleted]: {image_jpg}")
        else:
            # for empty list simply ignore.
            pass
    else:
        # it's a jde part image.
        if push:
            push_to_blob_unique_image(image_jpg)
push_to_blob_unique_image(image_jpg)
pass | [
"os.path.exists",
"os.makedirs",
"shutil.move",
"pathlib.Path",
"shutil.copyfile",
"pandas.read_excel",
"os.remove"
] | [((128, 233), 'pandas.read_excel', 'pd.read_excel', (['"""J:\\\\PTCR\\\\Users\\\\RECS\\\\.b2btool\\\\drawings_to_jde.xlsx"""'], {'dtype': "{'ID': str, 'JDE': str}"}), "('J:\\\\PTCR\\\\Users\\\\RECS\\\\.b2btool\\\\drawings_to_jde.xlsx',\n dtype={'ID': str, 'JDE': str})\n", (141, 233), True, 'import pandas as pd\n'), ((526, 555), 'os.path.exists', 'os.path.exists', (['image_renamed'], {}), '(image_renamed)\n', (540, 555), False, 'import os\n'), ((1116, 1155), 'os.makedirs', 'os.makedirs', (['"""duplicate"""'], {'exist_ok': '(True)'}), "('duplicate', exist_ok=True)\n", (1127, 1155), False, 'import os\n'), ((565, 621), 'shutil.move', 'shutil.move', (['image_renamed', 'f"""duplicate/{image_renamed}"""'], {}), "(image_renamed, f'duplicate/{image_renamed}')\n", (576, 621), False, 'import shutil\n'), ((692, 729), 'shutil.copyfile', 'shutil.copyfile', (['image', 'image_renamed'], {}), '(image, image_renamed)\n', (707, 729), False, 'import shutil\n'), ((971, 986), 'pathlib.Path', 'Path', (['image_jpg'], {}), '(image_jpg)\n', (975, 986), False, 'from pathlib import Path\n'), ((1173, 1198), 'os.path.exists', 'os.path.exists', (['image_jpg'], {}), '(image_jpg)\n', (1187, 1198), False, 'import os\n'), ((1755, 1775), 'os.remove', 'os.remove', (['image_jpg'], {}), '(image_jpg)\n', (1764, 1775), False, 'import os\n')] |
import spacy
import pandas as pd
import numpy as np
# Given tokens, find sentences containing a keyword from texts
def find_sents(tokens, keyword):
    """Collect the sentences in *tokens* that contain *keyword*.

    The match is a substring test of *keyword* against each lower-cased
    token text; every matching sentence is kept once, in document order.
    """
    matching = []
    for sentence in tokens.sents:
        words = [tok.text for tok in sentence]
        for word in words:
            if keyword in word.lower() and sentence not in matching:
                matching.append(sentence)
    return matching
# Given tokens and a keyword, get the dependency of this keyword in sentences
def get_dependency(tokens, keyword):
    """Collect the dependency context of every token whose text equals *keyword*.

    Returns a dict with:
      'lefts'  - one list of left-child texts per match (only when non-empty),
      'rights' - one list of right-child texts per match (only when non-empty),
      'head'   - the head token's text for every match.

    Fix: token.lefts/token.rights are generators in spaCy; the original code
    iterated each of them twice (once for the length test, once to build the
    appended list), so the second pass was always empty. Materialize once.
    """
    dependency = {'lefts': [], 'rights': [], 'head': []}
    for token in tokens:
        if token.text == keyword:
            left_texts = [t.text for t in token.lefts]
            if len(left_texts) > 0:
                dependency['lefts'].append(left_texts)
            right_texts = [t.text for t in token.rights]
            if len(right_texts) > 0:
                dependency['rights'].append(right_texts)
            dependency['head'].append(token.head.text)
    return dependency
# Given a df of news articles with texts,
# Get tokens with nlp
# Get sentences with a certain keyword in the text
# Get dependency of the keyword in the sentences containing the keyword
def process(df,keyword,txt_col):
    """For each article in *df*, tokenize *txt_col* with spaCy and record the
    sentences containing *keyword* plus the keyword's dependency context.

    Adds columns: 'nlp_t' (the spaCy Doc), *keyword* (matching sentences, or
    NaN when there are none) and 'dependence' (dict from get_dependency).
    Rows with a missing text column are dropped and the index is reset.
    """
    nlp = spacy.load('en_core_web_sm')
    # nlp processing get token from text
    df=df[~df[txt_col].isna()]
    df.loc[:,'nlp_t']=df.loc[:,txt_col].apply(lambda i: nlp(i))
    # Write sentences with keyword in the keyword column
    df.loc[:,keyword]=df.loc[:,'nlp_t'].apply(lambda x: find_sents(x,keyword))
    # if there is no sentence containing the keyword in the article, return nan
    df.loc[df[keyword].apply(lambda x: len(x)==0),keyword]=np.nan
    # Write dependence component in the dependency column
    df.loc[:,'dependence']=df.loc[:,'nlp_t'].apply(lambda i: get_dependency(i,keyword))
    df.reset_index(inplace=True,drop=True)
return df | [
"spacy.load"
] | [((1197, 1225), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (1207, 1225), False, 'import spacy\n')] |
# Noysim -- Noise simulation tools for Aimsun.
# Copyright (c) 2010-2011 by <NAME>, Ghent University & Griffith University.
#
# Basic geometry functions and classes
import numpy
import pylab
EPSILON = 10e-12 # smallest difference for points/directions
#---------------------------------------------------------------------------------------------------
# Convenience functions
#---------------------------------------------------------------------------------------------------
def parse_coordinates(*args):
  """ parse 2D/3D coordinates x,y(,z) in a variety of fashions, and return a 3-element tuple

  Accepted forms:
    parse_coordinates()            -> (0.0, 0.0, 0.0)
    parse_coordinates(point)       -> point.coordinates()
    parse_coordinates((x, y))      -> (x, y, 0.0)
    parse_coordinates((x, y, z))   -> (x, y, z)
    parse_coordinates('(x,y,z)')   -> (float(x), float(y), float(z))
    parse_coordinates(x, y)        -> (x, y, 0.0)
    parse_coordinates(x, y, z)     -> (x, y, z)
  Raises an Exception for anything else.
  """
  n = len(args)
  if n == 0:
    return (0.0,0.0,0.0)
  if n == 1:
    try: # try if a Point object is supplied
      return args[0].coordinates()
    # narrowed from a bare except, which also swallowed KeyboardInterrupt/SystemExit
    except (AttributeError, TypeError):
      if type(args[0]) in (tuple,list):
        # coordinates supplied as a tuple (x,y) or (x,y,z)
        if len(args[0]) == 2:
          return (args[0][0], args[0][1], 0.0)
        if len(args[0]) == 3:
          return (args[0][0], args[0][1], args[0][2])
      if type(args[0]) is str:
        # coordinates supplied as a string '(x,y,z)'
        c = args[0].strip('()').split(',')
        return (float(c[0]), float(c[1]), float(c[2]))
  else:
    # coordinates supplied as separate arguments x,y or x,y,z
    if n == 2:
      return (args[0], args[1], 0.0)
    if n == 3:
      return (args[0], args[1], args[2])
  raise Exception('unable to parse coordinates: ' + str(args))
def asPoint(p):
  """ create a point object from 2D/3D coordinates """
  return p if isinstance(p, Point) else Point(p)
def asDirection(d):
  """ create a direction object from a tuple (bearing, gradient) """
  return d if isinstance(d, Direction) else Direction(bearing=d[0], gradient=d[1])
#---------------------------------------------------------------------------------------------------
# Point class
#---------------------------------------------------------------------------------------------------
class Point(object):
  """ basic 3D point class """
  def __init__(self, *xyz):
    object.__init__(self)
    self.x, self.y, self.z = parse_coordinates(*xyz)
  def copy(self):
    """ return a copy """
    return Point(self.x, self.y, self.z)
  def coordinates(self):
    """ return the coordinates as a tuple (x,y,z) """
    return (self.x, self.y, self.z)
  def __getitem__(self, key):
    """ implement list style access to coordinates: p[0], p[1], p[2] """
    return self.coordinates()[key]
  def __str__(self):
    """ string representation of a point """
    return '(%.2f,%.2f,%.2f)' % self.coordinates()
  def middle(self, other):
    """ return the middle point between self and another point """
    return Point((self.x + other.x)/2.0, (self.y + other.y)/2.0, (self.z + other.z)/2.0)
  def distanceSquared(self, other):
    """ return the squared distance to another point """
    return (self.x - other.x)**2 + (self.y - other.y)**2 + (self.z - other.z)**2
  def distance(self, other):
    """ return the distance to another point """
    return numpy.sqrt(self.distanceSquared(other))
  def distanceXY(self, other):
    """ return the distance to another point, both projected to the xy-plane """
    return numpy.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)
  def __eq__(self, other):
    """ check if points coincide (distance below EPSILON) """
    if other is None: # identity test instead of ==, which would bounce back into other.__eq__
      return False
    return (self.distance(other) < EPSILON)
  def __ne__(self, other):
    """ check if points do not coincide """
    return not self.__eq__(other)
  def __lt__(self, other):
    """ lexicographic ordering on (x, y, z); __cmp__ below is never invoked by Python 3 """
    return (self.x, self.y, self.z) < (other.x, other.y, other.z)
  def __cmp__(self, other):
    """ compare the coordinates, first x, then y, then z (Python 2 relic, kept for compatibility) """
    if self.x == other.x:
      if (self.y == other.y):
        return (self.z < other.z)
      else:
        return (self.y < other.y)
    else:
      return (self.x < other.x)
  def projectXY(self, z = 0.0):
    """ return the projection of the point on the xy-plane """
    return Point(self.x, self.y, z)
  def transform(self, func):
    """ perform a coordinate transformation with the given function (x,y,z) to (x',y',z') """
    self.x, self.y, self.z = func((self.x, self.y, self.z))
  def plot(self, color = 'black', size = 5):
    """ plot the point in the xy-plane """
    pylab.plot([self.x], [self.y], color = color, linestyle = 'None', marker = '.', markersize = size)
#---------------------------------------------------------------------------------------------------
# Direction class
#---------------------------------------------------------------------------------------------------
class Direction(object):
  """ basic geometrical 3D direction class (angles stored in degrees) """
  def __init__(self, bearing, gradient = 0.0):
    object.__init__(self)
    # both bearing and gradient are stored in degrees
    self.bearing = bearing
    self.gradient = gradient
  def copy(self):
    """ return a copy """
    return Direction(self.bearing, self.gradient)
  def __getitem__(self, key):
    """ implement list style access to bearing and gradient: d[0], d[1] """
    return (self.bearing, self.gradient)[key]
  def bearingRadians(self):
    """ return the bearing (horizontal angle with the x-axis) in radians """
    return numpy.radians(self.bearing)
  def gradientRadians(self):
    """ return the gradient (vertical angle with the xy-plane) in radians """
    return numpy.radians(self.gradient)
  def __str__(self):
    """ return a string representation of the direction """
    return '[%.2f,%.2f]' % (self.bearing, self.gradient)
  def __eq__(self, other):
    """ check if directions coincide (both angles within EPSILON) """
    if other is None: # identity test instead of ==, which would bounce back into other.__eq__
      return False
    db = abs(self.bearing - other.bearing)
    dg = abs(self.gradient - other.gradient)
    return (db <= EPSILON) and (dg <= EPSILON)
  def __ne__(self, other):
    """ check if directions do not coincide """
    return not self.__eq__(other)
def directionFromTo(p1, p2):
  """ returns the direction from point 1 to point 2 """
  dx = p2.x - p1.x
  dy = p2.y - p1.y
  dz = p2.z - p1.z
  length = p1.distance(p2)
  bearing = numpy.degrees(numpy.arctan2(dy, dx))
  gradient = numpy.degrees(numpy.arcsin(dz/length))
  return Direction(bearing = bearing, gradient = gradient)
#---------------------------------------------------------------------------------------------------
# Test code
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
  # Quick visual smoke test: construct points from the various supported
  # argument forms and plot them in the xy-plane.
  points = []
  points.append(Point(1.2, 3.4))
  points.append(Point([5.6, 7.8, 9.0]))
  points.append(Point('(7.8, 9.0, 1.2)'))
  pylab.figure()
  for p in points:
    p.plot()
  try:
    pylab.show()
  except Exception: # narrowed from a bare except: ignore display errors, but not KeyboardInterrupt/SystemExit
    pass
| [
"numpy.radians",
"numpy.sqrt",
"pylab.plot",
"numpy.arcsin",
"pylab.figure",
"numpy.arctan2",
"pylab.show"
] | [((6690, 6704), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (6702, 6704), False, 'import pylab\n'), ((3341, 3402), 'numpy.sqrt', 'numpy.sqrt', (['((self.x - other.x) ** 2 + (self.y - other.y) ** 2)'], {}), '((self.x - other.x) ** 2 + (self.y - other.y) ** 2)\n', (3351, 3402), False, 'import numpy\n'), ((4364, 4458), 'pylab.plot', 'pylab.plot', (['[self.x]', '[self.y]'], {'color': 'color', 'linestyle': '"""None"""', 'marker': '"""."""', 'markersize': 'size'}), "([self.x], [self.y], color=color, linestyle='None', marker='.',\n markersize=size)\n", (4374, 4458), False, 'import pylab\n'), ((5319, 5346), 'numpy.radians', 'numpy.radians', (['self.bearing'], {}), '(self.bearing)\n', (5332, 5346), False, 'import numpy\n'), ((5470, 5498), 'numpy.radians', 'numpy.radians', (['self.gradient'], {}), '(self.gradient)\n', (5483, 5498), False, 'import numpy\n'), ((6754, 6766), 'pylab.show', 'pylab.show', ([], {}), '()\n', (6764, 6766), False, 'import pylab\n'), ((6227, 6248), 'numpy.arctan2', 'numpy.arctan2', (['dy', 'dx'], {}), '(dy, dx)\n', (6240, 6248), False, 'import numpy\n'), ((6276, 6298), 'numpy.arcsin', 'numpy.arcsin', (['(dz / siz)'], {}), '(dz / siz)\n', (6288, 6298), False, 'import numpy\n')] |
import unittest
from helpers import auditchecker, xroad
from main.maincontroller import MainController
from tests.xroad_global_groups_tests import global_groups_tests
class XroadMemberRemoveFromGlobalGroup(unittest.TestCase):
    """
    SERVICE_38 Remove an X-Road Member from a Global Group
    RIA URL: https://jira.ria.ee/browse/XTKB-183
    Depends on finishing other test(s): member add to global group
    Requires helper scenarios:
    X-Road version: 6.16.0
    """
    def __init__(self, methodName='test_member_remove_from_global_group'):
        unittest.TestCase.__init__(self, methodName)
    def test_member_remove_from_global_group(self):
        # Central server (cs) connection and SSH credentials from the config.
        main = MainController(self)
        cs_host = main.config.get('cs.host')
        cs_user = main.config.get('cs.user')
        cs_pass = main.config.get('cs.pass')
        cs_ssh_host = main.config.get('cs.ssh_host')
        cs_ssh_user = main.config.get('cs.ssh_user')
        cs_ssh_pass = main.config.get('cs.ssh_pass')
        group_name = main.config.get('cs.global_group')
        # Audit log checker verifies the removal is recorded on the central server.
        log_checker = auditchecker.AuditChecker(cs_ssh_host, cs_ssh_user, cs_ssh_pass)
        member_name = main.config.get('ss1.client_name')
        member_code = xroad.split_xroad_id(main.config.get('ss1.client_id'))['code']
        # Build the parameterized scenario before opening the UI session.
        test_member_remove_from_global_group = global_groups_tests.test_member_remove_from_global_group(main, member_name,
                                                                                                       member_code,
                                                                                                       group_name,
                                                                                                       log_checker=log_checker)
        try:
            main.reload_webdriver(cs_host, cs_user, cs_pass)
            test_member_remove_from_global_group()
        except:
            # Capture screenshots/logs for diagnostics, then re-raise.
            main.save_exception_data()
            raise
        finally:
            main.tearDown()
| [
"tests.xroad_global_groups_tests.global_groups_tests.test_member_remove_from_global_group",
"main.maincontroller.MainController",
"helpers.auditchecker.AuditChecker",
"unittest.TestCase.__init__"
] | [((561, 605), 'unittest.TestCase.__init__', 'unittest.TestCase.__init__', (['self', 'methodName'], {}), '(self, methodName)\n', (587, 605), False, 'import unittest\n'), ((674, 694), 'main.maincontroller.MainController', 'MainController', (['self'], {}), '(self)\n', (688, 694), False, 'from main.maincontroller import MainController\n'), ((1068, 1132), 'helpers.auditchecker.AuditChecker', 'auditchecker.AuditChecker', (['cs_ssh_host', 'cs_ssh_user', 'cs_ssh_pass'], {}), '(cs_ssh_host, cs_ssh_user, cs_ssh_pass)\n', (1093, 1132), False, 'from helpers import auditchecker, xroad\n'), ((1323, 1452), 'tests.xroad_global_groups_tests.global_groups_tests.test_member_remove_from_global_group', 'global_groups_tests.test_member_remove_from_global_group', (['main', 'member_name', 'member_code', 'group_name'], {'log_checker': 'log_checker'}), '(main, member_name,\n member_code, group_name, log_checker=log_checker)\n', (1379, 1452), False, 'from tests.xroad_global_groups_tests import global_groups_tests\n')] |
##########################################################################
#
# Copyright (c) 2019, <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import sys
import unittest
import functools
import Gaffer
import GafferTest
class ExtensionAlgoTest( GafferTest.TestCase ) :
	def setUp( self ) :
		GafferTest.TestCase.setUp( self )
		# The tests below append exported extension directories to sys.path;
		# restore the original path when each test finishes.
		self.addCleanup(
			functools.partial( setattr, sys, "path", sys.path[:] )
		)
	def testExport( self ) :
		# Export : build a Box wrapping an AddNode, promote its plugs and
		# register metadata, then export it as an extension.
		box = Gaffer.Box( "AddOne" )
		box["__add"] = GafferTest.AddNode()
		box["__add"]["op2"].setValue( 1 )
		Gaffer.PlugAlgo.promote( box["__add"]["op1"] ).setName( "in" )
		Gaffer.PlugAlgo.promote( box["__add"]["sum"] ).setName( "out" )
		Gaffer.Metadata.registerValue( box, "description", "Test" )
		Gaffer.Metadata.registerValue( box["in"], "description", "The input" )
		Gaffer.Metadata.registerValue( box["out"], "description", "The output" )
		Gaffer.Metadata.registerValue( box["in"], "test", 1 )
		Gaffer.ExtensionAlgo.exportExtension( "TestExtension", [ box ], self.temporaryDirectory() )
		self.assertTrue( os.path.exists( os.path.join( self.temporaryDirectory(), "python", "TestExtension" ) ) )
		sys.path.append( os.path.join( self.temporaryDirectory(), "python" ) )
		# Import and test : the exported module should provide a working node.
		import TestExtension
		script = Gaffer.ScriptNode()
		script["node"] = TestExtension.AddOne()
		script["node"]["in"].setValue( 2 )
		self.assertEqual( script["node"]["out"].getValue(), 3 )
		import TestExtensionUI
		def assertExpectedMetadata( node ) :
			# The exported node carries the Box's metadata statically, so no
			# per-instance metadata should be registered.
			self.assertEqual( Gaffer.Metadata.registeredValues( node, instanceOnly = True ), [] )
			self.assertEqual( Gaffer.Metadata.registeredValues( node["in"], instanceOnly = True ), [] )
			self.assertEqual( Gaffer.Metadata.registeredValues( node["out"], instanceOnly = True ), [] )
			self.assertEqual( Gaffer.Metadata.value( node, "description" ), "Test" )
			self.assertEqual( Gaffer.Metadata.value( node["in"], "description" ), "The input" )
			self.assertEqual( Gaffer.Metadata.value( node["out"], "description" ), "The output" )
			self.assertEqual( Gaffer.Metadata.value( node["in"], "test" ), 1 )
		assertExpectedMetadata( script["node"] )
		# Copy/paste and test : serialisation round-trips the node and metadata.
		script.execute( script.serialise( filter = Gaffer.StandardSet( { script["node"] } ) ) )
		self.assertEqual( script["node1"].keys(), script["node"].keys() )
		self.assertEqual( script["node1"]["out"].getValue(), script["node"]["out"].getValue() )
		assertExpectedMetadata( script["node1"] )
	def testPlugTypes( self ) :
		# Export should support all of these plug types, preserving type and
		# value on the exported node.
		box = Gaffer.Box( "PlugTypes" )
		box["int"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		box["float"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		box["string"] = Gaffer.StringPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		box["v2i"] = Gaffer.V2iPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		box["v3i"] = Gaffer.V3iPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		box["color4f"] = Gaffer.Color4fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		box["spline"] = Gaffer.SplinefColor3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		Gaffer.ExtensionAlgo.exportExtension( "PlugTypesExtension", [ box ], self.temporaryDirectory() )
		sys.path.append( os.path.join( self.temporaryDirectory(), "python" ) )
		import PlugTypesExtension
		node = PlugTypesExtension.PlugTypes()
		for plug in Gaffer.Plug.Range( node ) :
			self.assertIsInstance( plug, type( box[plug.getName() ] ) )
			if hasattr( plug, "getValue" ) :
				self.assertEqual( plug.getValue(), box[plug.getName()].getValue() )
		# The Dynamic flag must not survive export; plugs are now static.
		for plug in Gaffer.Plug.RecursiveRange( node ) :
			self.assertFalse( plug.getFlags( Gaffer.Plug.Flags.Dynamic ) )
	def testInternalExpression( self ) :
		# A Box containing an internal Expression node should still export
		# and evaluate correctly.
		box = Gaffer.Box( "AddOne" )
		box["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		box["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		box["__expression"] = Gaffer.Expression()
		box["__expression"].setExpression( """parent["out"] = parent["in"] + 1""" )
		Gaffer.ExtensionAlgo.exportExtension( "TestExtensionWithExpression", [ box ], self.temporaryDirectory() )
		sys.path.append( os.path.join( self.temporaryDirectory(), "python" ) )
		import TestExtensionWithExpression
		script = Gaffer.ScriptNode()
		script["node"] = TestExtensionWithExpression.AddOne()
		script["node"]["in"].setValue( 2 )
		self.assertEqual( script["node"]["out"].getValue(), 3 )
		# Test copy/paste
		script.execute( script.serialise( filter = Gaffer.StandardSet( { script["node"] } ) ) )
		self.assertEqual( script["node1"].keys(), script["node"].keys() )
		self.assertEqual( script["node1"]["out"].getValue(), 3 )
# Allow running the tests in this file directly.
if __name__ == "__main__":
	unittest.main()
| [
"Gaffer.Box",
"unittest.main",
"Gaffer.PlugAlgo.promote",
"Gaffer.StandardSet",
"TestExtension.AddOne",
"Gaffer.V3iPlug",
"Gaffer.Metadata.value",
"Gaffer.Plug.Range",
"Gaffer.Color4fPlug",
"Gaffer.IntPlug",
"Gaffer.FloatPlug",
"Gaffer.StringPlug",
"Gaffer.Metadata.registerValue",
"Gaffer.... | [((6556, 6571), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6569, 6571), False, 'import unittest\n'), ((1943, 1974), 'GafferTest.TestCase.setUp', 'GafferTest.TestCase.setUp', (['self'], {}), '(self)\n', (1968, 1974), False, 'import GafferTest\n'), ((2107, 2127), 'Gaffer.Box', 'Gaffer.Box', (['"""AddOne"""'], {}), "('AddOne')\n", (2117, 2127), False, 'import Gaffer\n'), ((2148, 2168), 'GafferTest.AddNode', 'GafferTest.AddNode', ([], {}), '()\n', (2166, 2168), False, 'import GafferTest\n'), ((2339, 2396), 'Gaffer.Metadata.registerValue', 'Gaffer.Metadata.registerValue', (['box', '"""description"""', '"""Test"""'], {}), "(box, 'description', 'Test')\n", (2368, 2396), False, 'import Gaffer\n'), ((2401, 2469), 'Gaffer.Metadata.registerValue', 'Gaffer.Metadata.registerValue', (["box['in']", '"""description"""', '"""The input"""'], {}), "(box['in'], 'description', 'The input')\n", (2430, 2469), False, 'import Gaffer\n'), ((2474, 2544), 'Gaffer.Metadata.registerValue', 'Gaffer.Metadata.registerValue', (["box['out']", '"""description"""', '"""The output"""'], {}), "(box['out'], 'description', 'The output')\n", (2503, 2544), False, 'import Gaffer\n'), ((2549, 2600), 'Gaffer.Metadata.registerValue', 'Gaffer.Metadata.registerValue', (["box['in']", '"""test"""', '(1)'], {}), "(box['in'], 'test', 1)\n", (2578, 2600), False, 'import Gaffer\n'), ((2937, 2956), 'Gaffer.ScriptNode', 'Gaffer.ScriptNode', ([], {}), '()\n', (2954, 2956), False, 'import Gaffer\n'), ((2976, 2998), 'TestExtension.AddOne', 'TestExtension.AddOne', ([], {}), '()\n', (2996, 2998), False, 'import TestExtension\n'), ((4165, 4188), 'Gaffer.Box', 'Gaffer.Box', (['"""PlugTypes"""'], {}), "('PlugTypes')\n", (4175, 4188), False, 'import Gaffer\n'), ((4206, 4281), 'Gaffer.IntPlug', 'Gaffer.IntPlug', ([], {'flags': '(Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)'}), '(flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)\n', (4220, 4281), False, 'import Gaffer\n'), ((4303, 4380), 
'Gaffer.FloatPlug', 'Gaffer.FloatPlug', ([], {'flags': '(Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)'}), '(flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)\n', (4319, 4380), False, 'import Gaffer\n'), ((4403, 4481), 'Gaffer.StringPlug', 'Gaffer.StringPlug', ([], {'flags': '(Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)'}), '(flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)\n', (4420, 4481), False, 'import Gaffer\n'), ((4501, 4576), 'Gaffer.V2iPlug', 'Gaffer.V2iPlug', ([], {'flags': '(Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)'}), '(flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)\n', (4515, 4576), False, 'import Gaffer\n'), ((4596, 4671), 'Gaffer.V3iPlug', 'Gaffer.V3iPlug', ([], {'flags': '(Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)'}), '(flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)\n', (4610, 4671), False, 'import Gaffer\n'), ((4695, 4774), 'Gaffer.Color4fPlug', 'Gaffer.Color4fPlug', ([], {'flags': '(Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)'}), '(flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)\n', (4713, 4774), False, 'import Gaffer\n'), ((4797, 4888), 'Gaffer.SplinefColor3fPlug', 'Gaffer.SplinefColor3fPlug', ([], {'flags': '(Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)'}), '(flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.\n Flags.Dynamic)\n', (4822, 4888), False, 'import Gaffer\n'), ((5099, 5129), 'PlugTypesExtension.PlugTypes', 'PlugTypesExtension.PlugTypes', ([], {}), '()\n', (5127, 5129), False, 'import PlugTypesExtension\n'), ((5145, 5168), 'Gaffer.Plug.Range', 'Gaffer.Plug.Range', (['node'], {}), '(node)\n', (5162, 5168), False, 'import Gaffer\n'), ((5359, 5391), 'Gaffer.Plug.RecursiveRange', 'Gaffer.Plug.RecursiveRange', (['node'], {}), '(node)\n', (5385, 5391), False, 'import Gaffer\n'), ((5510, 5530), 'Gaffer.Box', 'Gaffer.Box', (['"""AddOne"""'], {}), "('AddOne')\n", (5520, 5530), False, 'import Gaffer\n'), ((5548, 5623), 
'Gaffer.IntPlug', 'Gaffer.IntPlug', ([], {'flags': '(Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)'}), '(flags=Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)\n', (5562, 5623), False, 'import Gaffer\n'), ((5643, 5760), 'Gaffer.IntPlug', 'Gaffer.IntPlug', ([], {'direction': 'Gaffer.Plug.Direction.Out', 'flags': '(Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic)'}), '(direction=Gaffer.Plug.Direction.Out, flags=Gaffer.Plug.Flags\n .Default | Gaffer.Plug.Flags.Dynamic)\n', (5657, 5760), False, 'import Gaffer\n'), ((5787, 5806), 'Gaffer.Expression', 'Gaffer.Expression', ([], {}), '()\n', (5804, 5806), False, 'import Gaffer\n'), ((6117, 6136), 'Gaffer.ScriptNode', 'Gaffer.ScriptNode', ([], {}), '()\n', (6134, 6136), False, 'import Gaffer\n'), ((6156, 6192), 'TestExtensionWithExpression.AddOne', 'TestExtensionWithExpression.AddOne', ([], {}), '()\n', (6190, 6192), False, 'import TestExtensionWithExpression\n'), ((2000, 2052), 'functools.partial', 'functools.partial', (['setattr', 'sys', '"""path"""', 'sys.path[:]'], {}), "(setattr, sys, 'path', sys.path[:])\n", (2017, 2052), False, 'import functools\n'), ((2207, 2251), 'Gaffer.PlugAlgo.promote', 'Gaffer.PlugAlgo.promote', (["box['__add']['op1']"], {}), "(box['__add']['op1'])\n", (2230, 2251), False, 'import Gaffer\n'), ((2272, 2316), 'Gaffer.PlugAlgo.promote', 'Gaffer.PlugAlgo.promote', (["box['__add']['sum']"], {}), "(box['__add']['sum'])\n", (2295, 2316), False, 'import Gaffer\n'), ((3182, 3239), 'Gaffer.Metadata.registeredValues', 'Gaffer.Metadata.registeredValues', (['node'], {'instanceOnly': '(True)'}), '(node, instanceOnly=True)\n', (3214, 3239), False, 'import Gaffer\n'), ((3271, 3334), 'Gaffer.Metadata.registeredValues', 'Gaffer.Metadata.registeredValues', (["node['in']"], {'instanceOnly': '(True)'}), "(node['in'], instanceOnly=True)\n", (3303, 3334), False, 'import Gaffer\n'), ((3366, 3430), 'Gaffer.Metadata.registeredValues', 'Gaffer.Metadata.registeredValues', (["node['out']"], 
{'instanceOnly': '(True)'}), "(node['out'], instanceOnly=True)\n", (3398, 3430), False, 'import Gaffer\n'), ((3463, 3505), 'Gaffer.Metadata.value', 'Gaffer.Metadata.value', (['node', '"""description"""'], {}), "(node, 'description')\n", (3484, 3505), False, 'import Gaffer\n'), ((3539, 3587), 'Gaffer.Metadata.value', 'Gaffer.Metadata.value', (["node['in']", '"""description"""'], {}), "(node['in'], 'description')\n", (3560, 3587), False, 'import Gaffer\n'), ((3626, 3675), 'Gaffer.Metadata.value', 'Gaffer.Metadata.value', (["node['out']", '"""description"""'], {}), "(node['out'], 'description')\n", (3647, 3675), False, 'import Gaffer\n'), ((3715, 3756), 'Gaffer.Metadata.value', 'Gaffer.Metadata.value', (["node['in']", '"""test"""'], {}), "(node['in'], 'test')\n", (3736, 3756), False, 'import Gaffer\n'), ((3879, 3915), 'Gaffer.StandardSet', 'Gaffer.StandardSet', (["{script['node']}"], {}), "({script['node']})\n", (3897, 3915), False, 'import Gaffer\n'), ((6355, 6391), 'Gaffer.StandardSet', 'Gaffer.StandardSet', (["{script['node']}"], {}), "({script['node']})\n", (6373, 6391), False, 'import Gaffer\n')] |
from numpy import average, number
from textblob import TextBlob
class ScaleUtilities:
    """Score text polarity with TextBlob and keep running totals.

    The class attributes ``average`` (sum of polarity scores) and
    ``number`` (count of scored strings) are shared accumulators
    across all calls.
    """
    average = 0
    number = 0

    def __init__(self, string, number):
        self.string = string
        # Bug fix: the `number` argument was previously discarded.
        self.number = number

    def get_subjectivity_of(string):
        """Return *string*'s polarity scaled to [-5, 5] and update the totals.

        Deliberately takes no ``self``; call as
        ``ScaleUtilities.get_subjectivity_of(text)``.
        """
        polarity = TextBlob(string).sentiment.polarity * 5
        # Bug fix: the bare `number += 1` / `average += polarity` in the
        # original raised UnboundLocalError; qualify the class attributes.
        ScaleUtilities.number += 1
        ScaleUtilities.average += polarity
        return polarity

    def average_opinion():
        """Return the mean polarity of everything scored so far.

        Exits the process when nothing has been scored yet.
        """
        if ScaleUtilities.number == 0:
            print("You idiot")
            exit(1)
        return ScaleUtilities.average / ScaleUtilities.number
| [
"textblob.TextBlob"
] | [((245, 261), 'textblob.TextBlob', 'TextBlob', (['string'], {}), '(string)\n', (253, 261), False, 'from textblob import TextBlob\n')] |
from chibi.atlas import Chibi_atlas
from chibi_command import Command, Command_result
from chibi_hybrid.chibi_hybrid import Chibi_hybrid
__all__ = [ 'Create', 'Start', 'Stop', 'Attach', 'Info', 'Destroy' ]
class Info_result( Command_result ):
    """Result wrapper for `lxc-info` that parses its key/value output."""
    def parse_result( self ):
        """Parse the raw `lxc-info` text into a Chibi_atlas of lowercased keys/values."""
        if not self:
            return
        result = Chibi_atlas()
        for l in self.result.split( '\n' ):
            l = l.strip()
            if not l:
                continue
            # Bug fix: split on the first ':' only; values containing
            # further colons (e.g. IPv6 addresses) previously made the
            # two-element unpack raise ValueError.
            k, v = l.split( ':', 1 )
            v = v.strip()
            result[k.lower()] = v.lower()
        self.result = result
    # Deprecated -- scheduled to stop being used
    # (original note in Spanish: "lo dejare de usar").
    @property
    def is_running( self ):
        """True when the command succeeded and the parsed state is 'running'."""
        return self and self.result.state == 'running'
class LXC( Command ):
    """Base class for the lxc-* command wrappers."""
    command = 'lxc'
    # NOTE(review): presumably `captive = False` means output is streamed
    # rather than captured (chibi_command convention) -- TODO confirm.
    captive = False
    @Chibi_hybrid
    def name( cls, name ):
        # Class form: build a new command carrying `-n <container-name>`.
        return cls( '-n', name )
    @name.instancemethod
    def name( self, name ):
        # Instance form: append `-n <container-name>`; returns self for chaining.
        self.add_args( '-n', name )
        return self
class Create( LXC ):
    """Wrapper for `lxc-create` (creates a new container)."""
    command = 'lxc-create'
    captive = False
    @Chibi_hybrid
    def template( cls, template ):
        # Class form: build a new command carrying `-t <template>`.
        return cls( '-t', template )
    @template.instancemethod
    def template( self, template ):
        # Instance form: append `-t <template>`; returns self for chaining.
        self.add_args( '-t', template )
        return self
    def parameters( self, *args ):
        # Arguments placed after `--` are passed through to the template.
        self.add_args( '--', *args )
        return self
class Start( LXC ):
    """Wrapper for `lxc-start` (starts a container)."""
    command = 'lxc-start'
    captive = False
    @Chibi_hybrid
    def daemon( cls ):
        # Class form: build a new command carrying `-d` (run detached).
        return cls( '-d' )
    @daemon.instancemethod
    def daemon( self ):
        # Instance form: append `-d`; returns self for chaining.
        self.add_args( '-d' )
        return self
class Stop( LXC ):
    """Wrapper for `lxc-stop` (stops a container)."""
    command = 'lxc-stop'
    captive = False
class Attach( LXC ):
    """Wrapper for `lxc-attach` (runs a command inside a container)."""
    command = 'lxc-attach'
    # Always start from a clean environment inside the container.
    args = ( '--clear-env', )
    captive = False
    @Chibi_hybrid
    def set_var( cls, name, value ):
        # Class form: build a new command carrying `--set-var name=value`.
        return cls( '--set-var', f"{name}={value}" )
    @set_var.instancemethod
    def set_var( self, name, value ):
        # Instance form: append `--set-var name=value`; returns self for chaining.
        self.add_args( '--set-var', f"{name}={value}" )
        return self
    def build_tuple( self, *args, **kw ):
        # Flatten any nested Command arguments into their own argv tuples,
        # then place everything after a `--` separator so lxc-attach treats
        # it as the command to run inside the container.
        new_args = []
        for arg in args:
            if isinstance( arg, Command ):
                new_args += list( arg.build_tuple() )
            else:
                new_args.append( arg )
        if self.delegate:
            # A delegate (e.g. sudo/ssh prefix) is prepended to the argv.
            delegate_tuple = self.build_delegate()
            return (
                *delegate_tuple, self.command,
                *self.build_kw( **kw ), *self.args, '--', *new_args )
        return (
            self.command, *self.build_kw( **kw ), *self.args, '--', *new_args )
class Info( LXC ):
    """Wrapper for `lxc-info`; captured output is parsed by Info_result."""
    command = 'lxc-info'
    captive = True
    # `-H`: print raw (non-humanized) values, which keeps parsing simple.
    args = ( '-H', )
    result_class = Info_result
class Destroy( LXC ):
    """Wrapper for `lxc-destroy` (deletes a container)."""
    command = 'lxc-destroy'
    captive = False
| [
"chibi.atlas.Chibi_atlas"
] | [((334, 347), 'chibi.atlas.Chibi_atlas', 'Chibi_atlas', ([], {}), '()\n', (345, 347), False, 'from chibi.atlas import Chibi_atlas\n')] |
import time
from time import sleep
def print_elapsed_time(exit_event):
    """Print a live elapsed-time counter (one line, carriage-return style)
    until *exit_event* is set.

    Parameters
    ----------
    exit_event :
        threading.Event-like object providing ``is_set()`` and
        ``wait(timeout)`` (assumed to be a ``threading.Event`` --
        TODO confirm with callers).
    """
    start_time = time.time()
    while not exit_event.is_set():
        print(f'Running for {round(time.time() - start_time)} s', end='\r')
        # Event.wait() returns True the moment the event is set, so the
        # loop exits immediately; the original's unconditional sleep(1)
        # could delay shutdown by up to a second.
        if exit_event.wait(1):
            break
| [
"time.time",
"time.sleep"
] | [((90, 101), 'time.time', 'time.time', ([], {}), '()\n', (99, 101), False, 'import time\n'), ((254, 262), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (259, 262), False, 'from time import sleep\n'), ((205, 216), 'time.time', 'time.time', ([], {}), '()\n', (214, 216), False, 'import time\n')] |
""" Adapt Server
This module contains all the functionality necessary to
setup an Adapt Server node (not really).
Example
-------
python3 adapt_server.py -v ingest -s testfile.txt
"""
import filesystem
from filesystem import FSOperationError
import sys
import os
import argparse
import logging
from general import hash_entity
from user import User, Keystore
from asset import Asset
from blockchain import BCDB
import config as cfg
# Module-wide logging: bare-message format at INFO by default;
# AdaptServer.setDebug() and the -v CLI flag raise this to DEBUG.
logging.basicConfig(format='%(message)s', level=logging.INFO)
LOG = logging.getLogger(__name__)
class AdaptServer():
    """A class to represent an Adapt Server node.

    Attributes
    ----------
    user_list : list
        Holds list of users. (Will probably be removed)
    keystore : Keystore or None
        Maps transaction ids to (block pointer, filename) pairs.
    fs : filesystem.UpssFs
        Handle to the ADAPT filesystem.
    bdb : BCDB
        Handle to the blockchain database.

    Methods
    -------
    ingest(file_path, user):
        Ingests a new file (or directory of files) into ADAPT.
    retrieve(tid, user):
        Retrieves a file from ADAPT.
    commit(file_path, prev_tid, user):
        Commits a modified version of a file already existing in ADAPT.
    """

    def initKS(self, num_blocks=5000):
        """Create and persist a fresh keystore.

        Parameters
        ----------
        num_blocks : int, optional
            Currently unused; kept for interface compatibility
            (presumably intended for filesystem sizing -- TODO confirm).
        """
        self.keystore = Keystore()
        self.keystore.save()
        print("keystore init")

    def setDebug(self):
        """Raise log verbosity to DEBUG for this module and the filesystem layer."""
        LOG.setLevel(logging.DEBUG)
        filesystem.LOG.setLevel(logging.DEBUG)

    def __init__(self, bc_address):
        """Constructs all necessary attributes for an AdaptServer object.

        Parameters
        ----------
        bc_address : str
            Address of the blockchain node to connect to.
        """
        self.user_list = []
        self.keystore = None
        self.fs = filesystem.UpssFs()
        self.bdb = BCDB(bc_address)

    def ingest(self, file_path, user):
        """Ingests a new file (or directory of files) into ADAPT.

        Parameters
        ----------
        file_path : str
            Path of file (or directory) to ingest
        user : User
            User performing the ingest

        Returns
        -------
        A : Asset
            The recorded asset; its ``.id`` is the transaction id.
        """
        file_path = os.path.abspath(file_path)
        if not os.path.exists(file_path):
            sys.exit("File path provided not valid.")
        try:
            self.fs.push(file_path)
        except FSOperationError:
            LOG.error("Ingest operation failed.")
            sys.exit(1)
        fhash = hash_entity(file_path)
        LOG.debug(f"Hash for {file_path}:\n{fhash}\n")
        filename = os.path.basename(file_path)
        data = self.fs.get_data(filename)
        LOG.debug(f"Blockname: {data['bname']}\n")
        A = Asset(
            user.public_key,
            fhash,
            'PUT',
            tags={'ingest'},
            bc_handle=self.bdb
        )
        A.push(user.private_key)
        # Remember where the ingested blocks live so the asset id can be
        # resolved back to filesystem data on retrieve/commit.
        self.keystore.add(A.id, data['bpointer'], filename)
        self.keystore.save()
        return A

    def retrieve(self, tid, user):
        """Retrieves a file from ADAPT.

        Parameters
        ----------
        tid : str
            Transaction id of requested file
        user : User
            User performing the retrieval

        Returns
        -------
        A : Asset
            The recorded retrieval asset; its ``.id`` is the transaction id.
        """
        prev_asset = Asset.from_id(tid, self.bdb)
        LOG.debug(f"RETRIEVE:\n{prev_asset}")
        try:
            (bpointer, filename) = self.keystore[tid]
            LOG.debug(f"bpointer: {bpointer}")
        except KeyError:
            LOG.error("Key does not exist in keystore. Exiting now.")
            sys.exit()
        # Bug fix: the destination previously used a literal "(unknown)"
        # placeholder, so every retrieval wrote to the same file; use the
        # filename stored in the keystore instead.
        file_path = f"{filesystem.LOC}/{filename}"
        try:
            self.fs.pull(bpointer, file_path)
        except FSOperationError:
            LOG.error("Retrieve operation failed.")
            sys.exit(1)
        LOG.info(f"Copied {filename} from ADAPT-FS to ADAPT workspace")
        local_file_hash = hash_entity(file_path)
        # bc_file_hash = prev_asset.filehash
        # Check if the file has been modified without it being recorded on the blockchain
        # if local_file_hash != bc_file_hash:
        #     LOG.error(f"{file_path} has been modified or tampered with.")
        LOG.debug(f"fhash: {local_file_hash}")
        A = Asset(
            user.public_key,
            local_file_hash,
            'GET',
            parent=prev_asset.id,
            tags={'retrieve'},
            bc_handle=self.bdb
        )
        A.push(user.private_key)
        return A

    def commit(self, file_path, prev_tid, user):
        """Commits a modified version of a file already existing in ADAPT.

        Parameters
        ----------
        file_path : str
            Path of file to commit
        prev_tid : str
            Transaction id of file prior to modifications
        user : User
            User performing the commit

        Returns
        -------
        A : Asset
            The recorded commit asset; its ``.id`` is the transaction id.
        """
        file_path = os.path.abspath(file_path)
        if not os.path.exists(file_path):
            sys.exit("File path provided not valid.")
        prev_asset = Asset.from_id(prev_tid, self.bdb)
        LOG.debug(f"COMMIT: {prev_asset}")
        try:
            (bpointer, filename) = self.keystore[prev_tid]
            LOG.debug(f"bpointer: {bpointer}")
        except KeyError:
            LOG.error("Key does not exist in keystore. Exiting now.")
            sys.exit()
        try:
            # Push under the original filename so the new version replaces
            # the old one in the filesystem namespace.
            self.fs.push(file_path, filename)
        except FSOperationError:
            LOG.error("Commit operation failed.")
            sys.exit(1)
        try:
            data = self.fs.get_data(filename)
        except FSOperationError:
            LOG.error("Could not find information on given file.")
            sys.exit(1)
        LOG.debug(f"New Blockname: {data['bname']}")
        newfhash = hash_entity(file_path)
        LOG.debug(f"Blockpointer: {data['bpointer']}")
        A = Asset(
            user.public_key,
            newfhash,
            'PUT',
            parent=prev_asset.id,
            tags={'commit'},
            bc_handle=self.bdb
        )
        A.push(user.private_key)
        self.keystore.add(A.id, data['bpointer'], filename)
        self.keystore.save()
        return A
def main():
    """CLI entry point: parse arguments and dispatch ingest/retrieve/commit/init."""
    parser = argparse.ArgumentParser(description="Advanced Detection and Prevention of Tampering")
    parser.add_argument('-v', '--verbose', help='increase output verbosity (DEV USE ONLY)', action="store_true")
    subparser = parser.add_subparsers(dest='command')
    # One sub-command per server operation.
    ingest = subparser.add_parser('ingest', help="ingest a file into ADAPT")
    retrieve = subparser.add_parser('retrieve', help="retrieve a file from ADAPT")
    commit = subparser.add_parser('commit', help="commit a file into ADAPT")
    init = subparser.add_parser('init', help="initialize ADAPT (only the filesystem)")
    ingest.add_argument('-s', '--source', type=str, required=True, help="stuff")
    retrieve.add_argument('-t', '--tid', type=str, required=True)
    commit.add_argument('-s', '--source', type=str, required=True)
    commit.add_argument('-t', '--tid', type=str, required=True)
    init.add_argument('-n', '--num_blocks', type=int, required=False, default=5000, help="number of blocks")
    args = parser.parse_args()
    # NOTE(review): hard-coded development user; replace with real authentication.
    user = User("Foobar", "1<PASSWORD>")
    node = AdaptServer(cfg.node_addresses['dev'])
    if len(sys.argv) == 1:
        # No arguments at all: show usage and bail out.
        parser.print_help()
        sys.exit(1)
    if args.verbose:
        LOG.setLevel(logging.DEBUG)
        filesystem.LOG.setLevel(logging.DEBUG)
    if args.command == 'init':
        # 'init' builds a fresh keystore and filesystem; every other
        # command loads the previously saved keystore.
        node.keystore = Keystore()
        node.keystore.save()
        try:
            node.fs.initialize(args.num_blocks)
        except FSOperationError:
            LOG.error("Filesystem Initialization Failed.")
            sys.exit(1)
    else:
        node.keystore = Keystore.load()
    if args.command == 'ingest':
        tid = node.ingest(args.source, user)
        LOG.info(f"TID: {tid}")
    elif args.command == 'retrieve':
        tid = node.retrieve(args.tid, user)
        LOG.info(f"TID: {tid}")
    elif args.command == 'commit':
        tid = node.commit(args.source, args.tid, user)
        LOG.info(f"TID: {tid}")
# Entry point when run as a script.
if __name__ == "__main__":
    main()
| [
"logging.basicConfig",
"logging.getLogger",
"os.path.exists",
"user.Keystore",
"user.Keystore.load",
"argparse.ArgumentParser",
"filesystem.UpssFs",
"asset.Asset.from_id",
"asset.Asset",
"general.hash_entity",
"os.path.basename",
"sys.exit",
"os.path.abspath",
"filesystem.LOG.setLevel",
... | [((440, 501), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""', 'level': 'logging.INFO'}), "(format='%(message)s', level=logging.INFO)\n", (459, 501), False, 'import logging\n'), ((508, 535), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (525, 535), False, 'import logging\n'), ((6197, 6287), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Advanced Detection and Prevention of Tampering"""'}), "(description=\n 'Advanced Detection and Prevention of Tampering')\n", (6220, 6287), False, 'import argparse\n'), ((7211, 7240), 'user.User', 'User', (['"""Foobar"""', '"""1<PASSWORD>"""'], {}), "('Foobar', '1<PASSWORD>')\n", (7215, 7240), False, 'from user import User, Keystore\n'), ((1077, 1087), 'user.Keystore', 'Keystore', ([], {}), '()\n', (1085, 1087), False, 'from user import User, Keystore\n'), ((1222, 1260), 'filesystem.LOG.setLevel', 'filesystem.LOG.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (1245, 1260), False, 'import filesystem\n'), ((1460, 1479), 'filesystem.UpssFs', 'filesystem.UpssFs', ([], {}), '()\n', (1477, 1479), False, 'import filesystem\n'), ((1500, 1516), 'blockchain.BCDB', 'BCDB', (['bc_address'], {}), '(bc_address)\n', (1504, 1516), False, 'from blockchain import BCDB\n'), ((1931, 1957), 'os.path.abspath', 'os.path.abspath', (['file_path'], {}), '(file_path)\n', (1946, 1957), False, 'import os\n'), ((2229, 2251), 'general.hash_entity', 'hash_entity', (['file_path'], {}), '(file_path)\n', (2240, 2251), False, 'from general import hash_entity\n'), ((2327, 2354), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (2343, 2354), False, 'import os\n'), ((2463, 2536), 'asset.Asset', 'Asset', (['user.public_key', 'fhash', '"""PUT"""'], {'tags': "{'ingest'}", 'bc_handle': 'self.bdb'}), "(user.public_key, fhash, 'PUT', tags={'ingest'}, bc_handle=self.bdb)\n", (2468, 2536), False, 'from asset import Asset\n'), ((3137, 
3165), 'asset.Asset.from_id', 'Asset.from_id', (['tid', 'self.bdb'], {}), '(tid, self.bdb)\n', (3150, 3165), False, 'from asset import Asset\n'), ((3765, 3787), 'general.hash_entity', 'hash_entity', (['file_path'], {}), '(file_path)\n', (3776, 3787), False, 'from general import hash_entity\n'), ((4106, 4218), 'asset.Asset', 'Asset', (['user.public_key', 'local_file_hash', '"""GET"""'], {'parent': 'prev_asset.id', 'tags': "{'retrieve'}", 'bc_handle': 'self.bdb'}), "(user.public_key, local_file_hash, 'GET', parent=prev_asset.id, tags={\n 'retrieve'}, bc_handle=self.bdb)\n", (4111, 4218), False, 'from asset import Asset\n'), ((4855, 4881), 'os.path.abspath', 'os.path.abspath', (['file_path'], {}), '(file_path)\n', (4870, 4881), False, 'import os\n'), ((5000, 5033), 'asset.Asset.from_id', 'Asset.from_id', (['prev_tid', 'self.bdb'], {}), '(prev_tid, self.bdb)\n', (5013, 5033), False, 'from asset import Asset\n'), ((5740, 5762), 'general.hash_entity', 'hash_entity', (['file_path'], {}), '(file_path)\n', (5751, 5762), False, 'from general import hash_entity\n'), ((5832, 5935), 'asset.Asset', 'Asset', (['user.public_key', 'newfhash', '"""PUT"""'], {'parent': 'prev_asset.id', 'tags': "{'commit'}", 'bc_handle': 'self.bdb'}), "(user.public_key, newfhash, 'PUT', parent=prev_asset.id, tags={\n 'commit'}, bc_handle=self.bdb)\n", (5837, 5935), False, 'from asset import Asset\n'), ((7360, 7371), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7368, 7371), False, 'import sys\n'), ((7438, 7476), 'filesystem.LOG.setLevel', 'filesystem.LOG.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (7461, 7476), False, 'import filesystem\n'), ((7533, 7543), 'user.Keystore', 'Keystore', ([], {}), '()\n', (7541, 7543), False, 'from user import User, Keystore\n'), ((7785, 7800), 'user.Keystore.load', 'Keystore.load', ([], {}), '()\n', (7798, 7800), False, 'from user import User, Keystore\n'), ((1974, 1999), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1988, 
1999), False, 'import os\n'), ((2013, 2054), 'sys.exit', 'sys.exit', (['"""File path provided not valid."""'], {}), "('File path provided not valid.')\n", (2021, 2054), False, 'import sys\n'), ((4897, 4922), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (4911, 4922), False, 'import os\n'), ((4936, 4977), 'sys.exit', 'sys.exit', (['"""File path provided not valid."""'], {}), "('File path provided not valid.')\n", (4944, 4977), False, 'import sys\n'), ((2200, 2211), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2208, 2211), False, 'import sys\n'), ((3433, 3443), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3441, 3443), False, 'import sys\n'), ((3653, 3664), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3661, 3664), False, 'import sys\n'), ((5304, 5314), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5312, 5314), False, 'import sys\n'), ((5470, 5481), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5478, 5481), False, 'import sys\n'), ((5654, 5665), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5662, 5665), False, 'import sys\n'), ((7738, 7749), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7746, 7749), False, 'import sys\n')] |
import gym
import pixelate_arena
import time
import pybullet as p
import os
if __name__ == "__main__":
    # Run from the parent directory of the current working directory
    # (presumably so the gym environment can locate its assets -- TODO confirm).
    parent_path = os.path.dirname(os.getcwd())
    os.chdir(parent_path)
    env = gym.make("pixelate_arena-v0")
    x=0
    # Step the simulation forever at ~1 Hz; antidotes are unlocked once,
    # when the step counter reaches 10000.
    while True:
        p.stepSimulation()
        if x==10000:
            env.unlock_antidotes()
        x+=1
        time.sleep(1)
"time.sleep",
"os.getcwd",
"os.chdir",
"pybullet.stepSimulation",
"gym.make"
] | [((155, 176), 'os.chdir', 'os.chdir', (['parent_path'], {}), '(parent_path)\n', (163, 176), False, 'import os\n'), ((187, 216), 'gym.make', 'gym.make', (['"""pixelate_arena-v0"""'], {}), "('pixelate_arena-v0')\n", (195, 216), False, 'import gym\n'), ((341, 354), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (351, 354), False, 'import time\n'), ((138, 149), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (147, 149), False, 'import os\n'), ((249, 267), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (265, 267), True, 'import pybullet as p\n')] |
from flask import Flask
def create_app():
app = Flask(__name__)
app.config["SECRET_KEY"] = "secret-key-goes-here"
# blueprint for auth routes in our app
from .blue_prints.auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
# blueprint for non-auth parts of app
from .blue_prints.main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .blue_prints.offers import offers as offers_blueprint
app.register_blueprint(offers_blueprint)
from .blue_prints.orders import orders as orders_blueprint
app.register_blueprint(orders_blueprint)
return app
| [
"flask.Flask"
] | [((54, 69), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (59, 69), False, 'from flask import Flask\n')] |
import os
import tensorflow as tf
from BertLibrary.bert_predictor import BertPredictor
from BertLibrary.bert_trainer import BertTrainer
from BertLibrary.bert_evaluator import BertEvaluator
from tensorflow.estimator import Estimator
from tensorflow.estimator import RunConfig
from BertLibrary.bert.run_classifier import *
import BertLibrary.bert.modeling as modeling
import BertLibrary.bert.tokenization as tokenization
class BertModel:
def __init__(self,
model_dir,
ckpt_name,
do_lower_case,
max_seq_len,
batch_size,
labels,
trainable=True,
keep_checkpoint_max=5,
config=None):
self.model_dir = model_dir
self.bert_config, self.vocab_file, \
self.init_checkpoint = self.get_model_configs(model_dir, ckpt_name)
self.do_lower_case = do_lower_case
self.max_seq_len = max_seq_len
self.batch_size = batch_size
self.processer = None
self.keep_checkpoint_max = keep_checkpoint_max
self.labels = labels
self.config = config if config else None
self.predictor = None
self.trainable = trainable
def build(self, model_fn_args, config_args):
config = self.get_config(**config_args)
model_fn = self.get_model_fn(**model_fn_args)
self.estimator = Estimator(
model_fn=model_fn,
config=config,
params={'batch_size': self.batch_size})
self.tokenizer = tokenization.FullTokenizer(
vocab_file=self.vocab_file, do_lower_case=self.do_lower_case)
def get_model_configs(self, base_dir, ckpt_name):
bert_config_file = os.path.join(base_dir, 'bert_config.json')
vocab_file = os.path.join(base_dir, 'vocab.txt')
init_checkpoint = os.path.join(base_dir, ckpt_name)
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
return bert_config, vocab_file, init_checkpoint
def get_config(self, ckpt_output_dir='./output', save_check_steps=1000):
if not self.config:
self.config = tf.ConfigProto(device_count={'GPU': 1})
self.config.gpu_options.allow_growth = True
self.config.gpu_options.per_process_gpu_memory_fraction = 0.5
run_config = RunConfig(
model_dir=ckpt_output_dir,
session_config=self.config,
keep_checkpoint_max=self.keep_checkpoint_max,
save_checkpoints_steps=save_check_steps)
return run_config
def get_predictor(self):
return BertPredictor(self.estimator, self.processer, self.config)
def get_trainer(self):
assert self.trainable, 'This model cannot be trained'
return BertTrainer(self)
def get_evaluator(self, iter_steps=1000):
return BertEvaluator(self, iter_steps=iter_steps)
def get_model_fn(self, *args):
return NotImplementedError() | [
"BertLibrary.bert.tokenization.FullTokenizer",
"tensorflow.estimator.RunConfig",
"BertLibrary.bert.modeling.BertConfig.from_json_file",
"tensorflow.estimator.Estimator",
"os.path.join",
"BertLibrary.bert_evaluator.BertEvaluator",
"BertLibrary.bert_predictor.BertPredictor",
"BertLibrary.bert_trainer.Be... | [((1426, 1514), 'tensorflow.estimator.Estimator', 'Estimator', ([], {'model_fn': 'model_fn', 'config': 'config', 'params': "{'batch_size': self.batch_size}"}), "(model_fn=model_fn, config=config, params={'batch_size': self.\n batch_size})\n", (1435, 1514), False, 'from tensorflow.estimator import Estimator\n'), ((1573, 1666), 'BertLibrary.bert.tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'self.vocab_file', 'do_lower_case': 'self.do_lower_case'}), '(vocab_file=self.vocab_file, do_lower_case=self.\n do_lower_case)\n', (1599, 1666), True, 'import BertLibrary.bert.tokenization as tokenization\n'), ((1757, 1799), 'os.path.join', 'os.path.join', (['base_dir', '"""bert_config.json"""'], {}), "(base_dir, 'bert_config.json')\n", (1769, 1799), False, 'import os\n'), ((1821, 1856), 'os.path.join', 'os.path.join', (['base_dir', '"""vocab.txt"""'], {}), "(base_dir, 'vocab.txt')\n", (1833, 1856), False, 'import os\n'), ((1883, 1916), 'os.path.join', 'os.path.join', (['base_dir', 'ckpt_name'], {}), '(base_dir, ckpt_name)\n', (1895, 1916), False, 'import os\n'), ((1939, 1991), 'BertLibrary.bert.modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['bert_config_file'], {}), '(bert_config_file)\n', (1973, 1991), True, 'import BertLibrary.bert.modeling as modeling\n'), ((2373, 2533), 'tensorflow.estimator.RunConfig', 'RunConfig', ([], {'model_dir': 'ckpt_output_dir', 'session_config': 'self.config', 'keep_checkpoint_max': 'self.keep_checkpoint_max', 'save_checkpoints_steps': 'save_check_steps'}), '(model_dir=ckpt_output_dir, session_config=self.config,\n keep_checkpoint_max=self.keep_checkpoint_max, save_checkpoints_steps=\n save_check_steps)\n', (2382, 2533), False, 'from tensorflow.estimator import RunConfig\n'), ((2646, 2704), 'BertLibrary.bert_predictor.BertPredictor', 'BertPredictor', (['self.estimator', 'self.processer', 'self.config'], {}), '(self.estimator, self.processer, 
self.config)\n', (2659, 2704), False, 'from BertLibrary.bert_predictor import BertPredictor\n'), ((2810, 2827), 'BertLibrary.bert_trainer.BertTrainer', 'BertTrainer', (['self'], {}), '(self)\n', (2821, 2827), False, 'from BertLibrary.bert_trainer import BertTrainer\n'), ((2890, 2932), 'BertLibrary.bert_evaluator.BertEvaluator', 'BertEvaluator', (['self'], {'iter_steps': 'iter_steps'}), '(self, iter_steps=iter_steps)\n', (2903, 2932), False, 'from BertLibrary.bert_evaluator import BertEvaluator\n'), ((2181, 2220), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 1}"}), "(device_count={'GPU': 1})\n", (2195, 2220), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
import unittest
import torch
from torch.distributions import HalfCauchy
from gpytorch.priors import HalfCauchyPrior
from gpytorch.test.utils import least_used_cuda_device
class TestHalfCauchyPrior(unittest.TestCase):
def test_half_cauchy_prior_to_gpu(self):
if torch.cuda.is_available():
prior = HalfCauchy(1.0).cuda()
self.assertEqual(prior.concentration.device.type, "cuda")
self.assertEqual(prior.rate.device.type, "cuda")
def test_half_cauchy_prior_validate_args(self):
with self.assertRaises(ValueError):
HalfCauchyPrior(-1, validate_args=True)
with self.assertRaises(ValueError):
HalfCauchyPrior(-1, validate_args=True)
def test_half_cauchy_prior_log_prob(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
prior = HalfCauchyPrior(0.1)
dist = HalfCauchy(0.1)
t = torch.tensor(1.0, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.tensor([1.5, 0.5], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.tensor([[1.0, 0.5], [3.0, 0.25]], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
def test_half_cauchy_prior_log_prob_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
return self.test_gamma_prior_log_prob(cuda=True)
def test_half_cauchy_prior_log_prob_log_transform(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
prior = HalfCauchyPrior(0.1, transform=torch.exp)
dist = HalfCauchy(0.1)
t = torch.tensor(0.0, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
t = torch.tensor([-1, 0.5], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
t = torch.tensor([[-1, 0.5], [0.1, -2.0]], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
def test_half_cauchy_prior_log_prob_log_transform_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
return self.test_half_cauchy_prior_log_prob_log_transform(cuda=True)
def test_half_cauchy_prior_batch_log_prob(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
prior = HalfCauchyPrior(0.1)
dist = HalfCauchy(0.1)
t = torch.ones(2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.ones(2, 2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
scale = torch.tensor([0.1, 1.0], device=device)
prior = HalfCauchyPrior(scale)
dist = HalfCauchy(scale)
t = torch.ones(2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.ones(2, 2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
with self.assertRaises(ValueError):
prior.log_prob(torch.ones(3, device=device))
with self.assertRaises(ValueError):
prior.log_prob(torch.ones(2, 3, device=device))
def test_half_cauchy_prior_batch_log_prob_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
return self.test_half_cauchy_prior_batch_log_prob(cuda=True)
if __name__ == "__main__":
unittest.main()
| [
"torch.device",
"torch.distributions.HalfCauchy",
"gpytorch.priors.HalfCauchyPrior",
"torch.tensor",
"torch.cuda.is_available",
"unittest.main",
"gpytorch.test.utils.least_used_cuda_device",
"torch.ones"
] | [((3657, 3672), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3670, 3672), False, 'import unittest\n'), ((301, 326), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (324, 326), False, 'import torch\n'), ((894, 914), 'gpytorch.priors.HalfCauchyPrior', 'HalfCauchyPrior', (['(0.1)'], {}), '(0.1)\n', (909, 914), False, 'from gpytorch.priors import HalfCauchyPrior\n'), ((930, 945), 'torch.distributions.HalfCauchy', 'HalfCauchy', (['(0.1)'], {}), '(0.1)\n', (940, 945), False, 'from torch.distributions import HalfCauchy\n'), ((959, 991), 'torch.tensor', 'torch.tensor', (['(1.0)'], {'device': 'device'}), '(1.0, device=device)\n', (971, 991), False, 'import torch\n'), ((1078, 1117), 'torch.tensor', 'torch.tensor', (['[1.5, 0.5]'], {'device': 'device'}), '([1.5, 0.5], device=device)\n', (1090, 1117), False, 'import torch\n'), ((1204, 1258), 'torch.tensor', 'torch.tensor', (['[[1.0, 0.5], [3.0, 0.25]]'], {'device': 'device'}), '([[1.0, 0.5], [3.0, 0.25]], device=device)\n', (1216, 1258), False, 'import torch\n'), ((1397, 1422), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1420, 1422), False, 'import torch\n'), ((1693, 1734), 'gpytorch.priors.HalfCauchyPrior', 'HalfCauchyPrior', (['(0.1)'], {'transform': 'torch.exp'}), '(0.1, transform=torch.exp)\n', (1708, 1734), False, 'from gpytorch.priors import HalfCauchyPrior\n'), ((1750, 1765), 'torch.distributions.HalfCauchy', 'HalfCauchy', (['(0.1)'], {}), '(0.1)\n', (1760, 1765), False, 'from torch.distributions import HalfCauchy\n'), ((1779, 1811), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'device'}), '(0.0, device=device)\n', (1791, 1811), False, 'import torch\n'), ((1904, 1942), 'torch.tensor', 'torch.tensor', (['[-1, 0.5]'], {'device': 'device'}), '([-1, 0.5], device=device)\n', (1916, 1942), False, 'import torch\n'), ((2035, 2088), 'torch.tensor', 'torch.tensor', (['[[-1, 0.5], [0.1, -2.0]]'], {'device': 'device'}), '([[-1, 0.5], [0.1, -2.0]], 
device=device)\n', (2047, 2088), False, 'import torch\n'), ((2247, 2272), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2270, 2272), False, 'import torch\n'), ((2555, 2575), 'gpytorch.priors.HalfCauchyPrior', 'HalfCauchyPrior', (['(0.1)'], {}), '(0.1)\n', (2570, 2575), False, 'from gpytorch.priors import HalfCauchyPrior\n'), ((2591, 2606), 'torch.distributions.HalfCauchy', 'HalfCauchy', (['(0.1)'], {}), '(0.1)\n', (2601, 2606), False, 'from torch.distributions import HalfCauchy\n'), ((2619, 2647), 'torch.ones', 'torch.ones', (['(2)'], {'device': 'device'}), '(2, device=device)\n', (2629, 2647), False, 'import torch\n'), ((2734, 2765), 'torch.ones', 'torch.ones', (['(2)', '(2)'], {'device': 'device'}), '(2, 2, device=device)\n', (2744, 2765), False, 'import torch\n'), ((2857, 2896), 'torch.tensor', 'torch.tensor', (['[0.1, 1.0]'], {'device': 'device'}), '([0.1, 1.0], device=device)\n', (2869, 2896), False, 'import torch\n'), ((2913, 2935), 'gpytorch.priors.HalfCauchyPrior', 'HalfCauchyPrior', (['scale'], {}), '(scale)\n', (2928, 2935), False, 'from gpytorch.priors import HalfCauchyPrior\n'), ((2951, 2968), 'torch.distributions.HalfCauchy', 'HalfCauchy', (['scale'], {}), '(scale)\n', (2961, 2968), False, 'from torch.distributions import HalfCauchy\n'), ((2981, 3009), 'torch.ones', 'torch.ones', (['(2)'], {'device': 'device'}), '(2, device=device)\n', (2991, 3009), False, 'import torch\n'), ((3096, 3127), 'torch.ones', 'torch.ones', (['(2)', '(2)'], {'device': 'device'}), '(2, 2, device=device)\n', (3106, 3127), False, 'import torch\n'), ((3477, 3502), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3500, 3502), False, 'import torch\n'), ((611, 650), 'gpytorch.priors.HalfCauchyPrior', 'HalfCauchyPrior', (['(-1)'], {'validate_args': '(True)'}), '(-1, validate_args=True)\n', (626, 650), False, 'from gpytorch.priors import HalfCauchyPrior\n'), ((707, 746), 'gpytorch.priors.HalfCauchyPrior', 'HalfCauchyPrior', (['(-1)'], 
{'validate_args': '(True)'}), '(-1, validate_args=True)\n', (722, 746), False, 'from gpytorch.priors import HalfCauchyPrior\n'), ((824, 844), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (836, 844), False, 'import torch\n'), ((858, 877), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (870, 877), False, 'import torch\n'), ((1623, 1643), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1635, 1643), False, 'import torch\n'), ((1657, 1676), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1669, 1676), False, 'import torch\n'), ((2485, 2505), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2497, 2505), False, 'import torch\n'), ((2519, 2538), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2531, 2538), False, 'import torch\n'), ((1441, 1465), 'gpytorch.test.utils.least_used_cuda_device', 'least_used_cuda_device', ([], {}), '()\n', (1463, 1465), False, 'from gpytorch.test.utils import least_used_cuda_device\n'), ((2291, 2315), 'gpytorch.test.utils.least_used_cuda_device', 'least_used_cuda_device', ([], {}), '()\n', (2313, 2315), False, 'from gpytorch.test.utils import least_used_cuda_device\n'), ((3273, 3301), 'torch.ones', 'torch.ones', (['(3)'], {'device': 'device'}), '(3, device=device)\n', (3283, 3301), False, 'import torch\n'), ((3374, 3405), 'torch.ones', 'torch.ones', (['(2)', '(3)'], {'device': 'device'}), '(2, 3, device=device)\n', (3384, 3405), False, 'import torch\n'), ((3521, 3545), 'gpytorch.test.utils.least_used_cuda_device', 'least_used_cuda_device', ([], {}), '()\n', (3543, 3545), False, 'from gpytorch.test.utils import least_used_cuda_device\n'), ((348, 363), 'torch.distributions.HalfCauchy', 'HalfCauchy', (['(1.0)'], {}), '(1.0)\n', (358, 363), False, 'from torch.distributions import HalfCauchy\n')] |
#!/usr/bin/env python3
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Cipher import ChaCha20
from Crypto.Hash import SHA256
import binascii
import argparse
#from pathlib import Path
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""CLI tool to decrypt an ulog file\n""")
parser.add_argument("ulog_file", help=".ulog file", nargs='?', default=None)
parser.add_argument("ulog_key", help=".ulogk, encrypted key", nargs='?', default=None)
parser.add_argument("rsa_key", help=".pem format key for decrypting the ulog key", nargs='?', default=None)
args = parser.parse_args()
# Only generate a key pair, don't sign
if not args.ulog_file or not args.ulog_key or not args.rsa_key:
print('Need all arguments, the encrypted ulog file, the key and the key decryption key')
sys.exit(1);
# Read the private RSA key to decrypt the cahcha key
with open(args.rsa_key, 'rb') as f:
r = RSA.importKey(f.read(), passphrase='')
# Read the encrypted xchacha key and the nonce
with open(args.ulog_key, 'rb') as f:
ulog_key_header = f.read(22)
# Parse the header
try:
# magic
if not ulog_key_header.startswith(bytearray("ULogKey".encode())):
raise Exception()
# version
if ulog_key_header[7] != 1:
raise Exception()
# expected key exchange algorithm (RSA_OAEP)
if ulog_key_header[16] != 4:
raise Exception()
key_size = ulog_key_header[19] << 8 | ulog_key_header[18];
nonce_size = ulog_key_header[21] << 8 | ulog_key_header[20];
ulog_key_cipher = f.read(key_size)
nonce = f.read(nonce_size)
except:
print("Keyfile format error")
sys.exit(1);
# Decrypt the xchacha key
cipher_rsa = PKCS1_OAEP.new(r,SHA256)
ulog_key = cipher_rsa.decrypt(ulog_key_cipher)
#print(binascii.hexlify(ulog_key))
# Read and decrypt the .ulgc
cipher = ChaCha20.new(key=ulog_key, nonce=nonce)
with open(args.ulog_file, 'rb') as f:
with open(args.ulog_file.rstrip(args.ulog_file[-1]), 'wb') as out:
out.write(cipher.decrypt(f.read()))
| [
"Crypto.Cipher.PKCS1_OAEP.new",
"sys.exit",
"argparse.ArgumentParser",
"Crypto.Cipher.ChaCha20.new"
] | [((271, 344), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CLI tool to decrypt an ulog file\n"""'}), "(description='CLI tool to decrypt an ulog file\\n')\n", (294, 344), False, 'import argparse\n'), ((1936, 1961), 'Crypto.Cipher.PKCS1_OAEP.new', 'PKCS1_OAEP.new', (['r', 'SHA256'], {}), '(r, SHA256)\n', (1950, 1961), False, 'from Crypto.Cipher import PKCS1_OAEP\n'), ((2098, 2137), 'Crypto.Cipher.ChaCha20.new', 'ChaCha20.new', ([], {'key': 'ulog_key', 'nonce': 'nonce'}), '(key=ulog_key, nonce=nonce)\n', (2110, 2137), False, 'from Crypto.Cipher import ChaCha20\n'), ((882, 893), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (890, 893), False, 'import sys\n'), ((1875, 1886), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1883, 1886), False, 'import sys\n')] |
from django.contrib import admin
from .models import Message, Reply, Reader
# Register your models here.
admin.site.register(Message)
admin.site.register(Reply)
admin.site.register(Reader)
| [
"django.contrib.admin.site.register"
] | [((106, 134), 'django.contrib.admin.site.register', 'admin.site.register', (['Message'], {}), '(Message)\n', (125, 134), False, 'from django.contrib import admin\n'), ((135, 161), 'django.contrib.admin.site.register', 'admin.site.register', (['Reply'], {}), '(Reply)\n', (154, 161), False, 'from django.contrib import admin\n'), ((162, 189), 'django.contrib.admin.site.register', 'admin.site.register', (['Reader'], {}), '(Reader)\n', (181, 189), False, 'from django.contrib import admin\n')] |
from models.lenet import *
from models.wresnet import *
import os
def select_model(dataset,
model_name,
pretrained=False,
pretrained_models_path=None):
if dataset in ['SVHN', 'CIFAR10', 'CINIC10', 'CIFAR100']:
n_classes = 100 if dataset == 'CIFAR100' else 10
assert model_name in ['LeNet', 'WRN-16-1', 'WRN-16-2', 'WRN-40-1', 'WRN-40-2']
if model_name=='LeNet':
model = LeNet32(n_classes=n_classes)
elif model_name=='WRN-16-1':
model = WideResNet(depth=16, num_classes=n_classes, widen_factor=1, dropRate=0.0)
elif model_name=='WRN-16-2':
model = WideResNet(depth=16, num_classes=n_classes, widen_factor=2, dropRate=0.0)
elif model_name=='WRN-40-1':
model = WideResNet(depth=40, num_classes=n_classes, widen_factor=1, dropRate=0.0)
elif model_name=='WRN-40-2':
model = WideResNet(depth=40, num_classes=n_classes, widen_factor=2, dropRate=0.0)
if pretrained:
model_path = os.path.join(pretrained_models_path, dataset, model_name, "last.pth.tar")
print('Loading Model from {}'.format(model_path))
checkpoint = torch.load(model_path, map_location='cpu')
model.load_state_dict(checkpoint['state_dict'])
elif dataset=='ImageNet':
assert model_name in ['ResNet18', 'ResNet34', 'ResNet50', 'ResNet101', 'ResNet152']
if model_name == 'ResNet18':
model = resnet18(pretrained=pretrained)
elif model_name == 'ResNet34':
model = resnet34(pretrained=pretrained)
elif model_name == 'ResNet50':
model = resnet50(pretrained=pretrained)
elif model_name == 'ResNet101':
model = resnet101(pretrained=pretrained)
elif model_name == 'ResNet152':
model = resnet152(pretrained=pretrained)
else:
raise NotImplementedError
return model
if __name__ == '__main__':
import torch
from torchsummary import summary
import random
import time
random.seed(1234) # torch transforms use this seed
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
support_x_task = torch.autograd.Variable(torch.FloatTensor(64, 3, 32, 32).uniform_(0, 1))
t0 = time.time()
model = select_model('CIFAR10', model_name='WRN-16-2')
output, act = model(support_x_task)
print("Time taken for forward pass: {} s".format(time.time() - t0))
print("\nOUTPUT SHAPE: ", output.shape)
summary(model, (3, 32, 32)) | [
"torch.manual_seed",
"torch.load",
"os.path.join",
"random.seed",
"torch.cuda.manual_seed",
"torchsummary.summary",
"time.time",
"torch.FloatTensor"
] | [((2094, 2111), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (2105, 2111), False, 'import random\n'), ((2150, 2173), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (2167, 2173), False, 'import torch\n'), ((2178, 2206), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1234)'], {}), '(1234)\n', (2200, 2206), False, 'import torch\n'), ((2312, 2323), 'time.time', 'time.time', ([], {}), '()\n', (2321, 2323), False, 'import time\n'), ((2543, 2570), 'torchsummary.summary', 'summary', (['model', '(3, 32, 32)'], {}), '(model, (3, 32, 32))\n', (2550, 2570), False, 'from torchsummary import summary\n'), ((1065, 1138), 'os.path.join', 'os.path.join', (['pretrained_models_path', 'dataset', 'model_name', '"""last.pth.tar"""'], {}), "(pretrained_models_path, dataset, model_name, 'last.pth.tar')\n", (1077, 1138), False, 'import os\n'), ((1226, 1268), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cpu"""'}), "(model_path, map_location='cpu')\n", (1236, 1268), False, 'import torch\n'), ((2253, 2285), 'torch.FloatTensor', 'torch.FloatTensor', (['(64)', '(3)', '(32)', '(32)'], {}), '(64, 3, 32, 32)\n', (2270, 2285), False, 'import torch\n'), ((2476, 2487), 'time.time', 'time.time', ([], {}), '()\n', (2485, 2487), False, 'import time\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
import asyncio
import base64
import json
import os
import sys
from aiohttp import web
from navigator.conf import (
DEBUG,
SESSION_PREFIX,
SESSION_URL,
SESSION_KEY,
config
)
from navigator.handlers import nav_exception_handler
from navigator.exceptions import (
NavException,
UserDoesntExists,
InvalidAuth
)
from navigator.auth.sessions import get_session, new_session
from navigator.views import BaseView, BaseHandler
from asyncdb.utils.encoders import BaseEncoder, DefaultEncoder
from navigator.auth.models import User
class UserHandler(BaseView):
async def session(self):
session = None
try:
session = await get_session(self.request)
except Exception as err:
print(err)
return self.critical(
request=self.request,
exception=err
)
return session
async def get(self):
""" Getting Session information."""
session = await self.session()
try:
if not session:
headers = {"x-status": "Empty", "x-message": "Invalid User Session"}
return self.no_content(headers=headers)
else:
try:
sessionid = session[SESSION_KEY]
except KeyError:
return self.error('Invalid Session, missing Session ID')
headers = {"x-status": "OK", "x-message": "Session OK"}
userdata = dict(session)
data = {
"session_id": sessionid,
**userdata
}
if data:
return self.json_response(
response=data,
headers=headers
)
except Exception as err:
return self.error(
self.request,
exception=err
)
async def delete(self):
""" Close and Delete User Session."""
session = await self.session()
try:
app = self.request.app
router = app.router
session.invalidate()
print(session)
except Exception as err:
print(err, err.__class__.__name__)
return self.critical(
request=self.request,
exception=err,
state=501
)
# return a redirect to LOGIN
return web.HTTPFound(router["login"].url_for())
async def put(self):
"""Re-login and re-authenticate..."""
class UserInfo(BaseHandler):
async def session(self, request):
session = None
try:
session = await get_session(request)
except Exception as err:
print(err)
return self.critical(
request=request,
exception=err
)
return session
async def profile(self, request):
session = await self.session(request)
print(session)
if not session:
headers = {"x-status": "Empty", "x-message": "Invalid User Session"}
return self.no_content(headers=headers)
else:
try:
sessionid = session['id']
except KeyError:
return self.error('Invalid Session, missing Session ID')
# getting User information
try:
user_id = request["user_id"]
except KeyError:
info = session[sessionid]
user_id = info['user_id']
try:
user = await User.get(user_id=user_id)
return web.Response(
text=user.json(ensure_ascii=True, indent=4),
status=200,
content_type="application/json"
)
except Exception as err:
print(err)
return self.critical(
request=request,
exception=err
)
async def logout(self, request):
""" Close and Delete User Session."""
session = await self.session(request)
try:
app = request.app
router = app.router
session.invalidate()
except Exception as err:
print(err, err.__class__.__name__)
response = {
"message": f"Exception on: {err.__class__.__name__}",
"error": str(err)
}
args = {
"status": 501,
"content_type": "application/json",
"text": json.dumps(response, cls=DefaultEncoder)
}
return web.Response(**args)
# return a redirect to LOGIN
# TODO: configure the return of LOGOUT
return web.HTTPFound('/')
| [
"aiohttp.web.HTTPFound",
"json.dumps",
"aiohttp.web.Response",
"navigator.auth.sessions.get_session",
"navigator.auth.models.User.get"
] | [((4799, 4817), 'aiohttp.web.HTTPFound', 'web.HTTPFound', (['"""/"""'], {}), "('/')\n", (4812, 4817), False, 'from aiohttp import web\n'), ((727, 752), 'navigator.auth.sessions.get_session', 'get_session', (['self.request'], {}), '(self.request)\n', (738, 752), False, 'from navigator.auth.sessions import get_session, new_session\n'), ((2760, 2780), 'navigator.auth.sessions.get_session', 'get_session', (['request'], {}), '(request)\n', (2771, 2780), False, 'from navigator.auth.sessions import get_session, new_session\n'), ((3639, 3664), 'navigator.auth.models.User.get', 'User.get', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (3647, 3664), False, 'from navigator.auth.models import User\n'), ((4679, 4699), 'aiohttp.web.Response', 'web.Response', ([], {}), '(**args)\n', (4691, 4699), False, 'from aiohttp import web\n'), ((4605, 4645), 'json.dumps', 'json.dumps', (['response'], {'cls': 'DefaultEncoder'}), '(response, cls=DefaultEncoder)\n', (4615, 4645), False, 'import json\n')] |
from bs4 import BeautifulSoup
from PIL import Image
from io import BytesIO
import requests
import os
def start_search():
search = input("Enter Search Item: ")
params = {"q": search}
dir_name = search.replace(" ", "_").lower()
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
r = requests.get("http://www.bing.com/images/search", params=params)
soup = BeautifulSoup(r.text,"html.parser")
links = soup.findAll("a",{"class":"thumb"})
for items in links:
img_obj = requests.get(items.attrs["href"])
print("Getting: ", items.attrs["href"])
title = items.attrs["href"].split("/")[-1]
try:
img = Image.open(BytesIO(img_obj.content))
img.save("./" + dir_name + "/" + title, img.format)
except:
print("error")
start_search()
start_search() | [
"os.makedirs",
"io.BytesIO",
"requests.get",
"bs4.BeautifulSoup",
"os.path.isdir"
] | [((317, 381), 'requests.get', 'requests.get', (['"""http://www.bing.com/images/search"""'], {'params': 'params'}), "('http://www.bing.com/images/search', params=params)\n", (329, 381), False, 'import requests\n'), ((394, 430), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (407, 430), False, 'from bs4 import BeautifulSoup\n'), ((252, 275), 'os.path.isdir', 'os.path.isdir', (['dir_name'], {}), '(dir_name)\n', (265, 275), False, 'import os\n'), ((285, 306), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (296, 306), False, 'import os\n'), ((521, 554), 'requests.get', 'requests.get', (["items.attrs['href']"], {}), "(items.attrs['href'])\n", (533, 554), False, 'import requests\n'), ((696, 720), 'io.BytesIO', 'BytesIO', (['img_obj.content'], {}), '(img_obj.content)\n', (703, 720), False, 'from io import BytesIO\n')] |
from office31 import office31
from office31 import download_and_extract_office31
from pathlib import Path
import os
#Hacky script to count how many images are in each folder/cluster in both sources
out_name = "./data/office31/office31_count.txt"
def count_items(src="amazon"):
file_open=open(out_name, "a")
label = 0
d = "./data/office31/"+src+"/images/"
count = {}
for path in os.listdir(d):
full_path = os.path.join(d, path)
for f in os.listdir(full_path):
if count.keys().__contains__(path):
count[path] += 1
else:
count[path] = 1
label +=1
file_open.write('\n'+src+'\n')
file_open.write(str(count)+'')
file_open=open(out_name, "w")
file_open.write('')
count_items("amazon")
count_items("webcam") | [
"os.listdir",
"os.path.join"
] | [((399, 412), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (409, 412), False, 'import os\n'), ((434, 455), 'os.path.join', 'os.path.join', (['d', 'path'], {}), '(d, path)\n', (446, 455), False, 'import os\n'), ((473, 494), 'os.listdir', 'os.listdir', (['full_path'], {}), '(full_path)\n', (483, 494), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""Webscraper for Swedish data.
Reference: https://www.scb.se/hitta-statistik/statistik-efter-amne/befolkning/befolkningens-sammansattning/befolkningsstatistik/pong/tabell-och-diagram/preliminar-statistik-over-doda/
Todo:
* caching
"""
import pkg_resources
from .main import *
from . import fohm
from .scb import *
from .backup import *
try:
__version__ = pkg_resources.get_distribution("covid19sweden").version
except:
__version__ = None | [
"pkg_resources.get_distribution"
] | [((394, 441), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""covid19sweden"""'], {}), "('covid19sweden')\n", (424, 441), False, 'import pkg_resources\n')] |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright (c) 2021
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
# ---------------
# Airflow imports
# ---------------
#--------------
# local imports
# -------------
from airflow_actionproject.hooks.action import ActionDatabaseHook
# -----------------------
# Module global variables
# -----------------------
# ----------------
# Module constants
# ----------------
def check_number_of_entries(conn_id, start_date, n_entries, project, true_task_id, false_task_id, obs_type='observation'):
'''Callable to use with BranchPythonOperator'''
next_task = false_task_id
with ActionDatabaseHook(conn_id) as hook:
observations = list(
hook.download(
start_date = start_date,
end_date = '2999-12-31T23:59:59.99999Z', # far away,
n_entries = n_entries+1,
project = project,
obs_type = obs_type,
)
)
if len(observations) >= (n_entries+1):
next_task = true_task_id
return next_task
| [
"airflow_actionproject.hooks.action.ActionDatabaseHook"
] | [((846, 873), 'airflow_actionproject.hooks.action.ActionDatabaseHook', 'ActionDatabaseHook', (['conn_id'], {}), '(conn_id)\n', (864, 873), False, 'from airflow_actionproject.hooks.action import ActionDatabaseHook\n')] |
from ioflo.base.consoling import getConsole
from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer
from raet.raeting import AutoMode, Acceptance
from raet.road.estating import RemoteEstate
from raet.road.stacking import RoadStack
from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote
from stp_core.common.log import getlogger
from stp_core.network.port_dispenser import genHa
logger = getlogger()
def testPromiscuousConnection(tdir):
alpha = RoadStack(name='alpha',
ha=genHa(),
auto=AutoMode.always,
basedirpath=tdir)
beta = RoadStack(name='beta',
ha=genHa(),
main=True,
auto=AutoMode.always,
basedirpath=tdir)
try:
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
alpha.addRemote(betaRemote)
alpha.join(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
finally:
cleanup(alpha, beta)
def testRaetPreSharedKeysPromiscous(tdir):
alphaSigner = NaclSigner()
betaSigner = NaclSigner()
logger.debug("Alpha's verkey {}".format(alphaSigner.verhex))
logger.debug("Beta's verkey {}".format(betaSigner.verhex))
alpha = RoadStack(name='alpha',
ha=genHa(),
sigkey=alphaSigner.keyhex,
auto=AutoMode.always,
basedirpath=tdir)
beta = RoadStack(name='beta',
ha=genHa(),
sigkey=betaSigner.keyhex,
main=True,
auto=AutoMode.always,
basedirpath=tdir)
try:
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha,
verkey=betaSigner.verhex)
alpha.addRemote(betaRemote)
alpha.allow(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
finally:
cleanup(alpha, beta)
def testRaetPreSharedKeysNonPromiscous(tdir):
alphaSigner = NaclSigner()
betaSigner = NaclSigner()
alphaPrivateer = Privateer()
betaPrivateer = Privateer()
logger.debug("Alpha's verkey {}".format(alphaSigner.verhex))
logger.debug("Beta's verkey {}".format(betaSigner.verhex))
alpha = RoadStack(name='alpha',
ha=genHa(),
sigkey=alphaSigner.keyhex,
prikey=alphaPrivateer.keyhex,
auto=AutoMode.never,
basedirpath=tdir)
beta = RoadStack(name='beta',
ha=genHa(),
sigkey=betaSigner.keyhex,
prikey=betaPrivateer.keyhex,
main=True,
auto=AutoMode.never,
basedirpath=tdir)
alpha.keep.dumpRemoteRoleData({
"acceptance": Acceptance.accepted.value,
"verhex": betaSigner.verhex,
"pubhex": betaPrivateer.pubhex
}, "beta")
beta.keep.dumpRemoteRoleData({
"acceptance": Acceptance.accepted.value,
"verhex": alphaSigner.verhex,
"pubhex": alphaPrivateer.pubhex
}, "alpha")
try:
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
alpha.addRemote(betaRemote)
alpha.allow(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
finally:
cleanup(alpha, beta)
def testConnectionWithHaChanged(tdir):
console = getConsole()
console.reinit(verbosity=console.Wordage.verbose)
alphaSigner = NaclSigner()
betaSigner = NaclSigner()
alphaPrivateer = Privateer()
betaPrivateer = Privateer()
logger.debug("Alpha's verkey {}".format(alphaSigner.verhex))
logger.debug("Beta's verkey {}".format(betaSigner.verhex))
alpha = None
def setupAlpha(ha):
nonlocal alpha
alpha = RoadStack(name='alpha',
ha=ha,
sigkey=alphaSigner.keyhex,
prikey=alphaPrivateer.keyhex,
auto=AutoMode.never,
basedirpath=tdir)
alpha.keep.dumpRemoteRoleData({
"acceptance": Acceptance.accepted.value,
"verhex": betaSigner.verhex,
"pubhex": betaPrivateer.pubhex
}, "beta")
oldHa = genHa()
setupAlpha(oldHa)
beta = RoadStack(name='beta',
ha=genHa(),
sigkey=betaSigner.keyhex,
prikey=betaPrivateer.keyhex,
main=True,
auto=AutoMode.never,
basedirpath=tdir, mutable=True)
beta.keep.dumpRemoteRoleData({
"acceptance": Acceptance.accepted.value,
"verhex": alphaSigner.verhex,
"pubhex": alphaPrivateer.pubhex
}, "alpha")
try:
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
alpha.addRemote(betaRemote)
alpha.join(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
logger.debug("beta knows alpha as {}".
format(getRemote(beta, "alpha").ha))
cleanup(alpha)
newHa = genHa()
logger.debug("alpha changing ha to {}".format(newHa))
setupAlpha(newHa)
betaRemote = RemoteEstate(stack=alpha, ha=beta.ha)
alpha.addRemote(betaRemote)
alpha.join(uid=betaRemote.uid, cascade=True)
handshake(alpha, beta)
sendMsgs(alpha, beta, betaRemote)
logger.debug("beta knows alpha as {}".
format(getRemote(beta, "alpha").ha))
finally:
cleanup(alpha, beta)
| [
"ioflo.base.consoling.getConsole",
"stp_raet.test.helper.sendMsgs",
"stp_core.crypto.nacl_wrappers.Privateer",
"stp_raet.test.helper.getRemote",
"stp_core.network.port_dispenser.genHa",
"stp_raet.test.helper.handshake",
"stp_core.crypto.nacl_wrappers.Signer",
"raet.road.stacking.RoadStack",
"raet.ro... | [((425, 436), 'stp_core.common.log.getlogger', 'getlogger', ([], {}), '()\n', (434, 436), False, 'from stp_core.common.log import getlogger\n'), ((1151, 1163), 'stp_core.crypto.nacl_wrappers.Signer', 'NaclSigner', ([], {}), '()\n', (1161, 1163), True, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer\n'), ((1181, 1193), 'stp_core.crypto.nacl_wrappers.Signer', 'NaclSigner', ([], {}), '()\n', (1191, 1193), True, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer\n'), ((2162, 2174), 'stp_core.crypto.nacl_wrappers.Signer', 'NaclSigner', ([], {}), '()\n', (2172, 2174), True, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer\n'), ((2192, 2204), 'stp_core.crypto.nacl_wrappers.Signer', 'NaclSigner', ([], {}), '()\n', (2202, 2204), True, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer\n'), ((2227, 2238), 'stp_core.crypto.nacl_wrappers.Privateer', 'Privateer', ([], {}), '()\n', (2236, 2238), False, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer\n'), ((2259, 2270), 'stp_core.crypto.nacl_wrappers.Privateer', 'Privateer', ([], {}), '()\n', (2268, 2270), False, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer\n'), ((3623, 3635), 'ioflo.base.consoling.getConsole', 'getConsole', ([], {}), '()\n', (3633, 3635), False, 'from ioflo.base.consoling import getConsole\n'), ((3709, 3721), 'stp_core.crypto.nacl_wrappers.Signer', 'NaclSigner', ([], {}), '()\n', (3719, 3721), True, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer\n'), ((3739, 3751), 'stp_core.crypto.nacl_wrappers.Signer', 'NaclSigner', ([], {}), '()\n', (3749, 3751), True, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer\n'), ((3774, 3785), 'stp_core.crypto.nacl_wrappers.Privateer', 'Privateer', ([], {}), '()\n', (3783, 3785), False, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, 
Privateer\n'), ((3806, 3817), 'stp_core.crypto.nacl_wrappers.Privateer', 'Privateer', ([], {}), '()\n', (3815, 3817), False, 'from stp_core.crypto.nacl_wrappers import Signer as NaclSigner, Privateer\n'), ((4496, 4503), 'stp_core.network.port_dispenser.genHa', 'genHa', ([], {}), '()\n', (4501, 4503), False, 'from stp_core.network.port_dispenser import genHa\n'), ((843, 880), 'raet.road.estating.RemoteEstate', 'RemoteEstate', ([], {'stack': 'alpha', 'ha': 'beta.ha'}), '(stack=alpha, ha=beta.ha)\n', (855, 880), False, 'from raet.road.estating import RemoteEstate\n'), ((980, 1002), 'stp_raet.test.helper.handshake', 'handshake', (['alpha', 'beta'], {}), '(alpha, beta)\n', (989, 1002), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((1012, 1045), 'stp_raet.test.helper.sendMsgs', 'sendMsgs', (['alpha', 'beta', 'betaRemote'], {}), '(alpha, beta, betaRemote)\n', (1020, 1045), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((1067, 1087), 'stp_raet.test.helper.cleanup', 'cleanup', (['alpha', 'beta'], {}), '(alpha, beta)\n', (1074, 1087), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((1788, 1851), 'raet.road.estating.RemoteEstate', 'RemoteEstate', ([], {'stack': 'alpha', 'ha': 'beta.ha', 'verkey': 'betaSigner.verhex'}), '(stack=alpha, ha=beta.ha, verkey=betaSigner.verhex)\n', (1800, 1851), False, 'from raet.road.estating import RemoteEstate\n'), ((1987, 2009), 'stp_raet.test.helper.handshake', 'handshake', (['alpha', 'beta'], {}), '(alpha, beta)\n', (1996, 2009), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((2019, 2052), 'stp_raet.test.helper.sendMsgs', 'sendMsgs', (['alpha', 'beta', 'betaRemote'], {}), '(alpha, beta, betaRemote)\n', (2027, 2052), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((2075, 2095), 'stp_raet.test.helper.cleanup', 'cleanup', (['alpha', 'beta'], {}), 
'(alpha, beta)\n', (2082, 2095), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((3321, 3358), 'raet.road.estating.RemoteEstate', 'RemoteEstate', ([], {'stack': 'alpha', 'ha': 'beta.ha'}), '(stack=alpha, ha=beta.ha)\n', (3333, 3358), False, 'from raet.road.estating import RemoteEstate\n'), ((3460, 3482), 'stp_raet.test.helper.handshake', 'handshake', (['alpha', 'beta'], {}), '(alpha, beta)\n', (3469, 3482), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((3492, 3525), 'stp_raet.test.helper.sendMsgs', 'sendMsgs', (['alpha', 'beta', 'betaRemote'], {}), '(alpha, beta, betaRemote)\n', (3500, 3525), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((3547, 3567), 'stp_raet.test.helper.cleanup', 'cleanup', (['alpha', 'beta'], {}), '(alpha, beta)\n', (3554, 3567), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((4029, 4160), 'raet.road.stacking.RoadStack', 'RoadStack', ([], {'name': '"""alpha"""', 'ha': 'ha', 'sigkey': 'alphaSigner.keyhex', 'prikey': 'alphaPrivateer.keyhex', 'auto': 'AutoMode.never', 'basedirpath': 'tdir'}), "(name='alpha', ha=ha, sigkey=alphaSigner.keyhex, prikey=\n alphaPrivateer.keyhex, auto=AutoMode.never, basedirpath=tdir)\n", (4038, 4160), False, 'from raet.road.stacking import RoadStack\n'), ((5028, 5065), 'raet.road.estating.RemoteEstate', 'RemoteEstate', ([], {'stack': 'alpha', 'ha': 'beta.ha'}), '(stack=alpha, ha=beta.ha)\n', (5040, 5065), False, 'from raet.road.estating import RemoteEstate\n'), ((5163, 5185), 'stp_raet.test.helper.handshake', 'handshake', (['alpha', 'beta'], {}), '(alpha, beta)\n', (5172, 5185), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((5194, 5227), 'stp_raet.test.helper.sendMsgs', 'sendMsgs', (['alpha', 'beta', 'betaRemote'], {}), '(alpha, beta, betaRemote)\n', (5202, 5227), False, 'from stp_raet.test.helper import handshake, 
sendMsgs, cleanup, getRemote\n'), ((5341, 5355), 'stp_raet.test.helper.cleanup', 'cleanup', (['alpha'], {}), '(alpha)\n', (5348, 5355), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((5373, 5380), 'stp_core.network.port_dispenser.genHa', 'genHa', ([], {}), '()\n', (5378, 5380), False, 'from stp_core.network.port_dispenser import genHa\n'), ((5491, 5528), 'raet.road.estating.RemoteEstate', 'RemoteEstate', ([], {'stack': 'alpha', 'ha': 'beta.ha'}), '(stack=alpha, ha=beta.ha)\n', (5503, 5528), False, 'from raet.road.estating import RemoteEstate\n'), ((5626, 5648), 'stp_raet.test.helper.handshake', 'handshake', (['alpha', 'beta'], {}), '(alpha, beta)\n', (5635, 5648), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((5657, 5690), 'stp_raet.test.helper.sendMsgs', 'sendMsgs', (['alpha', 'beta', 'betaRemote'], {}), '(alpha, beta, betaRemote)\n', (5665, 5690), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((5817, 5837), 'stp_raet.test.helper.cleanup', 'cleanup', (['alpha', 'beta'], {}), '(alpha, beta)\n', (5824, 5837), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((537, 544), 'stp_core.network.port_dispenser.genHa', 'genHa', ([], {}), '()\n', (542, 544), False, 'from stp_core.network.port_dispenser import genHa\n'), ((689, 696), 'stp_core.network.port_dispenser.genHa', 'genHa', ([], {}), '()\n', (694, 696), False, 'from stp_core.network.port_dispenser import genHa\n'), ((1385, 1392), 'stp_core.network.port_dispenser.genHa', 'genHa', ([], {}), '()\n', (1390, 1392), False, 'from stp_core.network.port_dispenser import genHa\n'), ((1586, 1593), 'stp_core.network.port_dispenser.genHa', 'genHa', ([], {}), '()\n', (1591, 1593), False, 'from stp_core.network.port_dispenser import genHa\n'), ((2462, 2469), 'stp_core.network.port_dispenser.genHa', 'genHa', ([], {}), '()\n', (2467, 2469), False, 'from 
stp_core.network.port_dispenser import genHa\n'), ((2714, 2721), 'stp_core.network.port_dispenser.genHa', 'genHa', ([], {}), '()\n', (2719, 2721), False, 'from stp_core.network.port_dispenser import genHa\n'), ((4585, 4592), 'stp_core.network.port_dispenser.genHa', 'genHa', ([], {}), '()\n', (4590, 4592), False, 'from stp_core.network.port_dispenser import genHa\n'), ((5303, 5327), 'stp_raet.test.helper.getRemote', 'getRemote', (['beta', '"""alpha"""'], {}), "(beta, 'alpha')\n", (5312, 5327), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n'), ((5766, 5790), 'stp_raet.test.helper.getRemote', 'getRemote', (['beta', '"""alpha"""'], {}), "(beta, 'alpha')\n", (5775, 5790), False, 'from stp_raet.test.helper import handshake, sendMsgs, cleanup, getRemote\n')] |
import threading
import sqlite3
from enum import Enum
import time
import datetime
from ..CTGP7Defines import CTGP7Defines
current_time_min = lambda: int(round(time.time() / 60))
class ConsoleMessageType(Enum):
SINGLE_MESSAGE = 0
TIMED_MESSAGE = 1
SINGLE_KICKMESSAGE = 2
TIMED_KICKMESSAGE = 3
class CTGP7ServerDatabase:
def __init__(self):
self.isConn = False
self.conn = None
self.lock = threading.Lock()
self.kickCallback = None
def setKickLogCallback(self, callback):
self.kickCallback = callback
def connect(self):
if not self.isConn:
self.conn = sqlite3.connect('RedYoshiBot/server/data/data.sqlite', check_same_thread=False)
self.isConn = True
def disconnect(self):
if (self.isConn):
self.commit()
with self.lock:
self.isConn = False
self.conn.close()
self.conn = None
def commit(self):
if (self.isConn):
with self.lock:
self.conn.commit()
def set_database_config(self, field, value):
with self.lock:
c = self.conn.cursor()
c.execute("UPDATE config SET value = ? WHERE field = ?", (str(value), str(field)))
def get_database_config(self, field):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM config WHERE field = ?", (str(field),))
for row in rows:
return row[1]
def get_online_region(self):
return int(self.get_database_config("onlregion"))
def get_debugonline_region(self):
return int(self.get_database_config("onlregion")) + 2
def set_online_region(self, value):
self.set_database_config("onlregion", value)
def get_track_freq_split(self):
return int(self.get_database_config("trackfreqsplit"))
def set_track_freq_split(self, value):
self.set_database_config("trackfreqsplit", value)
def get_ctww_version(self):
return int(self.get_database_config("ctwwver"))
def set_ctww_version(self, value):
self.set_database_config("ctwwver", value)
def get_beta_version(self):
return int(self.get_database_config("betaver"))
def set_beta_version(self, value):
self.set_database_config("betaver", value)
def get_stats_dirty(self):
return int(self.get_database_config("stats_dirty")) == 1
def set_stats_dirty(self, isDirty):
self.set_database_config("stats_dirty", 1 if isDirty else 0)
def get_most_played_tracks(self, course_type, amount):
currsplit = self.get_track_freq_split()
with self.lock:
c = self.conn.cursor()
c2 = self.conn.cursor()
rows = c.execute("SELECT * FROM stats_tracksfreq WHERE split = ? AND type = ? ORDER BY freq DESC", (int(currsplit), int(course_type)))
i = 0
ret = []
for row in rows:
if (i >= amount): break
prevValue = c2.execute("SELECT SUM(freq) FROM stats_tracksfreq WHERE id = ? AND split < ?", (str(row[0]), int(currsplit))).fetchone()[0]
ret.append([row[0], row[2], 0 if prevValue is None else prevValue])
i += 1
return ret
def increment_track_frequency(self, szsName, value):
currsplit = self.get_track_freq_split()
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM stats_tracksfreq WHERE id = ? AND split = ?", (str(szsName),int(currsplit)))
for _ in rows:
c.execute("UPDATE stats_tracksfreq SET freq = freq + {} WHERE id = ? AND split = ?".format(str(int(value))), (str(szsName),int(currsplit)))
return
courseType = CTGP7Defines.getTypeFromSZS(szsName)
if (courseType != -1):
c.execute('INSERT INTO stats_tracksfreq VALUES (?,?,?,?)', (str(szsName), int(currsplit), int(value), int(courseType)))
def get_stats(self):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM stats_general WHERE 1=1")
ret = {}
i = 0
names = [description[0] for description in rows.description]
for row in rows:
for val in row:
ret[names[i]] = val
i += 1
break
return ret
def increment_general_stats(self, param, value):
with self.lock:
c = self.conn.cursor()
c.execute("UPDATE stats_general SET {} = {} + {} WHERE 1=1".format(param, param, str(int(value))))
def fetch_stats_seqid(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM stats_seqid WHERE cID = ?", (int(cID),))
for row in rows:
newSeqID = row[1] + 1
c.execute("UPDATE stats_seqid SET seqID = ? WHERE cID = ?", (int(newSeqID), int(cID)))
return newSeqID
c.execute('INSERT INTO stats_seqid VALUES (?,?)', (int(cID), int(1)))
return 1
def get_stats_seqid(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM stats_seqid WHERE cID = ?", (int(cID),))
for row in rows:
return row[1]
return 0
def get_unique_console_count(self):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT COUNT(*) FROM stats_seqid")
for row in rows:
return row[0]
return 0
def delete_console_message(self, cID):
with self.lock:
c = self.conn.cursor()
c.execute("DELETE FROM console_message WHERE cID = ?", (int(cID),))
def set_console_message(self, cID, messageType, message, amountMin=None, isSilent=False):
currTime = current_time_min() if amountMin is not None else None
with self.lock:
c = self.conn.cursor()
c.execute("DELETE FROM console_message WHERE cID = ?", (int(cID),))
c.execute('INSERT INTO console_message VALUES (?,?,?,?,?)', (int(cID), str(message), int(messageType), currTime, amountMin))
if (self.kickCallback):
self.kickCallback(cID, messageType, message, amountMin, isSilent)
def get_console_message(self, cID, realConsoleID): # Real console ID is to keep track if cID is 0
ret = None
startTime = None
amountTime = None
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM console_message WHERE cID = ?", (int(cID),))
for row in rows:
messageText = row[1]
messageType = row[2]
startTime = row[3]
amountTime = row[4]
ret = [messageType, messageText, startTime, amountTime]
if (ret is not None):
if (ret[0] == ConsoleMessageType.SINGLE_KICKMESSAGE.value and self.get_console_is_admin(realConsoleID)):
ret[0] = ConsoleMessageType.SINGLE_MESSAGE.value
elif (ret[0] == ConsoleMessageType.TIMED_KICKMESSAGE.value and self.get_console_is_admin(realConsoleID)):
ret[0] = ConsoleMessageType.TIMED_MESSAGE.value
if ret[0] == ConsoleMessageType.SINGLE_MESSAGE.value or ret[0] == ConsoleMessageType.SINGLE_KICKMESSAGE.value:
self.delete_console_message(cID)
elif (startTime is not None and amountTime is not None and startTime + amountTime < current_time_min()):
self.delete_console_message(cID)
if (ret is None and cID != 0):
ret = self.get_console_message(0, realConsoleID)
return tuple(ret) if ret is not None else None
def set_console_is_verified(self, cID, isVerified):
wasVerified = self.get_console_is_verified(cID)
if (wasVerified == isVerified):
return
with self.lock:
c = self.conn.cursor()
if (isVerified):
c.execute('INSERT INTO verified_consoles VALUES (?)', (int(cID),))
else:
c.execute("DELETE FROM verified_consoles WHERE cID = ?", (int(cID),))
def get_console_is_verified(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM verified_consoles WHERE cID = ?", (int(cID),))
for row in rows:
return True
return False
def set_console_is_admin(self, cID, isAdmin):
wasAdmin = self.get_console_is_admin(cID)
if (wasAdmin == isAdmin):
return
with self.lock:
c = self.conn.cursor()
if (isAdmin):
c.execute('INSERT INTO admin_consoles VALUES (?)', (int(cID),))
else:
c.execute("DELETE FROM admin_consoles WHERE cID = ?", (int(cID),))
def get_console_is_admin(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM admin_consoles WHERE cID = ?", (int(cID),))
for row in rows:
return True
return False
def set_console_last_name(self, cID, lastName):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM console_name WHERE cID = ?", (int(cID),))
for row in rows:
c.execute("UPDATE console_name SET name = ? WHERE cID = ?", (str(lastName), int(cID)))
return
c.execute('INSERT INTO console_name VALUES (?,?)', (int(cID), str(lastName)))
def get_console_last_name(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM console_name WHERE cID = ?", (int(cID),))
for row in rows:
return str(row[1])
return "(Unknown)"
def set_console_vr(self, cID, vr):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM console_vr WHERE cID = ?", (int(cID),))
for row in rows:
c.execute("UPDATE console_vr SET ctvr = ?, cdvr = ? WHERE cID = ?", (int(vr[0]), int(vr[1]), int(cID)))
return
c.execute('INSERT INTO console_vr VALUES (?,?,?)', (int(cID), int(vr[0]), int(vr[1])))
def get_console_vr(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM console_vr WHERE cID = ?", (int(cID),))
for row in rows:
return (row[1], row[2])
return (1000, 1000)
def get_unique_console_vr_count(self):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT COUNT(*) FROM console_vr")
for row in rows:
return row[0]
return 0
def get_most_users_vr(self, mode, amount):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM console_vr ORDER BY {} DESC".format("ctvr" if mode == 0 else "cdvr"))
i = 0
ret = []
for row in rows:
if (i >= amount): break
ret.append([row[0], row[1] if mode == 0 else row[2]])
i += 1
return ret
def increment_today_launches(self):
with self.lock:
now = datetime.datetime.utcnow().strftime('%Y-%m-%d')
c = self.conn.cursor()
rows = c.execute("SELECT * FROM launch_times WHERE date = ?", (now,))
for row in rows:
c.execute("UPDATE launch_times SET value = ? WHERE date = ?", (row[1] + 1, now))
return
c.execute('INSERT INTO launch_times VALUES (?,?)', (now, 1))
def get_daily_launches(self, date: datetime.datetime):
with self.lock:
d = date.strftime('%Y-%m-%d')
c = self.conn.cursor()
rows = c.execute("SELECT * FROM launch_times WHERE date = ?", (d,))
for row in rows:
return row[1]
return 0
def increment_today_unique_consoles(self):
with self.lock:
now = datetime.datetime.utcnow().strftime('%Y-%m-%d')
c = self.conn.cursor()
rows = c.execute("SELECT * FROM new_launch_times WHERE date = ?", (now,))
for row in rows:
c.execute("UPDATE new_launch_times SET value = ? WHERE date = ?", (row[1] + 1, now))
return
c.execute('INSERT INTO new_launch_times VALUES (?,?)', (now, 1))
def get_daily_unique_consoles(self, date: datetime.datetime):
with self.lock:
d = date.strftime('%Y-%m-%d')
c = self.conn.cursor()
rows = c.execute("SELECT * FROM new_launch_times WHERE date = ?", (d,))
for row in rows:
return row[1]
return 0
def set_discord_link_console(self, discordID, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM discord_link WHERE cID = ?", (int(cID),))
for row in rows:
c.execute("UPDATE discord_link SET discordID = ? WHERE cID = ?", (int(discordID), int(cID)))
return
c.execute('INSERT INTO discord_link VALUES (?,?)', (int(cID), int(discordID)))
def get_discord_link_console(self, cID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM discord_link WHERE cID = ?", (int(cID),))
for row in rows:
return row[1]
return None
def get_discord_link_user(self, discordID):
with self.lock:
c = self.conn.cursor()
rows = c.execute("SELECT * FROM discord_link WHERE discordID = ?", (int(discordID),))
for row in rows:
return row[0]
return None
def delete_discord_link_console(self, cID):
with self.lock:
c = self.conn.cursor()
c.execute("DELETE FROM discord_link WHERE cID = ?", (int(cID),)) | [
"threading.Lock",
"time.time",
"sqlite3.connect",
"datetime.datetime.utcnow"
] | [((436, 452), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (450, 452), False, 'import threading\n'), ((644, 723), 'sqlite3.connect', 'sqlite3.connect', (['"""RedYoshiBot/server/data/data.sqlite"""'], {'check_same_thread': '(False)'}), "('RedYoshiBot/server/data/data.sqlite', check_same_thread=False)\n", (659, 723), False, 'import sqlite3\n'), ((161, 172), 'time.time', 'time.time', ([], {}), '()\n', (170, 172), False, 'import time\n'), ((11655, 11681), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (11679, 11681), False, 'import datetime\n'), ((12457, 12483), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (12481, 12483), False, 'import datetime\n')] |
import taichi as ti
from FD_wave.wave_module_2d4d import wave
from FD_wave.receiver_module import receiver
import Visualization.SFE_visual as vis
ti.init(arch=ti.gpu)
frame = 1
c = ti.field(dtype=ti.f32, shape=(600,600))
c_s = ti.field(dtype=ti.f32, shape=(1000, 1000))
wave_cs = wave(300, 400, 600, 600, 10.0, 10.0, 1, 50, 1, 1e-3, 3, 20)
wave_cs.mod_default()
wave_cs.PML_cal()
gui = ti.GUI("wave", (600, 600))
receiver_cs = receiver('PIC', 1000, 1000)
receiver_cs.rec_init(600, 600)
gui_rec = ti.GUI("rec", (1000, 1000))
while frame < 1000:
wave_cs.wave_field_cal(frame)
receiver_cs.rec_gather(wave_cs.p, int(frame/1))
receiver_cs.rec_dynamic(wave_cs.dt, int(frame/1), 12.0)
vis.SFE_2mix_show(c, wave_cs.p, wave_cs.model_v)
vis.SFE_gray_show(c_s, receiver_cs.rec_value)
gui.set_image(c)
gui_rec.set_image(c_s)
gui.show()
gui_rec.show()
frame += 1
path='./data/rec_seis.txt'
receiver_cs.export(receiver_cs.rec_value, path)
| [
"FD_wave.receiver_module.receiver",
"taichi.init",
"FD_wave.wave_module_2d4d.wave",
"Visualization.SFE_visual.SFE_2mix_show",
"taichi.field",
"taichi.GUI",
"Visualization.SFE_visual.SFE_gray_show"
] | [((147, 167), 'taichi.init', 'ti.init', ([], {'arch': 'ti.gpu'}), '(arch=ti.gpu)\n', (154, 167), True, 'import taichi as ti\n'), ((183, 223), 'taichi.field', 'ti.field', ([], {'dtype': 'ti.f32', 'shape': '(600, 600)'}), '(dtype=ti.f32, shape=(600, 600))\n', (191, 223), True, 'import taichi as ti\n'), ((229, 271), 'taichi.field', 'ti.field', ([], {'dtype': 'ti.f32', 'shape': '(1000, 1000)'}), '(dtype=ti.f32, shape=(1000, 1000))\n', (237, 271), True, 'import taichi as ti\n'), ((282, 342), 'FD_wave.wave_module_2d4d.wave', 'wave', (['(300)', '(400)', '(600)', '(600)', '(10.0)', '(10.0)', '(1)', '(50)', '(1)', '(0.001)', '(3)', '(20)'], {}), '(300, 400, 600, 600, 10.0, 10.0, 1, 50, 1, 0.001, 3, 20)\n', (286, 342), False, 'from FD_wave.wave_module_2d4d import wave\n'), ((388, 414), 'taichi.GUI', 'ti.GUI', (['"""wave"""', '(600, 600)'], {}), "('wave', (600, 600))\n", (394, 414), True, 'import taichi as ti\n'), ((431, 458), 'FD_wave.receiver_module.receiver', 'receiver', (['"""PIC"""', '(1000)', '(1000)'], {}), "('PIC', 1000, 1000)\n", (439, 458), False, 'from FD_wave.receiver_module import receiver\n'), ((500, 527), 'taichi.GUI', 'ti.GUI', (['"""rec"""', '(1000, 1000)'], {}), "('rec', (1000, 1000))\n", (506, 527), True, 'import taichi as ti\n'), ((707, 755), 'Visualization.SFE_visual.SFE_2mix_show', 'vis.SFE_2mix_show', (['c', 'wave_cs.p', 'wave_cs.model_v'], {}), '(c, wave_cs.p, wave_cs.model_v)\n', (724, 755), True, 'import Visualization.SFE_visual as vis\n'), ((760, 805), 'Visualization.SFE_visual.SFE_gray_show', 'vis.SFE_gray_show', (['c_s', 'receiver_cs.rec_value'], {}), '(c_s, receiver_cs.rec_value)\n', (777, 805), True, 'import Visualization.SFE_visual as vis\n')] |
critics={'<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, 'The Night Listener': 3.0},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 3.5},
'<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, 'Superman Returns': 3.5, 'The Night Listener': 4.0},
'<NAME>': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'The Night Listener': 4.5, 'Superman Returns': 4.0, 'You, Me and Dupree': 2.5},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 2.0},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
# a dictionary for movie critics and their ratings
print (critics['<NAME>'])#output is {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'The Night Listener': 3.0, 'You, Me and Dupree': 2.5}
print (critics['<NAME>']['Lady in the Water']) #output is 2.5
from math import sqrt
#returns a distance based similarity score for person1 and person2
def sim_distance(prefs,person1,person2):
si={} #get the list of shared items
for item in prefs[person1]:
if item in prefs[person2]:
si[item]=1 #this means their ratings are identical
if len(si)==0:
return 0 #if they are no ratings than it will be count as 0
#sum_of_squares=sum([pow(prefs[person1][item]-prefs[person2][item],2)
def distance(dict,per1,per2):
shared_items={}
for item in dict[per1]: #an item is in the dict of person 1
if item in dict[per2]: #if same item is in the dict of person 2
shared_items[item]=1 #the value will be 1
if len(shared_items)==0:
return 0
inital_dis=sum([pow(dict[per1][item]-dict[per2][item],2)
for item in dict[per1] if item in dict[per2] ])
all_sum=sqrt(inital_dis)
return 1/(1+all_sum)
print (distance(critics,'<NAME>','Toby'))
print (distance(critics, '<NAME>', '<NAME>'))
# Perason correlation score
def sim_pearson(dict,pers1,pers2):
si={}
for item in dict[pers1]:#an item is in the dict for person 1
if item in dict[pers2]: #the item is also is in the dict for person 2
si[item]=1 #the value will be 1
n=len(si)
if n==0: #if there is no commen item than the value will be 0
return 0
#adding all the preferences
sum1=sum([dict[pers1][item] for item in si])
sum2=sum([dict[pers2][item] for item in si])
sum1sq=sum([pow(dict[pers1][item],2) for item in si])
sum2sq=sum([pow(dict[pers2][item],2) for item in si])
All_Sum=sum([dict[pers1][item]*dict[pers2][item] for item in si])
num=All_Sum-(sum1*sum2/n)
den=sqrt((sum1sq-pow(sum1,2)/n)*(sum2sq-pow(sum2,2)/n))
if den==0:
return 0
r= num/den
return r
print (sim_pearson(critics,'<NAME>','Toby'))
#returns the best matches for person from the critics dict
#number of results and similarity function are optinal params
| [
"math.sqrt"
] | [((2218, 2234), 'math.sqrt', 'sqrt', (['inital_dis'], {}), '(inital_dis)\n', (2222, 2234), False, 'from math import sqrt\n')] |
from django.contrib import admin
from applications.core.models import Clients
admin.site.register(Clients)
| [
"django.contrib.admin.site.register"
] | [((81, 109), 'django.contrib.admin.site.register', 'admin.site.register', (['Clients'], {}), '(Clients)\n', (100, 109), False, 'from django.contrib import admin\n')] |
import itertools
import logging
import netCDF4
import numpy
from .. import core
from ..constants import masked as cfdm_masked
from ..decorators import (
_inplace_enabled,
_inplace_enabled_define_and_cleanup,
_manage_log_level_via_verbosity,
)
from ..functions import abspath
from ..mixin.container import Container
from ..mixin.netcdf import NetCDFHDF5
from . import NumpyArray, abstract
logger = logging.getLogger(__name__)
class Data(Container, NetCDFHDF5, core.Data):
"""An orthogonal multidimensional array with masking and units.
.. versionadded:: (cfdm) 1.7.0
"""
def __init__(
self,
array=None,
units=None,
calendar=None,
fill_value=None,
source=None,
copy=True,
dtype=None,
mask=None,
_use_array=True,
**kwargs,
):
"""**Initialisation**
:Parameters:
array: data_like, optional
The array of values.
{{data_like}}
Ignored if the *source* parameter is set.
*Parameter example:*
``array=[34.6]``
*Parameter example:*
``array=[[1, 2], [3, 4]]``
*Parameter example:*
``array=numpy.ma.arange(10).reshape(2, 1, 5)``
units: `str`, optional
The physical units of the data. Ignored if the *source*
parameter is set.
The units may also be set after initialisation with the
`set_units` method.
*Parameter example:*
``units='km hr-1'``
*Parameter example:*
``units='days since 2018-12-01'``
calendar: `str`, optional
The calendar for reference time units. Ignored if the
*source* parameter is set.
The calendar may also be set after initialisation with the
`set_calendar` method.
*Parameter example:*
``calendar='360_day'``
fill_value: optional
The fill value of the data. By default, or if set to
`None`, the `numpy` fill value appropriate to the array's
data type will be used (see
`numpy.ma.default_fill_value`). Ignored if the *source*
parameter is set.
The fill value may also be set after initialisation with
the `set_fill_value` method.
*Parameter example:*
``fill_value=-999.``
dtype: data-type, optional
The desired data-type for the data. By default the
data-type will be inferred form the *array* parameter.
The data-type may also be set after initialisation
with the `dtype` attribute.
*Parameter example:*
``dtype=float``
*Parameter example:*
``dtype='float32'``
*Parameter example:*
``dtype=numpy.dtype('i2')``
mask: data_like, optional
Apply this mask to the data given by the *array*
parameter. By default, or if *mask* is `None`, no mask
is applied. May be any data_like object that
broadcasts to *array*. Masking will be carried out
where mask elements evaluate to `True`.
{{data_like}}
This mask will applied in addition to any mask already
defined by the *array* parameter.
source: optional
Initialise the array, units, calendar and fill value
from those of *source*.
{{init source}}
copy: `bool`, optional
If False then do not deep copy input parameters prior
to initialisation. By default arguments are deep
copied.
kwargs: ignored
Not used. Present to facilitate subclassing.
"""
if dtype is not None:
if isinstance(array, abstract.Array):
array = array.array
elif not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = array.astype(dtype)
array = NumpyArray(array)
if mask is not None:
if isinstance(array, abstract.Array):
array = array.array
elif not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = numpy.ma.array(array, mask=mask)
array = NumpyArray(array)
super().__init__(
array=array,
units=units,
calendar=calendar,
fill_value=fill_value,
source=source,
copy=copy,
_use_array=_use_array,
)
self._initialise_netcdf(source)
def __array__(self, *dtype):
"""The numpy array interface.
.. versionadded:: (cfdm) 1.7.0
:Parameters:
dtype: optional
Typecode or data-type to which the array is cast.
:Returns:
`numpy.ndarray`
An independent numpy array of the data.
**Examples:**
>>> d = {{package}}.{{class}}([1, 2, 3])
>>> a = numpy.array(d)
>>> print(type(a))
<class 'numpy.ndarray'>
>>> a[0] = -99
>>> d
<{{repr}}{{class}}(3): [1, 2, 3]>
>>> b = numpy.array(d, float)
>>> print(b)
[1. 2. 3.]
"""
array = self.array
if not dtype:
return array
else:
return array.astype(dtype[0], copy=False)
def __repr__(self):
"""Called by the `repr` built-in function.
x.__repr__() <==> repr(x)
"""
try:
shape = self.shape
except AttributeError:
shape = ""
else:
shape = str(shape)
shape = shape.replace(",)", ")")
return f"<{ self.__class__.__name__}{shape}: {self}>"
def __format__(self, format_spec):
"""Interpret format specifiers for size 1 arrays.
**Examples:**
>>> d = {{package}}.{{class}}(9, 'metres')
>>> f"{d}"
'9 metres'
>>> f"{d!s}"
'9 metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(): 9 metres>'
>>> f"{d:.3f}"
'9.000'
>>> d = {{package}}.{{class}}([[9]], 'metres')
>>> f"{d}"
'[[9]] metres'
>>> f"{d!s}"
'[[9]] metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(1, 1): [[9]] metres>'
>>> f"{d:.3f}"
'9.000'
>>> d = {{package}}.{{class}}([9, 10], 'metres')
>>> f"{d}"
>>> '[9, 10] metres'
>>> f"{d!s}"
>>> '[9, 10] metres'
>>> f"{d!r}"
'<{{repr}}{{class}}(2): [9, 10] metres>'
>>> f"{d:.3f}"
Traceback (most recent call last):
...
ValueError: Can't format Data array of size 2 with format code .3f
"""
if not format_spec:
return super().__format__("")
n = self.size
if n == 1:
return "{x:{f}}".format(x=self.first_element(), f=format_spec)
raise ValueError(
f"Can't format Data array of size {n} with "
f"format code {format_spec}"
)
def __getitem__(self, indices):
"""Return a subspace of the data defined by indices.
d.__getitem__(indices) <==> d[indices]
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__setitem__`, `_parse_indices`
:Returns:
`{{class}}`
The subspace of the data.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1].shape
(1, 10, 1)
>>> d[:, 0].shape
(1, 1, 9)
>>> d[..., 6:3:-1, 3:6].shape
(1, 3, 3)
>>> d[0, [2, 9], [4, 8]].shape
(1, 2, 2)
>>> d[0, :, -2].shape
(1, 10, 1)
"""
indices = self._parse_indices(indices)
array = self._get_Array(None)
if array is None:
raise ValueError("No array!!")
array = array[tuple(indices)]
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
def __int__(self):
"""Called by the `int` built-in function.
x.__int__() <==> int(x)
"""
if self.size != 1:
raise TypeError(
"only length-1 arrays can be converted to "
f"Python scalars. Got {self}"
)
return int(self.array)
def __iter__(self):
"""Called when an iterator is required.
x.__iter__() <==> iter(x)
**Examples:**
>>> d = {{package}}.{{class}}([1, 2, 3], 'metres')
>>> for e in d:
... print(repr(e))
...
1
2
3
>>> d = {{package}}.{{class}}([[1, 2], [4, 5]], 'metres')
>>> for e in d:
... print(repr(e))
...
<{{repr}}Data(2): [1, 2] metres>
<{{repr}}Data(2): [4, 5] metres>
>>> d = {{package}}.{{class}}(34, 'metres')
>>> for e in d:
... print(repr(e))
Traceback (most recent call last):
...
TypeError: Iteration over 0-d Data
"""
ndim = self.ndim
if not ndim:
raise TypeError(f"Iteration over 0-d {self.__class__.__name__}")
if ndim == 1:
i = iter(self.array)
while 1:
try:
yield next(i)
except StopIteration:
return
else:
# ndim > 1
for n in range(self.shape[0]):
out = self[n, ...]
out.squeeze(0, inplace=True)
yield out
def __setitem__(self, indices, value):
"""Assign to data elements defined by indices.
d.__setitem__(indices, x) <==> d[indices]=x
Indexing follows rules that are very similar to the numpy indexing
rules, the only differences being:
* An integer index i takes the i-th element but does not reduce
the rank by one.
* When two or more dimensions' indices are sequences of integers
then these indices work independently along each dimension
(similar to the way vector subscripts work in Fortran). This is
the same behaviour as indexing on a Variable object of the
netCDF4 package.
**Broadcasting**
The value, or values, being assigned must be broadcastable to the
shape defined by the indices, using the numpy broadcasting rules.
**Missing data**
Data array elements may be set to missing values by assigning them
to `masked`. Missing values may be unmasked by assigning them to
any other value.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `__getitem__`, `_parse_indices`
:Returns:
`None`
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
>>> d.shape
(1, 10, 9)
>>> d[:, :, 1] = -10
>>> d[:, 0] = range(9)
>>> d[..., 6:3:-1, 3:6] = numpy.arange(-18, -9).reshape(3, 3)
>>> d[0, [2, 9], [4, 8]] = {{package}}.{{class}}([[-2, -3]])
>>> d[0, :, -2] = {{package}}.masked
"""
indices = self._parse_indices(indices)
array = self.array
if value is cfdm_masked or numpy.ma.isMA(value):
# The data is not masked but the assignment is masking
# elements, so turn the non-masked array into a masked
# one.
array = array.view(numpy.ma.MaskedArray)
self._set_subspace(array, indices, numpy.asanyarray(value))
self._set_Array(array, copy=False)
    def __str__(self):
        """Called by the `str` built-in function.

        x.__str__() <==> str(x)

        Produces a compact summary: at most the first, middle and last
        elements in brackets, followed by units and/or calendar.
        Reference times are shown as date-times when possible.
        """
        units = self.get_units(None)
        calendar = self.get_calendar(None)
        isreftime = False
        if units is not None:
            if isinstance(units, str):
                # Units such as 'days since 2018-12-01' are reference times
                isreftime = "since" in units
            else:
                # Non-string units object: display as unknown
                units = "??"
        try:
            first = self.first_element()
        except Exception:
            # No element is available: fall back to units/calendar only
            out = ""
            if units and not isreftime:
                out += f" {units}"
            if calendar:
                out += f" {calendar}"
            return out
        size = self.size
        shape = self.shape
        ndim = self.ndim
        open_brackets = "[" * ndim
        close_brackets = "]" * ndim
        # Mask flags for (first, middle, last) display elements
        mask = [False, False, False]
        if size == 1:
            if isreftime:
                # Convert reference time to date-time
                if first is numpy.ma.masked:
                    # Use 0 as a placeholder; it stays masked on display
                    first = 0
                    mask[0] = True
                try:
                    first = type(self)(
                        numpy.ma.array(first, mask=mask[0]), units, calendar
                    ).datetime_array
                except (ValueError, OverflowError):
                    first = "??"
            out = f"{open_brackets}{first}{close_brackets}"
        else:
            last = self.last_element()
            if isreftime:
                if last is numpy.ma.masked:
                    last = 0
                    mask[-1] = True
                # Convert reference times to date-times
                try:
                    first, last = type(self)(
                        numpy.ma.array(
                            [first, last], mask=(mask[0], mask[-1])
                        ),
                        units,
                        calendar,
                    ).datetime_array
                except (ValueError, OverflowError):
                    first, last = ("??", "??")
            if size > 3:
                out = f"{open_brackets}{first}, ..., {last}{close_brackets}"
            elif shape[-1:] == (3,):
                # Exactly three elements along the last axis: show all three
                middle = self.second_element()
                if isreftime:
                    # Convert reference time to date-time
                    if middle is numpy.ma.masked:
                        middle = 0
                        mask[1] = True
                    try:
                        middle = type(self)(
                            numpy.ma.array(middle, mask=mask[1]),
                            units,
                            calendar,
                        ).datetime_array
                    except (ValueError, OverflowError):
                        middle = "??"
                out = (
                    f"{open_brackets}{first}, {middle}, {last}{close_brackets}"
                )
            elif size == 3:
                out = f"{open_brackets}{first}, ..., {last}{close_brackets}"
            else:
                out = f"{open_brackets}{first}, {last}{close_brackets}"
        if isreftime:
            # Date-times already embed the units; show only the calendar
            if calendar:
                out += f" {calendar}"
        elif units:
            out += f" {units}"
        return out
# ----------------------------------------------------------------
# Private methods
# ----------------------------------------------------------------
def _item(self, index):
"""Return an element of the data as a scalar.
It is assumed, but not checked, that the given index selects
exactly one element.
:Parameters:
index:
:Returns:
The selected element of the data.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2, 3]], 'km')
>>> x = d._item((0, -1))
>>> print(x, type(x))
3 <class 'int'>
>>> x = d._item((0, 1))
>>> print(x, type(x))
2 <class 'int'>
>>> d[0, 1] = {{package}}.masked
>>> d._item((slice(None), slice(1, 2)))
masked
"""
array = self[index].array
if not numpy.ma.isMA(array):
return array.item()
mask = array.mask
if mask is numpy.ma.nomask or not mask.item():
return array.item()
return numpy.ma.masked
def _parse_axes(self, axes):
"""Parses the data axes and returns valid non-duplicate axes.
:Parameters:
axes: (sequence of) `int`
The axes of the data.
{{axes int examples}}
:Returns:
`tuple`
**Examples:**
>>> d._parse_axes(1)
(1,)
>>> e._parse_axes([0, 2])
(0, 2)
"""
if axes is None:
return axes
ndim = self.ndim
if isinstance(axes, int):
axes = (axes,)
axes2 = []
for axis in axes:
if 0 <= axis < ndim:
axes2.append(axis)
elif -ndim <= axis < 0:
axes2.append(axis + ndim)
else:
raise ValueError(f"Invalid axis: {axis!r}")
# Check for duplicate axes
n = len(axes2)
if n > len(set(axes2)) >= 1:
raise ValueError(f"Duplicate axis: {axes2}")
return tuple(axes2)
def _set_Array(self, array, copy=True):
"""Set the array.
.. seealso:: `_set_CompressedArray`
:Parameters:
array: `numpy` array_like or `Array`, optional
The array to be inserted.
:Returns:
`None`
**Examples:**
>>> d._set_Array(a)
"""
if not isinstance(array, abstract.Array):
if not isinstance(array, numpy.ndarray):
array = numpy.asanyarray(array)
array = NumpyArray(array)
super()._set_Array(array, copy=copy)
    def _set_CompressedArray(self, array, copy=True):
        """Set the compressed array.

        Delegates to `_set_Array`; kept as a separate entry point so
        that subclasses can intercept compressed-array insertion.

        .. versionadded:: (cfdm) 1.7.11

        .. seealso:: `_set_Array`

        :Parameters:

            array: subclass of `CompressedArray`
                The compressed array to be inserted.

            copy: `bool`, optional
                If False then do not copy the array prior to insertion.

        :Returns:

            `None`

        """
        self._set_Array(array, copy=copy)
    @classmethod
    def _set_subspace(cls, array, indices, value):
        """Set a subspace of the data array defined by indices.

        Any index that is not a `slice` is treated as a sequence of
        integers.  When two or more such indices are present, numpy's
        orthogonal assignment cannot be used directly, so the
        assignment is decomposed into multiple sliced assignments.
        """
        axes_with_list_indices = [
            i for i, x in enumerate(indices) if not isinstance(x, slice)
        ]
        if len(axes_with_list_indices) < 2:
            # --------------------------------------------------------
            # At most one axis has a list-of-integers index so we can
            # do a normal numpy assignment
            # --------------------------------------------------------
            array[tuple(indices)] = value
        else:
            # --------------------------------------------------------
            # At least two axes have list-of-integers indices so we
            # can't do a normal numpy assignment
            # --------------------------------------------------------
            indices1 = indices[:]
            for i, x in enumerate(indices):
                if i in axes_with_list_indices:
                    # This index is a list of integers: pair up
                    # consecutive integers and convert each pair into
                    # a slice
                    y = []
                    args = [iter(x)] * 2
                    for start, stop in itertools.zip_longest(*args):
                        # NOTE(review): `not stop` also triggers for
                        # stop == 0, not only for the odd-length
                        # trailing element (stop is None) — confirm
                        # callers never index position 0 second in a
                        # pair
                        if not stop:
                            y.append(slice(start, start + 1))
                        else:
                            step = stop - start
                            stop += 1
                            y.append(slice(start, stop, step))
                    indices1[i] = y
                else:
                    indices1[i] = (x,)
            if numpy.size(value) == 1:
                # Scalar value: broadcast it into every sub-slice
                for i in itertools.product(*indices1):
                    array[i] = value
            else:
                # Non-scalar value: build matching sub-indices into
                # `value` so each sliced assignment picks up the
                # corresponding part of it
                indices2 = []
                ndim_difference = array.ndim - numpy.ndim(value)
                for i, n in enumerate(numpy.shape(value)):
                    if n == 1:
                        # Size 1 axis of `value` broadcasts
                        indices2.append((slice(None),))
                    elif i + ndim_difference in axes_with_list_indices:
                        # Split this axis of `value` into pairs to
                        # mirror the pairing of the target indices
                        y = []
                        start = 0
                        while start < n:
                            stop = start + 2
                            y.append(slice(start, stop))
                            start = stop
                        indices2.append(y)
                    else:
                        indices2.append((slice(None),))
                for i, j in zip(
                    itertools.product(*indices1), itertools.product(*indices2)
                ):
                    array[i] = value[j]
# ----------------------------------------------------------------
# Attributes
# ----------------------------------------------------------------
@property
def compressed_array(self):
"""Returns an independent numpy array of the compressed data.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `get_compressed_axes`, `get_compressed_dimension`,
`get_compression_type`
:Returns:
`numpy.ndarray`
An independent numpy array of the compressed data.
**Examples:**
>>> a = d.compressed_array
"""
ca = self._get_Array(None)
if not ca.get_compression_type():
raise ValueError("not compressed: can't get compressed array")
return ca.compressed_array
@property
def datetime_array(self):
"""Returns an independent numpy array of datetimes.
Specifically, returns an independent numpy array containing
the date-time objects corresponding to times since a reference
date.
Only applicable for reference time units.
If the calendar has not been set then the CF default calendar of
'standard' (i.e. the mixed Gregorian/Julian calendar as defined by
Udunits) will be used.
Conversions are carried out with the `netCDF4.num2date` function.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `array`, `datetime_as_string`
:Returns:
`numpy.ndarray`
An independent numpy array of the date-time objects.
**Examples:**
>>> d = {{package}}.{{class}}([31, 62, 90], units='days since 2018-12-01')
>>> a = d.datetime_array
>>> print(a)
[cftime.DatetimeGregorian(2019, 1, 1, 0, 0, 0, 0)
cftime.DatetimeGregorian(2019, 2, 1, 0, 0, 0, 0)
cftime.DatetimeGregorian(2019, 3, 1, 0, 0, 0, 0)]
>>> print(a[1])
2019-02-01 00:00:00
>>> d = {{package}}.{{class}}(
... [31, 62, 90], units='days since 2018-12-01', calendar='360_day')
>>> a = d.datetime_array
>>> print(a)
[cftime.Datetime360Day(2019, 1, 2, 0, 0, 0, 0)
cftime.Datetime360Day(2019, 2, 3, 0, 0, 0, 0)
cftime.Datetime360Day(2019, 3, 1, 0, 0, 0, 0)]
>>> print(a[1])
2019-02-03 00:00:00
"""
array = self.array
mask = None
if numpy.ma.isMA(array):
# num2date has issues if the mask is nomask
mask = array.mask
if mask is numpy.ma.nomask or not numpy.ma.is_masked(array):
mask = None
array = array.view(numpy.ndarray)
if mask is not None and not array.ndim:
# Fix until num2date copes with scalar aarrays containing
# missing data
return array
array = netCDF4.num2date(
array,
units=self.get_units(None),
calendar=self.get_calendar("standard"),
only_use_cftime_datetimes=True,
)
if mask is None:
# There is no missing data
array = numpy.array(array, dtype=object)
else:
# There is missing data
array = numpy.ma.masked_where(mask, array)
if not numpy.ndim(array):
array = numpy.ma.masked_all((), dtype=object)
return array
@property
def datetime_as_string(self):
"""Returns an independent numpy array with datetimes as strings.
Specifically, returns an independent numpy array containing
string representations of times since a reference date.
Only applicable for reference time units.
If the calendar has not been set then the CF default calendar of
"standard" (i.e. the mixed Gregorian/Julian calendar as defined by
Udunits) will be used.
Conversions are carried out with the `netCDF4.num2date` function.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `array`, `datetime_array`
:Returns:
`numpy.ndarray`
An independent numpy array of the date-time strings.
**Examples:**
>>> d = {{package}}.{{class}}([31, 62, 90], units='days since 2018-12-01')
>>> print(d.datetime_as_string)
['2019-01-01 00:00:00' '2019-02-01 00:00:00' '2019-03-01 00:00:00']
>>> d = {{package}}.{{class}}(
... [31, 62, 90], units='days since 2018-12-01', calendar='360_day')
>>> print(d.datetime_as_string)
['2019-01-02 00:00:00' '2019-02-03 00:00:00' '2019-03-01 00:00:00']
"""
return self.datetime_array.astype(str)
@property
def mask(self):
"""The Boolean missing data mask of the data array.
The Boolean mask has True where the data array has missing data
and False otherwise.
:Returns:
`{{class}}`
The Boolean mask as data.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.ma.array(
... [[280.0, -99, -99, -99],
... [281.0, 279.0, 278.0, 279.5]],
... mask=[[0, 1, 1, 1], [0, 0, 0, 0]]
... ))
>>> d
<{{repr}}Data(2, 4): [[280.0, ..., 279.5]]>
>>> print(d.array)
[[280.0 -- -- --]
[281.0 279.0 278.0 279.5]]
>>> d.mask
<{{repr}}Data(2, 4): [[False, ..., False]]>
>>> print(d.mask.array)
[[False True True True]
[False False False False]]
"""
return type(self)(numpy.ma.getmaskarray(self.array))
# ----------------------------------------------------------------
# Methods
# ----------------------------------------------------------------
def any(self):
"""Test whether any data array elements evaluate to True.
Performs a logical or over the data array and returns the
result. Masked values are considered as False during computation.
:Returns:
`bool`
`True` if any data array elements evaluate to True,
otherwise `False`.
**Examples:**
>>> d = {{package}}.{{class}}([[0, 0, 0]])
>>> d.any()
False
>>> d[0, 0] = {{package}}.masked
>>> print(d.array)
[[-- 0 0]]
>>> d.any()
False
>>> d[0, 1] = 3
>>> print(d.array)
[[-- 3 0]]
>>> d.any()
True
>>> d[...] = {{package}}.masked
>>> print(d.array)
[[-- -- --]]
>>> d.any()
False
"""
masked = self.array.any()
if masked is numpy.ma.masked:
masked = False
return masked
@_inplace_enabled(default=False)
def apply_masking(
self,
fill_values=None,
valid_min=None,
valid_max=None,
valid_range=None,
inplace=False,
):
"""Apply masking.
Masking is applied according to the values of the keyword
parameters.
Elements that are already masked remain so.
.. versionadded:: (cfdm) 1.8.2
.. seealso:: `get_fill_value`, `mask`
:Parameters:
fill_values: `bool` or sequence of scalars, optional
Specify values that will be set to missing data. Data
elements exactly equal to any of the values are set to
missing data.
If True then the value returned by the `get_fill_value`
method, if such a value exists, is used.
Zero or more values may be provided in a sequence of
scalars.
*Parameter example:*
Specify a fill value of 999: ``fill_values=[999]``
*Parameter example:*
Specify fill values of 999 and -1.0e30:
``fill_values=[999, -1.0e30]``
*Parameter example:*
Use the fill value already set for the data:
``fill_values=True``
*Parameter example:*
Use no fill values: ``fill_values=False`` or
``fill_value=[]``
valid_min: number, optional
A scalar specifying the minimum valid value. Data elements
strictly less than this number will be set to missing
data.
valid_max: number, optional
A scalar specifying the maximum valid value. Data elements
strictly greater than this number will be set to missing
data.
valid_range: (number, number), optional
A vector of two numbers specifying the minimum and maximum
valid values, equivalent to specifying values for both
*valid_min* and *valid_max* parameters. The *valid_range*
parameter must not be set if either *valid_min* or
*valid_max* is defined.
*Parameter example:*
``valid_range=[-999, 10000]`` is equivalent to setting
``valid_min=-999, valid_max=10000``
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with masked values. If the operation was in-place
then `None` is returned.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(12).reshape(3, 4), 'm')
>>> d[1, 1] = {{package}}.masked
>>> print(d.array)
[[0 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking().array)
[[0 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=[0]).array)
[[-- 1 2 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=[0, 11]).array)
[[-- 1 2 3]
[4 -- 6 7]
[8 9 10 --]]
>>> print(d.apply_masking(valid_min=3).array)
[[-- -- -- 3]
[4 -- 6 7]
[8 9 10 11]]
>>> print(d.apply_masking(valid_max=6).array)
[[0 1 2 3]
[4 -- 6 --]
[-- -- -- --]]
>>> print(d.apply_masking(valid_range=[2, 8]).array)
[[-- -- 2 3]
[4 -- 6 7]
[8 -- -- --]]
>>> d.set_fill_value(7)
>>> print(d.apply_masking(fill_values=True).array)
[[0 1 2 3]
[4 -- 6 --]
[8 9 10 11]]
>>> print(d.apply_masking(fill_values=True,
... valid_range=[2, 8]).array)
[[-- -- 2 3]
[4 -- 6 --]
[8 -- -- --]]
"""
if valid_range is not None:
if valid_min is not None or valid_max is not None:
raise ValueError(
"Can't set 'valid_range' parameter with either the "
"'valid_min' nor 'valid_max' parameters"
)
try:
if len(valid_range) != 2:
raise ValueError(
"'valid_range' parameter must be a vector of "
"two elements"
)
except TypeError:
raise ValueError(
"'valid_range' parameter must be a vector of "
"two elements"
)
valid_min, valid_max = valid_range
d = _inplace_enabled_define_and_cleanup(self)
if fill_values is None:
fill_values = False
if isinstance(fill_values, bool):
if fill_values:
fill_value = self.get_fill_value(None)
if fill_value is not None:
fill_values = (fill_value,)
else:
fill_values = ()
else:
fill_values = ()
else:
try:
_ = iter(fill_values)
except TypeError:
raise TypeError(
"'fill_values' parameter must be a sequence or "
f"of type bool. Got type {type(fill_values)}"
)
else:
if isinstance(fill_values, str):
raise TypeError(
"'fill_values' parameter must be a sequence or "
f"of type bool. Got type {type(fill_values)}"
)
mask = None
if fill_values:
array = self.array
mask = array == fill_values[0]
for fill_value in fill_values[1:]:
mask |= array == fill_value
if valid_min is not None:
if mask is None:
array = self.array
mask = array < valid_min
else:
mask |= array < valid_min
if valid_max is not None:
if mask is None:
array = self.array
mask = array > valid_max
else:
mask |= array > valid_max
if mask is not None:
array = numpy.ma.where(mask, cfdm_masked, array)
d._set_Array(array, copy=False)
return d
def copy(self, array=True):
"""Return a deep copy.
``d.copy()`` is equivalent to ``copy.deepcopy(d)``.
:Parameters:
array: `bool`, optional
If False then do not copy the array. By default the array
is copied.
:Returns:
`{{class}}`
The deep copy.
**Examples:**
>>> e = d.copy()
>>> e = d.copy(array=False)
"""
return super().copy(array=array)
def creation_commands(
self, name="data", namespace=None, indent=0, string=True
):
"""Return the commands that would create the data object.
.. versionadded:: (cfdm) 1.8.7.0
:Parameters:
name: `str` or `None`, optional
Set the variable name of `Data` object that the commands
create.
{{namespace: `str`, optional}}
{{indent: `int`, optional}}
{{string: `bool`, optional}}
:Returns:
{{returns creation_commands}}
**Examples:**
>>> d = {{package}}.{{class}}([[0.0, 45.0], [45.0, 90.0]],
... units='degrees_east')
>>> print(d.creation_commands())
data = {{package}}.{{class}}([[0.0, 45.0], [45.0, 90.0]], units='degrees_east', dtype='f8')
>>> d = {{package}}.{{class}}(['alpha', 'beta', 'gamma', 'delta'],
... mask = [1, 0, 0, 0])
>>> d.creation_commands(name='d', namespace='', string=False)
["d = Data(['', 'beta', 'gamma', 'delta'], dtype='U5', mask=Data([True, False, False, False], dtype='b1'))"]
"""
namespace0 = namespace
if namespace is None:
namespace = self._package() + "."
elif namespace and not namespace.endswith("."):
namespace += "."
mask = self.mask
if mask.any():
if name == "mask":
raise ValueError(
"When the data is masked, the 'name' parameter "
"can not have the value 'mask'"
)
masked = True
array = self.filled().array.tolist()
else:
masked = False
array = self.array.tolist()
units = self.get_units(None)
if units is None:
units = ""
else:
units = f", units={units!r}"
calendar = self.get_calendar(None)
if calendar is None:
calendar = ""
else:
calendar = f", calendar={calendar!r}"
fill_value = self.get_fill_value(None)
if fill_value is None:
fill_value = ""
else:
fill_value = f", fill_value={fill_value}"
dtype = self.dtype.descr[0][1][1:]
if masked:
mask = mask.creation_commands(
name="mask", namespace=namespace0, indent=0, string=True
)
mask = mask.replace("mask = ", "mask=", 1)
mask = f", {mask}"
else:
mask = ""
if name is None:
name = ""
else:
name = name + " = "
out = []
out.append(
f"{name}{namespace}{self.__class__.__name__}({array}{units}"
f"{calendar}, dtype={dtype!r}{mask}{fill_value})"
)
if string:
indent = " " * indent
out[0] = indent + out[0]
out = ("\n" + indent).join(out)
return out
@_inplace_enabled(default=False)
def filled(self, fill_value=None, inplace=False):
"""Replace masked elements with the fill value.
.. versionadded:: (cfdm) 1.8.7.0
:Parameters:
fill_value: scalar, optional
The fill value. By default the fill returned by
`get_fill_value` is used, or if this is not set then the
netCDF default fill value for the data type is used (as
defined by `netCDF.fillvals`).
{{inplace: `bool`, optional}}
:Returns:
`Data` or `None`
The filled data, or `None` if the operation was in-place.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2, 3]])
>>> print(d.filled().array)
[[1 2 3]]
>>> d[0, 0] = {{package}}.masked
>>> print(d.filled().array)
[[-9223372036854775806 2 3]]
>>> d.set_fill_value(-99)
>>> print(d.filled().array)
[[-99 2 3]]
>>> print(d.filled(1e10).array)
[[10000000000 2 3]]
"""
d = _inplace_enabled_define_and_cleanup(self)
if fill_value is None:
fill_value = d.get_fill_value(None)
if fill_value is None:
default_fillvals = netCDF4.default_fillvals
fill_value = default_fillvals.get(d.dtype.str[1:], None)
if fill_value is None and d.dtype.kind in ("SU"):
fill_value = default_fillvals.get("S1", None)
if fill_value is None: # should not be None by this stage
raise ValueError(
"Can't determine fill value for "
f"data type {d.dtype.str!r}"
) # pragma: no cover
array = self.array
if numpy.ma.isMA(array):
array = array.filled(fill_value)
d._set_Array(array, copy=False)
return d
@_inplace_enabled(default=False)
def insert_dimension(self, position=0, inplace=False):
"""Expand the shape of the data array.
Inserts a new size 1 axis, corresponding to a given position in
the data array shape.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `squeeze`, `transpose`
:Parameters:
position: `int`, optional
Specify the position that the new axis will have in the
data array. By default the new axis has position 0, the
slowest varying position. Negative integers counting from
the last position are allowed.
*Parameter example:*
``position=2``
*Parameter example:*
``position=-1``
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with expanded axes. If the operation was in-place
then `None` is returned.
**Examples:**
>>> d.shape
(19, 73, 96)
>>> d.insert_dimension('domainaxis3').shape
(1, 96, 73, 19)
>>> d.insert_dimension('domainaxis3', position=3).shape
(19, 73, 96, 1)
>>> d.insert_dimension('domainaxis3', position=-1, inplace=True)
>>> d.shape
(19, 73, 1, 96)
"""
d = _inplace_enabled_define_and_cleanup(self)
# Parse position
ndim = d.ndim
if -ndim - 1 <= position < 0:
position += ndim + 1
elif not 0 <= position <= ndim:
raise ValueError(
f"Can't insert dimension: Invalid position: {position!r}"
)
array = numpy.expand_dims(self.array, position)
d._set_Array(array, copy=False)
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
    def get_count(self, default=ValueError()):
        """Return the count variable for a compressed array.

        .. versionadded:: (cfdm) 1.7.0

        .. seealso:: `get_index`, `get_list`

        :Parameters:

            default: optional
                Return the value of the *default* parameter if a count
                variable has not been set. If set to an `Exception`
                instance then it will be raised instead.

        :Returns:

                The count variable.

        **Examples:**

        >>> c = d.get_count()

        """
        try:
            return self._get_Array().get_count()
        except (AttributeError, ValueError):
            # No underlying array, or it has no count variable
            return self._default(
                default, f"{self.__class__.__name__!r} has no count variable"
            )
    def get_index(self, default=ValueError()):
        """Return the index variable for a compressed array.

        .. versionadded:: (cfdm) 1.7.0

        .. seealso:: `get_count`, `get_list`

        :Parameters:

            default: optional
                Return the value of the *default* parameter if an index
                variable has not been set. If set to an `Exception`
                instance then it will be raised instead.

        :Returns:

                The index variable.

        **Examples:**

        >>> i = d.get_index()

        """
        try:
            return self._get_Array().get_index()
        except (AttributeError, ValueError):
            # No underlying array, or it has no index variable
            return self._default(
                default, f"{self.__class__.__name__!r} has no index variable"
            )
    def get_list(self, default=ValueError()):
        """Return the list variable for a compressed array.

        .. versionadded:: (cfdm) 1.7.0

        .. seealso:: `get_count`, `get_index`

        :Parameters:

            default: optional
                Return the value of the *default* parameter if a list
                variable has not been set. If set to an `Exception`
                instance then it will be raised instead.

        :Returns:

                The list variable.

        **Examples:**

        >>> l = d.get_list()

        """
        try:
            return self._get_Array().get_list()
        except (AttributeError, ValueError):
            # No underlying array, or it has no list variable
            return self._default(
                default, f"{self.__class__.__name__!r} has no list variable"
            )
    def get_compressed_dimension(self, default=ValueError()):
        """Returns the compressed dimension's array position.

        That is, returns the position of the compressed dimension
        in the compressed array.

        .. versionadded:: (cfdm) 1.7.0

        .. seealso:: `compressed_array`, `get_compressed_axes`,
                     `get_compression_type`

        :Parameters:

            default: optional
                Return the value of the *default* parameter if there
                is no compressed dimension. If set to an `Exception`
                instance then it will be raised instead.

        :Returns:

            `int`
                The position of the compressed dimension in the
                compressed array.

        **Examples:**

        >>> d.get_compressed_dimension()
        2

        """
        try:
            return self._get_Array().get_compressed_dimension()
        except (AttributeError, ValueError):
            # No underlying array, or it is not compressed
            return self._default(
                default,
                f"{ self.__class__.__name__!r} has no compressed dimension",
            )
    def _parse_indices(self, indices):
        """Parse indices of the data and return valid indices in a list.

        :Parameters:

            indices: `tuple` (not a `list`!)

        :Returns:

            `list`

        **Examples:**

        >>> d = {{package}}.{{class}}(numpy.arange(100, 190).reshape(1, 10, 9))
        >>> d._parse_indices((slice(None, None, None), 1, 2))
        [slice(None, None, None), slice(1, 2, 1), slice(2, 3, 1)]
        >>> d._parse_indices((1,))
        [slice(1, 2, 1), slice(None, None, None), slice(None, None, None)]

        """
        shape = self.shape
        parsed_indices = []
        # A single non-tuple index applies to the first dimension only
        if not isinstance(indices, tuple):
            indices = (indices,)
        # Initialise the list of parsed indices as the input indices
        # with any Ellipsis objects expanded
        length = len(indices)
        n = len(shape)
        ndim = n
        for index in indices:
            if index is Ellipsis:
                # Ellipsis stands in for as many full slices as are
                # needed to pad the indices out to ndim items
                m = n - length + 1
                parsed_indices.extend([slice(None)] * m)
                n -= m
            else:
                parsed_indices.append(index)
                n -= 1
            length -= 1
        len_parsed_indices = len(parsed_indices)
        if ndim and len_parsed_indices > ndim:
            raise IndexError(
                f"Invalid indices for data with shape {shape}: "
                f"{parsed_indices}"
            )
        if len_parsed_indices < ndim:
            # Pad out missing trailing dimensions with full slices
            parsed_indices.extend([slice(None)] * (ndim - len_parsed_indices))
        if not ndim and parsed_indices:
            raise IndexError(
                "Scalar data can only be indexed with () or Ellipsis"
            )
        # Normalise each remaining index against its dimension size
        for i, (index, size) in enumerate(zip(parsed_indices, shape)):
            if isinstance(index, slice):
                continue
            if isinstance(index, int):
                # E.g. 43 -> slice(43, 44, 1)
                if index < 0:
                    index += size
                index = slice(index, index + 1, 1)
            else:
                if getattr(getattr(index, "dtype", None), "kind", None) == "b":
                    # E.g. index is [True, False, True] -> [0, 2]
                    #
                    # Convert Booleans to non-negative integers. We're
                    # assuming that anything with a dtype attribute also
                    # has a size attribute.
                    if index.size != size:
                        raise IndexError(
                            "Invalid indices for data "
                            f"with shape {shape}: {parsed_indices}"
                        )
                    index = numpy.where(index)[0]
                if not numpy.ndim(index):
                    # A 0-d (scalar) integer-like index
                    if index < 0:
                        index += size
                    index = slice(index, index + 1, 1)
                else:
                    len_index = len(index)
                    if len_index == 1:
                        # E.g. [3] -> slice(3, 4, 1)
                        index = index[0]
                        if index < 0:
                            index += size
                        index = slice(index, index + 1, 1)
                    else:
                        # E.g. [1, 3, 4] -> [1, 3, 4]
                        pass
            parsed_indices[i] = index
        return parsed_indices
def maximum(self, axes=None):
"""Return the maximum of an array or the maximum along axes.
Missing data array elements are omitted from the calculation.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `minimum`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to take the maximum. By default the
maximum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
Maximum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.max()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[23]]]]>
>>> print(e.array)
[[[[23]]]]
>>> e = d.max(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[8, ..., 23]]]]>
>>> print(e.array)
[[[[ 8 9 10 11]]
[[20 21 22 23]]]]
>>> e = d.max([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[11, 23]]]]>
>>> print(e.array)
[[[[11]]
[[23]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't find maximum of data: {error}")
array = self.array
array = numpy.amax(array, axis=axes, keepdims=True)
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
def minimum(self, axes=None):
"""Return the minimum of an array or minimum along axes.
Missing data array elements are omitted from the calculation.
.. versionadded:: (cfdm) 1.8.0
.. seealso:: `maximum`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to take the minimum. By default the
minimum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
Minimum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.min()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[0]]]]>
>>> print(e.array)
[[[[0]]]]
>>> e = d.min(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[0, ..., 15]]]]>
>>> print(e.array)
[[[[ 0 1 2 3]]
[[12 13 14 15]]]]
>>> e = d.min([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[0, 12]]]]>
>>> print(e.array)
[[[[ 0]]
[[12]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't find minimum of data: {error}")
array = self.array
array = numpy.amin(array, axis=axes, keepdims=True)
out = self.copy(array=False)
out._set_Array(array, copy=False)
if out.shape != self.shape:
# Delete hdf5 chunksizes
out.nc_clear_hdf5_chunksizes()
return out
@_inplace_enabled(default=False)
def squeeze(self, axes=None, inplace=False):
"""Remove size 1 axes from the data.
By default all size 1 axes are removed, but particular axes may be
selected with the keyword arguments.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `insert_dimension`, `transpose`
:Parameters:
axes: (sequence of) `int`, optional
The positions of the size one axes to be removed. By
default all size one axes are removed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`Data` or `None`
The data with removed data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(1, 73, 1, 96)
>>> f.squeeze().shape
(73, 96)
>>> d.squeeze(0).shape
(73, 1, 96)
>>> d.squeeze([-3, 2]).shape
(73, 96)
>>> d.squeeze(2, inplace=True)
>>> d.shape
(1, 73, 96)
"""
d = _inplace_enabled_define_and_cleanup(self)
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't squeeze data: {error}")
shape = d.shape
if axes is None:
axes = tuple([i for i, n in enumerate(shape) if n == 1])
else:
# Check the squeeze axes
for i in axes:
if shape[i] > 1:
raise ValueError(
"Can't squeeze data: "
f"Can't remove axis of size {shape[i]}"
)
if not axes:
return d
array = self.array
array = numpy.squeeze(array, axes)
d._set_Array(array, copy=False)
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
def sum(self, axes=None):
"""Return the sum of an array or the sum along axes.
Missing data array elements are omitted from the calculation.
.. seealso:: `max`, `min`
:Parameters:
axes: (sequence of) `int`, optional
The axes over which to calculate the sum. By default the
sum over all axes is returned.
{{axes int examples}}
:Returns:
`{{class}}`
The sum of the data along the specified axes.
**Examples:**
>>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
>>> d
<{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
>>> print(d.array)
[[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]]
>>> e = d.sum()
>>> e
<{{repr}}Data(1, 1, 1, 1): [[[[276]]]]>
>>> print(e.array)
[[[[276]]]]
>>> e = d.sum(2)
>>> e
<{{repr}}Data(1, 2, 1, 4): [[[[12, ..., 57]]]]>
>>> print(e.array)
[[[[12 15 18 21]]
[[48 51 54 57]]]]
>>> e = d.sum([-2, -1])
>>> e
<{{repr}}Data(1, 2, 1, 1): [[[[66, 210]]]]>
>>> print(e.array)
[[[[ 66]]
[[210]]]]
"""
# Parse the axes. By default flattened input is used.
try:
axes = self._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't sum data: {error}")
array = self.array
array = numpy.sum(array, axis=axes, keepdims=True)
d = self.copy(array=False)
d._set_Array(array, copy=False)
if d.shape != self.shape:
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
@_inplace_enabled(default=False)
def transpose(self, axes=None, inplace=False):
"""Permute the axes of the data array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `flatten`, `insert_dimension`, `squeeze`
:Parameters:
axes: (sequence of) `int`
The new axis order. By default the order is reversed.
{{axes int examples}}
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The data with permuted data axes. If the operation was
in-place then `None` is returned.
**Examples:**
>>> d.shape
(19, 73, 96)
>>> d.transpose().shape
(96, 73, 19)
>>> d.transpose([1, 0, 2]).shape
(73, 19, 96)
>>> d.transpose([-1, 0, 1], inplace=True)
>>> d.shape
(96, 19, 73)
"""
d = _inplace_enabled_define_and_cleanup(self)
ndim = d.ndim
# Parse the axes. By default, reverse the order of the axes.
try:
axes = d._parse_axes(axes)
except ValueError as error:
raise ValueError(f"Can't transpose data: {error}")
if axes is None:
if ndim <= 1:
return d
axes = tuple(range(ndim - 1, -1, -1))
elif len(axes) != ndim:
raise ValueError(
f"Can't transpose data: Axes don't match array: {axes}"
)
# Return unchanged if axes are in the same order as the data
if axes == tuple(range(ndim)):
return d
array = self.array
array = numpy.transpose(array, axes=axes)
d._set_Array(array, copy=False)
return d
def get_compressed_axes(self):
"""Returns the dimensions that are compressed in the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `get_compressed_dimension`,
`get_compression_type`
:Returns:
`list`
The dimensions of the data that are compressed to a single
dimension in the underlying array. If the data are not
compressed then an empty list is returned.
**Examples:**
>>> d.shape
(2, 3, 4, 5, 6)
>>> d.compressed_array.shape
(2, 14, 6)
>>> d.get_compressed_axes()
[1, 2, 3]
>>> d.get_compression_type()
''
>>> d.get_compressed_axes()
[]
"""
ca = self._get_Array(None)
if ca is None:
return []
return ca.get_compressed_axes()
def get_compression_type(self):
"""Returns the type of compression applied to the array.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `compressed_array`, `compression_axes`,
`get_compressed_dimension`
:Returns:
`str`
The compression type. An empty string means that no
compression has been applied.
**Examples:**
>>> d.get_compression_type()
''
>>> d.get_compression_type()
'gathered'
>>> d.get_compression_type()
'ragged contiguous'
"""
ma = self._get_Array(None)
if ma is None:
return ""
return ma.get_compression_type()
@classmethod
def empty(cls, shape, dtype=None, units=None, calendar=None):
"""Create a new data array without initialising the elements.
Note that the mask of the returned empty data is hard.
.. seealso:: `full`, `ones`, `zeros`
:Parameters:
shape: `int` or `tuple` of `int`
The shape of the new array.
dtype: `numpy.dtype` or any object convertible to `numpy.dtype`
The data-type of the new array. By default the
data-type is ``float``.
units: `str` or `Units`
The units for the empty data array.
calendar: `str`, optional
The calendar for reference time units.
:Returns:
`{{class}}`
**Examples:**
>>> d = {{package}}.{{class}}.empty((96, 73))
"""
return cls(
numpy.empty(shape=shape, dtype=dtype),
units=units,
calendar=calendar,
)
    @_manage_log_level_via_verbosity
    def equals(
        self,
        other,
        rtol=None,
        atol=None,
        verbose=None,
        ignore_data_type=False,
        ignore_fill_value=False,
        ignore_compression=True,
        ignore_type=False,
        _check_values=True,
    ):
        """Whether two data arrays are the same.

        Equality is strict by default. This means that for data arrays to
        be considered equal:

        * the units and calendar must be the same,

        ..

        * the fill value must be the same (see the *ignore_fill_value*
          parameter), and

        ..

        * the arrays must have same shape and data type, the same missing
          data mask, and be element-wise equal (see the *ignore_data_type*
          parameter).

        {{equals tolerance}}

        Any compression is ignored by default, with only the arrays in
        their uncompressed forms being compared. See the
        *ignore_compression* parameter.

        Any type of object may be tested but, in general, equality is only
        possible with another cell measure construct, or a subclass of
        one. See the *ignore_type* parameter.

        .. versionadded:: (cfdm) 1.7.0

        :Parameters:

            other:
                The object to compare for equality.

            {{atol: number, optional}}

            {{rtol: number, optional}}

            ignore_fill_value: `bool`, optional
                If True then the fill value is omitted from the
                comparison.

            {{ignore_data_type: `bool`, optional}}

            {{ignore_compression: `bool`, optional}}

            {{ignore_type: `bool`, optional}}

            {{verbose: `int` or `str` or `None`, optional}}

        :Returns:

            `bool`
                Whether the two data arrays are equal.

        **Examples:**

        >>> d.equals(d)
        True
        >>> d.equals(d.copy())
        True
        >>> d.equals('not a data array')
        False

        """
        # The preprocessor either settles the comparison outright
        # (returning a bool) or hands back *other* coerced for
        # element-wise comparison
        pp = super()._equals_preprocess(
            other, verbose=verbose, ignore_type=ignore_type
        )
        if pp is True or pp is False:
            return pp
        other = pp
        # Check that each instance has the same shape
        if self.shape != other.shape:
            logger.info(
                f"{self.__class__.__name__}: Different shapes: "
                f"{self.shape} != {other.shape}"
            )  # pragma: no cover
            return False
        # Check that each instance has the same fill value
        if not ignore_fill_value and self.get_fill_value(
            None
        ) != other.get_fill_value(None):
            logger.info(
                f"{self.__class__.__name__}: Different fill value: "
                f"{self.get_fill_value(None)} != {other.get_fill_value(None)}"
            )  # pragma: no cover
            return False
        # Check that each instance has the same data type
        if not ignore_data_type and self.dtype != other.dtype:
            logger.info(
                f"{self.__class__.__name__}: Different data types: "
                f"{self.dtype} != {other.dtype}"
            )  # pragma: no cover
            return False
        # Return now if we have been asked to not check the array
        # values
        if not _check_values:
            return True
        # Check that each instance has the same units
        for attr in ("units", "calendar"):
            x = getattr(self, "get_" + attr)(None)
            y = getattr(other, "get_" + attr)(None)
            if x != y:
                logger.info(
                    f"{self.__class__.__name__}: Different {attr}: "
                    f"{x!r} != {y!r}"
                )  # pragma: no cover
                return False
        if not ignore_compression:
            # --------------------------------------------------------
            # Check for equal compression types
            # --------------------------------------------------------
            compression_type = self.get_compression_type()
            if compression_type != other.get_compression_type():
                logger.info(
                    f"{self.__class__.__name__}: Different compression types: "
                    f"{compression_type} != {other.get_compression_type()}"
                )  # pragma: no cover
                return False
            # --------------------------------------------------------
            # Check for equal compressed array values
            # --------------------------------------------------------
            if compression_type:
                if not self._equals(
                    self.compressed_array,
                    other.compressed_array,
                    rtol=rtol,
                    atol=atol,
                ):
                    logger.info(
                        f"{self.__class__.__name__}: Different compressed "
                        "array values"
                    )  # pragma: no cover
                    return False
        # ------------------------------------------------------------
        # Check for equal (uncompressed) array values
        # ------------------------------------------------------------
        if not self._equals(self.array, other.array, rtol=rtol, atol=atol):
            logger.info(
                f"{self.__class__.__name__}: Different array values "
                f"(atol={atol}, rtol={rtol})"
            )  # pragma: no cover
            return False
        # ------------------------------------------------------------
        # Still here? Then the two data arrays are equal.
        # ------------------------------------------------------------
        return True
    def get_filenames(self):
        """Return the name of the file containing the data array.

        :Returns:

            `set`
                The file name in normalised, absolute form. If the
                data are in memory then an empty `set` is returned.

        **Examples:**

        >>> f = {{package}}.example_field(0)
        >>> {{package}}.write(f, 'temp_file.nc')
        >>> g = {{package}}.read('temp_file.nc')[0]
        >>> d = g.data
        >>> d.get_filenames()
        {'/data/user/temp_file.nc'}
        >>> d[...] = -99
        >>> d.get_filenames()
        set()

        """
        source = self.source(None)
        if source is None:
            # In-memory data has no backing file
            return set()
        try:
            filename = source.get_filename()
        except AttributeError:
            # The source type does not expose a file name
            return set()
        else:
            # abspath is imported at module level
            return set((abspath(filename),))
def first_element(self):
"""Return the first element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `last_element`, `second_element`
:Returns:
The first element of the data.
**Examples:**
>>> d = {{package}}.{{class}}(9.0)
>>> x = d.first_element()
>>> print(x, type(x))
9.0 <class 'float'>
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.first_element()
>>> print(x, type(x))
1 <class 'int'>
>>> d[0, 0] = {{package}}.masked
>>> y = d.first_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.first_element()
>>> print(x, type(x))
foo <class 'str'>
"""
return self._item((slice(0, 1),) * self.ndim)
    @_inplace_enabled(default=False)
    def flatten(self, axes=None, inplace=False):
        """Flatten axes of the data.

        Any subset of the axes may be flattened.

        The shape of the data may change, but the size will not.

        The flattening is executed in row-major (C-style) order. For
        example, the array ``[[1, 2], [3, 4]]`` would be flattened across
        both dimensions to ``[1 2 3 4]``.

        .. versionadded:: (cfdm) 1.7.11

        .. seealso:: `insert_dimension`, `squeeze`, `transpose`

        :Parameters:

            axes: (sequence of) `int`, optional
                Select the axes. By default all axes are flattened. No
                axes are flattened if *axes* is an empty sequence.

                {{axes int examples}}

            inplace: `bool`, optional
                If True then do the operation in-place and return `None`.

        :Returns:

            `Data` or `None`
                The flattened data, or `None` if the operation was
                in-place.

        **Examples**

        >>> d = {{package}}.{{class}}(numpy.arange(24).reshape(1, 2, 3, 4))
        >>> d
        <{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
        >>> e = d.flatten()
        >>> e
        <{{repr}}Data(24): [0, ..., 23]>
        >>> e = d.flatten([])
        >>> e
        <{{repr}}Data(1, 2, 3, 4): [[[[0, ..., 23]]]]>
        >>> e = d.flatten([1, 3])
        >>> e
        <{{repr}}Data(1, 8, 3): [[[0, ..., 23]]]>
        >>> print(e.array)
        [[[ 0  4  8]
          [ 1  5  9]
          [ 2  6 10]
          [ 3  7 11]
          [12 16 20]
          [13 17 21]
          [14 18 22]
          [15 19 23]]]
        >>> d.flatten([0, -1], inplace=True)
        >>> d
        <{{repr}}Data(4, 2, 3): [[[0, ..., 23]]]>

        """
        d = _inplace_enabled_define_and_cleanup(self)
        try:
            axes = d._parse_axes(axes)
        except ValueError as error:
            raise ValueError(f"Can't flatten data: {error}")
        ndim = d.ndim
        if ndim <= 1:
            # 0-d and 1-d data are already flat
            return d
        if axes is None:
            # By default flatten all axes
            axes = tuple(range(ndim))
        else:
            if len(axes) <= 1:
                # Flattening fewer than two axes is a no-op
                return d
            # Note that it is important that the first axis in the
            # list is the left-most flattened axis
            axes = sorted(axes)
        # Save the shape before we transpose
        shape = list(d.shape)
        # Move the flattened axes to be contiguous, starting at the
        # position of the left-most flattened axis
        order = [i for i in range(ndim) if i not in axes]
        order[axes[0] : axes[0]] = axes
        d.transpose(order, inplace=True)
        # The flattened dimension's size is the product of the sizes
        # of the flattened axes
        new_shape = [n for i, n in enumerate(shape) if i not in axes]
        new_shape.insert(axes[0], numpy.prod([shape[i] for i in axes]))
        array = d.array.reshape(new_shape)
        out = type(self)(
            array,
            units=d.get_units(None),
            calendar=d.get_calendar(None),
            fill_value=d.get_fill_value(None),
        )
        if inplace:
            # Adopt the flattened state so the decorator's in-place
            # handling sees the new data
            d.__dict__ = out.__dict__
        return out
def last_element(self):
"""Return the last element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `first_element`, `second_element`
:Returns:
The last element of the data.
**Examples:**
>>> d = {{package}}.{{class}}(9.0)
>>> x = d.last_element()
>>> print(x, type(x))
9.0 <class 'float'>
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.last_element()
>>> print(x, type(x))
4 <class 'int'>
>>> d[-1, -1] = {{package}}.masked
>>> y = d.last_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.last_element()
>>> print(x, type(x))
bar <class 'str'>
"""
return self._item((slice(-1, None),) * self.ndim)
def second_element(self):
"""Return the second element of the data as a scalar.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `first_element`, `last_element`
:Returns:
The second element of the data.
**Examples:**
>>> d = {{package}}.{{class}}([[1, 2], [3, 4]])
>>> x = d.second_element()
>>> print(x, type(x))
2 <class 'int'>
>>> d[0, 1] = {{package}}.masked
>>> y = d.second_element()
>>> print(y, type(y))
-- <class 'numpy.ma.core.MaskedConstant'>
>>> d = {{package}}.{{class}}(['foo', 'bar'])
>>> x = d.second_element()
>>> print(x, type(x))
bar <class 'str'>
"""
return self._item((slice(0, 1),) * (self.ndim - 1) + (slice(1, 2),))
def to_memory(self):
"""Bring data on disk into memory and retain it there.
There is no change to data that is already in memory.
:Returns:
`None`
**Examples:**
>>> f = {{package}}.example_field(4)
>>> f.data
<{{repr}}Data(3, 26, 4): [[[290.0, ..., --]]] K>
>>> f.data.to_memory()
"""
self._set_Array(self.source().to_memory())
@_inplace_enabled(default=False)
def uncompress(self, inplace=False):
"""Uncompress the underlying array.
.. versionadded:: (cfdm) 1.7.3
.. seealso:: `array`, `compressed_array`, `source`
:Parameters:
inplace: `bool`, optional
If True then do the operation in-place and return `None`.
:Returns:
`{{class}}` or `None`
The uncompressed data, or `None` if the operation was
in-place.
**Examples:**
>>> d.get_compression_type()
'ragged contiguous'
>>> d.source()
<RaggedContiguousArray(4, 9): >
>>> d.uncompress(inpalce=True)
>>> d.get_compression_type()
''
>>> d.source()
<NumpyArray(4, 9): >
"""
d = _inplace_enabled_define_and_cleanup(self)
if d.get_compression_type():
d._set_Array(d.array, copy=False)
return d
def unique(self):
"""The unique elements of the data.
The unique elements are sorted into a one dimensional array. with
no missing values.
.. versionadded:: (cfdm) 1.7.0
:Returns:
`{{class}}`
The unique elements.
**Examples:**
>>> d = {{package}}.{{class}}([[4, 2, 1], [1, 2, 3]], 'metre')
>>> d.unique()
<{{repr}}Data(4): [1, ..., 4] metre>
>>> d[1, -1] = {{package}}.masked
>>> d.unique()
<{{repr}}Data(3): [1, 2, 4] metre>
"""
array = self.array
array = numpy.unique(array)
if numpy.ma.is_masked(array):
array = array.compressed()
d = self.copy(array=False)
d._set_Array(array, copy=False)
if d.shape != self.shape:
# Delete hdf5 chunksizes
d.nc_clear_hdf5_chunksizes()
return d
# ----------------------------------------------------------------
# Aliases
# ----------------------------------------------------------------
    def max(self, axes=None):
        """Alias for `maximum`.

        .. seealso:: `min`

        """
        return self.maximum(axes=axes)
    def min(self, axes=None):
        """Alias for `minimum`.

        .. seealso:: `max`

        """
        return self.minimum(axes=axes)
| [
"logging.getLogger",
"numpy.prod",
"numpy.ma.getmaskarray",
"numpy.asanyarray",
"numpy.array",
"numpy.ma.is_masked",
"numpy.where",
"numpy.ma.masked_all",
"itertools.product",
"numpy.ndim",
"numpy.ma.masked_where",
"numpy.empty",
"numpy.ma.where",
"numpy.amin",
"numpy.ma.array",
"numpy... | [((412, 439), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (429, 439), False, 'import logging\n'), ((24164, 24184), 'numpy.ma.isMA', 'numpy.ma.isMA', (['array'], {}), '(array)\n', (24177, 24184), False, 'import numpy\n'), ((40415, 40435), 'numpy.ma.isMA', 'numpy.ma.isMA', (['array'], {}), '(array)\n', (40428, 40435), False, 'import numpy\n'), ((42343, 42382), 'numpy.expand_dims', 'numpy.expand_dims', (['self.array', 'position'], {}), '(self.array, position)\n', (42360, 42382), False, 'import numpy\n'), ((51195, 51238), 'numpy.amax', 'numpy.amax', (['array'], {'axis': 'axes', 'keepdims': '(True)'}), '(array, axis=axes, keepdims=True)\n', (51205, 51238), False, 'import numpy\n'), ((53131, 53174), 'numpy.amin', 'numpy.amin', (['array'], {'axis': 'axes', 'keepdims': '(True)'}), '(array, axis=axes, keepdims=True)\n', (53141, 53174), False, 'import numpy\n'), ((55270, 55296), 'numpy.squeeze', 'numpy.squeeze', (['array', 'axes'], {}), '(array, axes)\n', (55283, 55296), False, 'import numpy\n'), ((57050, 57092), 'numpy.sum', 'numpy.sum', (['array'], {'axis': 'axes', 'keepdims': '(True)'}), '(array, axis=axes, keepdims=True)\n', (57059, 57092), False, 'import numpy\n'), ((59029, 59062), 'numpy.transpose', 'numpy.transpose', (['array'], {'axes': 'axes'}), '(array, axes=axes)\n', (59044, 59062), False, 'import numpy\n'), ((76655, 76674), 'numpy.unique', 'numpy.unique', (['array'], {}), '(array)\n', (76667, 76674), False, 'import numpy\n'), ((76687, 76712), 'numpy.ma.is_masked', 'numpy.ma.is_masked', (['array'], {}), '(array)\n', (76705, 76712), False, 'import numpy\n'), ((4663, 4695), 'numpy.ma.array', 'numpy.ma.array', (['array'], {'mask': 'mask'}), '(array, mask=mask)\n', (4677, 4695), False, 'import numpy\n'), ((12464, 12484), 'numpy.ma.isMA', 'numpy.ma.isMA', (['value'], {}), '(value)\n', (12477, 12484), False, 'import numpy\n'), ((12736, 12759), 'numpy.asanyarray', 'numpy.asanyarray', (['value'], {}), '(value)\n', (12752, 
12759), False, 'import numpy\n'), ((16925, 16945), 'numpy.ma.isMA', 'numpy.ma.isMA', (['array'], {}), '(array)\n', (16938, 16945), False, 'import numpy\n'), ((24879, 24911), 'numpy.array', 'numpy.array', (['array'], {'dtype': 'object'}), '(array, dtype=object)\n', (24890, 24911), False, 'import numpy\n'), ((24982, 25016), 'numpy.ma.masked_where', 'numpy.ma.masked_where', (['mask', 'array'], {}), '(mask, array)\n', (25003, 25016), False, 'import numpy\n'), ((27323, 27356), 'numpy.ma.getmaskarray', 'numpy.ma.getmaskarray', (['self.array'], {}), '(self.array)\n', (27344, 27356), False, 'import numpy\n'), ((34903, 34943), 'numpy.ma.where', 'numpy.ma.where', (['mask', 'cfdm_masked', 'array'], {}), '(mask, cfdm_masked, array)\n', (34917, 34943), False, 'import numpy\n'), ((61673, 61710), 'numpy.empty', 'numpy.empty', ([], {'shape': 'shape', 'dtype': 'dtype'}), '(shape=shape, dtype=dtype)\n', (61684, 61710), False, 'import numpy\n'), ((72555, 72591), 'numpy.prod', 'numpy.prod', (['[shape[i] for i in axes]'], {}), '([shape[i] for i in axes])\n', (72565, 72591), False, 'import numpy\n'), ((18596, 18619), 'numpy.asanyarray', 'numpy.asanyarray', (['array'], {}), '(array)\n', (18612, 18619), False, 'import numpy\n'), ((20733, 20750), 'numpy.size', 'numpy.size', (['value'], {}), '(value)\n', (20743, 20750), False, 'import numpy\n'), ((20782, 20810), 'itertools.product', 'itertools.product', (['*indices1'], {}), '(*indices1)\n', (20799, 20810), False, 'import itertools\n'), ((25036, 25053), 'numpy.ndim', 'numpy.ndim', (['array'], {}), '(array)\n', (25046, 25053), False, 'import numpy\n'), ((25079, 25116), 'numpy.ma.masked_all', 'numpy.ma.masked_all', (['()'], {'dtype': 'object'}), '((), dtype=object)\n', (25098, 25116), False, 'import numpy\n'), ((4320, 4343), 'numpy.asanyarray', 'numpy.asanyarray', (['array'], {}), '(array)\n', (4336, 4343), False, 'import numpy\n'), ((4618, 4641), 'numpy.asanyarray', 'numpy.asanyarray', (['array'], {}), '(array)\n', (4634, 4641), False, 
'import numpy\n'), ((20311, 20339), 'itertools.zip_longest', 'itertools.zip_longest', (['*args'], {}), '(*args)\n', (20332, 20339), False, 'import itertools\n'), ((20945, 20962), 'numpy.ndim', 'numpy.ndim', (['value'], {}), '(value)\n', (20955, 20962), False, 'import numpy\n'), ((21001, 21019), 'numpy.shape', 'numpy.shape', (['value'], {}), '(value)\n', (21012, 21019), False, 'import numpy\n'), ((21610, 21638), 'itertools.product', 'itertools.product', (['*indices1'], {}), '(*indices1)\n', (21627, 21638), False, 'import itertools\n'), ((21640, 21668), 'itertools.product', 'itertools.product', (['*indices2'], {}), '(*indices2)\n', (21657, 21668), False, 'import itertools\n'), ((24318, 24343), 'numpy.ma.is_masked', 'numpy.ma.is_masked', (['array'], {}), '(array)\n', (24336, 24343), False, 'import numpy\n'), ((48849, 48866), 'numpy.ndim', 'numpy.ndim', (['index'], {}), '(index)\n', (48859, 48866), False, 'import numpy\n'), ((48803, 48821), 'numpy.where', 'numpy.where', (['index'], {}), '(index)\n', (48814, 48821), False, 'import numpy\n'), ((13943, 13978), 'numpy.ma.array', 'numpy.ma.array', (['first'], {'mask': 'mask[0]'}), '(first, mask=mask[0])\n', (13957, 13978), False, 'import numpy\n'), ((14515, 14570), 'numpy.ma.array', 'numpy.ma.array', (['[first, last]'], {'mask': '(mask[0], mask[-1])'}), '([first, last], mask=(mask[0], mask[-1]))\n', (14529, 14570), False, 'import numpy\n'), ((15325, 15361), 'numpy.ma.array', 'numpy.ma.array', (['middle'], {'mask': 'mask[1]'}), '(middle, mask=mask[1])\n', (15339, 15361), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import str
import json
import os
import sys
from dateutil.parser import parse
from html2text import HTML2Text
from lxml import etree
from bs4 import BeautifulSoup
class EverConverter(object):
"""Evernote conversion runner
"""
fieldnames = ['createdate', 'modifydate', 'content', 'tags']
date_fmt = '%h %d %Y %H:%M:%S'
def __init__(self, enex_filename, output_dir=None, fmt="text",
preserve_title=False, verbose=False):
self.enex_filename = os.path.expanduser(enex_filename)
self.stdout = False
if output_dir is None:
self.stdout = True
self.output_dir = output_dir
else:
self.output_dir = os.path.expanduser(output_dir)
self.fmt = fmt
self.preserve_title = preserve_title
self.verbose = verbose
self.use_beautifulsoup = True
def _load_xml(self, enex_file):
try:
parser = etree.XMLParser(huge_tree=True)
xml_tree = etree.parse(enex_file, parser)
except (etree.XMLSyntaxError, ) as e:
print('Could not parse XML')
print(e)
sys.exit(1)
return xml_tree
def prepare_notes(self, xml_tree):
notes = []
raw_notes = xml_tree.xpath('//note')
for note in raw_notes:
note_dict = {}
title = note.xpath('title')[0].text
note_dict['title'] = title
# Use dateutil to figure out these dates
# 20110610T182917Z
created_string = parse('19700101T000017Z')
if note.xpath('created'):
created_string = parse(note.xpath('created')[0].text)
updated_string = created_string
if note.xpath('updated'):
updated_string = parse(note.xpath('updated')[0].text)
note_dict['createdate'] = created_string.strftime(self.date_fmt)
note_dict['modifydate'] = updated_string.strftime(self.date_fmt)
tags = [tag.text for tag in note.xpath('tag')]
note_dict['tags'] = tags
note_dict['content'] = ''
content = note.xpath('content')
if content:
raw_text = content[0].text
# TODO: Option to go to just plain text, no markdown
converted_text = self._convert_note_to_text(title, raw_text)
note_dict['content'] = converted_text
if self.verbose:
print("note_dict: {}".format(note_dict))
notes.append(note_dict)
return notes
def convert(self):
if not os.path.exists(self.enex_filename):
print("File does not exist: {}".format(self.enex_filename))
sys.exit(1)
# TODO: use with here, but pyflakes barfs on it
enex_file = open(self.enex_filename)
xml_tree = self._load_xml(enex_file)
enex_file.close()
notes = self.prepare_notes(xml_tree)
self._convert_dir(notes)
def _convert_note_to_text(self, title, text):
if self.fmt == "markdown":
html2plain = HTML2Text(None, "")
html2plain.feed("<h1>%s</h1>" % title)
html2plain.feed(text)
return html2plain.close()
else:
soup = BeautifulSoup(text, 'html.parser')
output = soup.get_text()
return output
def sanitize_note_title(self, note_title):
# replace spaces with underscores
note_title = note_title.replace(' ', '_')
# replace forward slaces with dashes
note_title = note_title.replace('/', '-')
note_title = note_title.replace('|', '-')
note_title = note_title.replace('(', '')
note_title = note_title.replace(')', '')
note_title = note_title.replace('?', '')
note_title = note_title.replace('*', '')
note_title = note_title.replace('!', '')
note_title = note_title.replace('$', '')
note_title = note_title.replace('"', '')
note_title = note_title.replace("'", '')
note_title = note_title.replace(':', '-')
note_title = note_title.replace('>', '-')
note_title = note_title.replace('<', '-')
note_title = note_title.replace('®', '')
note_title = note_title.replace(u"\u2122", '')
return note_title
def _convert_dir(self, notes):
    """Write converted notes out.

    With no output directory configured, the notes are dumped to stdout
    as a single JSON document.  Otherwise each note's content is written
    to ``<output_dir>/<title>.txt``; when the title cannot be used as a
    filename, an index-based ``title_fail-<i>.txt`` fallback is used.
    """
    if self.output_dir is None:
        sys.stdout.write(json.dumps(notes))
        return
    if (os.path.exists(self.output_dir) and
            not os.path.isdir(self.output_dir)):
        print('"{}" exists but is not a directory.'.format(
            self.output_dir))
        sys.exit(1)
    elif not os.path.exists(self.output_dir):
        os.makedirs(self.output_dir)
    for index, note in enumerate(notes):
        if self.preserve_title:
            # Keep a human-readable title, sanitized for the filesystem
            # and suffixed with the note index so duplicates don't clash.
            clean = self.sanitize_note_title(note['title'])
            file_title = "%s-%s" % (clean, index)
        else:
            file_title = str(index)
        try:
            target = os.path.join(self.output_dir, file_title + '.txt')
            with open(target, 'w') as handle:
                handle.write(note['content'])
        except Exception as e:
            # Best effort: fall back to a purely index-based filename.
            target = os.path.join(
                self.output_dir,
                "title_fail" + '-' + str(index) + '.txt')
            print("failed to use title for filename: {}".format(e))
            with open(target, 'w') as handle:
                handle.write(note['content'])
| [
"dateutil.parser.parse",
"os.path.exists",
"os.makedirs",
"lxml.etree.parse",
"html2text.HTML2Text",
"json.dumps",
"builtins.str",
"os.path.join",
"bs4.BeautifulSoup",
"lxml.etree.XMLParser",
"os.path.isdir",
"sys.exit",
"builtins.open",
"os.path.expanduser"
] | [((702, 735), 'os.path.expanduser', 'os.path.expanduser', (['enex_filename'], {}), '(enex_filename)\n', (720, 735), False, 'import os\n'), ((3037, 3061), 'builtins.open', 'open', (['self.enex_filename'], {}), '(self.enex_filename)\n', (3041, 3061), False, 'from builtins import open\n'), ((911, 941), 'os.path.expanduser', 'os.path.expanduser', (['output_dir'], {}), '(output_dir)\n', (929, 941), False, 'import os\n'), ((1150, 1181), 'lxml.etree.XMLParser', 'etree.XMLParser', ([], {'huge_tree': '(True)'}), '(huge_tree=True)\n', (1165, 1181), False, 'from lxml import etree\n'), ((1205, 1235), 'lxml.etree.parse', 'etree.parse', (['enex_file', 'parser'], {}), '(enex_file, parser)\n', (1216, 1235), False, 'from lxml import etree\n'), ((1754, 1779), 'dateutil.parser.parse', 'parse', (['"""19700101T000017Z"""'], {}), "('19700101T000017Z')\n", (1759, 1779), False, 'from dateutil.parser import parse\n'), ((2829, 2863), 'os.path.exists', 'os.path.exists', (['self.enex_filename'], {}), '(self.enex_filename)\n', (2843, 2863), False, 'import os\n'), ((2949, 2960), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2957, 2960), False, 'import sys\n'), ((3322, 3341), 'html2text.HTML2Text', 'HTML2Text', (['None', '""""""'], {}), "(None, '')\n", (3331, 3341), False, 'from html2text import HTML2Text\n'), ((3498, 3532), 'bs4.BeautifulSoup', 'BeautifulSoup', (['text', '"""html.parser"""'], {}), "(text, 'html.parser')\n", (3511, 3532), False, 'from bs4 import BeautifulSoup\n'), ((1356, 1367), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1364, 1367), False, 'import sys\n'), ((4654, 4671), 'json.dumps', 'json.dumps', (['notes'], {}), '(notes)\n', (4664, 4671), False, 'import json\n'), ((4703, 4734), 'os.path.exists', 'os.path.exists', (['self.output_dir'], {}), '(self.output_dir)\n', (4717, 4734), False, 'import os\n'), ((4916, 4927), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4924, 4927), False, 'import sys\n'), ((4763, 4793), 'os.path.isdir', 'os.path.isdir', 
(['self.output_dir'], {}), '(self.output_dir)\n', (4776, 4793), False, 'import os\n'), ((4949, 4980), 'os.path.exists', 'os.path.exists', (['self.output_dir'], {}), '(self.output_dir)\n', (4963, 4980), False, 'import os\n'), ((4998, 5026), 'os.makedirs', 'os.makedirs', (['self.output_dir'], {}), '(self.output_dir)\n', (5009, 5026), False, 'import os\n'), ((5583, 5589), 'builtins.str', 'str', (['i'], {}), '(i)\n', (5586, 5589), False, 'from builtins import str\n'), ((5677, 5727), 'os.path.join', 'os.path.join', (['self.output_dir', "(note_title + '.txt')"], {}), "(self.output_dir, note_title + '.txt')\n", (5689, 5727), False, 'import os\n'), ((5753, 5780), 'builtins.open', 'open', (['output_file_path', '"""w"""'], {}), "(output_file_path, 'w')\n", (5757, 5780), False, 'from builtins import open\n'), ((6152, 6179), 'builtins.open', 'open', (['output_file_path', '"""w"""'], {}), "(output_file_path, 'w')\n", (6156, 6179), False, 'from builtins import open\n'), ((6034, 6040), 'builtins.str', 'str', (['i'], {}), '(i)\n', (6037, 6040), False, 'from builtins import str\n')] |
from django.conf.urls import patterns, url
# App specific URL patterns
# URL routes for the cabshare app.  Views are given as strings resolved
# relative to the "djkatta.cabshare.views" prefix.
# NOTE(review): patterns() and string view references were removed in
# Django 1.10 -- this module requires an older Django; confirm the pinned
# version before upgrading.
urlpatterns = patterns("djkatta.cabshare.views",
    # post new req
    url(r'new_post/$', 'new_post', name='new_post'),
    # view posts by the user
    url(r'my_posts/$', 'my_posts', name='my_posts'),
    # modify old req
    url(r'(?P<post_id>[\d]+)/edit/$', 'edit', name='edit'),
    # view individual req
    url(r'(?P<post_id>[\d]+)/$', 'indi', name='indi'),
)
| [
"django.conf.urls.url"
] | [((145, 191), 'django.conf.urls.url', 'url', (['"""new_post/$"""', '"""new_post"""'], {'name': '"""new_post"""'}), "('new_post/$', 'new_post', name='new_post')\n", (148, 191), False, 'from django.conf.urls import patterns, url\n'), ((228, 274), 'django.conf.urls.url', 'url', (['"""my_posts/$"""', '"""my_posts"""'], {'name': '"""my_posts"""'}), "('my_posts/$', 'my_posts', name='my_posts')\n", (231, 274), False, 'from django.conf.urls import patterns, url\n'), ((303, 357), 'django.conf.urls.url', 'url', (['"""(?P<post_id>[\\\\d]+)/edit/$"""', '"""edit"""'], {'name': '"""edit"""'}), "('(?P<post_id>[\\\\d]+)/edit/$', 'edit', name='edit')\n", (306, 357), False, 'from django.conf.urls import patterns, url\n'), ((390, 439), 'django.conf.urls.url', 'url', (['"""(?P<post_id>[\\\\d]+)/$"""', '"""indi"""'], {'name': '"""indi"""'}), "('(?P<post_id>[\\\\d]+)/$', 'indi', name='indi')\n", (393, 439), False, 'from django.conf.urls import patterns, url\n')] |
#!/usr/bin/env python
# coding:utf8
# -*- coding: utf-8 -*-
"""
Main Program: Run MODIS AGGREGATION IN MPI WITH FLEXIBLE STATISTICS
Created on 2020
@author: <NAME> (Email: <EMAIL>)
"""
import os
import sys
import h5py
import timeit
import random
import calendar
import numpy as np
import pandas as pd
from mpi4py import MPI
from netCDF4 import Dataset
from collections import OrderedDict
from datetime import date, datetime
from dateutil.rrule import rrule, DAILY, MONTHLY
from MODIS_Aggregation import *
if __name__ =='__main__':
    # Entry point: aggregate MODIS L2 granules onto an L3 lat/lon grid with
    # user-selected statistics, parallelized over input files with MPI.
    # Rank 0 merges every worker's partial result and writes the HDF5 file.
    # This is the main program for using concurrent to speed up the whole process

    #--------------STEP 1: Read User Inputs and Initial Paramters for Aggregation--------------------
    fname1,fname2,day_in_year,shift_hour,NTA_lats,NTA_lons,map_lon,map_lat,grid_lon,grid_lat,gap_x,gap_y,filenum, \
    grid_data,sts_switch,varnames,intervals_1d,intervals_2d,bin_num1,bin_num2,var_idx,spl_num,sts_name,histnames, \
    output_dir,l3name,unit_list,scale_list,offst_list,longname_list,fillvalue_list = read_user_inputs()

    total_file_num = len(filenum)

    #--------------STEP 2: Start Aggregation------------------------------------------------
    # Start counting operation time
    start_time = timeit.default_timer()

    print("-------- START AGGREGATION --------")

    # Initiate MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_s
ize()
    random.seed(rank)

    # Initiate the number of files for MPI
    # Pad the file index range up to a multiple of `size` and split it evenly;
    # the last `remain` ranks then re-split the genuine leftover files so that
    # every rank receives one contiguous slice of file indices.
    remain = size-total_file_num%size
    files_part1 = np.arange(total_file_num + remain)
    tasks_part1 = np.array(np.split(files_part1,size))
    files_part2 = np.arange(total_file_num - tasks_part1[rank].size * (size-remain)) + tasks_part1[rank].size * (size-remain)
    tasks_part2 = np.array(np.split(files_part2,remain))

    if rank < (size-remain):
        fileloop = tasks_part1[rank]
    else:
        fileloop = tasks_part2[rank-(size-remain)]

    print("Process {} calculating files from {} to {}... (Total: {} / {})".format(rank, fileloop[0],fileloop[-1],fileloop.shape[0],total_file_num))

    if rank == 0:
        # Root rank aggregates its own slice, then merges the partial
        # counters received from every worker rank.
        grid_data = run_modis_aggre(fname1,fname2,day_in_year,shift_hour,NTA_lats,NTA_lons,grid_lon,grid_lat,gap_x,gap_y,fileloop, \
                                    grid_data,sts_switch,varnames,intervals_1d,intervals_2d,var_idx,spl_num,sts_name,histnames)

        for i in range(1,size):
            results = comm.recv(source=i, tag=0)
            grid_data = addCounter(grid_data, results)

        # Compute the mean cloud fraction & Statistics (Include Min & Max & Standard deviation)

        # Reference for statstic parameters
        # sts_name[0]: min
        # sts_name[1]: max
        # sts_name[2]: mean / total_value
        # sts_name[3]: count
        # sts_name[4]: square
        # sts_name[5]: histogram
        # sts_name[6]: joint histogram

        sts_idx = np.array(np.where(sts_switch == True))[0]
        print("Index of User-defined Statistics:",sts_idx)
        print(grid_data['GRID_Counts'].reshape([grid_lat,grid_lon]))
        key_idx = 0
        for key in varnames:
            for i in sts_idx:
                if i == 0:
                    grid_data[key+'_'+sts_name[0]] = grid_data[key+'_'+sts_name[0]].reshape([grid_lat,grid_lon])
                elif i == 1:
                    grid_data[key+'_'+sts_name[1]] = grid_data[key+'_'+sts_name[1]].reshape([grid_lat,grid_lon])
                elif i == 2:
                    # Mean = accumulated total / sample count.
                    grid_data[key+'_'+sts_name[2]] = grid_data[key+'_'+sts_name[2]] / grid_data[key+'_'+sts_name[3]]
                    grid_data[key+'_'+sts_name[2]] = grid_data[key+'_'+sts_name[2]].reshape([grid_lat,grid_lon])
                elif i == 3:
                    grid_data[key+'_'+sts_name[3]] = grid_data[key+'_'+sts_name[3]].reshape([grid_lat,grid_lon])
                elif i == 4:
                    # Standard deviation = sqrt(E[x^2] - E[x]^2).
                    grid_data[key+'_'+sts_name[4]] = ((grid_data[key+'_'+sts_name[4]] / grid_data[key+'_'+sts_name[3]].ravel()) - grid_data[key+'_'+sts_name[2]].ravel()**2)**0.5
                    grid_data[key+'_'+sts_name[4]] = grid_data[key+'_'+sts_name[4]].reshape([grid_lat,grid_lon])
                elif i == 5:
                    grid_data[key+'_'+sts_name[5]] = grid_data[key+'_'+sts_name[5]].reshape([grid_lat,grid_lon,bin_num1[key_idx]])
                elif i == 6:
                    grid_data[key+'_'+sts_name[6]+histnames[key_idx]] = grid_data[key+'_'+sts_name[6]+histnames[key_idx]].reshape([grid_lat,grid_lon,bin_num1[key_idx],bin_num2[key_idx]])

            key_idx += 1

        end_time = timeit.default_timer()

        print ("Operation Time in {:7.2f} seconds".format(end_time - start_time))

        #--------------STEP 3: Create HDF5 file to store the result------------------------------
        ff=h5py.File(output_dir+l3name+'MPI','w')

        PC=ff.create_dataset('lat_bnd',data=map_lat)
        PC.attrs['units']='degrees'
        PC.attrs['long_name']='Latitude_boundaries'

        PC=ff.create_dataset('lon_bnd',data=map_lon)
        PC.attrs['units']='degrees'
        PC.attrs['long_name']='Longitude_boundaries'

        PCentry=ff.create_dataset('GRID_Counts',data=grid_data['GRID_Counts'].reshape([grid_lat,grid_lon]))
        PCentry.dims[0].label='lat_bnd'
        PCentry.dims[1].label='lon_bnd'
        # NOTE(review): the next two attrs are set on PC (the lon_bnd
        # dataset), not on PCentry (GRID_Counts) -- looks unintended; confirm.
        PC.attrs['units']='none'
        PC.attrs['long_name']='grid_point_counts'

        for i in range(sts_idx.shape[0]):
            cnt = 0
            for key in grid_data:
                if key.find("1km") != -1:
                    new_name = key.replace("_1km", "")
                else:
                    new_name = key

                if (sts_name[sts_idx[i]] in key) == True:
                    if sts_idx[i] >= 5:
                        # Histograms carry their own per-variable bin intervals.
                        addGridEntry(ff,new_name,unit_list[cnt],longname_list[cnt],fillvalue_list[cnt],scale_list[cnt],offst_list[cnt],grid_data[key],intervals_1d[cnt],intervals_2d[cnt])
                    else:
                        addGridEntry(ff,new_name,unit_list[cnt],longname_list[cnt],fillvalue_list[cnt],scale_list[cnt],offst_list[cnt],grid_data[key],intervals_1d[0],intervals_2d[0])
                    cnt += 1

        ff.close()

        print(l3name+' Saved!')

        print("-------- AGGREGATION COMPLETED --------")

    else:
        # Worker ranks: aggregate the assigned slice of files and send the
        # partial counters back to rank 0 for merging.
        results = run_modis_aggre(fname1,fname2,day_in_year,shift_hour,NTA_lats,NTA_lons,grid_lon,grid_lat,gap_x,gap_y,fileloop, \
                                grid_data,sts_switch,varnames,intervals_1d,intervals_2d,var_idx,spl_num,sts_name,histnames)
        massage = "Process {} finished".format(rank)
        print(massage)
        comm.send(results, dest=0, tag=0)

    #---------------------------COMPLETED------------------------------------------------------
| [
"numpy.where",
"timeit.default_timer",
"random.seed",
"h5py.File",
"numpy.split",
"numpy.arange"
] | [((1212, 1234), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1232, 1234), False, 'import timeit\n'), ((1372, 1389), 'random.seed', 'random.seed', (['rank'], {}), '(rank)\n', (1383, 1389), False, 'import random\n'), ((1484, 1518), 'numpy.arange', 'np.arange', (['(total_file_num + remain)'], {}), '(total_file_num + remain)\n', (1493, 1518), True, 'import numpy as np\n'), ((1543, 1570), 'numpy.split', 'np.split', (['files_part1', 'size'], {}), '(files_part1, size)\n', (1551, 1570), True, 'import numpy as np\n'), ((1587, 1655), 'numpy.arange', 'np.arange', (['(total_file_num - tasks_part1[rank].size * (size - remain))'], {}), '(total_file_num - tasks_part1[rank].size * (size - remain))\n', (1596, 1655), True, 'import numpy as np\n'), ((1719, 1748), 'numpy.split', 'np.split', (['files_part2', 'remain'], {}), '(files_part2, remain)\n', (1727, 1748), True, 'import numpy as np\n'), ((4099, 4121), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4119, 4121), False, 'import timeit\n'), ((4298, 4341), 'h5py.File', 'h5py.File', (["(output_dir + l3name + 'MPI')", '"""w"""'], {}), "(output_dir + l3name + 'MPI', 'w')\n", (4307, 4341), False, 'import h5py\n'), ((2702, 2730), 'numpy.where', 'np.where', (['(sts_switch == True)'], {}), '(sts_switch == True)\n', (2710, 2730), True, 'import numpy as np\n')] |
import typing
from lxml import html
import requests
from dataclasses import dataclass
from waffle.logger import LOG
from waffle.law_url import LawUrl
@dataclass
class _FollowResults:
    """Result of scanning one index page: its path plus the links found on it."""
    path: LawUrl
    links: typing.List[LawUrl]
@dataclass
class DownloadResults:
    """A fetched .html document: its source path and the decoded page body."""
    path: LawUrl
    content: str
class Downloader:
    """Crawls the laws-lois index pages and yields downloaded .html documents."""

    LAWS = {
        'english': "eng/acts/",
        'french': "fra/lois/",
    }

    def legislation(self, language: str) -> typing.Iterable[DownloadResults]:
        """Yield every legislation page reachable from the *language* index.

        :param language: one of the ``LAWS`` keys ('english' or 'french').
        :raises KeyError: if *language* is not a known key.
        """
        location = LawUrl(self.LAWS[language])
        yield from self._fetch_pages([location])

    def _fetch_pages(self, paths: typing.List[LawUrl]) -> typing.Iterable[DownloadResults]:
        # Depth-first crawl: each index page contributes follow-up links
        # that are fetched recursively; .html leaf pages are yielded.
        for path in paths:
            LOG.debug("Fetching page at %s", path)
            follow, downloaded = self._process_page(path)
            if downloaded:
                yield downloaded
            yield from self._fetch_pages(follow.links)

    @staticmethod
    def _process_page(location: LawUrl) -> typing.Tuple[_FollowResults, typing.Optional[DownloadResults]]:
        """Fetch one page; return (links to follow, optional downloaded doc).

        .html URLs are treated as leaf documents (content captured, no
        links followed); anything else is treated as an alphabetical index
        page whose "alphaList" anchors are turned into follow-up links.
        """
        LOG.debug("Processing page at %s", location)
        resp = requests.get(str(location))
        body = resp.content.decode("UTF-8")
        download: typing.Optional[DownloadResults] = None
        if str(location).endswith('.html'):
            paths = []
            download = DownloadResults(path=location, content=body)
        else:
            root = html.fromstring(body)
            letters = root.findall('.//div[@id="alphaList"]//a')
            # BUG FIX: requests.PreparedRequest has no ``path`` attribute --
            # the documented accessor for the request's path is ``path_url``
            # (the old ``resp.request.path`` raised AttributeError here).
            paths = [resp.request.path_url + letter.attrib['href']
                     for letter in letters if letter.attrib['href'] != '#']
        links = [LawUrl(path) for path in paths]
        return _FollowResults(path=location, links=links), download
| [
"waffle.logger.LOG.debug",
"waffle.law_url.LawUrl",
"lxml.html.fromstring"
] | [((503, 530), 'waffle.law_url.LawUrl', 'LawUrl', (['self.LAWS[language]'], {}), '(self.LAWS[language])\n', (509, 530), False, 'from waffle.law_url import LawUrl\n'), ((1059, 1103), 'waffle.logger.LOG.debug', 'LOG.debug', (['"""Processing page at %s"""', 'location'], {}), "('Processing page at %s', location)\n", (1068, 1103), False, 'from waffle.logger import LOG\n'), ((712, 750), 'waffle.logger.LOG.debug', 'LOG.debug', (['"""Fetching page at %s"""', 'path'], {}), "('Fetching page at %s', path)\n", (721, 750), False, 'from waffle.logger import LOG\n'), ((1418, 1439), 'lxml.html.fromstring', 'html.fromstring', (['body'], {}), '(body)\n', (1433, 1439), False, 'from lxml import html\n'), ((1641, 1653), 'waffle.law_url.LawUrl', 'LawUrl', (['path'], {}), '(path)\n', (1647, 1653), False, 'from waffle.law_url import LawUrl\n')] |
from django.urls import path
from rest_framework import routers
from .administration.views import AdministrationOfUserAPIView
from .views import PersonDetailView, PersonDashboardsWidgetsView, PersonWidgetDefinitionViewSet, PersonStackViewset
# DRF router generates the standard list/detail routes for the admin
# user-management viewsets registered below.
router = routers.SimpleRouter()
router.register(r'admin/users', AdministrationOfUserAPIView)
router.register(r'admin/users-widgets', PersonWidgetDefinitionViewSet, base_name='admin_users-widgets')

urlpatterns = [
    # Current user's own profile.
    path('me/', PersonDetailView.as_view(), name='user-detail'),
    # Dashboards/widgets belonging to the current user.
    path('me/dashboards-widgets/', PersonDashboardsWidgetsView.as_view(), name='user-widgets-dashboards-detail'),
    path('admin/users-stacks/', PersonStackViewset.as_view(), name='admin_users-stacks')
]

# Append the router-generated admin routes after the explicit ones.
urlpatterns += router.urls
| [
"rest_framework.routers.SimpleRouter"
] | [((253, 275), 'rest_framework.routers.SimpleRouter', 'routers.SimpleRouter', ([], {}), '()\n', (273, 275), False, 'from rest_framework import routers\n')] |
# -*- coding: utf-8 -*-
'''This code pulls all coins from Conmarketcap.com
It stores it in a pandas dataframe'''
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
import collections
def coinmarketcap_coins(n):
    '''Scrape the first *n* listing pages of coinmarketcap.com.

    Pulls the cryptocurrencies and related statistics shown in the
    front-page table (this does not pull the history of a coin).

    :param n: number of pages worth of cryptocurrencies to pull.
    :return: pandas DataFrame with one row per cryptocurrency.
    '''
    df_coindata = pd.DataFrame()  # accumulates every page's coins
    # Python range(1, n) is 1 .. n-1, so use n + 1 for the last page.
    for pages in range(1, n + 1):
        # Request the page and parse it with BeautifulSoup.
        cmc = requests.get('https://coinmarketcap.com/?page=' + str(pages))
        soup = BeautifulSoup(cmc.content, 'html.parser')
        # The cryptocurrency data lives as JSON inside the script tag
        # with id __NEXT_DATA__.
        data = soup.find('script', id="__NEXT_DATA__", type="application/json")
        coin_data = json.loads(data.contents[0])
        # listings[0] carries the column names ('keysArr'); the actual
        # coin rows follow as positional lists in the same order.
        listings = coin_data['props']['initialState']['cryptocurrency']['listingLatest']['data']
        keys = listings[0]['keysArr']
        # Build the column -> values mapping directly.  (The previous
        # implementation built a throwaway dict per coin and dead
        # placeholder assignments that were immediately overwritten.)
        temp_comb_coindata = collections.defaultdict(list)
        for row in listings[1:]:
            for col, key in enumerate(keys):
                temp_comb_coindata[key].append(row[col])
        # Convert this page's coins into a DataFrame and append it.
        df_coins = pd.DataFrame.from_dict(temp_comb_coindata)
        df_coindata = pd.concat([df_coindata, df_coins], axis=0, sort=False,
                                ignore_index=True)
    # =============================================================================
    # There is way too many variables
    # Not all of them is useful but keep them all
    # Reorder some variables that helps to figure out what you are looking at
    # Risk - Errros if coinmarketcap changed the names of these variables
    # But should be relatively easy to fix
    # =============================================================================
    var_first = ["rank", "cmcRank", "id", "name", "symbol", "slug", "isActive",
                 "isAudited", "dateAdded", "lastUpdated", "quote.USD.price",
                 "ath", "atl", "high24h", "low24h", "circulatingSupply",
                 "maxSupply", "totalSupply", "quote.USD.marketCap",
                 "quote.USD.marketCapByTotalSupply",
                 "quote.USD.fullyDilluttedMarketCap"]
    # NOTE: set difference makes the order of the remaining columns
    # unspecified (same as the original behavior).
    var_order = var_first + list(set(list(df_coindata)) - set(var_first))
    # Final dataframe to return
    df_coindata = df_coindata[var_order]
    return df_coindata
# Pull 79 pages of listings.  NOTE: runs at import time and performs one
# HTTP request per page.
df_allcoins = coinmarketcap_coins(79)
| [
"json.loads",
"pandas.DataFrame.from_dict",
"bs4.BeautifulSoup",
"collections.defaultdict",
"pandas.DataFrame",
"pandas.concat"
] | [((534, 548), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (546, 548), True, 'import pandas as pd\n'), ((937, 978), 'bs4.BeautifulSoup', 'BeautifulSoup', (['cmc.content', '"""html.parser"""'], {}), "(cmc.content, 'html.parser')\n", (950, 978), False, 'from bs4 import BeautifulSoup\n'), ((1252, 1280), 'json.loads', 'json.loads', (['data.contents[0]'], {}), '(data.contents[0])\n', (1262, 1280), False, 'import json\n'), ((1870, 1899), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1893, 1899), False, 'import collections\n'), ((2341, 2370), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (2364, 2370), False, 'import collections\n'), ((2571, 2613), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['temp_comb_coindata'], {}), '(temp_comb_coindata)\n', (2593, 2613), True, 'import pandas as pd\n'), ((2708, 2781), 'pandas.concat', 'pd.concat', (['[df_coindata, df_coins]'], {'axis': '(0)', 'sort': '(False)', 'ignore_index': '(True)'}), '([df_coindata, df_coins], axis=0, sort=False, ignore_index=True)\n', (2717, 2781), True, 'import pandas as pd\n'), ((1988, 2017), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (2011, 2017), False, 'import collections\n')] |
import json
import re
import traceback
from flask import request, make_response, abort
from flask.views import MethodView
from api import app
from api.extensions.api import Blueprint
from api.commands import log_parser
# Blueprint grouping all log-related endpoints under the /logs prefix.
blp = Blueprint(
    'Log',
    __name__,
    url_prefix='/logs',
    description="Operations on all logs"
)
@blp.route('/')
class Logs(MethodView):
    """Collection endpoint: enumerate the known log categories."""

    def get(self):
        """Return the list of available log types as a JSON array."""
        log_types = ['alerts', 'farming', 'plotting', 'archiving', 'webui', 'apisrv', 'pooling']
        response = make_response(json.dumps(log_types), 200)
        response.mimetype = "application/json"
        return response
@blp.route('/<type>')
class LogByType(MethodView):
    """Item endpoint: return the lines of one log as plain text."""

    def get(self, type):  # `type` shadows the builtin, but the route arg name is fixed
        """Fetch log lines; optional ``log_id``/``blockchain`` query args."""
        log_id = request.args.get('log_id')
        blockchain = request.args.get('blockchain')
        log = log_parser.get_log_lines(type, log_id=log_id, blockchain=blockchain)
        response = make_response(log, 200)
        response.mimetype = "plain/text"
        return response
| [
"flask.make_response",
"api.extensions.api.Blueprint",
"json.dumps",
"flask.request.args.get"
] | [((229, 318), 'api.extensions.api.Blueprint', 'Blueprint', (['"""Log"""', '__name__'], {'url_prefix': '"""/logs"""', 'description': '"""Operations on all logs"""'}), "('Log', __name__, url_prefix='/logs', description=\n 'Operations on all logs')\n", (238, 318), False, 'from api.extensions.api import Blueprint\n'), ((814, 837), 'flask.make_response', 'make_response', (['log', '(200)'], {}), '(log, 200)\n', (827, 837), False, 'from flask import request, make_response, abort\n'), ((427, 519), 'json.dumps', 'json.dumps', (["['alerts', 'farming', 'plotting', 'archiving', 'webui', 'apisrv', 'pooling']"], {}), "(['alerts', 'farming', 'plotting', 'archiving', 'webui', 'apisrv',\n 'pooling'])\n", (437, 519), False, 'import json\n'), ((724, 750), 'flask.request.args.get', 'request.args.get', (['"""log_id"""'], {}), "('log_id')\n", (740, 750), False, 'from flask import request, make_response, abort\n'), ((763, 793), 'flask.request.args.get', 'request.args.get', (['"""blockchain"""'], {}), "('blockchain')\n", (779, 793), False, 'from flask import request, make_response, abort\n')] |
import os
import re
import string
from collections import namedtuple, defaultdict
from tests import utils
from tests.parser import ParserTestCase
from sglove.parser.exception import *
from sglove.parser import _OptionManager
class TestOptionManager(ParserTestCase):
    """Unit tests for _OptionManager name validation, naming helpers,
    construction, config loading and option resolution."""

    # Number of randomized iterations / random option sets per test.
    __TEST_COUNT = 50

    def __test_invalid_naming(self, str_func, str_list):
        """Shared driver: for every character in *str_list*, build an invalid
        name via *str_func* and assert both the constructor and the naming
        helpers reject it with SGL_PARSER_INVALID_NAME_FORMAT."""
        manager = _OptionManager(self._APP_NAME)
        name_t = namedtuple('name_t', ('name', 'sub'))
        for _ in range(self.__TEST_COUNT):
            invalid_strings = [str_func(c) for c in str_list]
            # 1. Try constructor's name format check
            for case in invalid_strings:
                with self.assertRaises(SGLException) as err:
                    _OptionManager(case)
                self.assertEqual(err.exception.code,
                                 SGL_PARSER_INVALID_NAME_FORMAT)
            # 2. Try each member function's name format check
            # Pair each invalid string with a valid one in both positions so
            # both the name and the sub-name arguments get validated.
            valid = self._gen_random_string()
            test_case = [
                (name_t(invalid, valid), name_t(valid, invalid))
                for invalid in invalid_strings
            ]
            for case in [v for sub in test_case for v in sub]:
                for func in (manager.dest_name,
                             manager.env_name,
                             manager.long_arg):
                    with self.assertRaises(SGLException) as err:
                        func(case.name, case.sub)
                    self.assertEqual(err.exception.code,
                                     SGL_PARSER_INVALID_NAME_FORMAT)

    def test_invalid_naming(self):
        """Names with bad characters, bad first or bad last character are rejected."""
        # 1. Contain invalid characters (all punctuation except '_' and '-')
        punctuation = re.sub(r'[_\-]', '', string.punctuation)
        self.__test_invalid_naming(lambda c: self._gen_random_string(middle=c),
                                   punctuation)
        # 2. Consisted with valid character but not started with alphabet
        invalid_first = string.digits + '_-'
        self.__test_invalid_naming(lambda c: self._gen_random_string(prefix=c),
                                   invalid_first)
        # 3. Not ended with alphabet and numbers
        invalid_last = '_-'
        self.__test_invalid_naming(lambda c: self._gen_random_string(suffix=c),
                                   invalid_last)

    def test_valid_naming(self):
        """Naming helpers produce the expected env/arg/dest name formats."""
        manager = _OptionManager(self._APP_NAME)
        test_case = {self._gen_random_string(): self._gen_random_string()
                     for _ in range(self.__TEST_COUNT)}
        for k, v in test_case.items():
            self.assertEqual(manager.env_name(k, v), self._to_env_name(k, v))
            self.assertEqual(manager.long_arg(k, v), self._to_arg_name(k, v))
            self.assertEqual(manager.dest_name(k, v), self._to_dest_name(k, v))

    def test_invalid_initialization(self):
        """Constructor rejects a non-mapping environ but accepts a dict."""
        # 1. Enter invalid type.
        with self.assertRaises(SGLException) as err:
            _OptionManager(self._APP_NAME, [self._APP_NAME])
        self.assertEqual(err.exception.code, SGL_PARSER_UNEXPECTED_ENV_TYPE)
        # 2. Enter dict converted os.environ as environ target
        _OptionManager(self._APP_NAME, dict(os.environ))

    def test_invalid_loading(self):
        """Loading a nonexistent config path raises SGL_PARSER_CONFIG_NOT_EXIST."""
        manager = _OptionManager(self._APP_NAME)
        with self.assertRaises(SGLException) as err:
            manager.load(utils.get_temp_file('invalid_path'))
        self.assertEqual(err.exception.code, SGL_PARSER_CONFIG_NOT_EXIST)

    def test_normal(self):
        """End-to-end: options resolve correctly from env vars and config file."""
        test_options = self._gen_random_inputs(self.__TEST_COUNT)
        # This is the temporal manager to call the env_name().
        manager = _OptionManager(self._APP_NAME)
        env_dict = {}
        conf_dict = defaultdict(dict)
        # Split each generated option into the environment and/or config
        # source it is allowed to come from.
        for category, values in test_options.items():
            for name, value in values.items():
                if value.is_env_choosable:
                    env_dict.update({
                        manager.env_name(category, name): str(value.e_val)
                    })
                if value.is_file_choosable:
                    conf_dict[category].update({name: value.f_val})
        del manager
        # Run normal valid test
        with utils.config_file(conf_dict) as temp_file:
            manager = _OptionManager(self._APP_NAME, environ=env_dict)
            manager.load(temp_file)
            for category, values in test_options.items():
                for name, value in values.items():
                    default = manager.default_value(category, name,
                                                    default=value.default,
                                                    type=value.type)
                    self.assertEqual(default, value.expected)
| [
"sglove.parser._OptionManager",
"collections.namedtuple",
"tests.utils.get_temp_file",
"collections.defaultdict",
"tests.utils.config_file",
"re.sub"
] | [((368, 398), 'sglove.parser._OptionManager', '_OptionManager', (['self._APP_NAME'], {}), '(self._APP_NAME)\n', (382, 398), False, 'from sglove.parser import _OptionManager\n'), ((416, 453), 'collections.namedtuple', 'namedtuple', (['"""name_t"""', "('name', 'sub')"], {}), "('name_t', ('name', 'sub'))\n", (426, 453), False, 'from collections import namedtuple, defaultdict\n'), ((1685, 1725), 're.sub', 're.sub', (['"""[_\\\\-]"""', '""""""', 'string.punctuation'], {}), "('[_\\\\-]', '', string.punctuation)\n", (1691, 1725), False, 'import re\n'), ((2363, 2393), 'sglove.parser._OptionManager', '_OptionManager', (['self._APP_NAME'], {}), '(self._APP_NAME)\n', (2377, 2393), False, 'from sglove.parser import _OptionManager\n'), ((3245, 3275), 'sglove.parser._OptionManager', '_OptionManager', (['self._APP_NAME'], {}), '(self._APP_NAME)\n', (3259, 3275), False, 'from sglove.parser import _OptionManager\n'), ((3643, 3673), 'sglove.parser._OptionManager', '_OptionManager', (['self._APP_NAME'], {}), '(self._APP_NAME)\n', (3657, 3673), False, 'from sglove.parser import _OptionManager\n'), ((3717, 3734), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3728, 3734), False, 'from collections import namedtuple, defaultdict\n'), ((2942, 2990), 'sglove.parser._OptionManager', '_OptionManager', (['self._APP_NAME', '[self._APP_NAME]'], {}), '(self._APP_NAME, [self._APP_NAME])\n', (2956, 2990), False, 'from sglove.parser import _OptionManager\n'), ((4196, 4224), 'tests.utils.config_file', 'utils.config_file', (['conf_dict'], {}), '(conf_dict)\n', (4213, 4224), False, 'from tests import utils\n'), ((4261, 4309), 'sglove.parser._OptionManager', '_OptionManager', (['self._APP_NAME'], {'environ': 'env_dict'}), '(self._APP_NAME, environ=env_dict)\n', (4275, 4309), False, 'from sglove.parser import _OptionManager\n'), ((3355, 3390), 'tests.utils.get_temp_file', 'utils.get_temp_file', (['"""invalid_path"""'], {}), "('invalid_path')\n", (3374, 3390), False, 'from 
tests import utils\n'), ((736, 756), 'sglove.parser._OptionManager', '_OptionManager', (['case'], {}), '(case)\n', (750, 756), False, 'from sglove.parser import _OptionManager\n')] |
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.15.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from gitea_api.configuration import Configuration
class InternalTracker(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Maps each python attribute to its swagger type (drives to_dict()).
swagger_types = {
    'allow_only_contributors_to_track_time': 'bool',
    'enable_issue_dependencies': 'bool',
    'enable_time_tracker': 'bool'
}

# Maps python attribute names to the JSON keys used in the API payload.
attribute_map = {
    'allow_only_contributors_to_track_time': 'allow_only_contributors_to_track_time',
    'enable_issue_dependencies': 'enable_issue_dependencies',
    'enable_time_tracker': 'enable_time_tracker'
}
def __init__(self, allow_only_contributors_to_track_time=None, enable_issue_dependencies=None, enable_time_tracker=None, _configuration=None):  # noqa: E501
    """InternalTracker - a model defined in Swagger

    All three feature flags are optional; the backing attributes stay
    ``None`` unless a value is explicitly supplied.
    """  # noqa: E501
    if _configuration is None:
        _configuration = Configuration()
    self._configuration = _configuration

    # Backing fields for the public properties below.
    self._allow_only_contributors_to_track_time = None
    self._enable_issue_dependencies = None
    self._enable_time_tracker = None
    self.discriminator = None

    # Only assign flags that were actually provided, so unset flags
    # remain None rather than becoming False.
    if allow_only_contributors_to_track_time is not None:
        self.allow_only_contributors_to_track_time = allow_only_contributors_to_track_time
    if enable_issue_dependencies is not None:
        self.enable_issue_dependencies = enable_issue_dependencies
    if enable_time_tracker is not None:
        self.enable_time_tracker = enable_time_tracker
@property
def allow_only_contributors_to_track_time(self):
    """Gets the allow_only_contributors_to_track_time of this InternalTracker.  # noqa: E501

    Let only contributors track time (Built-in issue tracker).
    Remains ``None`` until explicitly set.  # noqa: E501

    :return: The allow_only_contributors_to_track_time of this InternalTracker.  # noqa: E501
    :rtype: bool
    """
    return self._allow_only_contributors_to_track_time
@allow_only_contributors_to_track_time.setter
def allow_only_contributors_to_track_time(self, allow_only_contributors_to_track_time):
    """Sets the allow_only_contributors_to_track_time of this InternalTracker.

    Let only contributors track time (Built-in issue tracker).
    No validation is performed; the value is stored as-is.  # noqa: E501

    :param allow_only_contributors_to_track_time: The allow_only_contributors_to_track_time of this InternalTracker.  # noqa: E501
    :type: bool
    """
    self._allow_only_contributors_to_track_time = allow_only_contributors_to_track_time
@property
def enable_issue_dependencies(self):
    """Gets the enable_issue_dependencies of this InternalTracker.  # noqa: E501

    Enable dependencies for issues and pull requests (Built-in issue tracker).
    Remains ``None`` until explicitly set.  # noqa: E501

    :return: The enable_issue_dependencies of this InternalTracker.  # noqa: E501
    :rtype: bool
    """
    return self._enable_issue_dependencies
@enable_issue_dependencies.setter
def enable_issue_dependencies(self, enable_issue_dependencies):
    """Sets the enable_issue_dependencies of this InternalTracker.

    Enable dependencies for issues and pull requests (Built-in issue tracker).
    No validation is performed; the value is stored as-is.  # noqa: E501

    :param enable_issue_dependencies: The enable_issue_dependencies of this InternalTracker.  # noqa: E501
    :type: bool
    """
    self._enable_issue_dependencies = enable_issue_dependencies
@property
def enable_time_tracker(self):
"""Gets the enable_time_tracker of this InternalTracker. # noqa: E501
Enable time tracking (Built-in issue tracker) # noqa: E501
:return: The enable_time_tracker of this InternalTracker. # noqa: E501
:rtype: bool
"""
return self._enable_time_tracker
@enable_time_tracker.setter
def enable_time_tracker(self, enable_time_tracker):
"""Sets the enable_time_tracker of this InternalTracker.
Enable time tracking (Built-in issue tracker) # noqa: E501
:param enable_time_tracker: The enable_time_tracker of this InternalTracker. # noqa: E501
:type: bool
"""
self._enable_time_tracker = enable_time_tracker
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InternalTracker, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Delegate to to_str() so repr shows the pretty-printed model dict.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InternalTracker):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InternalTracker):
return True
return self.to_dict() != other.to_dict()
| [
"six.iteritems",
"gitea_api.configuration.Configuration"
] | [((4858, 4891), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (4871, 4891), False, 'import six\n'), ((1449, 1464), 'gitea_api.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1462, 1464), False, 'from gitea_api.configuration import Configuration\n')] |
import unittest
from jina.executors.crafters import BaseCrafter
from jina.flow import Flow
from jina.proto import jina_pb2
from tests import JinaTestCase
class DummyCrafter(BaseCrafter):
    """Crafter whose craft() always raises ZeroDivisionError (error-path fixture)."""
    def craft(self, *args, **kwargs):
        # Intentional failure: the tests below verify that this exception is
        # captured and reported on the request status.
        return 1 / 0
class FlowExceptTestCase(JinaTestCase):
    """Verify that executor failures inside a Flow surface on the request status."""

    def test_bad_flow(self):
        """The first failing pod ('r1', a bare BaseCrafter) must be named in the status."""
        def validate(req):
            # The request carries an ERROR status identifying the failing pod.
            assert req.status.code == jina_pb2.Status.ERROR
            assert req.status.details[0].pod == 'r1'
        f = (Flow().add(name='r1', uses='!BaseCrafter')
             .add(name='r2', uses='!BaseEncoder')
             .add(name='r3', uses='!BaseEncoder'))
        # always test two times, make sure the flow still works after it fails on the first
        with f:
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)

    def test_bad_flow_customized(self):
        """DummyCrafter in 'r2' raises ZeroDivisionError, which must be reported."""
        def validate(req):
            assert req.status.code == jina_pb2.Status.ERROR
            assert req.status.details[0].pod == 'r2'
            self.assertTrue(req.status.details[0].exception.startswith('ZeroDivisionError'))
        f = (Flow().add(name='r1', uses='_pass')
             .add(name='r2', uses='!DummyCrafter')
             .add(name='r3', uses='!BaseEncoder'))
        with f:
            f.dry_run()
        # always test two times, make sure the flow still works after it fails on the first
        with f:
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)

    def test_except_with_parallel(self):
        """With parallel=3 both the crafter and encoder failures are collected in order."""
        def validate(req):
            assert req.status.code == jina_pb2.Status.ERROR
            # Two failures expected: DummyCrafter first, then the encoder.
            assert len(req.status.details) == 2
            assert req.status.details[0].executor == 'DummyCrafter'
            assert req.status.details[1].executor == 'BaseEncoder'
            self.assertTrue(req.status.details[0].exception.startswith('ZeroDivisionError'))
            self.assertTrue(req.status.details[1].exception.startswith('NotImplementedError'))
        f = (Flow().add(name='r1', uses='_pass')
             .add(name='r2', uses='!DummyCrafter', parallel=3)
             .add(name='r3', uses='!BaseEncoder'))
        with f:
            f.dry_run()
        with f:
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)
            f.index_lines(lines=['abbcs', 'efgh'], output_fn=validate)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| [
"unittest.main",
"jina.flow.Flow"
] | [((2485, 2500), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2498, 2500), False, 'import unittest\n'), ((475, 481), 'jina.flow.Flow', 'Flow', ([], {}), '()\n', (479, 481), False, 'from jina.flow import Flow\n'), ((1158, 1164), 'jina.flow.Flow', 'Flow', ([], {}), '()\n', (1162, 1164), False, 'from jina.flow import Flow\n'), ((2102, 2108), 'jina.flow.Flow', 'Flow', ([], {}), '()\n', (2106, 2108), False, 'from jina.flow import Flow\n')] |
from enum import Enum
from dlchord2.const import ACCIDENTALS_SHARP, ACCIDENTALS_FLAT
from dlchord2.exceptions.accidentals_exceptions import AccidentalsParseError
class AccidentalsType(Enum):
    """
    Enumeration of accidental (key-signature symbol) kinds.
    """
    NONE = 0   # no accidental present
    SHARP = 1  # sharp accidental(s)
    FLAT = 2   # flat accidental(s)
class AccidentalsParseData(object):
    """
    Immutable container for the result of parsing accidentals.
    """
    def __init__(self, accidentals, accidentals_type, transpose_num):
        self._accidentals = accidentals            # raw accidentals text
        self._accidentals_type = accidentals_type  # AccidentalsType member
        self._transpose_num = transpose_num        # signed semitone shift
    @property
    def accidentals(self):
        """
        Return the raw accidentals text.
        :return: raw accidentals text
        :rtype: str
        """
        return self._accidentals
    @property
    def accidentals_type(self):
        """
        Return the kind of accidental found.
        :return: accidental kind
        :rtype: AccidentalsType
        """
        return self._accidentals_type
    @property
    def transpose_num(self):
        """
        Return the transposition amount (positive for sharps, negative for flats).
        :return: transposition amount
        :rtype: int
        """
        return self._transpose_num
class AccidentalsParser(object):
    """
    Parses accidental (sharp/flat) symbols from a chord suffix.
    """
    def parse(self, accidentals_text):
        """
        Parse accidentals.

        :param accidentals_text: accidentals text, without the root note
        :type accidentals_text: str
        :return: parsed accidentals data
        :rtype: AccidentalsParseData
        """
        sharps = accidentals_text.count(ACCIDENTALS_SHARP)
        flats = accidentals_text.count(ACCIDENTALS_FLAT)
        # Mixing sharp and flat symbols in one signature is invalid.
        if sharps > 0 and flats > 0:
            raise AccidentalsParseError("異なる調号は重複して存在することはできません。")
        if sharps > 0:
            kind, shift = AccidentalsType.SHARP, sharps
        elif flats > 0:
            kind, shift = AccidentalsType.FLAT, -flats
        else:
            kind, shift = AccidentalsType.NONE, 0
        return AccidentalsParseData(accidentals_text, kind, shift)
| [
"dlchord2.exceptions.accidentals_exceptions.AccidentalsParseError"
] | [((1551, 1599), 'dlchord2.exceptions.accidentals_exceptions.AccidentalsParseError', 'AccidentalsParseError', (['"""異なる調号は重複して存在することはできません。"""'], {}), "('異なる調号は重複して存在することはできません。')\n", (1572, 1599), False, 'from dlchord2.exceptions.accidentals_exceptions import AccidentalsParseError\n')] |
import pytest
import yaml
from nequip.utils import instantiate
simple_default = {"b": 1, "d": 31}


class SimpleExample:
    """Minimal builder fixture: one required argument plus two defaulted ones."""

    def __init__(self, a, b=simple_default["b"], d=simple_default["d"]):
        self.a, self.b, self.d = a, b, d
nested_default = {"d": 37}


class NestedExample:
    """Builder fixture that constructs a nested object from cls_c / cls_c_kwargs."""

    def __init__(self, cls_c, a, cls_c_kwargs={}, d=nested_default["d"]):
        # The {} default is only unpacked, never mutated, so it is safe here;
        # instantiate() also introspects this default, so it must stay a dict.
        self.c_obj = cls_c(**cls_c_kwargs)
        self.a = a
        self.d = d
def assert_dict(d):
    """Recursively check a key-map dict: every string leaf must equal its own key."""
    for key, val in d.items():
        if isinstance(val, dict):
            assert_dict(val)
        elif isinstance(val, str):
            assert key == val
@pytest.mark.parametrize("positional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("optional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("all_args", [dict(a=6, b=7), dict(a=8), dict()])
@pytest.mark.parametrize("prefix", [True, False])
def test_simple_init(positional_args, optional_args, all_args, prefix):
    """instantiate() on SimpleExample: key mapping, argument priority and defaults."""
    # Merge with increasing priority: all_args < optional_args < positional_args.
    union = {}
    union.update(all_args)
    union.update(optional_args)
    union.update(positional_args)
    if "a" not in union:
        return
    # decorate test with prefix
    _all_args = (
        {"simple_example_" + k: v for k, v in all_args.items()} if prefix else all_args
    )
    # check key mapping is correct
    km, params = instantiate(
        builder=SimpleExample,
        prefix="simple_example",
        positional_args=positional_args,
        optional_args=optional_args,
        all_args=_all_args,
        return_args_only=True,
    )
    for t in km:
        for k, v in km[t].items():
            # Each mapped key must originate from the matching *_args dict,
            # and prefixed all_args keys must round-trip with the prefix.
            assert k in locals()[t + "_args"]
            if prefix and t == "all":
                assert v == "simple_example_" + k
            else:
                assert v == k
    km, _ = instantiate(
        builder=SimpleExample,
        prefix="simple_example",
        positional_args=positional_args,
        all_args=params,
        return_args_only=True,
    )
    assert_dict(km)
    # check whether it gets the priority right
    a1, params = instantiate(
        builder=SimpleExample,
        prefix="simple_example",
        positional_args=positional_args,
        optional_args=optional_args,
        all_args=_all_args,
    )
    assert a1.a == union["a"]
    if "b" in union:
        assert a1.b == union["b"]
    else:
        assert a1.b == simple_default["b"]
    for k in params:
        if k in simple_default:
            assert params[k] == union.get(k, simple_default[k])
    # check whether the return value is right
    a2 = SimpleExample(**positional_args, **params)
    assert a1.a == a2.a
    assert a1.b == a2.b
def test_prefix_priority():
    """A prefixed key ("prefix_a") must win over the bare key ("a")."""
    args = {"prefix_a": 3, "a": 4}
    a, params = instantiate(
        builder=SimpleExample,
        prefix="prefix",
        all_args=args,
    )
    assert a.a == 3
@pytest.mark.parametrize("optional_args", [dict(a=3, b=4), dict(a=5), dict()])
@pytest.mark.parametrize("all_args", [dict(a=6, b=7), dict(a=8), dict()])
@pytest.mark.parametrize("prefix", [True, False])
def test_nested_kwargs(optional_args, all_args, prefix):
    """instantiate() must succeed when the builder takes a nested cls_c/cls_c_kwargs pair."""
    # NOTE(review): `prefix` is parametrized but unused in the body — confirm intent.
    union = {}
    union.update(all_args)
    union.update(optional_args)
    if "a" not in union:
        return
    c, params = instantiate(
        builder=NestedExample,
        prefix="prefix",
        positional_args={"cls_c": SimpleExample},
        optional_args=optional_args,
        all_args=all_args,
    )
def test_default():
    """
    check the default value will not contaminate the other class
    """
    c, params = instantiate(
        builder=NestedExample,
        prefix="prefix",
        positional_args={"cls_c": SimpleExample},
        optional_args={"a": 11},
    )
    # Bug fix: these were plain assignments (`c.d = ...`), which asserted
    # nothing and overwrote the very values under test; they must be
    # equality assertions against each class's own default.
    assert c.d == nested_default["d"]
    assert c.c_obj.d == simple_default["d"]
class A:
    """Fixture wrapper: instantiates ``cls_a`` with ``cls_a_kwargs``."""
    def __init__(self, cls_a, cls_a_kwargs):
        self.a_obj = cls_a(**cls_a_kwargs)
class B:
    """Fixture wrapper: instantiates ``cls_b`` with ``cls_b_kwargs``."""
    def __init__(self, cls_b, cls_b_kwargs):
        self.b_obj = cls_b(**cls_b_kwargs)
class C:
    """Fixture used to form a builder cycle; its body is never meant to run."""
    def __init__(self, cls_c, cls_c_kwargs):  # noqa
        # Deliberately references undefined names (c_cls vs cls_c): in the
        # cyclic-nesting test instantiate() must raise before this executes.
        self.c_obj = c_cls(**c_cls_kwargs)  # noqa
def test_deep_nests():
    """Three levels of nesting (NestedExample -> A -> B -> SimpleExample) resolve correctly."""
    all_args = {"a": 101, "b": 103, "c": 107}
    obj, params = instantiate(
        builder=NestedExample,
        optional_args={"cls_c": A, "cls_a": B, "cls_b": SimpleExample},
        all_args=all_args,
    )
    print(yaml.dump(params))
    # Arguments must reach the innermost SimpleExample; unset ones keep defaults.
    assert obj.c_obj.a_obj.b_obj.a == all_args["a"]
    assert obj.c_obj.a_obj.b_obj.b == all_args["b"]
    assert obj.c_obj.a_obj.b_obj.d == simple_default["d"]
    assert obj.d == nested_default["d"]
    # The returned params must be sufficient to rebuild an identical object.
    obj = NestedExample(**params)
    assert obj.c_obj.a_obj.b_obj.a == all_args["a"]
    assert obj.c_obj.a_obj.b_obj.b == all_args["b"]
    assert obj.c_obj.a_obj.b_obj.d == simple_default["d"]
    assert obj.d == nested_default["d"]
    km, params = instantiate(
        builder=NestedExample,
        optional_args={"cls_c": A, "cls_a": B, "cls_b": SimpleExample},
        all_args=all_args,
        return_args_only=True,
    )
    print(yaml.dump(km))
    # check the key mapping is unique for
    km, _ = instantiate(
        builder=NestedExample, optional_args=params, return_args_only=True
    )
    assert_dict(km)
def test_recursion_nests():
    """Mutually nested builders (A needs B, B needs A) must raise a cyclic error."""
    with pytest.raises(RuntimeError) as excinfo:
        b, params = instantiate(
            builder=A,
            positional_args={"cls_a": B},
            optional_args={"cls_b": A},
        )
    assert "cyclic" in str(excinfo.value)
    print(excinfo)
def test_cyclic_nests():
    """A three-class cycle (A -> B -> C -> A) must also be detected."""
    with pytest.raises(RuntimeError) as excinfo:
        c, params = instantiate(
            builder=A,
            positional_args={"cls_a": B},
            optional_args={"cls_b": C},
            all_args={"cls_c": A},
        )
    assert "cyclic" in str(excinfo.value)
    # NOTE(review): the trailing "hello" looks like leftover debug output.
    print(excinfo, "hello")
class BadKwargs1:
    """Invalid fixture: ``thing_kwargs`` has no matching ``thing`` parameter."""
    def __init__(self, thing_kwargs={}):
        pass
class BadKwargs2:
    """Invalid fixture: ``thing`` is a plain string, so ``thing_kwargs`` has no class to feed."""
    def __init__(self, thing="a string", thing_kwargs={}):
        pass
def test_bad_kwargs():
    """instantiate() must reject builders with dangling or mistyped *_kwargs parameters."""
    with pytest.raises(KeyError):
        _ = instantiate(BadKwargs1)
    with pytest.raises(ValueError):
        _ = instantiate(BadKwargs2)
| [
"pytest.mark.parametrize",
"nequip.utils.instantiate",
"pytest.raises",
"yaml.dump"
] | [((865, 913), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""prefix"""', '[True, False]'], {}), "('prefix', [True, False])\n", (888, 913), False, 'import pytest\n'), ((3012, 3060), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""prefix"""', '[True, False]'], {}), "('prefix', [True, False])\n", (3035, 3060), False, 'import pytest\n'), ((1333, 1506), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'SimpleExample', 'prefix': '"""simple_example"""', 'positional_args': 'positional_args', 'optional_args': 'optional_args', 'all_args': '_all_args', 'return_args_only': '(True)'}), "(builder=SimpleExample, prefix='simple_example', positional_args\n =positional_args, optional_args=optional_args, all_args=_all_args,\n return_args_only=True)\n", (1344, 1506), False, 'from nequip.utils import instantiate\n'), ((1799, 1936), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'SimpleExample', 'prefix': '"""simple_example"""', 'positional_args': 'positional_args', 'all_args': 'params', 'return_args_only': '(True)'}), "(builder=SimpleExample, prefix='simple_example', positional_args\n =positional_args, all_args=params, return_args_only=True)\n", (1810, 1936), False, 'from nequip.utils import instantiate\n'), ((2064, 2210), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'SimpleExample', 'prefix': '"""simple_example"""', 'positional_args': 'positional_args', 'optional_args': 'optional_args', 'all_args': '_all_args'}), "(builder=SimpleExample, prefix='simple_example', positional_args\n =positional_args, optional_args=optional_args, all_args=_all_args)\n", (2075, 2210), False, 'from nequip.utils import instantiate\n'), ((2738, 2804), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'SimpleExample', 'prefix': '"""prefix"""', 'all_args': 'args'}), "(builder=SimpleExample, prefix='prefix', all_args=args)\n", (2749, 2804), False, 'from nequip.utils import instantiate\n'), ((3249, 3395), 'nequip.utils.instantiate', 
'instantiate', ([], {'builder': 'NestedExample', 'prefix': '"""prefix"""', 'positional_args': "{'cls_c': SimpleExample}", 'optional_args': 'optional_args', 'all_args': 'all_args'}), "(builder=NestedExample, prefix='prefix', positional_args={\n 'cls_c': SimpleExample}, optional_args=optional_args, all_args=all_args)\n", (3260, 3395), False, 'from nequip.utils import instantiate\n'), ((3558, 3681), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'NestedExample', 'prefix': '"""prefix"""', 'positional_args': "{'cls_c': SimpleExample}", 'optional_args': "{'a': 11}"}), "(builder=NestedExample, prefix='prefix', positional_args={\n 'cls_c': SimpleExample}, optional_args={'a': 11})\n", (3569, 3681), False, 'from nequip.utils import instantiate\n'), ((4184, 4305), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'NestedExample', 'optional_args': "{'cls_c': A, 'cls_a': B, 'cls_b': SimpleExample}", 'all_args': 'all_args'}), "(builder=NestedExample, optional_args={'cls_c': A, 'cls_a': B,\n 'cls_b': SimpleExample}, all_args=all_args)\n", (4195, 4305), False, 'from nequip.utils import instantiate\n'), ((4820, 4964), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'NestedExample', 'optional_args': "{'cls_c': A, 'cls_a': B, 'cls_b': SimpleExample}", 'all_args': 'all_args', 'return_args_only': '(True)'}), "(builder=NestedExample, optional_args={'cls_c': A, 'cls_a': B,\n 'cls_b': SimpleExample}, all_args=all_args, return_args_only=True)\n", (4831, 4964), False, 'from nequip.utils import instantiate\n'), ((5080, 5159), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'NestedExample', 'optional_args': 'params', 'return_args_only': '(True)'}), '(builder=NestedExample, optional_args=params, return_args_only=True)\n', (5091, 5159), False, 'from nequip.utils import instantiate\n'), ((4344, 4361), 'yaml.dump', 'yaml.dump', (['params'], {}), '(params)\n', (4353, 4361), False, 'import yaml\n'), ((5010, 5023), 'yaml.dump', 'yaml.dump', (['km'], 
{}), '(km)\n', (5019, 5023), False, 'import yaml\n'), ((5233, 5260), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5246, 5260), False, 'import pytest\n'), ((5293, 5378), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'A', 'positional_args': "{'cls_a': B}", 'optional_args': "{'cls_b': A}"}), "(builder=A, positional_args={'cls_a': B}, optional_args={'cls_b': A}\n )\n", (5304, 5378), False, 'from nequip.utils import instantiate\n'), ((5518, 5545), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5531, 5545), False, 'import pytest\n'), ((5578, 5685), 'nequip.utils.instantiate', 'instantiate', ([], {'builder': 'A', 'positional_args': "{'cls_a': B}", 'optional_args': "{'cls_b': C}", 'all_args': "{'cls_c': A}"}), "(builder=A, positional_args={'cls_a': B}, optional_args={'cls_b':\n C}, all_args={'cls_c': A})\n", (5589, 5685), False, 'from nequip.utils import instantiate\n'), ((6011, 6034), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (6024, 6034), False, 'import pytest\n'), ((6048, 6071), 'nequip.utils.instantiate', 'instantiate', (['BadKwargs1'], {}), '(BadKwargs1)\n', (6059, 6071), False, 'from nequip.utils import instantiate\n'), ((6081, 6106), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6094, 6106), False, 'import pytest\n'), ((6120, 6143), 'nequip.utils.instantiate', 'instantiate', (['BadKwargs2'], {}), '(BadKwargs2)\n', (6131, 6143), False, 'from nequip.utils import instantiate\n')] |
import torch
import torch.nn as nn
import os
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from .modules import WavePool, WaveUnpool, ImagePool, NLayerDiscriminator
from utils.metrics import compute_dice_metric
from utils.losses import DiceLoss
import numpy as np
class WaveEncoder(nn.Module):
    """Wavelet encoder in WCT2, only partial layers used"""
    def __init__(self):
        super(WaveEncoder, self).__init__()
        self.pad = nn.ReflectionPad2d(1)
        self.relu = nn.ReLU(inplace=True)
        self.conv0 = nn.Conv2d(3, 3, 1, 1, 0)
        self.conv1_1 = nn.Conv2d(3, 64, 3, 1, 0)
        self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 0)
        self.pool1 = WavePool(64)
        # The layers below are never reached by forward() (it stops after
        # pool1); presumably kept so load_state_dict() matches the full
        # pretrained checkpoint's keys — TODO confirm.
        self.conv2_1 = nn.Conv2d(64, 128, 3, 1, 0)
        self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 0)
        self.pool2 = WavePool(128)
        self.conv3_1 = nn.Conv2d(128, 256, 3, 1, 0)
        self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
        self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
        self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 0)
        self.pool3 = WavePool(256)
        self.conv4_1 = nn.Conv2d(256, 512, 3, 1, 0)
    def forward(self, x, skips):
        """Wavelet encoding - only up to level 2
        Args:
            x (torch.Tensor): input to be encoded
            skips (dict): dictionary to contain LH, HL, HH filter responses
        Returns:
            LL (torch.Tensor): output of LL filters
            skips (dict): dictionary containing said filters
        """
        # level 1
        out = self.conv0(x)
        out = self.relu(self.conv1_1(self.pad(out)))
        # level 2
        out = self.relu(self.conv1_2(self.pad(out)))
        # Stash the pre-pool activation and the high-frequency components so
        # the decoder can reconstruct via wavelet unpooling.
        skips['conv1_2'] = out
        LL, LH, HL, HH = self.pool1(out)
        skips['pool1'] = [LH, HL, HH]
        return LL, skips
class WaveDecoder(nn.Module):
    """Wavelet encoder in WCT2, only partial layers used"""
    def __init__(self):
        super(WaveDecoder, self).__init__()
        # multiply_in scales input channels of the *_2 convs; presumably the
        # 4 wavelet components plus the skip connection — TODO confirm.
        multiply_in = 5
        self.pad = nn.ReflectionPad2d(1)
        self.relu = nn.ReLU(inplace=True)
        # Only recon_block1 is used by forward(); the remaining layers appear
        # to exist so the pretrained checkpoint loads cleanly — TODO confirm.
        self.conv4_1 = nn.Conv2d(512, 256, 3, 1, 0)
        self.recon_block3 = WaveUnpool(256)
        self.conv3_4_2 = nn.Conv2d(256*multiply_in, 256, 3, 1, 0)
        self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 0)
        self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 0)
        self.conv3_1 = nn.Conv2d(256, 128, 3, 1, 0)
        self.recon_block2 = WaveUnpool(128)
        self.conv2_2_2 = nn.Conv2d(128*multiply_in, 128, 3, 1, 0)
        self.conv2_1 = nn.Conv2d(128, 64, 3, 1, 0)
        self.recon_block1 = WaveUnpool(64)
        self.conv1_2_2 = nn.Conv2d(64*multiply_in, 64, 3, 1, 0)
        self.conv1_1 = nn.Conv2d(64, 3, 3, 1, 0)
    def forward(self, x, skips):
        """Decoder - upsample from level 2
        Args:
            x (torch.Tensor): input to be encoded
            skips (dict): dictionary containing LH, HL, HH filter responses
        Returns:
            out (torch.Tensor): output of wavelet unpooling layer
        """
        LH, HL, HH = skips['pool1']
        # The stored pre-pool activation acts as the unpooling skip input.
        original = skips['conv1_2'] if 'conv1_2' in skips.keys() else None
        out = self.recon_block1(x, LH, HL, HH, original)
        return out
class WCT2Features(nn.Module):
    """WCT2 transform with fixed input and output channels and handpicked LL filters
    """
    def __init__(self, filters=None, model_path_encoder=None, model_path_decoder=None):
        """Load the pretrained wavelet encoder/decoder.

        Args:
            filters (list[int] | None): channel indices to select from the
                decoder output; None keeps the first 64 channels unselected.
            model_path_encoder (str): path to the encoder state dict.
            model_path_decoder (str): path to the decoder state dict.

        NOTE(review): both modules are unconditionally moved to CUDA — this
        class requires a GPU.
        """
        super(WCT2Features, self).__init__()
        self.encoder = WaveEncoder().cuda()
        self.decoder = WaveDecoder().cuda()
        self.encoder.load_state_dict(
            torch.load(os.path.join(model_path_encoder),
                       map_location=lambda storage, loc: storage))
        self.decoder.load_state_dict(
            torch.load(os.path.join(model_path_decoder),
                       map_location=lambda storage, loc: storage))
        self.filters = filters
        # self.tanh = nn.Tanh()
        # chosen channels
        # self.ll_filter_idx = [4,7,11,24,25,27]
        # Sparsest CT channels [25, 54,16,22,61,4,8,27,7,3]
        # self.ll_filter_idx = [15,2,41,12,39,1,42,23,51,38]
        # self.ll_filter_idx = [14 ,15 ,45 ,19 ,39, 1 ,42 ,23 ,51, 38]
    def forward(self, x):
        """Get WCT2 LL filters
        Args:
            x (torch.Tensor): input tensor
        Returns:
            out (torch.Tensor): output LL filters
        """
        skips = {}
        out, skips = self.encoder(x, skips)
        out = self.decoder(out, skips)
        # Keep only the first 64 channels, then optionally pick a subset.
        out = out[:,:64,:,:]
        if self.filters != None:
            out = torch.index_select(out, 1, torch.tensor(self.filters).cuda())
        return out
class WCT2GANUNet(nn.Module):
    """WCT2 GAN UNet all in one class

    Bundles the generator (feature translator), a PatchGAN-style
    discriminator and a UNet segmentor, together with their optimizers and
    a single adversarial + segmentation training step.

    NOTE(review): nn.BCELoss assumes the discriminator outputs probabilities
    in [0, 1] — confirm NLayerDiscriminator ends with a sigmoid.
    """
    def __init__(self, g, seg, n_channels, lr=0.0002):
        super(WCT2GANUNet, self).__init__()
        # generator
        self.g = g.cuda()
        # discriminator
        self.d = NLayerDiscriminator(input_nc=n_channels).cuda()
        # segmentor
        self.seg = seg.cuda()
        self.lr = lr
        # optimisers here
        self.g_optim = optim.Adam(self.g.parameters(), lr=self.lr)
        self.seg_optim = optim.Adam(self.seg.parameters(), lr=self.lr)
        # self.optim = optim.Adam(chain(self.g.parameters(), self.seg.parameters()), lr=self.lr)
        self.d_optim = optim.SGD(self.d.parameters(), lr=self.lr, momentum=0.5)
        self.criterion_gan = nn.BCELoss()
        self.pool = ImagePool()
    def criterion_seg(self, prediction, target):
        # Combined BCE + Dice segmentation loss.
        return nn.BCELoss()(prediction, target) + DiceLoss()(prediction, target)
    def forward_gen(self, x):
        # Generator forward pass only.
        return self.g(x)
    def forward_seg(self, x):
        # Translate with the generator, then segment via the UNet's
        # encoder/decoder halves.
        out = self.forward_gen(x)
        a1, a2, a3, a4, a5 = self.seg.downsample(out)
        seg = self.seg.upsample(a1, a2, a3, a4, a5)
        return seg
    def get_target(self, pred, is_true=True):
        """Return target tensor with similar shape to pred"""
        # NOTE(review): for is_true=True this returns ones only when
        # random() > 0.65 (~35% of the time) and zeros otherwise — if the
        # intent was *occasional* label flipping the condition looks
        # inverted; confirm against the commented-out soft-label variant.
        if is_true == True and np.random.random() > 0.65:
            return torch.ones(pred.size(), requires_grad=False).cuda()
        return torch.zeros(pred.size(), requires_grad=False).cuda()
        # # occasionally give wrong labels
        # if is_true == True and np.random.random() + 0.3 > 0.5:
        #     # use soft label for true [0.7, 1.2]
        #     return (1.2 - 0.7) * torch.rand(pred.size(), requires_grad=False).cuda() + 0.7
        # # use soft label [0, 0.1] for false
        # return 0.1 * torch.rand(pred.size(), requires_grad=False).cuda()
    def set_requires_grad(self, net, requires_grad=False):
        # Freeze/unfreeze all parameters of a sub-network.
        for param in net.parameters():
            param.requires_grad=requires_grad
    def step(self, x_s, x_t, y_s):
        """One combined training step.

        Args:
            x_s: source-domain batch (has labels).
            x_t: target-domain batch (unlabeled).
            y_s: segmentation labels for x_s.
        Returns:
            (segmentation loss, discriminator loss, dice score) as floats.
        """
        # GAN loss - update discriminator and generator here
        # GAN loss - max log(D(x)) + log(1 - D(G(x)))
        # update d only
        self.d_optim.zero_grad()
        out_x_s = self.forward_gen(x_s)
        out_x_t = self.forward_gen(x_t)
        x_s_real = self.d(out_x_s)
        target_real = self.get_target(x_s_real)
        loss_real = self.criterion_gan(x_s_real, target_real)
        loss_real.backward()
        # get generated feature maps from pool / replay for stability
        x_s_fake_map = (self.pool.query(out_x_t)).detach()
        x_s_fake = self.d(x_s_fake_map)
        target_fake = self.get_target(x_s_fake, is_true=False)
        loss_fake = self.criterion_gan(x_s_fake, target_fake)
        loss_fake.backward()
        self.d_optim.step()
        # update g - max D(G(X))
        self.g_optim.zero_grad()
        x_s_fake = self.d(x_s_fake_map)
        target_real = self.get_target(x_s_real)
        loss_g = self.criterion_gan(x_s_fake, target_real)
        loss_g.backward()
        self.g_optim.step()
        # Segmentation loss
        # Generator is frozen for the segmentation update (and is NOT
        # re-enabled here — NOTE(review): confirm this is intentional).
        self.set_requires_grad(self.g, requires_grad=False)
        # self.g_optim.zero_grad()
        self.seg_optim.zero_grad()
        out_seg = self.forward_seg(x_s)
        seg_loss = self.criterion_seg(out_seg, y_s)
        seg_loss.backward()
        # self.g_optim.step()
        self.seg_optim.step()
        # calculate dice score for current batch
        dice_score = compute_dice_metric(torch.round(out_seg), y_s).item()
        # backward pass
        return seg_loss.item(), (loss_real + loss_fake).item(), dice_score
    def save(self, path):
        """Persist generator, discriminator and segmentor state dicts under `path`."""
        print('saving model...')
        if os.path.isdir(path) == False:
            os.makedirs(path)
        torch.save(self.g.state_dict(), os.path.join(path,'g.pth'))
        torch.save(self.d.state_dict(), os.path.join(path,'d.pth'))
        torch.save(self.seg.state_dict(), os.path.join(path,'seg.pth'))
        print('saving done!')
| [
"torch.nn.ReLU",
"os.makedirs",
"numpy.random.random",
"torch.nn.ReflectionPad2d",
"os.path.join",
"torch.nn.Conv2d",
"torch.tensor",
"torch.nn.BCELoss",
"os.path.isdir",
"torch.round",
"utils.losses.DiceLoss"
] | [((479, 500), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (497, 500), True, 'import torch.nn as nn\n'), ((521, 542), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (528, 542), True, 'import torch.nn as nn\n'), ((565, 589), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(3)', '(1)', '(1)', '(0)'], {}), '(3, 3, 1, 1, 0)\n', (574, 589), True, 'import torch.nn as nn\n'), ((613, 638), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3)', '(1)', '(0)'], {}), '(3, 64, 3, 1, 0)\n', (622, 638), True, 'import torch.nn as nn\n'), ((662, 688), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)', '(1)', '(0)'], {}), '(64, 64, 3, 1, 0)\n', (671, 688), True, 'import torch.nn as nn\n'), ((747, 774), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)', '(1)', '(0)'], {}), '(64, 128, 3, 1, 0)\n', (756, 774), True, 'import torch.nn as nn\n'), ((798, 826), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(0)'], {}), '(128, 128, 3, 1, 0)\n', (807, 826), True, 'import torch.nn as nn\n'), ((886, 914), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)', '(1)', '(0)'], {}), '(128, 256, 3, 1, 0)\n', (895, 914), True, 'import torch.nn as nn\n'), ((938, 966), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (947, 966), True, 'import torch.nn as nn\n'), ((990, 1018), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (999, 1018), True, 'import torch.nn as nn\n'), ((1042, 1070), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (1051, 1070), True, 'import torch.nn as nn\n'), ((1138, 1166), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3)', '(1)', '(0)'], {}), '(256, 512, 3, 1, 0)\n', (1147, 1166), True, 'import torch.nn as nn\n'), ((2084, 2105), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (2102, 2105), 
True, 'import torch.nn as nn\n'), ((2126, 2147), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2133, 2147), True, 'import torch.nn as nn\n'), ((2171, 2199), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)', '(3)', '(1)', '(0)'], {}), '(512, 256, 3, 1, 0)\n', (2180, 2199), True, 'import torch.nn as nn\n'), ((2279, 2321), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 * multiply_in)', '(256)', '(3)', '(1)', '(0)'], {}), '(256 * multiply_in, 256, 3, 1, 0)\n', (2288, 2321), True, 'import torch.nn as nn\n'), ((2343, 2371), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (2352, 2371), True, 'import torch.nn as nn\n'), ((2395, 2423), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(0)'], {}), '(256, 256, 3, 1, 0)\n', (2404, 2423), True, 'import torch.nn as nn\n'), ((2447, 2475), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(3)', '(1)', '(0)'], {}), '(256, 128, 3, 1, 0)\n', (2456, 2475), True, 'import torch.nn as nn\n'), ((2555, 2597), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * multiply_in)', '(128)', '(3)', '(1)', '(0)'], {}), '(128 * multiply_in, 128, 3, 1, 0)\n', (2564, 2597), True, 'import torch.nn as nn\n'), ((2619, 2646), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3)', '(1)', '(0)'], {}), '(128, 64, 3, 1, 0)\n', (2628, 2646), True, 'import torch.nn as nn\n'), ((2716, 2756), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * multiply_in)', '(64)', '(3)', '(1)', '(0)'], {}), '(64 * multiply_in, 64, 3, 1, 0)\n', (2725, 2756), True, 'import torch.nn as nn\n'), ((2778, 2803), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(3)', '(3)', '(1)', '(0)'], {}), '(64, 3, 3, 1, 0)\n', (2787, 2803), True, 'import torch.nn as nn\n'), ((5668, 5680), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5678, 5680), True, 'import torch.nn as nn\n'), ((8858, 8877), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (8871, 8877), False, 'import os\n'), ((8900, 8917), 
'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (8911, 8917), False, 'import os\n'), ((8958, 8985), 'os.path.join', 'os.path.join', (['path', '"""g.pth"""'], {}), "(path, 'g.pth')\n", (8970, 8985), False, 'import os\n'), ((9026, 9053), 'os.path.join', 'os.path.join', (['path', '"""d.pth"""'], {}), "(path, 'd.pth')\n", (9038, 9053), False, 'import os\n'), ((9096, 9125), 'os.path.join', 'os.path.join', (['path', '"""seg.pth"""'], {}), "(path, 'seg.pth')\n", (9108, 9125), False, 'import os\n'), ((3753, 3785), 'os.path.join', 'os.path.join', (['model_path_encoder'], {}), '(model_path_encoder)\n', (3765, 3785), False, 'import os\n'), ((3915, 3947), 'os.path.join', 'os.path.join', (['model_path_decoder'], {}), '(model_path_decoder)\n', (3927, 3947), False, 'import os\n'), ((5787, 5799), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5797, 5799), True, 'import torch.nn as nn\n'), ((5822, 5832), 'utils.losses.DiceLoss', 'DiceLoss', ([], {}), '()\n', (5830, 5832), False, 'from utils.losses import DiceLoss\n'), ((6290, 6308), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6306, 6308), True, 'import numpy as np\n'), ((8636, 8656), 'torch.round', 'torch.round', (['out_seg'], {}), '(out_seg)\n', (8647, 8656), False, 'import torch\n'), ((4799, 4825), 'torch.tensor', 'torch.tensor', (['self.filters'], {}), '(self.filters)\n', (4811, 4825), False, 'import torch\n')] |
#
# copyright © 2020 - all rights reserved
# Created at: 03/11/20
# By: mauromarini
# License: MIT
# Repository: https://github.com/marinimau/wayne_django_rest
# Credits: @marinimau (https://github.com/marinimau)
#
from django.urls import path
from api.user import views as user_views
from api.social import views as social_views
urlpatterns = [
    # ------------------------------------------------------------------------------------------------------------------
    # public urls
    # ------------------------------------------------------------------------------------------------------------------
    # Public user profile and detail lookups by username.
    path('get/<username>/', user_views.UserDetailPublic.as_view()),
    path('get/<username>/detail/', user_views.ProfileDetailPublic.as_view()),
    # Username-based social accounts: list for a user, and reverse lookup by platform/value.
    path('get/<username>/account/username_based/', social_views.UsernameSocialAccountPublic.as_view()),
    path('reverse/username_based/<platform>/<value>/', social_views.UsernameSocialAccountRetrieve.as_view()),
    # Email-based social accounts: list for a user, and reverse lookup by platform/value.
    path('get/<username>/account/email_based/', social_views.EmailSocialAccountPublic.as_view()),
    path('reverse/email_based/<platform>/<value>/', social_views.EmailSocialAccountRetrieve.as_view()),
]
| [
"api.social.views.UsernameSocialAccountRetrieve.as_view",
"api.user.views.UserDetailPublic.as_view",
"api.social.views.UsernameSocialAccountPublic.as_view",
"api.social.views.EmailSocialAccountRetrieve.as_view",
"api.user.views.ProfileDetailPublic.as_view",
"api.social.views.EmailSocialAccountPublic.as_vi... | [((651, 688), 'api.user.views.UserDetailPublic.as_view', 'user_views.UserDetailPublic.as_view', ([], {}), '()\n', (686, 688), True, 'from api.user import views as user_views\n'), ((726, 766), 'api.user.views.ProfileDetailPublic.as_view', 'user_views.ProfileDetailPublic.as_view', ([], {}), '()\n', (764, 766), True, 'from api.user import views as user_views\n'), ((820, 870), 'api.social.views.UsernameSocialAccountPublic.as_view', 'social_views.UsernameSocialAccountPublic.as_view', ([], {}), '()\n', (868, 870), True, 'from api.social import views as social_views\n'), ((928, 980), 'api.social.views.UsernameSocialAccountRetrieve.as_view', 'social_views.UsernameSocialAccountRetrieve.as_view', ([], {}), '()\n', (978, 980), True, 'from api.social import views as social_views\n'), ((1031, 1078), 'api.social.views.EmailSocialAccountPublic.as_view', 'social_views.EmailSocialAccountPublic.as_view', ([], {}), '()\n', (1076, 1078), True, 'from api.social import views as social_views\n'), ((1133, 1182), 'api.social.views.EmailSocialAccountRetrieve.as_view', 'social_views.EmailSocialAccountRetrieve.as_view', ([], {}), '()\n', (1180, 1182), True, 'from api.social import views as social_views\n')] |
# -*- coding: utf-8 -*-
# This file is part of CSDaily.
# Copyright (C) 2018-present qytz <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import asyncio
import logging
from datetime import datetime, date
import aiohttp
import pandas as pd
from sqlalchemy import create_engine
from .cli import FinApp, CSDaily
# Module-level logger keyed on the file path.
# NOTE(review): logging convention is getLogger(__name__); __file__ yields a
# path-based logger name — confirm whether this is intentional before changing.
logger = logging.getLogger(__file__)
@CSDaily.subcommand('data')
class DataApp(FinApp):
    """Download daily market quotes from xueqiu.com and store them in SQLite."""

    async def get_daily_stocks_xq(self, session, data_type='stock'):
        """
        Fetch the daily quote overview from xueqiu.com (only the current day
        is available).

        :param session: aiohttp.ClientSession used for all requests
        :param data_type: category to download:
            stock: Shanghai/Shenzhen stocks
            cb:    convertible bonds
            etf:   ETF funds
            fenji: structured (fenji) funds
        :return: pd.DataFrame indexed by (symbol, day); an empty DataFrame
            when nothing could be downloaded.

        Endpoints:
            https://xueqiu.com/stock/cata/stocklist.json
            https://xueqiu.com/fund/quote/list.json

        Stock code prefixes (for reference):
            Shanghai: 0/00 indexes, 1 bonds, 2 repo, 3 futures, 5 funds and
            warrants, 6 A shares, 9 B shares.
            Shenzhen: 00 A shares, 002~004 SME board, 1 bonds, 2 B shares,
            30 ChiNext, 39 indexes.
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:54.0) '
                          'Gecko/20100101 Firefox/54.0',
        }
        # Visit the landing page first so the session acquires the cookies
        # the JSON endpoints below require.
        await session.get('https://xueqiu.com', headers=headers)
        quotes = []
        page_size = 90
        curr_page = page_cnt = 1
        params = {
            '_': 0,
            'order': 'desc',
            'orderby': 'percent',
            'page': curr_page,
            'size': page_size,
        }
        quotes_url = 'https://xueqiu.com/stock/cata/stocklist.json'
        if data_type == 'stock':
            params['type'] = '11,12'
        elif data_type == 'cb':
            params['exchange'] = 'CN'
            params['industry'] = '可转债'
        elif data_type == 'etf':
            params['parent_type'] = 13
            params['type'] = 135
            params['orderBy'] = 'percent'
            quotes_url = 'https://xueqiu.com/fund/quote/list.json'
        elif data_type == 'fenji':
            params['parent_type'] = 1
            params['type'] = 14
            params['orderBy'] = 'percent'
            quotes_url = 'https://xueqiu.com/fund/quote/list.json'
        logger.info('start download xueqiu daily quotes for %s...', data_type)
        while curr_page <= page_cnt:
            logger.info('Fetching %s/%s page', curr_page, page_cnt)
            params['page'] = curr_page
            # Cache-busting timestamp in milliseconds.
            params['_'] = int(time.time() * 1000)
            resp = await session.get(quotes_url, params=params, headers=headers)
            resp_json = await resp.json()
            if data_type in ('stock', 'cb'):
                if not resp_json['success']:
                    logger.error('Get daily quotes for %s failed: %s', data_type, resp_json)
                    break
                total_cnt = resp_json['count']['count']
            elif data_type in ('etf', 'fenji'):
                # Both fund endpoints share the same response shape.
                if 'error_code' in resp_json:
                    logger.error('Get daily quotes for %s failed: %s', data_type, resp_json)
                    break
                total_cnt = resp_json['count']
            # BUG FIX: the original set page_cnt to 0 whenever total_cnt was an
            # exact multiple of page_size, stopping pagination after the first
            # page; use ceiling division instead.
            page_cnt = (total_cnt + page_size - 1) // page_size
            quotes.extend(resp_json['stocks'])
            curr_page += 1
        if not quotes:
            # logger.warn is a deprecated alias; use logger.warning.
            logger.warning('no data downloaded for %s, return empty DataFrame', data_type)
            # BUG FIX: the original built this DataFrame but never returned it,
            # so execution fell through and set_index() raised KeyError on the
            # missing 'symbol' column.
            return pd.DataFrame()
        logger.info('download xueqiu daily quotes for %s finish', data_type)
        df = pd.DataFrame(quotes)
        # Stamp every row with today's date at 16:00 (after market close).
        df['day'] = datetime.now().replace(hour=16, minute=0, second=0, microsecond=0)
        df.set_index(['symbol', 'day'], inplace=True)
        df.drop_duplicates(inplace=True)
        # Convert columns to numeric dtypes where possible.
        return df.apply(pd.to_numeric, errors='ignore')

    async def update_data_daily(self):
        """Download every quote category and append it to a per-day SQLite DB."""
        day = str(date.today())
        db_dir = os.path.join(self._data_dir, 'daily_quotes')
        os.makedirs(db_dir, exist_ok=True)
        db_file = os.path.join(db_dir, f'{day}.db')
        engine = create_engine('sqlite:///' + db_file)
        logger.info('start downloading, data will be saved to %s', db_file)
        async with aiohttp.ClientSession() as session:
            # (data_type, destination table) pairs, fetched sequentially.
            for data_type, table in (('stock', 'stock_quotes'),
                                     ('cb', 'cb_quotes'),
                                     ('etf', 'etf_quotes'),
                                     ('fenji', 'fenji_quotes')):
                df = await self.get_daily_stocks_xq(session, data_type=data_type)
                if not df.empty:
                    df.to_sql(table, engine, chunksize=1000, if_exists='append', index=True)
        logger.info('all data has be saved to %s', db_file)

    def main(self, *args):
        """CLI entry point: prepare the data directory and run the download."""
        self._data_dir = os.path.join(self._root_dir, 'origin_data')
        os.makedirs(self._data_dir, exist_ok=True)
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.update_data_daily())
if __name__ == '__main__':
DataApp()
| [
"logging.getLogger",
"aiohttp.ClientSession",
"os.makedirs",
"sqlalchemy.create_engine",
"os.path.join",
"datetime.datetime.now",
"pandas.DataFrame",
"datetime.date.today",
"asyncio.get_event_loop",
"time.time"
] | [((853, 880), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (870, 880), False, 'import logging\n'), ((4685, 4705), 'pandas.DataFrame', 'pd.DataFrame', (['quotes'], {}), '(quotes)\n', (4697, 4705), True, 'import pandas as pd\n'), ((5124, 5168), 'os.path.join', 'os.path.join', (['self._data_dir', '"""daily_quotes"""'], {}), "(self._data_dir, 'daily_quotes')\n", (5136, 5168), False, 'import os\n'), ((5177, 5211), 'os.makedirs', 'os.makedirs', (['db_dir'], {'exist_ok': '(True)'}), '(db_dir, exist_ok=True)\n', (5188, 5211), False, 'import os\n'), ((5230, 5263), 'os.path.join', 'os.path.join', (['db_dir', 'f"""{day}.db"""'], {}), "(db_dir, f'{day}.db')\n", (5242, 5263), False, 'import os\n'), ((5281, 5318), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + db_file)"], {}), "('sqlite:///' + db_file)\n", (5294, 5318), False, 'from sqlalchemy import create_engine\n'), ((6365, 6408), 'os.path.join', 'os.path.join', (['self._root_dir', '"""origin_data"""'], {}), "(self._root_dir, 'origin_data')\n", (6377, 6408), False, 'import os\n'), ((6417, 6459), 'os.makedirs', 'os.makedirs', (['self._data_dir'], {'exist_ok': '(True)'}), '(self._data_dir, exist_ok=True)\n', (6428, 6459), False, 'import os\n'), ((6476, 6500), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6498, 6500), False, 'import asyncio\n'), ((4580, 4594), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4592, 4594), True, 'import pandas as pd\n'), ((5093, 5105), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5103, 5105), False, 'from datetime import datetime, date\n'), ((5414, 5437), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (5435, 5437), False, 'import aiohttp\n'), ((4761, 4775), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4773, 4775), False, 'from datetime import datetime, date\n'), ((3393, 3404), 'time.time', 'time.time', ([], {}), '()\n', (3402, 3404), False, 'import time\n')] |
import kfserving
from typing import List, Union
import numpy as np
class Predictor():  # pylint:disable=too-few-public-methods
    """Thin adapter that feeds request payloads to a KFServing model."""

    def __init__(self, clf: kfserving.KFModel):
        self.clf = clf

    def predict_fn(self, arr: Union[np.ndarray, List]) -> np.ndarray:
        """Coerce each request item to a plain list, predict, return an array."""
        instances = [
            item.tolist() if isinstance(item, np.ndarray) else item
            for item in arr
        ]
        resp = self.clf.predict({"instances": instances})
        return np.array(resp["predictions"])
| [
"numpy.array"
] | [((557, 586), 'numpy.array', 'np.array', (["resp['predictions']"], {}), "(resp['predictions'])\n", (565, 586), True, 'import numpy as np\n')] |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the project root and the package directory importable so that
# autodoc/autosummary can resolve tnpy without an installed copy.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../tnpy'))
from tnpy import __version__
# -- Project information -----------------------------------------------------
project = 'tnpy'
copyright = '2021, <NAME>'
author = '<NAME>'
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'nbsphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.intersphinx',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.inheritance_diagram',
    'm2r2',
]
# Turn on sphinx.ext.autosummary
autosummary_generate = True
# Looks for objects in external projects
intersphinx_mapping = {
    'tensornetwork': ('https://tensornetwork.readthedocs.io/en/latest/', None),
}
# Mathjax
mathjax_path = 'https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
mathjax2_config = {
    'tex2jax': {
        'inlineMath': [['$', '$'], ['\\(', '\\)']],
        'displayMath': [["$$", "$$"]],
        'processEscapes': True,
        'ignoreClass': 'document',
        'processClass': 'math|output_area',
    }
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# Allowing docstring in both __init__ and right under class definition
autoclass_content = 'both'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "github_url": "https://github.com/tanlin2013/tnpy",
    "repository_url": "https://github.com/tanlin2013/tnpy",
    "use_repository_button": True,
    "use_issues_button": True,
    "use_edit_page_button": True,
    "path_to_docs": "docs",
    "use_fullscreen_button": False,
    "use_download_button": False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for LaTeX output ---------------------------------------------
latex_elements = { # type: ignore
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'tnpy.tex', 'tnpy Documentation',
     '<NAME>', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
| [
"os.path.abspath"
] | [((593, 617), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (608, 617), False, 'import os\n'), ((638, 667), 'os.path.abspath', 'os.path.abspath', (['"""../../tnpy"""'], {}), "('../../tnpy')\n", (653, 667), False, 'import os\n')] |
import docker, json
VALID_PROFILE = ['volume', 'mariadb']
def backup_list():
    """
    Inspect every Docker container (running or not) and build a backup plan.

    Containers opt in via the label ``one.h42.backup.enable=true``; the
    backup profile defaults to ``volume`` and may be overridden with
    ``one.h42.backup.profile``.

    :return: dict mapping container name to its backup description
        (profile, mounts, and any collected error messages).
    """
    client = docker.DockerClient(base_url='unix://var/run/docker.sock')
    bck = {}
    for ct in client.containers.list(all=True):
        error = []
        # Only containers that explicitly enabled backups are considered.
        if ct.labels.get('one.h42.backup.enable') != 'true':
            continue
        ctb = bck[ct.name] = {}
        # BUG FIX: the original only bound the local `profile` variable inside
        # the label branch below, so a container without an explicit profile
        # label raised NameError (or silently reused the previous container's
        # profile). Bind the default here.
        profile = ctb['profile'] = 'volume'
        if 'one.h42.backup.profile' in ct.labels:
            if ct.labels['one.h42.backup.profile'] in VALID_PROFILE:
                profile = ctb['profile'] = ct.labels['one.h42.backup.profile']
            else:
                profile = ctb['profile'] = 'invalid'
                error.append("H42backup: invalid profile detected! one.h42.backup.profile=" + ct.labels['one.h42.backup.profile'] + ".")
        if profile == 'volume':
            vol_ignore = []
            if 'one.h42.backup.volume.ignore' in ct.labels:
                # Comma-separated list of volume names to exclude.
                vol_ignore = ctb['volume_ignore'] = ct.labels['one.h42.backup.volume.ignore'].split(',')
            include_bind = False
            if ct.labels.get('one.h42.backup.volume.include_bind') == 'true':
                include_bind = ctb['volume_include_bind'] = True
            mounts = ctb['mounts'] = []
            for vol in ct.attrs['Mounts']:
                if vol['Type'] == 'bind' and include_bind:
                    mounts.append({'type': 'bind', 'dest': vol['Destination']})
                if vol['Type'] == 'volume':
                    ignore = vol['Name'] in vol_ignore
                    mounts.append({'type': 'volume', 'dest': vol['Destination'], 'name': vol['Name'], 'ignore': ignore})
        if profile == 'mariadb':
            backup_volume = None
            if 'one.h42.backup.mariadb.volume' in ct.labels:
                backup_volume = ctb['mariadb_backup_volume'] = ct.labels['one.h42.backup.mariadb.volume']
            else:
                error.append('Mariadb: backup volume not define, "one.h42.backup.mariadb.volume" docker label not exists or is empty.')
            if backup_volume:
                mounts = ctb['mounts'] = []
                for vol in ct.attrs['Mounts']:
                    if vol['Type'] == 'volume' and vol['Name'] == backup_volume:
                        mounts.append({'type': 'volume', 'dest': vol['Destination'], 'name': vol['Name'], 'ignore': False})
                if not mounts:
                    error.append('Mariadb: backup volume ' + backup_volume + ' not found in docker mount list.')
        if error:
            ctb['error'] = error
    return bck
def backup_run(bck):
    """Launch a one-shot h42-backup agent container for the given backup."""
    client = docker.DockerClient(base_url='unix://var/run/docker.sock')
    agent_command = '/h42backup/h42-backup-agent backup exec --name={}'.format(bck.name)
    return client.containers.run(
        image='h42-backup/agent',
        command=agent_command,
        auto_remove=True,
        network_mode='bridge',
        volumes=bck.getDockerVolumes(),
    )
| [
"docker.DockerClient"
] | [((92, 150), 'docker.DockerClient', 'docker.DockerClient', ([], {'base_url': '"""unix://var/run/docker.sock"""'}), "(base_url='unix://var/run/docker.sock')\n", (111, 150), False, 'import docker, json\n'), ((2966, 3024), 'docker.DockerClient', 'docker.DockerClient', ([], {'base_url': '"""unix://var/run/docker.sock"""'}), "(base_url='unix://var/run/docker.sock')\n", (2985, 3024), False, 'import docker, json\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Based on storm.py module from https://github.com/nathanmarz/storm/blob/master/storm-core/src/multilang/py/storm.py, and the examples from https://github.com/apache/incubator-storm/blob/master/examples/storm-starter/multilang/resources/splitsentence.py and http://storm.incubator.apache.org/documentation/Tutorial.html
Packaging: To run a shell component on a cluster, the scripts that are shelled out to must be in the resources/ directory within the jar submitted to the master (https://github.com/nathanmarz/storm/wiki/Multilang-protocol). By default, Maven will look for your project's resources under src/main/resources (https://maven.apache.org/plugins/maven-resources-plugin/examples/resource-directory.html).
Tested in Python2.7 and Apache Storm 0.9.0.1
'''
import storm, tweepy
import json, time
from operator import itemgetter
import get_tweets
def log_tweeter_error(tweep_error, sleep_time=2):
    '''
    Report a Twitter-related exception to the parent Storm process and back off.

    :param tweep_error: the tweepy.TweepError to report
    :param sleep_time: seconds to sleep before resuming execution
    '''
    # The usual cause is hitting the Twitter REST API rate limit
    # (https://dev.twitter.com/docs/rate-limiting/1.1), so pause briefly.
    storm.log("Tweepy error {error}, sleeping for {secs} seconds in case "
              "Twitter rate limit has been hit"
              .format(error=str(tweep_error), secs=sleep_time))
    time.sleep(sleep_time)
class PlacesSpout(storm.Spout):
    '''
    Periodically emits one single-field tuple per place (as encoded in the
    get_tweets module), at the frequency configured in the storm
    configuration passed to initialize().
    '''

    # Configuration key that holds the emission frequency (seconds).
    _frequency_conf_key = "PlacesSpoutFrequency"

    def initialize(self, conf, context):
        self._places = get_tweets.available_places()
        self._tick_frequency = conf[self.__class__._frequency_conf_key]

    def nextTuple(self):
        '''
        Adheres to the contract expressed in the wrapping Java spout:
        declarer.declare(new Fields(TopologyFields.PLACE));
        '''
        for current_place in self._places:
            storm.emit([current_place])
        time.sleep(self._tick_frequency)
class TwitterBolt(storm.Bolt):
    '''
    Base class for bolts that talk to the Twitter REST API.

    Extends storm.Bolt without ack handling, because the topology consumes a
    non-reliable source with no defined message ids. initialize() stores a
    ready-to-use tweepy.api.API object at self._twitter_api.
    NOTE: don't forget to setup authentication calling "python2.7 get_tweets.py" __before__ compiling the topology: the auth file has to be included in the jar and copied to the cluster
    '''
    def initialize(self, stormconf, context):
        # Init connection to twitter API
        auth = get_tweets.authenticate(rebuild=False)
        # Better fail here if we cannot even authenticate
        self._twitter_api = tweepy.API(auth)
class TrendsBolt(TwitterBolt):
    '''
    Consumes tuples carrying a single place name (as encoded in get_tweets)
    and emits one (place, trend name, query) tuple for each topic currently
    trending at that place's coordinates. Backs off for a few seconds when
    the Twitter REST API rate limit is hit.
    '''

    # Seconds to sleep after hitting the Twitter rate limit.
    _rate_limit_sleep_time = 1

    def process(self, tuple):
        place = tuple.values[0]
        try:
            trending = get_tweets.get_trending_topics_text(self._twitter_api, place)
        except tweepy.TweepError as te:
            # Twitter REST API rate limit hit
            # (https://dev.twitter.com/docs/rate-limiting/1.1): back off.
            log_tweeter_error(te, sleep_time=self._rate_limit_sleep_time)
            return
        for topic in trending:
            storm.emit([place, topic['name'], topic['query']])
class GetTweetsBolt(TwitterBolt):
    '''
    Expects input tuples shaped (place, topic_name, query), where query is a
    Twitter query string for the trending topic topic_name. For each input
    tuple, Twitter is queried for the most popular tweets; selected fields of
    every resulting tweet are projected and emitted — see process() for the
    exact field list. Backs off for a few seconds when the Twitter REST API
    rate limit is hit.
    '''

    # Seconds to sleep after hitting the Twitter rate limit.
    _rate_limit_sleep_time = 1

    @staticmethod
    def _storm_tweet_processor(status):
        """Project a tweepy status object onto a flat dict of plain fields."""
        projected = {
            field: status.__dict__.get(field, None)
            for field in ('text', 'favorite_count', 'retweeted',
                          'in_reply_to_screen_name', 'retweet_count',
                          'possibly_sensitive', 'lang', 'created_at', 'source')
        }
        projected['created_at'] = projected['created_at'].strftime('%Y-%m-%d %H:%M:%S')
        projected['author_screen_name'] = status.author.screen_name
        projected['hashtags_texts'] = "|".join(
            sorted(hashtag['text'] for hashtag in status.entities['hashtags']))
        projected['place_full_name'] = (status.place.full_name
                                        if status.place is not None else None)
        return projected

    def process(self, tuple):
        '''
        Must fulfil the following contract expressed in the Java wrapper:
        declarer.declare(new Fields(TopologyFields.AUTHOR_SCREEN_NAME, TopologyFields.CREATED_AT,
          TopologyFields.FAV_COUNT, TopologyFields.HASHTAGS_TEXTS, TopologyFields.IN_REPLY_TO_SCREEN_NAME,
          TopologyFields.LANG, TopologyFields.RETWEET_COUNT, TopologyFields.RETWEETED,
          TopologyFields.SOURCE, TopologyFields.PLACE, TopologyFields.POSSIBLY_SENSITIVE,
          TopologyFields.TEXT, TopologyFields.TOPIC_NAME));
        '''
        place, topic_name, query = tuple.values
        try:
            trend_results = get_tweets.get_tweets_for_trends(
                self._twitter_api, [{"query": query}], popular=True,
                tweet_processor=self._storm_tweet_processor)
            tweets = list(trend_results)[0]["tweets"]
        except tweepy.TweepError as te:
            # Twitter REST API rate limit hit: back off for a while.
            log_tweeter_error(te, sleep_time=self._rate_limit_sleep_time)
            return
        for tweet in tweets:
            # Emit the projected fields, our own trending-topic name, and the
            # place name used internally by get_tweets (instead of the place
            # names returned by Twitter).
            storm.emit([tweet['author_screen_name'], tweet['created_at'],
                        tweet['favorite_count'], tweet['hashtags_texts'],
                        tweet['in_reply_to_screen_name'], tweet['lang'],
                        tweet['retweet_count'], tweet['retweeted'],
                        tweet['source'], place, tweet['possibly_sensitive'],
                        tweet['text'], topic_name])
"get_tweets.get_tweets_for_trends",
"storm.emit",
"time.sleep",
"get_tweets.authenticate",
"tweepy.API",
"get_tweets.get_trending_topics_text",
"get_tweets.available_places"
] | [((1462, 1484), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (1472, 1484), False, 'import json, time\n'), ((1964, 1993), 'get_tweets.available_places', 'get_tweets.available_places', ([], {}), '()\n', (1991, 1993), False, 'import get_tweets\n'), ((2459, 2491), 'time.sleep', 'time.sleep', (['self._tick_frequency'], {}), '(self._tick_frequency)\n', (2469, 2491), False, 'import json, time\n'), ((3090, 3128), 'get_tweets.authenticate', 'get_tweets.authenticate', ([], {'rebuild': '(False)'}), '(rebuild=False)\n', (3113, 3128), False, 'import get_tweets\n'), ((3219, 3235), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (3229, 3235), False, 'import storm, tweepy\n'), ((2431, 2450), 'storm.emit', 'storm.emit', (['[place]'], {}), '([place])\n', (2441, 2450), False, 'import storm, tweepy\n'), ((3722, 3783), 'get_tweets.get_trending_topics_text', 'get_tweets.get_trending_topics_text', (['self._twitter_api', 'place'], {}), '(self._twitter_api, place)\n', (3757, 3783), False, 'import get_tweets\n'), ((4098, 4148), 'storm.emit', 'storm.emit', (["[place, trend['name'], trend['query']]"], {}), "([place, trend['name'], trend['query']])\n", (4108, 4148), False, 'import storm, tweepy\n'), ((7004, 7019), 'storm.emit', 'storm.emit', (['tup'], {}), '(tup)\n', (7014, 7019), False, 'import storm, tweepy\n'), ((6023, 6157), 'get_tweets.get_tweets_for_trends', 'get_tweets.get_tweets_for_trends', (['self._twitter_api', "[{'query': query}]"], {'popular': '(True)', 'tweet_processor': 'self._storm_tweet_processor'}), "(self._twitter_api, [{'query': query}],\n popular=True, tweet_processor=self._storm_tweet_processor)\n", (6055, 6157), False, 'import get_tweets\n')] |
#!/usr/bin/env python
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The Emperor Project"
__credits__ = ["<NAME>"]
__license__ = "BSD"
__version__ = "0.9.3-dev"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from unittest import TestCase, main
from tempfile import mkstemp
from os import close
import numpy as np
import numpy.testing as npt
from emperor.parse import parse_coords
class ParseTests(TestCase):
    """Tests for emperor.parse.parse_coords."""

    def _assert_coords_equal(self, obs):
        """Compare a parse_coords() result against the shared expected fixture.

        The expected tuple was previously duplicated verbatim in all three
        tests; it is factored out here so the fixture only exists once.
        """
        exp = (['A', 'B', 'C'],
               np.array([[.11, .09, .23], [.03, .07, -.26], [.12, .06, -.32]]),
               np.array([4.94, 1.79, 1.50]),
               np.array([14.3, 5.2, 4.3]))
        # test the header and the values apart from each other
        self.assertEqual(obs[0], exp[0])
        npt.assert_almost_equal(obs[1], exp[1])
        npt.assert_almost_equal(obs[2], exp[2])
        npt.assert_almost_equal(obs[3], exp[3])

    def test_parse_coords_ordination_results(self):
        """parse_coords should handle skbio's OrdinationResults file"""
        obs = parse_coords(ordination_results_file.splitlines())
        self._assert_coords_equal(obs)

    def test_parse_coords_qiime(self):
        """parse_coords should handle old qiime PCoA coords format"""
        obs = parse_coords(qiime_pcoa_file.splitlines())
        self._assert_coords_equal(obs)

    def test_parse_coords_qiime_file(self):
        """parse_coords should handle old qiime PCoA coords file"""
        fd, fp = mkstemp()
        close(fd)
        with open(fp, 'w') as f:
            f.write(qiime_pcoa_file)
        # NOTE(review): mode 'U' was removed in Python 3.11; kept as-is for
        # behavioral parity with the original (Python 2-era) test.
        with open(fp, 'U') as f:
            obs = parse_coords(f)
        self._assert_coords_equal(obs)
ordination_results_file = """Eigvals\t3
4.94\t1.79\t1.50
Proportion explained\t3
14.3\t5.2\t4.3
Species\t0\t0
Site\t3\t3
A\t.11\t.09\t.23
B\t.03\t.07\t-.26
C\t.12\t.06\t-.32
Biplot\t0\t0
Site constraints\t0\t0"""
qiime_pcoa_file = """pc vector number\t1\t2\t3
A\t0.11\t0.09\t0.23
B\t0.03\t0.07\t-0.26
C\t0.12\t0.06\t-0.32
eigvals\t4.94\t1.79\t1.50
% variation explained\t14.3\t5.2\t4.3
"""
if __name__ == '__main__':
main()
| [
"os.close",
"numpy.testing.assert_almost_equal",
"numpy.array",
"unittest.main",
"emperor.parse.parse_coords",
"tempfile.mkstemp"
] | [((2964, 2970), 'unittest.main', 'main', ([], {}), '()\n', (2968, 2970), False, 'from unittest import TestCase, main\n'), ((677, 697), 'emperor.parse.parse_coords', 'parse_coords', (['coords'], {}), '(coords)\n', (689, 697), False, 'from emperor.parse import parse_coords\n'), ((1010, 1049), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[1]', 'exp[1]'], {}), '(obs[1], exp[1])\n', (1033, 1049), True, 'import numpy.testing as npt\n'), ((1058, 1097), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[2]', 'exp[2]'], {}), '(obs[2], exp[2])\n', (1081, 1097), True, 'import numpy.testing as npt\n'), ((1106, 1145), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[3]', 'exp[3]'], {}), '(obs[3], exp[3])\n', (1129, 1145), True, 'import numpy.testing as npt\n'), ((1316, 1336), 'emperor.parse.parse_coords', 'parse_coords', (['coords'], {}), '(coords)\n', (1328, 1336), False, 'from emperor.parse import parse_coords\n'), ((1649, 1688), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[1]', 'exp[1]'], {}), '(obs[1], exp[1])\n', (1672, 1688), True, 'import numpy.testing as npt\n'), ((1697, 1736), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[2]', 'exp[2]'], {}), '(obs[2], exp[2])\n', (1720, 1736), True, 'import numpy.testing as npt\n'), ((1745, 1784), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[3]', 'exp[3]'], {}), '(obs[3], exp[3])\n', (1768, 1784), True, 'import numpy.testing as npt\n'), ((1915, 1924), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (1922, 1924), False, 'from tempfile import mkstemp\n'), ((1933, 1942), 'os.close', 'close', (['fd'], {}), '(fd)\n', (1938, 1942), False, 'from os import close\n'), ((2395, 2434), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[1]', 'exp[1]'], {}), '(obs[1], exp[1])\n', (2418, 2434), True, 'import numpy.testing as npt\n'), ((2443, 2482), 
'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[2]', 'exp[2]'], {}), '(obs[2], exp[2])\n', (2466, 2482), True, 'import numpy.testing as npt\n'), ((2491, 2530), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['obs[3]', 'exp[3]'], {}), '(obs[3], exp[3])\n', (2514, 2530), True, 'import numpy.testing as npt\n'), ((745, 817), 'numpy.array', 'np.array', (['[[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]]'], {}), '([[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]])\n', (753, 817), True, 'import numpy as np\n'), ((825, 852), 'numpy.array', 'np.array', (['[4.94, 1.79, 1.5]'], {}), '([4.94, 1.79, 1.5])\n', (833, 852), True, 'import numpy as np\n'), ((870, 896), 'numpy.array', 'np.array', (['[14.3, 5.2, 4.3]'], {}), '([14.3, 5.2, 4.3])\n', (878, 896), True, 'import numpy as np\n'), ((1384, 1456), 'numpy.array', 'np.array', (['[[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]]'], {}), '([[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]])\n', (1392, 1456), True, 'import numpy as np\n'), ((1464, 1491), 'numpy.array', 'np.array', (['[4.94, 1.79, 1.5]'], {}), '([4.94, 1.79, 1.5])\n', (1472, 1491), True, 'import numpy as np\n'), ((1509, 1535), 'numpy.array', 'np.array', (['[14.3, 5.2, 4.3]'], {}), '([14.3, 5.2, 4.3])\n', (1517, 1535), True, 'import numpy as np\n'), ((2066, 2081), 'emperor.parse.parse_coords', 'parse_coords', (['f'], {}), '(f)\n', (2078, 2081), False, 'from emperor.parse import parse_coords\n'), ((2130, 2202), 'numpy.array', 'np.array', (['[[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]]'], {}), '([[0.11, 0.09, 0.23], [0.03, 0.07, -0.26], [0.12, 0.06, -0.32]])\n', (2138, 2202), True, 'import numpy as np\n'), ((2210, 2237), 'numpy.array', 'np.array', (['[4.94, 1.79, 1.5]'], {}), '([4.94, 1.79, 1.5])\n', (2218, 2237), True, 'import numpy as np\n'), ((2255, 2281), 'numpy.array', 'np.array', (['[14.3, 5.2, 4.3]'], {}), '([14.3, 5.2, 4.3])\n', (2263, 2281), True, 
'import numpy as np\n')] |
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
"""empty message
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2018-08-06 16:03:36.820890
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '8efa45d83a3b'
def upgrade():
    """Add nullable created_at (DateTime) and token (JSON) columns to
    auth_providers, and enforce uniqueness of (provider, provider_id)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('auth_providers',
                  sa.Column('created_at',
                            sa.DateTime(),
                            nullable=True))
    op.add_column('auth_providers',
                  sa.Column('token',
                            sqlalchemy_utils.types.json.JSONType(),
                            nullable=True))
    op.create_unique_constraint('auth_providers_by_provider',
                                'auth_providers',
                                ['provider', 'provider_id'])
    # ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop the unique constraint, then both columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('auth_providers_by_provider',
                       'auth_providers',
                       type_='unique')
    op.drop_column('auth_providers', 'token')
    op.drop_column('auth_providers', 'created_at')
    # ### end Alembic commands ###
| [
"sqlalchemy.DateTime",
"alembic.op.drop_constraint",
"alembic.op.drop_column",
"sqlalchemy_utils.types.json.JSONType",
"alembic.op.create_unique_constraint"
] | [((705, 813), 'alembic.op.create_unique_constraint', 'op.create_unique_constraint', (['"""auth_providers_by_provider"""', '"""auth_providers"""', "['provider', 'provider_id']"], {}), "('auth_providers_by_provider', 'auth_providers',\n ['provider', 'provider_id'])\n", (732, 813), False, 'from alembic import op\n'), ((998, 1085), 'alembic.op.drop_constraint', 'op.drop_constraint', (['"""auth_providers_by_provider"""', '"""auth_providers"""'], {'type_': '"""unique"""'}), "('auth_providers_by_provider', 'auth_providers', type_=\n 'unique')\n", (1016, 1085), False, 'from alembic import op\n'), ((1132, 1173), 'alembic.op.drop_column', 'op.drop_column', (['"""auth_providers"""', '"""token"""'], {}), "('auth_providers', 'token')\n", (1146, 1173), False, 'from alembic import op\n'), ((1178, 1224), 'alembic.op.drop_column', 'op.drop_column', (['"""auth_providers"""', '"""created_at"""'], {}), "('auth_providers', 'created_at')\n", (1192, 1224), False, 'from alembic import op\n'), ((455, 468), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (466, 468), True, 'import sqlalchemy as sa\n'), ((616, 654), 'sqlalchemy_utils.types.json.JSONType', 'sqlalchemy_utils.types.json.JSONType', ([], {}), '()\n', (652, 654), False, 'import sqlalchemy_utils\n')] |
from pathlib import Path
from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox
import controller as db_controller
# Directory of this script and the assets bundled next to it.
OUTPUT_PATH = Path(__file__).parent
ASSETS_PATH = OUTPUT_PATH / Path("./assets")


def relative_to_assets(path: str) -> Path:
    """Resolve *path* against the bundled assets directory."""
    return ASSETS_PATH.joinpath(path)
def add_reservations():
    # Instantiate the reservation form frame.
    # NOTE(review): AddReservations.__init__ requires a `parent` argument, so
    # this zero-argument call raises TypeError as written — confirm intended
    # usage against the caller.
    AddReservations()
class AddReservations(Frame):
    """Tkinter frame showing an "Add a Reservation" form plus navigation buttons.

    Field values are collected through the Entry widgets stored in
    ``self.data`` and submitted to ``db_controller.add_reservation``.
    """
    def __init__(self, parent, controller=None, *args, **kwargs):
        Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        # Maps field names to the Entry widgets holding their values
        # (placeholders replaced by widgets further down).
        self.data = {"g_id": "", "check_in": "", "meal": "", "r_id": ""}
        self.configure(bg="#FFFFFF")
        self.canvas = Canvas(
            self,
            bg="#FFFFFF",
            height=432,
            width=797,
            bd=0,
            highlightthickness=0,
            relief="ridge",
        )
        self.canvas.place(x=0, y=0)
        # Images must be kept as attributes so Tk does not garbage-collect them.
        self.entry_image_1 = PhotoImage(file=relative_to_assets("entry_1.png"))
        entry_bg_1 = self.canvas.create_image(137.5, 153.0, image=self.entry_image_1)
        self.canvas.create_text(
            52.0,
            128.0,
            anchor="nw",
            text="Guest Id",
            fill="#5E95FF",
            font=("Montserrat Bold", 14 * -1),
        )
        self.entry_image_2 = PhotoImage(file=relative_to_assets("entry_2.png"))
        entry_bg_2 = self.canvas.create_image(141.5, 165.0, image=self.entry_image_2)
        entry_2 = Entry(
            self,
            bd=0,
            bg="#EFEFEF",
            highlightthickness=0,
            font=("Montserrat Bold", 18 * -1),
            foreground="#777777",
        )
        entry_2.place(x=52.0, y=153.0, width=179.0, height=22.0)
        self.data["g_id"] = entry_2
        self.entry_image_3 = PhotoImage(file=relative_to_assets("entry_3.png"))
        entry_bg_3 = self.canvas.create_image(137.5, 259.0, image=self.entry_image_3)
        self.canvas.create_text(
            52.0,
            234.0,
            anchor="nw",
            text="Is Taking Meal",
            fill="#5E95FF",
            font=("Montserrat Bold", 14 * -1),
        )
        self.entry_image_4 = PhotoImage(file=relative_to_assets("entry_4.png"))
        entry_bg_4 = self.canvas.create_image(141.5, 271.0, image=self.entry_image_4)
        entry_4 = Entry(
            self,
            bd=0,
            bg="#EFEFEF",
            highlightthickness=0,
            font=("Montserrat Bold", 18 * -1),
            foreground="#777777",
        )
        entry_4.place(x=52.0, y=259.0, width=179.0, height=22.0)
        # NOTE(review): this entry sits under the "Is Taking Meal" label but is
        # stored under the "r_id" key — looks swapped with entry_6; confirm.
        self.data["r_id"] = entry_4
        self.entry_image_5 = PhotoImage(file=relative_to_assets("entry_5.png"))
        entry_bg_5 = self.canvas.create_image(378.5, 153.0, image=self.entry_image_5)
        self.canvas.create_text(
            293.0,
            128.0,
            anchor="nw",
            text="Room Id",
            fill="#5E95FF",
            font=("Montserrat Bold", 14 * -1),
        )
        self.entry_image_6 = PhotoImage(file=relative_to_assets("entry_6.png"))
        entry_bg_6 = self.canvas.create_image(382.5, 165.0, image=self.entry_image_6)
        entry_6 = Entry(
            self,
            bd=0,
            bg="#EFEFEF",
            highlightthickness=0,
            foreground="#777777",
            font=("Montserrat Bold", 18 * -1),
        )
        entry_6.place(x=293.0, y=153.0, width=179.0, height=22.0)
        # NOTE(review): this entry sits under the "Room Id" label but is stored
        # under the "meal" key — looks swapped with entry_4; confirm.
        self.data["meal"] = entry_6
        self.entry_image_7 = PhotoImage(file=relative_to_assets("entry_7.png"))
        entry_bg_7 = self.canvas.create_image(378.5, 259.0, image=self.entry_image_7)
        self.canvas.create_text(
            293.0,
            234.0,
            anchor="nw",
            text="Check-in Time",
            fill="#5E95FF",
            font=("Montserrat Bold", 14 * -1),
        )
        self.entry_image_8 = PhotoImage(file=relative_to_assets("entry_8.png"))
        entry_bg_8 = self.canvas.create_image(382.5, 271.0, image=self.entry_image_8)
        entry_8 = Entry(
            self,
            bd=0,
            bg="#EFEFEF",
            highlightthickness=0,
            foreground="#777777",
            font=("Montserrat Bold", 18 * -1),
        )
        entry_8.place(x=293.0, y=259.0, width=179.0, height=22.0)
        self.data["check_in"] = entry_8
        # Submit button wired to save().
        self.button_image_1 = PhotoImage(file=relative_to_assets("button_1.png"))
        button_1 = Button(
            self,
            image=self.button_image_1,
            borderwidth=0,
            highlightthickness=0,
            command=self.save,
            relief="flat",
        )
        button_1.place(x=164.0, y=322.0, width=190.0, height=48.0)
        self.canvas.create_text(
            139.0,
            59.0,
            anchor="nw",
            text="Add a Reservation",
            fill="#5E95FF",
            font=("Montserrat Bold", 26 * -1),
        )
        self.canvas.create_text(
            549.0,
            59.0,
            anchor="nw",
            text="Operations",
            fill="#5E95FF",
            font=("Montserrat Bold", 26 * -1),
        )
        # Vertical divider between the form and the operations panel.
        self.canvas.create_rectangle(
            515.0, 59.0, 517.0, 370.0, fill="#EFEFEF", outline=""
        )
        # Navigation buttons: switch the parent view to "view" / "edit".
        self.button_image_2 = PhotoImage(file=relative_to_assets("button_2.png"))
        button_2 = Button(
            self,
            image=self.button_image_2,
            borderwidth=0,
            highlightthickness=0,
            command=lambda: self.parent.navigate("view"),
            relief="flat",
        )
        button_2.place(x=547.0, y=116.0, width=209.0, height=74.0)
        self.button_image_3 = PhotoImage(file=relative_to_assets("button_3.png"))
        button_3 = Button(
            self,
            image=self.button_image_3,
            borderwidth=0,
            highlightthickness=0,
            command=lambda: self.parent.navigate("edit"),
            relief="flat",
        )
        button_3.place(x=547.0, y=210.0, width=209.0, height=74.0)
        # Set default value for entry
        self.data["check_in"].insert(0, "now")
    # Save the data to the database
    def save(self):
        """Validate the form and submit a new reservation to the database."""
        # check if any fields are empty
        for label in self.data.keys():
            if self.data[label].get() == "":
                messagebox.showinfo("Error", "Please fill in all the fields")
                return
        # Save the reservation
        result = db_controller.add_reservation(
            *[self.data[label].get() for label in ("g_id", "meal", "r_id", "check_in")]
        )
        if result:
            messagebox.showinfo("Success", "Reservation added successfully")
            self.parent.navigate("view")
            self.parent.refresh_entries()
            # clear all fields
            for label in self.data.keys():
                self.data[label].delete(0, "end")
        else:
            messagebox.showerror(
                "Error",
                "Unable to add reservation. Please make sure the data is validated",
            )
| [
"tkinter.messagebox.showerror",
"tkinter.Frame.__init__",
"tkinter.Entry",
"pathlib.Path",
"tkinter.Button",
"tkinter.Canvas",
"tkinter.messagebox.showinfo"
] | [((155, 169), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (159, 169), False, 'from pathlib import Path\n'), ((205, 221), 'pathlib.Path', 'Path', (['"""./assets"""'], {}), "('./assets')\n", (209, 221), False, 'from pathlib import Path\n'), ((292, 302), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (296, 302), False, 'from pathlib import Path\n'), ((457, 502), 'tkinter.Frame.__init__', 'Frame.__init__', (['self', 'parent', '*args'], {}), '(self, parent, *args, **kwargs)\n', (471, 502), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n'), ((666, 764), 'tkinter.Canvas', 'Canvas', (['self'], {'bg': '"""#FFFFFF"""', 'height': '(432)', 'width': '(797)', 'bd': '(0)', 'highlightthickness': '(0)', 'relief': '"""ridge"""'}), "(self, bg='#FFFFFF', height=432, width=797, bd=0, highlightthickness=\n 0, relief='ridge')\n", (672, 764), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n'), ((1453, 1568), 'tkinter.Entry', 'Entry', (['self'], {'bd': '(0)', 'bg': '"""#EFEFEF"""', 'highlightthickness': '(0)', 'font': "('Montserrat Bold', 18 * -1)", 'foreground': '"""#777777"""'}), "(self, bd=0, bg='#EFEFEF', highlightthickness=0, font=(\n 'Montserrat Bold', 18 * -1), foreground='#777777')\n", (1458, 1568), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n'), ((2316, 2431), 'tkinter.Entry', 'Entry', (['self'], {'bd': '(0)', 'bg': '"""#EFEFEF"""', 'highlightthickness': '(0)', 'font': "('Montserrat Bold', 18 * -1)", 'foreground': '"""#777777"""'}), "(self, bd=0, bg='#EFEFEF', highlightthickness=0, font=(\n 'Montserrat Bold', 18 * -1), foreground='#777777')\n", (2321, 2431), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n'), ((3173, 3287), 'tkinter.Entry', 'Entry', (['self'], {'bd': '(0)', 'bg': '"""#EFEFEF"""', 'highlightthickness': '(0)', 'foreground': '"""#777777"""', 'font': "('Montserrat Bold', 18 * 
-1)"}), "(self, bd=0, bg='#EFEFEF', highlightthickness=0, foreground='#777777',\n font=('Montserrat Bold', 18 * -1))\n", (3178, 3287), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n'), ((4037, 4151), 'tkinter.Entry', 'Entry', (['self'], {'bd': '(0)', 'bg': '"""#EFEFEF"""', 'highlightthickness': '(0)', 'foreground': '"""#777777"""', 'font': "('Montserrat Bold', 18 * -1)"}), "(self, bd=0, bg='#EFEFEF', highlightthickness=0, foreground='#777777',\n font=('Montserrat Bold', 18 * -1))\n", (4042, 4151), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n'), ((4439, 4553), 'tkinter.Button', 'Button', (['self'], {'image': 'self.button_image_1', 'borderwidth': '(0)', 'highlightthickness': '(0)', 'command': 'self.save', 'relief': '"""flat"""'}), "(self, image=self.button_image_1, borderwidth=0, highlightthickness=0,\n command=self.save, relief='flat')\n", (4445, 4553), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n'), ((6603, 6667), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Success"""', '"""Reservation added successfully"""'], {}), "('Success', 'Reservation added successfully')\n", (6622, 6667), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n'), ((6902, 7004), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Unable to add reservation. Please make sure the data is validated"""'], {}), "('Error',\n 'Unable to add reservation. Please make sure the data is validated')\n", (6922, 7004), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n'), ((6308, 6369), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error"""', '"""Please fill in all the fields"""'], {}), "('Error', 'Please fill in all the fields')\n", (6327, 6369), False, 'from tkinter import Frame, Canvas, Entry, Text, Button, PhotoImage, messagebox\n')] |
import cv2
import numpy as np
import imutils
from collections import defaultdict
# mouse callback function
def define_points(target_img):
    """Interactively collect corner points clicked on *target_img*.

    Opens an OpenCV window; each double left-click drops a blue marker and
    records the pixel coordinate.  Press Esc to finish.

    Parameters:
        target_img: Image (numpy array) to display and annotate in place.

    Returns:
        An (N, 2) numpy array of the clicked [x, y] coordinates.
    """
    corners = []

    def draw_circle(event, x, y, flags, param):
        # Record a point on double left-click and mark it on the image.
        # Closure state replaces the original `global refPt`, which leaked a
        # module-level variable; debug prints were also removed.
        if event == cv2.EVENT_LBUTTONDBLCLK:
            cv2.circle(param, (x, y), 5, (255, 0, 0), -1)
            corners.append([x, y])

    cv2.namedWindow('image')
    cv2.setMouseCallback('image', draw_circle, target_img)
    while True:
        cv2.imshow('image', target_img)
        key = cv2.waitKey(20) & 0xFF
        if key == 27:  # Esc terminates point collection
            break
    cv2.destroyAllWindows()
    return np.array(corners)
def order_points(pts):
    """Order four points as [top-left, top-right, bottom-right, bottom-left].

    The top-left corner has the smallest x+y sum and the bottom-right the
    largest; the top-right has the smallest y-x difference and the
    bottom-left the largest.
    """
    sums = pts.sum(axis=1)
    diffs = np.diff(pts, axis=1)
    ordered = np.zeros((4, 2), dtype="float32")
    ordered[0] = pts[np.argmin(sums)]   # top-left
    ordered[1] = pts[np.argmin(diffs)]  # top-right
    ordered[2] = pts[np.argmax(sums)]   # bottom-right
    ordered[3] = pts[np.argmax(diffs)]  # bottom-left
    return ordered
def segment_by_angle_kmeans(lines, k=2, **kwargs):
    """Group Hough lines into *k* clusters by angle using k-means.

    Each line's angle is doubled and mapped onto the unit circle so that
    angles near 0 and pi (the same orientation) land close together before
    clustering.  Optional kwargs: ``criteria``, ``flags``, ``attempts``.
    """
    criteria_type = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER
    criteria = kwargs.get('criteria', (criteria_type, 10, 1.0))
    flags = kwargs.get('flags', cv2.KMEANS_RANDOM_CENTERS)
    attempts = kwargs.get('attempts', 10)

    # Angles are in [0, pi] radians.
    angles = np.array([line[0][1] for line in lines])
    coords = np.array(
        [[np.cos(2 * a), np.sin(2 * a)] for a in angles],
        dtype=np.float32,
    )

    # cv2.kmeans returns (retval, labels, centers); only labels are needed.
    _, labels, _ = cv2.kmeans(coords, k, None, criteria, attempts, flags)
    labels = labels.reshape(-1)

    groups = defaultdict(list)
    for label, line in zip(labels, lines):
        groups[label].append(line)
    return list(groups.values())
def intersection(line1, line2):
    """Return the integer pixel intersection of two Hesse-normal-form lines.

    Solves the 2x2 system  [cos t, sin t] . [x, y]^T = rho  for both lines.
    See https://stackoverflow.com/a/383527/5087436
    """
    rho1, theta1 = line1[0]
    rho2, theta2 = line2[0]
    coeffs = np.array([
        [np.cos(theta1), np.sin(theta1)],
        [np.cos(theta2), np.sin(theta2)],
    ])
    rhos = np.array([[rho1], [rho2]])
    x, y = np.linalg.solve(coeffs, rhos)
    return [[int(np.round(x)), int(np.round(y))]]
def segmented_intersections(lines):
    """Return all pairwise intersections between lines of different groups."""
    points = []
    for idx, group in enumerate(lines[:-1]):
        # Only pair against later groups so each cross-group pair is used once.
        for other in lines[idx + 1:]:
            points.extend(
                intersection(a, b) for a in group for b in other
            )
    return points
def isEqual(l1, l2):
    """Return True when two segments (x1, y1, x2, y2) are roughly the same line.

    Two segments match when their directions differ by less than pi/30
    radians and their midpoints are closer than half the longer segment.

    The original body was transliterated from C++ and referenced undefined
    names (sqrtf, fabs, cos, CV_PI, true/false), raising NameError on every
    call; this version uses numpy/builtin equivalents.

    Parameters:
        l1, l2: Sequences of four numbers (x1, y1, x2, y2).
    """
    length1 = np.sqrt((l1[2] - l1[0]) ** 2 + (l1[3] - l1[1]) ** 2)
    length2 = np.sqrt((l2[2] - l2[0]) ** 2 + (l2[3] - l2[1]) ** 2)
    # Dot product of the two direction vectors.
    product = (l1[2] - l1[0]) * (l2[2] - l2[0]) + (l1[3] - l1[1]) * (l2[3] - l2[1])
    # Reject pairs whose directions differ by more than pi/30 radians.
    if abs(product / (length1 * length2)) < np.cos(np.pi / 30):
        return False
    mx1 = (l1[0] + l1[2]) * 0.5
    mx2 = (l2[0] + l2[2]) * 0.5
    my1 = (l1[1] + l1[3]) * 0.5
    my2 = (l2[1] + l2[3]) * 0.5
    dist = np.sqrt((mx1 - mx2) ** 2 + (my1 - my2) ** 2)
    # Reject pairs whose midpoints are far apart relative to segment length.
    if dist > max(length1, length2) * 0.5:
        return False
    return True
def birdseye_correction(img = "angled.jpg"):
    """Interactive bird's-eye (top-down) perspective correction of an image.

    The user double-clicks the four corners of the region of interest
    (define_points), the points are ordered (order_points), the region is
    warped to a fronto-parallel view, and Hough lines are detected and
    drawn on the warped image.  Results are shown in OpenCV windows and
    the annotated image is written to "unclustered_lines.jpg".

    Parameters:
        img: Path of the image file to load (read as grayscale).
    """
    img = cv2.imread(img,0)
    resized = imutils.resize(img, height = 1000)
    copy = resized.copy()
    # User clicks the four corners; order them tl, tr, br, bl.
    rect = order_points(define_points(copy))
    print (rect)
    (tl, tr, br, bl) = rect
    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordiates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0]-bl[0])**2)+((br[1]-bl[1])**2))
    widthB = np.sqrt(((tr[0]-tl[0])**2)+((tr[1]-tl[1])**2))
    maxWidth = max(int(widthA), int(widthB))
    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0]-br[0])**2)+((tr[1]-br[1])**2))
    heightB = np.sqrt(((tl[0]-bl[0])**2)+((tl[1]-bl[1])**2))
    maxHeight = max(int(heightA), int(heightB))
    # Destination rectangle for the perspective warp.
    dst = np.array([[0, 0], \
        [maxWidth - 1, 0], \
        [maxWidth - 1, maxHeight - 1], \
        [0, maxHeight - 1]], dtype = "float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(resized, M, (maxWidth, maxHeight))
    cv2.imshow("warped", warped)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    # Blur + adaptive threshold to binarise before morphology.
    blurred_img = cv2.GaussianBlur(warped,(3,3),0)
    binary = cv2.adaptiveThreshold(blurred_img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
        cv2.THRESH_BINARY,31,2)
    # noise removal
    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(binary,cv2.MORPH_OPEN,kernel, iterations = 2)
    # Apply edge detection method on the image
    edges = cv2.Canny(warped,50,150,apertureSize = 3)
    #
    cv2.imshow("edges", edges)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # This returns an array of r and theta values
    # NOTE(review): HoughLines returns None when no line clears the threshold,
    # which would make the loop below raise TypeError — confirm inputs.
    lines = cv2.HoughLines(edges,1,np.pi/180, 140)
    # The below for loop runs till r and theta values
    # are in the range of the 2d array
    for line in lines:
        for r,theta in line:
            # Stores the value of cos(theta) in a
            a = np.cos(theta)
            # Stores the value of sin(theta) in b
            b = np.sin(theta)
            # x0 stores the value rcos(theta)
            x0 = a*r
            # y0 stores the value rsin(theta)
            y0 = b*r
            # x1 stores the rounded off value of (rcos(theta)-1000sin(theta))
            x1 = int(x0 + 1000*(-b))
            # y1 stores the rounded off value of (rsin(theta)+1000cos(theta))
            y1 = int(y0 + 1000*(a))
            # x2 stores the rounded off value of (rcos(theta)+1000sin(theta))
            x2 = int(x0 - 1000*(-b))
            # y2 stores the rounded off value of (rsin(theta)-1000cos(theta))
            y2 = int(y0 - 1000*(a))
            # cv2.line draws a line in img from the point(x1,y1) to (x2,y2).
            # (0,0,255) denotes the colour of the line to be
            # In this case, it is red.
            cv2.line(warped,(x1,y1), (x2,y2), (0,0,255),2)
    # labels = []
    # num_lines = partition(lines, labels, isEqual)
    # define criteria, number of clusters(K) and apply kmeans()
    # criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 54, 1.0)
    # K = 54
    # ret,label,center=cv2.kmeans(lines,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
    #
    # # Now convert back into uint8, and make original image
    # center = np.uint8(center)
    # res = center[label.flatten()]
    # print(res.shape, img.shape)
    # # res2 = res.reshape((img.shape))
    # cv2.imshow('res',res)
    # res2 = cv2.resize(res, warped.shape);
    # cv2.imshow('img', img)
    # cv2.imshow('res2',res2)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    #
    cv2.imwrite("unclustered_lines.jpg", warped)
    #
    cv2.imshow("lines", warped)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # segmented = segment_by_angle_kmeans(lines)
    # intersections = segmented_intersections(segmented)
    # print(intersections)
    # draw the intersection points
    # intersectsimg = img.copy()
    # for cx, cy in zip(intersections):
    #     cx = np.round(cx).astype(int)
    #     cy = np.round(cy).astype(int)
    #     color = np.random.randint(0,255,3).tolist() # random colors
    #     cv2.circle(intersectsimg, (cx, cy), radius=2, color=color, thickness=-1) # -1: filled circle
    #
    #
    # cv2.imshow("intersections", intersectionimg)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
def main():
    """Script entry point: run the interactive bird's-eye correction."""
    birdseye_correction()


if __name__ == "__main__":
    main()
| [
"numpy.sqrt",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"cv2.HoughLines",
"cv2.destroyAllWindows",
"numpy.sin",
"cv2.setMouseCallback",
"cv2.line",
"numpy.diff",
"numpy.argmin",
"cv2.waitKey",
"numpy.round",
"numpy.ones",
"cv2.getPerspectiveTransform",
"cv2.kmeans",
"numpy.... | [((429, 453), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (444, 453), False, 'import cv2\n'), ((458, 512), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'draw_circle', 'target_img'], {}), "('image', draw_circle, target_img)\n", (478, 512), False, 'import cv2\n'), ((674, 697), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (695, 697), False, 'import cv2\n'), ((736, 753), 'numpy.array', 'np.array', (['corners'], {}), '(corners)\n', (744, 753), True, 'import numpy as np\n'), ((1031, 1064), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': '"""float32"""'}), "((4, 2), dtype='float32')\n", (1039, 1064), True, 'import numpy as np\n'), ((1437, 1457), 'numpy.diff', 'np.diff', (['pts'], {'axis': '(1)'}), '(pts, axis=1)\n', (1444, 1457), True, 'import numpy as np\n'), ((2151, 2191), 'numpy.array', 'np.array', (['[line[0][1] for line in lines]'], {}), '([line[0][1] for line in lines])\n', (2159, 2191), True, 'import numpy as np\n'), ((2606, 2623), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2617, 2623), False, 'from collections import defaultdict\n'), ((3160, 3186), 'numpy.array', 'np.array', (['[[rho1], [rho2]]'], {}), '([[rho1], [rho2]])\n', (3168, 3186), True, 'import numpy as np\n'), ((3200, 3221), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (3215, 3221), True, 'import numpy as np\n'), ((4367, 4385), 'cv2.imread', 'cv2.imread', (['img', '(0)'], {}), '(img, 0)\n', (4377, 4385), False, 'import cv2\n'), ((4399, 4431), 'imutils.resize', 'imutils.resize', (['img'], {'height': '(1000)'}), '(img, height=1000)\n', (4413, 4431), False, 'import imutils\n'), ((4739, 4791), 'numpy.sqrt', 'np.sqrt', (['((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)'], {}), '((br[0] - bl[0]) ** 2 + (br[1] - bl[1]) ** 2)\n', (4746, 4791), True, 'import numpy as np\n'), ((4799, 4851), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)'], {}), 
'((tr[0] - tl[0]) ** 2 + (tr[1] - tl[1]) ** 2)\n', (4806, 4851), True, 'import numpy as np\n'), ((5095, 5147), 'numpy.sqrt', 'np.sqrt', (['((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)'], {}), '((tr[0] - br[0]) ** 2 + (tr[1] - br[1]) ** 2)\n', (5102, 5147), True, 'import numpy as np\n'), ((5156, 5208), 'numpy.sqrt', 'np.sqrt', (['((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)'], {}), '((tl[0] - bl[0]) ** 2 + (tl[1] - bl[1]) ** 2)\n', (5163, 5208), True, 'import numpy as np\n'), ((5262, 5372), 'numpy.array', 'np.array', (['[[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]]'], {'dtype': '"""float32"""'}), "([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, \n maxHeight - 1]], dtype='float32')\n", (5270, 5372), True, 'import numpy as np\n'), ((5462, 5500), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['rect', 'dst'], {}), '(rect, dst)\n', (5489, 5500), False, 'import cv2\n'), ((5514, 5568), 'cv2.warpPerspective', 'cv2.warpPerspective', (['resized', 'M', '(maxWidth, maxHeight)'], {}), '(resized, M, (maxWidth, maxHeight))\n', (5533, 5568), False, 'import cv2\n'), ((5574, 5602), 'cv2.imshow', 'cv2.imshow', (['"""warped"""', 'warped'], {}), "('warped', warped)\n", (5584, 5602), False, 'import cv2\n'), ((5607, 5621), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5618, 5621), False, 'import cv2\n'), ((5626, 5649), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5647, 5649), False, 'import cv2\n'), ((5724, 5759), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['warped', '(3, 3)', '(0)'], {}), '(warped, (3, 3), 0)\n', (5740, 5759), False, 'import cv2\n'), ((5770, 5872), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['blurred_img', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(31)', '(2)'], {}), '(blurred_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2\n .THRESH_BINARY, 31, 2)\n', (5791, 5872), False, 'import cv2\n'), ((5914, 5939), 'numpy.ones', 'np.ones', (['(3, 3)', 
'np.uint8'], {}), '((3, 3), np.uint8)\n', (5921, 5939), True, 'import numpy as np\n'), ((5952, 6014), 'cv2.morphologyEx', 'cv2.morphologyEx', (['binary', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(2)'}), '(binary, cv2.MORPH_OPEN, kernel, iterations=2)\n', (5968, 6014), False, 'import cv2\n'), ((6074, 6116), 'cv2.Canny', 'cv2.Canny', (['warped', '(50)', '(150)'], {'apertureSize': '(3)'}), '(warped, 50, 150, apertureSize=3)\n', (6083, 6116), False, 'import cv2\n'), ((6126, 6152), 'cv2.imshow', 'cv2.imshow', (['"""edges"""', 'edges'], {}), "('edges', edges)\n", (6136, 6152), False, 'import cv2\n'), ((6157, 6171), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6168, 6171), False, 'import cv2\n'), ((6176, 6199), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6197, 6199), False, 'import cv2\n'), ((6264, 6306), 'cv2.HoughLines', 'cv2.HoughLines', (['edges', '(1)', '(np.pi / 180)', '(140)'], {}), '(edges, 1, np.pi / 180, 140)\n', (6278, 6306), False, 'import cv2\n'), ((8153, 8197), 'cv2.imwrite', 'cv2.imwrite', (['"""unclustered_lines.jpg"""', 'warped'], {}), "('unclustered_lines.jpg', warped)\n", (8164, 8197), False, 'import cv2\n'), ((8208, 8235), 'cv2.imshow', 'cv2.imshow', (['"""lines"""', 'warped'], {}), "('lines', warped)\n", (8218, 8235), False, 'import cv2\n'), ((8240, 8254), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8251, 8254), False, 'import cv2\n'), ((8259, 8282), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8280, 8282), False, 'import cv2\n'), ((534, 565), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'target_img'], {}), "('image', target_img)\n", (544, 565), False, 'import cv2\n'), ((1216, 1228), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (1225, 1228), True, 'import numpy as np\n'), ((1245, 1257), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (1254, 1257), True, 'import numpy as np\n'), ((1475, 1490), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (1484, 1490), True, 
'import numpy as np\n'), ((1507, 1522), 'numpy.argmax', 'np.argmax', (['diff'], {}), '(diff)\n', (1516, 1522), True, 'import numpy as np\n'), ((2429, 2480), 'cv2.kmeans', 'cv2.kmeans', (['pts', 'k', 'None', 'criteria', 'attempts', 'flags'], {}), '(pts, k, None, criteria, attempts, flags)\n', (2439, 2480), False, 'import cv2\n'), ((294, 339), 'cv2.circle', 'cv2.circle', (['param', '(x, y)', '(5)', '(255, 0, 0)', '(-1)'], {}), '(param, (x, y), 5, (255, 0, 0), -1)\n', (304, 339), False, 'import cv2\n'), ((577, 592), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (588, 592), False, 'import cv2\n'), ((3239, 3251), 'numpy.round', 'np.round', (['x0'], {}), '(x0)\n', (3247, 3251), True, 'import numpy as np\n'), ((3258, 3270), 'numpy.round', 'np.round', (['y0'], {}), '(y0)\n', (3266, 3270), True, 'import numpy as np\n'), ((6515, 6528), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6521, 6528), True, 'import numpy as np\n'), ((6595, 6608), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6601, 6608), True, 'import numpy as np\n'), ((7391, 7443), 'cv2.line', 'cv2.line', (['warped', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(warped, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (7399, 7443), False, 'import cv2\n'), ((2281, 2298), 'numpy.cos', 'np.cos', (['(2 * angle)'], {}), '(2 * angle)\n', (2287, 2298), True, 'import numpy as np\n'), ((2298, 2315), 'numpy.sin', 'np.sin', (['(2 * angle)'], {}), '(2 * angle)\n', (2304, 2315), True, 'import numpy as np\n'), ((3071, 3085), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (3077, 3085), True, 'import numpy as np\n'), ((3087, 3101), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (3093, 3101), True, 'import numpy as np\n'), ((3113, 3127), 'numpy.cos', 'np.cos', (['theta2'], {}), '(theta2)\n', (3119, 3127), True, 'import numpy as np\n'), ((3129, 3143), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (3135, 3143), True, 'import numpy as np\n')] |
import argparse
import configparser
import os
import pathlib
import platform
import random
import subprocess as sp
import sys
import typing
import warnings
if platform.system() == 'Windows':
import winsound
try:
from IPython.core import magic
IPYTHON_INSTALLED = True
except ImportError:
IPYTHON_INSTALLED = False
def _get_config_path(system: str) -> pathlib.Path:
"""Return the path of the config file.
Parameters:
system: The OS being used.
"""
home_dir = pathlib.Path.home()
if system == 'Windows':
config_path = (
pathlib.Path(
os.getenv('APPDATA',
home_dir / pathlib.Path('AppData', 'Roaming')))
/ pathlib.Path('chime', 'chime.ini')
)
else:
config_path = home_dir / pathlib.Path('.config', 'chime', 'chime.conf')
return config_path.resolve().absolute()
def _get_default_theme(path: pathlib.Path, fallback_theme: str) -> str:
"""Check for the existence of a theme in a config file.
Parameters:
path: Path of the config file.
fallback_theme: The theme to fallback to if a config file is not found or contains no
theme.
"""
if path.exists():
config = configparser.ConfigParser()
config.read(path)
if 'chime' in config:
default_theme = config['chime'].get('theme', fallback_theme)
else:
default_theme = fallback_theme
else:
default_theme = fallback_theme
return default_theme
# Resolve the config file once at import time so THEME reflects the user's
# saved preference (falling back to the built-in 'chime' theme).
config_path = _get_config_path(platform.system())
THEME = _get_default_theme(config_path, fallback_theme='chime')

# Public API of the module.  The original list was missing a comma after
# 'success', so 'success' and 'theme' were silently concatenated into the
# bogus name 'successtheme'.
__all__ = [
    'error',
    'info',
    'notify_exceptions',
    'success',
    'theme',
    'themes',
    'warning',
]
def run(command: str, sync: bool, raise_error: bool):
    """Execute a shell command, optionally waiting for it and surfacing failures.

    Parameters:
        command: Shell command line to execute.
        sync: Wait for completion when True; otherwise fire-and-forget.
        raise_error: On failure (sync only), raise RuntimeError instead of
            emitting a warning.
    """
    if not sync:
        # Background process; stderr discarded so failures stay silent.
        sp.Popen(command, shell=True, stderr=sp.DEVNULL)
        return
    try:
        sp.run(command, shell=True, check=True, stdout=sp.PIPE, stderr=sp.PIPE)
    except sp.CalledProcessError as e:
        msg = f'{e} stderr: {e.stderr.decode().strip()}'
        if raise_error:
            raise RuntimeError(msg)
        warnings.warn(msg)
def play_wav(path: pathlib.Path, sync=True, raise_error=True):
    """Play a .wav file using the platform's native audio command.

    Parameters:
        path: Path to a .wav file.
        sync: Play synchronously when True; otherwise play in the background,
            failing silently if an error occurs.
        raise_error: Raise on failure instead of emitting a warning.

    Raises:
        RuntimeError: If the platform is not supported.
    """
    system = platform.system()
    # Unix-like platforms shell out to their stock audio player.
    commands = {
        'Darwin': f'afplay {path}',
        'Linux': f'aplay {path}',
        'OpenBSD': f'aucat -i {path}',
    }
    if system in commands:
        run(commands[system], sync, raise_error)
    elif system == 'Windows':
        flags = winsound.SND_FILENAME
        if not sync:
            flags |= winsound.SND_ASYNC
        try:
            winsound.PlaySound(str(path), flags)
        except RuntimeError as e:
            if raise_error:
                raise e
            warnings.warn(e)
    else:
        raise RuntimeError(f'Unsupported platform ({sys.platform})')
def themes_dir() -> pathlib.Path:
    """Return the directory containing the bundled sound themes."""
    return pathlib.Path(__file__).parent / 'themes'
def current_theme_dir() -> pathlib.Path:
    """Return the current theme's sound directory."""
    name = THEME
    if name == 'random':
        # Pick a fresh theme on every call.
        name = random.choice(themes())
    return themes_dir().joinpath(name)
def themes() -> typing.List[str]:
    """Return the available themes to choose from, sorted alphabetically."""
    names = [
        entry.name
        for entry in themes_dir().iterdir()
        if not entry.name.startswith('.')  # ignores .DS_Store on MacOS
    ]
    return sorted(names)
def theme(name: str = None):
    """Get or set the current theme.

    Parameters:
        name: When a valid theme name (or 'random'), the theme is switched.
            When `None`, the current theme is returned instead.

    Raises:
        ValueError: If the theme is unknown.
    """
    global THEME
    if name is None:
        return THEME
    known = name == 'random' or name in themes()
    if not known:
        raise ValueError(f'Unknown theme ({name})')
    THEME = name
def notify(event: str, sync: bool, raise_error: bool):
    """Play the .wav file for *event* from the current theme.

    Parameters:
        event: Event name; `<event>.wav` must exist in the theme directory.
        sync: Play synchronously when True.
        raise_error: Raise on playback failure instead of warning.

    Raises:
        ValueError: If the theme has no sound file for the event.
    """
    wav_path = current_theme_dir().joinpath(f'{event}.wav')
    if not wav_path.exists():
        # Fixed error message: was the ungrammatical "is doesn't exist".
        raise ValueError(f"{wav_path} doesn't exist")
    play_wav(wav_path, sync, raise_error)
def success(sync=False, raise_error=False):
    """Make a success sound.

    Parameters:
        sync: The sound file will be played synchronously if this is `True`. If not, then the sound
            will be played in a separate process. In such a case, the process will fail silently if
            an error occurs.
        raise_error: Whether to raise an exception when an error occurs, or instead to just send a
            warning.
    """
    return notify('success', sync, raise_error)
def warning(sync=False, raise_error=False):
    """Make a warning sound.

    Parameters:
        sync: The sound file will be played synchronously if this is `True`. If not, then the sound
            will be played in a separate process. In such a case, the process will fail silently if
            an error occurs.
        raise_error: Whether to raise an exception when an error occurs, or instead to just send a
            warning.
    """
    return notify('warning', sync, raise_error)
def error(sync=False, raise_error=False):
    """Make an error sound.

    Parameters:
        sync: The sound file will be played synchronously if this is `True`. If not, then the sound
            will be played in a separate process. In such a case, the process will fail silently if
            an error occurs.
        raise_error: Whether to raise an exception when an error occurs, or instead to just send a
            warning.
    """
    return notify('error', sync, raise_error)
def info(sync=False, raise_error=False):
    """Make a generic information sound.

    Parameters:
        sync: The sound file will be played synchronously if this is `True`. If not, then the sound
            will be played in a separate process. In such a case, the process will fail silently if
            an error occurs.
        raise_error: Whether to raise an exception when an error occurs, or instead to just send a
            warning.
    """
    return notify('info', sync, raise_error)
def notify_exceptions():
    """Install hooks so error() is played whenever an uncaught exception occurs.

    Replaces `sys.excepthook`, and additionally registers an IPython
    post-run-cell callback when IPython is available.
    """
    def except_hook(exctype, value, traceback):
        error()
        # Chain to the default hook so the traceback still prints.
        sys.__excepthook__(exctype, value, traceback)
    sys.excepthook = except_hook
    if IPYTHON_INSTALLED:
        class Watcher:
            # Plays the error sound after any IPython cell that raised.
            def __init__(self, ipython):
                self.shell = ipython
            def post_run_cell(self, result):
                if result.error_in_exec:
                    error()
        try:
            ipython = get_ipython()
        except NameError:
            # Not running inside an IPython session; nothing more to hook.
            return
        watcher = Watcher(ipython)
        ipython.events.register('post_run_cell', watcher.post_run_cell)
if IPYTHON_INSTALLED:
    @magic.magics_class
    class ChimeMagics(magic.Magics):
        """IPython magic: ``%chime code`` / ``%%chime`` chimes on success or error."""
        @magic.needs_local_scope
        @magic.line_cell_magic
        def chime(self, line, cell=None, local_ns=None):
            def run(code):
                # Execute the user's code in the notebook's local namespace
                # and play the matching sound for the outcome.
                try:
                    exec(code, local_ns)
                    success()
                except Exception as e:
                    error()
                    raise e
            # Line magic receives only `line`; cell magic receives `cell` too.
            if cell is None:
                run(line)
            else:
                run(cell)
    def load_ipython_extension(ipython):
        # Entry point used by `%load_ext chime`.
        ipython.register_magics(ChimeMagics)
def main():
    """Command-line interface."""
    cli = argparse.ArgumentParser()
    cli.add_argument('event', nargs='?', default='success',
                     help='either one of {success, warning, error, info}')
    theme_names = ", ".join(themes())
    cli.add_argument('--theme', help=f'either one of {{{theme_names}}}')
    parsed = cli.parse_args()
    if parsed.theme:
        theme(parsed.theme)
    # Best effort: never raise and never block on the sound playing.
    notify(parsed.event, sync=False, raise_error=False)
if __name__ == '__main__':
    main()
| [
"configparser.ConfigParser",
"argparse.ArgumentParser",
"pathlib.Path",
"subprocess.Popen",
"pathlib.Path.home",
"subprocess.run",
"platform.system",
"warnings.warn",
"sys.__excepthook__"
] | [((160, 177), 'platform.system', 'platform.system', ([], {}), '()\n', (175, 177), False, 'import platform\n'), ((503, 522), 'pathlib.Path.home', 'pathlib.Path.home', ([], {}), '()\n', (520, 522), False, 'import pathlib\n'), ((1595, 1612), 'platform.system', 'platform.system', ([], {}), '()\n', (1610, 1612), False, 'import platform\n'), ((2975, 2992), 'platform.system', 'platform.system', ([], {}), '()\n', (2990, 2992), False, 'import platform\n'), ((8308, 8333), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8331, 8333), False, 'import argparse\n'), ((1274, 1301), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1299, 1301), False, 'import configparser\n'), ((2209, 2257), 'subprocess.Popen', 'sp.Popen', (['command'], {'shell': '(True)', 'stderr': 'sp.DEVNULL'}), '(command, shell=True, stderr=sp.DEVNULL)\n', (2217, 2257), True, 'import subprocess as sp\n'), ((3746, 3768), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (3758, 3768), False, 'import pathlib\n'), ((7110, 7155), 'sys.__excepthook__', 'sys.__excepthook__', (['exctype', 'value', 'traceback'], {}), '(exctype, value, traceback)\n', (7128, 7155), False, 'import sys\n'), ((744, 778), 'pathlib.Path', 'pathlib.Path', (['"""chime"""', '"""chime.ini"""'], {}), "('chime', 'chime.ini')\n", (756, 778), False, 'import pathlib\n'), ((832, 878), 'pathlib.Path', 'pathlib.Path', (['""".config"""', '"""chime"""', '"""chime.conf"""'], {}), "('.config', 'chime', 'chime.conf')\n", (844, 878), False, 'import pathlib\n'), ((1894, 1965), 'subprocess.run', 'sp.run', (['command'], {'shell': '(True)', 'check': '(True)', 'stdout': 'sp.PIPE', 'stderr': 'sp.PIPE'}), '(command, shell=True, check=True, stdout=sp.PIPE, stderr=sp.PIPE)\n', (1900, 1965), True, 'import subprocess as sp\n'), ((2172, 2190), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (2185, 2190), False, 'import warnings\n'), ((693, 727), 'pathlib.Path', 'pathlib.Path', 
(['"""AppData"""', '"""Roaming"""'], {}), "('AppData', 'Roaming')\n", (705, 727), False, 'import pathlib\n'), ((3541, 3557), 'warnings.warn', 'warnings.warn', (['e'], {}), '(e)\n', (3554, 3557), False, 'import warnings\n')] |
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern
from pymongo.errors import InvalidOperation
from iu_mongo.errors import TransactionError
__all__ = ['Session', 'TransactionContext']
# Defaults applied by Session.start_transaction(): majority read/write
# concern with a 5 second write timeout, reading from the primary.
DEFAULT_READ_CONCERN = ReadConcern('majority')
DEFAULT_WRITE_CONCERN = WriteConcern(w='majority', wtimeout=5000)
DEFAULT_READ_PREFERENCE = ReadPreference.PRIMARY
class TransactionContext(object):
    """Thin context-manager wrapper around a pymongo transaction context.

    Delegates __enter__/__exit__ to the wrapped pymongo object while
    keeping a handle on the owning client session so the transaction id
    can be exposed.
    """

    def __init__(self, pymongo_transaction_context, pymongo_session):
        self._pymongo_transaction_context = pymongo_transaction_context
        self._pymongo_session = pymongo_session

    def __enter__(self):
        # Enter the wrapped pymongo transaction but hand back *this* wrapper.
        self._pymongo_transaction_context.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delegate cleanup; exceptions are never suppressed here (returns None).
        self._pymongo_transaction_context.__exit__(exc_type, exc_val, exc_tb)

    @property
    def _transaction(self):
        # Reaches into the session's private transaction state.
        return self._pymongo_session._transaction

    @property
    def transaction_id(self):
        """Identifier of the transaction currently bound to the session."""
        return self._transaction.transaction_id
class Session(object):
    """Wrapper over a pymongo client session with transaction helpers.

    Transaction-start/abort errors reported by pymongo as InvalidOperation
    are translated into iu_mongo TransactionError.
    """

    def __init__(self, pymongo_client_session):
        self._pymongo_client_session = pymongo_client_session

    @property
    def pymongo_session(self):
        """The wrapped pymongo client session."""
        return self._pymongo_client_session

    @property
    def pymongo_client(self):
        """Client owning the wrapped session."""
        return self._pymongo_client_session.client

    @property
    def session_id(self):
        return self._pymongo_client_session.session_id

    def start_transaction(self):
        """Begin a transaction with the module's default concerns.

        Returns:
            TransactionContext wrapping the started transaction.

        Raises:
            TransactionError: if pymongo rejects the operation.
        """
        try:
            inner_ctx = self._pymongo_client_session.start_transaction(
                read_concern=DEFAULT_READ_CONCERN,
                write_concern=DEFAULT_WRITE_CONCERN,
                read_preference=DEFAULT_READ_PREFERENCE)
            return TransactionContext(inner_ctx, self._pymongo_client_session)
        except InvalidOperation as e:
            raise TransactionError(str(e))

    def abort_transaction(self):
        """Abort the active transaction, translating pymongo errors."""
        try:
            self._pymongo_client_session.abort_transaction()
        except InvalidOperation as e:
            raise TransactionError(str(e))

    def commit_transaction(self):
        # NOTE: commit errors are deliberately not translated here.
        self._pymongo_client_session.commit_transaction()

    def __enter__(self):
        self._pymongo_client_session.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._pymongo_client_session.__exit__(exc_type, exc_val, exc_tb)
| [
"pymongo.read_concern.ReadConcern",
"pymongo.write_concern.WriteConcern"
] | [((302, 325), 'pymongo.read_concern.ReadConcern', 'ReadConcern', (['"""majority"""'], {}), "('majority')\n", (313, 325), False, 'from pymongo.read_concern import ReadConcern\n'), ((350, 391), 'pymongo.write_concern.WriteConcern', 'WriteConcern', ([], {'w': '"""majority"""', 'wtimeout': '(5000)'}), "(w='majority', wtimeout=5000)\n", (362, 391), False, 'from pymongo.write_concern import WriteConcern\n')] |
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request, Markup, abort, make_response
# import peewee
# import json
# WSGI application serving the iperf3 test endpoints below.
api = Flask(__name__)
@api.route('/')
def index():
    """Serve an HTML form for manually driving the /iperf3test endpoint."""
    html = '''
<form action="/iperf3test">
<p><label>iperf3 test: </label></p>
Test Name: <input type="text" name="TestName"></p>
Config File: <input type="text" name="ConfigFile"></p>
Interval: <input type="text" name="Interval" value="1"></p>
Bandwidth: <input type="text" name="Bandwidth" value="1G"></p>
MSS: <input type="text" name="MSS" value="1460"></p>
Parallel: <input type="text" name="Parallel" value="1"></p>
Time: <input type="text" name="Time" value="10"></p>
Protocol is UDP? : <input type="checkbox" name="UDP?"></p>
Use Server Output? : <input type="checkbox" name="Get Server Output?"></p>
Use ESXTOP Output? : <input type="checkbox" name="Get ESXTOP Output?"></p>
<button type="submit" formmethod="get">GET</button></p>
<button type="submit" formmethod="post">POST</button></p>
</form>
'''
    return Markup(html)
@api.route('/iperf3test', methods=['GET', 'POST'])
def iperf3test():
    """Echo the submitted TestName (form field on POST, query arg on GET)."""
    try:
        if request.method != 'POST':
            return request.args.get('TestName', '')
        return request.form['TestName']
    except Exception as exc:
        # Surface the error text to the client instead of a 500 page.
        return str(exc)
@api.route('/sayHello', methods=['GET'])
def say_hello():
    """Return a small JSON greeting payload."""
    payload = {
        "result": True,
        "data": "Hello, world!"
    }
    # jsonify handles Unicode; use json.dumps(..., ensure_ascii=False)
    # directly if non-escaped output is ever required.
    return make_response(jsonify(payload))
@api.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML error page."""
    body = jsonify({'error': 'Not found'})
    return make_response(body, 404)
if __name__ == '__main__':
    # Development server only; listens on all interfaces, port 8000.
    api.run(host='0.0.0.0', port=8000)
| [
"flask.Markup",
"flask.request.args.get",
"flask.jsonify",
"flask.Flask"
] | [((133, 148), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (138, 148), False, 'from flask import Flask, jsonify, request, Markup, abort, make_response\n'), ((1115, 1127), 'flask.Markup', 'Markup', (['html'], {}), '(html)\n', (1121, 1127), False, 'from flask import Flask, jsonify, request, Markup, abort, make_response\n'), ((1569, 1584), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (1576, 1584), False, 'from flask import Flask, jsonify, request, Markup, abort, make_response\n'), ((1766, 1797), 'flask.jsonify', 'jsonify', (["{'error': 'Not found'}"], {}), "({'error': 'Not found'})\n", (1773, 1797), False, 'from flask import Flask, jsonify, request, Markup, abort, make_response\n'), ((1321, 1353), 'flask.request.args.get', 'request.args.get', (['"""TestName"""', '""""""'], {}), "('TestName', '')\n", (1337, 1353), False, 'from flask import Flask, jsonify, request, Markup, abort, make_response\n')] |
import math
import numpy as np
from scipy.spatial.distance import cdist
from torch.utils.data import Dataset
import torch
import os
import pickle
from problems.tsptw.state_tsptw import StateTSPTWInt
from utils.functions import accurate_cdist
class TSPTW(object):
    """Problem definition for the TSP with (hard) Time Windows."""
    NAME = 'tsptw'  # TSP with Time Windows
    @staticmethod
    def get_costs(dataset, pi):
        """
        :param dataset: (batch_size, graph_size, 2) coordinates
        :param pi: (batch_size, graph_size) permutations representing tours
        :return: (batch_size) lengths of tours
        """
        # Check that tours are valid, i.e. contain 1 to n (0 is depot and should not be included)
        if (pi[:, 0] == 0).all():
            pi = pi[:, 1:]  # Strip of depot
        assert (
            torch.arange(pi.size(1), out=pi.data.new()).view(1, -1).expand_as(pi) + 1 ==
            pi.data.sort(1)[0]
        ).all(), "Invalid tour"
        # Distance must be provided in dataset since way of rounding can vary
        if 'dist' in dataset:
            dist = dataset['dist']
        else:
            # Fall back to recomputing the rounded integer distance matrix
            # over depot + node coordinates.
            coords = torch.cat((dataset['depot'][:, None, :], dataset['loc']), 1)
            dist = accurate_cdist(coords, coords).round().int()
        batch_size, graph_size, _ = dataset['loc'].size()
        # Check the time windows
        t = dist.new_zeros((batch_size, ))
        #assert (pi[:, 0] == 0).all() # Tours must start at depot
        batch_zeros = pi.new_zeros((batch_size, ))
        cur = batch_zeros
        batch_ind = torch.arange(batch_size).long()
        # Per-node time-window lower/upper bounds.
        lb, ub = torch.unbind(dataset['timew'], -1)
        for i in range(graph_size - 1):
            next = pi[:, i]
            # Arrival time at `next`: travel from current node, waiting for
            # the window to open (lb) if arriving early.
            t = torch.max(t + dist[batch_ind, cur, next], lb[batch_ind, next])
            assert (t <= ub[batch_ind, next]).all()
            cur = next
        # Total length: depot -> first node + consecutive legs + last node -> depot.
        length = dist[batch_ind, 0, pi[:, 0]] + dist[batch_ind[:, None], pi[:, :-1], pi[:, 1:]].sum(-1) + dist[batch_ind, pi[:, -1], 0]
        # We want to maximize total prize but code minimizes so return negative
        return length, None
    # @staticmethod
    def make_dataset(*args, **kwargs):
        # Called as TSPTW.make_dataset(...); under Python 3 a plain function
        # accessed on the class behaves like a static method here.
        return TSPTWDataset(*args, **kwargs)
    @staticmethod
    def make_state(*args, **kwargs):
        return StateTSPTWInt.initialize(*args, **kwargs)
def get_rounded_distance_matrix(coord):
    """Pairwise Euclidean distances between coordinates, rounded to integers.

    Args:
        coord: Array-like of shape (n, d) with node coordinates.

    Returns:
        numpy.ndarray of shape (n, n) with an integer dtype.
    """
    # NOTE: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement and is equivalent.
    return cdist(coord, coord).round().astype(int)
def generate_instance(size):
    """On-the-fly instance generation is not supported for TSPTW."""
    raise NotImplementedError()
class TSPTWDataset(Dataset):
    """Dataset of pre-generated TSPTW instances loaded from a pickle file.

    Each item is a dict with 'loc', 'depot', 'timew' and 'max_coord' tensors.

    Args:
        filename (str): Path to a .pkl file with instances (required).
        size (int): Unused; kept for interface compatibility.
        num_samples (int): Maximum number of instances to load.
        offset (int): Index of the first instance to load.
        distribution: Unused; kept for interface compatibility.
        normalize (bool): Must be False; normalization is not supported.
    """
    def __init__(self, filename=None, size=100, num_samples=1000000, offset=0, distribution=None, normalize=False):
        super(TSPTWDataset, self).__init__()
        self.data_set = []
        # Only loading from an existing (unnormalized) pickle file is supported.
        assert filename is not None
        assert not normalize
        assert os.path.splitext(filename)[1] == '.pkl'
        with open(filename, 'rb') as f:
            data = pickle.load(f)
        self.data = [
            {
                'loc': torch.tensor(loc, dtype=torch.float),
                'depot': torch.tensor(depot, dtype=torch.float),
                'timew': torch.tensor(timew, dtype=torch.int64),
                'max_coord': torch.tensor(max_coord, dtype=torch.int64),  # Scalar
            }
            for depot, loc, timew, max_coord in (data[offset:offset+num_samples])
        ]
        self.size = len(self.data)
    def __len__(self):
        return self.size
    def __getitem__(self, idx):
        return self.data[idx]
| [
"utils.functions.accurate_cdist",
"scipy.spatial.distance.cdist",
"torch.max",
"pickle.load",
"os.path.splitext",
"torch.unbind",
"torch.tensor",
"torch.arange",
"problems.tsptw.state_tsptw.StateTSPTWInt.initialize",
"torch.cat"
] | [((1578, 1612), 'torch.unbind', 'torch.unbind', (["dataset['timew']", '(-1)'], {}), "(dataset['timew'], -1)\n", (1590, 1612), False, 'import torch\n'), ((2256, 2297), 'problems.tsptw.state_tsptw.StateTSPTWInt.initialize', 'StateTSPTWInt.initialize', (['*args'], {}), '(*args, **kwargs)\n', (2280, 2297), False, 'from problems.tsptw.state_tsptw import StateTSPTWInt\n'), ((1104, 1164), 'torch.cat', 'torch.cat', (["(dataset['depot'][:, None, :], dataset['loc'])", '(1)'], {}), "((dataset['depot'][:, None, :], dataset['loc']), 1)\n", (1113, 1164), False, 'import torch\n'), ((1697, 1759), 'torch.max', 'torch.max', (['(t + dist[batch_ind, cur, next])', 'lb[batch_ind, next]'], {}), '(t + dist[batch_ind, cur, next], lb[batch_ind, next])\n', (1706, 1759), False, 'import torch\n'), ((2862, 2876), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2873, 2876), False, 'import pickle\n'), ((1529, 1553), 'torch.arange', 'torch.arange', (['batch_size'], {}), '(batch_size)\n', (1541, 1553), False, 'import torch\n'), ((2762, 2788), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2778, 2788), False, 'import os\n'), ((2351, 2370), 'scipy.spatial.distance.cdist', 'cdist', (['coord', 'coord'], {}), '(coord, coord)\n', (2356, 2370), False, 'from scipy.spatial.distance import cdist\n'), ((2948, 2984), 'torch.tensor', 'torch.tensor', (['loc'], {'dtype': 'torch.float'}), '(loc, dtype=torch.float)\n', (2960, 2984), False, 'import torch\n'), ((3015, 3053), 'torch.tensor', 'torch.tensor', (['depot'], {'dtype': 'torch.float'}), '(depot, dtype=torch.float)\n', (3027, 3053), False, 'import torch\n'), ((3084, 3122), 'torch.tensor', 'torch.tensor', (['timew'], {'dtype': 'torch.int64'}), '(timew, dtype=torch.int64)\n', (3096, 3122), False, 'import torch\n'), ((3157, 3199), 'torch.tensor', 'torch.tensor', (['max_coord'], {'dtype': 'torch.int64'}), '(max_coord, dtype=torch.int64)\n', (3169, 3199), False, 'import torch\n'), ((1184, 1214), 
'utils.functions.accurate_cdist', 'accurate_cdist', (['coords', 'coords'], {}), '(coords, coords)\n', (1198, 1214), False, 'from utils.functions import accurate_cdist\n')] |
"""Defines various classes and definitions that provide assistance for
unit testing Actors in an ActorSystem."""
import unittest
import pytest
import logging
import time
from thespian.actors import ActorSystem
def simpleActorTestLogging():
    """Build a minimal logging config dict for ActorSystem() initialization.

    Returns a dictConfig-style mapping giving simple stdout logging.  This
    is not needed for the simpleActorSystemBase, but is useful for the
    multiproc... system bases whose child processes should have a very
    simple logging configuration.
    """
    import sys
    if sys.platform == 'win32':
        # sys.stdout cannot be handed to a child process on Windows, which
        # breaks startup/config for some tests, so log to a rotating file.
        stream_handler = {'class': 'logging.handlers.RotatingFileHandler',
                          'filename': 'nosetests.log',
                          'maxBytes': 256 * 1024,
                          'backupCount': 3,
                          }
    else:
        stream_handler = {'class': 'logging.StreamHandler',
                          'stream': sys.stdout,
                          }
    return {'version': 1,
            'handlers': {'testStream': stream_handler},
            'root': {'handlers': ['testStream']},
            'disable_existing_loggers': False,
            }
class LocallyManagedActorSystem(object):
    """Mixin that (re)initializes the global ActorSystem singleton on demand."""

    def setSystemBase(self, newBase='simpleSystemBase', systemCapabilities=None, logDefs='BestForBase'):
        """Switch the singleton ActorSystem to `newBase` unless already active."""
        baseName = str(newBase)
        if getattr(self, 'currentBase', None) == baseName:
            return
        if logDefs == 'BestForBase':
            # multiproc bases need an explicit (simple) logging config.
            ldefs = simpleActorTestLogging() if newBase.startswith('multiproc') else False
        else:
            ldefs = logDefs
        # The ActorSystem may already exist as a singleton: shut it down
        # first, then re-create it with the requested base.
        ActorSystem(logDefs=ldefs).shutdown()
        ActorSystem(newBase, systemCapabilities, logDefs=ldefs)
        self.currentBase = baseName
class ActorSystemTestCase(unittest.TestCase, LocallyManagedActorSystem):
    """The ActorSystemTestCase is a wrapper for the unittest TestCase
    class that will startup a default ActorSystem in the provided
    setUp() and tearDown() any active ActorSystem after testing.

    If a non-default ActorSystem is to be used, the setSystemBase()
    method should be called with that system base.

    It also provides some additional methods for assistance in testing Actors.
    """
    def setUp(self):
        # Lazily start the default system base if none is active yet.
        if not hasattr(self, 'currentBase'):
            self.setSystemBase()

    def tearDown(self):
        if hasattr(self, 'currentBase'):
            # Shut down the singleton and forget the base so the next test
            # starts fresh; the short sleep lets shutdown settle.
            ActorSystem().shutdown()
            delattr(self, 'currentBase')
            import time
            time.sleep(0.02)

    @staticmethod
    def actualActorObject(actorClass):
        """Normally an Actor is only instantiated in the context of an
        ActorSystem, and then only responds to messages delivered
        via that system.  For testing purposes *only*, it may be
        desireable to have the actual Actor instance to test
        methods on that Actor directly.  This method will return
        that actual Actor instance after instantiating the actor in
        an ActorSystem.

        This method can ONLY be used with an ActorSystem that will
        instantiate the Actor in the context of the current process
        (e.g. simpleSystemBase) and the methods tested on the
        resulting Actor CANNOT perform any Actor-related actions
        (e.g. self.createActor(), self.send()).

        This method is for TESTING only under very special
        circumstances; if you're not sure you need this, then you
        probably don't.
        """
        # Create the Actor within the system.
        aAddr = ActorSystem().createActor(actorClass)
        # This depends on the internals of the systemBase
        return ActorSystem()._systemBase.actorRegistry[aAddr.actorAddressString].instance
###
### pytest fixtures and helpers
###
# Last admin port handed out by get_free_admin_port_random (None until first use).
testAdminPort = None

def get_free_admin_port_random():
    """Return an admin port number without probing the network.

    The first call picks a random port in 10000-30000 — outside the
    reserved range (below 1024) and the typical ephemeral ranges
    (32768+ on Linux, 49152+ per IANA, 1024-5000 on older Windows).
    Subsequent calls simply increment the previous value.
    """
    global testAdminPort
    if testAdminPort is None:
        import random
        testAdminPort = random.randint(10000, 30000)
    else:
        testAdminPort += 1
    return testAdminPort
def get_free_admin_port():
    """Find a port that is currently bindable for both TCP and UDP.

    Tries up to 100 random ports in 5000-30000, verifying each candidate
    by binding (and immediately closing) a TCP and a UDP socket.  Falls
    back to get_free_admin_port_random() if every probe fails.

    Returns:
        int: a port number believed to be free (a race with other
        processes between probe and actual use is still possible).
    """
    import socket
    import random
    for _ in range(100):
        port = random.randint(5000, 30000)
        try:
            for sock_kind, proto in [(socket.SOCK_STREAM, socket.IPPROTO_TCP),
                                     (socket.SOCK_DGRAM, socket.IPPROTO_UDP)]:
                s = socket.socket(socket.AF_INET, sock_kind, proto)
                try:
                    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    s.bind(('', port))
                finally:
                    # Always release the probe socket, even when bind raises
                    # (the original leaked the socket on bind failure).
                    s.close()
            return port
        except Exception:
            pass
    return get_free_admin_port_random()
@pytest.fixture(params=['simpleSystemBase',
                        'multiprocQueueBase',
                        'multiprocUDPBase',
                        'multiprocTCPBase',
                        'multiprocTCPBase-AdminRouting',
                        'multiprocTCPBase-AdminRoutingTXOnly',
                        ])
def asys(request):
    """Parametrized fixture yielding an ActorSystem for each system base.

    The '-AdminRouting' / '-AdminRoutingTXOnly' suffixes select extra
    capabilities on top of multiprocTCPBase; the suffix is stripped before
    being passed as the systemBase name.
    """
    caps = {'Foo Allowed': True,
            'Cows Allowed': True,
            'Dogs Allowed': True,
            'dog': 'food'}
    if request.param.startswith('multiprocTCP') or \
       request.param.startswith('multiprocUDP'):
        # Network bases need a free admin port; the convention address
        # points at this same system.
        caps['Admin Port'] = get_free_admin_port()
        caps['Convention Address.IPv4'] = '', caps['Admin Port']
    if request.param.endswith('-AdminRouting'):
        caps['Admin Routing'] = True
    if request.param.endswith('-AdminRoutingTXOnly'):
        caps['Admin Routing'] = True
        caps['Outbound Only'] = True
    asys = ActorSystem(systemBase=request.param.partition('-')[0],
                       capabilities=caps,
                       logDefs=(simpleActorTestLogging()
                                if request.param.startswith('multiproc')
                                else False),
                       transientUnique=True)
    # Stash test-helper attributes on the system object for later fixtures.
    asys.base_name = request.param
    asys.port_num = caps.get('Admin Port', None)
    asys.txonly = request.param.endswith('-AdminRoutingTXOnly')
    request.addfinalizer(lambda asys=asys: asys.shutdown())
    return asys
def similar_asys(asys, in_convention=True, start_wait=True, capabilities=None):
    """Create a second ActorSystem with the same base as `asys`.

    Args:
        asys: Existing system (as produced by the `asys` fixture).
        in_convention: Point the new system's convention address at `asys`.
        start_wait: Sleep briefly so the two systems can connect.
        capabilities: Optional extra capabilities to start with.
    """
    caps = capabilities or {}
    if asys.base_name.startswith('multiprocTCP') or \
       asys.base_name.startswith('multiprocUDP'):
        caps['Admin Port'] = get_free_admin_port()
        if in_convention:
            caps['Convention Address.IPv4'] = '', asys.port_num
    if asys.base_name.endswith('-AdminRouting'):
        caps['Admin Routing'] = True
    asys2 = ActorSystem(systemBase=asys.base_name.partition('-')[0],
                        capabilities=caps,
                        logDefs=(simpleActorTestLogging()
                                 if asys.base_name.startswith('multiproc')
                                 else False),
                        transientUnique=True)
    asys2.base_name = asys.base_name
    asys2.port_num = caps.get('Admin Port', None)
    if in_convention and start_wait:
        time.sleep(0.25)  # Wait for Actor Systems to start and connect together
    return asys2
@pytest.fixture
def asys2(request, asys):
    """Second ActorSystem with the same base as `asys`, NOT in convention."""
    asys2 = similar_asys(asys, in_convention=False)
    # n.b. shutdown the second actor system first:
    #   1. Some tests ask asys1 to create an actor
    #   2. That actor is actually supported by asys2
    #   3. There is an external port the tester uses for each asys
    #   4. When asys2 is shutdown, it will attempt to notify the
    #      parent of the actor that the actor is dead
    #   5. This parent is the external port for asys1.
    #   6. If asys1 is shutdown first, then asys2 must time out
    #      on the transmit attempt (usually 5 minutes) before
    #      it can exit.
    #   7. If the test is re-run within this 5 minute period, it will fail
    #      because the old asys2 is still existing but in shutdown state
    #      (and will therefore rightfully refuse new actions).
    # By shutting down asys2 first, the parent notification can be
    # performed and subsequent runs don't encounter the lingering
    # asys2.
    request.addfinalizer(lambda asys=asys2: asys2.shutdown())
    return asys2
@pytest.fixture
def asys_pair(request, asys):
    """Pair of ActorSystems with the same base, joined in a convention."""
    asys2 = similar_asys(asys, in_convention=True)
    # n.b. shutdown the second actor system first:
    #   1. Some tests ask asys1 to create an actor
    #   2. That actor is actually supported by asys2
    #   3. There is an external port the tester uses for each asys
    #   4. When asys2 is shutdown, it will attempt to notify the
    #      parent of the actor that the actor is dead
    #   5. This parent is the external port for asys1.
    #   6. If asys1 is shutdown first, then asys2 must time out
    #      on the transmit attempt (usually 5 minutes) before
    #      it can exit.
    #   7. If the test is re-run within this 5 minute period, it will fail
    #      because the old asys2 is still existing but in shutdown state
    #      (and will therefore rightfully refuse new actions).
    # By shutting down asys2 first, the parent notification can be
    # performed and subsequent runs don't encounter the lingering
    # asys2.
    request.addfinalizer(lambda asys=asys2: asys2.shutdown())
    return (asys, asys2)
@pytest.fixture
def run_unstable_tests(request):
    # Command-line opt-in: unstable tests only run when --unstable is given.
    return request.config.getoption('unstable', default=False)
def unstable_test(run_unstable_tests, asys, *unstable_bases):
    """Skip the current test when `asys` uses a known-unstable system base
    and unstable tests were not explicitly enabled."""
    should_skip = asys.base_name in unstable_bases and not run_unstable_tests
    if should_skip:
        pytest.skip("Test unstable for %s system base" % asys.base_name)
def actor_system_unsupported(asys, *unsupported_bases):
    """Skip the current test when the tested functionality is unavailable
    for the system base used by `asys`."""
    if asys.base_name not in unsupported_bases:
        return
    pytest.skip("Functionality not supported for %s system base" % asys.base_name)
from thespian.system.timing import timePeriodSeconds
import time
inTestDelay = lambda period: time.sleep(timePeriodSeconds(period))
def delay_for_next_of_kin_notification(system):
    """Give `system` enough time to notice and process a child actor exit."""
    if system.base_name == 'multiprocQueueBase':
        # The multiprocQueueBase signal processor cannot interrupt a
        # sleeping Queue.get(), so for this base it is necessary to wait
        # for the timeout on the Queue.get() to allow it time to notice
        # and process the child exit.
        pause = 2.5
    elif system.base_name == 'multiprocUDPBase':
        pause = 0.6
    else:
        pause = 0.1
    time.sleep(pause)
| [
"thespian.system.timing.timePeriodSeconds",
"socket.socket",
"thespian.actors.ActorSystem",
"time.sleep",
"pytest.fixture",
"pytest.skip",
"random.randint"
] | [((5370, 5555), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['simpleSystemBase', 'multiprocQueueBase', 'multiprocUDPBase',\n 'multiprocTCPBase', 'multiprocTCPBase-AdminRouting',\n 'multiprocTCPBase-AdminRoutingTXOnly']"}), "(params=['simpleSystemBase', 'multiprocQueueBase',\n 'multiprocUDPBase', 'multiprocTCPBase', 'multiprocTCPBase-AdminRouting',\n 'multiprocTCPBase-AdminRoutingTXOnly'])\n", (5384, 5555), False, 'import pytest\n'), ((4616, 4644), 'random.randint', 'random.randint', (['(10000)', '(30000)'], {}), '(10000, 30000)\n', (4630, 4644), False, 'import random\n'), ((4883, 4910), 'random.randint', 'random.randint', (['(5000)', '(30000)'], {}), '(5000, 30000)\n', (4897, 4910), False, 'import random\n'), ((7712, 7728), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (7722, 7728), False, 'import time\n'), ((10222, 10286), 'pytest.skip', 'pytest.skip', (["('Test unstable for %s system base' % asys.base_name)"], {}), "('Test unstable for %s system base' % asys.base_name)\n", (10233, 10286), False, 'import pytest\n'), ((10395, 10473), 'pytest.skip', 'pytest.skip', (["('Functionality not supported for %s system base' % asys.base_name)"], {}), "('Functionality not supported for %s system base' % asys.base_name)\n", (10406, 10473), False, 'import pytest\n'), ((10580, 10605), 'thespian.system.timing.timePeriodSeconds', 'timePeriodSeconds', (['period'], {}), '(period)\n', (10597, 10605), False, 'from thespian.system.timing import timePeriodSeconds\n'), ((10966, 10981), 'time.sleep', 'time.sleep', (['(2.5)'], {}), '(2.5)\n', (10976, 10981), False, 'import time\n'), ((2015, 2070), 'thespian.actors.ActorSystem', 'ActorSystem', (['newBase', 'systemCapabilities'], {'logDefs': 'ldefs'}), '(newBase, systemCapabilities, logDefs=ldefs)\n', (2026, 2070), False, 'from thespian.actors import ActorSystem\n'), ((2896, 2912), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (2906, 2912), False, 'import time\n'), ((11039, 11054), 'time.sleep', 
'time.sleep', (['(0.6)'], {}), '(0.6)\n', (11049, 11054), False, 'import time\n'), ((11073, 11088), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (11083, 11088), False, 'import time\n'), ((3969, 3982), 'thespian.actors.ActorSystem', 'ActorSystem', ([], {}), '()\n', (3980, 3982), False, 'from thespian.actors import ActorSystem\n'), ((5092, 5127), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'm', 'p'], {}), '(socket.AF_INET, m, p)\n', (5105, 5127), False, 'import socket\n'), ((1963, 1989), 'thespian.actors.ActorSystem', 'ActorSystem', ([], {'logDefs': 'ldefs'}), '(logDefs=ldefs)\n', (1974, 1989), False, 'from thespian.actors import ActorSystem\n'), ((2794, 2807), 'thespian.actors.ActorSystem', 'ActorSystem', ([], {}), '()\n', (2805, 2807), False, 'from thespian.actors import ActorSystem\n'), ((4080, 4093), 'thespian.actors.ActorSystem', 'ActorSystem', ([], {}), '()\n', (4091, 4093), False, 'from thespian.actors import ActorSystem\n')] |
import logging as log
import numpy as np
import h5py
import humblerl as hrl
from humblerl import Callback, Interpreter
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal
from torch.utils.data import Dataset
from common_utils import get_model_path_if_exists
from third_party.torchtrainer import TorchTrainer, evaluate
class MDNInterpreter(Interpreter, Callback):
    """Performs state preprocessing with VAE module and concatenates it with
    hidden state of MDN module.

    Args:
        vae_model (keras.Model): Keras VAE encoder.
        mdn_model (torch.nn.Module): PyTorch MDN-RNN memory.
        latent_dim (int): Latent space dimensionality.

    Note:
        In order to work, this Interpreter system must be also passed as
        callback to 'hrl.loop(...)'!
    """
    def __init__(self, vae_model, mdn_model, latent_dim):
        self.vae_model = vae_model
        self.mdn_model = mdn_model
        self.latent_dim = latent_dim

    def __call__(self, state, reward=0.):
        # Interpreter protocol: transform (state, reward) before the agent sees it.
        return self.process_state(state), reward

    def process_state(self, state):
        # NOTE: [0][0] <- it gets first in the batch latent space mean (mu)
        latent = self.vae_model.predict(state[np.newaxis, :])[0][0]
        memory = self.mdn_model.hidden[0].cpu().detach().numpy()
        # NOTE: See HRL `ply`, `on_step_taken` that would update hidden state is called AFTER
        # Interpreter is used to preprocess next_state. So next_state has out-dated hidden state!
        # What saves us is the fact, that `state` in next `ply` call will have it updated so,
        # Transitions.state has up-to-date latent and hidden state and in all the other places
        # exactly it is used, not next state.
        return np.concatenate((latent, memory.flatten()))

    def on_episode_start(self, episode, train_mode):
        # Reset the LSTM hidden state for a fresh episode (batch size 1).
        self.mdn_model.init_hidden(1)

    def on_step_taken(self, step, transition, info):
        # Advance the memory module one step with the taken action so its
        # hidden state stays in sync with the environment.
        state = torch.from_numpy(transition.state[:self.latent_dim]).view(1, 1, -1)
        action = torch.from_numpy(np.array([transition.action])).view(1, 1, -1)
        if torch.cuda.is_available():
            state = state.cuda()
            action = action.cuda()
        with torch.no_grad(), evaluate(self.mdn_model) as net:
            net(state, action)
class MDNDataset(Dataset):
    """Dataset of sequential data to train MDN-RNN.

    Args:
        dataset_path (string): Path to HDF5 dataset file.
        sequence_len (int): Desired output sequence len.
        terminal_prob (float): Probability of sampling sequence that finishes with
            terminal state. (Default: 0.5)
        dataset_fraction (float): Fraction of episodes to expose. (Default: 1.)

    Note:
        Arrays should have the same size of the first dimension and their type should be the
        same as desired Tensor type.
    """
    def __init__(self, dataset_path, sequence_len, terminal_prob=0.5, dataset_fraction=1.):
        assert 0 < terminal_prob and terminal_prob <= 1.0, "0 < terminal_prob <= 1.0"
        assert 0 < dataset_fraction and dataset_fraction <= 1.0, "0 < dataset_fraction <= 1.0"
        self.dataset = h5py.File(dataset_path, "r")
        self.sequence_len = sequence_len
        self.terminal_prob = terminal_prob
        self.dataset_fraction = dataset_fraction
        # Dimensions are stored as HDF5 attributes by the data generator.
        self.latent_dim = self.dataset.attrs["LATENT_DIM"]
        self.action_dim = self.dataset.attrs["ACTION_DIM"]

    def __getitem__(self, idx):
        """Get sequence at random starting position of given sequence length from episode `idx`."""
        offset = 1
        # Episode boundaries (start/end timestep) for episode `idx`.
        t_start, t_end = self.dataset['episodes'][idx:idx + 2]
        episode_length = t_end - t_start
        if self.sequence_len <= episode_length - offset:
            sequence_len = self.sequence_len
        else:
            sequence_len = episode_length - offset
            log.warning(
                "Episode %d is too short to form full sequence, data will be zero-padded.", idx)
        # Sample where to start sequence of length `self.sequence_len` in episode `idx`
        # '- offset' because "next states" are offset by 'offset'
        if np.random.rand() < self.terminal_prob:
            # Take sequence ending with terminal state
            start = t_start + episode_length - sequence_len - offset
        else:
            # NOTE: np.random.randint takes EXCLUSIVE upper bound of range to sample from
            start = t_start + np.random.randint(max(1, episode_length - sequence_len - offset))
        states_ = torch.from_numpy(self.dataset['states'][start:start + sequence_len + offset])
        actions_ = torch.from_numpy(self.dataset['actions'][start:start + sequence_len])
        # Zero-padded output buffers of the full requested sequence length.
        states = torch.zeros(self.sequence_len, self.latent_dim, dtype=states_.dtype)
        next_states = torch.zeros(self.sequence_len, self.latent_dim, dtype=states_.dtype)
        actions = torch.zeros(self.sequence_len, self.action_dim, dtype=actions_.dtype)
        # Sample latent states (this is done to prevent overfitting MDN-RNN to a specific 'z'.)
        mu = states_[:, 0]
        sigma = torch.exp(states_[:, 1] / 2)
        latent = Normal(loc=mu, scale=sigma)
        z_samples = latent.sample()
        states[:sequence_len] = z_samples[:-offset]
        next_states[:sequence_len] = z_samples[offset:]
        actions[:sequence_len] = actions_
        return [states, actions], [next_states]

    def __len__(self):
        return int(self.dataset.attrs["N_GAMES"] * self.dataset_fraction)

    def close(self):
        # Release the underlying HDF5 file handle.
        self.dataset.close()
class MDN(nn.Module):
    def __init__(self, hidden_units, latent_dim, action_space, temperature, n_gaussians, num_layers=1):
        """MDN-RNN memory: an LSTM followed by a Mixture Density Network head.

        Args:
            hidden_units (int): LSTM hidden state size.
            latent_dim (int): Latent vector dimensionality.
            action_space: Environment action space; discrete actions are
                one-hot embedded, others are fed through as raw vectors.
            temperature (float): Softmax temperature for mixture weights.
            n_gaussians (int): Number of mixture components per latent dim.
            num_layers (int): Number of stacked LSTM layers. (Default: 1)
        """
        super(MDN, self).__init__()
        self.hidden_units = hidden_units
        self.latent_dim = latent_dim
        self.temperature = temperature
        self.n_gaussians = n_gaussians
        self.num_layers = num_layers
        # Identity-matrix embedding == one-hot encoding for discrete actions.
        self.embedding = nn.Embedding.from_pretrained(torch.eye(action_space.num)) \
            if isinstance(action_space, hrl.environments.Discrete) else None
        self.lstm = nn.LSTM(input_size=(latent_dim + action_space.num),
                            hidden_size=hidden_units,
                            num_layers=num_layers,
                            batch_first=True)
        # Mixture heads: weights (pi), means (mu), log std deviations.
        self.pi = nn.Linear(hidden_units, n_gaussians * latent_dim)
        self.mu = nn.Linear(hidden_units, n_gaussians * latent_dim)
        self.logsigma = nn.Linear(hidden_units, n_gaussians * latent_dim)
        # NOTE: This is here only for backward compatibility with trained checkpoint
        self.reward = nn.Linear(hidden_units, 1)
    def forward(self, latent, action, hidden=None):
        """Run a (batched) sequence through the LSTM and the MDN head.

        Args:
            latent (torch.Tensor): batch x sequence x latent dim.
            action (torch.Tensor): batch x sequence x action dim.
            hidden (tuple): Optional LSTM hidden state; when omitted the
                module's stored `self.hidden` is used.

        Returns:
            tuple: (mu, sigma, pi), each shaped
                batch x sequence x n_gaussians x latent dim.
        """
        self.lstm.flatten_parameters()
        sequence_len = latent.size(1)
        if self.embedding:
            # Use one-hot representation for discrete actions
            x = torch.cat((latent, self.embedding(action).squeeze(dim=2)), dim=2)
        else:
            # Pass raw action vector for continuous actions
            x = torch.cat((latent, action.float()), dim=2)
        h, self.hidden = self.lstm(x, hidden if hidden else self.hidden)
        # Temperature scales mixture logits before softmax
        # (higher temperature => more uniform mixture weights).
        pi = self.pi(h).view(-1, sequence_len, self.n_gaussians, self.latent_dim) / self.temperature
        pi = torch.softmax(pi, dim=2)
        logsigma = self.logsigma(h).view(-1, sequence_len, self.n_gaussians, self.latent_dim)
        sigma = torch.exp(logsigma)
        mu = self.mu(h).view(-1, sequence_len, self.n_gaussians, self.latent_dim)
        return mu, sigma, pi
def sample(self, latent, action, hidden=None):
"""Sample (simulate) next state from Mixture Density Network a.k.a. Gaussian Mixture Model.
Args:
latent (torch.Tensor): Latent vectors to start from.
Shape of tensor: batch x sequence x latent dim.
action (torch.Tensor): Actions to simulate.
Shape of tensor: batch x sequence x action dim.
hidden (tuple): Memory module (torch.nn.LSTM) hidden state.
Return:
numpy.ndarray: Latent vector of next state.
Shape of array: batch x sequence x latent dim.
Note:
You can find next hidden state in this module `hidden` member.
"""
# Simulate transition
with torch.no_grad(), evaluate(self) as net:
mu, sigma, pi = net(latent, action, hidden)
# Transform tensors to numpy arrays and move "gaussians mixture" dim to the end
# NOTE: Arrays will have shape (batch x sequence x latent dim. x num. gaussians)
mu = np.transpose(mu.cpu().detach().numpy(), axes=[0, 1, 3, 2])
sigma = np.transpose(sigma.cpu().detach().numpy(), axes=[0, 1, 3, 2])
pi = np.transpose(pi.cpu().detach().numpy(), axes=[0, 1, 3, 2])
# Sample parameters of Gaussian distribution(s) from mixture
c = pi.cumsum(axis=-1)
u = np.random.rand(*c.shape[:-1], 1)
choices = np.expand_dims((u < c).argmax(axis=-1), axis=-1)
# Sample latent vector from Gaussian distribution with mean and std. dev. from above
mean = np.take_along_axis(mu, choices, axis=-1)
stddev = np.take_along_axis(sigma, choices, axis=-1)
samples = mean + stddev * np.random.randn(*mean.shape)
return np.squeeze(samples, axis=-1)
def simulate(self, latent, actions):
"""Simulate environment trajectory.
Args:
latent (torch.Tensor): Latent vector with state(s) to start from.
Shape of tensor: batch x 1 (sequence dim.) x latent dim.
actions (torch.Tensor): Tensor with actions to take in simulated trajectory.
Shape of tensor: batch x sequence x action dim.
Return:
np.ndarray: Array of latent vectors of simulated trajectory.
Shape of array: batch x sequence x latent dim.
Note:
You can find next hidden state in this module `hidden` member.
"""
states = []
for a in range(actions.shape[1]):
# NOTE: We use np.newaxis to preserve shape of tensor.
states.append(self.sample(latent, actions[:, a, np.newaxis, :]))
# NOTE: This is a bit arbitrary to set it to float32 which happens to be type of torch
# tensors. It can blow up further in code if we'll choose to change tensors types.
latent = torch.from_numpy(states[-1]).float().to(next(self.parameters()).device)
# NOTE: Squeeze former sequence dim. (which is 1 because we inferred next latent state
# action by action) and reorder batch dim. and list sequence dim. to finally get:
# batch x len(states) (sequence dim.) x latent dim.
return np.transpose(np.squeeze(np.array(states), axis=2), axes=[1, 0, 2])
def init_hidden(self, batch_size):
device = next(self.parameters()).device
self.hidden = (
torch.zeros(self.num_layers, batch_size, self.hidden_units, device=device),
torch.zeros(self.num_layers, batch_size, self.hidden_units, device=device)
)
def build_rnn_model(rnn_params, latent_dim, action_space, model_path=None):
    """Builds MDN-RNN memory module, which model time dependencies.
    Args:
        rnn_params (dict): MDN-RNN parameters from .json config.
        latent_dim (int): Latent space dimensionality.
        action_space (hrl.environments.ActionSpace): Action space, discrete or continuous.
        model_path (str): Path to VAE ckpt. Taken from .json config if `None` (Default: None)
    Returns:
        TorchTrainer: Compiled MDN-RNN model wrapped in TorchTrainer, ready for training.
    """
    device_name = 'cuda' if torch.cuda.is_available() else 'cpu'

    def mdn_loss_function(pred, target):
        """Mixed Density Network loss function, see:
        https://mikedusenberry.com/mixture-density-networks"""
        mu, sigma, pi = pred
        seq_len = mu.size(1)
        n_latent = mu.size(3)
        # Broadcast the target against every mixture component.
        target = target.view(-1, seq_len, 1, n_latent)
        # Per-component Gaussian density of the target.
        density = torch.exp(Normal(loc=mu, scale=sigma).log_prob(target))  # TODO: Is this stable?! Check that.
        # Mixture density: weight components and sum them out (dim=2).
        mixture = torch.sum(density * pi, dim=2)
        # Mean negative log-likelihood; epsilon guards against log(0).
        return torch.mean(-torch.log(mixture + 1e-9))

    model = MDN(rnn_params['hidden_units'], latent_dim, action_space,
                rnn_params['temperature'], rnn_params['n_gaussians'])
    mdn = TorchTrainer(model, device_name=device_name)
    mdn.compile(
        optimizer=optim.Adam(mdn.model.parameters(), lr=rnn_params['learning_rate']),
        loss=mdn_loss_function
    )
    ckpt_path = get_model_path_if_exists(
        path=model_path, default_path=rnn_params['ckpt_path'], model_name="MDN-RNN")
    if ckpt_path is not None:
        mdn.load_ckpt(ckpt_path)
        log.info("Loaded MDN-RNN model weights from: %s", ckpt_path)
    return mdn
| [
"numpy.random.rand",
"torch.from_numpy",
"torch.exp",
"torch.softmax",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"numpy.take_along_axis",
"logging.info",
"torch.nn.LSTM",
"torch.mean",
"torch.eye",
"torch.distributions.Normal",
"logging.warning",
"h5py.File",
"numpy.squeez... | [((11699, 11724), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11722, 11724), False, 'import torch\n'), ((12676, 12782), 'common_utils.get_model_path_if_exists', 'get_model_path_if_exists', ([], {'path': 'model_path', 'default_path': "rnn_params['ckpt_path']", 'model_name': '"""MDN-RNN"""'}), "(path=model_path, default_path=rnn_params[\n 'ckpt_path'], model_name='MDN-RNN')\n", (12700, 12782), False, 'from common_utils import get_model_path_if_exists\n'), ((2157, 2182), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2180, 2182), False, 'import torch\n'), ((3126, 3154), 'h5py.File', 'h5py.File', (['dataset_path', '"""r"""'], {}), "(dataset_path, 'r')\n", (3135, 3154), False, 'import h5py\n'), ((4501, 4578), 'torch.from_numpy', 'torch.from_numpy', (["self.dataset['states'][start:start + sequence_len + offset]"], {}), "(self.dataset['states'][start:start + sequence_len + offset])\n", (4517, 4578), False, 'import torch\n'), ((4598, 4667), 'torch.from_numpy', 'torch.from_numpy', (["self.dataset['actions'][start:start + sequence_len]"], {}), "(self.dataset['actions'][start:start + sequence_len])\n", (4614, 4667), False, 'import torch\n'), ((4686, 4754), 'torch.zeros', 'torch.zeros', (['self.sequence_len', 'self.latent_dim'], {'dtype': 'states_.dtype'}), '(self.sequence_len, self.latent_dim, dtype=states_.dtype)\n', (4697, 4754), False, 'import torch\n'), ((4777, 4845), 'torch.zeros', 'torch.zeros', (['self.sequence_len', 'self.latent_dim'], {'dtype': 'states_.dtype'}), '(self.sequence_len, self.latent_dim, dtype=states_.dtype)\n', (4788, 4845), False, 'import torch\n'), ((4864, 4933), 'torch.zeros', 'torch.zeros', (['self.sequence_len', 'self.action_dim'], {'dtype': 'actions_.dtype'}), '(self.sequence_len, self.action_dim, dtype=actions_.dtype)\n', (4875, 4933), False, 'import torch\n'), ((5074, 5102), 'torch.exp', 'torch.exp', (['(states_[:, 1] / 2)'], {}), '(states_[:, 1] / 2)\n', (5083, 5102), 
False, 'import torch\n'), ((5120, 5147), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'mu', 'scale': 'sigma'}), '(loc=mu, scale=sigma)\n', (5126, 5147), False, 'from torch.distributions import Normal\n'), ((6074, 6194), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(latent_dim + action_space.num)', 'hidden_size': 'hidden_units', 'num_layers': 'num_layers', 'batch_first': '(True)'}), '(input_size=latent_dim + action_space.num, hidden_size=hidden_units,\n num_layers=num_layers, batch_first=True)\n', (6081, 6194), True, 'import torch.nn as nn\n'), ((6295, 6344), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(n_gaussians * latent_dim)'], {}), '(hidden_units, n_gaussians * latent_dim)\n', (6304, 6344), True, 'import torch.nn as nn\n'), ((6363, 6412), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(n_gaussians * latent_dim)'], {}), '(hidden_units, n_gaussians * latent_dim)\n', (6372, 6412), True, 'import torch.nn as nn\n'), ((6437, 6486), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(n_gaussians * latent_dim)'], {}), '(hidden_units, n_gaussians * latent_dim)\n', (6446, 6486), True, 'import torch.nn as nn\n'), ((6594, 6620), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(1)'], {}), '(hidden_units, 1)\n', (6603, 6620), True, 'import torch.nn as nn\n'), ((7244, 7268), 'torch.softmax', 'torch.softmax', (['pi'], {'dim': '(2)'}), '(pi, dim=2)\n', (7257, 7268), False, 'import torch\n'), ((7380, 7399), 'torch.exp', 'torch.exp', (['logsigma'], {}), '(logsigma)\n', (7389, 7399), False, 'import torch\n'), ((8892, 8924), 'numpy.random.rand', 'np.random.rand', (['*c.shape[:-1]', '(1)'], {}), '(*c.shape[:-1], 1)\n', (8906, 8924), True, 'import numpy as np\n'), ((9101, 9141), 'numpy.take_along_axis', 'np.take_along_axis', (['mu', 'choices'], {'axis': '(-1)'}), '(mu, choices, axis=-1)\n', (9119, 9141), True, 'import numpy as np\n'), ((9159, 9202), 'numpy.take_along_axis', 'np.take_along_axis', (['sigma', 'choices'], {'axis': '(-1)'}), '(sigma, choices, 
axis=-1)\n', (9177, 9202), True, 'import numpy as np\n'), ((9282, 9310), 'numpy.squeeze', 'np.squeeze', (['samples'], {'axis': '(-1)'}), '(samples, axis=-1)\n', (9292, 9310), True, 'import numpy as np\n'), ((12058, 12085), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'mu', 'scale': 'sigma'}), '(loc=mu, scale=sigma)\n', (12064, 12085), False, 'from torch.distributions import Normal\n'), ((12187, 12214), 'torch.sum', 'torch.sum', (['(loss * pi)'], {'dim': '(2)'}), '(loss * pi, dim=2)\n', (12196, 12214), False, 'import torch\n'), ((12270, 12286), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (12280, 12286), False, 'import torch\n'), ((12861, 12922), 'logging.info', 'log.info', (['"""Loaded MDN-RNN model weights from: %s"""', 'model_path'], {}), "('Loaded MDN-RNN model weights from: %s', model_path)\n", (12869, 12922), True, 'import logging as log\n'), ((2265, 2280), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2278, 2280), False, 'import torch\n'), ((2282, 2306), 'third_party.torchtrainer.evaluate', 'evaluate', (['self.mdn_model'], {}), '(self.mdn_model)\n', (2290, 2306), False, 'from third_party.torchtrainer import TorchTrainer, evaluate\n'), ((3843, 3944), 'logging.warning', 'log.warning', (['"""Episode %d is too short to form full sequence, data will be zero-padded."""', 'idx'], {}), "(\n 'Episode %d is too short to form full sequence, data will be zero-padded.',\n idx)\n", (3854, 3944), True, 'import logging as log\n'), ((4119, 4135), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4133, 4135), True, 'import numpy as np\n'), ((8283, 8298), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8296, 8298), False, 'import torch\n'), ((8300, 8314), 'third_party.torchtrainer.evaluate', 'evaluate', (['self'], {}), '(self)\n', (8308, 8314), False, 'from third_party.torchtrainer import TorchTrainer, evaluate\n'), ((10936, 11010), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'batch_size', 'self.hidden_units'], {'device': 
'device'}), '(self.num_layers, batch_size, self.hidden_units, device=device)\n', (10947, 11010), False, 'import torch\n'), ((11024, 11098), 'torch.zeros', 'torch.zeros', (['self.num_layers', 'batch_size', 'self.hidden_units'], {'device': 'device'}), '(self.num_layers, batch_size, self.hidden_units, device=device)\n', (11035, 11098), False, 'import torch\n'), ((12231, 12254), 'torch.log', 'torch.log', (['(loss + 1e-09)'], {}), '(loss + 1e-09)\n', (12240, 12254), False, 'import torch\n'), ((1998, 2050), 'torch.from_numpy', 'torch.from_numpy', (['transition.state[:self.latent_dim]'], {}), '(transition.state[:self.latent_dim])\n', (2014, 2050), False, 'import torch\n'), ((5946, 5973), 'torch.eye', 'torch.eye', (['action_space.num'], {}), '(action_space.num)\n', (5955, 5973), False, 'import torch\n'), ((9237, 9265), 'numpy.random.randn', 'np.random.randn', (['*mean.shape'], {}), '(*mean.shape)\n', (9252, 9265), True, 'import numpy as np\n'), ((10768, 10784), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (10776, 10784), True, 'import numpy as np\n'), ((2100, 2129), 'numpy.array', 'np.array', (['[transition.action]'], {}), '([transition.action])\n', (2108, 2129), True, 'import numpy as np\n'), ((10399, 10427), 'torch.from_numpy', 'torch.from_numpy', (['states[-1]'], {}), '(states[-1])\n', (10415, 10427), False, 'import torch\n')] |
# coding: utf-8
__author__ = "<NAME>"
from django.test import TestCase
from model_mommy import mommy
from core.models import Programs
class ProgramsModelTestCase(TestCase):
    """Tests for the ``Programs`` model."""

    def setUp(self):
        """Create a single Programs instance used by every test."""
        self.program = mommy.make(Programs)

    def tearDown(self):
        """Remove the instance created in setUp."""
        self.program.delete()

    def test_there_are_fields(self):
        """test the fields the model"""
        # assertIn states the membership check directly instead of assertTrue(x in y).
        self.assertIn('title', dir(Programs), 'Class Program does not have the field title')
        self.assertIn('start_time', dir(Programs), 'Class Program does not have the field start_time')
        self.assertIn('end_time', dir(Programs), 'Class Program does not have the field end_time')

    def test_there_is_a_program(self):
        """test if you are creating a Program correctly"""
        # FIX: ``assertEquals`` is a deprecated alias that was removed in
        # Python 3.12 — use ``assertEqual``.  Also fetch the stored row once
        # instead of re-querying the database for every field.
        self.assertEqual(Programs.objects.count(), 1)
        stored = Programs.objects.all()[0]
        self.assertEqual(stored.title, self.program.title)
        self.assertEqual(stored.start_time, self.program.start_time)
        self.assertEqual(stored.end_time, self.program.end_time)
| [
"core.models.Programs.objects.all",
"model_mommy.mommy.make",
"core.models.Programs.objects.count"
] | [((320, 340), 'model_mommy.mommy.make', 'mommy.make', (['Programs'], {}), '(Programs)\n', (330, 340), False, 'from model_mommy import mommy\n'), ((935, 959), 'core.models.Programs.objects.count', 'Programs.objects.count', ([], {}), '()\n', (957, 959), False, 'from core.models import Programs\n'), ((990, 1012), 'core.models.Programs.objects.all', 'Programs.objects.all', ([], {}), '()\n', (1010, 1012), False, 'from core.models import Programs\n'), ((1069, 1091), 'core.models.Programs.objects.all', 'Programs.objects.all', ([], {}), '()\n', (1089, 1091), False, 'from core.models import Programs\n'), ((1158, 1180), 'core.models.Programs.objects.all', 'Programs.objects.all', ([], {}), '()\n', (1178, 1180), False, 'from core.models import Programs\n')] |
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import rcParams
# Global matplotlib/seaborn styling, applied once at import time.
params = {
    # 'text.latex.preamble': ['\\usepackage{gensymb}'],
    # 'text.usetex': True,
    'font.family': 'Helvetica',
    'lines.solid_capstyle':'butt',
    'lines.markeredgewidth': 1,
}
rcParams.update(params)
sns.set_context("paper", font_scale=1.6, rc={"lines.linewidth": 2})
sns.set_style('white')
sns.set_palette("cividis")
# Absolute directory of this script.
dir_path = os.path.dirname(os.path.realpath(__file__))
def main():
    """Aggregate per-cluster occurrence counts of model inputs and GP
    primitives, then render them to ``plot_occurrence.pdf``.

    Reads ``model_sensitivities.csv`` and ``sc_occurrence_data.csv`` from the
    current working directory; writes ``plot_occurrence.pdf`` next to them.
    """
    sens = pd.read_csv('model_sensitivities.csv', header=0, index_col=0)  # ,low_memory=False)
    occu = pd.read_csv('sc_occurrence_data.csv', header=0, index_col=0)
    # Normalise the two index naming conventions to a single one.
    # BUG FIX: original used ``val[6] is not '_'`` — ``is`` compares object
    # identity, not value (SyntaxWarning on CPython >= 3.8); use ``!=``.
    occu.index = [val if val[6] != '_' else val[:6] + val[7:] for val in occu.index.values]
    occu['cluster'] = [sens.loc[((sens['model_id'] == i) & (sens['experiment'] == 'classification')), 'cluster'].values[0]
                       for i in occu.index.values]
    clust_list = ['Dominated Cluster', 'Overfit Cluster', 'Parsimonious Cluster', ]
    # Keep only models belonging to the three named clusters.
    occu = occu[[i in [1, 2, 3] for i in occu['cluster'].values]]
    occu['Cluster'] = [clust_list[i - 1] for i in occu['cluster']]
    occu = occu.drop(['training_error', 'complexity', 'test_error', 'cluster'], axis=1)
    # Count presence (not magnitude) of each term per model, then sum per cluster.
    occu[occu.columns[:-1]] = occu[occu.columns[:-1]] > 0
    occu = occu.groupby(['Cluster']).sum()
    # The last 9 columns are GP primitives (functions); the rest are input terms.
    inpu = occu[occu.columns[:-9]].stack()
    inputs = pd.DataFrame()
    inputs['Cluster'] = inpu.index.get_level_values(0)
    inputs['Input'] = inpu.index.get_level_values(1)
    inputs['category'] = [categorize(i) for i in inputs['Input'].values]
    inputs['Category'] = [translate(i) for i in inputs['category'].values]
    inputs['Lag'] = [lag(i) for i in inputs['Input'].values]
    inputs['Neighbor'] = [neighbor(i) for i in inputs['Input'].values]
    inputs['Occurrence'] = inpu.values
    func = occu[occu.columns[-9:]].stack()
    functions = pd.DataFrame()
    functions['Cluster'] = func.index.get_level_values(0)
    functions['Function'] = [func_trans(i) for i in func.index.get_level_values(1)]
    functions['Occurrence'] = func.values
    # Display orderings for the categorical axes / hues.
    orders = {'category': ['land_tree', 'land_non', 'water_pump', 'water_deliv', 'econ_tree', 'econ_non'],
              'neighborhood': ['home', 'neighbor_1', 'neighbor_2', 'neighbor_3', 'neighbor_4', 'neighbor_5'],
              'lag': ['present', 'lag1', 'lag2', 'lag3', 'lag4', 'lag5', 'lag6'],
              'function': ['Addition', 'Subtraction', 'Multiplication', 'Division', 'Negative', 'Sine', 'Cosine', 'Less Than', 'If-Then-Else'],
              'Category': ['Tree Acreage', 'Non-Tree Acreage', 'Tree Prices/Values', 'Non-Tree Prices/Values', 'Water Deliveries', 'Water Pumping'],
              'Cluster': ['Parsimonious Cluster', 'Dominated Cluster', 'Overfit Cluster'],
              }
    colors = ['midnightblue', 'Red', 'Blue']  # ,'c','m','y','b']
    fig, axes = plt.subplots(1, 2, figsize=(8, 6))
    # Panel A: per-category occurrence distributions as outline-only boxplots.
    sns.boxplot(x='Occurrence',
                y='Category',
                order=orders['Category'],
                hue='Cluster',
                hue_order=orders['Cluster'],
                data=inputs,
                whis='range',
                dodge=True,
                linewidth=2,
                palette=colors,
                ax=axes[0],
                )
    # Panel B: primitive-function occurrence counts.
    sns.scatterplot(x='Occurrence',
                    y='Function',
                    marker='o',
                    palette=colors,
                    s=100,
                    alpha=0.9,
                    hue='Cluster',
                    hue_order=orders['Cluster'],
                    data=functions,
                    ax=axes[1]
                    )
    adjust_box_widths(fig, 0.8)
    for i, artist in enumerate(axes[0].artists):
        # Set the linecolor on the artist to the facecolor, and set the facecolor to None
        col = artist.get_facecolor()
        artist.set_edgecolor(col)
        artist.set_facecolor('None')
        # Each box has 6 associated Line2D objects (to make the whiskers, fliers, etc.)
        # Loop over them here, and use the same colour as above
        for j in range(i * 6, i * 6 + 6):
            line = axes[0].lines[j]
            line.set_color(col)
            line.set_mfc(col)
            line.set_mec(col)
            line.set_solid_capstyle('butt')
        # Overlay the median line onto the lower-whisker line's y-position.
        axes[0].lines[i * 6 + 4].set_ydata(axes[0].lines[i * 6 + 2].get_ydata())
    axes[0].set_xscale('log')
    axes[0].legend_.remove()
    axes[1].legend_.remove()
    # Single shared legend, anchored to the figure's top-right corner.
    axes[1].legend(frameon=False, markerscale=2, bbox_to_anchor=(1, 1), ncol=4,
                   bbox_transform=plt.gcf().transFigure)
    axes[1].yaxis.set_label_position("right")
    axes[1].yaxis.tick_right()
    axes[0].text(x=0.95, y=0.8, s='(A)', ha='right', va='top', transform=axes[0].transAxes)
    axes[1].text(x=0.05, y=0.8, s='(B)', ha='left', va='top', transform=axes[1].transAxes)
    plt.subplots_adjust(wspace=0.05)
    fig.savefig('plot_occurrence.pdf', format='pdf', bbox_inches='tight', dpi=600, transparent=True)
from matplotlib.patches import PathPatch
def adjust_box_widths(g, fac):
    """
    Adjust the widths of a seaborn-generated boxplot.

    Shrinks every box on every Axes of figure *g* to *fac* times its current
    width, keeping its centre fixed, and moves the matching whisker/median
    lines accordingly.

    NOTE(review): despite the x-prefixed local names, this edits column 1 of
    the path vertices (the y coordinates) — the caller draws *horizontal*
    boxplots, where the "width" of a box runs along the y axis.
    """
    # iterating through Axes instances
    for ax in g.axes:
        # iterating through axes artists:
        for i,c in enumerate(ax.get_children()):
            # searching for PathPatches
            if isinstance(c, PathPatch):
                # getting current width of box:
                p = c.get_path()
                verts = p.vertices
                # Drop the path-closing vertex so it doesn't skew min/max.
                verts_sub = verts[:-1]
                xmin = np.min(verts_sub[:, 1])
                xmax = np.max(verts_sub[:, 1])
                xmid = 0.5*(xmin+xmax)
                xhalf = 0.5*(xmax - xmin)
                # setting new width of box (scaled about its midpoint)
                xmin_new = xmid-fac*xhalf
                xmax_new = xmid+fac*xhalf
                verts_sub[verts_sub[:, 1] == xmin, 1] = xmin_new
                verts_sub[verts_sub[:, 1] == xmax, 1] = xmax_new
                # setting new width of median line (any line spanning the old extent)
                for l in ax.lines:
                    if np.all(l.get_xdata() == [xmin, xmax]):
                        l.set_xdata([xmin_new, xmax_new])
def categorize(term):
    """Map a model input term to one of six high-level categories.

    Returns one of ``econ_tree``, ``econ_non``, ``water_pump``,
    ``water_deliv``, ``land_tree``, ``land_non`` or ``none``.
    """
    tree_crops = ('ALMOND', 'ALMONDHULLS', 'APRICOT', 'NECTARINES',
                  'PISTACHIO', 'PLUMS', 'WALNUT')
    # Economic terms (prices per unit / values) come first.
    if 'ppu' in term or 'value' in term:
        is_tree_crop = any(crop in term for crop in tree_crops)
        return 'econ_tree' if is_tree_crop else 'econ_non'
    # Water terms: pumping vs. surface deliveries.
    if 'Pump' in term:
        return 'water_pump'
    if 'Deliv' in term:
        return 'water_deliv'
    # Land-use terms: tree vs. non-tree acreage.
    if 'tree' in term:
        return 'land_tree'
    if 'non_' in term:
        return 'land_non'
    return 'none'
def lag(term):
    """Return the lag label ('lag1'..'lag6') embedded in *term*, else 'present'."""
    # First match in this fixed order wins (same tie-break as before).
    for candidate in ('lag1', 'lag2', 'lag3', 'lag4', 'lag5', 'lag6'):
        if candidate in term:
            return candidate
    return 'present'
def neighbor(term):
    """Return the 'neighbor_N' prefix of *term*, or 'home' for own-plot terms."""
    return term[:10] if 'neighbor' in term else 'home'
def func_trans(term):
    """Translate a GP primitive identifier into its display name.

    Unknown identifiers yield ``None``, matching the old if/elif fall-through.
    """
    display_names = {
        'lt': 'Less Than',
        'ite': 'If-Then-Else',
        'vadd': 'Addition',
        'vsub': 'Subtraction',
        'vmul': 'Multiplication',
        'vdiv': 'Division',
        'vneg': 'Negative',
        'vsin': 'Sine',
        'vcos': 'Cosine',
    }
    return display_names.get(term)
def translate(item):
    """Translate an internal category/neighbor/lag key into its display label.

    Unknown keys yield ``None``, matching the old if-chain fall-through.
    """
    labels = {
        'land_tree': 'Tree Acreage',
        'land_non': 'Non-Tree Acreage',
        'water_pump': 'Water Pumping',
        'water_deliv': 'Water Deliveries',
        'econ_tree': 'Tree Prices/Values',
        'econ_non': 'Non-Tree Prices/Values',
        'home': 'Current Plot Data',
        'neighbor_1': 'Neighbor 1 Data',
        'neighbor_2': 'Neighbor 2 Data',
        'neighbor_3': 'Neighbor 3 Data',
        'neighbor_4': 'Neighbor 4 Data',
        'neighbor_5': 'Neighbor 5 Data',
        'present': 'Present Data',
        'lag1': "Previous Year's Data",
        'lag2': 'Two Years Previous',
        'lag3': 'Three Years Previous',
        'lag4': 'Four Years Previous',
        'lag5': 'Five Years Previous',
        'lag6': 'Six Years Previous',
    }
    return labels.get(item)
# Run the full aggregation + plotting pipeline when executed as a script.
if __name__ == "__main__":
    main()
| [
"seaborn.set_palette",
"matplotlib.rcParams.update",
"pandas.read_csv",
"matplotlib.pyplot.gcf",
"seaborn.set_context",
"numpy.argmax",
"numpy.max",
"seaborn.set_style",
"os.path.realpath",
"seaborn.boxplot",
"seaborn.scatterplot",
"numpy.min",
"pandas.DataFrame",
"matplotlib.pyplot.subplo... | [((365, 388), 'matplotlib.rcParams.update', 'rcParams.update', (['params'], {}), '(params)\n', (380, 388), False, 'from matplotlib import rcParams\n'), ((390, 457), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'font_scale': '(1.6)', 'rc': "{'lines.linewidth': 2}"}), "('paper', font_scale=1.6, rc={'lines.linewidth': 2})\n", (405, 457), True, 'import seaborn as sns\n'), ((458, 480), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (471, 480), True, 'import seaborn as sns\n'), ((481, 507), 'seaborn.set_palette', 'sns.set_palette', (['"""cividis"""'], {}), "('cividis')\n", (496, 507), True, 'import seaborn as sns\n'), ((536, 562), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (552, 562), False, 'import os\n'), ((585, 646), 'pandas.read_csv', 'pd.read_csv', (['"""model_sensitivities.csv"""'], {'header': '(0)', 'index_col': '(0)'}), "('model_sensitivities.csv', header=0, index_col=0)\n", (596, 646), True, 'import pandas as pd\n'), ((674, 734), 'pandas.read_csv', 'pd.read_csv', (['"""sc_occurrence_data.csv"""'], {'header': '(0)', 'index_col': '(0)'}), "('sc_occurrence_data.csv', header=0, index_col=0)\n", (685, 734), True, 'import pandas as pd\n'), ((1455, 1469), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1467, 1469), True, 'import pandas as pd\n'), ((1930, 1944), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1942, 1944), True, 'import pandas as pd\n'), ((2918, 2952), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 6)'}), '(1, 2, figsize=(8, 6))\n', (2930, 2952), True, 'import matplotlib.pyplot as plt\n'), ((2958, 3158), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Occurrence"""', 'y': '"""Category"""', 'order': "orders['Category']", 'hue': '"""Cluster"""', 'hue_order': "orders['Cluster']", 'data': 'inputs', 'whis': '"""range"""', 'dodge': '(True)', 'linewidth': '(2)', 'palette': 'colors', 'ax': 'axes[0]'}), 
"(x='Occurrence', y='Category', order=orders['Category'], hue=\n 'Cluster', hue_order=orders['Cluster'], data=inputs, whis='range',\n dodge=True, linewidth=2, palette=colors, ax=axes[0])\n", (2969, 3158), True, 'import seaborn as sns\n'), ((3236, 3409), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""Occurrence"""', 'y': '"""Function"""', 'marker': '"""o"""', 'palette': 'colors', 's': '(100)', 'alpha': '(0.9)', 'hue': '"""Cluster"""', 'hue_order': "orders['Cluster']", 'data': 'functions', 'ax': 'axes[1]'}), "(x='Occurrence', y='Function', marker='o', palette=colors, s\n =100, alpha=0.9, hue='Cluster', hue_order=orders['Cluster'], data=\n functions, ax=axes[1])\n", (3251, 3409), True, 'import seaborn as sns\n'), ((4664, 4696), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.05)'}), '(wspace=0.05)\n', (4683, 4696), True, 'import matplotlib.pyplot as plt\n'), ((6360, 6404), 'numpy.argmax', 'np.argmax', (['[(lag_ in term) for lag_ in lags]'], {}), '([(lag_ in term) for lag_ in lags])\n', (6369, 6404), True, 'import numpy as np\n'), ((4258, 4267), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4265, 4267), True, 'import matplotlib.pyplot as plt\n'), ((5261, 5284), 'numpy.min', 'np.min', (['verts_sub[:, 1]'], {}), '(verts_sub[:, 1])\n', (5267, 5284), True, 'import numpy as np\n'), ((5296, 5319), 'numpy.max', 'np.max', (['verts_sub[:, 1]'], {}), '(verts_sub[:, 1])\n', (5302, 5319), True, 'import numpy as np\n')] |
import numpy as np
def split_ids(args, ids, folds=10):
    """Split graph ids into per-fold train/test id lists.

    COLORS-3 and TRIANGLES ship with fixed train/val/test splits (the val
    range is skipped here); every other dataset gets *folds* contiguous,
    equally-sized test folds with the complement as training ids.

    Returns:
        tuple: (train_ids, test_ids), each a list with one entry per fold.
    """
    # (train_end, val_end, test_end) boundaries of the fixed splits.
    fixed_splits = {
        'COLORS-3': (500, 3000, 10500),
        'TRIANGLES': (30000, 35000, 45000),
    }
    if args.dataset in fixed_splits:
        assert folds == 1, 'this dataset has train, val and test splits'
        train_end, val_end, test_end = fixed_splits[args.dataset]
        train_ids = [np.arange(train_end)]
        # Validation ids (train_end..val_end) are intentionally not returned.
        test_ids = [np.arange(val_end, test_end)]
    else:
        n = len(ids)
        stride = int(np.ceil(n / float(folds)))
        test_ids = [ids[start: start + stride] for start in range(0, n, stride)]
        assert np.all(
            np.unique(np.concatenate(test_ids)) == sorted(ids)), 'some graphs are missing in the test sets'
        assert len(test_ids) == folds, 'invalid test sets'
        train_ids = []
        for fold_idx in range(folds):
            held_out = test_ids[fold_idx]
            train_ids.append(np.array([gid for gid in ids if gid not in held_out]))
            assert len(train_ids[fold_idx]) + len(held_out) == len(
                np.unique(list(train_ids[fold_idx]) + list(held_out))) == n, 'invalid splits'
    return train_ids, test_ids
"numpy.concatenate",
"numpy.array",
"numpy.arange"
] | [((186, 200), 'numpy.arange', 'np.arange', (['(500)'], {}), '(500)\n', (195, 200), True, 'import numpy as np\n'), ((221, 241), 'numpy.arange', 'np.arange', (['(500)', '(3000)'], {}), '(500, 3000)\n', (230, 241), True, 'import numpy as np\n'), ((263, 285), 'numpy.arange', 'np.arange', (['(3000)', '(10500)'], {}), '(3000, 10500)\n', (272, 285), True, 'import numpy as np\n'), ((419, 435), 'numpy.arange', 'np.arange', (['(30000)'], {}), '(30000)\n', (428, 435), True, 'import numpy as np\n'), ((456, 479), 'numpy.arange', 'np.arange', (['(30000)', '(35000)'], {}), '(30000, 35000)\n', (465, 479), True, 'import numpy as np\n'), ((501, 524), 'numpy.arange', 'np.arange', (['(35000)', '(45000)'], {}), '(35000, 45000)\n', (510, 524), True, 'import numpy as np\n'), ((950, 1003), 'numpy.array', 'np.array', (['[e for e in ids if e not in test_ids[fold]]'], {}), '([e for e in ids if e not in test_ids[fold]])\n', (958, 1003), True, 'import numpy as np\n'), ((719, 743), 'numpy.concatenate', 'np.concatenate', (['test_ids'], {}), '(test_ids)\n', (733, 743), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
from aws_cdk import core
from awscdk.app_stack import ApplicationStack
# naming conventions, also used for ACM certs, DNS Records, resource naming
# Dynamically generated resource names created in CDK are used in GitLab CI
# such as cluster name, task definitions, etc.
# All settings come from environment variables with sensible dev defaults.
environment_name = f"{os.environ.get('ENVIRONMENT', 'dev')}"
base_domain_name = os.environ.get("DOMAIN_NAME", "mysite.com")
# if the the production environent subdomain should nott be included in the URL
# redefine `full_domain_name` to `base_domain_name` for that environment
full_domain_name = f"{environment_name}.{base_domain_name}"  # dev.mysite.com
if environment_name == "app":
    full_domain_name = base_domain_name
base_app_name = os.environ.get("APP_NAME", "mysite-com")
full_app_name = f"{environment_name}-{base_app_name}"  # dev-mysite-com
aws_region = os.environ.get("AWS_DEFAULT_REGION", "us-east-1")
# Instantiate the CDK app and the single application stack.
app = core.App()
stack = ApplicationStack(
    app,
    f"{full_app_name}-stack",
    environment_name=environment_name,
    base_domain_name=base_domain_name,
    full_domain_name=full_domain_name,
    base_app_name=base_app_name,
    full_app_name=full_app_name,
    env={"region": aws_region},
)
# in order to be able to tag ECS resources, you need to go to
# the ECS Console > Account Settings > Amazon ECS ARN and resource ID settings
# and enable at least Service and Task. Optionally enable
# CloudWatch Container Insights
stack.node.apply_aspect(core.Tag("StackName", full_app_name))
# Emit the synthesized CloudFormation template.
app.synth()
| [
"awscdk.app_stack.ApplicationStack",
"aws_cdk.core.Tag",
"os.environ.get",
"aws_cdk.core.App"
] | [((386, 429), 'os.environ.get', 'os.environ.get', (['"""DOMAIN_NAME"""', '"""mysite.com"""'], {}), "('DOMAIN_NAME', 'mysite.com')\n", (400, 429), False, 'import os\n'), ((747, 787), 'os.environ.get', 'os.environ.get', (['"""APP_NAME"""', '"""mysite-com"""'], {}), "('APP_NAME', 'mysite-com')\n", (761, 787), False, 'import os\n'), ((873, 922), 'os.environ.get', 'os.environ.get', (['"""AWS_DEFAULT_REGION"""', '"""us-east-1"""'], {}), "('AWS_DEFAULT_REGION', 'us-east-1')\n", (887, 922), False, 'import os\n'), ((931, 941), 'aws_cdk.core.App', 'core.App', ([], {}), '()\n', (939, 941), False, 'from aws_cdk import core\n'), ((950, 1203), 'awscdk.app_stack.ApplicationStack', 'ApplicationStack', (['app', 'f"""{full_app_name}-stack"""'], {'environment_name': 'environment_name', 'base_domain_name': 'base_domain_name', 'full_domain_name': 'full_domain_name', 'base_app_name': 'base_app_name', 'full_app_name': 'full_app_name', 'env': "{'region': aws_region}"}), "(app, f'{full_app_name}-stack', environment_name=\n environment_name, base_domain_name=base_domain_name, full_domain_name=\n full_domain_name, base_app_name=base_app_name, full_app_name=\n full_app_name, env={'region': aws_region})\n", (966, 1203), False, 'from awscdk.app_stack import ApplicationStack\n'), ((1480, 1516), 'aws_cdk.core.Tag', 'core.Tag', (['"""StackName"""', 'full_app_name'], {}), "('StackName', full_app_name)\n", (1488, 1516), False, 'from aws_cdk import core\n'), ((328, 364), 'os.environ.get', 'os.environ.get', (['"""ENVIRONMENT"""', '"""dev"""'], {}), "('ENVIRONMENT', 'dev')\n", (342, 364), False, 'import os\n')] |
import json
import logging
import stripe
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from pretix.base.models import Event, Order
from pretix.plugins.stripe.payment import Stripe
logger = logging.getLogger('pretix.plugins.stripe')
@csrf_exempt
@require_POST
def webhook(request):
    """Stripe webhook endpoint: mark a pretix order refunded when Stripe
    reports a refunded charge.

    Ignored / malformed events are answered with HTTP 200 so Stripe does not
    keep retrying them; only Stripe API failures return 500 (triggering a
    retry from Stripe's side).
    """
    event_json = json.loads(request.body.decode('utf-8'))
    event_type = event_json['type']
    if event_type != 'charge.refunded':
        # Not interested
        return HttpResponse('Event is not a refund', status=200)

    charge = event_json['data']['object']
    if charge['object'] != 'charge':
        return HttpResponse('Object is not a charge', status=200)

    metadata = charge['metadata']
    if 'event' not in metadata:
        return HttpResponse('Event not given', status=200)
    # BUG FIX: metadata['order'] was previously read unguarded below; a charge
    # without an order id raised KeyError (HTTP 500 + endless Stripe retries)
    # instead of a clean 200.
    if 'order' not in metadata:
        return HttpResponse('Order not given', status=200)

    try:
        event = Event.objects.current.get(identity=metadata['event'])
    except Event.DoesNotExist:
        return HttpResponse('Event not found', status=200)

    try:
        order = Order.objects.current.get(identity=metadata['order'])
    except Order.DoesNotExist:
        return HttpResponse('Order not found', status=200)

    prov = Stripe(event)
    prov._init_api()
    try:
        # Re-fetch the charge so we act on Stripe's current state, not the payload.
        charge = stripe.Charge.retrieve(charge['id'])
    except stripe.error.StripeError as err:
        # Lazy %-style args instead of eager string formatting.
        logger.error('Stripe error on webhook: %s Event data: %s', str(err), str(event_json))
        return HttpResponse('StripeError', status=500)

    if charge['refunds']['total_count'] > 0 and order.status == Order.STATUS_PAID:
        order.mark_refunded()
    return HttpResponse(status=200)
| [
"logging.getLogger",
"pretix.base.models.Order.objects.current.get",
"django.http.HttpResponse",
"stripe.Charge.retrieve",
"pretix.base.models.Event.objects.current.get",
"pretix.plugins.stripe.payment.Stripe"
] | [((290, 332), 'logging.getLogger', 'logging.getLogger', (['"""pretix.plugins.stripe"""'], {}), "('pretix.plugins.stripe')\n", (307, 332), False, 'import logging\n'), ((1232, 1245), 'pretix.plugins.stripe.payment.Stripe', 'Stripe', (['event'], {}), '(event)\n', (1238, 1245), False, 'from pretix.plugins.stripe.payment import Stripe\n'), ((1653, 1677), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(200)'}), '(status=200)\n', (1665, 1677), False, 'from django.http import HttpResponse\n'), ((558, 607), 'django.http.HttpResponse', 'HttpResponse', (['"""Event is not a refund"""'], {'status': '(200)'}), "('Event is not a refund', status=200)\n", (570, 607), False, 'from django.http import HttpResponse\n'), ((703, 753), 'django.http.HttpResponse', 'HttpResponse', (['"""Object is not a charge"""'], {'status': '(200)'}), "('Object is not a charge', status=200)\n", (715, 753), False, 'from django.http import HttpResponse\n'), ((836, 879), 'django.http.HttpResponse', 'HttpResponse', (['"""Event not given"""'], {'status': '(200)'}), "('Event not given', status=200)\n", (848, 879), False, 'from django.http import HttpResponse\n'), ((906, 959), 'pretix.base.models.Event.objects.current.get', 'Event.objects.current.get', ([], {'identity': "metadata['event']"}), "(identity=metadata['event'])\n", (931, 959), False, 'from pretix.base.models import Event, Order\n'), ((1076, 1129), 'pretix.base.models.Order.objects.current.get', 'Order.objects.current.get', ([], {'identity': "metadata['order']"}), "(identity=metadata['order'])\n", (1101, 1129), False, 'from pretix.base.models import Event, Order\n'), ((1294, 1330), 'stripe.Charge.retrieve', 'stripe.Charge.retrieve', (["charge['id']"], {}), "(charge['id'])\n", (1316, 1330), False, 'import stripe\n'), ((1006, 1049), 'django.http.HttpResponse', 'HttpResponse', (['"""Event not found"""'], {'status': '(200)'}), "('Event not found', status=200)\n", (1018, 1049), False, 'from django.http import HttpResponse\n'), ((1176, 
1219), 'django.http.HttpResponse', 'HttpResponse', (['"""Order not found"""'], {'status': '(200)'}), "('Order not found', status=200)\n", (1188, 1219), False, 'from django.http import HttpResponse\n'), ((1487, 1526), 'django.http.HttpResponse', 'HttpResponse', (['"""StripeError"""'], {'status': '(500)'}), "('StripeError', status=500)\n", (1499, 1526), False, 'from django.http import HttpResponse\n')] |
import re
from sloth.grammar import LexicalGrammar
from sloth.token import Token
class LexerError(Exception):
    """Raised when the lexer cannot match any rule at a source position."""

    def __init__(self, pos):
        # pos is a (line, column) pair pointing at the offending character.
        self.pos = pos
        self.description = 'LexerError at Line {}, Column {}'.format(pos[0], pos[1])

    def __str__(self):
        return self.description

    def __repr__(self):
        return 'LexerError {}'.format(self.pos)
class Lexer(object):
    """Regex-driven lexer that turns a source buffer into Token objects.

    Tracks (line, column) positions so every yielded Token — and every
    LexerError — can report exactly where it came from.
    """
    def __init__(self, buf):
        # Initialise position state
        self.pos = 0
        self.line = 1
        self.column = 1
        # Remove trailing whitespace from buffer
        self.buffer = buf.rstrip()
        # Remove leading whitespace from buffer
        # But advance lexer position accordingly
        # Ensures subsequent token positions are accurate
        if self.buffer:
            self.skip_whitespace([' ', '\t', '\n'])
        # Compile regex for lexing: one named group per LexicalGrammar rule,
        # so match.lastgroup identifies which rule matched.
        grouped_rules = ['(?P<{}>{})'.format(t.name, t.value) for t in LexicalGrammar]
        self.regex = re.compile('|'.join(grouped_rules))
    def advance(self):
        """Move one character forward, keeping line/column counters in sync."""
        if self.buffer[self.pos] == '\n':
            self.line += 1
            self.column = 1
        else:
            self.column += 1
        self.pos += 1
    def skip_whitespace(self, whitespace):
        """Advance while the current character is in *whitespace*.

        The buffer is rstrip()ed in __init__, so a non-whitespace character
        always terminates the scan before the end of the buffer.
        """
        while self.buffer[self.pos] in whitespace:
            self.advance()
    def next_token(self):
        """Generator yielding Token objects; raises LexerError when no rule matches."""
        while self.pos < len(self.buffer):
            # Advance past whitespace (newlines are left for the grammar to consume)
            self.skip_whitespace([' ', '\t'])
            # Apply lexing regex at current position
            match = self.regex.match(self.buffer[self.pos :])
            # Store current position
            position = (self.line, self.column)
            if not match:
                raise LexerError(position)
            token_name = match.lastgroup
            # Extract lexeme
            lexeme = match.group(token_name)
            # Advance lexer position past current lexeme
            for _ in lexeme:
                self.advance()
            # Produce token, skip comments
            if token_name != 'COMMENT':
                yield Token(token_name, lexeme, position)
    def all_tokens(self):
        """Return every remaining token as a list."""
        return list(self.next_token())
| [
"sloth.token.Token"
] | [((2137, 2172), 'sloth.token.Token', 'Token', (['token_name', 'lexeme', 'position'], {}), '(token_name, lexeme, position)\n', (2142, 2172), False, 'from sloth.token import Token\n')] |
from com.huawei.iotplatform.client.dto.DeviceCommandCancelTaskRespV4 import DeviceCommandCancelTaskRespV4
from com.huawei.iotplatform.client.dto.Pagination import Pagination
class QueryDeviceCmdCancelTaskOutDTO(object):
    """DTO for the "query device command cancel task" response.

    Holds the pagination information and the cancel-task payload
    returned by the IoT platform.
    """

    def __init__(self):
        # BUG FIX: pagination/data used to be *class* attributes, so every
        # instance shared the same mutable Pagination and
        # DeviceCommandCancelTaskRespV4 objects. Create fresh per-instance
        # objects instead; the getters/setters below are unchanged.
        self.pagination = Pagination()
        self.data = DeviceCommandCancelTaskRespV4()

    def getPagination(self):
        """Return the Pagination object of this response."""
        return self.pagination

    def setPagination(self, pagination):
        self.pagination = pagination

    def getData(self):
        """Return the DeviceCommandCancelTaskRespV4 payload."""
        return self.data

    def setData(self, data):
        self.data = data
| [
"com.huawei.iotplatform.client.dto.DeviceCommandCancelTaskRespV4.DeviceCommandCancelTaskRespV4",
"com.huawei.iotplatform.client.dto.Pagination.Pagination"
] | [((239, 251), 'com.huawei.iotplatform.client.dto.Pagination.Pagination', 'Pagination', ([], {}), '()\n', (249, 251), False, 'from com.huawei.iotplatform.client.dto.Pagination import Pagination\n'), ((263, 294), 'com.huawei.iotplatform.client.dto.DeviceCommandCancelTaskRespV4.DeviceCommandCancelTaskRespV4', 'DeviceCommandCancelTaskRespV4', ([], {}), '()\n', (292, 294), False, 'from com.huawei.iotplatform.client.dto.DeviceCommandCancelTaskRespV4 import DeviceCommandCancelTaskRespV4\n')] |
import os
import subprocess
class TestTasks:
""" Test that the tasks work with invoke. """
CMD_KWARGS = dict(
capture_output=True,
encoding="utf-8",
shell=True,
env=os.environ.copy(),
)
def test_unapproved_licenses(self):
""" Should emit table of unapproved licenses. """
reply = subprocess.run("poetry run invoke license.unapproved-licenses", **self.CMD_KWARGS)
output = reply.stdout
# assumes we require pylint and pylint is GPL and that's on our unapproved list
assert "pylint" in output
assert "GNU General Public License" in output
def test_write_table(self):
""" Should emit a table of licenses used. """
reply = subprocess.run("poetry run invoke license.write-table --outfile='-'", **self.CMD_KWARGS)
output = reply.stdout
# assumes we require coverage and at least one package we depend on is Apache licensed
assert 'coverage' in output
assert 'Apache Software License' in output
| [
"subprocess.run",
"os.environ.copy"
] | [((347, 434), 'subprocess.run', 'subprocess.run', (['"""poetry run invoke license.unapproved-licenses"""'], {}), "('poetry run invoke license.unapproved-licenses', **self.\n CMD_KWARGS)\n", (361, 434), False, 'import subprocess\n'), ((739, 832), 'subprocess.run', 'subprocess.run', (['"""poetry run invoke license.write-table --outfile=\'-\'"""'], {}), '("poetry run invoke license.write-table --outfile=\'-\'", **\n self.CMD_KWARGS)\n', (753, 832), False, 'import subprocess\n'), ((207, 224), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (222, 224), False, 'import os\n')] |
# Space: O(n)
# Time: O(n)
import collections
class Solution():
    def findWords(self, board, words):
        """Return the set of words from *words* that can be traced on *board*
        by moving between horizontally/vertically adjacent cells, using each
        cell at most once per word. Returns [] for an empty board.
        """
        n_rows = len(board)
        if n_rows == 0: return []
        n_cols = len(board[0])
        if n_cols == 0: return []
        # Trie flattened into prefix -> set of characters that may follow it.
        next_chars = collections.defaultdict(set)
        next_chars[''] = {w[0] for w in words}
        complete = set()   # full words still to be found
        found = set()      # words located so far
        for w in words:
            for i, ch in enumerate(w):
                if i + 1 < len(w):
                    next_chars[w[:i + 1]].add(w[i + 1])
                else:
                    complete.add(w)
        def dfs(prefix, x, y):
            # A prefix that is itself a pending word has been spelled out.
            if prefix in complete:
                found.add(prefix)
                complete.remove(prefix)
            if not (0 <= x < n_cols and 0 <= y < n_rows):
                return
            cell = board[y][x]
            # 0 marks a cell already used on the current path.
            if cell == 0 or cell not in next_chars[prefix] or len(found) == len(words):
                return
            board[y][x] = 0  # mark visited
            for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                dfs(prefix + cell, x + dx, y + dy)
            board[y][x] = cell  # restore for other paths
        for y in range(n_rows):
            for x in range(n_cols):
                if len(found) == len(words):
                    return found
                dfs('', x, y)
        return found
| [
"collections.defaultdict"
] | [((350, 378), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (373, 378), False, 'import collections\n')] |
# coding: utf-8
# - We are creating a very simple machine learning model.<br>
# - Using dataset: tic-tac-toe.data.txt with user-defined columns.<br>
# - We are treating this problem as a supervised learning problem.<br>
# In[74]:
# This the rough sketch of the processing that happened in my brain while creating the program.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
# In[52]:
# Loading data
# Load the dataset twice: `data` is label-encoded into features, while
# `data_copy` keeps its columns intact so the labels can be pulled later.
data = pd.read_csv("../tic-tac-toe.data.txt", sep=",")
data_copy = pd.read_csv("../tic-tac-toe.data.txt", sep=",")
# Name the nine board-cell columns plus the outcome column.
board_columns = ["first_row_left", "first_row_middle", "first_row_right",
                 "center_row_left", "center_row_middle", "center_row_right",
                 "bottom_row_left", "bottom_row_middle", "bottom_row_right",
                 "is_win"]
data.columns = board_columns
data_copy.columns = board_columns

# Label-encode the moves; 'b' (blank) is left as NaN and imputed below.
mapping_for_moves = {'x': 1, "o": 0}
mapping_for_wins = {"positive": 1, "negative": 0}  # positive is win, negative is lose
data.is_win = data.is_win.map(mapping_for_wins)
data_copy.is_win = data_copy.is_win.map(mapping_for_wins)
data = data.drop(columns=["is_win"], axis=1)

for col in data.columns:  # apply the move mapping to every board column
    data[col] = data[col].map(mapping_for_moves)

# Extract features and labels.
features = data.values
labels = data_copy.is_win.values

# Fill missing values (the 'b' cells) with the column mean.
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22 — migrate to sklearn.impute.SimpleImputer when upgrading.
features = (Imputer().fit_transform(features))

# Cast to plain ints. (`np.int` was a deprecated alias of the builtin
# `int` and is removed in numpy >= 1.24.)
features = features.astype(int)
labels = labels.astype(int)

features_train, features_test, labels_train, labels_test = train_test_split(
    features, labels, random_state=3, shuffle=True)

# Clearly a classification problem: try a few classifiers.
clf = DecisionTreeClassifier()
clf.fit(features_train, labels_train)
d_tree_score = clf.score(features_test, labels_test)  # Good result!

clf2 = SVC()  # Clearly the data is non linear.
clf2.fit(features_train, labels_train)
clf2.score(features_test, labels_test)  # Not good!

clf3 = KNeighborsClassifier(n_neighbors=1)
clf3.fit(features_train, labels_train)
k_score = clf3.score(features_test, labels_test)

predictions = clf3.predict(features_test)

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(labels_test, predictions)
np.where(labels_test != predictions)  # indices of misclassified samples

from sklearn.metrics import classification_report
c = classification_report(labels_test, predictions)

from sklearn.ensemble import RandomForestClassifier
r = RandomForestClassifier(n_estimators=100)  # With 100 decision trees
r.fit(features_train, labels_train)
r_forest = r.score(features_test, labels_test)
p = r.predict(features_test)
# BUG FIX: the original compared labels_test against features_test (the
# whole feature matrix); compare against the forest's predictions instead.
np.where(labels_test != p)  # Only one misclassified
cm = confusion_matrix(labels_test, p)
| [
"sklearn.metrics.confusion_matrix",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.where",
"sklearn.metrics.classification_report",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessin... | [((662, 709), 'pandas.read_csv', 'pd.read_csv', (['"""../tic-tac-toe.data.txt"""'], {'sep': '""","""'}), "('../tic-tac-toe.data.txt', sep=',')\n", (673, 709), True, 'import pandas as pd\n'), ((724, 771), 'pandas.read_csv', 'pd.read_csv', (['"""../tic-tac-toe.data.txt"""'], {'sep': '""","""'}), "('../tic-tac-toe.data.txt', sep=',')\n", (735, 771), True, 'import pandas as pd\n'), ((2335, 2399), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'labels'], {'random_state': '(3)', 'shuffle': '(True)'}), '(features, labels, random_state=3, shuffle=True)\n', (2351, 2399), False, 'from sklearn.model_selection import train_test_split\n'), ((2554, 2578), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (2576, 2578), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2706, 2711), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (2709, 2711), False, 'from sklearn.svm import SVC\n'), ((2857, 2892), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (2877, 2892), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3138, 3180), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (3154, 3180), False, 'from sklearn.metrics import confusion_matrix\n'), ((3212, 3248), 'numpy.where', 'np.where', (['(labels_test != predictions)'], {}), '(labels_test != predictions)\n', (3220, 3248), True, 'import numpy as np\n'), ((3364, 3411), 'sklearn.metrics.classification_report', 'classification_report', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (3385, 3411), False, 'from sklearn.metrics import classification_report\n'), ((3499, 3539), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (3521, 3539), False, 'from sklearn.ensemble import 
RandomForestClassifier\n'), ((3677, 3715), 'numpy.where', 'np.where', (['(labels_test != features_test)'], {}), '(labels_test != features_test)\n', (3685, 3715), True, 'import numpy as np\n'), ((3760, 3792), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels_test', 'p'], {}), '(labels_test, p)\n', (3776, 3792), False, 'from sklearn.metrics import confusion_matrix\n'), ((1997, 2006), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {}), '()\n', (2004, 2006), False, 'from sklearn.preprocessing import Imputer\n')] |
import aiohttp
from matrix_traversal.utils import (
traverse_matrix_counterclockwise,
get_formatted_matrix,
check_url)
from typing import List
from aiohttp import ClientError
from asyncio.exceptions import TimeoutError
async def send_request(url: str) -> List[List[int]]:
    """
    This function sends a request to URL and processes the response, if any
    :param url: URL
    :return: formatted matrix if response exists; None on any error
    """
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                # 4xx/5xx are reported to stdout, not raised; caller gets None.
                if 400 <= resp.status < 500:
                    print(f'Client error - {resp.status}')
                elif resp.status >= 500:
                    print(f'Server error - {resp.status}')
                else:
                    matrix = await get_formatted_matrix(resp)
                    return matrix
    except TimeoutError:
        print("Timeout error!")
    except ClientError:
        # aiohttp base class for connection/URL problems.
        print("Some problems with connection or URL")
    except Exception as e:
        # Last-resort guard: report anything unexpected instead of crashing.
        print(e)
async def get_matrix(url: str) -> List[int]:
    """
    Fetch a matrix from *url* and return the list obtained by traversing
    it counterclockwise. Returns None (after printing a message) when the
    URL is invalid or no matrix could be retrieved.
    :param url: URL
    :return: list obtained by traversing the matrix
    """
    if not check_url(url):
        print("Invalid URL address")
        return
    matrix = await send_request(url)
    if matrix:
        return traverse_matrix_counterclockwise(matrix)
| [
"matrix_traversal.utils.check_url",
"aiohttp.ClientSession",
"matrix_traversal.utils.get_formatted_matrix",
"matrix_traversal.utils.traverse_matrix_counterclockwise"
] | [((1339, 1353), 'matrix_traversal.utils.check_url', 'check_url', (['url'], {}), '(url)\n', (1348, 1353), False, 'from matrix_traversal.utils import traverse_matrix_counterclockwise, get_formatted_matrix, check_url\n'), ((475, 498), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (496, 498), False, 'import aiohttp\n'), ((1433, 1473), 'matrix_traversal.utils.traverse_matrix_counterclockwise', 'traverse_matrix_counterclockwise', (['matrix'], {}), '(matrix)\n', (1465, 1473), False, 'from matrix_traversal.utils import traverse_matrix_counterclockwise, get_formatted_matrix, check_url\n'), ((821, 847), 'matrix_traversal.utils.get_formatted_matrix', 'get_formatted_matrix', (['resp'], {}), '(resp)\n', (841, 847), False, 'from matrix_traversal.utils import traverse_matrix_counterclockwise, get_formatted_matrix, check_url\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import PyPDF2
# Walk PATH (default: current directory) and encrypt every not-yet-encrypted
# PDF in place with the password given on the command line.
length = len(sys.argv)
if length >= 2:
    PASSWORD = sys.argv[1]
else:
    print('usage: cmd password [path]')
    sys.exit(1)
if length == 3:
    PATH = sys.argv[2]
else:
    PATH = os.curdir
for folder_name, _, filenames in os.walk(PATH):
    for filename in filenames:
        if filename.endswith('.pdf'):
            filename = folder_name + os.sep + filename
            with open(filename, 'rb') as pdf_file:
                try:
                    pdf_reader = PyPDF2.PdfFileReader(pdf_file)
                    # Skip PDFs that are already password protected.
                    if not pdf_reader.isEncrypted:
                        pdf_writer = PyPDF2.PdfFileWriter()
                        pdf_writer.encrypt(PASSWORD)
                        for page_num in range(pdf_reader.numPages):
                            page = pdf_reader.getPage(page_num)
                            pdf_writer.addPage(page)
                        # Write to a temporary name, then replace the original.
                        with open(filename + '_encrypted.pdf', 'wb') as f:
                            pdf_writer.write(f)
                        os.rename(filename + '_encrypted.pdf', filename)
                except Exception as err:
                    # Report the broken file and keep walking.
                    print('{0}: {1}'.format(filename, err))
| [
"os.rename",
"sys.exit",
"PyPDF2.PdfFileWriter",
"PyPDF2.PdfFileReader",
"os.walk"
] | [((313, 326), 'os.walk', 'os.walk', (['PATH'], {}), '(PATH)\n', (320, 326), False, 'import os\n'), ((200, 211), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (208, 211), False, 'import sys\n'), ((557, 587), 'PyPDF2.PdfFileReader', 'PyPDF2.PdfFileReader', (['pdf_file'], {}), '(pdf_file)\n', (577, 587), False, 'import PyPDF2\n'), ((676, 698), 'PyPDF2.PdfFileWriter', 'PyPDF2.PdfFileWriter', ([], {}), '()\n', (696, 698), False, 'import PyPDF2\n'), ((1084, 1132), 'os.rename', 'os.rename', (["(filename + '_encrypted.pdf')", 'filename'], {}), "(filename + '_encrypted.pdf', filename)\n", (1093, 1132), False, 'import os\n')] |
import turtle as t
# Read the side length, then draw a square with the turtle.
side_length = float(input("What is your Zel: "))
for _ in range(4):
    t.fd(side_length)
    t.lt(90)
t.done()
| [
"turtle.done",
"turtle.lt",
"turtle.fd"
] | [((103, 111), 'turtle.done', 't.done', ([], {}), '()\n', (109, 111), True, 'import turtle as t\n'), ((82, 91), 'turtle.fd', 't.fd', (['zel'], {}), '(zel)\n', (86, 91), True, 'import turtle as t\n'), ((94, 102), 'turtle.lt', 't.lt', (['(90)'], {}), '(90)\n', (98, 102), True, 'import turtle as t\n')] |
"""
Ashley URLs (that includes django machina urls)
"""
from django.urls import include, path, re_path
from machina import urls as machina_urls
from ashley.api import urls as api_urls
from ashley.views import ChangeUsernameView, ForumLTIView, ManageModeratorsView
# API version prefix used by the REST endpoints below.
API_PREFIX = "v1.0"
urlpatterns = [
    # LTI launch endpoint for a forum, identified by its UUID.
    path("lti/forum/<uuid:uuid>", ForumLTIView.as_view(), name="forum.lti.view"),
    path(
        "profile/username",
        ChangeUsernameView.as_view(),
        name="forum.username.change",
    ),
    path("moderators/", ManageModeratorsView.as_view(), name="moderators"),
    # Versioned REST API routes.
    re_path(r"api/{}/".format(API_PREFIX), include(api_urls)),
    # All remaining forum URLs are delegated to django-machina.
    path("forum/", include(machina_urls)),
]
| [
"ashley.views.ManageModeratorsView.as_view",
"django.urls.include",
"ashley.views.ForumLTIView.as_view",
"ashley.views.ChangeUsernameView.as_view"
] | [((338, 360), 'ashley.views.ForumLTIView.as_view', 'ForumLTIView.as_view', ([], {}), '()\n', (358, 360), False, 'from ashley.views import ChangeUsernameView, ForumLTIView, ManageModeratorsView\n'), ((432, 460), 'ashley.views.ChangeUsernameView.as_view', 'ChangeUsernameView.as_view', ([], {}), '()\n', (458, 460), False, 'from ashley.views import ChangeUsernameView, ForumLTIView, ManageModeratorsView\n'), ((531, 561), 'ashley.views.ManageModeratorsView.as_view', 'ManageModeratorsView.as_view', ([], {}), '()\n', (559, 561), False, 'from ashley.views import ChangeUsernameView, ForumLTIView, ManageModeratorsView\n'), ((626, 643), 'django.urls.include', 'include', (['api_urls'], {}), '(api_urls)\n', (633, 643), False, 'from django.urls import include, path, re_path\n'), ((665, 686), 'django.urls.include', 'include', (['machina_urls'], {}), '(machina_urls)\n', (672, 686), False, 'from django.urls import include, path, re_path\n')] |
#!/usr/bin/env python
import certifire
import certifire.plugins.acme
import certifire.plugins.dns_providers
from codecs import open
from setuptools import setup, find_packages
import sys
# parse_requirements lives in pip's private API; its location moved in pip 10.
try:
    # for pip >= 10
    from pip._internal.req import parse_requirements
except ImportError:
    # for pip <= 9.0.3
    print("error: Upgrade to a pip version newer than 10. Run \"pip install "
          "--upgrade pip\".")
    sys.exit(1)
# Long description for PyPI comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Solution from http://bit.ly/29Yl8VN
def resolve_requires(requirements_file):
    """Return the requirement strings parsed from *requirements_file*.

    Relies on pip's private parse_requirements API, whose item attribute
    changed name across pip versions (ir.req vs pr.requirement), hence the
    AttributeError fallback.
    """
    try:
        requirements = parse_requirements("./%s" % requirements_file,
                                           session=False)
        return [str(ir.req) for ir in requirements]
    except AttributeError:
        # for pip >= 20.1.x
        # Need to run again as the first run was ruined by the exception
        requirements = parse_requirements("./%s" % requirements_file,
                                           session=False)
        # pr stands for parsed_requirement
        return [str(pr.requirement) for pr in requirements]
# Package metadata and console entry points for setuptools.
setup(
    name="certifire",
    version=certifire.get_version(),
    license=certifire.__licence__,
    description=("Certifire Minimal - Automate Certificates from let'sencrypt"),
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/certi-fire/certifire",
    author=certifire.get_author(),
    author_email=certifire.get_author_email(),
    classifiers=[
        "Development Status :: 1 - Alpha",
        "License :: OSI Approved :: Apache Software License",
        "Environment :: Console",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3 :: Only",
    ],
    packages=find_packages(),
    install_requires=resolve_requires("requirements.txt"),
    entry_points={
        'console_scripts': [
            "certifire = certifire.cli:certifire_main",
            "certifire-manager = certifire.manage:main",
        ],
    },
)
| [
"certifire.get_author",
"certifire.get_version",
"certifire.get_author_email",
"setuptools.find_packages",
"pip._internal.req.parse_requirements",
"sys.exit",
"codecs.open"
] | [((440, 462), 'codecs.open', 'open', (['"""README.md"""', '"""r"""'], {}), "('README.md', 'r')\n", (444, 462), False, 'from codecs import open\n'), ((422, 433), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (430, 433), False, 'import sys\n'), ((616, 677), 'pip._internal.req.parse_requirements', 'parse_requirements', (["('./%s' % requirements_file)"], {'session': '(False)'}), "('./%s' % requirements_file, session=False)\n", (634, 677), False, 'from pip._internal.req import parse_requirements\n'), ((1173, 1196), 'certifire.get_version', 'certifire.get_version', ([], {}), '()\n', (1194, 1196), False, 'import certifire\n'), ((1466, 1488), 'certifire.get_author', 'certifire.get_author', ([], {}), '()\n', (1486, 1488), False, 'import certifire\n'), ((1507, 1535), 'certifire.get_author_email', 'certifire.get_author_email', ([], {}), '()\n', (1533, 1535), False, 'import certifire\n'), ((2200, 2215), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2213, 2215), False, 'from setuptools import setup, find_packages\n'), ((923, 984), 'pip._internal.req.parse_requirements', 'parse_requirements', (["('./%s' % requirements_file)"], {'session': '(False)'}), "('./%s' % requirements_file, session=False)\n", (941, 984), False, 'from pip._internal.req import parse_requirements\n')] |
"""
:mod: `kanka.utils` - Helper functions
"""
from datetime import datetime
from requests_toolbelt.sessions import BaseUrlSession
from dacite import from_dict, Config
from .exceptions import KankaAPIError
API_BASE_ENDPOINT = 'https://kanka.io/api/1.0/'
class KankaSession(BaseUrlSession):
    """HTTP session bound to the kanka API.

    Every kanka API request must carry a header with the API token. This
    session stores the token, installs that header once, and offers a
    small helper for GET requests. The object can also be handed down
    from the KankaClient object to entity objects in case they need to
    make API requests of their own.

    :param api_endpoint: Base endpoint to the kanka API. Default: API_BASE_ENDPOINT
    :param api_token: kanka API token. Default: empty string
    :type api_endpoint: string
    :type api_token: string
    """
    def __init__(self, api_endpoint=API_BASE_ENDPOINT, api_token=''):
        self.base_url = api_endpoint
        self.token = api_token
        super().__init__()
        # Attach the token to every request made through this session.
        self.headers.update({
            'Authorization': f'Bearer {self.token}',
            'Accept': 'application/json',
        })

    def api_request(self, endpoint=''):
        """
        Request data from the given API endpoint.

        :return: json data from the given endpoint
        :rtype: dict
        """
        response = self.get(endpoint)
        if response.status_code == 401:
            raise KankaAPIError("Authentication error. Wrong token or no token given.")
        if response.status_code == 404:
            raise KankaAPIError(
                "Page not found. Request from a non-existent endpoint: {}.".format(response.url))
        return response.json()

    def __repr__(self):
        return f"Kanka Session to {self.base_url}"
def to_datetime(dict_date):
    """ Convert json date entry to datetime.

    :param dict_date: Date as retrieved from kanka in the format
        "YYYY-mm-ddTHH:MM:SS.000000" (note the "T" separator, which the
        parse format below requires; the fractional-second part, if any,
        is discarded)
    :type dict_date: string
    :return: Date converted to python datetime object
    :rtype: datetime.datetime
    """
    # partition() also copes with timestamps that lack a fractional part.
    timestamp = dict_date.partition(".")[0]
    return datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S")
def append_from(session, data, page):
    """ Collect paginated data."""
    r = session.api_request(page)
    # "links.next" holds the absolute URL of the next page, or None/empty.
    url_next = r["links"]["next"]
    if url_next:
        # Strip the session base URL so api_request gets a relative endpoint.
        append_from(session, data, url_next[len(session.base_url):])
    # NOTE(review): the recursive call runs *before* extend, so later pages
    # end up in `data` ahead of earlier ones — confirm this order is intended.
    data.extend(r["data"])
    return data
def create_entity(Entity_object, data):
    """ Build an entity object of class *Entity_object* from a raw dict.

    Datetime fields are converted from kanka's string format through the
    to_datetime type hook.
    """
    conversion = Config(type_hooks={datetime: to_datetime})
    return from_dict(data_class=Entity_object, data=data, config=conversion)
| [
"datetime.datetime.strptime",
"dacite.Config"
] | [((2126, 2167), 'datetime.datetime.strptime', 'datetime.strptime', (['t', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(t, '%Y-%m-%dT%H:%M:%S')\n", (2143, 2167), False, 'from datetime import datetime\n'), ((2625, 2667), 'dacite.Config', 'Config', ([], {'type_hooks': '{datetime: to_datetime}'}), '(type_hooks={datetime: to_datetime})\n', (2631, 2667), False, 'from dacite import from_dict, Config\n')] |
import json
import os
from pymatgen.io.vasp import Poscar
def from_path(path):
    """Load a StructureDir from the directory at *path*."""
    # TODO: should maybe support .tar.gz or .tar.xz
    return StructureDir.from_dir(path)
class StructureDir:
    """In-memory view of a structure directory (a POSCAR plus meta.json)."""
    def __init__(self, *, layers, masses, layer_sc_matrices, structure):
        self.layers = layers
        self.masses = masses
        self.layer_sc_matrices = layer_sc_matrices
        self.structure = structure
    @classmethod
    def from_dir(cls, path):
        """Build a StructureDir from *path*, reading POSCAR and meta.json."""
        structure = Poscar.from_file(os.path.join(path, 'POSCAR')).structure
        with open(os.path.join(path, 'meta.json')) as f:
            meta = json.load(f)
        # Accept both the underscored and the legacy hyphenated key.
        # NOTE(review): when the first pop succeeds, the hyphenated key is
        # left in `meta`; also an empty list falls through to the second pop.
        layer_sc_matrices = meta.pop('layer_sc_matrices', None) or meta.pop('layer-sc-matrices', None)
        if layer_sc_matrices:
            # Each entry is a dict; keep only its 'matrix' payload.
            layer_sc_matrices = [x['matrix'] for x in layer_sc_matrices]
        return cls(
            layers=meta.pop('layers', None),
            masses=meta.pop('masses', None),
            layer_sc_matrices=layer_sc_matrices,
            structure=structure,
        )
| [
"json.load",
"os.path.join"
] | [((609, 621), 'json.load', 'json.load', (['f'], {}), '(f)\n', (618, 621), False, 'import json\n'), ((493, 521), 'os.path.join', 'os.path.join', (['path', '"""POSCAR"""'], {}), "(path, 'POSCAR')\n", (505, 521), False, 'import os\n'), ((551, 582), 'os.path.join', 'os.path.join', (['path', '"""meta.json"""'], {}), "(path, 'meta.json')\n", (563, 582), False, 'import os\n')] |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import os
from functools import reduce
import sys
directory_with_mapping_reports = sys.argv[1]
mapping_summary = sys.argv[2]

############################################################
# Reads each STAR "Log.final.out" report and adds the sample
# name as the header of its value column.
############################################################
# BUG FIX: paths were built with plain string concatenation, which broke
# unless the directory argument ended in a slash; os.path.join handles both.
list_of_logs = [os.path.join(directory_with_mapping_reports, f)
                for f in os.listdir(directory_with_mapping_reports)
                if f.endswith("Log.final.out")]
list_of_logs.sort()
sample_names = [log.replace("_Log.final.out", "") for log in list_of_logs]
list_of_dfs = [pd.read_csv(log, sep="\t", names=["attribute", str(sample)])
               for log, sample in zip(list_of_logs, sample_names)]
# Outer-join all per-sample tables on the shared attribute column.
df_merged = reduce(lambda left, right: pd.merge(left, right, on=['attribute'], how='outer'), list_of_dfs)
df_merged.to_csv(mapping_summary, sep=",")
| [
"pandas.merge",
"os.listdir"
] | [((431, 473), 'os.listdir', 'os.listdir', (['directory_with_mapping_reports'], {}), '(directory_with_mapping_reports)\n', (441, 473), False, 'import os\n'), ((776, 828), 'pandas.merge', 'pd.merge', (['left', 'right'], {'on': "['attribute']", 'how': '"""outer"""'}), "(left, right, on=['attribute'], how='outer')\n", (784, 828), True, 'import pandas as pd\n')] |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import dace.library
from dace.transformation import transformation as xf
import pytest
@dace.library.node
class MyLibNode(dace.nodes.LibraryNode):
    """Minimal library node used to exercise late expansion registration."""
    # No expansions are registered at import time; the test adds one later.
    implementations = {}
    default_implementation = 'pure'
    def __init__(self, name='MyLibNode', **kwargs):
        super().__init__(name=name, **kwargs)
def test_register_expansion():
    """Expansion fails before an implementation is registered, and
    succeeds once @dace.library.register_expansion has been applied."""
    sdfg = dace.SDFG('libtest')
    state = sdfg.add_state()
    n = state.add_node(MyLibNode())
    # Expect KeyError as pure expansion not given
    with pytest.raises(KeyError):
        sdfg()
    @dace.library.register_expansion(MyLibNode, 'pure')
    class ExpandMyLibNode(xf.ExpandTransformation):
        """Trivial expansion: replace the node with a no-op tasklet."""
        environments = []
        @staticmethod
        def expansion(node: MyLibNode, state: dace.SDFGState, sdfg: dace.SDFG, **kwargs):
            return dace.nodes.Tasklet('donothing', code='pass')
    # After registering the expansion, the code should work
    sdfg()
if __name__ == '__main__':
    # Allow running this test module directly, without pytest.
    test_register_expansion()
| [
"dace.library.register_expansion",
"dace.SDFG",
"dace.nodes.Tasklet",
"pytest.raises"
] | [((441, 461), 'dace.SDFG', 'dace.SDFG', (['"""libtest"""'], {}), "('libtest')\n", (450, 461), False, 'import dace\n'), ((633, 683), 'dace.library.register_expansion', 'dace.library.register_expansion', (['MyLibNode', '"""pure"""'], {}), "(MyLibNode, 'pure')\n", (664, 683), False, 'import dace\n'), ((587, 610), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (600, 610), False, 'import pytest\n'), ((894, 938), 'dace.nodes.Tasklet', 'dace.nodes.Tasklet', (['"""donothing"""'], {'code': '"""pass"""'}), "('donothing', code='pass')\n", (912, 938), False, 'import dace\n')] |
"""
Functions to manipulate data from PostgreSQL database includes a
parallelise dataframe that runs a function on a pandas data frame
in parallel, as well as a loop_chunks function. This reads a chunk
from the database performs an operation and uploads to a new
table in the database.
"""
import numpy as np
import pandas as pd
import time
from multiprocessing import Pool, cpu_count
from utils import db_connect
def parallelise_dataframe(df, func, num_cores=None):
    '''
    Perform function in parallel on pandas data frame where if the
    num_cores is not specified then use the number of available
    cores -1 (never fewer than 1).

    Arguments:
        df (dataframe to manipulate)
        func (function to apply to each chunk; must be picklable)
        num_cores (number of cores to parallelise)

    Returns:
        The data frame processed by function in parallel.
    '''
    if num_cores is None:
        # Leave one core for the parent process, but never drop below 1
        # (cpu_count() - 1 used to crash Pool() on single-core hosts).
        num_cores = max(cpu_count() - 1, 1)
    # Split by positional index instead of np.array_split(df, ...), which
    # went through DataFrame.swapaxes (deprecated/removed in pandas >= 2.1).
    index_chunks = np.array_split(np.arange(len(df)), num_cores)
    df_split = [df.iloc[idx] for idx in index_chunks]
    # Context manager guarantees worker cleanup even if func raises.
    with Pool(num_cores) as pool:
        result = pd.concat(pool.map(func, df_split))
    return result
def loop_chunks(table_read, chunk_function, output_schema, output_table,
                size_chunk=1000000, parallel=True):
    '''
    Perform function on PostgreSQL database chunk. Read from the db
    perform operation either threaded or on a single core, then
    upload to the database.

    Arguments:
        table_read (a PSQL query that alchemy uses to read the table)
        chunk_function (the function to apply to that chunk)
        output_schema (schema for table output)
        output_table (table name to output data into, will create if not exists)
        size_chunk (the number of rows to process in 1 chunk)
        parallel (use the parallelise_dataframe function on chunk)
    '''
    conn_input, conn_output = db_connect.alchemy_input_output_open()
    start = round(time.time())
    j = 0
    # Stream the query in chunks so the whole table never sits in memory.
    for chunk in pd.read_sql_query(table_read, conn_input, chunksize=size_chunk):
        if parallel==True:
            chunk = parallelise_dataframe(chunk, chunk_function)
        else:
            chunk = chunk_function(chunk)
        # Append each processed chunk; the table is created on first write.
        chunk.to_sql(output_table, conn_output, schema=output_schema,
                     if_exists='append', index=False)
        j+=1
        # NOTE(review): the last chunk may be smaller than size_chunk, so
        # j*size_chunk can overstate the processed row count.
        print('{} seconds: completed {} rows'.format(
            (round(time.time()) - start), j*size_chunk))
    db_connect.alchemy_input_output_close(conn_input, conn_output)
| [
"pandas.read_sql_query",
"utils.db_connect.alchemy_input_output_close",
"utils.db_connect.alchemy_input_output_open",
"multiprocessing.cpu_count",
"numpy.array_split",
"multiprocessing.Pool",
"time.time"
] | [((901, 930), 'numpy.array_split', 'np.array_split', (['df', 'num_cores'], {}), '(df, num_cores)\n', (915, 930), True, 'import numpy as np\n'), ((942, 957), 'multiprocessing.Pool', 'Pool', (['num_cores'], {}), '(num_cores)\n', (946, 957), False, 'from multiprocessing import Pool, cpu_count\n'), ((1764, 1802), 'utils.db_connect.alchemy_input_output_open', 'db_connect.alchemy_input_output_open', ([], {}), '()\n', (1800, 1802), False, 'from utils import db_connect\n'), ((1861, 1924), 'pandas.read_sql_query', 'pd.read_sql_query', (['table_read', 'conn_input'], {'chunksize': 'size_chunk'}), '(table_read, conn_input, chunksize=size_chunk)\n', (1878, 1924), True, 'import pandas as pd\n'), ((2327, 2389), 'utils.db_connect.alchemy_input_output_close', 'db_connect.alchemy_input_output_close', (['conn_input', 'conn_output'], {}), '(conn_input, conn_output)\n', (2364, 2389), False, 'from utils import db_connect\n'), ((1821, 1832), 'time.time', 'time.time', ([], {}), '()\n', (1830, 1832), False, 'import time\n'), ((870, 881), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (879, 881), False, 'from multiprocessing import Pool, cpu_count\n'), ((2284, 2295), 'time.time', 'time.time', ([], {}), '()\n', (2293, 2295), False, 'import time\n')] |
import pygame
import pygame.camera
#from pygame.locals import *
# Initialise pygame and its camera subsystem at import time, then open
# a 640x480 window (flags=0: default, non-fullscreen surface).
pygame.init()
pygame.camera.init()
screen = pygame.display.set_mode((640, 480), 0)
def main():
    """Capture one frame from the first detected camera and blit it.

    Raises:
        ValueError: if no camera is detected on the system.
    """
    camlist = pygame.camera.list_cameras()
    # Guard clause: fail fast when no capture device is present.
    if not camlist:
        raise ValueError('Sorry no cameras detected')
    print('camera {} is detected'.format(camlist[0]))
    cam = pygame.camera.Camera(camlist[0], (640, 480))
    cam.start()
    try:
        image = cam.get_image()
    finally:
        # BUG FIX: the original never called cam.stop(), leaving the capture
        # device open; release it even if get_image() fails.
        cam.stop()
    print(image)
    screen.blit(
        image,
        (0, 0),
    )
    pygame.display.update()
# NOTE: this print executes at import time, before main() is invoked below.
print('end program')
# Standard script entry point.
if __name__ == '__main__':
    main()
| [
"pygame.camera.init",
"pygame.init",
"pygame.display.set_mode",
"pygame.camera.Camera",
"pygame.display.update",
"pygame.camera.list_cameras"
] | [((65, 78), 'pygame.init', 'pygame.init', ([], {}), '()\n', (76, 78), False, 'import pygame\n'), ((79, 99), 'pygame.camera.init', 'pygame.camera.init', ([], {}), '()\n', (97, 99), False, 'import pygame\n'), ((109, 147), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(640, 480)', '(0)'], {}), '((640, 480), 0)\n', (132, 147), False, 'import pygame\n'), ((176, 204), 'pygame.camera.list_cameras', 'pygame.camera.list_cameras', ([], {}), '()\n', (202, 204), False, 'import pygame\n'), ((293, 337), 'pygame.camera.Camera', 'pygame.camera.Camera', (['camlist[0]', '(640, 480)'], {}), '(camlist[0], (640, 480))\n', (313, 337), False, 'import pygame\n'), ((489, 512), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (510, 512), False, 'import pygame\n')] |
import csv
import codecs
import StringIO
import cStringIO
import sys
import time
import argparse
from pyspark import SparkContext
# Command-line interface: input CSV path and optional output directory
# for the repartitioned copy.
parser = argparse.ArgumentParser(description='Count columns and lines existing in file')
parser.add_argument('-df','--DATAFILE', dest="DATAFILE", type=str,
                    help='the path for the file to be analyzed (a csv one)')
parser.add_argument('-orep','--SAVE_TO_REP', dest="SAVE_TO_REP", type=str,
                    help='the path for the repartitioned file to be analyzed')
class UnicodeWriter:
    """
    A csv writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    NOTE: Python 2 code (cStringIO, byte/unicode round-trip). Rows are
    expected to be sequences of unicode strings.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        # Target stream and incremental encoder for the requested encoding.
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()
    def writerow(self, row):
        # csv.writer only handles bytes in py2, so encode each cell first.
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)
    def writerows(self, rows):
        # Write every row through writerow() so each goes through the
        # same encode/decode pipeline.
        for row in rows:
            self.writerow(row)
def unicode_csv_reader(unicode_csv_data, **kwargs):
    """Lazily parse pipe-delimited CSV lines, decoding every cell to unicode.

    *unicode_csv_data* is an iterable of unicode lines; any extra keyword
    arguments are forwarded to ``csv.reader`` (Python 2 idiom).
    """
    # Lift the default field-size cap so very wide records do not raise.
    csv.field_size_limit(sys.maxsize)
    encoded_lines = utf_8_encoder(unicode_csv_data)
    for record in csv.reader(encoded_lines, delimiter='|', **kwargs):
        yield [unicode(field, 'utf-8') for field in record]
def utf_8_encoder(unicode_csv_data):
    """Lazily encode each text line of *unicode_csv_data* as UTF-8 bytes."""
    return (line.encode('utf-8') for line in unicode_csv_data)
def toCSVLine(row):
    """Serialize *row* (a list of unicode strings) to one quoted CSV line.

    NOTE: Python 2 code -- relies on the StringIO module and the
    UnicodeWriter helper defined above.
    """
    # Given a list of strings, returns a properly csv formatted string
    output = StringIO.StringIO("")
    UnicodeWriter(output,quoting=csv.QUOTE_ALL).writerow(row)
    return output.getvalue().strip()
def get_fields_index(header):
    """Map each column name in *header* to its positional index."""
    return {name: position for position, name in enumerate(header)}
def add_header(unicode_csv_data, new_header):
    """Return an iterator yielding the comma-joined header line, then every
    row of *unicode_csv_data* unchanged.

    The input is consumed eagerly, matching the original list-building
    behaviour.
    """
    combined = [",".join(new_header)]
    combined.extend(unicode_csv_data)
    return iter(combined)
# Driver script: read the CSV through Spark, report row shapes, and
# optionally save a repartitioned copy with a header prepended.
args = parser.parse_args()
params = vars(args)
sc = SparkContext()
file_path_full = params['DATAFILE']
kwargs = {'escapechar': '\\', 'doublequote': False}
# Strip NUL bytes (which break the csv module) before parsing each partition.
rdd = sc.textFile(file_path_full).map(lambda x: x.replace("\x00", "")).mapPartitions(lambda x: unicode_csv_reader(x))
sample = rdd.take(2)
header = sample[0]
line = sample[1]
print(header)
print(line)
# Get field positions from header.
# BUG FIX: the original referenced an undefined name `data_header` here and
# below, raising NameError at runtime; the parsed header row is bound to
# `header` above.
fields_index = get_fields_index(header)
# Histogram of column counts per row: (n_columns, n_rows_with_that_count).
a = rdd.map(lambda x: (len(x), 1)).reduceByKey(lambda a, b: a + b).collect()
if params['SAVE_TO_REP']:
    rdd.map(toCSVLine).mapPartitions(lambda x: add_header(x, header)).saveAsTextFile(params['SAVE_TO_REP'])
| [
"csv.field_size_limit",
"StringIO.StringIO",
"cStringIO.StringIO",
"argparse.ArgumentParser",
"csv.writer",
"codecs.getincrementalencoder",
"pyspark.SparkContext"
] | [((141, 220), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Count columns and lines existing in file"""'}), "(description='Count columns and lines existing in file')\n", (164, 220), False, 'import argparse\n'), ((2717, 2731), 'pyspark.SparkContext', 'SparkContext', ([], {}), '()\n', (2729, 2731), False, 'from pyspark import SparkContext\n'), ((1712, 1745), 'csv.field_size_limit', 'csv.field_size_limit', (['sys.maxsize'], {}), '(sys.maxsize)\n', (1732, 1745), False, 'import csv\n'), ((2164, 2185), 'StringIO.StringIO', 'StringIO.StringIO', (['""""""'], {}), "('')\n", (2181, 2185), False, 'import StringIO\n'), ((821, 841), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (839, 841), False, 'import cStringIO\n'), ((872, 919), 'csv.writer', 'csv.writer', (['self.queue'], {'dialect': 'dialect'}), '(self.queue, dialect=dialect, **kwds)\n', (882, 919), False, 'import csv\n'), ((983, 1021), 'codecs.getincrementalencoder', 'codecs.getincrementalencoder', (['encoding'], {}), '(encoding)\n', (1011, 1021), False, 'import codecs\n')] |
from extras.plugins import PluginConfig
from django.utils.translation import gettext_lazy as _
class NetBoxSecretStore(PluginConfig):
    """NetBox plugin configuration for the secret-store app."""
    # Internal plugin name and human-readable metadata.
    name = 'netbox_secretstore'
    verbose_name = _('Netbox Secret Store')
    description = _('A Secret Storage for NetBox')
    version = '1.0.8'
    author = 'NetBox Maintainers'
    author_email = ''
    # URL prefix under which the plugin's views are mounted.
    base_url = 'netbox_secretstore'
    # Minimum compatible NetBox version.
    min_version = '3.0.0'
    required_settings = []
    # Cache every queryset operation for all models of this plugin.
    caching_config = {
        '*': {
            'ops': 'all'
        }
    }
    # Default RSA key size (bits) for newly generated user keys.
    default_settings = {
        'public_key_size': 2048
    }
config = NetBoxSecretStore
| [
"django.utils.translation.gettext_lazy"
] | [((186, 210), 'django.utils.translation.gettext_lazy', '_', (['"""Netbox Secret Store"""'], {}), "('Netbox Secret Store')\n", (187, 210), True, 'from django.utils.translation import gettext_lazy as _\n'), ((229, 261), 'django.utils.translation.gettext_lazy', '_', (['"""A Secret Storage for NetBox"""'], {}), "('A Secret Storage for NetBox')\n", (230, 261), True, 'from django.utils.translation import gettext_lazy as _\n')] |
import os
import numpy as np
import urllib
from absl import flags
import tensorflow as tf
import tensorflow_probability as tfp
# Short aliases for TensorFlow Probability sub-modules.
tfb = tfp.bijectors
tfd = tfp.distributions
# Command-line flags (absl): training hyperparameters and I/O locations.
flags.DEFINE_float(
    "learning_rate", default=0.001, help="Initial learning rate.")
flags.DEFINE_integer(
    "epochs", default=100, help="Number of training steps to run.")
flags.DEFINE_string(
    "activation",
    default="selu",
    help="Activation function for all hidden layers.")
flags.DEFINE_integer(
    "batch_size",
    default=32,
    help="Batch size.")
flags.DEFINE_string(
    "data_dir",
    default="/tmp/mnist",
    help="Directory where data is stored (if using real data).")
flags.DEFINE_string(
    "model_dir",
    default="/tmp/critic/",
    help="Directory to put the model's fit.")
flags.DEFINE_integer(
    "viz_steps", default=500, help="Frequency at which to save visualizations.")
flags.DEFINE_bool(
    "delete_existing",
    default=False,
    help="If true, deletes existing `model_dir` directory.")
FLAGS = flags.FLAGS
def non_square_det(x, reltol=1e-6):
    """
    Pseudo-determinant of a (possibly non-square) matrix.

    Idea taken from https://www.quora.com/How-do-we-calculate-the-determinant-of-a-non-square-matrix
    # for n != m
    A = tf.random_normal([n, m])
    det(A) := sqrt(det(A.A^T))
    Args:
        x (tf.tensor): shape in [..., a, b]
    Returns:
        [..., ]
    """
    # squared_mat = tf.matmul(x, x, transpose_b=True)
    # return tf.sqrt(tf.linalg.det(squared_mat))
    # Product of singular values equals sqrt(det(x.x^T)).
    s = tf.svd(x, compute_uv=False)
    # NOTE(review): `reltol` is currently unused -- the tolerance filtering
    # below is commented out, so near-zero singular values are not clamped.
    # atol = tf.reduce_max(s) * reltol
    # s = tf.diag(tf.where(tf.greater(atol, tf.abs(s)), tf.ones_like(s), s))
    return tf.reduce_prod(s)
def pinv(A, reltol=1e-6):
    """
    Moore-Penrose pseudo-inverse via SVD.

    Args:
        A (tf.tensor): the matrix to be inverted shape=[n, m]
        reltol (float): singular values below reltol * max(s) are treated as zero
    Returns:
        inverse (tf.tensor): the inverse of A, s.t. A_T.A = I. shape=[m,n]
    """
    s, u, v = tf.svd(A)
    # Relative cutoff: singular values below atol count as numerically zero.
    atol = tf.reduce_max(s) * reltol
    # Invert only the significant singular values; zero out the rest.
    s_inv = tf.diag(tf.where(tf.greater(tf.abs(s), atol), 1.0/s, tf.zeros_like(s)))
    # s_inv = tf.diag(1./s)
    return tf.matmul(v, tf.matmul(s_inv, u, transpose_b=True))
class Dense(tfb.Bijector):
    """
    Want a hierarchical flow.
    Map some low dim distribution to a manifold in a higher dimensional space.
    For more info on bijectors see tfb.Bijector, I simply cloned the general
    structure.
    """
    def __init__(self, n_inputs, n_outputs, validate_args=False, name=''):
        """
        Args:
            n_inputs (int): the number of features (last dim)
            n_outputs (int): the target num of features
        """
        # NOTE(review): super(self.__class__, ...) breaks under subclassing;
        # super(Dense, self) would be the safe spelling.
        super(self.__class__, self).__init__(
            validate_args=validate_args,
            is_constant_jacobian=True,
            forward_min_event_ndims=1,
            name=name)
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        with tf.variable_scope('dense'+name):
            self.weights = tf.get_variable(name='weights',
                            shape=[n_inputs, n_outputs],
                            dtype=tf.float32,
                            # initializer=tf.initializers.orthogonal()
                            )
            self.bias = tf.get_variable(name='bias',
                            shape=[n_outputs],
                            dtype=tf.float32,
                            initializer=tf.initializers.zeros()
                            )
    @property
    def _is_injective(self):
        # Assumed injective so TransformedDistribution accepts the bijector.
        return True
    def _forward_event_shape_tensor(self, shape):
        # NOTE(review): forward maps n_inputs -> n_outputs, so this looks
        # swapped with the inverse version below -- confirm against
        # tfp.bijectors.Bijector's contract before relying on it.
        return tf.shape([shape[0], self.n_inputs])
    def _invserse_event_shape_tensor(self, shape):
        # NOTE(review): method name is misspelled ("invserse"), so TFP never
        # calls this override; the base-class default is used instead.
        return tf.shape([shape[0], self.n_outputs])
    def _forward(self, x):
        # Affine map: y = x.W + b
        return tf.matmul(x, self.weights) + self.bias
    def _inverse(self, y):
        # Least-squares inverse via the pseudo-inverse of W.
        weights_inv = pinv(self.weights)
        return tf.matmul(y - self.bias, weights_inv)
    def _forward_log_det_jacobian(self, x):
        # log pseudo-determinant of W (constant w.r.t. x).
        return tf.log(non_square_det(self.weights))
    def _inverse_log_det_jacobian(self, y):
        return tf.log(non_square_det(pinv(self.weights)))
def make_mixture(latent_size, mixture_components):
    """Creates a mixture of Gaussians distribution.
    Args:
        latent_size: The dimensionality of the latent representation.
        mixture_components: Number of elements of the mixture.
    Returns:
        random_prior: A `tf.distributions.Distribution` instance
        representing the distribution over encodings in the absence of any
        evidence.
    """
    if mixture_components == 1:
        # See the module docstring for why we don't learn the parameters here.
        return tfd.MultivariateNormalDiag(
            loc=tf.zeros([latent_size]),
            scale_identity_multiplier=1.0)
    # Learnable per-component means, scales, and mixing weights.
    loc = tf.get_variable(name="loc", shape=[mixture_components, latent_size])
    raw_scale_diag = tf.get_variable(
        name="raw_scale_diag", shape=[mixture_components, latent_size])
    mixture_logits = tf.get_variable(
        name="mixture_logits", shape=[mixture_components])
    return tfd.MixtureSameFamily(
        components_distribution=tfd.MultivariateNormalDiag(
            loc=loc,
            # softplus keeps the learned scales strictly positive
            scale_diag=tf.nn.softplus(raw_scale_diag)),
        mixture_distribution=tfd.Categorical(logits=mixture_logits),
        name="prior")
def model_fn(features, labels, mode, params, config):
    """
    Builds the model function for use in an estimator.
    Arguments:
        features: The input features for the estimator.
        labels: The labels, unused here.
        mode: Signifies whether it is train or test or predict.
        params: Some hyperparameters as a dictionary.
        config: The RunConfig, unused here.
    Returns:
        EstimatorSpec: A tf.estimator.EstimatorSpec instance.
    """
    x = features['x']
    global_step = tf.train.get_or_create_global_step()
    with tf.contrib.summary.record_summaries_every_n_global_steps(100, global_step=global_step):
        # construct a multilayer parameterised bijector
        n_hidden = 8
        width = 32
        n_outputs = 784
        fn = tfb.Chain([
            Dense(width, n_outputs, name='3'),
            # tfb.Softplus(),
            # Dense(width, width, name='2'),
            # tfb.Softplus(),
            # Dense(width, width, name='1'),
            Dense(n_hidden, width, name='0')
        ])
        # use the bijector to map a simple distribution into our a density model
        dist = make_mixture(n_hidden, 10)
        # logits = tf.get_variable(
        #     name="logits", shape=[n_outputs])
        # dist = tfd.RelaxedOneHotCategorical(logits=logits, temperature=1.0)
        # density = tfd.RelaxedBernoulli(logits=logits, temperature=100.0)
        density = tfd.TransformedDistribution(distribution=dist, bijector=fn)
        # maximise the likelihood of the data
        # NOTE(review): this minimises mean(1 - p), not the usual negative
        # log-likelihood -- confirm this objective is intentional.
        p = density.prob(x)
        loss = tf.reduce_mean(1-p) # - 0.1*density.entropy()
        # reg = -density.entropy()
        # tf.summary.scalar('entropy', reg)
        # generate some samples to visualise
        # HACK to get samples to work I had to comment out line 411 of transformed_distribution.py
        samples = density.sample(3)
        tf.summary.image('samples', tf.reshape(samples, [3, 28, 28, 1]))
        # mu = density.mean()
        # tf.summary.image('mean', tf.reshape(mu, [1, 28, 28, 1]))
    # Clip per-variable gradient norms; substitute zeros when a gradient is None.
    opt = tf.train.AdamOptimizer(0.0001)
    gnvs = opt.compute_gradients(loss)
    gnvs = [(tf.clip_by_norm(g, 10.0) if g is not None else tf.zeros_like(v), v) for g, v in gnvs]
    train_step = opt.apply_gradients(gnvs, global_step=global_step)
    # The same spec is returned for every mode (train/eval/predict).
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_step,
        eval_metric_ops={"eval_loss": tf.metrics.mean(loss)}
    )
def main(_):
    """Train and periodically evaluate the flow model on MNIST.

    The single positional argument is the argv list passed by tf.app.run;
    it is unused.
    """
    params = FLAGS.flag_values_dict()
    # Resolve the activation name (e.g. "selu") to the tf.nn callable.
    params["activation"] = getattr(tf.nn, params["activation"])
    if FLAGS.delete_existing and tf.gfile.Exists(FLAGS.model_dir):
        tf.logging.warn("Deleting old log directory at {}".format(FLAGS.model_dir))
        tf.gfile.DeleteRecursively(FLAGS.model_dir)
    tf.gfile.MakeDirs(FLAGS.model_dir)
    # Download/load MNIST via the (deprecated) tf.contrib helper.
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images # Returns np.array
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=FLAGS.batch_size,
        num_epochs=1,
        shuffle=True)
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        batch_size=FLAGS.batch_size,
        num_epochs=1,
        shuffle=False)
    estimator = tf.estimator.Estimator(
        model_fn,
        params=params,
        config=tf.estimator.RunConfig(
            model_dir=FLAGS.model_dir,
            save_checkpoints_steps=FLAGS.viz_steps,
        ),
    )
    # Alternate training and evaluation once per epoch.
    for _ in range(FLAGS.epochs):
        estimator.train(train_input_fn, steps=FLAGS.viz_steps)
        eval_results = estimator.evaluate(eval_input_fn)
        print("Evaluation_results:\n\t%s\n" % eval_results)
# Entry point: tf.app.run parses absl flags and then calls main(argv).
if __name__ == "__main__":
    tf.app.run()
| [
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.contrib.learn.datasets.load_dataset",
"tensorflow.estimator.inputs.numpy_input_fn",
"tensorflow.metrics.mean",
"tensorflow.nn.softplus",
"tensorflow.gfile.MakeDirs",
"tensorflow.reduce_mean",
"absl.flags.DEFINE_float",
"tensorflow.app.run"... | [((174, 260), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""'], {'default': '(0.001)', 'help': '"""Initial learning rate."""'}), "('learning_rate', default=0.001, help=\n 'Initial learning rate.')\n", (192, 260), False, 'from absl import flags\n'), ((261, 350), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""epochs"""'], {'default': '(100)', 'help': '"""Number of training steps to run."""'}), "('epochs', default=100, help=\n 'Number of training steps to run.')\n", (281, 350), False, 'from absl import flags\n'), ((351, 456), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""activation"""'], {'default': '"""selu"""', 'help': '"""Activation function for all hidden layers."""'}), "('activation', default='selu', help=\n 'Activation function for all hidden layers.')\n", (370, 456), False, 'from absl import flags\n'), ((465, 531), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""'], {'default': '(32)', 'help': '"""Batch size."""'}), "('batch_size', default=32, help='Batch size.')\n", (485, 531), False, 'from absl import flags\n'), ((545, 664), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data_dir"""'], {'default': '"""/tmp/mnist"""', 'help': '"""Directory where data is stored (if using real data)."""'}), "('data_dir', default='/tmp/mnist', help=\n 'Directory where data is stored (if using real data).')\n", (564, 664), False, 'from absl import flags\n'), ((673, 776), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model_dir"""'], {'default': '"""/tmp/critic/"""', 'help': '"""Directory to put the model\'s fit."""'}), '(\'model_dir\', default=\'/tmp/critic/\', help=\n "Directory to put the model\'s fit.")\n', (692, 776), False, 'from absl import flags\n'), ((785, 887), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""viz_steps"""'], {'default': '(500)', 'help': '"""Frequency at which to save visualizations."""'}), "('viz_steps', default=500, help=\n 
'Frequency at which to save visualizations.')\n", (805, 887), False, 'from absl import flags\n'), ((888, 1001), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""delete_existing"""'], {'default': '(False)', 'help': '"""If true, deletes existing `model_dir` directory."""'}), "('delete_existing', default=False, help=\n 'If true, deletes existing `model_dir` directory.')\n", (905, 1001), False, 'from absl import flags\n'), ((1464, 1491), 'tensorflow.svd', 'tf.svd', (['x'], {'compute_uv': '(False)'}), '(x, compute_uv=False)\n', (1470, 1491), True, 'import tensorflow as tf\n'), ((1621, 1638), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['s'], {}), '(s)\n', (1635, 1638), True, 'import tensorflow as tf\n'), ((1858, 1867), 'tensorflow.svd', 'tf.svd', (['A'], {}), '(A)\n', (1864, 1867), True, 'import tensorflow as tf\n'), ((4781, 4849), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""loc"""', 'shape': '[mixture_components, latent_size]'}), "(name='loc', shape=[mixture_components, latent_size])\n", (4796, 4849), True, 'import tensorflow as tf\n'), ((4869, 4948), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""raw_scale_diag"""', 'shape': '[mixture_components, latent_size]'}), "(name='raw_scale_diag', shape=[mixture_components, latent_size])\n", (4884, 4948), True, 'import tensorflow as tf\n'), ((4975, 5041), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""mixture_logits"""', 'shape': '[mixture_components]'}), "(name='mixture_logits', shape=[mixture_components])\n", (4990, 5041), True, 'import tensorflow as tf\n'), ((5816, 5852), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (5850, 5852), True, 'import tensorflow as tf\n'), ((8113, 8147), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (8130, 8147), True, 'import tensorflow as tf\n'), ((8161, 8208), 'tensorflow.contrib.learn.datasets.load_dataset', 
'tf.contrib.learn.datasets.load_dataset', (['"""mnist"""'], {}), "('mnist')\n", (8199, 8208), True, 'import tensorflow as tf\n'), ((8284, 8330), 'numpy.asarray', 'np.asarray', (['mnist.train.labels'], {'dtype': 'np.int32'}), '(mnist.train.labels, dtype=np.int32)\n', (8294, 8330), True, 'import numpy as np\n'), ((8403, 8448), 'numpy.asarray', 'np.asarray', (['mnist.test.labels'], {'dtype': 'np.int32'}), '(mnist.test.labels, dtype=np.int32)\n', (8413, 8448), True, 'import numpy as np\n'), ((8471, 8603), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': train_data}", 'y': 'train_labels', 'batch_size': 'FLAGS.batch_size', 'num_epochs': '(1)', 'shuffle': '(True)'}), "(x={'x': train_data}, y=train_labels,\n batch_size=FLAGS.batch_size, num_epochs=1, shuffle=True)\n", (8505, 8603), True, 'import tensorflow as tf\n'), ((8672, 8803), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': eval_data}", 'y': 'eval_labels', 'batch_size': 'FLAGS.batch_size', 'num_epochs': '(1)', 'shuffle': '(False)'}), "(x={'x': eval_data}, y=eval_labels,\n batch_size=FLAGS.batch_size, num_epochs=1, shuffle=False)\n", (8706, 8803), True, 'import tensorflow as tf\n'), ((9306, 9318), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (9316, 9318), True, 'import tensorflow as tf\n'), ((1880, 1896), 'tensorflow.reduce_max', 'tf.reduce_max', (['s'], {}), '(s)\n', (1893, 1896), True, 'import tensorflow as tf\n'), ((2043, 2080), 'tensorflow.matmul', 'tf.matmul', (['s_inv', 'u'], {'transpose_b': '(True)'}), '(s_inv, u, transpose_b=True)\n', (2052, 2080), True, 'import tensorflow as tf\n'), ((3604, 3639), 'tensorflow.shape', 'tf.shape', (['[shape[0], self.n_inputs]'], {}), '([shape[0], self.n_inputs])\n', (3612, 3639), True, 'import tensorflow as tf\n'), ((3707, 3743), 'tensorflow.shape', 'tf.shape', (['[shape[0], self.n_outputs]'], {}), '([shape[0], self.n_outputs])\n', (3715, 3743), True, 'import 
tensorflow as tf\n'), ((3910, 3947), 'tensorflow.matmul', 'tf.matmul', (['(y - self.bias)', 'weights_inv'], {}), '(y - self.bias, weights_inv)\n', (3919, 3947), True, 'import tensorflow as tf\n'), ((5862, 5953), 'tensorflow.contrib.summary.record_summaries_every_n_global_steps', 'tf.contrib.summary.record_summaries_every_n_global_steps', (['(100)'], {'global_step': 'global_step'}), '(100, global_step=\n global_step)\n', (5918, 5953), True, 'import tensorflow as tf\n'), ((6882, 6903), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(1 - p)'], {}), '(1 - p)\n', (6896, 6903), True, 'import tensorflow as tf\n'), ((7374, 7404), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (7396, 7404), True, 'import tensorflow as tf\n'), ((7939, 7971), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (7954, 7971), True, 'import tensorflow as tf\n'), ((8065, 8108), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (8091, 8108), True, 'import tensorflow as tf\n'), ((1971, 1987), 'tensorflow.zeros_like', 'tf.zeros_like', (['s'], {}), '(s)\n', (1984, 1987), True, 'import tensorflow as tf\n'), ((2830, 2863), 'tensorflow.variable_scope', 'tf.variable_scope', (["('dense' + name)"], {}), "('dense' + name)\n", (2847, 2863), True, 'import tensorflow as tf\n'), ((2890, 2968), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""weights"""', 'shape': '[n_inputs, n_outputs]', 'dtype': 'tf.float32'}), "(name='weights', shape=[n_inputs, n_outputs], dtype=tf.float32)\n", (2905, 2968), True, 'import tensorflow as tf\n'), ((3787, 3813), 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weights'], {}), '(x, self.weights)\n', (3796, 3813), True, 'import tensorflow as tf\n'), ((7224, 7259), 'tensorflow.reshape', 'tf.reshape', (['samples', '[3, 28, 28, 1]'], {}), '(samples, [3, 28, 28, 1])\n', (7234, 7259), True, 'import tensorflow as 
tf\n'), ((8933, 9027), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'model_dir': 'FLAGS.model_dir', 'save_checkpoints_steps': 'FLAGS.viz_steps'}), '(model_dir=FLAGS.model_dir, save_checkpoints_steps=\n FLAGS.viz_steps)\n', (8955, 9027), True, 'import tensorflow as tf\n'), ((1946, 1955), 'tensorflow.abs', 'tf.abs', (['s'], {}), '(s)\n', (1952, 1955), True, 'import tensorflow as tf\n'), ((4708, 4731), 'tensorflow.zeros', 'tf.zeros', (['[latent_size]'], {}), '([latent_size])\n', (4716, 4731), True, 'import tensorflow as tf\n'), ((7760, 7781), 'tensorflow.metrics.mean', 'tf.metrics.mean', (['loss'], {}), '(loss)\n', (7775, 7781), True, 'import tensorflow as tf\n'), ((3408, 3431), 'tensorflow.initializers.zeros', 'tf.initializers.zeros', ([], {}), '()\n', (3429, 3431), True, 'import tensorflow as tf\n'), ((5180, 5210), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['raw_scale_diag'], {}), '(raw_scale_diag)\n', (5194, 5210), True, 'import tensorflow as tf\n'), ((7465, 7489), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['g', '(10.0)'], {}), '(g, 10.0)\n', (7480, 7489), True, 'import tensorflow as tf\n'), ((7512, 7528), 'tensorflow.zeros_like', 'tf.zeros_like', (['v'], {}), '(v)\n', (7525, 7528), True, 'import tensorflow as tf\n')] |
from sqlalchemy import Column, Integer, String, Float, ForeignKey
from sqlalchemy.engine.create import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# SQLite database used by all models below; Session is a factory for
# ORM sessions and Base is the declarative parent class for the models.
CONNECTION_STRING = "sqlite+pysqlite:///data/db.sqlite"
engine = create_engine(CONNECTION_STRING)
Session = sessionmaker(engine)
Base = declarative_base()
class Item2018(Base):
    """ORM model for a 2018 specification pay item."""
    __tablename__ = "Item2018"
    ItemID_2018 = Column(Integer, primary_key=True, unique=True)
    SpecCode_2018 = Column(String)
    UnitCode_2018 = Column(String)
    ItemCode_2018 = Column(String)
    Description_2018 = Column(String)
    Unit_2018 = Column(String)
    def __str__(self) -> str:
        # BUG FIX: the original read self.Description / self.Unit, attributes
        # that do not exist on this model (they carry the _2018 suffix), so
        # str() raised AttributeError.
        return f'Item(Description={self.Description_2018}, Unit={self.Unit_2018})'
    def __repr__(self) -> str:
        a = f'ItemID = {self.ItemID_2018}'
        b = f'SpecCode = {self.SpecCode_2018}'
        c = f'UnitCode = {self.UnitCode_2018}'
        d = f'ItemCode = {self.ItemCode_2018}'
        e = f'Description = {self.Description_2018}'
        f = f'Unit = {self.Unit_2018}'
        return ', '.join( [a, b, c, d, e, f] )
class Item2020(Base):
    """ORM model for a 2020 specification pay item, optionally linked to
    its 2018 counterpart via Item2018_ID."""
    __tablename__ = "Item2020"
    ItemID_2020 = Column(Integer, primary_key=True, unique=True)
    SpecCode_2020 = Column(String)
    UnitCode_2020 = Column(String)
    ItemCode_2020 = Column(String)
    Description_2020 = Column(String)
    Unit_2020 = Column(String)
    Item2018_ID = Column(Integer)
    def __str__(self) -> str:
        # BUG FIX: the original read self.Description / self.Unit, attributes
        # that do not exist on this model (they carry the _2020 suffix), so
        # str() raised AttributeError.
        return f'Item(Description={self.Description_2020}, Unit={self.Unit_2020})'
    def __repr__(self) -> str:
        a = f'ItemID = {self.ItemID_2020}'
        b = f'SpecCode = {self.SpecCode_2020}'
        c = f'UnitCode = {self.UnitCode_2020}'
        d = f'ItemCode = {self.ItemCode_2020}'
        e = f'Description = {self.Description_2020}'
        f = f'Unit = {self.Unit_2020}'
        return ', '.join( [a, b, c, d, e, f] )
class Abstract(Base):
    """ORM model for a bid abstract document."""
    __tablename__ = "Abstract"
    AbstractID = Column(Integer, primary_key=True, unique=True)
    Year = Column(Integer)
    Processed = Column(String)
    def __str__(self) -> str:
        return f'Abstract(AbstractID={self.AbstractID})'
    def __repr__(self) -> str:
        parts = [
            f'AbstractID = {self.AbstractID}',
            f'Year = {self.Year}',
            f'Processed = {self.Processed}',
        ]
        return ', '.join(parts)
class Contract(Base):
    """ORM model for a let contract, with up to three bidder references."""
    __tablename__ = "Contract"
    ContractID = Column(Integer, primary_key=True, unique=True)
    Year = Column(Integer)
    LetDate = Column(String)
    SPNumber = Column(String)
    District = Column(String)
    County = Column(String)
    # Foreign keys to the three lowest bidders on this contract.
    BidderID_0 = Column(Integer, ForeignKey("Bidder.BidderID"))
    BidderID_1 = Column(Integer, ForeignKey("Bidder.BidderID"))
    BidderID_2 = Column(Integer, ForeignKey("Bidder.BidderID"))
    def __str__(self) -> str:
        return f'Contract(ContractID={self.ContractID})'
    def __repr__(self) -> str:
        a = f'ContractID = {self.ContractID}'
        b = f'Year = {self.Year}'
        c = f'LetDate = {self.LetDate}'
        d = f'SPNumber = {self.SPNumber}'
        e = f'District = {self.District}'
        f = f'County = {self.County}'
        g = f'BidderID_0 = {self.BidderID_0}'
        h = f'BidderID_1 = {self.BidderID_1}'
        i = f'BidderID_2 = {self.BidderID_2}'
        return ', '.join( [a, b, c, d, e, f, g, h, i] )
class Bid(Base):
    """ORM model for one line-item bid: engineer's estimate plus up to
    three bidders' unit/total prices."""
    __tablename__ = "Bid"
    BidID = Column(Integer, primary_key=True, unique=True)
    ContractID = Column(Integer, ForeignKey("Contract.ContractID"))
    ItemID = Column(Integer)
    SpecYear = Column(Integer)
    Quantity = Column(Float)
    Engineer_UnitPrice = Column(Float)
    Engineer_TotalPrice = Column(Float)
    BidderID_0_UnitPrice = Column(Float)
    BidderID_0_TotalPrice = Column(Float)
    BidderID_1_UnitPrice = Column(Float)
    BidderID_1_TotalPrice = Column(Float)
    BidderID_2_UnitPrice = Column(Float)
    BidderID_2_TotalPrice = Column(Float)
    def __str__(self) -> str:
        return f'Bid(BidID={self.BidID})'
    def __repr__(self) -> str:
        a = f'BidID = {self.BidID}'
        b = f'ContractID = {self.ContractID}'
        # BUG FIX: the original interpolated self.ContractID here, so the
        # repr printed the contract id where the item id belongs.
        c = f'ItemID = {self.ItemID}'
        d = f'Quantity = {self.Quantity}'
        e = f'Engineer_UnitPrice = {self.Engineer_UnitPrice}'
        f = f'Engineer_TotalPrice = {self.Engineer_TotalPrice}'
        g = f'BidderID_0_UnitPrice = {self.BidderID_0_UnitPrice}'
        h = f'BidderID_0_TotalPrice = {self.BidderID_0_TotalPrice}'
        i = f'BidderID_1_UnitPrice = {self.BidderID_1_UnitPrice}'
        j = f'BidderID_1_TotalPrice = {self.BidderID_1_TotalPrice}'
        k = f'BidderID_2_UnitPrice = {self.BidderID_2_UnitPrice}'
        l = f'BidderID_2_TotalPrice = {self.BidderID_2_TotalPrice}'
        return ', '.join( [a, b, c, d, e, f, g, h, i, j, k, l] )
class Bidder(Base):
    """ORM model for a contractor that submits bids."""
    __tablename__ = "Bidder"
    BidderID = Column(Integer, primary_key=True, unique=True)
    Name = Column(String)
    def __str__(self) -> str:
        return f'Bidder(Name={self.Name})'
    def __repr__(self) -> str:
        fields = [f'BidderID = {self.BidderID}', f'Name = {self.Name}']
        return ', '.join(fields)
def main():
    """Create every table defined on Base in the configured SQLite file."""
    # Creates blank database file
    Base.metadata.create_all(engine)
if __name__ == '__main__':
main() | [
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.ForeignKey",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column",
"sqlalchemy.engine.create.create_engine"
] | [((282, 314), 'sqlalchemy.engine.create.create_engine', 'create_engine', (['CONNECTION_STRING'], {}), '(CONNECTION_STRING)\n', (295, 314), False, 'from sqlalchemy.engine.create import create_engine\n'), ((326, 346), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['engine'], {}), '(engine)\n', (338, 346), False, 'from sqlalchemy.orm import sessionmaker\n'), ((356, 374), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (372, 374), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((449, 495), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'unique': '(True)'}), '(Integer, primary_key=True, unique=True)\n', (455, 495), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((516, 530), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (522, 530), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((551, 565), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (557, 565), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((586, 600), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (592, 600), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((624, 638), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (630, 638), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((655, 669), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (661, 669), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((1205, 1251), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'unique': '(True)'}), '(Integer, primary_key=True, unique=True)\n', (1211, 1251), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((1272, 1286), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1278, 1286), False, 'from sqlalchemy import Column, 
Integer, String, Float, ForeignKey\n'), ((1307, 1321), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1313, 1321), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((1342, 1356), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1348, 1356), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((1380, 1394), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1386, 1394), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((1411, 1425), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1417, 1425), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((1444, 1459), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (1450, 1459), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((1994, 2040), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'unique': '(True)'}), '(Integer, primary_key=True, unique=True)\n', (2000, 2040), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2052, 2067), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (2058, 2067), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2084, 2098), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2090, 2098), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2393, 2439), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'unique': '(True)'}), '(Integer, primary_key=True, unique=True)\n', (2399, 2439), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2451, 2466), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (2457, 2466), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2481, 2495), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2487, 2495), 
False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2511, 2525), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2517, 2525), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2541, 2555), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2547, 2555), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2569, 2583), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (2575, 2583), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3393, 3439), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'unique': '(True)'}), '(Integer, primary_key=True, unique=True)\n', (3399, 3439), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3521, 3536), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (3527, 3536), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3552, 3567), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (3558, 3567), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3583, 3596), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (3589, 3596), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3622, 3635), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (3628, 3635), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3662, 3675), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (3668, 3675), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3703, 3716), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (3709, 3716), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3745, 3758), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (3751, 3758), False, 'from sqlalchemy import Column, Integer, 
String, Float, ForeignKey\n'), ((3786, 3799), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (3792, 3799), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3828, 3841), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (3834, 3841), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3869, 3882), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (3875, 3882), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3911, 3924), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (3917, 3924), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((4859, 4905), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'unique': '(True)'}), '(Integer, primary_key=True, unique=True)\n', (4865, 4905), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((4917, 4931), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (4923, 4931), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2617, 2646), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Bidder.BidderID"""'], {}), "('Bidder.BidderID')\n", (2627, 2646), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2681, 2710), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Bidder.BidderID"""'], {}), "('Bidder.BidderID')\n", (2691, 2710), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((2745, 2774), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Bidder.BidderID"""'], {}), "('Bidder.BidderID')\n", (2755, 2774), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n'), ((3473, 3506), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""Contract.ContractID"""'], {}), "('Contract.ContractID')\n", (3483, 3506), False, 'from sqlalchemy import Column, Integer, String, Float, ForeignKey\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.ext.flask.middleware import XRayMiddleware
from aws_xray_sdk.core import patch_all
patch_all()
from flask import Flask
from flask import request
from flask_cors import CORS
from datetime import datetime
from elasticsearch import Elasticsearch
import json
import uuid
import os, sys
import pprint
import boto3
import time
import uuid
# Elasticsearch connection settings, taken from the environment at startup.
es_search_domain_scheme = os.environ.get('ES_SEARCH_DOMAIN_SCHEME', 'https')
# Host is mandatory -- a missing variable raises KeyError and aborts startup.
es_search_domain_host = os.environ['ES_SEARCH_DOMAIN_HOST']
# NOTE(review): when the env var is set this is a *string*, while the default
# is the int 443 -- confirm the Elasticsearch client accepts both forms.
es_search_domain_port = os.environ.get('ES_SEARCH_DOMAIN_PORT', 443)
es_products_index_name = 'products'
# Single shared client for the process; used by the search endpoints below.
es = Elasticsearch(
    [es_search_domain_host],
    scheme=es_search_domain_scheme,
    port=es_search_domain_port,
)
# -- Logging
class LoggingMiddleware(object):
    """WSGI middleware that dumps every request environ and every response
    status/headers to the request's ``wsgi.errors`` stream."""

    def __init__(self, app):
        self._app = app

    def __call__(self, environ, resp):
        error_stream = environ['wsgi.errors']
        pprint.pprint(('REQUEST', environ), stream=error_stream)

        def logging_start_response(status, headers, *args):
            # Mirror the response line to the error stream before delegating
            # to the real start_response callable.
            pprint.pprint(('RESPONSE', status, headers), stream=error_stream)
            return resp(status, headers, *args)

        return self._app(environ, logging_start_response)
# -- End Logging
# -- Handlers
app = Flask(__name__)
corps = CORS(app)  # enable cross-origin requests for all routes
# Wire AWS X-Ray request tracing into the Flask app.
xray_recorder.configure(service='Search Service')
XRayMiddleware(app, xray_recorder)
@app.route('/')
def index():
    """Landing/health endpoint; identifies this as the Search Service."""
    return 'Search Service'
@app.route('/search/products', methods=['GET', 'POST'])
def searchProducts():
    """Full-text product search.

    GET: matches ``searchTerm`` against the products index using a dis_max
    query over name/category/style/description and returns a JSON list of
    ``{'itemId': <id>}`` entries. On failure the exception text is returned.
    POST: accepted but not implemented; returns an empty JSON list.
    """
    if request.method == 'GET':
        try:
            searchTerm = request.args.get('searchTerm').lower()
            app.logger.info(searchTerm)
            results = es.search(index = es_products_index_name, body={
                "query": {
                    "dis_max" : {
                        "queries" : [
                            { "wildcard" : { "name" : { "value": '{}*'.format(searchTerm), "boost": 1.2 }}},
                            { "term" : { "category" : searchTerm }},
                            { "term" : { "style" : searchTerm }},
                            { "wildcard" : { "description" : { "value": '{}*'.format(searchTerm), "boost": 0.6 }}}
                        ],
                        "tie_breaker" : 0.7
                    }
                }
            })
            app.logger.info(json.dumps(results))
            found_items = []
            for item in results['hits']['hits']:
                found_items.append({
                    'itemId': item['_id']
                })
            return json.dumps(found_items)
        except Exception as e:
            # NOTE(review): returning str(e) can leak internal details to
            # clients -- consider a generic error payload plus logging.
            app.logger.error(e)
            return str(e)
    if request.method == 'POST':
        app.logger.info("Request Received, Processing")
        # Bug fix: this branch previously fell through without returning,
        # which made Flask raise "view function did not return a valid
        # response" (HTTP 500). Return an empty result set instead.
        return json.dumps([])
@app.route('/similar/products', methods=['GET'])
def similarProducts():
    """Return products similar to ``productId`` using a more_like_this query
    against the products index; response is a JSON list of itemId dicts."""
    try:
        productId = request.args.get('productId')
        app.logger.info(productId)
        # Build the MLT query separately so the search call stays readable.
        mlt_query = {
            "query": {
                "more_like_this": {
                    "fields": ["name", "category", "style", "description"],
                    "like": [{
                        "_index": es_products_index_name,
                        "_id": productId
                    }],
                    "min_term_freq" : 1,
                    "max_query_terms" : 10
                }
            }
        }
        results = es.search(index = es_products_index_name, body=mlt_query)
        app.logger.info(json.dumps(results))
        similar_items = [{'itemId': hit['_id']} for hit in results['hits']['hits']]
        return json.dumps(similar_items)
    except Exception as e:
        app.logger.error(e)
        return str(e)
if __name__ == '__main__':
app.wsgi_app = LoggingMiddleware(app.wsgi_app)
app.run(debug=True,host='0.0.0.0', port=80) | [
"flask.request.args.get",
"aws_xray_sdk.ext.flask.middleware.XRayMiddleware",
"flask_cors.CORS",
"aws_xray_sdk.core.patch_all",
"flask.Flask",
"elasticsearch.Elasticsearch",
"json.dumps",
"os.environ.get",
"aws_xray_sdk.core.xray_recorder.configure",
"pprint.pprint"
] | [((249, 260), 'aws_xray_sdk.core.patch_all', 'patch_all', ([], {}), '()\n', (258, 260), False, 'from aws_xray_sdk.core import patch_all\n'), ((528, 578), 'os.environ.get', 'os.environ.get', (['"""ES_SEARCH_DOMAIN_SCHEME"""', '"""https"""'], {}), "('ES_SEARCH_DOMAIN_SCHEME', 'https')\n", (542, 578), False, 'import os, sys\n'), ((663, 707), 'os.environ.get', 'os.environ.get', (['"""ES_SEARCH_DOMAIN_PORT"""', '(443)'], {}), "('ES_SEARCH_DOMAIN_PORT', 443)\n", (677, 707), False, 'import os, sys\n'), ((750, 853), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['[es_search_domain_host]'], {'scheme': 'es_search_domain_scheme', 'port': 'es_search_domain_port'}), '([es_search_domain_host], scheme=es_search_domain_scheme, port\n =es_search_domain_port)\n', (763, 853), False, 'from elasticsearch import Elasticsearch\n'), ((1369, 1384), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1374, 1384), False, 'from flask import Flask\n'), ((1393, 1402), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (1397, 1402), False, 'from flask_cors import CORS\n'), ((1405, 1454), 'aws_xray_sdk.core.xray_recorder.configure', 'xray_recorder.configure', ([], {'service': '"""Search Service"""'}), "(service='Search Service')\n", (1428, 1454), False, 'from aws_xray_sdk.core import xray_recorder\n'), ((1455, 1489), 'aws_xray_sdk.ext.flask.middleware.XRayMiddleware', 'XRayMiddleware', (['app', 'xray_recorder'], {}), '(app, xray_recorder)\n', (1469, 1489), False, 'from aws_xray_sdk.ext.flask.middleware import XRayMiddleware\n'), ((1054, 1106), 'pprint.pprint', 'pprint.pprint', (["('REQUEST', environ)"], {'stream': 'errorlog'}), "(('REQUEST', environ), stream=errorlog)\n", (1067, 1106), False, 'import pprint\n'), ((2990, 3019), 'flask.request.args.get', 'request.args.get', (['"""productId"""'], {}), "('productId')\n", (3006, 3019), False, 'from flask import request\n'), ((4028, 4051), 'json.dumps', 'json.dumps', (['found_items'], {}), '(found_items)\n', (4038, 4051), False, 
'import json\n'), ((1170, 1231), 'pprint.pprint', 'pprint.pprint', (["('RESPONSE', status, headers)"], {'stream': 'errorlog'}), "(('RESPONSE', status, headers), stream=errorlog)\n", (1183, 1231), False, 'import pprint\n'), ((2684, 2707), 'json.dumps', 'json.dumps', (['found_items'], {}), '(found_items)\n', (2694, 2707), False, 'import json\n'), ((3834, 3853), 'json.dumps', 'json.dumps', (['results'], {}), '(results)\n', (3844, 3853), False, 'import json\n'), ((2466, 2485), 'json.dumps', 'json.dumps', (['results'], {}), '(results)\n', (2476, 2485), False, 'import json\n'), ((1701, 1731), 'flask.request.args.get', 'request.args.get', (['"""searchTerm"""'], {}), "('searchTerm')\n", (1717, 1731), False, 'from flask import request\n')] |
from django.conf.urls import url
from django.conf.urls import patterns
from django.views.generic import TemplateView
# Static template view used only to exercise URL resolution in tests.
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = patterns('',
    # 'nl/' here is a literal path segment, not an i18n language prefix.
    url(r'^nl/foo/', view, name='not-translated'),
)
| [
"django.views.generic.TemplateView.as_view",
"django.conf.urls.url"
] | [((126, 174), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""dummy.html"""'}), "(template_name='dummy.html')\n", (146, 174), False, 'from django.views.generic import TemplateView\n'), ((207, 251), 'django.conf.urls.url', 'url', (['"""^nl/foo/"""', 'view'], {'name': '"""not-translated"""'}), "('^nl/foo/', view, name='not-translated')\n", (210, 251), False, 'from django.conf.urls import url\n')] |
import collections
# Resolve the broadest string type: ``basestring`` on Python 2, ``str`` on 3.
try:
    stringtype = basestring # python 2
except NameError:  # bug fix: bare ``except:`` also swallowed SystemExit etc.
    stringtype = str # python 3

def coerce_to_list(x):
    """Coerce *x* into a list.

    Strings are split on commas and/or whitespace, ``None`` (or any falsy
    value) becomes ``[]``, and any other value is returned unchanged.
    """
    if isinstance(x, stringtype):
        return x.replace(',', ' ').split()
    return x or []
def namedtuple(name, args=None, optional=None):
    """Build a ``collections.namedtuple`` whose *optional* fields default to
    ``None``.

    Both *args* and *optional* may be sequences or comma/space-delimited
    strings (see ``coerce_to_list``). Works on Python 2 and 3 by patching
    the defaults attribute appropriate to the interpreter.
    """
    required_fields = coerce_to_list(args)
    optional_fields = coerce_to_list(optional)
    cls = collections.namedtuple(name, required_fields + optional_fields)
    defaults = tuple([None] * len(optional_fields))
    if hasattr(cls.__new__, 'func_defaults'): # python 2
        cls.__new__.func_defaults = defaults
    elif hasattr(cls.__new__, '__defaults__'): # python 3
        cls.__new__.__defaults__ = defaults
    else:
        raise Exception('???')
    return cls
def optional(fn):
    """Wrap *fn* so that ``None`` passes through instead of being applied.

    The returned callable applies *fn* to non-None values and yields
    ``None`` when given ``None``.
    """
    def apply_unless_none(value):
        return fn(value) if value is not None else None
    return apply_unless_none
| [
"collections.namedtuple"
] | [((357, 402), 'collections.namedtuple', 'collections.namedtuple', (['name', '(args + optional)'], {}), '(name, args + optional)\n', (379, 402), False, 'import collections\n')] |
import fudge
from armstrong.apps.embeds.mixins import TemplatesByEmbedTypeMixin
from .support.models import Parent, Child, TypeModel
from ._utils import TestCase
class TemplateCompareTestMixin(object):
    """Helpers for asserting template path lookup results.

    Expects the consuming test case to define ``tpl_name``, ``type_name``
    and ``type_slug`` attributes (and, for ``compare_templates``, to
    provide ``assertEqual``).
    """

    def path_opts(self, obj, use_fallback=False, use_type=False):
        """Build the substitution dict used by expected path patterns."""
        opts = {
            'base': obj.base_layout_directory,
            'app': obj._meta.app_label,
            'model': obj._meta.object_name.lower(),
            'typemodel': self.type_name if use_type else "fail",
            'type': self.type_slug if use_type else "fail",
        }
        opts['tpl'] = obj.fallback_template_name if use_fallback else self.tpl_name
        return opts

    def compare_templates(self, obj, expected, **kwargs):
        """Assert obj's layout template lookup matches *expected* patterns."""
        substitutions = self.path_opts(obj, **kwargs)
        expanded = [pattern % substitutions for pattern in expected]
        self.assertEqual(obj.get_layout_template_name(self.tpl_name), expanded)
class TemplatesByEmbedTypeTestCase(TemplateCompareTestMixin, TestCase):
    """Exercises TemplatesByEmbedTypeMixin.get_layout_template_name for bare
    mixin instances, model instances (Parent) and model subclasses (Child),
    with and without a valid backend response."""
    def setUp(self):
        # Shared fixtures consumed by TemplateCompareTestMixin.path_opts.
        self.tpl_name = "tpl"
        self.type_name = TypeModel()._meta.object_name.lower()
        self.type_slug = "photo"
    def test_object_requires_response(self):
        # A bare mixin has no 'response' attribute, so the lookup must fail.
        # NOTE(review): assertRaisesRegexp is the deprecated spelling of
        # assertRaisesRegex on modern Python.
        with self.assertRaisesRegexp(AttributeError, "has no attribute 'response'"):
            TemplatesByEmbedTypeMixin().get_layout_template_name(self.tpl_name)
    def test_object_response_checks_validity(self):
        obj = TemplatesByEmbedTypeMixin()
        # fudge verifies that is_valid() is actually invoked on the response.
        obj.response = fudge.Fake().expects('is_valid')
        obj.get_layout_template_name(self.tpl_name)
    def test_non_model_without_response_returns_empty(self):
        obj = TemplatesByEmbedTypeMixin()
        obj.response = None
        self.assertEqual(obj.get_layout_template_name(self.tpl_name), [])
    def test_non_model_with_valid_response_returns_empty(self):
        # Even with a valid response, a non-model object yields no templates.
        obj = TemplatesByEmbedTypeMixin()
        obj.response = fudge.Fake().expects('is_valid').returns(True)
        obj.type = fudge.Fake()
        self.assertEqual(obj.get_layout_template_name(self.tpl_name), [])
    def test_model_without_response_uses_fallback(self):
        obj = Parent()
        expected = ['%(base)s/%(app)s/%(model)s/%(tpl)s.html']
        self.compare_templates(obj, expected, use_fallback=True)
    def test_model_with_invalid_response_uses_fallback(self):
        obj = Parent()
        obj.response = fudge.Fake().expects('is_valid').returns(False)
        expected = ['%(base)s/%(app)s/%(model)s/%(tpl)s.html']
        self.compare_templates(obj, expected, use_fallback=True)
    def test_model_with_valid_response(self):
        # A valid response adds the type-specific template ahead of the
        # model-level one.
        obj = Parent(type=TypeModel(slug=self.type_slug))
        obj.response = fudge.Fake().expects('is_valid').returns(True)
        expected = [
            '%(base)s/%(app)s/%(typemodel)s/%(type)s/%(tpl)s.html',
            '%(base)s/%(app)s/%(model)s/%(tpl)s.html']
        self.compare_templates(obj, expected, use_type=True)
    def test_model_can_specify_templates_that_dont_fallback(self):
        obj = Parent()
        obj.templates_without_fallbacks.append(self.tpl_name)
        expected = ['%(base)s/%(app)s/%(model)s/%(tpl)s.html']
        self.compare_templates(obj, expected)
    def test_model_can_change_fallback_template(self):
        obj = Parent()
        obj.fallback_template_name = 'usethisone'
        expected = ['%(base)s/%(app)s/%(model)s/%(tpl)s.html']
        self.compare_templates(obj, expected, use_fallback=True)
    def test_child_model_without_response_uses_fallback(self):
        # Subclasses search their own template dir before the parent's.
        obj = Child()
        expected = [
            '%(base)s/%(app)s/child/%(tpl)s.html',
            '%(base)s/%(app)s/parent/%(tpl)s.html']
        self.compare_templates(obj, expected, use_fallback=True)
    def test_child_model_with_invalid_response_uses_fallback(self):
        obj = Child()
        obj.response = fudge.Fake().expects('is_valid').returns(False)
        expected = [
            '%(base)s/%(app)s/child/%(tpl)s.html',
            '%(base)s/%(app)s/parent/%(tpl)s.html']
        self.compare_templates(obj, expected, use_fallback=True)
    def test_child_model_with_valid_response(self):
        obj = Child(type=TypeModel(slug=self.type_slug))
        obj.response = fudge.Fake().expects('is_valid').returns(True)
        expected = [
            '%(base)s/%(app)s/%(typemodel)s/%(type)s/%(tpl)s.html',
            '%(base)s/%(app)s/child/%(tpl)s.html',
            '%(base)s/%(app)s/parent/%(tpl)s.html']
        self.compare_templates(obj, expected, use_type=True)
| [
"fudge.Fake",
"armstrong.apps.embeds.mixins.TemplatesByEmbedTypeMixin"
] | [((1375, 1402), 'armstrong.apps.embeds.mixins.TemplatesByEmbedTypeMixin', 'TemplatesByEmbedTypeMixin', ([], {}), '()\n', (1400, 1402), False, 'from armstrong.apps.embeds.mixins import TemplatesByEmbedTypeMixin\n'), ((1587, 1614), 'armstrong.apps.embeds.mixins.TemplatesByEmbedTypeMixin', 'TemplatesByEmbedTypeMixin', ([], {}), '()\n', (1612, 1614), False, 'from armstrong.apps.embeds.mixins import TemplatesByEmbedTypeMixin\n'), ((1796, 1823), 'armstrong.apps.embeds.mixins.TemplatesByEmbedTypeMixin', 'TemplatesByEmbedTypeMixin', ([], {}), '()\n', (1821, 1823), False, 'from armstrong.apps.embeds.mixins import TemplatesByEmbedTypeMixin\n'), ((1913, 1925), 'fudge.Fake', 'fudge.Fake', ([], {}), '()\n', (1923, 1925), False, 'import fudge\n'), ((1426, 1438), 'fudge.Fake', 'fudge.Fake', ([], {}), '()\n', (1436, 1438), False, 'import fudge\n'), ((1240, 1267), 'armstrong.apps.embeds.mixins.TemplatesByEmbedTypeMixin', 'TemplatesByEmbedTypeMixin', ([], {}), '()\n', (1265, 1267), False, 'from armstrong.apps.embeds.mixins import TemplatesByEmbedTypeMixin\n'), ((1847, 1859), 'fudge.Fake', 'fudge.Fake', ([], {}), '()\n', (1857, 1859), False, 'import fudge\n'), ((2318, 2330), 'fudge.Fake', 'fudge.Fake', ([], {}), '()\n', (2328, 2330), False, 'import fudge\n'), ((2623, 2635), 'fudge.Fake', 'fudge.Fake', ([], {}), '()\n', (2633, 2635), False, 'import fudge\n'), ((3784, 3796), 'fudge.Fake', 'fudge.Fake', ([], {}), '()\n', (3794, 3796), False, 'import fudge\n'), ((4154, 4166), 'fudge.Fake', 'fudge.Fake', ([], {}), '()\n', (4164, 4166), False, 'import fudge\n')] |
from twitchchatbot.lib.commands.parsing import commands
import json
def addcom(user, args):
    """Register a new chat command.

    Args:
        user: name of the user adding the command (used in the reply).
        args: [command_name, word, word, ...] -- the first element is the
            command name (without '!'), the rest is the response text.

    Returns:
        dict with a 'msg' key describing the outcome.
    """
    queueEvent = {}
    if len(args) < 2:
        queueEvent['msg'] = "Proper usage: !addcom <cmd> <Text to send>"
    else:
        commandHead = "!" + args[0]
        # Bug fix: the original did ``del args[0]``, mutating the caller's
        # list as a side effect; slice instead of mutating the argument.
        commands[commandHead] = {
            'limit' : 10,
            'userbadge' : 'moderator',
            'last_used' : 0,
            'return' : " ".join(args[1:]),
        }
        # Persist the full command table so it survives restarts.
        with open("commands.json", "w") as f:
            json.dump(commands, f, indent=1)
        queueEvent['msg'] = "%s has added the %s command!" %( \
            user, commandHead)
    return queueEvent
| [
"json.dump"
] | [((629, 661), 'json.dump', 'json.dump', (['commands', 'f'], {'indent': '(1)'}), '(commands, f, indent=1)\n', (638, 661), False, 'import json\n')] |
from distutils.core import setup
import py2exe
# if py2exe complains "can't find P", try one of the following workarounds:
#
# a. py2exe doesn't support zipped eggs - http://www.py2exe.org/index.cgi/ExeWithEggs
# You should give the --always-unzip option to easy_install, or you can use setup.py directly
# $ python setup.py install --record install.log --single-version-externally-managed
# Don't forget to remove the previous zipped egg.
#
# b. Add an empty __init__.py to the P/ top-level directory, if it's missing
# - this is due to a bug (or misleading documentation) in python's imp.find_module()
# Build a Windows console executable for fog-client via py2exe.
setup(
    console=["fog-client"],  # entry-point script compiled to fog-client.exe
    zipfile="py2exe-fog-client.zip",  # bundled library archive name
    options={
        "py2exe": {
            # Modules py2exe cannot discover automatically (dynamic imports).
            "includes": ["pyptlib", "twisted", "txsocksx"],
            # Full packages to copy in, including their data/submodules.
            "packages": ["ometa", "terml", "zope.interface"],
        },
    },
)
| [
"distutils.core.setup"
] | [((610, 801), 'distutils.core.setup', 'setup', ([], {'console': "['fog-client']", 'zipfile': '"""py2exe-fog-client.zip"""', 'options': "{'py2exe': {'includes': ['pyptlib', 'twisted', 'txsocksx'], 'packages': [\n 'ometa', 'terml', 'zope.interface']}}"}), "(console=['fog-client'], zipfile='py2exe-fog-client.zip', options={\n 'py2exe': {'includes': ['pyptlib', 'twisted', 'txsocksx'], 'packages':\n ['ometa', 'terml', 'zope.interface']}})\n", (615, 801), False, 'from distutils.core import setup\n')] |
import os
import shutil
import argparse
import torch
from torch import nn
from torchvision.utils import save_image, make_grid
import matplotlib.pyplot as plt
import numpy as np
import cv2 as cv
import utils.utils as utils
from utils.constants import *
class GenerationMode(enum.Enum):
    """Selects how imagery is produced by generate_new_images."""
    # Bug fix: the original values had trailing commas ("SINGLE_IMAGE = 0,"),
    # which made every member's value a 1-tuple like (0,) instead of an int.
    # Identity comparisons still worked, but .value was surprising.
    SINGLE_IMAGE = 0
    INTERPOLATION = 1
    VECTOR_ARITHMETIC = 2
def postprocess_generated_img(generated_img_tensor):
    """Convert a generated image tensor (batch-first CHW) into an HWC numpy
    array min-max scaled into [0, 1]; single-channel output is tiled to RGB.

    Only the 0th batch element is used.
    """
    assert isinstance(generated_img_tensor, torch.Tensor), f'Expected PyTorch tensor but got {type(generated_img_tensor)}.'

    # Pull the 0th batch element to the CPU and reorder CHW -> HWC.
    img = np.moveaxis(generated_img_tensor.to('cpu').numpy()[0], 0, 2)

    # Tile grayscale (e.g. MNIST generators) into a 3-channel image.
    if img.shape[2] == 1:
        img = np.repeat(img, 3, axis=2)

    # Min-max normalize out of the generator's tanh range into [0, 1].
    img -= np.min(img)
    img /= np.max(img)

    return img
return generated_img
def generate_from_random_latent_vector(generator, cgan_digit=None):
    """Sample a Gaussian latent vector and run it through *generator*.

    When *cgan_digit* is given (conditional GAN generators), the digit is
    one-hot encoded and passed as the conditioning label.

    Returns:
        (post-processed image, latent vector as a numpy array)
    """
    device = next(generator.parameters()).device
    with torch.no_grad():
        latent_vector = utils.get_gaussian_latent_batch(1, device)
        if cgan_digit is None:
            generated_img = postprocess_generated_img(generator(latent_vector))
        else:
            # Condition the generator on the requested digit via a one-hot label.
            ref_label = torch.tensor([cgan_digit], dtype=torch.int64)
            one_hot = torch.nn.functional.one_hot(ref_label, MNIST_NUM_CLASSES).type(torch.FloatTensor).to(device)
            generated_img = postprocess_generated_img(generator(latent_vector, one_hot))
    return generated_img, latent_vector.to('cpu').numpy()[0]
def generate_from_specified_numpy_latent_vector(generator, latent_vector):
    """Run a fixed numpy latent vector through *generator* and post-process
    the resulting image (see ``postprocess_generated_img``)."""
    assert isinstance(latent_vector, np.ndarray), f'Expected latent vector to be numpy array but got {type(latent_vector)}.'

    device = next(generator.parameters()).device
    with torch.no_grad():
        # Add the batch dimension the generator expects.
        batch = torch.unsqueeze(torch.tensor(latent_vector, device=device), dim=0)
        return postprocess_generated_img(generator(batch))
def linear_interpolation(t, p0, p1):
    """Linearly interpolate between p0 and p1; t=0 yields p0, t=1 yields p1."""
    delta = p1 - p0
    return p0 + t * delta
def spherical_interpolation(t, p0, p1):
    """Spherical interpolation (slerp) between two n-dimensional vectors.

    Formula: https://en.wikipedia.org/wiki/Slerp

    Args:
        t (float): interpolation parameter, clamped to [0, 1].
        p0, p1 (numpy arrays): the endpoint vectors.

    Returns:
        The spherically interpolated vector.
    """
    # Outside [0, 1] just return the corresponding endpoint.
    if t <= 0:
        return p0
    if t >= 1:
        return p1
    if np.allclose(p0, p1):
        # Degenerate case: identical endpoints, nothing to interpolate.
        return p0

    # Angle between the two directions (computed on unit vectors).
    unit0 = p0 / np.linalg.norm(p0)
    unit1 = p1 / np.linalg.norm(p1)
    omega = np.arccos(np.dot(unit0, unit1))
    sin_omega = np.sin(omega)

    weight0 = np.sin((1.0 - t) * omega) / sin_omega
    weight1 = np.sin(t * omega) / sin_omega
    return weight0 * p0 + weight1 * p1
def display_vector_arithmetic_results(imgs_to_display):
    """Render the 7 images from the latent vector arithmetic demo in a
    fixed grid layout: three wide strips (group averages on the right) and
    the final 'happy man' result centered at the bottom.

    Args:
        imgs_to_display: exactly 7 images, ordered to match ``titles``.
    """
    fig = plt.figure(figsize=(6, 6))
    title_fontsize = 'x-small'
    num_display_imgs = 7
    titles = ['happy women', 'happy woman (avg)', 'neutral women', 'neutral woman (avg)', 'neutral men', 'neutral man (avg)', 'result - happy man']
    # Object-dtype array so each cell can hold a matplotlib Axes.
    ax = np.zeros(num_display_imgs, dtype=object)
    assert len(imgs_to_display) == num_display_imgs, f'Expected {num_display_imgs} got {len(imgs_to_display)} images.'
    # 5x4 grid: rows 0-2 are (strip, avg) pairs; rows 3-4 host the result.
    gs = fig.add_gridspec(5, 4, left=0.02, right=0.98, wspace=0.05, hspace=0.3)
    ax[0] = fig.add_subplot(gs[0, :3])
    ax[1] = fig.add_subplot(gs[0, 3])
    ax[2] = fig.add_subplot(gs[1, :3])
    ax[3] = fig.add_subplot(gs[1, 3])
    ax[4] = fig.add_subplot(gs[2, :3])
    ax[5] = fig.add_subplot(gs[2, 3])
    ax[6] = fig.add_subplot(gs[3:, 1:3])
    for i in range(num_display_imgs):
        # Upscale 3x with nearest-neighbor to keep pixels crisp.
        ax[i].imshow(cv.resize(imgs_to_display[i], (0, 0), fx=3, fy=3, interpolation=cv.INTER_NEAREST))
        ax[i].set_title(titles[i], fontsize=title_fontsize)
        ax[i].tick_params(which='both', bottom=False, left=False, labelleft=False, labelbottom=False)
    plt.show()
def generate_new_images(model_name, cgan_digit=None, generation_mode=True, slerp=True, a=None, b=None, should_display=True):
""" Generate imagery using pre-trained generator (using vanilla_generator_000000.pth by default)
Args:
model_name (str): model name you want to use (default lookup location is BINARIES_PATH).
cgan_digit (int): if specified generate that exact digit.
generation_mode (enum): generate a single image from a random vector, interpolate between the 2 chosen latent
vectors, or perform arithmetic over latent vectors (note: not every mode is supported for every model type)
slerp (bool): if True use spherical interpolation otherwise use linear interpolation.
a, b (numpy arrays): latent vectors, if set to None you'll be prompted to choose images you like,
and use corresponding latent vectors instead.
should_display (bool): Display the generated images before saving them.
"""
model_path = os.path.join(BINARIES_PATH, model_name)
assert os.path.exists(model_path), f'Could not find the model {model_path}. You first need to train your generator.'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Prepare the correct (vanilla, cGAN, DCGAN, ...) model, load the weights and put the model into evaluation mode
model_state = torch.load(model_path)
gan_type = model_state["gan_type"]
print(f'Found {gan_type} GAN!')
_, generator = utils.get_gan(device, gan_type)
generator.load_state_dict(model_state["state_dict"], strict=True)
generator.eval()
# Generate a single image, save it and potentially display it
if generation_mode == GenerationMode.SINGLE_IMAGE:
generated_imgs_path = os.path.join(DATA_DIR_PATH, 'generated_imagery')
os.makedirs(generated_imgs_path, exist_ok=True)
generated_img, _ = generate_from_random_latent_vector(generator, cgan_digit if gan_type == GANType.CGAN.name else None)
utils.save_and_maybe_display_image(generated_imgs_path, generated_img, should_display=should_display)
# Pick 2 images you like between which you'd like to interpolate (by typing 'y' into console)
elif generation_mode == GenerationMode.INTERPOLATION:
assert gan_type == GANType.VANILLA.name or gan_type ==GANType.DCGAN.name, f'Got {gan_type} but only VANILLA/DCGAN are supported for the interpolation mode.'
interpolation_name = "spherical" if slerp else "linear"
interpolation_fn = spherical_interpolation if slerp else linear_interpolation
grid_interpolated_imgs_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery') # combined results dir
decomposed_interpolated_imgs_path = os.path.join(grid_interpolated_imgs_path, f'tmp_{gan_type}_{interpolation_name}_dump') # dump separate results
if os.path.exists(decomposed_interpolated_imgs_path):
shutil.rmtree(decomposed_interpolated_imgs_path)
os.makedirs(grid_interpolated_imgs_path, exist_ok=True)
os.makedirs(decomposed_interpolated_imgs_path, exist_ok=True)
latent_vector_a, latent_vector_b = [None, None]
# If a and b were not specified loop until the user picked the 2 images he/she likes.
found_good_vectors_flag = False
if a is None or b is None:
while not found_good_vectors_flag:
generated_img, latent_vector = generate_from_random_latent_vector(generator)
plt.imshow(generated_img); plt.title('Do you like this image?'); plt.show()
user_input = input("Do you like this generated image? [y for yes]:")
if user_input == 'y':
if latent_vector_a is None:
latent_vector_a = latent_vector
print('Saved the first latent vector.')
elif latent_vector_b is None:
latent_vector_b = latent_vector
print('Saved the second latent vector.')
found_good_vectors_flag = True
else:
print('Well lets generate a new one!')
continue
else:
print('Skipping latent vectors selection section and using cached ones.')
latent_vector_a, latent_vector_b = [a, b]
# Cache latent vectors
if a is None or b is None:
np.save(os.path.join(grid_interpolated_imgs_path, 'a.npy'), latent_vector_a)
np.save(os.path.join(grid_interpolated_imgs_path, 'b.npy'), latent_vector_b)
print(f'Lets do some {interpolation_name} interpolation!')
interpolation_resolution = 47 # number of images between the vectors a and b
num_interpolated_imgs = interpolation_resolution + 2 # + 2 so that we include a and b
generated_imgs = []
for i in range(num_interpolated_imgs):
t = i / (num_interpolated_imgs - 1) # goes from 0. to 1.
current_latent_vector = interpolation_fn(t, latent_vector_a, latent_vector_b)
generated_img = generate_from_specified_numpy_latent_vector(generator, current_latent_vector)
print(f'Generated image [{i+1}/{num_interpolated_imgs}].')
utils.save_and_maybe_display_image(decomposed_interpolated_imgs_path, generated_img, should_display=should_display)
# Move from channel last to channel first (CHW->HWC), PyTorch's save_image function expects BCHW format
generated_imgs.append(torch.tensor(np.moveaxis(generated_img, 2, 0)))
interpolated_block_img = torch.stack(generated_imgs)
interpolated_block_img = nn.Upsample(scale_factor=2.5, mode='nearest')(interpolated_block_img)
save_image(interpolated_block_img, os.path.join(grid_interpolated_imgs_path, utils.get_available_file_name(grid_interpolated_imgs_path)), nrow=int(np.sqrt(num_interpolated_imgs)))
elif generation_mode == GenerationMode.VECTOR_ARITHMETIC:
assert gan_type == GANType.DCGAN.name, f'Got {gan_type} but only DCGAN is supported for arithmetic mode.'
# Generate num_options face images and create a grid image from them
num_options = 100
generated_imgs = []
latent_vectors = []
padding = 2
for i in range(num_options):
generated_img, latent_vector = generate_from_random_latent_vector(generator)
generated_imgs.append(torch.tensor(np.moveaxis(generated_img, 2, 0))) # make_grid expects CHW format
latent_vectors.append(latent_vector)
stacked_tensor_imgs = torch.stack(generated_imgs)
final_tensor_img = make_grid(stacked_tensor_imgs, nrow=int(np.sqrt(num_options)), padding=padding)
display_img = np.moveaxis(final_tensor_img.numpy(), 0, 2)
# For storing latent vectors
num_of_vectors_per_category = 3
happy_woman_latent_vectors = []
neutral_woman_latent_vectors = []
neutral_man_latent_vectors = []
# Make it easy - by clicking on the plot you pick the image.
def onclick(event):
if event.dblclick:
pass
else: # single click
if event.button == 1: # left click
x_coord = event.xdata
y_coord = event.ydata
column = int(x_coord / (64 + padding))
row = int(y_coord / (64 + padding))
# Store latent vector corresponding to the image that the user clicked on.
if len(happy_woman_latent_vectors) < num_of_vectors_per_category:
happy_woman_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(happy_woman_latent_vectors)}. happy woman.')
elif len(neutral_woman_latent_vectors) < num_of_vectors_per_category:
neutral_woman_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(neutral_woman_latent_vectors)}. neutral woman.')
elif len(neutral_man_latent_vectors) < num_of_vectors_per_category:
neutral_man_latent_vectors.append(latent_vectors[10*row + column])
print(f'Picked image row={row}, column={column} as {len(neutral_man_latent_vectors)}. neutral man.')
else:
plt.close()
plt.figure(figsize=(10, 10))
plt.imshow(display_img)
# This is just an example you could also pick 3 neutral woman images with sunglasses, etc.
plt.title('Click on 3 happy women, 3 neutral women and \n 3 neutral men images (order matters!)')
cid = plt.gcf().canvas.mpl_connect('button_press_event', onclick)
plt.show()
plt.gcf().canvas.mpl_disconnect(cid)
print('Done choosing images.')
# Calculate the average latent vector for every category (happy woman, neutral woman, neutral man)
happy_woman_avg_latent_vector = np.mean(np.array(happy_woman_latent_vectors), axis=0)
neutral_woman_avg_latent_vector = np.mean(np.array(neutral_woman_latent_vectors), axis=0)
neutral_man_avg_latent_vector = np.mean(np.array(neutral_man_latent_vectors), axis=0)
# By subtracting neutral woman from the happy woman we capture the "vector of smiling". Adding that vector
# to a neutral man we get a happy man's latent vector! Our latent space has amazingly beautiful structure!
happy_man_latent_vector = neutral_man_avg_latent_vector + (happy_woman_avg_latent_vector - neutral_woman_avg_latent_vector)
# Generate images from these latent vectors
happy_women_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in happy_woman_latent_vectors])
neutral_women_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in neutral_woman_latent_vectors])
neutral_men_imgs = np.hstack([generate_from_specified_numpy_latent_vector(generator, v) for v in neutral_man_latent_vectors])
happy_woman_avg_img = generate_from_specified_numpy_latent_vector(generator, happy_woman_avg_latent_vector)
neutral_woman_avg_img = generate_from_specified_numpy_latent_vector(generator, neutral_woman_avg_latent_vector)
neutral_man_avg_img = generate_from_specified_numpy_latent_vector(generator, neutral_man_avg_latent_vector)
happy_man_img = generate_from_specified_numpy_latent_vector(generator, happy_man_latent_vector)
display_vector_arithmetic_results([happy_women_imgs, happy_woman_avg_img, neutral_women_imgs, neutral_woman_avg_img, neutral_men_imgs, neutral_man_avg_img, happy_man_img])
else:
raise Exception(f'Generation mode not yet supported.')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, help="Pre-trained generator model name", default=r'VANILLA_000000.pth')
parser.add_argument("--cgan_digit", type=int, help="Used only for cGAN - generate specified digit", default=3)
parser.add_argument("--generation_mode", type=bool, help="Pick between 3 generation modes", default=GenerationMode.SINGLE_IMAGE)
parser.add_argument("--slerp", type=bool, help="Should use spherical interpolation (default No)", default=False)
parser.add_argument("--should_display", type=bool, help="Display intermediate results", default=True)
args = parser.parse_args()
# The first time you start generation in the interpolation mode it will cache a and b
# which you'll choose the first time you run the it.
a_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery', 'a.npy')
b_path = os.path.join(DATA_DIR_PATH, 'interpolated_imagery', 'b.npy')
latent_vector_a = np.load(a_path) if os.path.exists(a_path) else None
latent_vector_b = np.load(b_path) if os.path.exists(b_path) else None
generate_new_images(
args.model_name,
args.cgan_digit,
generation_mode=args.generation_mode,
slerp=args.slerp,
a=latent_vector_a,
b=latent_vector_b,
should_display=args.should_display)
| [
"numpy.sqrt",
"utils.utils.save_and_maybe_display_image",
"numpy.array",
"torch.cuda.is_available",
"numpy.linalg.norm",
"numpy.sin",
"numpy.moveaxis",
"utils.utils.get_available_file_name",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.repeat",
"argparse.ArgumentParser",
"numpy.max",... | [((1093, 1114), 'numpy.min', 'np.min', (['generated_img'], {}), '(generated_img)\n', (1099, 1114), True, 'import numpy as np\n'), ((1136, 1157), 'numpy.max', 'np.max', (['generated_img'], {}), '(generated_img)\n', (1142, 1157), True, 'import numpy as np\n'), ((3254, 3267), 'numpy.sin', 'np.sin', (['omega'], {}), '(omega)\n', (3260, 3267), True, 'import numpy as np\n'), ((3446, 3472), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (3456, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3686, 3726), 'numpy.zeros', 'np.zeros', (['num_display_imgs'], {'dtype': 'object'}), '(num_display_imgs, dtype=object)\n', (3694, 3726), True, 'import numpy as np\n'), ((4509, 4519), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4517, 4519), True, 'import matplotlib.pyplot as plt\n'), ((5520, 5559), 'os.path.join', 'os.path.join', (['BINARIES_PATH', 'model_name'], {}), '(BINARIES_PATH, model_name)\n', (5532, 5559), False, 'import os\n'), ((5571, 5597), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (5585, 5597), False, 'import os\n'), ((5892, 5914), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (5902, 5914), False, 'import torch\n'), ((6009, 6040), 'utils.utils.get_gan', 'utils.get_gan', (['device', 'gan_type'], {}), '(device, gan_type)\n', (6022, 6040), True, 'import utils.utils as utils\n'), ((15467, 15492), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15490, 15492), False, 'import argparse\n'), ((16278, 16338), 'os.path.join', 'os.path.join', (['DATA_DIR_PATH', '"""interpolated_imagery"""', '"""a.npy"""'], {}), "(DATA_DIR_PATH, 'interpolated_imagery', 'a.npy')\n", (16290, 16338), False, 'import os\n'), ((16352, 16412), 'os.path.join', 'os.path.join', (['DATA_DIR_PATH', '"""interpolated_imagery"""', '"""b.npy"""'], {}), "(DATA_DIR_PATH, 'interpolated_imagery', 'b.npy')\n", (16364, 16412), False, 'import os\n'), ((924, 959), 
'numpy.repeat', 'np.repeat', (['generated_img', '(3)'], {'axis': '(2)'}), '(generated_img, 3, axis=2)\n', (933, 959), True, 'import numpy as np\n'), ((1263, 1278), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1276, 1278), False, 'import torch\n'), ((2177, 2192), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2190, 2192), False, 'import torch\n'), ((6284, 6332), 'os.path.join', 'os.path.join', (['DATA_DIR_PATH', '"""generated_imagery"""'], {}), "(DATA_DIR_PATH, 'generated_imagery')\n", (6296, 6332), False, 'import os\n'), ((6341, 6388), 'os.makedirs', 'os.makedirs', (['generated_imgs_path'], {'exist_ok': '(True)'}), '(generated_imgs_path, exist_ok=True)\n', (6352, 6388), False, 'import os\n'), ((6526, 6631), 'utils.utils.save_and_maybe_display_image', 'utils.save_and_maybe_display_image', (['generated_imgs_path', 'generated_img'], {'should_display': 'should_display'}), '(generated_imgs_path, generated_img,\n should_display=should_display)\n', (6560, 6631), True, 'import utils.utils as utils\n'), ((16454, 16476), 'os.path.exists', 'os.path.exists', (['a_path'], {}), '(a_path)\n', (16468, 16476), False, 'import os\n'), ((16435, 16450), 'numpy.load', 'np.load', (['a_path'], {}), '(a_path)\n', (16442, 16450), True, 'import numpy as np\n'), ((16528, 16550), 'os.path.exists', 'os.path.exists', (['b_path'], {}), '(b_path)\n', (16542, 16550), False, 'import os\n'), ((16509, 16524), 'numpy.load', 'np.load', (['b_path'], {}), '(b_path)\n', (16516, 16524), True, 'import numpy as np\n'), ((1586, 1631), 'torch.tensor', 'torch.tensor', (['[cgan_digit]'], {'dtype': 'torch.int64'}), '([cgan_digit], dtype=torch.int64)\n', (1598, 1631), False, 'import torch\n'), ((3038, 3057), 'numpy.allclose', 'np.allclose', (['p0', 'p1'], {}), '(p0, p1)\n', (3049, 3057), True, 'import numpy as np\n'), ((4259, 4345), 'cv2.resize', 'cv.resize', (['imgs_to_display[i]', '(0, 0)'], {'fx': '(3)', 'fy': '(3)', 'interpolation': 'cv.INTER_NEAREST'}), '(imgs_to_display[i], (0, 0), fx=3, fy=3, 
interpolation=cv.\n INTER_NEAREST)\n', (4268, 4345), True, 'import cv2 as cv\n'), ((5718, 5743), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5741, 5743), False, 'import torch\n'), ((7140, 7191), 'os.path.join', 'os.path.join', (['DATA_DIR_PATH', '"""interpolated_imagery"""'], {}), "(DATA_DIR_PATH, 'interpolated_imagery')\n", (7152, 7191), False, 'import os\n'), ((7260, 7350), 'os.path.join', 'os.path.join', (['grid_interpolated_imgs_path', 'f"""tmp_{gan_type}_{interpolation_name}_dump"""'], {}), "(grid_interpolated_imgs_path,\n f'tmp_{gan_type}_{interpolation_name}_dump')\n", (7272, 7350), False, 'import os\n'), ((7383, 7432), 'os.path.exists', 'os.path.exists', (['decomposed_interpolated_imgs_path'], {}), '(decomposed_interpolated_imgs_path)\n', (7397, 7432), False, 'import os\n'), ((7503, 7558), 'os.makedirs', 'os.makedirs', (['grid_interpolated_imgs_path'], {'exist_ok': '(True)'}), '(grid_interpolated_imgs_path, exist_ok=True)\n', (7514, 7558), False, 'import os\n'), ((7567, 7628), 'os.makedirs', 'os.makedirs', (['decomposed_interpolated_imgs_path'], {'exist_ok': '(True)'}), '(decomposed_interpolated_imgs_path, exist_ok=True)\n', (7578, 7628), False, 'import os\n'), ((10138, 10165), 'torch.stack', 'torch.stack', (['generated_imgs'], {}), '(generated_imgs)\n', (10149, 10165), False, 'import torch\n'), ((3192, 3210), 'numpy.linalg.norm', 'np.linalg.norm', (['p0'], {}), '(p0)\n', (3206, 3210), True, 'import numpy as np\n'), ((3217, 3235), 'numpy.linalg.norm', 'np.linalg.norm', (['p1'], {}), '(p1)\n', (3231, 3235), True, 'import numpy as np\n'), ((3298, 3323), 'numpy.sin', 'np.sin', (['((1.0 - t) * omega)'], {}), '((1.0 - t) * omega)\n', (3304, 3323), True, 'import numpy as np\n'), ((3343, 3360), 'numpy.sin', 'np.sin', (['(t * omega)'], {}), '(t * omega)\n', (3349, 3360), True, 'import numpy as np\n'), ((7446, 7494), 'shutil.rmtree', 'shutil.rmtree', (['decomposed_interpolated_imgs_path'], {}), '(decomposed_interpolated_imgs_path)\n', 
(7459, 7494), False, 'import shutil\n'), ((9789, 9908), 'utils.utils.save_and_maybe_display_image', 'utils.save_and_maybe_display_image', (['decomposed_interpolated_imgs_path', 'generated_img'], {'should_display': 'should_display'}), '(decomposed_interpolated_imgs_path,\n generated_img, should_display=should_display)\n', (9823, 9908), True, 'import utils.utils as utils\n'), ((10199, 10244), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2.5)', 'mode': '"""nearest"""'}), "(scale_factor=2.5, mode='nearest')\n", (10210, 10244), False, 'from torch import nn\n'), ((11133, 11160), 'torch.stack', 'torch.stack', (['generated_imgs'], {}), '(generated_imgs)\n', (11144, 11160), False, 'import torch\n'), ((13054, 13082), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (13064, 13082), True, 'import matplotlib.pyplot as plt\n'), ((13091, 13114), 'matplotlib.pyplot.imshow', 'plt.imshow', (['display_img'], {}), '(display_img)\n', (13101, 13114), True, 'import matplotlib.pyplot as plt\n'), ((13222, 13332), 'matplotlib.pyplot.title', 'plt.title', (['"""Click on 3 happy women, 3 neutral women and \n 3 neutral men images (order matters!)"""'], {}), '(\n """Click on 3 happy women, 3 neutral women and \n 3 neutral men images (order matters!)"""\n )\n', (13231, 13332), True, 'import matplotlib.pyplot as plt\n'), ((13402, 13412), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13410, 13412), True, 'import matplotlib.pyplot as plt\n'), ((8012, 8037), 'matplotlib.pyplot.imshow', 'plt.imshow', (['generated_img'], {}), '(generated_img)\n', (8022, 8037), True, 'import matplotlib.pyplot as plt\n'), ((8039, 8075), 'matplotlib.pyplot.title', 'plt.title', (['"""Do you like this image?"""'], {}), "('Do you like this image?')\n", (8048, 8075), True, 'import matplotlib.pyplot as plt\n'), ((8077, 8087), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8085, 8087), True, 'import matplotlib.pyplot as plt\n'), ((8956, 9006), 
'os.path.join', 'os.path.join', (['grid_interpolated_imgs_path', '"""a.npy"""'], {}), "(grid_interpolated_imgs_path, 'a.npy')\n", (8968, 9006), False, 'import os\n'), ((9045, 9095), 'os.path.join', 'os.path.join', (['grid_interpolated_imgs_path', '"""b.npy"""'], {}), "(grid_interpolated_imgs_path, 'b.npy')\n", (9057, 9095), False, 'import os\n'), ((10354, 10412), 'utils.utils.get_available_file_name', 'utils.get_available_file_name', (['grid_interpolated_imgs_path'], {}), '(grid_interpolated_imgs_path)\n', (10383, 10412), True, 'import utils.utils as utils\n'), ((13653, 13689), 'numpy.array', 'np.array', (['happy_woman_latent_vectors'], {}), '(happy_woman_latent_vectors)\n', (13661, 13689), True, 'import numpy as np\n'), ((13749, 13787), 'numpy.array', 'np.array', (['neutral_woman_latent_vectors'], {}), '(neutral_woman_latent_vectors)\n', (13757, 13787), True, 'import numpy as np\n'), ((13845, 13881), 'numpy.array', 'np.array', (['neutral_man_latent_vectors'], {}), '(neutral_man_latent_vectors)\n', (13853, 13881), True, 'import numpy as np\n'), ((10069, 10101), 'numpy.moveaxis', 'np.moveaxis', (['generated_img', '(2)', '(0)'], {}), '(generated_img, 2, 0)\n', (10080, 10101), True, 'import numpy as np\n'), ((10424, 10454), 'numpy.sqrt', 'np.sqrt', (['num_interpolated_imgs'], {}), '(num_interpolated_imgs)\n', (10431, 10454), True, 'import numpy as np\n'), ((1673, 1730), 'torch.nn.functional.one_hot', 'torch.nn.functional.one_hot', (['ref_label', 'MNIST_NUM_CLASSES'], {}), '(ref_label, MNIST_NUM_CLASSES)\n', (1700, 1730), False, 'import torch\n'), ((10987, 11019), 'numpy.moveaxis', 'np.moveaxis', (['generated_img', '(2)', '(0)'], {}), '(generated_img, 2, 0)\n', (10998, 11019), True, 'import numpy as np\n'), ((11228, 11248), 'numpy.sqrt', 'np.sqrt', (['num_options'], {}), '(num_options)\n', (11235, 11248), True, 'import numpy as np\n'), ((13334, 13343), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13341, 13343), True, 'import matplotlib.pyplot as plt\n'), 
((13421, 13430), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13428, 13430), True, 'import matplotlib.pyplot as plt\n'), ((13033, 13044), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13042, 13044), True, 'import matplotlib.pyplot as plt\n')] |
from direct.gui.DirectGui import OnscreenText, DirectButton
from panda3d.core import *
from direct.interval.IntervalGlobal import *
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase import ToontownGlobals
class DMenuDisclaimer(DirectObject):
notify = directNotify.newCategory('DisclaimerScreen')
def __init__(self):
DirectObject.__init__(self)
base.setBackgroundColor(0, 0, 0)
disclaimerText = "Project Altis is a not-for-profit fanmade parody made under Fair Use. Project Altis is not affiliated with The Walt Disney Company and/or the Disney Interactive Media Group (collectively referred to as \"Disney\") by clicking I agree you hereby agree that you acknowledge this fact."
self.disclaimer = OnscreenText(text = disclaimerText, font = ToontownGlobals.getMinnieFont(), style = 3, wordwrap = 30, scale = .08, pos = (0, .3, 0))
gui = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_mainGui.bam')
yesUp = gui.find('**/tt_t_gui_mat_okUp')
yesDown = gui.find('**/tt_t_gui_mat_okDown')
noUp = gui.find('**/tt_t_gui_mat_closeUp')
noDown = gui.find('**/tt_t_gui_mat_closeDown')
self.accept = DirectButton(parent = aspect2d, relief = None, image = (yesUp, yesDown, yesUp), image_scale = (0.6, 0.6, 0.6), image1_scale = (0.7, 0.7, 0.7), image2_scale = (0.7, 0.7, 0.7), text = ('', 'I Agree', 'I Agree'), text_pos=(0, -0.175), text_style = 3, text_scale=0.08, pos = (.4, 0, -.5), command = self.accept)
self.deny = DirectButton(parent = aspect2d, relief = None, image = (noUp, noDown, noUp), image_scale = (0.6, 0.6, 0.6), image1_scale = (0.7, 0.7, 0.7), image2_scale = (0.7, 0.7, 0.7), text = ('', 'I Disagree', 'I Disagree'), text_pos=(0, -0.175), text_style = 3, text_scale=0.08, pos = (-.4, 0, -.5), command = self.deny)
def accept(self):
self.disclaimer['text'] = 'Loading...'
self.accept.destroy()
self.deny.destroy()
base.graphicsEngine.renderFrame()
messenger.send("AgreeToGame")
base.cr.hasAccepted = True
self.disclaimer.removeNode()
def deny(self):
base.exitFunc() | [
"toontown.toonbase.ToontownGlobals.getMinnieFont",
"direct.gui.DirectGui.DirectButton",
"direct.showbase.DirectObject.DirectObject.__init__"
] | [((365, 392), 'direct.showbase.DirectObject.DirectObject.__init__', 'DirectObject.__init__', (['self'], {}), '(self)\n', (386, 392), False, 'from direct.showbase.DirectObject import DirectObject\n'), ((1220, 1522), 'direct.gui.DirectGui.DirectButton', 'DirectButton', ([], {'parent': 'aspect2d', 'relief': 'None', 'image': '(yesUp, yesDown, yesUp)', 'image_scale': '(0.6, 0.6, 0.6)', 'image1_scale': '(0.7, 0.7, 0.7)', 'image2_scale': '(0.7, 0.7, 0.7)', 'text': "('', 'I Agree', 'I Agree')", 'text_pos': '(0, -0.175)', 'text_style': '(3)', 'text_scale': '(0.08)', 'pos': '(0.4, 0, -0.5)', 'command': 'self.accept'}), "(parent=aspect2d, relief=None, image=(yesUp, yesDown, yesUp),\n image_scale=(0.6, 0.6, 0.6), image1_scale=(0.7, 0.7, 0.7), image2_scale\n =(0.7, 0.7, 0.7), text=('', 'I Agree', 'I Agree'), text_pos=(0, -0.175),\n text_style=3, text_scale=0.08, pos=(0.4, 0, -0.5), command=self.accept)\n", (1232, 1522), False, 'from direct.gui.DirectGui import OnscreenText, DirectButton\n'), ((1557, 1867), 'direct.gui.DirectGui.DirectButton', 'DirectButton', ([], {'parent': 'aspect2d', 'relief': 'None', 'image': '(noUp, noDown, noUp)', 'image_scale': '(0.6, 0.6, 0.6)', 'image1_scale': '(0.7, 0.7, 0.7)', 'image2_scale': '(0.7, 0.7, 0.7)', 'text': "('', 'I Disagree', 'I Disagree')", 'text_pos': '(0, -0.175)', 'text_style': '(3)', 'text_scale': '(0.08)', 'pos': '(-0.4, 0, -0.5)', 'command': 'self.deny'}), "(parent=aspect2d, relief=None, image=(noUp, noDown, noUp),\n image_scale=(0.6, 0.6, 0.6), image1_scale=(0.7, 0.7, 0.7), image2_scale\n =(0.7, 0.7, 0.7), text=('', 'I Disagree', 'I Disagree'), text_pos=(0, -\n 0.175), text_style=3, text_scale=0.08, pos=(-0.4, 0, -0.5), command=\n self.deny)\n", (1569, 1867), False, 'from direct.gui.DirectGui import OnscreenText, DirectButton\n'), ((813, 844), 'toontown.toonbase.ToontownGlobals.getMinnieFont', 'ToontownGlobals.getMinnieFont', ([], {}), '()\n', (842, 844), False, 'from toontown.toonbase import ToontownGlobals\n')] |
"""a module solely for finding how add_a_list and add_tuple_list compare.
it's effectively the empirical proof for how LongIntTable.add() chooses
the fastest method with it's get_fastest_method() function."""
from __future__ import print_function
from math import log10
import time
import random
from os import getcwd
from itertools import cycle
import matplotlib.pyplot as plt
import numpy as np
from dicetables.additiveevents import AdditiveEvents
WELCOME_TXT = 'hi'
def input_py_2_and_3(question):
    """Prompt the user for a line of text under Python 2 or Python 3.

    Python 2's ``raw_input`` is tried first; looking it up raises
    NameError on Python 3, where the built-in ``input`` is used instead.
    """
    try:
        prompt = raw_input
    except NameError:
        prompt = input
    return prompt(question)
def generate_tuple_list_with_increasing_number_of_events(first_event, start_length, event_occurrences,
                                                         len_increase_step=1):
    """Yield an ever-longer [(event, occurrences), ...] list.

    The first yield holds ``start_length`` consecutive events starting at
    ``first_event``, each with ``event_occurrences`` occurrences.  Every
    later yield extends the SAME list object (mutated in place) by
    ``len_increase_step`` further consecutive events.

    :param first_event: value of the lowest event
    :param start_length: number of events in the first yielded list
    :param event_occurrences: occurrences assigned to every event
    :param len_increase_step: =1 events appended between yields
    :return: generator
    """
    events = [(first_event + offset, event_occurrences) for offset in range(start_length)]
    while True:
        yield events
        next_event = events[-1][0] + 1
        events.extend((next_event + step, event_occurrences) for step in range(len_increase_step))
def generate_tuple_list_with_increasing_occurrences(first_event, start_length, increment, exponential_increase=True):
    """Yield fixed-length [(event, occurrences), ...] lists with growing occurrences.

    The event values never change; only the (uniform) occurrence count
    grows.  The first yield always uses 1 occurrence per event.  After
    that the count is int(2 ** growth) when ``exponential_increase`` is
    True, else int(growth), where growth rises by ``increment`` per yield.

    :param first_event: value of the lowest event
    :param start_length: number of consecutive events per list
    :param increment: amount added to the growth factor between yields
    :param exponential_increase: =True grow as a power of two
    :return: generator
    """
    event_values = range(first_event, first_event + start_length)
    current_list = [(value, 1) for value in event_values]
    growth = 0.0
    while True:
        yield current_list
        growth += increment
        occurrences = int(2 ** growth) if exponential_increase else int(growth)
        current_list = [(value, occurrences) for value in event_values]
def generate_tuple_list_with_increasing_gaps(first_event, start_length, event_occurrences=1, gaps_per_iteration=1,
                                             randomize=True):
    """
    Yield a [(event, occurrences), ...] list in which more and more events
    are zeroed out, so the ratio of event range to non-zero events grows.

    :param first_event: value of the lowest event
    :param start_length: number of consecutive events in the initial list
    :param event_occurrences: =1 occurrences assigned to each live event
    :param gaps_per_iteration: =1 events zeroed out between yields
    :param randomize: =True pick the search start position at random
    :return: generator
    """
    tuple_list_of_events = [(first_event + index, event_occurrences) for index in range(start_length)]
    # Each created gap removes `event_occurrences` from the total, so this
    # loop stops once only two live events would remain.
    while sum([event[1] for event in tuple_list_of_events]) > 2 * event_occurrences:
        yield tuple_list_of_events
        for _ in range(gaps_per_iteration):
            if randomize:
                # start the search away from the endpoints (indices 0 and -1)
                start_search_index = random.randrange(1, start_length - 1)
            else:
                start_search_index = len(tuple_list_of_events) - 2
            only_occurrences = [event[1] for event in tuple_list_of_events]
            # walk the search start leftwards until a live event exists
            # between it and the final event (exclusive), or index 0 is hit
            while not only_occurrences[start_search_index:-1].count(event_occurrences) and start_search_index:
                start_search_index -= 1
            index_to_make_zero = only_occurrences[start_search_index:].index(event_occurrences) + start_search_index
            event_value = tuple_list_of_events[index_to_make_zero][0]
            # tuples are immutable, so replace the whole (event, count) pair
            tuple_list_of_events[index_to_make_zero] = (event_value, 0)
def get_generator(variable_name, first_event, start_length,
                  growth_increment=1.,
                  event_occurrences=1,
                  len_increase_step=1,
                  gaps_per_iteration=1,
                  randomize=True,
                  exponential_increase=True):
    """Dispatch to the tuple-list generator matching ``variable_name``.

    :param variable_name: 'list_length', 'event_occurrences', 'increasing_gaps'
    :param first_event: value of the lowest event
    :param start_length: number of events in the first yielded list
    :param growth_increment: =1.0 (only used by 'event_occurrences')
    :param event_occurrences: =1 (used by 'list_length' and 'increasing_gaps')
    :param len_increase_step: =1 (only used by 'list_length')
    :param gaps_per_iteration: =1 (only used by 'increasing_gaps')
    :param randomize: =True (only used by 'increasing_gaps')
    :param exponential_increase: =True (only used by 'event_occurrences')
    :return: the matching generator
    :raises ValueError: for an unrecognized ``variable_name`` (the
        original silently returned None, hiding typos from callers)
    """
    if variable_name == 'list_length':
        return generate_tuple_list_with_increasing_number_of_events(first_event, start_length,
                                                                    event_occurrences, len_increase_step)
    if variable_name == 'event_occurrences':
        return generate_tuple_list_with_increasing_occurrences(first_event, start_length,
                                                               growth_increment, exponential_increase)
    if variable_name == 'increasing_gaps':
        return generate_tuple_list_with_increasing_gaps(first_event, start_length,
                                                        event_occurrences, gaps_per_iteration, randomize)
    raise ValueError('unknown variable_name: {!r}'.format(variable_name))
def one_time_trial(combine_times, events_tuples, input_dict_size=1, use_exponential_occurrences=True):
    """Run one timed comparison of the control method vs IndexedValues.

    :param combine_times: how many combines to perform per method
    :param events_tuples: [(event, occurrences), ...] list to combine
    :param input_dict_size: =1 size of the starting dictionary
    :param use_exponential_occurrences: =True occurrences in the start
        dict grow as powers of two
    :return: ((list_len, occurrences, log10(occurrences), range/events,
        start dict size), control time, IndexedValues time)
    """
    # only echo the list when its occurrence counts are small enough to print sanely
    if events_tuples[0][1] < 10 ** 100:
        print('one_time_trial prepped list [{} .. {}]'.format(events_tuples[0], events_tuples[-1]))
    input_dict = get_input_dict(input_dict_size, use_exponential_occurrences)
    # drop zero-occurrence events (gaps) before timing
    non_zero_events = [event for event in events_tuples if event[1]]
    control_time, indexed_values_time = get_control_and_indexed_values_times(
        combine_times, non_zero_events, input_dict)
    occurrences = non_zero_events[0][1]
    event_values = [event[0] for event in non_zero_events]
    list_length = float(len(non_zero_events))
    y_axis_variables = (
        list_length,
        float(occurrences),
        log10(occurrences),
        (max(event_values) - min(event_values) + 1) / list_length,
        float(input_dict_size),
    )
    return y_axis_variables, control_time, indexed_values_time
def get_input_dict(input_dict_size, use_exponential_occurrences):
    """Build the starting {event: occurrences} dictionary for a trial.

    Events run 0 .. input_dict_size - 1.  Occurrences are either
    exponential (1 + 2 ** (event % 1000)) or linear (1 + event % 1000);
    the ``% 1000`` caps growth so very large dicts stay computable.

    :param input_dict_size: number of events in the dictionary
    :param use_exponential_occurrences: choose exponential vs linear growth
    :return: dict mapping event -> occurrences
    """
    # dict comprehensions are clearer and faster than dict([...]) of pairs
    if use_exponential_occurrences:
        return {event: 1 + 2 ** (event % 1000) for event in range(input_dict_size)}
    return {event: 1 + event % 1000 for event in range(input_dict_size)}
def get_control_and_indexed_values_times(combine_times, events_tuples, input_dict):
    """Time `combine_times` combines of `events_tuples` onto `input_dict`.

    The control method is whichever AdditiveEvents combine method
    get_control_action() picks; the challenger is
    combine_by_indexed_values().

    :param combine_times: number of combines per method
    :param events_tuples: [(event, occurrences), ...] list, no zero counts
    :param input_dict: starting {event: occurrences} dictionary
    :return: (control_time, indexed_values_time) in seconds
    """
    control_events_action = get_control_action(input_dict, events_tuples)
    events_for_indexed_values = AdditiveEvents(input_dict)
    events_to_add = AdditiveEvents(dict(events_tuples))
    # time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # time.perf_counter() (3.3+) is the recommended interval timer.
    indexed_values_start = time.perf_counter()
    events_for_indexed_values.combine_by_indexed_values(events_to_add, combine_times)
    indexed_values_time = time.perf_counter() - indexed_values_start
    control_start = time.perf_counter()
    control_events_action(events_to_add, combine_times)
    control_time = time.perf_counter() - control_start
    return control_time, indexed_values_time
def get_control_action(input_dict, events_tuples):
    """Return the bound control combine method for `events_tuples`.

    Picks combine_by_flattened_list when every event occurs exactly once
    (per get_control_method_str), otherwise combine_by_dictionary.
    """
    control_events = AdditiveEvents(input_dict)
    if get_control_method_str(events_tuples) == 'flattened_list':
        return control_events.combine_by_flattened_list
    return control_events.combine_by_dictionary
def get_control_method_str(prepped_list):
    """Name the control combine method for a prepped tuple list.

    'flattened_list' when the first pair's occurrence count is 1 (the
    lists used here have uniform counts), otherwise 'tuple_list'.
    """
    return 'flattened_list' if prepped_list[0][1] == 1 else 'tuple_list'
def time_trial_vary_start_dict(events_tuple_list, input_dict_start_size=1000, input_dict_downward_step=5,
                               number_of_adds=1, use_exponential_occurrences=True):
    """
    Time control vs IndexedValues combines while shrinking the starting
    dictionary from input_dict_start_size down towards zero.

    :param events_tuple_list: [(event, occurrences), ...] list to combine
    :param input_dict_start_size: =1000
    :param input_dict_downward_step: =5
    :param number_of_adds: =1 combines performed per trial
    :param use_exponential_occurrences: =True
    :return: variable_values, variable_name, control_times, indexed_values_times
    """
    adds_per_trial = number_of_adds
    variable_name = 'start_dict_size'
    variable_values = []
    control_times = []
    indexed_values_times = []
    print('please wait for the down to reach zero')
    input_dict_size = input_dict_start_size
    while input_dict_size > 0:
        print('adds {}'.format(adds_per_trial))
        y_axis, control_time, indexed_values_time = one_time_trial(
            adds_per_trial,
            events_tuple_list,
            input_dict_size=input_dict_size,
            use_exponential_occurrences=use_exponential_occurrences
        )
        input_dict_size -= input_dict_downward_step
        # index 4 of one_time_trial's y-axis tuple is the start dict size
        variable = y_axis[4]
        print('results: variable: {:.2}, control: {:.3e}, IndexedValues: {:.3e}'.format(variable,
                                                                                       control_time,
                                                                                       indexed_values_time))
        print('count down: {}\n'.format(input_dict_size))
        variable_values.append(variable)
        control_times.append(control_time)
        indexed_values_times.append(indexed_values_time)
    return variable_values, variable_name, control_times, indexed_values_times
def time_trial(generator, variable_name, adds_per_trial=1, automatic_adds_per_trial=False, input_dict_size=1,
               number_of_data_pts=100):
    """
    Time control vs IndexedValues combines over successive tuple lists
    pulled from `generator`, stopping when it is exhausted or
    number_of_data_pts trials have run.

    :param generator: yields [(event, occurrences), ...] lists (see get_generator)
    :param variable_name: 'list_length', 'event_occurrences_linear', 'event_occurrences', 'increasing_gaps'
    :param adds_per_trial: =1
    :param automatic_adds_per_trial: =False scale the add count so each
        trial does roughly constant total work
    :param input_dict_size: =1
    :param number_of_data_pts: =100
    :return: variable_values, variable_name, control_times, indexed_values_times
    """
    # work budget: (list length) * (number of adds) held roughly constant
    tuple_list_length_times_add_times = 2200
    variable_values = []
    control_times = []
    indexed_values_times = []
    count = number_of_data_pts
    print('please wait for the count-up/down to reach zero')
    while count > 0:
        try:
            tuple_list_for_trial = next(generator)
        except StopIteration:
            # some generators (e.g. increasing_gaps) are finite
            break
        if automatic_adds_per_trial:
            adds_per_trial = int(max(1, tuple_list_length_times_add_times / len(tuple_list_for_trial)))
        print('adds {}'.format(adds_per_trial))
        y_axis, control_time, indexed_values_time = one_time_trial(adds_per_trial, tuple_list_for_trial,
                                                                  input_dict_size=input_dict_size)
        # pick the y-axis value matching variable_name; this order mirrors
        # the first four entries of one_time_trial's returned tuple
        variable_order = ['list_length', 'event_occurrences_linear', 'event_occurrences', 'increasing_gaps']
        index = variable_order.index(variable_name)
        variable = y_axis[index]
        print('results: variable: {:.2}, control: {:.3e}, IndexedValues: {:.3e}'.format(variable,
                                                                                       control_time,
                                                                                       indexed_values_time))
        print('count down: {}\n'.format(count))
        count -= 1
        variable_values.append(variable)
        control_times.append(control_time)
        indexed_values_times.append(indexed_values_time)
    return variable_values, variable_name, control_times, indexed_values_times
def plot_trial_with_ratio(variable_values, variable_name, control_times, iv_times, title='none', figure=1,
                          style='bo-', label='', base_line=False):
    """Plot (control time / IndexedValues time) against the trial variable.

    A ratio above 1.0 means IndexedValues was faster than the control.

    :param variable_values: x-axis values
    :param variable_name: 'list_length', 'event_occurrences',
        'event_occurrences_linear', 'increasing_gaps', 'start_dict_size'
    :param control_times: timings for the control combine method
    :param iv_times: timings for combine_by_indexed_values
    :param title: plot title
    :param figure: =1 unused; kept for interface compatibility
    :param style: ='bo-' matplotlib line style for the ratio curve
    :param label: ='' legend label for the ratio curve
    :param base_line: =False also draw the y == 1.0 equal-speed line
    :return: None
    """
    plt.ion()
    # element-wise ratio of control time over IndexedValues time
    speed_ratios = [control / indexed for control, indexed in zip(control_times, iv_times)]
    plt.plot(variable_values, speed_ratios, style, label=label)
    if base_line:
        equality_line = [1.0] * len(control_times)
        plt.plot(variable_values, equality_line, 'g-', label='equal speed')
    plt.ylabel('speed of indexed values over speed of control')
    x_labels = {'list_length': 'size of tuple list',
                'event_occurrences': '10 ** exponent event occurrences',
                'event_occurrences_linear': 'event occurrences',
                'increasing_gaps': 'ratio of events range to non-zero events',
                'start_dict_size': 'number of events in starting dictionary'}
    plt.xlabel(x_labels[variable_name])
    plt.legend()
    plt.title(title)
    plt.pause(0.01)
def plot_trial_two_lines(variable_values, variable_name, control_times, iv_times, title='none', figure=1):
    """
    Plot the control and IndexedValues timings as two separate lines,
    overlay their linear fits, and return the x value where they cross.

    :param variable_values: x-axis values
    :param variable_name: 'list_length', 'event_occurrences', 'increasing_gaps', 'start_dict_size'
    :param control_times: timings for the control combine method
    :param iv_times: timings for combine_by_indexed_values
    :param title: plot title (intersection appended)
    :param figure: =1 matplotlib figure number to draw into
    :return: intersection x value of the two linear fits
    """
    plt.ion()
    use_figure = plt.figure(figure)
    # clear any previous contents of this figure
    use_figure.clf()
    plt.plot(variable_values, control_times, 'bo-', label='control')
    plt.plot(variable_values, iv_times, 'r*-', label='IndexedValues')
    plt.ylabel('time')
    x_labels = {'list_length': 'size of tuple list',
                'event_occurrences': '10 ** exponent event occurrences',
                'increasing_gaps': 'ratio of events range to non-zero events',
                'start_dict_size': 'number of events in starting dictionary'}
    plt.xlabel(x_labels[variable_name])
    plt.legend()
    intersection, control_fit, iv_fit = get_poly_fit_and_intersection(variable_values, control_times, iv_times)
    title += '\nintersection = {}'.format(intersection)
    plt.title(title)
    # overlay both linear fits in cyan
    plt.plot(variable_values, control_fit, 'c-')
    plt.plot(variable_values, iv_fit, 'c-')
    plt.pause(0.01)
    return intersection
def get_poly_fit_and_intersection(variable_values, control_times, iv_times):
    """Fit a line to each timing series and find where the lines cross.

    :param variable_values: x values common to both series
    :param control_times: y values of the control series
    :param iv_times: y values of the IndexedValues series
    :return: (intersection_x, control_fit_ys, iv_fit_ys) where each fit
        list is the fitted line evaluated at every x in variable_values
    """
    def fit_line(times):
        # degree-1 polyfit -> (slope, constant)
        return np.polyfit(variable_values, times, 1)

    control_slope, control_constant = fit_line(control_times)
    iv_slope, iv_constant = fit_line(iv_times)
    intersection = (control_constant - iv_constant) / (iv_slope - control_slope)
    control_poly_fit_values = [control_slope * x + control_constant for x in variable_values]
    iv_poly_fit_values = [iv_slope * x + iv_constant for x in variable_values]
    return intersection, control_poly_fit_values, iv_poly_fit_values
def get_welcome():
    """Return the contents of welcome_message.txt, or an apology string.

    The file is looked for relative to the current working directory.
    Any I/O failure (missing file, unreadable, ...) yields the fallback
    message instead of raising.
    """
    try:
        # NOTE(review): '\\' is a Windows-only separator; os.path.join
        # would be portable, but is kept to preserve existing behavior.
        welcome_file_name = getcwd() + '\\' + 'welcome_message.txt'
        # `with` guarantees the handle is closed (the original leaked it)
        with open(welcome_file_name, 'r') as welcome_file:
            welcome_message = welcome_file.read()
    except IOError:
        welcome_message = 'took a guess where "welcome_' \
                          'message.txt" was, and I was wrong.'
    return welcome_message
def get_int(question):
    """Keep prompting until the user types an int; "q" exits the program."""
    while True:
        response = input_py_2_and_3(question + '\n>>> ')
        if response == 'q':
            raise SystemExit
        try:
            return int(response)
        except ValueError:
            print('must be int OR "q" to quit')
def get_answer(question, min_val, max_val):
    """Ask for an int and clamp the reply into [min_val, max_val]."""
    prompt = '{} between {} and {}'.format(question, min_val, max_val)
    reply = get_int(prompt)
    # clamp: first raise to min_val, then cap at max_val
    return min(max_val, max(min_val, reply))
def get_plot_style_generator():
    """Yield an endless sequence of matplotlib format strings (e.g. 'bo-').

    Colors and point markers cycle with different periods (7 vs 11), so
    consecutive styles stay visually distinct for a long run of plots.
    """
    markers = cycle(('o', '<', '>', 'v', 's', 'p', '*',
                     '+', 'x', 'D', 'd'))
    hues = cycle(('b', 'y', 'r', 'c', 'm', 'k', 'g'))
    while True:
        yield '{}{}-'.format(next(hues), next(markers))
def do_trials_vary_start_dict(add_list_len=10, occurrences_are_many=False, use_exponential_occurrences=True,
                              adds_list=(1, 2, 5)):
    """Plot timing trials where the size of the starting dictionary varies.

    :param add_list_len: =10
    :param occurrences_are_many: =False
    :param use_exponential_occurrences: =False
    :param adds_list: =(1, 2, 5)
    :return:
    """
    styles = get_plot_style_generator()
    occurrences = 10 if occurrences_are_many else 1
    events_generator = get_generator('list_length', 0, add_list_len, event_occurrences=occurrences)
    events_tuples = next(events_generator)
    last_add = adds_list[-1]
    for number_of_adds in adds_list:
        plot_title = ('vary size of start dict. number of adds = {}\n'
                      'input occurrences = {}. input list length = {}').format(number_of_adds, occurrences,
                                                                              add_list_len)
        trial_results = time_trial_vary_start_dict(events_tuples, input_dict_start_size=1000,
                                                   input_dict_downward_step=10, number_of_adds=number_of_adds,
                                                   use_exponential_occurrences=use_exponential_occurrences)
        # only the final trial draws the "equal speed" base line
        plot_trial_with_ratio(*trial_results, figure=1, title=plot_title,
                              label='add: {}'.format(number_of_adds),
                              style=next(styles), base_line=(number_of_adds == last_add))
def do_trials_vary_event_occurrences(add_list_len=10, start_dict_size=1, adds_list=(1, 2, 5), exponential_growth=True):
    """Plot timing trials where the number of event occurrences grows.

    :param add_list_len: =10
    :param start_dict_size: =1
    :param adds_list: =(1, 2, 5)
    :param exponential_growth: =True
    :return:
    """
    styles = get_plot_style_generator()
    # growth mode is identical for every trial, so decide it once up front
    if exponential_growth:
        increment, trial_variable = 0.2, 'event_occurrences'
    else:
        increment, trial_variable = 1, 'event_occurrences_linear'
    last_add = adds_list[-1]
    for number_of_adds in adds_list:
        occurrences_generator = get_generator('event_occurrences', 0, add_list_len, growth_increment=increment,
                                              exponential_increase=exponential_growth)
        trial_results = time_trial(occurrences_generator, trial_variable, adds_per_trial=number_of_adds,
                                   input_dict_size=start_dict_size, number_of_data_pts=100)
        plot_title = ('increasing event occurrences.\n'
                      'starting dict size={}. input list length = {}').format(start_dict_size, add_list_len)
        plot_trial_with_ratio(*trial_results, figure=1, title=plot_title,
                              label='add: {}'.format(number_of_adds),
                              style=next(styles), base_line=(number_of_adds == last_add))
def do_trials_vary_list_length(start_dict_size=1, occurrences_are_many=False, adds_list=(1, 2, 5)):
    """Plot timing trials where the length of the added tuple list grows.

    :param start_dict_size: =1
    :param occurrences_are_many: =False
    :param adds_list: =(1, 2, 4)
    :return:
    """
    styles = get_plot_style_generator()
    occurrences = 10 if occurrences_are_many else 1
    last_add = adds_list[-1]
    for number_of_adds in adds_list:
        length_generator = get_generator('list_length', 0, 2, event_occurrences=occurrences, len_increase_step=1)
        trial_results = time_trial(length_generator, 'list_length', adds_per_trial=number_of_adds,
                                   input_dict_size=start_dict_size, number_of_data_pts=100)
        plot_title = ('increasing list length.\n'
                      'starting dict size={}. input list occurrences = {}').format(start_dict_size, occurrences)
        # only the final trial draws the "equal speed" base line
        plot_trial_with_ratio(*trial_results, figure=1, title=plot_title,
                              label='add: {}'.format(number_of_adds),
                              style=next(styles), base_line=(number_of_adds == last_add))
def do_trials_vary_gaps_in_list(add_list_len=100, start_dict_size=1, occurrences_are_many=False, randomize_gaps=True,
                                adds_list=(1, 2, 5)):
    """Plot timing trials where gaps of zero-occurrence events open in the list.

    :param add_list_len: =100
    :param start_dict_size: =1
    :param occurrences_are_many: =False
    :param randomize_gaps: =True
    :param adds_list: =(1, 2, 5)
    :return:
    """
    styles = get_plot_style_generator()
    occurrences = 10 if occurrences_are_many else 1
    # scale the number of gaps opened per step with the list size
    gaps_per_iteration = max(1, add_list_len // 100)
    last_add = adds_list[-1]
    for number_of_adds in adds_list:
        gaps_generator = get_generator('increasing_gaps', 0, add_list_len, event_occurrences=occurrences,
                                       gaps_per_iteration=gaps_per_iteration, randomize=randomize_gaps)
        trial_results = time_trial(gaps_generator, 'increasing_gaps', adds_per_trial=number_of_adds,
                                   input_dict_size=start_dict_size, number_of_data_pts=100)
        plot_title = ('making many gaps in list.\n'
                      'starting dict size={}. input list length: {}, occurrences: {}').format(start_dict_size,
                                                                                             add_list_len,
                                                                                             occurrences)
        plot_trial_with_ratio(*trial_results, figure=1, title=plot_title,
                              label='add: {}'.format(number_of_adds),
                              style=next(styles), base_line=(number_of_adds == last_add))
def graphing_ui():
    """a UI to demonstrate add speeds

    Loops forever: each pass opens a fresh figure, asks which variable to
    sweep, collects its keyword arguments, and runs the matching
    do_trials_* function.  get_int (via get_answer) raises SystemExit
    when the user enters "q".
    """
    print(WELCOME_TXT)
    # reference list of the legal sweep-variable names:
    """
    'list_length', 'event_occurrences', 'increasing_gaps', 'dict_size'
    """
    plt_figure = 1
    while True:
        # every iteration plots into a new matplotlib figure number
        plt.figure(plt_figure)
        plt_figure += 1
        plt.ion()
        variable_choice = get_answer('enter "1" for varying input events\' length\n' +
                                     'enter "2" for varying input events\' # of occurrences\n' +
                                     'enter "3" for varying input events\' gaps in values\n' +
                                     'enter "4" for varying the size of the start dictionary',
                                     1, 4)
        # the two dicts map the 1-4 menu choice to a name and its action
        variable_dict = {1: 'list_length',
                         2: 'event_occurrences',
                         3: 'increasing_gaps',
                         4: 'dict_size'}
        action_dict = {1: do_trials_vary_list_length,
                       2: do_trials_vary_event_occurrences,
                       3: do_trials_vary_gaps_in_list,
                       4: do_trials_vary_start_dict}
        variable = variable_dict[variable_choice]
        action = action_dict[variable_choice]
        print('chose {}'.format(variable))
        input_variables = get_kwargs(variable)
        action(**input_variables)
        plt.pause(0.1)
def get_kwargs(request):
    """Interactively collect the keyword arguments for one do_trials_* call.

    ``request`` selects which questions get asked; a (0, 1) range marks a
    yes/no question whose reply is converted to bool.  An 'adds_list'
    entry is always included in the returned dict.
    """
    keys = ['start_dict_size', 'add_list_len', 'occurrences_are_many', 'exponential_growth']
    questions = ['what size for starting dictionary?',
                 'how large a list to add?',
                 'should the list have many occurrences? 1=True, 0=False',
                 'should the occurrences increase exponentially? 1=True, 0=False'
                 ]
    min_max = [(1, 2000), (2, 500), (0, 1), (0, 1), (0, 1)]
    if request == 'dict_size':
        # the start-dict sweep only tolerates short add-lists
        min_max[1] = (2, 100)
    indices_by_request = {'list_length': (0, 2),
                          'event_occurrences': (0, 1, 3),
                          'increasing_gaps': (0, 1, 2),
                          'dict_size': (1, 2)}
    answers = {}
    for key_index in indices_by_request[request]:
        reply = get_answer(questions[key_index], *min_max[key_index])
        if min_max[key_index] == (0, 1):
            reply = bool(reply)
        answers[keys[key_index]] = reply
    if request == 'list_length':
        answers['adds_list'] = [1, 2, 3, 4, 5]
    else:
        answers['adds_list'] = get_adds_list(answers)
    return answers
def get_adds_list(dictionary):
    """Choose how many adds to sweep based on start-dict and add-list sizes.

    Larger inputs get a shorter adds list so trials stay fast.  Returns a
    prefix of [1, 2, 3, 4, 5, 10, 50, 100, 500] ending at the chosen cap.
    """
    start_size = dictionary.get('start_dict_size', 1000)
    add_list_size = dictionary['add_list_len']
    complete_add_list = [1, 2, 3, 4, 5, 10, 50, 100, 500]
    # (max list size, allowed cap) pairs, tried in order; small start
    # dicts tolerate more adds than large ones
    if start_size <= 100:
        size_caps = [(3, 500), (6, 100), (9, 50), (20, 10), (10000, 5)]
    else:
        size_caps = [(4, 50), (9, 10), (10000, 5)]
    max_adds = 5
    for size_limit, cap in size_caps:
        if add_list_size <= size_limit:
            max_adds = cap
            break
    return complete_add_list[: complete_add_list.index(max_adds) + 1]
def get_tuple_list(size, many_occurrences=False, step=1):
    """Build [(event, occurrences), ...] for events in range(0, size, step).

    Each event gets 10 occurrences when many_occurrences is truthy, else 1.
    """
    occurrences = 10 if many_occurrences else 1
    return [(event, occurrences) for event in range(0, size, step)]
def get_indexed_advantage_ratio(start_dict_size, adds, tuple_list_sizes, many_occurrences):
    """Return (control time / IndexedValues time) for one trial setup.

    A ratio above 1.0 means the IndexedValues method was faster.
    """
    events_tuples = get_tuple_list(tuple_list_sizes, many_occurrences)
    starting_dict = get_input_dict(start_dict_size, True)
    control_time, indexed_time = get_control_and_indexed_values_times(adds, events_tuples, starting_dict)
    return control_time / indexed_time
def get_data_list(many_occurrences):
    """Run the full grid of timing trials and return the result rows.

    The first row is the header; every other row is
    (adds, dict size, list size, many_occurrences flag, speed ratio),
    all floats.  Combinations with add_time * tuple_size > 4000 are
    skipped to keep the total run time bounded.
    """
    header = ('ADDS', 'DICT SIZE', 'LIST SIZE', 'OCCUR MANY', 'RESULT')
    adds = [1, 2, 3, 4, 5, 10, 20, 50, 100, 500, 1000, 2000]
    start_dict_sizes = [1, 10, 50, 100, 200, 500, 1000, 2000, 5000]
    tuple_list_sizes = [2, 3, 4, 6, 8, 10, 20, 50, 100]
    all_data = [header]
    for add_time in adds:
        print(add_time)  # crude progress indicator for a long run
        for start_size in start_dict_sizes:
            for tuple_size in tuple_list_sizes:
                if add_time * tuple_size > 4000:
                    continue
                ratio = get_indexed_advantage_ratio(start_size, add_time, tuple_size, many_occurrences)
                all_data.append((float(add_time), float(start_size), float(tuple_size),
                                 float(many_occurrences), ratio))
    return all_data
def data_grouper(data_list, index_priority=(0, 1, 2, 3, 4)):
    """Reorder each row's columns by index_priority and sort the data rows.

    The first row (the labels) is reordered too but stays first; the
    remaining rows are sorted lexicographically in the new column order.
    """
    reordered = [tuple(row[index] for index in index_priority) for row in data_list]
    return [reordered[0]] + sorted(reordered[1:])
def get_result_str(data_list):
    """Format grouped trial data as a text table.

    ``data_list[0]`` must be the label row (containing 'RESULT' and
    'OCCUR MANY'); every other row is a tuple of floats in the same
    order.  A full header is repeated whenever the first column changes,
    a light separator is drawn whenever the second column changes, and
    rows whose RESULT exceeds 1.0 are marked with ' *** '.
    """
    labels = data_list[0]
    result_index = labels.index('RESULT')
    bool_index = labels.index('OCCUR MANY')
    star_the_result = 1.0
    number_of_labels = len(labels)
    middle_just = '10'
    template = '\n' + ('{:^' + middle_just + '}|') * number_of_labels
    # BUG FIX: str.rstrip returns a new string; the original discarded the
    # result, leaving a dangling '|' at the end of every formatted row.
    template = template.rstrip('|')
    table_descriptor = template.format(*labels)
    line_len = len(table_descriptor)
    table_descriptor = add_sep_line(table_descriptor, line_len, '*')
    table_descriptor = '\n' + line_len * '=' + table_descriptor
    first_element = -1
    second_element = -1
    output_str = ''
    for line in data_list[1:]:
        new_first_element = int(line[0])
        new_second_element = int(line[1])
        if new_first_element != first_element:
            # first grouping column changed: repeat the full header
            output_str += table_descriptor
        if new_second_element != second_element:
            # second grouping column changed: draw a light separator
            output_str = add_sep_line(output_str, line_len, '-')
        first_element = new_first_element
        second_element = new_second_element
        line_strings = []
        for index, element in enumerate(line):
            if index == result_index:
                to_add = '{:.3f}'.format(element)
            elif index == bool_index:
                to_add = str(bool(element))
            else:
                to_add = str(int(element))
            line_strings.append(to_add)
        output_str += template.format(*line_strings)
        result = float(line[result_index])
        if result > star_the_result:
            # star the rows where IndexedValues beat the control
            output_str += ' *** '
    return output_str
def add_sep_line(input_str, line_length, separator):
    """Append a newline plus ``separator`` repeated line_length times."""
    rule = separator * line_length
    return '{}\n{}'.format(input_str, rule)
def save_data_pts(data_flat, data_bumpy):
    """Persist both trial data sets to .npy files in the working directory."""
    np.save('save_flat_data', np.array(data_flat))
    np.save('save_bumpy_data', np.array(data_bumpy))
def load_data_pts(full_file_name):
    """Load a saved .npy table back into a list of tuples.

    Numeric rows come back as tuples of floats; rows holding values that
    cannot convert to float (e.g. the saved label row) are kept as plain
    tuples.
    """
    rows = np.load(full_file_name).tolist()
    output = []
    for row in rows:
        try:
            converted = tuple(float(value) for value in row)
        except ValueError:
            converted = tuple(row)
        output.append(converted)
    return output
def get_saved_data():
    """Reload both previously saved data sets from their .npy files."""
    flat_points = load_data_pts('save_flat_data.npy')
    bumpy_points = load_data_pts('save_bumpy_data.npy')
    return flat_points, bumpy_points
def data_points_ui():
    """Interactive UI: load or regenerate trial data, then write it to
    text files grouped by a user-chosen column order."""
    try:
        get_new_data = input_py_2_and_3('generate new data pts (will take some minutes)? type "y" for yes.\n>>> ')
        if get_new_data == 'y':
            # reuse the IOError path below to force regeneration
            raise IOError
        data_points_flat, data_points_bumpy = get_saved_data()
    except IOError:
        # either the user asked for fresh data or the .npy files are missing
        print('generating data points. this will take a few minutes')
        data_points_flat = get_data_list(False)
        data_points_bumpy = get_data_list(True)
        save_data_pts(data_points_flat, data_points_bumpy)
    labels_dict = dict(enumerate(data_points_flat[0]))
    intro = """
here are the values whose order you may change
{}
at the prompt put in a new 5-digit string showing how you want the data ordered
so "01234" will order the data by ('ADDS', 'DICT SIZE', 'LIST SIZE', 'OCCUR MANY', 'RESULT')
"21034" will order the data by ('LIST SIZE', 'DICT SIZE', 'ADDS', 'OCCUR MANY', 'RESULT')
when prompted, enter the base name for the file.
"test" would create 3 files.
"test_flat.txt", "test_many.txt", "test_combined.txt". they will be text files showing the data
grouped accordingly. flat show adding events that occurred once and many shows events that occurred 10 times.
the result column shows how many times faster the index_values method is and so any time
indexed values is faster, it is starred.
    """
    print(intro.format(str(labels_dict).replace(',', '\n')))
    while True:
        print(str(labels_dict).replace(',', '\n'))
        new_order = input_py_2_and_3('new order or "q" quits >>> ')
        if new_order == 'q':
            break
        # translate the digit string (e.g. "21034") into a column order
        change_list = []
        for digit in new_order:
            change_list.append(int(digit))
        result_to_print_flat = data_grouper(data_points_flat, change_list)
        result_to_print_bumpy = data_grouper(data_points_bumpy, change_list)
        flat = get_result_str(result_to_print_flat)
        many = get_result_str(result_to_print_bumpy)
        name = input_py_2_and_3('file base name >>> ')
        # three files: each data set alone, plus both side by side
        with open(name + '_flat.txt', 'w') as file:
            file.write(flat)
        with open(name + '_many.txt', 'w') as file:
            file.write(many)
        with open(name + '_combined.txt', 'w') as file:
            file.write(get_side_by_side_data(flat, many))
def get_side_by_side_data(left_answer, right_answer):
    """Join two multi-line strings into two columns, left column 64 wide.

    Lines are paired by position; right_answer must have at least as many
    lines as left_answer.
    """
    left_lines = left_answer.split('\n')
    right_lines = right_answer.split('\n')
    column_width = 64
    return '\n'.join('{:<{}}{}'.format(line, column_width, right_lines[index])
                     for index, line in enumerate(left_lines))
if __name__ == '__main__':
    # launch the interactive plotting UI; swap the comments below to dump
    # the data-table text files instead
    graphing_ui()
    # data_points_ui()
| [
"itertools.cycle",
"time.clock",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"random.randrange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.getcwd",
"numpy.array",
"matplotlib.pyplot.figure",
"dicetables.additiveevents.AdditiveEvents",
"numpy.save",
"matplotlib.pyplot.ion",
... | [((5825, 5851), 'math.log10', 'log10', (['events_tuples[0][1]'], {}), '(events_tuples[0][1])\n', (5830, 5851), False, 'from math import log10\n'), ((6730, 6756), 'dicetables.additiveevents.AdditiveEvents', 'AdditiveEvents', (['input_dict'], {}), '(input_dict)\n', (6744, 6756), False, 'from dicetables.additiveevents import AdditiveEvents\n'), ((6840, 6852), 'time.clock', 'time.clock', ([], {}), '()\n', (6850, 6852), False, 'import time\n'), ((7021, 7033), 'time.clock', 'time.clock', ([], {}), '()\n', (7031, 7033), False, 'import time\n'), ((7257, 7283), 'dicetables.additiveevents.AdditiveEvents', 'AdditiveEvents', (['input_dict'], {}), '(input_dict)\n', (7271, 7283), False, 'from dicetables.additiveevents import AdditiveEvents\n'), ((11995, 12004), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (12002, 12004), True, 'import matplotlib.pyplot as plt\n'), ((12251, 12310), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'speed_ratios', 'style'], {'label': 'label'}), '(variable_values, speed_ratios, style, label=label)\n', (12259, 12310), True, 'import matplotlib.pyplot as plt\n'), ((12409, 12468), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""speed of indexed values over speed of control"""'], {}), "('speed of indexed values over speed of control')\n", (12419, 12468), True, 'import matplotlib.pyplot as plt\n'), ((12821, 12856), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_labels[variable_name]'], {}), '(x_labels[variable_name])\n', (12831, 12856), True, 'import matplotlib.pyplot as plt\n'), ((12862, 12874), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12872, 12874), True, 'import matplotlib.pyplot as plt\n'), ((12879, 12895), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (12888, 12895), True, 'import matplotlib.pyplot as plt\n'), ((12900, 12915), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (12909, 12915), True, 'import matplotlib.pyplot as plt\n'), ((13263, 13272), 
'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (13270, 13272), True, 'import matplotlib.pyplot as plt\n'), ((13291, 13309), 'matplotlib.pyplot.figure', 'plt.figure', (['figure'], {}), '(figure)\n', (13301, 13309), True, 'import matplotlib.pyplot as plt\n'), ((13335, 13399), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'control_times', '"""bo-"""'], {'label': '"""control"""'}), "(variable_values, control_times, 'bo-', label='control')\n", (13343, 13399), True, 'import matplotlib.pyplot as plt\n'), ((13404, 13469), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'iv_times', '"""r*-"""'], {'label': '"""IndexedValues"""'}), "(variable_values, iv_times, 'r*-', label='IndexedValues')\n", (13412, 13469), True, 'import matplotlib.pyplot as plt\n'), ((13474, 13492), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""time"""'], {}), "('time')\n", (13484, 13492), True, 'import matplotlib.pyplot as plt\n'), ((13780, 13815), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_labels[variable_name]'], {}), '(x_labels[variable_name])\n', (13790, 13815), True, 'import matplotlib.pyplot as plt\n'), ((13821, 13833), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13831, 13833), True, 'import matplotlib.pyplot as plt\n'), ((14006, 14022), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (14015, 14022), True, 'import matplotlib.pyplot as plt\n'), ((14027, 14071), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'control_fit', '"""c-"""'], {}), "(variable_values, control_fit, 'c-')\n", (14035, 14071), True, 'import matplotlib.pyplot as plt\n'), ((14076, 14115), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'iv_fit', '"""c-"""'], {}), "(variable_values, iv_fit, 'c-')\n", (14084, 14115), True, 'import matplotlib.pyplot as plt\n'), ((14120, 14135), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (14129, 14135), True, 'import matplotlib.pyplot as plt\n'), ((14277, 14322), 'numpy.polyfit', 
'np.polyfit', (['variable_values', 'control_times', '(1)'], {}), '(variable_values, control_times, 1)\n', (14287, 14322), True, 'import numpy as np\n'), ((14351, 14391), 'numpy.polyfit', 'np.polyfit', (['variable_values', 'iv_times', '(1)'], {}), '(variable_values, iv_times, 1)\n', (14361, 14391), True, 'import numpy as np\n'), ((15745, 15807), 'itertools.cycle', 'cycle', (["['o', '<', '>', 'v', 's', 'p', '*', '+', 'x', 'D', 'd']"], {}), "(['o', '<', '>', 'v', 's', 'p', '*', '+', 'x', 'D', 'd'])\n", (15750, 15807), False, 'from itertools import cycle\n'), ((15843, 15885), 'itertools.cycle', 'cycle', (["['b', 'y', 'r', 'c', 'm', 'k', 'g']"], {}), "(['b', 'y', 'r', 'c', 'm', 'k', 'g'])\n", (15848, 15885), False, 'from itertools import cycle\n'), ((28495, 28514), 'numpy.array', 'np.array', (['data_flat'], {}), '(data_flat)\n', (28503, 28514), True, 'import numpy as np\n'), ((28519, 28555), 'numpy.save', 'np.save', (['"""save_flat_data"""', 'flat_save'], {}), "('save_flat_data', flat_save)\n", (28526, 28555), True, 'import numpy as np\n'), ((28574, 28594), 'numpy.array', 'np.array', (['data_bumpy'], {}), '(data_bumpy)\n', (28582, 28594), True, 'import numpy as np\n'), ((28599, 28637), 'numpy.save', 'np.save', (['"""save_bumpy_data"""', 'bumpy_save'], {}), "('save_bumpy_data', bumpy_save)\n", (28606, 28637), True, 'import numpy as np\n'), ((28690, 28713), 'numpy.load', 'np.load', (['full_file_name'], {}), '(full_file_name)\n', (28697, 28713), True, 'import numpy as np\n'), ((6965, 6977), 'time.clock', 'time.clock', ([], {}), '()\n', (6975, 6977), False, 'import time\n'), ((7109, 7121), 'time.clock', 'time.clock', ([], {}), '()\n', (7119, 7121), False, 'import time\n'), ((12337, 12404), 'matplotlib.pyplot.plot', 'plt.plot', (['variable_values', 'equality_line', '"""g-"""'], {'label': '"""equal speed"""'}), "(variable_values, equality_line, 'g-', label='equal speed')\n", (12345, 12404), True, 'import matplotlib.pyplot as plt\n'), ((21874, 21896), 
'matplotlib.pyplot.figure', 'plt.figure', (['plt_figure'], {}), '(plt_figure)\n', (21884, 21896), True, 'import matplotlib.pyplot as plt\n'), ((21929, 21938), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (21936, 21938), True, 'import matplotlib.pyplot as plt\n'), ((22988, 23002), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (22997, 23002), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2966), 'random.randrange', 'random.randrange', (['(1)', '(start_length - 1)'], {}), '(1, start_length - 1)\n', (2945, 2966), False, 'import random\n'), ((14814, 14822), 'os.getcwd', 'getcwd', ([], {}), '()\n', (14820, 14822), False, 'from os import getcwd\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2020, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2020. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
import sys
from orangewidget.settings import Setting
from orangewidget import gui as orangegui
from orangecontrib.wonder.widgets.gui.ow_generic_parameter_widget import OWGenericInstrumentalDiffractionPatternParametersWidget, ParameterBox
from orangecontrib.wonder.util.gui_utility import gui
from oasys.widgets import congruence
from orangecontrib.wonder.fit.parameters.instrument.polarization_parameters import Beampath, LorentzFormula, PolarizationParameters
class OWLorentzPolarization(OWGenericInstrumentalDiffractionPatternParametersWidget):
    """Widget that edits Lorentz-Polarization factor parameters.

    Each Setting below is a list with one entry per diffraction pattern;
    per-pattern editing is delegated to PolarizationParametersBox
    instances created by get_parameter_box_instance.
    """
    name = "Lorentz-Polarization Factors"
    description = "Define Lorentz-Polarization Factor"
    icon = "icons/lorentz_polarization.png"
    priority = 9
    # one entry per diffraction pattern; values persisted by the framework
    use_lorentz_factor = Setting([1])
    lorentz_formula = Setting([LorentzFormula.Shkl_Shkl])
    use_polarization_factor = Setting([0])
    degree_of_polarization = Setting([0.0])
    beampath = Setting([Beampath.PRIMARY])
    use_twotheta_mono = Setting([1])
    twotheta_mono = Setting([28.443])
    def __init__(self):
        super().__init__()
    def get_max_height(self):
        """Maximum widget height in pixels."""
        return 500
    def get_parameter_name(self):
        """Human-readable name used by the generic parent widget."""
        return "Lorentz-Polarization"
    def get_current_dimension(self):
        """Number of diffraction patterns currently held in the settings."""
        return len(self.use_lorentz_factor)
    def get_parameter_box_instance(self, parameter_tab, index):
        """Build the editor box for pattern ``index`` from stored settings."""
        return PolarizationParametersBox(widget=self,
                                         parent=parameter_tab,
                                         index=index,
                                         use_lorentz_factor=self.use_lorentz_factor[index],
                                         lorentz_formula=self.lorentz_formula[index],
                                         use_polarization_factor=self.use_polarization_factor[index],
                                         degree_of_polarization=self.degree_of_polarization[index],
                                         beampath=self.beampath[index],
                                         use_twotheta_mono=self.use_twotheta_mono[index],
                                         twotheta_mono=self.twotheta_mono[index])
    def get_empty_parameter_box_instance(self, parameter_tab, index):
        """Build an editor box with default values for pattern ``index``."""
        return PolarizationParametersBox(widget=self, parent=parameter_tab, index=index)
    def set_parameter_data(self):
        # push one PolarizationParameters object per pattern into the fit globals
        self.fit_global_parameters.set_instrumental_profile_parameters([self.get_parameter_box(index).get_lorentz_polarization() for index in range(self.get_current_dimension())])
    def get_parameter_array(self):
        return self.fit_global_parameters.get_instrumental_profile_parameters(PolarizationParameters.__name__)
    def get_parameter_item(self, diffraction_pattern_index):
        return self.fit_global_parameters.get_instrumental_profile_parameters_item(PolarizationParameters.__name__, diffraction_pattern_index)
    def get_instrumental_parameter_array(self, instrumental_parameters):
        return instrumental_parameters.get_instrumental_profile_parameters(PolarizationParameters.__name__)
    def get_instrumental_parameter_item(self, instrumental_parameters, diffraction_pattern_index):
        return instrumental_parameters.get_instrumental_profile_parameters_item(PolarizationParameters.__name__, diffraction_pattern_index)
    def dumpSettings(self):
        """Persist every setting list (called by the framework on changes)."""
        self.dump_use_lorentz_factor()
        self.dump_lorentz_formula()
        self.dump_use_polarization_factor()
        self.dump_degree_of_polarization()
        self.dump_beampath()
        self.dump_use_twotheta_mono()
        self.dump_twotheta_mono()
    # per-setting dump helpers; dump_variable (in the parent) persists by name
    def dump_use_lorentz_factor(self): self.dump_variable("use_lorentz_factor")
    def dump_lorentz_formula(self): self.dump_variable("lorentz_formula")
    def dump_use_polarization_factor(self): self.dump_variable("use_polarization_factor")
    def dump_degree_of_polarization(self): self.dump_variable("degree_of_polarization")
    def dump_beampath(self): self.dump_variable("beampath")
    def dump_use_twotheta_mono(self): self.dump_variable("use_twotheta_mono")
    def dump_twotheta_mono(self): self.dump_variable("twotheta_mono")
class PolarizationParametersBox(ParameterBox):
    """GUI box that edits the Lorentz-Polarization settings of one pattern.

    Builds the combo boxes / line edits, shows or hides the dependent
    sub-boxes, and converts the widget state to a PolarizationParameters
    object via get_lorentz_polarization().
    """
    def __init__(self,
                 widget=None,
                 parent=None,
                 index=0,
                 use_lorentz_factor=1,
                 lorentz_formula=LorentzFormula.Shkl_Shkl,
                 use_polarization_factor=0,
                 degree_of_polarization=0.0,
                 beampath=Beampath.PRIMARY,
                 use_twotheta_mono=1,
                 twotheta_mono=28.443):
        super(PolarizationParametersBox, self).__init__(widget=widget,
                                                        parent=parent,
                                                        index=index,
                                                        use_lorentz_factor=use_lorentz_factor,
                                                        lorentz_formula = lorentz_formula,
                                                        use_polarization_factor = use_polarization_factor,
                                                        degree_of_polarization = degree_of_polarization,
                                                        beampath = beampath,
                                                        use_twotheta_mono = use_twotheta_mono,
                                                        twotheta_mono = twotheta_mono)
    def init_fields(self, **kwargs):
        # copy the constructor kwargs onto the instance (parent hook)
        self.use_lorentz_factor = kwargs["use_lorentz_factor"]
        self.lorentz_formula = kwargs["lorentz_formula"]
        self.use_polarization_factor = kwargs["use_polarization_factor"]
        self.degree_of_polarization = kwargs["degree_of_polarization"]
        self.beampath = kwargs["beampath"]
        self.use_twotheta_mono = kwargs["use_twotheta_mono"]
        self.twotheta_mono = kwargs["twotheta_mono"]
    def init_gui(self, container):
        """Build the controls; each visible/empty box pair is toggled by the set_* callbacks."""
        orangegui.comboBox(container, self, "use_lorentz_factor", label="Add Lorentz Factor", items=["No", "Yes"], labelWidth=300, orientation="horizontal", callback=self.set_LorentzFactor)
        self.lorentz_box = gui.widgetBox(container, "", orientation="vertical", width=self.CONTROL_AREA_WIDTH - 20, height=30)
        self.lorentz_box_empty = gui.widgetBox(container, "", orientation="vertical", width=self.CONTROL_AREA_WIDTH - 20, height=30)
        orangegui.comboBox(self.lorentz_box, self, "lorentz_formula", label="Formula", items=LorentzFormula.tuple(), labelWidth=300, orientation="horizontal", callback=self.widget.dump_lorentz_formula)
        self.set_LorentzFactor()
        orangegui.separator(container)
        orangegui.comboBox(container, self, "use_polarization_factor", label="Add Polarization Factor", items=["No", "Yes"], labelWidth=300,
                           orientation="horizontal", callback=self.set_Polarization)
        self.polarization_box = gui.widgetBox(container, "", orientation="vertical", width=self.CONTROL_AREA_WIDTH - 20, height=200)
        self.polarization_box_empty = gui.widgetBox(container, "", orientation="vertical", width=self.CONTROL_AREA_WIDTH - 20, height=200)
        gui.lineEdit(self.polarization_box, self, "degree_of_polarization", "Deg. Pol. (0\u2264Q\u22641)", labelWidth=300, valueType=float, callback=self.widget.dump_degree_of_polarization)
        orangegui.comboBox(self.polarization_box, self, "use_twotheta_mono", label="Use Monochromator", items=["No", "Yes"], labelWidth=300,
                           orientation="horizontal", callback=self.set_Monochromator)
        # monochromator controls live inside the polarization box
        self.monochromator_box = gui.widgetBox(self.polarization_box, "", orientation="vertical", width=self.CONTROL_AREA_WIDTH - 20, height=95)
        self.monochromator_box_empty = gui.widgetBox(self.polarization_box, "", orientation="vertical", width=self.CONTROL_AREA_WIDTH - 20, height=95)
        orangegui.comboBox(self.monochromator_box, self, "beampath", label="Beampath", items=Beampath.tuple(), labelWidth=300,
                           orientation="horizontal", callback=self.widget.dump_beampath)
        gui.lineEdit(self.monochromator_box, self, "twotheta_mono", "2\u03B8 Monochromator [deg]", labelWidth=300, valueType=float, callback=self.widget.dump_twotheta_mono)
        self.set_Polarization()
    def set_LorentzFactor(self):
        # swap the real/empty boxes so layout height stays constant
        self.lorentz_box.setVisible(self.use_lorentz_factor==1)
        self.lorentz_box_empty.setVisible(self.use_lorentz_factor==0)
        if not self.is_on_init: self.widget.dump_use_lorentz_factor()
    def set_Monochromator(self):
        self.monochromator_box.setVisible(self.use_twotheta_mono==1)
        self.monochromator_box_empty.setVisible(self.use_twotheta_mono==0)
        if not self.is_on_init: self.widget.dump_use_twotheta_mono()
    def set_Polarization(self):
        self.polarization_box.setVisible(self.use_polarization_factor==1)
        self.polarization_box_empty.setVisible(self.use_polarization_factor==0)
        if self.use_polarization_factor==1: self.set_Monochromator()
        if not self.is_on_init: self.widget.dump_use_polarization_factor()
    def get_basic_parameter_prefix(self):
        return PolarizationParameters.get_parameters_prefix()
    def get_lorentz_polarization(self):
        """Validate the widget state and return it as a PolarizationParameters."""
        if self.use_polarization_factor == 1:
            congruence.checkPositiveNumber(self.degree_of_polarization, "Deg. Pol.")
            congruence.checkLessOrEqualThan(self.degree_of_polarization, 1.0, "Deg. Pol.", "1.0")
        if self.use_polarization_factor == 1 and self.use_twotheta_mono==1:
            congruence.checkStrictlyPositiveAngle(self.twotheta_mono, "2\u03B8 Monochromator")
        return PolarizationParameters(use_lorentz_factor=self.use_lorentz_factor == 1,
                                      lorentz_formula=self.lorentz_formula,
                                      use_polarization_factor=self.use_polarization_factor,
                                      twotheta_mono=None if (self.use_polarization_factor == 0 or self.use_twotheta_mono == 0) else self.twotheta_mono,
                                      beampath=self.beampath,
                                      degree_of_polarization=self.degree_of_polarization)
    def set_data(self, polarization_parameters):
        """Load a PolarizationParameters object back into the widget state."""
        self.use_lorentz_factor = 1 if polarization_parameters.use_lorentz_factor else self.use_lorentz_factor
        self.lorentz_formula = polarization_parameters.lorentz_formula
        self.use_polarization_factor = 1 if polarization_parameters.use_polarization_factor else self.use_polarization_factor
        if self.use_polarization_factor == 1:
            self.degree_of_polarization = polarization_parameters.degree_of_polarization
            twotheta_mono = polarization_parameters.twotheta_mono
            # twotheta_mono of None encodes "no monochromator"
            if not twotheta_mono is None:
                self.use_twotheta_mono = 1
                self.twotheta_mono = twotheta_mono
                self.beampath = polarization_parameters.beampath
            else:
                self.use_twotheta_mono = 0
        self.set_LorentzFactor()
        self.set_Polarization()
from PyQt5.QtWidgets import QApplication
if __name__ == "__main__":
    # standalone smoke test: run the widget inside its own Qt application
    a = QApplication(sys.argv)
    ow = OWLorentzPolarization()
    ow.show()
    a.exec_()
    ow.saveSettings()
| [
"orangecontrib.wonder.fit.parameters.instrument.polarization_parameters.PolarizationParameters.get_parameters_prefix",
"orangewidget.gui.comboBox",
"orangecontrib.wonder.fit.parameters.instrument.polarization_parameters.PolarizationParameters",
"orangecontrib.wonder.util.gui_utility.gui.lineEdit",
"orangeco... | [((4131, 4143), 'orangewidget.settings.Setting', 'Setting', (['[1]'], {}), '([1])\n', (4138, 4143), False, 'from orangewidget.settings import Setting\n'), ((4174, 4209), 'orangewidget.settings.Setting', 'Setting', (['[LorentzFormula.Shkl_Shkl]'], {}), '([LorentzFormula.Shkl_Shkl])\n', (4181, 4209), False, 'from orangewidget.settings import Setting\n'), ((4240, 4252), 'orangewidget.settings.Setting', 'Setting', (['[0]'], {}), '([0])\n', (4247, 4252), False, 'from orangewidget.settings import Setting\n'), ((4283, 4297), 'orangewidget.settings.Setting', 'Setting', (['[0.0]'], {}), '([0.0])\n', (4290, 4297), False, 'from orangewidget.settings import Setting\n'), ((4328, 4355), 'orangewidget.settings.Setting', 'Setting', (['[Beampath.PRIMARY]'], {}), '([Beampath.PRIMARY])\n', (4335, 4355), False, 'from orangewidget.settings import Setting\n'), ((4386, 4398), 'orangewidget.settings.Setting', 'Setting', (['[1]'], {}), '([1])\n', (4393, 4398), False, 'from orangewidget.settings import Setting\n'), ((4429, 4446), 'orangewidget.settings.Setting', 'Setting', (['[28.443]'], {}), '([28.443])\n', (4436, 4446), False, 'from orangewidget.settings import Setting\n'), ((14708, 14730), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (14720, 14730), False, 'from PyQt5.QtWidgets import QApplication\n'), ((9429, 9620), 'orangewidget.gui.comboBox', 'orangegui.comboBox', (['container', 'self', '"""use_lorentz_factor"""'], {'label': '"""Add Lorentz Factor"""', 'items': "['No', 'Yes']", 'labelWidth': '(300)', 'orientation': '"""horizontal"""', 'callback': 'self.set_LorentzFactor'}), "(container, self, 'use_lorentz_factor', label=\n 'Add Lorentz Factor', items=['No', 'Yes'], labelWidth=300, orientation=\n 'horizontal', callback=self.set_LorentzFactor)\n", (9447, 9620), True, 'from orangewidget import gui as orangegui\n'), ((9639, 9743), 'orangecontrib.wonder.util.gui_utility.gui.widgetBox', 'gui.widgetBox', (['container', '""""""'], 
{'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 20)', 'height': '(30)'}), "(container, '', orientation='vertical', width=self.\n CONTROL_AREA_WIDTH - 20, height=30)\n", (9652, 9743), False, 'from orangecontrib.wonder.util.gui_utility import gui\n'), ((9772, 9876), 'orangecontrib.wonder.util.gui_utility.gui.widgetBox', 'gui.widgetBox', (['container', '""""""'], {'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 20)', 'height': '(30)'}), "(container, '', orientation='vertical', width=self.\n CONTROL_AREA_WIDTH - 20, height=30)\n", (9785, 9876), False, 'from orangecontrib.wonder.util.gui_utility import gui\n'), ((10118, 10148), 'orangewidget.gui.separator', 'orangegui.separator', (['container'], {}), '(container)\n', (10137, 10148), True, 'from orangewidget import gui as orangegui\n'), ((10158, 10357), 'orangewidget.gui.comboBox', 'orangegui.comboBox', (['container', 'self', '"""use_polarization_factor"""'], {'label': '"""Add Polarization Factor"""', 'items': "['No', 'Yes']", 'labelWidth': '(300)', 'orientation': '"""horizontal"""', 'callback': 'self.set_Polarization'}), "(container, self, 'use_polarization_factor', label=\n 'Add Polarization Factor', items=['No', 'Yes'], labelWidth=300,\n orientation='horizontal', callback=self.set_Polarization)\n", (10176, 10357), True, 'from orangewidget import gui as orangegui\n'), ((10409, 10514), 'orangecontrib.wonder.util.gui_utility.gui.widgetBox', 'gui.widgetBox', (['container', '""""""'], {'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 20)', 'height': '(200)'}), "(container, '', orientation='vertical', width=self.\n CONTROL_AREA_WIDTH - 20, height=200)\n", (10422, 10514), False, 'from orangecontrib.wonder.util.gui_utility import gui\n'), ((10548, 10653), 'orangecontrib.wonder.util.gui_utility.gui.widgetBox', 'gui.widgetBox', (['container', '""""""'], {'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 20)', 'height': '(200)'}), "(container, 
'', orientation='vertical', width=self.\n CONTROL_AREA_WIDTH - 20, height=200)\n", (10561, 10653), False, 'from orangecontrib.wonder.util.gui_utility import gui\n'), ((10658, 10838), 'orangecontrib.wonder.util.gui_utility.gui.lineEdit', 'gui.lineEdit', (['self.polarization_box', 'self', '"""degree_of_polarization"""', '"""Deg. Pol. (0≤Q≤1)"""'], {'labelWidth': '(300)', 'valueType': 'float', 'callback': 'self.widget.dump_degree_of_polarization'}), "(self.polarization_box, self, 'degree_of_polarization',\n 'Deg. Pol. (0≤Q≤1)', labelWidth=300, valueType=float, callback=self.\n widget.dump_degree_of_polarization)\n", (10670, 10838), False, 'from orangecontrib.wonder.util.gui_utility import gui\n'), ((10849, 11050), 'orangewidget.gui.comboBox', 'orangegui.comboBox', (['self.polarization_box', 'self', '"""use_twotheta_mono"""'], {'label': '"""Use Monochromator"""', 'items': "['No', 'Yes']", 'labelWidth': '(300)', 'orientation': '"""horizontal"""', 'callback': 'self.set_Monochromator'}), "(self.polarization_box, self, 'use_twotheta_mono', label=\n 'Use Monochromator', items=['No', 'Yes'], labelWidth=300, orientation=\n 'horizontal', callback=self.set_Monochromator)\n", (10867, 11050), True, 'from orangewidget import gui as orangegui\n'), ((11102, 11218), 'orangecontrib.wonder.util.gui_utility.gui.widgetBox', 'gui.widgetBox', (['self.polarization_box', '""""""'], {'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 20)', 'height': '(95)'}), "(self.polarization_box, '', orientation='vertical', width=self\n .CONTROL_AREA_WIDTH - 20, height=95)\n", (11115, 11218), False, 'from orangecontrib.wonder.util.gui_utility import gui\n'), ((11253, 11369), 'orangecontrib.wonder.util.gui_utility.gui.widgetBox', 'gui.widgetBox', (['self.polarization_box', '""""""'], {'orientation': '"""vertical"""', 'width': '(self.CONTROL_AREA_WIDTH - 20)', 'height': '(95)'}), "(self.polarization_box, '', orientation='vertical', width=self\n .CONTROL_AREA_WIDTH - 20, height=95)\n", 
(11266, 11369), False, 'from orangecontrib.wonder.util.gui_utility import gui\n'), ((11591, 11759), 'orangecontrib.wonder.util.gui_utility.gui.lineEdit', 'gui.lineEdit', (['self.monochromator_box', 'self', '"""twotheta_mono"""', '"""2θ Monochromator [deg]"""'], {'labelWidth': '(300)', 'valueType': 'float', 'callback': 'self.widget.dump_twotheta_mono'}), "(self.monochromator_box, self, 'twotheta_mono',\n '2θ Monochromator [deg]', labelWidth=300, valueType=float, callback=\n self.widget.dump_twotheta_mono)\n", (11603, 11759), False, 'from orangecontrib.wonder.util.gui_utility import gui\n'), ((12690, 12736), 'orangecontrib.wonder.fit.parameters.instrument.polarization_parameters.PolarizationParameters.get_parameters_prefix', 'PolarizationParameters.get_parameters_prefix', ([], {}), '()\n', (12734, 12736), False, 'from orangecontrib.wonder.fit.parameters.instrument.polarization_parameters import Beampath, LorentzFormula, PolarizationParameters\n'), ((13195, 13570), 'orangecontrib.wonder.fit.parameters.instrument.polarization_parameters.PolarizationParameters', 'PolarizationParameters', ([], {'use_lorentz_factor': '(self.use_lorentz_factor == 1)', 'lorentz_formula': 'self.lorentz_formula', 'use_polarization_factor': 'self.use_polarization_factor', 'twotheta_mono': '(None if self.use_polarization_factor == 0 or self.use_twotheta_mono == 0 else\n self.twotheta_mono)', 'beampath': 'self.beampath', 'degree_of_polarization': 'self.degree_of_polarization'}), '(use_lorentz_factor=self.use_lorentz_factor == 1,\n lorentz_formula=self.lorentz_formula, use_polarization_factor=self.\n use_polarization_factor, twotheta_mono=None if self.\n use_polarization_factor == 0 or self.use_twotheta_mono == 0 else self.\n twotheta_mono, beampath=self.beampath, degree_of_polarization=self.\n degree_of_polarization)\n', (13217, 13570), False, 'from orangecontrib.wonder.fit.parameters.instrument.polarization_parameters import Beampath, LorentzFormula, PolarizationParameters\n'), ((12836, 12908), 
'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.degree_of_polarization', '"""Deg. Pol."""'], {}), "(self.degree_of_polarization, 'Deg. Pol.')\n", (12866, 12908), False, 'from oasys.widgets import congruence\n'), ((12921, 13010), 'oasys.widgets.congruence.checkLessOrEqualThan', 'congruence.checkLessOrEqualThan', (['self.degree_of_polarization', '(1.0)', '"""Deg. Pol."""', '"""1.0"""'], {}), "(self.degree_of_polarization, 1.0,\n 'Deg. Pol.', '1.0')\n", (12952, 13010), False, 'from oasys.widgets import congruence\n'), ((13096, 13173), 'oasys.widgets.congruence.checkStrictlyPositiveAngle', 'congruence.checkStrictlyPositiveAngle', (['self.twotheta_mono', '"""2θ Monochromator"""'], {}), "(self.twotheta_mono, '2θ Monochromator')\n", (13133, 13173), False, 'from oasys.widgets import congruence\n'), ((9966, 9988), 'orangecontrib.wonder.fit.parameters.instrument.polarization_parameters.LorentzFormula.tuple', 'LorentzFormula.tuple', ([], {}), '()\n', (9986, 9988), False, 'from orangecontrib.wonder.fit.parameters.instrument.polarization_parameters import Beampath, LorentzFormula, PolarizationParameters\n'), ((11459, 11475), 'orangecontrib.wonder.fit.parameters.instrument.polarization_parameters.Beampath.tuple', 'Beampath.tuple', ([], {}), '()\n', (11473, 11475), False, 'from orangecontrib.wonder.fit.parameters.instrument.polarization_parameters import Beampath, LorentzFormula, PolarizationParameters\n')] |
from appshell.base import View
from appshell.templates import confirmation, message
from flask import request, flash, redirect
from flask_babelex import Babel, Domain
mydomain = Domain('appshell')
_ = mydomain.gettext
lazy_gettext = mydomain.lazy_gettext
class ConfirmationEndpoint(View):
    """Two-step endpoint: GET renders a confirmation page, POST runs the action.

    Subclasses implement ``do_it()`` (and optionally ``prepare()``), set
    ``confirmation_message``, and may set ``flash_message`` / ``redirect_to``
    to control what happens after the action completes.
    """
    methods = ("GET", "POST")
    # Where to redirect after a successful POST; None -> render a "Done" page.
    redirect_to = None
    # Arguments tuple for flask.flash() after the action; falsy -> no flash.
    # Declared here (like redirect_to) so done() cannot raise AttributeError
    # when a subclass does not set it.
    flash_message = None
    def prepare(self, **args):
        # Hook for subclasses. Accepts **args because dispatch_request()
        # forwards the URL rule arguments unconditionally.
        pass
    def dispatch_request(self, **args):
        self.prepare(**args)
        if request.method == "POST":
            self.do_it(**args)
            return self.done()
        else:
            # GET: ask the user to confirm before doing anything.
            return confirmation(self.confirmation_message)
    def done(self):
        # Finish the POST: optionally flash a message, then redirect or
        # render a simple confirmation page.
        if self.flash_message:
            flash(*self.flash_message)
        if self.redirect_to:
            return redirect(self.redirect_to)
        return message(_("Done"))
| [
"flask.redirect",
"flask_babelex.Domain",
"appshell.templates.confirmation",
"flask.flash"
] | [((179, 197), 'flask_babelex.Domain', 'Domain', (['"""appshell"""'], {}), "('appshell')\n", (185, 197), False, 'from flask_babelex import Babel, Domain\n'), ((605, 644), 'appshell.templates.confirmation', 'confirmation', (['self.confirmation_message'], {}), '(self.confirmation_message)\n', (617, 644), False, 'from appshell.templates import confirmation, message\n'), ((717, 743), 'flask.flash', 'flash', (['*self.flash_message'], {}), '(*self.flash_message)\n', (722, 743), False, 'from flask import request, flash, redirect\n'), ((792, 818), 'flask.redirect', 'redirect', (['self.redirect_to'], {}), '(self.redirect_to)\n', (800, 818), False, 'from flask import request, flash, redirect\n')] |
from flask import g, request
from flask_restful import reqparse
from werkzeug import datastructures
from ..exceptions.system_error import SystemError
from ..exceptions.system_exception import SystemException
from ..exceptions.service_error import ServiceError
from ..exceptions.service_exception import ServiceException
def _get_request():
    """Return the request-scoped RequestParser, lazily created and cached on flask.g."""
    if 'req' in g:
        return g.req
    parser = reqparse.RequestParser()
    g.req = parser
    return parser
def get_argument(
    key, *, default=None, type=str, location=None,
    help=None, required=False, action='store'
):
    '''
    Fetch and validate a single request argument via flask_restful's reqparse.

    :param default: The value produced if the argument is absent from the
        request.
    :param type: The type to which the request argument should be
        converted. If a type raises an exception, the message in the
        error will be returned in the response. Defaults to :class:`unicode`
        in python2 and :class:`str` in python3. The special value ``'file'``
        selects ``werkzeug.datastructures.FileStorage`` (sourced from files).
    :param action: The basic type of action to be taken when this argument
        is encountered in the request. Valid options are "store" and "append".
    :param location: The attributes of the :class:`flask.Request` object
        to source the arguments from (ex: headers, args, etc.), can be an
        iterator. The last item listed takes precedence in the result set.
    :param help: A brief description of the argument, returned in the
        response when the argument is invalid. May optionally contain
        an "{error_msg}" interpolation token, which will be replaced with
        the text of the error raised by the type converter.
    :param required: When True (with action 'store'), a missing or blank
        value raises a SystemException.
    :raises ServiceException: when an int argument cannot be converted.
    :raises SystemException: when a required argument is missing or blank.
    '''
    cur_type = type  # remember the type originally requested by the caller
    # For int arguments, fetch as str first and convert manually below, so a
    # non-numeric value yields our own ServiceException instead of reqparse's.
    type = str if type == int else cur_type
    kwargs = dict(default=default, type=type, action=action)
    if location:
        kwargs['location'] = location
    if type == 'file':
        kwargs['type'] = datastructures.FileStorage
        kwargs['location'] = location if location else 'files'
    parser = _get_request()
    parser.add_argument(key, **kwargs)
    args = parser.parse_args()
    if cur_type == int and args[key]:  # convert the str result back to int
        try:
            args[key] = cur_type(args[key])
            type = cur_type
        except (TypeError, ValueError):
            # Narrowed from a bare `except:` so that unrelated errors
            # (e.g. KeyboardInterrupt) are no longer swallowed.
            raise ServiceException(ServiceError.INVALID_VALUE, key)
    if required and action == 'store' and \
            (args[key] is None or type == str and args[key].strip() == '' and key != '_id'):
        raise SystemException(SystemError.MISSING_REQUIRED_PARAMETER, help if help else key)
    return args[key]
def get_request_ip():
    """Return the client IP address, honoring X-Forwarded-For when present.

    Falls back to ``request.remote_addr`` when the header is missing (a
    direct connection without a reverse proxy); the previous code raised a
    KeyError in that case.
    """
    if request.remote_addr == '127.0.0.1':
        return '127.0.0.1'
    forwarded = request.headers.get('X-Forwarded-For')
    if not forwarded:
        # No proxy in front of us: the socket peer address is the client.
        return request.remote_addr
    # The first entry is the originating client; later entries are proxies.
    # Entries may carry spaces after the commas, hence the strip().
    return forwarded.split(',')[0].strip()
| [
"flask_restful.reqparse.RequestParser"
] | [((382, 406), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (404, 406), False, 'from flask_restful import reqparse\n')] |
import bpy
import mathutils
from src.main.Module import Module
from src.utility.BlenderUtility import check_intersection, check_bb_intersection, get_all_mesh_objects
class ObjectPoseSampler(Module):
    """
    Samples positions and rotations of selected object inside the sampling volume while performing mesh and
    bounding box collision checks.
    Example 1: Sample poses (locations and rotations) for objects with a custom property `sample_pose` set to True.
    .. code-block:: yaml
        {
          "module": "object.ObjectPoseSampler",
          "config":{
            "max_iterations": 1000,
            "objects_to_sample": {
              "provider": "getter.Entity",
              "condition": {
                "cp_sample_pose": True
              }
            },
            "pos_sampler":{
              "provider": "sampler.Uniform3d",
              "max": [5,5,5],
              "min": [-5,-5,-5]
            },
            "rot_sampler": {
              "provider": "sampler.Uniform3d",
              "max": [0,0,0],
              "min": [6.28,6.28,6.28]
            }
          }
        }
    .. list-table::
        :widths: 25 100 10
        :header-rows: 1
        * - Parameter
          - Description
          - Type
        * - objects_to_sample
          - Here call an appropriate Provider (Getter) in order to select objects. Default: all mesh objects.
          - Provider
        * - max_iterations
          - Amount of tries before giving up on an object and moving to the next one. Default: 1000.
          - int
        * - pos_sampler
          - Here call an appropriate Provider (Sampler) in order to sample position (XYZ 3d vector) for each object.
          - Provider
        * - rot_sampler
          - Here call an appropriate Provider (Sampler) in order to sample rotation (Euler angles 3d vector) for
            each object.
          - Provider
    """
    def __init__(self, config):
        Module.__init__(self, config)
    def run(self):
        """
        Samples positions and rotations of selected object inside the sampling volume while performing mesh and
        bounding box collision checks in the following steps:
        1. While we have objects remaining and have not run out of tries - sample a point.
        2. If no collisions are found keep the point.
        """
        # While we have objects remaining and have not run out of tries - sample a point
        # List of successfully placed objects
        placed = []
        # After this many tries we give up on current object and continue with the rest
        max_tries = self.config.get_int("max_iterations", 1000)
        objects = self.config.get_list("objects_to_sample", get_all_mesh_objects())
        if max_tries <= 0:
            raise ValueError("The value of max_tries must be greater than zero: {}".format(max_tries))
        if not objects:
            raise Exception("The list of objects can not be empty!")
        # cache to fasten collision detection
        bvh_cache = {}
        # for every selected object
        for obj in objects:
            # Non-mesh objects (lights, cameras, ...) are skipped entirely.
            if obj.type == "MESH":
                no_collision = True
                amount_of_tries_done = -1
                # Try max_iter amount of times
                for i in range(max_tries):
                    # Put the top object in queue at the sampled point in space
                    position = self.config.get_vector3d("pos_sampler")
                    rotation = self.config.get_vector3d("rot_sampler")
                    # Check the sampled pose only against objects placed so far.
                    no_collision = ObjectPoseSampler.check_pose_for_object(obj, position, rotation, bvh_cache,
                                                                            placed, [])
                    # If no collision then keep the position
                    if no_collision:
                        amount_of_tries_done = i
                        break
                if amount_of_tries_done == -1:
                    amount_of_tries_done = max_tries
                # The object keeps its last sampled pose even if it still
                # collides, and it is always added to the placed list.
                placed.append(obj)
                if not no_collision:
                    print("Could not place " + obj.name + " without a collision.")
                else:
                    print("It took " + str(amount_of_tries_done + 1) + " tries to place " + obj.name)
    def insert_key_frames(self, obj: bpy.types.Object, frame_id: int):
        """ Insert key frames for given object pose
        :param obj: Loaded object. Type: blender object.
        :param frame_id: The frame number where key frames should be inserted. Type: int.
        """
        obj.keyframe_insert(data_path='location', frame=frame_id)
        obj.keyframe_insert(data_path='rotation_euler', frame=frame_id)
    @staticmethod
    def check_pose_for_object(obj: bpy.types.Object, position: mathutils.Vector, rotation: mathutils.Vector,
                              bvh_cache: dict, objects_to_check_against: list,
                              list_of_objects_with_no_inside_check: list):
        """
        Checks if a object placed at the given pose intersects with any object given in the list.
        The bvh_cache adds all current objects to the bvh tree, which increases the speed.
        If an object is already in the cache it is removed, before performing the check.
        :param obj: Object which should be checked. Type: :class:`bpy.types.Object`
        :param position: 3D Vector of the location of the object. Type: :class:`mathutils.Vector`
        :param rotation: 3D Vector of the rotation in euler angles. If this is None, the rotation is not changed \
                         Type: :class:`mathutils.Vector`
        :param bvh_cache: Dict of all the bvh trees, removes the `obj` from the cache before adding it again. \
                          Type: :class:`dict`
        :param objects_to_check_against: List of objects which the object is checked again \
                                         Type: :class:`list`
        :param list_of_objects_with_no_inside_check: List of objects on which no inside check is performed. \
                                                     This check is only done for the objects in \
                                                     `objects_to_check_against`. Type: :class:`list`
        :return: Type: :class:`bool`, True if no collision was found, false if at least one collision was found
        """
        # assign it a new pose
        obj.location = position
        if rotation:
            obj.rotation_euler = rotation
        # Make the new transform visible to the depsgraph before testing.
        bpy.context.view_layer.update()
        # Remove bvh cache, as object has changed
        if obj.name in bvh_cache:
            del bvh_cache[obj.name]
        no_collision = True
        # Now check for collisions
        for already_placed in objects_to_check_against:
            # First check if bounding boxes collides
            intersection = check_bb_intersection(obj, already_placed)
            # if they do
            if intersection:
                skip_inside_check = already_placed in list_of_objects_with_no_inside_check
                # then check for more refined collisions
                intersection, bvh_cache = check_intersection(obj, already_placed, bvh_cache=bvh_cache,
                                                             skip_inside_check=skip_inside_check)
                # Stop at the first confirmed collision.
                if intersection:
                    no_collision = False
                    break
        return no_collision
| [
"bpy.context.view_layer.update",
"src.main.Module.Module.__init__",
"src.utility.BlenderUtility.check_intersection",
"src.utility.BlenderUtility.get_all_mesh_objects",
"src.utility.BlenderUtility.check_bb_intersection"
] | [((1971, 2000), 'src.main.Module.Module.__init__', 'Module.__init__', (['self', 'config'], {}), '(self, config)\n', (1986, 2000), False, 'from src.main.Module import Module\n'), ((6509, 6540), 'bpy.context.view_layer.update', 'bpy.context.view_layer.update', ([], {}), '()\n', (6538, 6540), False, 'import bpy\n'), ((2732, 2754), 'src.utility.BlenderUtility.get_all_mesh_objects', 'get_all_mesh_objects', ([], {}), '()\n', (2752, 2754), False, 'from src.utility.BlenderUtility import check_intersection, check_bb_intersection, get_all_mesh_objects\n'), ((6861, 6903), 'src.utility.BlenderUtility.check_bb_intersection', 'check_bb_intersection', (['obj', 'already_placed'], {}), '(obj, already_placed)\n', (6882, 6903), False, 'from src.utility.BlenderUtility import check_intersection, check_bb_intersection, get_all_mesh_objects\n'), ((7148, 7249), 'src.utility.BlenderUtility.check_intersection', 'check_intersection', (['obj', 'already_placed'], {'bvh_cache': 'bvh_cache', 'skip_inside_check': 'skip_inside_check'}), '(obj, already_placed, bvh_cache=bvh_cache,\n skip_inside_check=skip_inside_check)\n', (7166, 7249), False, 'from src.utility.BlenderUtility import check_intersection, check_bb_intersection, get_all_mesh_objects\n')] |
#!python
"""This module is for messing with input characters."""
import os
import sys
unicurses_path = os.path.dirname(os.path.abspath(__file__)) + '/../libs/unicurses'
sys.path.insert(0, unicurses_path)
import unicurses as curses
def key_info(key):
    """Return a one-line human-readable description of *key*.

    Each probe (ord, chr, curses.unctrl, curses.keyname) is attempted
    independently and falls back to a placeholder on failure, so the function
    works for both str and int key representations.

    :param key: a key as returned by get_wch() — either a str or an int.
    :return: formatted string ending in a newline.
    """
    try:
        _ord = ord(key)
    except (TypeError, ValueError):
        # ints (and multi-char strings) have no ord()
        _ord = -1
    try:
        _chr = chr(key)
    except (TypeError, ValueError, OverflowError):
        # strs have no chr(); negative/huge ints are rejected too
        _chr = -1
    try:
        unctrl = curses.unctrl(key)
    except Exception:
        # curses rejects values it cannot render (narrowed from bare except:,
        # which also swallowed KeyboardInterrupt/SystemExit)
        unctrl = 'no unctrl'
    try:
        name = curses.keyname(key)
    except Exception:
        name = 'no name'
    return ('repr: {}, type: {}, ord: {}, chr: {}, unctrl: {}, name: {}\n'
            .format(repr(key), type(key), _ord, _chr, unctrl, name))
def getchar(stdscr):
    """Read one key from *stdscr* and normalize it to a readable string.

    Control characters become 'Ctrl-X', Esc/Tab/Enter/Backspace get fixed
    spellings, and function keys are rendered via curses.keyname() with the
    'KEY_' prefix stripped, the rest lowercased and parentheses removed.

    :param stdscr: a curses window supporting get_wch() and addstr().
    :return: the normalized key string.
    :raises IOError: if get_wch() returns neither str nor int.
    """
    while 1:
        try:
            char = stdscr.get_wch()
            break
        except curses.error:
            # get_wch() may fail transiently (e.g. on interrupt); retry.
            pass
    stdscr.addstr(key_info(char))
    if isinstance(char, str):
        _ord = ord(char)
        # Replace special characters with a readable string
        if _ord == 27:
            result = 'Esc'
        elif _ord == 10:
            result = '\n'
        elif _ord == 9:
            result = '\t'
        elif _ord < 32:
            result = curses.unctrl(char)
            result = result.decode()
            result = 'Ctrl-' + result[1]
        else:
            result = char
    elif isinstance(char, int):
        # char must be some kind of function key
        if char == curses.KEY_BACKSPACE:
            result = '\b'
        else:
            result = curses.keyname(char)
            result = result.decode()
            result = result[4] + result[5:].lower()
            # Remove parenthesis for function keys.
            # Bug fix: str.replace() returns a new string, so the result must
            # be reassigned — the original calls discarded the return value.
            result = result.replace('(', '')
            result = result.replace(')', '')
    else:
        raise IOError('Can\'t handle input character type: {}.'
                      .format(str(type(char))))
    stdscr.addstr(key_info(result))
    return result
def main(stdscr):
    # Interactive demo: dump repr() and unctrl() tables for the ASCII range,
    # show a few non-ASCII characters, then echo normalized key names from
    # getchar() until 'q' is pressed.
    stdscr.keypad(1)
    curses.raw()
    for i in range(127):
        stdscr.addstr(repr(chr(i)))
    stdscr.addstr('\n\n')
    for i in range(127):
        stdscr.addstr(repr(curses.unctrl(chr(i))))
    stdscr.addstr('special characters: {}\n\n'.format('œă好'))
    while 1:
        c = getchar(stdscr)
        if c == 'q':
            break
# wrapper() initializes curses and restores the terminal on exit/exception.
curses.wrapper(main)
| [
"sys.path.insert",
"unicurses.keyname",
"unicurses.unctrl",
"unicurses.raw",
"unicurses.wrapper",
"os.path.abspath"
] | [((169, 203), 'sys.path.insert', 'sys.path.insert', (['(0)', 'unicurses_path'], {}), '(0, unicurses_path)\n', (184, 203), False, 'import sys\n'), ((2287, 2307), 'unicurses.wrapper', 'curses.wrapper', (['main'], {}), '(main)\n', (2301, 2307), True, 'import unicurses as curses\n'), ((1964, 1976), 'unicurses.raw', 'curses.raw', ([], {}), '()\n', (1974, 1976), True, 'import unicurses as curses\n'), ((119, 144), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (134, 144), False, 'import os\n'), ((404, 422), 'unicurses.unctrl', 'curses.unctrl', (['key'], {}), '(key)\n', (417, 422), True, 'import unicurses as curses\n'), ((488, 507), 'unicurses.keyname', 'curses.keyname', (['key'], {}), '(key)\n', (502, 507), True, 'import unicurses as curses\n'), ((1509, 1529), 'unicurses.keyname', 'curses.keyname', (['char'], {}), '(char)\n', (1523, 1529), True, 'import unicurses as curses\n'), ((1187, 1206), 'unicurses.unctrl', 'curses.unctrl', (['char'], {}), '(char)\n', (1200, 1206), True, 'import unicurses as curses\n')] |
from selenium import webdriver
def _options_factory():
    """Build a ChromeOptions object preconfigured for headless operation.

    Internal helper used by get_driver(); not intended to be called directly.
    """
    options = webdriver.ChromeOptions()
    options.add_argument("headless")
    return options
def get_driver(*varargs, args=None):
    """Create a headless selenium.webdriver.Chrome object.

    Command-line switches can be supplied positionally or via *args*; the two
    sources are merged and de-duplicated before being applied.

    Bug fix: the previous signature used a mutable default (``args=[]``) and
    mutated it with ``args.extend(varargs)``, so switches leaked between
    calls and caller-supplied lists were modified in place.

    :param varargs: Chrome command-line switches.
    :param args: optional list of additional switches (not mutated).
    :return: a configured webdriver.Chrome instance.
    """
    merged = list(args) if args else []
    merged.extend(varargs)
    # De-duplicate while preserving first-seen order (set() shuffles).
    merged = list(dict.fromkeys(merged))
    opt = _options_factory()
    for arg in merged:
        if arg == "headless":
            continue  # _options_factory() already forces headless
        opt.add_argument(arg)
    return webdriver.Chrome(chrome_options=opt)
# import other useful things
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
# BeautifulSoup support
from bs4 import BeautifulSoup
def soupify(driver):
    """Parse the driver's current page source into a BeautifulSoup tree (html.parser backend)."""
    return BeautifulSoup(driver.page_source,"html.parser")
| [
"bs4.BeautifulSoup",
"selenium.webdriver.Chrome",
"selenium.webdriver.ChromeOptions"
] | [((196, 221), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (219, 221), False, 'from selenium import webdriver\n'), ((577, 613), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'chrome_options': 'opt'}), '(chrome_options=opt)\n', (593, 613), False, 'from selenium import webdriver\n'), ((901, 949), 'bs4.BeautifulSoup', 'BeautifulSoup', (['driver.page_source', '"""html.parser"""'], {}), "(driver.page_source, 'html.parser')\n", (914, 949), False, 'from bs4 import BeautifulSoup\n')] |