from __future__ import print_function
import argparse
import os
import sys
import time
import random
import string
import torch
import torch.nn as nn
from torch.autograd import Variable
from char_rnn import CharRNN
class ProgressBar(object):
def __init__(self, total=100, stream=sys.stderr):
self.total = total
self.stream = stream
self.last_len = 0
self.curr = 0
def count(self):
self.curr += 1
self.print_progress(self.curr)
def print_progress(self, value):
self.stream.write('\b' * self.last_len)
self.curr = value
        pct = 100.0 * self.curr / self.total
out = '{:.2f}% [{}/{}] \r'.format(pct, self.curr, self.total)
self.last_len = len(out)
self.stream.write(out)
self.stream.flush()
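# Example usage (a sketch):
#   bar = ProgressBar(total=500)
#   for _ in range(500):
#       bar.count()   # prints e.g. "0.20% [1/500]"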
def random_training_set(chunk_len, batch_size, file, args):
'''
TODO: Convert to stateful LSTM with more features
'''
inp = torch.LongTensor(batch_size, chunk_len)
target = torch.LongTensor(batch_size, chunk_len)
file_len = len(file)
for bi in range(batch_size):
        # need chunk_len + 1 characters, so the last valid start is file_len - chunk_len - 1
        start_index = random.randint(0, file_len - chunk_len - 1)
end_index = start_index + chunk_len + 1
chunk = file[start_index:end_index]
if args.debug:
print ('chunk', chunk)
inp[bi] = char_tensor(chunk[:-1])
target[bi] = char_tensor(chunk[1:])
inp = Variable(inp)
target = Variable(target)
if args.cuda:
inp = inp.cuda()
target = target.cuda()
if args.debug:
print (inp, target)
return inp, target
def train_on_batch(inp, target, args):
hidden = decoder.init_hidden(args.batch_size)
if args.cuda: hidden = hidden.cuda()
decoder.zero_grad()
loss = 0
for c in range(args.chunk_len):
output, hidden = decoder(inp[:,c], hidden)
loss += criterion(output.view(args.batch_size, -1), target[:,c])
loss.backward()
decoder_optimizer.step()
return loss.data[0] / args.chunk_len
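# Note: this file targets the pre-0.4 PyTorch API (Variable, loss.data[0]);
# on PyTorch >= 0.4 the equivalent is loss.item().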
def save(args):
save_filename = os.path.splitext(os.path.basename(args.filename))[0] + '.pt'
torch.save(decoder, save_filename)
print('Saved as %s' % save_filename)
class Generator(object):
'''
Class to encapsulate generator functionality
'''
def __init__(self, decoder):
self.decoder = decoder
def generate(self, *args, **kwargs):
raise NotImplementedError
class SimpleGenerator(Generator):
def generate(self,
prime_str='int ',
predict_len=100,
temperature=0.1,
cuda=False,
args=None,
hidden=None):
        prime_input = Variable(char_tensor(prime_str).unsqueeze(0))
        if not hidden:
            hidden = self.decoder.init_hidden(1)
if cuda:
hidden = hidden.cuda()
prime_input = prime_input.cuda()
# Use priming string to "build up" hidden state
for p in range(len(prime_str) - 1):
            _, hidden = self.decoder(prime_input[:,p], hidden)
predicted = ''
inp = prime_input[:,-1]
p_list = []
for p in range(predict_len):
            output, hidden = self.decoder(inp, hidden)
# Sample from the network as a multinomial distribution
output_dist = output.data.view(-1).div(temperature).exp()
top_i = torch.multinomial(output_dist, 1)[0]
p_list.append(top_i)
# Add predicted character to string and use as next input
predicted_char = all_characters[top_i]
predicted += predicted_char
inp = Variable(char_tensor(predicted_char).unsqueeze(0))
if cuda: inp = inp.cuda()
# print (p_list)
return predicted, hidden
def generate(decoder,
prime_str='int ',
predict_len=100,
temperature=0.35,
cuda=False,
args=None,
hidden=None):
    prime_input = Variable(char_tensor(prime_str).unsqueeze(0))
    if not hidden:
        hidden = decoder.init_hidden(1)
if cuda:
hidden = hidden.cuda()
prime_input = prime_input.cuda()
# Use priming string to "build up" hidden state
for p in range(len(prime_str) - 1):
_, hidden = decoder(prime_input[:,p], hidden)
predicted = ''
inp = prime_input[:,-1]
p_list = []
for p in range(predict_len):
output, hidden = decoder(inp, hidden)
# Sample from the network as a multinomial distribution
output_dist = output.data.view(-1).div(temperature).exp()
top_i = torch.multinomial(output_dist, 1)[0]
p_list.append(top_i)
# Add predicted character to string and use as next input
predicted_char = all_characters[top_i]
predicted += predicted_char
inp = Variable(char_tensor(predicted_char).unsqueeze(0))
if cuda: inp = inp.cuda()
# print (p_list)
return predicted, hidden
def generate_token(decoder,
prime_str='int ',
temperature=0.35,
cuda=False,
args=None,
init_hidden=None):
    prime_input = Variable(char_tensor(prime_str).unsqueeze(0))
    if not init_hidden:
        hidden = decoder.init_hidden(1)
    else:
        hidden = init_hidden
if cuda:
hidden = hidden.cuda()
prime_input = prime_input.cuda()
# Use priming string to "build up" hidden state
for p in range(len(prime_str) - 1):
_, hidden = decoder(prime_input[:,p], hidden)
init_hidden = hidden
init_inp = prime_input[:,-1]
is_good = False
while (not is_good):
is_good = True
predicted = ''
p_list = []
hidden = init_hidden
inp = init_inp
stopped = False
while (not stopped):
            output, hidden = decoder(inp, hidden)
# Sample from the network as a multinomial distribution
output_dist = output.data.view(-1).div(temperature).exp()
top_i = torch.multinomial(output_dist, 1)[0]
            # Reject this attempt if the same character would repeat three times in a row
            if len(p_list) >= 2 and top_i == p_list[-1] == p_list[-2]:
                is_good = False
p_list.append(top_i)
# Add predicted character to string and use as next input
predicted_char = all_characters[top_i]
if predicted_char in string.whitespace:
stopped = True
predicted += predicted_char
inp = Variable(char_tensor(predicted_char).unsqueeze(0))
if cuda: inp = inp.cuda()
if len(predicted) > 15:
is_good = False
# print (p_list)
return predicted, hidden
# Initialize models and start training
def build_parser():
argparser = argparse.ArgumentParser()
argparser.add_argument('--filename', type=str)
argparser.add_argument('--n_epochs', type=int, default=2000)
argparser.add_argument('--print_every', type=int, default=1)
argparser.add_argument('--hidden_size', type=int, default=256)
argparser.add_argument('--n_layers', type=int, default=3)
argparser.add_argument('--learning_rate', type=float, default=0.01)
argparser.add_argument('--chunk_len', type=int, default=100)
argparser.add_argument('--batch_size', type=int, default=64)
argparser.add_argument('--cuda', action='store_true')
    argparser.add_argument('--debug', action='store_true')
    argparser.add_argument('--type', action='store_true')
args = argparser.parse_args()
if args.cuda:
print("Using CUDA")
return args
def read_file(filename):
    contents = open(filename).read()
    return contents, len(contents)
def char_tensor(string):
tensor = torch.zeros(len(string)).long()
for c in range(len(string)):
        try:
            tensor[c] = all_characters.index(string[c])
        except ValueError:
            # Skip characters that are not in the vocabulary
            continue
return tensor
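# Example (a sketch): with all_characters = ['a', 'b', 'c'],
# char_tensor('cab') returns tensor([2, 0, 1]); characters outside the
# vocabulary keep the default index 0.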
if __name__ == '__main__':
args = build_parser()
SYMBOL_TABLE = os.path.join('../saved_model', 'vocab.sym')
if args.type and os.path.exists(SYMBOL_TABLE):
all_characters = list(set(open(SYMBOL_TABLE).read()))
else:
file = open(args.filename).read()
print('Loaded file', args.filename)
        print('File length', len(file), 'characters')
all_characters = list(set(file))
with open(SYMBOL_TABLE, 'w') as vocab:
print("".join(all_characters), file=vocab)
n_characters = len(all_characters)
decoder = CharRNN(n_characters, args.hidden_size,
n_characters, n_layers=args.n_layers)
decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=args.learning_rate)
criterion = nn.CrossEntropyLoss()
if args.type:
# Enter typing mode
print ('Typing Mode...')
decoder = torch.load('../saved_model/linux.pt')
from typing import build_getch
with build_getch() as getch:
try:
getchar = getch()
hidden = None
generator = SimpleGenerator(decoder)
prime_text = 'struct'
sys.stdout.write(prime_text)
                while getchar != '~':
#output_text, hidden = generate(decoder, prime_text, 20,
# cuda=args.cuda, args=args,
# hidden=hidden)
output_text, hidden = generator.generate(prime_text, 20,
cuda=args.cuda, args=args,
hidden=hidden)
sys.stdout.write(output_text)
prime_text += output_text
getchar = getch()
if len(prime_text) > 100:
prime_text = prime_text[-100:]
getch.reset()
except (KeyboardInterrupt, Exception) as e:
getch.reset()
                print(e)
raise e
raise Exception('Exit!')
else: # Train model
if args.cuda: decoder.cuda()
start = time.time()
all_losses = []
loss_avg = 0
try:
SAMPLES_PER_EPOCH = 10000
total_samples = 0
print("Training for %d epochs..." % args.n_epochs)
for epoch in range(1, args.n_epochs + 1):
                samples_processed = 0
                loss_avg = 0
                progress_bar = ProgressBar(SAMPLES_PER_EPOCH)
                while samples_processed < SAMPLES_PER_EPOCH:
                    inp, target = random_training_set(args.chunk_len,
                                                      args.batch_size,
                                                      file, args)
                    loss = train_on_batch(inp, target, args)
                    samples_processed += args.batch_size
                    loss_avg += loss * args.batch_size  # running sum for the per-sample average
                    progress_bar.print_progress(samples_processed)
                total_samples += samples_processed
                if epoch % args.print_every == 0:
                    elapsed = time.time() - start
                    print('[elapsed: %.1fs, epoch %d (%d%%), loss %.4f]' %
                          (elapsed, epoch, 100.0 * epoch / args.n_epochs,
                           loss_avg / float(samples_processed)))
text, hidden = generate(decoder, 'int', 1000,
cuda=args.cuda, args=args)
print(text)
print("Epoch {} : Saving...".format(epoch))
save(args)
except KeyboardInterrupt:
print("Saving before quit...")
save(args)
from zipline.errors import UnsupportedPipelineOutput
from zipline.utils.input_validation import (
expect_element,
expect_types,
optional,
)
from .domain import Domain, GENERIC, infer_domain
from .graph import ExecutionPlan, TermGraph, SCREEN_NAME
from .filters import Filter
from .term import AssetExists, ComputableTerm, Term
class Pipeline:
"""
A Pipeline object represents a collection of named expressions to be
compiled and executed by a PipelineEngine.
A Pipeline has two important attributes: 'columns', a dictionary of named
:class:`~zipline.pipeline.Term` instances, and 'screen', a
:class:`~zipline.pipeline.Filter` representing criteria for
including an asset in the results of a Pipeline.
To compute a pipeline in the context of a TradingAlgorithm, users must call
``attach_pipeline`` in their ``initialize`` function to register that the
pipeline should be computed each trading day. The most recent outputs of an
attached pipeline can be retrieved by calling ``pipeline_output`` from
``handle_data``, ``before_trading_start``, or a scheduled function.
Parameters
----------
columns : dict, optional
Initial columns.
screen : zipline.pipeline.Filter, optional
Initial screen.
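    Examples
    --------
    A minimal sketch (assuming the standard ``zipline.api`` helpers and the
    ``USEquityPricing`` dataset; adapt the names to your data bundle)::

        from zipline.api import attach_pipeline, pipeline_output
        from zipline.pipeline import Pipeline
        from zipline.pipeline.data import USEquityPricing

        def initialize(context):
            pipe = Pipeline(columns={'close': USEquityPricing.close.latest})
            attach_pipeline(pipe, 'my_pipeline')

        def before_trading_start(context, data):
            context.pipeline_data = pipeline_output('my_pipeline')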
"""
__slots__ = ('_columns', '_screen', '_domain', '__weakref__')
@expect_types(
columns=optional(dict),
screen=optional(Filter),
domain=Domain
)
def __init__(self, columns=None, screen=None, domain=GENERIC):
if columns is None:
columns = {}
validate_column = self.validate_column
for column_name, term in columns.items():
validate_column(column_name, term)
if not isinstance(term, ComputableTerm):
raise TypeError(
"Column {column_name!r} contains an invalid pipeline term "
"({term}). Did you mean to append '.latest'?".format(
column_name=column_name, term=term,
)
)
self._columns = columns
self._screen = screen
self._domain = domain
@property
def columns(self):
"""The output columns of this pipeline.
Returns
-------
columns : dict[str, zipline.pipeline.ComputableTerm]
Map from column name to expression computing that column's output.
"""
return self._columns
@property
def screen(self):
"""
The screen of this pipeline.
Returns
-------
screen : zipline.pipeline.Filter or None
Term defining the screen for this pipeline. If ``screen`` is a
filter, rows that do not pass the filter (i.e., rows for which the
filter computed ``False``) will be dropped from the output of this
pipeline before returning results.
Notes
-----
Setting a screen on a Pipeline does not change the values produced for
any rows: it only affects whether a given row is returned. Computing a
pipeline with a screen is logically equivalent to computing the
pipeline without the screen and then, as a post-processing-step,
filtering out any rows for which the screen computed ``False``.
"""
return self._screen
@expect_types(term=Term, name=str)
def add(self, term, name, overwrite=False):
"""Add a column.
The results of computing ``term`` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
        term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
"""
self.validate_column(name, term)
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError(f"Column '{name}' already exists.")
if not isinstance(term, ComputableTerm):
raise TypeError(
"{term} is not a valid pipeline column. Did you mean to "
"append '.latest'?".format(term=term)
)
self._columns[name] = term
@expect_types(name=str)
def remove(self, name):
"""Remove a column.
Parameters
----------
name : str
The name of the column to remove.
Raises
------
KeyError
If `name` is not in self.columns.
Returns
-------
removed : zipline.pipeline.Term
The removed term.
"""
return self.columns.pop(name)
@expect_types(screen=Filter, overwrite=(bool, int))
def set_screen(self, screen, overwrite=False):
"""Set a screen on this Pipeline.
Parameters
----------
        screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen
def to_execution_plan(self,
domain,
default_screen,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.Term
Term to use as a screen if self.screen is None.
all_dates : pd.DatetimeIndex
A calendar of dates to use to calculate starts and ends for each
term.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
"""
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
)
def to_simple_graph(self, default_screen):
"""
Compile into a simple TermGraph with no extra row metadata.
Parameters
----------
default_screen : zipline.pipeline.Term
Term to use as a screen if self.screen is None.
Returns
-------
graph : zipline.pipeline.graph.TermGraph
Graph encoding term dependencies.
"""
return TermGraph(self._prepare_graph_terms(default_screen))
def _prepare_graph_terms(self, default_screen):
"""Helper for to_graph and to_execution_plan."""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[SCREEN_NAME] = screen
return columns
@expect_element(format=('svg', 'png', 'jpeg'))
def show_graph(self, format='svg'):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
"""
g = self.to_simple_graph(AssetExists())
if format == 'svg':
return g.svg
elif format == 'png':
return g.png
elif format == 'jpeg':
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format)
@staticmethod
@expect_types(term=Term, column_name=str)
def validate_column(column_name, term):
if term.ndim == 1:
raise UnsupportedPipelineOutput(column_name=column_name, term=term)
@property
def _output_terms(self):
"""
A list of terms that are outputs of this pipeline.
Includes all terms registered as data outputs of the pipeline, plus the
screen, if present.
"""
terms = list(self._columns.values())
screen = self.screen
if screen is not None:
terms.append(screen)
return terms
@expect_types(default=Domain)
def domain(self, default):
"""
Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.domain.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.domain.Domain
The domain for the pipeline.
Raises
------
        AmbiguousDomain
            If terms with conflicting non-generic domains are registered.
        ValueError
            If the terms in ``self`` conflict with self._domain.
"""
# Always compute our inferred domain to ensure that it's compatible
# with our explicit domain.
inferred = infer_domain(self._output_terms)
if inferred is GENERIC and self._domain is GENERIC:
# Both generic. Fall back to default.
return default
elif inferred is GENERIC and self._domain is not GENERIC:
# Use the non-generic domain.
return self._domain
elif inferred is not GENERIC and self._domain is GENERIC:
# Use the non-generic domain.
return inferred
else:
# Both non-generic. They have to match.
if inferred is not self._domain:
raise ValueError(
"Conflicting domains in Pipeline. Inferred {}, but {} was "
"passed at construction.".format(inferred, self._domain)
)
return inferred
#!/usr/bin/env python
# ToMaTo (Topology management software)
# Copyright (C) 2010 Dennis Schwerdel, University of Kaiserslautern
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""
Grants
------
For security reasons, the fileserver uses so-called *grants* to verify that
an upload or download request is authorized by the hostmanager. The grants are
pseudo-random strings that are very unlikely to be guessed.
Note that grants have an internal timeout and lose their validity after that.
Uploading files
---------------
The filemanager accepts file uploads for valid grants under the URL
``http://SERVER:PORT/GRANT/upload``. Uploads have to be sent via POST with
*multipart/form-data* encoding. After successfully uploading a file, a success
message is shown. A redirect to a different URL can be requested by appending
``?redirect=URL_BASE64`` to the upload URL where *URL_BASE64* is the
base64-encoded destination URL.
A simple upload form can be accessed under the URL
``http://SERVER:PORT/GRANT/upload_form``.
Downloading files
-----------------
The filemanager accepts file download requests for valid grants under the URL
``http://SERVER:PORT/GRANT/download``. Downloads have to be requested via GET
requests. The filemanager accepts the following parameters for downloads:
``name``
The name of the file that is being sent to the client
``mimetype``
The content-type of the file that is being sent to the client
The fileserver will also honor the ``If-modified-since`` header.
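Example
-------
A hypothetical exchange (``GRANT`` is a placeholder for a real grant string
issued by the hostmanager; the upload form field must be named *upload*)::

    curl -F "upload=@image.qcow2" "http://SERVER:PORT/GRANT/upload"
    curl -o image.qcow2 "http://SERVER:PORT/GRANT/download?name=image.qcow2"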
"""
import SocketServer, BaseHTTPServer, hashlib, cgi, urlparse, urllib, shutil, base64, time, os.path, datetime, sys
try: #python >=2.6
from urlparse import parse_qsl #@UnusedImport
except: #python <2.6
from cgi import parse_qsl #@Reimport
from .. import util #@UnresolvedImport
from ... import config
ACTION_UPLOAD = "upload"
ACTION_DOWNLOAD = "download"
_httpd = None
_seed = os.urandom(8)
_grants = {}
def deleteGrantFile(grant):
if os.path.exists(grant.path):
os.remove(grant.path)
def _code(path):
return hashlib.md5(_seed+path).hexdigest()
def addGrant(path, *args, **kwargs):
code = _code(path)
_grants[code] = Grant(path, *args, **kwargs)
return code
def delGrant(code):
if code in _grants:
del _grants[code]
def getGrant(code):
return _grants.get(code)
def timeout():
for grant in _grants.values():
if grant.until < time.time():
grant.remove()
class Grant:
def __init__(self, path, action, until=None, triggerFn=None, repeated=False, timeout=None, removeFn=None):
self.path = path
self.action = action
if until:
self.until = until
else:
if not timeout:
timeout = {"upload": 3600, "download": 12*3600}[action]
self.until = time.time() + timeout
self.triggerFn = triggerFn
self.removeFn = removeFn
self.repeated = repeated
def trigger(self):
if callable(self.triggerFn):
self.triggerFn(self)
if not self.repeated:
self.remove()
def check(self, action):
        if self.until < time.time():
self.remove()
return False
return action == self.action
def remove(self):
if callable(self.removeFn):
self.removeFn(self)
delGrant(_code(self.path))
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def process_request(self):
_, _, path, _, query, _ = urlparse.urlparse(self.path)
params = dict(parse_qsl(query))
return (path, params)
def error(self, code, message):
self.send_error(code, message)
self.end_headers()
self.finish()
def html(self, html, code=200, redirect=None):
self.send_response(code)
self.end_headers()
self.wfile.write("<html>")
if redirect:
self.wfile.write("<head><meta http-equiv=\"refresh\" content=\"0;url=%s\"/></head>" % redirect)
self.wfile.write("<body>")
self.wfile.write(html)
self.wfile.write("</body></html>")
self.finish()
def do_POST(self):
return self._handle()
def do_HEAD(self):
return self._handle()
def do_GET(self):
return self._handle()
def _handle(self):
path, params = self.process_request()
try:
parts = path.split("/")
if len(parts) != 3 or parts[0]:
return self.error(404, "Not Found")
(dummy, grant, action) = parts
if hasattr(self, "_handle_%s" % action):
return getattr(self, "_handle_%s" % action)(grant, **params)
else:
return self.error(404, "Not Found")
except Exception, exc:
import traceback
traceback.print_exc()
self.error(500, "%s failed: %s" % (path, exc))
def _handle_download(self, grant, name="download", mimetype="application/octet-stream", **params):
grant = getGrant(grant)
if not (grant and grant.check(ACTION_DOWNLOAD)):
self.error(403, "Invalid grant")
return
filename = grant.path
if not os.path.exists(filename):
grant.trigger()
return self.error(404, "File not found")
if "If-Modified-Since" in self.headers:
date = datetime.datetime.strptime(self.headers.get("If-Modified-Since"), "%a, %d %b %Y %H:%M:%S %Z")
fdate = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
if fdate <= date:
grant.trigger()
return self.error(304, "Not modified")
with open(filename, "rb") as file_:
self.send_response(200)
if name:
self.send_header('Content-Disposition', 'attachment; filename="%s"' % name)
self.send_header('Content-Type', mimetype)
self.send_header('Content-Length', os.path.getsize(filename))
self.end_headers()
if self.command != "HEAD":
shutil.copyfileobj(file_, self.wfile)
grant.trigger()
self.finish()
def _handle_upload_form(self, grant, **params):
params = urllib.urlencode(params)
return self.html('<form method="POST" enctype="multipart/form-data" action="/%s/upload?%s"><input type="file" name="upload"><input type="submit"></form>' % (grant, params))
def _handle_upload(self, grant, redirect=None, **params):
grant = getGrant(grant)
if not (grant and grant.check(ACTION_UPLOAD)):
self.error(403, "Invalid grant")
return
filename = grant.path
with open(filename, "wb") as file_:
form = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD':self.command, 'CONTENT_TYPE':self.headers['Content-Type']})
upload = form["upload"].file
shutil.copyfileobj(upload, file_)
grant.trigger()
if redirect:
self.html("success, redirecting...", redirect=base64.b64decode(redirect))
else:
self.html("upload successful")
def log_message(self, format, *args): #@ReservedAssignment
return
class ThreadedHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""Handle requests in a separate thread."""
def start():
print >>sys.stderr, "Starting fileserver on port %d" % config.FILESERVER["PORT"]
global _httpd
_httpd = ThreadedHTTPServer(('', config.FILESERVER["PORT"]), RequestHandler)
util.start_thread(_httpd.serve_forever)
def stop():
_httpd.server_close()
# This file is part of Radicale Server - Calendar Server
# Copyright © 2014 Jean-Marc Martins
# Copyright © 2012-2017 Guillaume Ayoub
# Copyright © 2017-2018 Unrud <unrud@outlook.com>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import os
from tempfile import TemporaryDirectory
from radicale import pathutils, storage
class CollectionDeleteMixin:
def delete(self, href=None):
if href is None:
# Delete the collection
j.sal.bcdbfs.dir_remove(self._filesystem_path)
else:
# Delete an item
if not pathutils.is_safe_filesystem_path_component(href):
raise pathutils.UnsafePathError(href)
path = pathutils.path_to_filesystem(self._filesystem_path, href)
if not j.sal.bcdbfs.is_file(path):
raise storage.ComponentNotFoundError(href)
j.sal.bcdbfs.file_remove(path)
# Track the change
self._update_history_etag(href, None)
self._clean_history()
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
from .logger import Log
from .calibration import GetCalibratedCamera, WarpMachine
from .filtering import EdgeDetector
from .lane_fitting import LaneFit
from .save import chmod_rw_all, delete_file
from .profiler import Profiler
def draw_overlay(warper, lane_fitting, undistorted, warped):
# get curvature and vehicle position
left_cr, right_cr = lane_fitting.get_curvature()
pos = lane_fitting.get_vehicle_position()
# get fitpoints
pts_y, left_fitx, right_fitx = lane_fitting.get_fitpoints()
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, pts_y]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, pts_y])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
overlay = warper.unwarp(color_warp)
# Combine the result with the original image
vis_overlay = cv2.addWeighted(undistorted, 1, overlay, 0.3, 0)
pos_str = "Left" if pos < 0 else "Right"
crl_text = "Radius of curvature (left) = %.1f km" % (left_cr / 1000)
crr_text = "Radius of curvature (right) = %.1f km" % (right_cr / 1000)
cr_text = "Radius of curvature (avg) = %.1f km" % ((left_cr + right_cr) / 2000)
pos_text = "Vehicle is %.1f m %s from the lane center" % (np.abs(pos), pos_str)
def put_text(image, text, color=(255, 255, 255), ypos=100):
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image, text, (350, ypos), font, 1, color, 2, cv2.LINE_AA)
put_text(vis_overlay, crl_text, ypos=50)
put_text(vis_overlay, crr_text, ypos=100)
put_text(vis_overlay, cr_text, ypos=150)
put_text(vis_overlay, pos_text, ypos=200)
return vis_overlay
class LaneLinesTracker(object):
def __init__(self):
self.camera = GetCalibratedCamera()
self.warper = WarpMachine()
# profiling
self.p_video = Profiler("Total Time")
self.p_undistort = Profiler("Distortion Correction")
self.p_edges = Profiler("Edge Detection")
self.p_warp = Profiler("Perspective Transform")
self.p_fitting = Profiler("Lane Fitting")
self.p_overlay = Profiler("Overlay Drawing")
def process_video(self, input_file, output_file, subclip_seconds=None):
# delete output file to avoid permission problems between docker/user on write
delete_file(output_file)
self.p_video.start()
# read
Log.subsection("Reading video file: %s" % input_file)
clip = VideoFileClip(input_file)
# subclip
if subclip_seconds:
Log.info("Clipping video to: %.1f s" % subclip_seconds)
clip = clip.subclip(0, subclip_seconds)
# set image handler
Log.info("Setting Image Handler ...")
clip = clip.fl_image(self.process_image)
# process / save
Log.subsection("Processing Video ...")
clip.write_videofile(output_file, audio=False, verbose=False)
chmod_rw_all(output_file)
self.p_video.update()
# display profiling results
Log.subsection("Profiling Results ...")
total_secs = self.p_video.get_elapsed()
self.p_video.display_elapsed(total_secs)
self.p_undistort.display_elapsed(total_secs)
self.p_edges.display_elapsed(total_secs)
self.p_warp.display_elapsed(total_secs)
self.p_fitting.display_elapsed(total_secs)
self.p_overlay.display_elapsed(total_secs)
self.p_video.display_processing_factor(clip.duration)
def process_image(self, image):
# Distortion correction
self.p_undistort.start()
undistorted = self.camera.undistort(image)
self.p_undistort.update()
# Edge Detection
self.p_edges.start()
edge_detector = EdgeDetector()
edges = edge_detector.detect(undistorted)
self.p_edges.update()
# Perspective Transform
self.p_warp.start()
warped = self.warper.warp(edges)
self.p_warp.update()
# Lane Fitting
self.p_fitting.start()
lane_fitting = LaneFit(image.shape[1], image.shape[0])
vis_lanes = lane_fitting.fit_polynomial(warped)
self.p_fitting.update()
# Draw Overlay
self.p_overlay.start()
vis_overlay = draw_overlay(self.warper, lane_fitting, undistorted, warped)
self.p_overlay.update()
return vis_overlay
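# Example usage (a sketch; the file names are hypothetical, and this module
# uses relative imports, so call it through the package's entry point):
#   tracker = LaneLinesTracker()
#   tracker.process_video("project_video.mp4", "out/project_video.mp4",
#                         subclip_seconds=5)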
from rest_framework import serializers
from chigre.models import KegType
class KegTypeSerializer(serializers.ModelSerializer):
class Meta:
model = KegType
fields = ('id', 'name', 'size', 'pints', 'canyas')
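# Example (a sketch; the instance and field values are hypothetical):
#   KegTypeSerializer(keg_type).data
#   # -> {'id': 1, 'name': 'standard', 'size': 50, 'pints': 88, 'canyas': 250}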
import numpy as np
import pandas as pd
from calParser import obtainSchedule
from audit_parser import audit_info
from lsa_recommender import export_to_master,filter_available_classes
from decision_tree import preference_score,top_preferred_courses
from collaborative_filtering import loadAudits, inputData, buildRecommender, makePrediction, compileDepartScores
from time import time
import json
from CONSTANTS import *
'''
File: property.py
Project: 08-class
File Created: Saturday, 25th July 2020 9:16:43 pm
Author: lanling (https://github.com/muyuuuu)
-----------
Last Modified: Saturday, 25th July 2020 9:16:46 pm
Modified By: lanling (https://github.com/muyuuuu)
Copyright 2020 - 2020 NCST, NCST
-----------
@ May the Buddha bless us: no bugs, ever --
'''
# Python's built-in @property decorator turns a method into an attribute access,
# which helps avoid redundant getter/setter boilerplate
import math
class Student(object):
def __init__(self, score):
self._score = score
    # Getter function: expose the method as an attribute
@property
def score(self):
return self._score
@score.setter
def score(self, value):
if not isinstance(value, int):
raise ValueError('score must be an integer!')
if value < 0 or value > 100:
raise ValueError('score must between 0 ~ 100!')
self._score = value
    # Triggered by `del s.score`
@score.deleter
def score(self):
raise AttributeError("Can't delete attribute")
s = Student(89)
# The method is accessed like an attribute
print(s.score)
# Changing the underlying attribute directly is not recommended
s._score = 90
s.score = 98
# Assigning through the property setter gives us controlled attribute access
print(s.score)
# Don't write properties like this that add no extra behavior.
# First, they make the code bloated;
# second, they make the program run noticeably slower.
class People(object):
@property
def birth(self):
return self._birth
    # The setter must keep the same name as the property
@birth.setter
def birth(self, value):
self._birth = value
    # A read-only property
@property
def age(self):
return 2020 - self._birth
s = People()
s.birth = 1998
# Assignment would raise an error
# s.age = 23
print(s.age)
# Dynamically computed attributes: these are not actually stored, but computed on demand.
class Circle:
def __init__(self, radius):
self.radius = radius
@property
def area(self):
return math.pi * self.radius ** 2
@property
def diameter(self):
return self.radius * 2
@property
def perimeter(self):
return 2 * math.pi * self.radius
c = Circle(4.0)
print(c.perimeter)
# Don't write property definitions with lots of repeated code like the class below (later sections cover how to fix this)
class Person:
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
@property
def first_name(self):
return self._first_name
@first_name.setter
def first_name(self, value):
if not isinstance(value, str):
raise TypeError('Expected a string')
self._first_name = value
# Repeated property code, but for a different name (bad!)
@property
def last_name(self):
return self._last_name
@last_name.setter
def last_name(self, value):
if not isinstance(value, str):
raise TypeError('Expected a string')
self._last_name = value
# Extending a property in a subclass
# -*- coding: utf-8 -*-
import json
import os
import os.path
import logging
log = logging.getLogger(__name__)
def filelist(folderpath, ext=None):
'''
Returns a list of all the files contained in the folder specified by `folderpath`.
    To filter the files by extension, pass a list of extensions (each including the leading `.`) as the second argument.
'''
if not ext:
ext = []
if os.path.exists(folderpath) and os.path.isdir(folderpath):
return [ os.path.join(folderpath, f) for f in os.listdir(folderpath) if os.path.isfile(os.path.join(folderpath, f)) and os.path.splitext(f)[1] in ext ]
    else:
        log.warning('"{}" does not exist or is not a directory'.format(folderpath))
        return []
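# Example (a sketch): list the JSON files directly inside a folder:
#   filelist('/some/folder', ext=['.json'])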
def particles(category=None):
'''
Returns a dict containing old greek particles grouped by category.
'''
filepath = os.path.join(os.path.dirname(__file__), './particles.json')
with open(filepath) as f:
try:
particles = json.load(f)
except ValueError as e:
log.error('Bad json format in "{}"'.format(filepath))
else:
if category:
if category in particles:
return particles[category]
else:
                    log.warning('Category "{}" not contained in particle dictionary!'.format(category))
return particles
def bookname(bookindex):
'''
Returns the name of the book given the index.
'''
nt = {
0: 'Matthew',
1: 'Mark',
2: 'Luke',
3: 'John',
4: 'Acts',
5: 'Romans',
6: 'Corinthians 1',
7: 'Corinthians 2',
8: 'Galatians',
9: 'Ephesians',
10: 'Philippians',
11: 'Colossians',
12: 'Thessalonians 1',
13: 'Thessalonians 2',
14: 'Timothy 1',
15: 'Timothy 2',
16: 'Titus',
17: 'Philemon',
18: 'Hebrews',
19: 'James',
20: 'Peter 1',
21: 'Peter 2',
22: 'John 1',
23: 'John 2',
24: 'John 3',
25: 'Jude',
26: 'Revelation'
}
# book indices are beginning from 1
return nt[bookindex - 1]
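# Example: bookname(1) -> 'Matthew'; bookname(27) -> 'Revelation'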
def parts():
'''
Returns the dictionary with the part as key and the contained book as indices.
'''
parts = {
'Canon': [ _ for _ in range(1, 5) ],
'Apostle': [ 5 ],
'Paul': [ _ for _ in range(6, 19) ],
        'General': [ _ for _ in range(19, 27) ],  # books 19-26, including Jude
'Apocalypse': [ 27 ]
}
return parts
'''
Copyright (C) 2016-2021 Mo Zhou <lumin@debian.org>
License: MIT/Expat
'''
import os
import math
import time
import random
from .cuda_selector import CudaSelector
RESOURCE_DEFAULT = 'void'
RESOURCE_TYPES = (RESOURCE_DEFAULT, 'virtual', 'cpu', 'memory', 'gpu', 'vmem')
if str(os.getenv('TASQUE_RESOURCE', '')):
RESOURCE_DEFAULT = str(os.getenv('TASQUE_RESOURCE'))
class AbstractResource:
def __init__(self):
'''
Attributes:
self.book: tracking resource assignment
'''
self.book = dict()
self.acquire = dict()
self.release = dict()
def idle(self):
'''
Wait for some time.
'''
time.sleep(2)
def avail(self) -> float:
'''
Total amount of available specific <kind> of resource.
'''
raise NotImplementedError('how to determine available resource?')
def canalloc(self, rsc: float) -> bool:
'''
check whether <rsc> of resource can be allocated. does not block.
'''
raise NotImplementedError(f'can I allocate <{rsc}>?')
def waitfor(self, rsc: float) -> None:
'''
wait until <rsc> of resource can be allocated. does indeed block.
'''
raise NotImplementedError(f'is there <{rsc}>?')
def request(self, pid: int, rsc: float) -> (callable, callable):
'''
generate callback functions for allocating the requested resource
'''
def acquire():
raise NotImplementedError('how to allocate resource?')
def release():
raise NotImplementedError('how to release resource?')
return (acquire, release)
class VoidResource(AbstractResource):
'''
Void resource / sequential execution. (default)
'''
def avail(self) -> float:
return math.nan
def canalloc(self, rsc: float) -> bool:
return (0 == len(self.book))
def waitfor(self, rsc: float) -> None:
return None
def request(self, pid: int, rsc: float) -> None:
self.acquire[pid] = lambda: self.book.__setitem__(pid, rsc)
self.release[pid] = lambda: self.book.pop(pid)
class VirtualResource(AbstractResource):
'''
Virtual resource. And imagined resource with upper bound as <1.0>.
Can be used to arrange some taks to run in parallel.
'''
def avail(self) -> float:
return 1.0
def canalloc(self, rsc: float) -> bool:
return (rsc <= self.avail() - sum(self.book.values()))
def waitfor(self, rsc: float) -> None:
while not self.canalloc(rsc):
self.idle()
def request(self, pid: int, rsc: float) -> None:
self.acquire[pid] = lambda: self.book.__setitem__(pid, rsc)
self.release[pid] = lambda: self.book.pop(pid)
class GpuResource(AbstractResource):
'''
GPU (CUDA) Resource. Allocate cards (as a whole) for the requestors.
We only consider a card "available" when >=97% video memory is free.
'''
cusel = CudaSelector()
def avail(self) -> float:
        # Number of available cards
        return float(len(self.cusel.availCards()))
def canalloc(self, rsc: float) -> bool:
# available cards
cards = self.cusel.availCards()
# excluding those registered in self.book
cards = [card for card in cards if card.index not in self.book.values()]
return len(cards) > 0
def request(self, pid: int, rsc: float) -> None:
# currently only support allocating 1 card at a time.
assert(int(rsc) == 1)
        exclude = set(self.book.values())
        selcard = random.choice([card for card in self.cusel.availCards()
                                 if card.index not in exclude])
def acquire():
os.putenv('CUDA_VISIBLE_DEVICES', str(selcard.index))
self.book[pid] = selcard.index
self.acquire[pid] = acquire
self.release[pid] = lambda: self.book.pop(pid)
class VmemResource(AbstractResource):
'''
CUDA Video Memory Resource. Allocate video memories for the requestors.
In this way we can allocate GPU resources in a fine-grained manner and
smartly jam various tasks on the GPUs as appropriate. Unlike
coarse-grained GPU allocation such as Slurm(CUDA) which allocate each
card as a whole to the requestors.
'''
cusel = CudaSelector()
def avail(self) -> float:
cards = self.cusel.getCards()
return float(sum(card.memory_free for card in cards))
def canalloc(self, rsc: float) -> bool:
# First round: cards that have enough free memory
cards = self.cusel.getCards()
cards = [card for card in cards if card.memory_free >= rsc]
# Second round: remove cards that have been allocated in the book
cards = [card for card in cards if card.index not in self.book.values()]
return len(cards) > 0
def request(self, pid: int, rsc: float) -> None:
exclude = self.book.values()
device_index = self.cusel.selectCard(rsc, exclude=exclude)
def acquire():
os.putenv('CUDA_VISIBLE_DEVICES', str(device_index))
self.book[pid] = rsc
self.acquire[pid] = acquire
self.release[pid] = lambda: self.book.pop(pid)
class CpuResource(AbstractResource):
def __init__(self):
super(CpuResource, self).__init__()
raise NotImplementedError()
class MemoryResource(AbstractResource):
def __init__(self):
super(MemoryResource, self).__init__()
raise NotImplementedError()
def create(name: str):
'''
factory function
'''
mapping = {
RESOURCE_DEFAULT: VoidResource,
'virtual': VirtualResource,
'cpu': CpuResource,
'memory': MemoryResource,
'gpu': GpuResource,
'vmem': VmemResource,
}
return mapping[name]()
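# A minimal usage sketch (assuming TASQUE_RESOURCE is unset, so 'void' maps
# to VoidResource; request() registers per-pid acquire/release callbacks):
#   rsc = create('virtual')
#   rsc.waitfor(0.5)          # block until 0.5 of the resource is free
#   rsc.request(1234, 0.5)
#   rsc.acquire[1234]()       # book the resource for pid 1234
#   ...
#   rsc.release[1234]()       # free it again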
# -*- coding: utf-8 -*-
from wcstring import wcstr
import re
class PipelineTable(object):
'''
Pipeline Table Object.
Attributes
----------
data : 2-dimension list
1st dimension indicates the column
2nd dimension indicates the index, with combined
indexes grouped in a list
colwidth : list of int
set width of every column
'''
    def __init__(self, data=[[]], colwidth=None):
        self.data = data
        self.align = None
        if colwidth and len(data) == len(colwidth):
            self.colwidth = colwidth
        else:
            # Default: width of the widest (joined) entry in each column
            self.colwidth = [max(len(''.join(i)) for i in col) for col in data]
    def autofmt(self, border=2, maxwidth=76, align='c'):
        if len(self.data) > len(align):
            align = align + align[-1] * (len(self.data) - len(align))
        self.align = align
        self.space_fill(align=align)
def space_fill(self, align='c'):
for col in range(len(self.data)):
for ind in range(len(self.data[col])):
if align[col] == 'l':
self.data[col][ind] = [wcstr(i).ljust(self.colwidth[col])
for i in self.data[col][ind]]
                elif align[col] == 'r':
self.data[col][ind] = [wcstr(i).rjust(self.colwidth[col])
for i in self.data[col][ind]]
else:
self.data[col][ind] = [wcstr(i).center(self.colwidth[col])
for i in self.data[col][ind]]
def read_pipeline(string, mode='strict'):
'''
Read a pipeline table.
Parameters
----------
string : str
a string containing a pipeline table
'''
# differentiate wordlines and separating lines
    lines = [wcstr(i) for i in string.split('\n') if re.findall(r'^ *\|?.+\|? *', i)]
    seplines = [i for i in range(len(lines)) if re.findall(r' *\|?[-:|]+\|? *', lines[i])]
    wordlines = [i for i in range(len(lines)) if i not in seplines]
    if len(seplines) > 1:
        raise ValueError("Multiple seplines detected")
    elif not seplines:
        raise ValueError("No sepline detected")
    sepline = seplines[0]
    coldata = [[i for i in re.split(r"(?<!\\)\|", lines[j]) if i.strip()] for j in wordlines]
colcount = len(coldata[0])
    # Check column length
    for i in range(len(coldata)):
        if len(coldata[i]) < colcount:
            coldata[i].extend([""] * (colcount - len(coldata[i])))
        elif len(coldata[i]) > colcount:
            raise ValueError("Length of columns of data is larger than header")
coldata = list(zip(*coldata))
return PipelineTable(data=coldata)
def put_pipeline(pt, align='c'):
'''
Put down a pipeline table.
Parameters
----------
pt : PipelineTable
align : str or iterable containing align characters
'l' : left-aligned
'r' : right-aligned
'c' : centered
'''
pt.autofmt(align=align)
# column name first
print('|','|'.join([''.join(i[0]) for i in pt.data]),'|',sep='')
print('|','|'.join([i*'-' for i in pt.colwidth]),'|',sep='')
colcounter = [1] * len(pt.data)
indcounter = [0] * len(pt.data)
bdrindic = []
nextline = []
# the remaining parts
    while colcounter[0] < len(pt.data[0]):
for col in range(len(pt.data)):
if indcounter[col] >= len(pt.data[col][colcounter[col]]):
nextline.append('-'*pt.colwidth[col])
colcounter[col] += 1
indcounter[col] = 0
bdrindic.append(True)
else:
nextline.append(pt.data[col][colcounter[col]][indcounter[col]])
indcounter[col] += 1
bdrindic.append(False)
bdrindic.append(False)
print('|', end='')
for col in range(len(pt.data)):
print(nextline[col], end='')
print('|', end='')
print()
nextline = []
bdrindic = []
return
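# Example (a sketch of the expected markdown-style input):
#   pt = read_pipeline("| a | b |\n|---|---|\n| 1 | 2 |")
#   put_pipeline(pt, align='lc')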
from itertools import groupby
from pathlib import Path
inp = Path('input.txt').read_text()
vowels = set('aeiou')
nope = 'ab cd pq xy'.split()
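# Count "nice" strings: at least three vowels, at least one doubled letter,
# and none of the forbidden substrings (Advent of Code 2015, day 5, part 1).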
print(sum(
(
sum(c in vowels for c in line) >= 3 and
len(list(groupby(line))) < len(line) and
not any(s in line for s in nope)
)
for line in inp.splitlines()
))
import matplotlib.pyplot as plt
x_values = list(range(1, 5001))
y_values = [x**3 for x in x_values]
plt.scatter(x_values, y_values)
plt.show()
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import catboost
import pandas as pd
DATA_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../../../../../../../examples/src/main/resources/datasets/boston_housing_dataset.txt')
MODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"../models/model_reg.cbm")
DATA_SAMPLE_PREDICT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../../../../../../../examples/src/main/resources/datasets/boston_housing_dataset-catboost-expected-results'
'.txt')
def main():
# load dataset
features = [
f'f_{i}'
for i in range(13)
]
target = 'target'
df = pd.read_csv(DATA_PATH, names=features + ['target'])
# fit model
model = catboost.CatBoost({
'loss_function': 'RMSE',
'verbose': False,
'random_seed': 0
})
model.fit(df[features], df[target])
model.save_model(MODEL_PATH)
# predict on sample
predicts = model.predict(df[features])
pd.DataFrame({
'x': predicts
}).to_csv(DATA_SAMPLE_PREDICT_PATH, index=False, header=False)
# predict on one sample
print('Parameters:')
r = df[:1][features].to_dict('records')
for k, v in r[0].items():
print(f'input.put("{k}", {v}d);')
print('Expected predict:')
print(model.predict(df[:1])[0])
if __name__ == '__main__':
main()
from enum import Enum
import regex
from ..config import Config
from ..utils import Api
class OsuConsts(Enum):
"""
all constants related to osu
"""
# "": 0,
MODS = {
"NF": 1 << 0,
"EZ": 1 << 1,
"TD": 1 << 2,
"HD": 1 << 3,
"HR": 1 << 4,
"SD": 1 << 5,
"DT": 1 << 6,
"RX": 1 << 7,
"HT": 1 << 8,
"NC": 1 << 9,
"FL": 1 << 10,
"AT": 1 << 11,
"SO": 1 << 12,
"AP": 1 << 13,
"PF": 1 << 14,
"4K": 1 << 15,
"5K": 1 << 16,
"6K": 1 << 17,
"7K": 1 << 18,
"8K": 1 << 19,
"FI": 1 << 20,
"RD": 1 << 21,
"LM": 1 << 22,
"TR": 1 << 23,
"9K": 1 << 24,
"10K": 1 << 25,
"1K": 1 << 26,
"3K": 1 << 27,
"2K": 1 << 28,
"V2": 1 << 29
}
MODS_INT = {v: k for k, v in MODS.items()}
DIFF_MODS = ["HR", "EZ", "DT", "HT", "NC", "FL", "HD", "NF"]
TIME_MODS = ["DT", "HT", "NC"]
AR_MS_STEP1 = 120
AR_MS_STEP2 = 150
AR0_MS = 1800
AR5_MS = 1200
AR10_MS = 450
OD_MS_STEP = 6
OD0_MS = 79.5
OD10_MS = 19.5
DT_SPD = 1.5
HT_SPD = .75
HR_AR = 1.4
EZ_AR = 0.5
HR_CS = 1.3
EZ_CS = 0.5
HR_OD = 1.4
EZ_OD = 0.5
HR_HP = 1.4
EZ_HP = 0.5
STRAIN_STEP = 400.0
DECAY_BASE = [0.3, 0.15]
STAR_SCALING_FACTOR = 0.0675
EXTREME_SCALING_FACTOR = 0.5
DECAY_WEIGHT = 0.9
MODS_RE = regex.compile(rf"^({'|'.join(OsuConsts.MODS.value.keys())})+$")
OSU_API = Api("https://osu.ppy.sh/api", 60, {"k": Config.credentials.osu_api_key})
# todo make a list of apis for multi server comparability
__all__ = ["OsuConsts", "MODS_RE", "OSU_API", "utils", "apiTools", "stating", "graphing", "embedding"]
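# Example (a sketch): decoding a mod bitmask with the MODS table.
#   mods = OsuConsts.MODS.value
#   bitmask = mods["HD"] | mods["DT"]                        # 8 | 64 == 72
#   [name for name, bit in mods.items() if bitmask & bit]   # -> ['HD', 'DT']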
"""
This file is part of L3Morpho.
L3Morpho is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
L3Morpho is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with L3Morpho. If not, see <http://www.gnu.org/licenses/>.
--------------------------------------------------------------------
Author: Michael Gasser <gasser@cs.indiana.edu>
Create Language, Morphology, and POSMorphology objects for Amharic.
All functions specific to Amharic morphology are here (or imported
from geez.py).
"""
from . import language
from .geez import *
### Various functions that will be values of attributes of Amharic Morphology
### and POSMorphology objects.
def vb_get_citation(root, fs, simplified=False, guess=False, vc_as=False):
    '''Return the canonical (prf, 3sm) form for the root and the feature structure fs.
If vc_as is True, preserve the voice and aspect of the original word.
'''
if root == 'al_e':
return "'ale"
# Return root if no citation is found
result = root
# Unfreeze the feature structure
fs = fs.unfreeze()
# Update the feature structure to incorporate default (with or without vc and as)
fs.update(AM.morphology['v'].citationFS if vc_as else AM.morphology['v'].defaultFS)
# Refreeze the feature structure
fs.freeze()
# Find the first citation form compatible with the updated feature structure
citation = AM.morphology['v'].gen(root, fs, from_dict=False,
simplified=simplified, guess=guess)
if citation:
result = citation[0][0]
elif not vc_as:
# Verb may not occur in simplex form; try passive
fs = fs.unfreeze()
fs.update({'vc': 'ps'})
fs.freeze()
citation = AM.morphology['v'].gen(root, fs, from_dict=False,
simplified=simplified, guess=guess)
if citation:
result = citation[0][0]
return result
def n_get_citation(root, fs, simplified=False, guess=False, vc_as=False):
    '''Return the canonical (prf, 3sm) form for the root and the feature structure fs.
If vc_as is True, preserve the voice and aspect of the original word.
'''
if fs.get('v'):
# It's a deverbal noun
return vb_get_citation(root, fs, simplified=simplified, guess=guess, vc_as=vc_as)
else:
return root
def simplify(word):
"""Simplify Amharic orthography."""
word = word.replace("`", "'").replace('H', 'h').replace('^', '').replace('_', '')
return word
def orthographize(word):
'''Convert phonological romanization to orthographic.'''
word = word.replace('_', '').replace('I', '')
return word
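# Examples (sketches): simplify("`al_eH") -> "'aleh";
# orthographize("yIsebr_al") -> "ysebral"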
def cop_anal2string(anal):
'''Convert a copula analysis to a string.
anal is ("cop", "new", "new", gramFS)
'''
s = 'POS: copula'
if anal[1]:
s += ', root: <' + anal[1] + '>'
s += '\n'
fs = anal[3]
if fs:
sb = fs['sb']
s += ' subj:'
s += arg2string(sb)
if fs.get('neg'):
s += ' negative\n'
cj = fs.get('cj2')
if cj:
s += ' conjunctive suffix: ' + cj + '\n'
return s
def n_anal2string(anal):
'''Convert a noun analysis to a string.
anal is ("(*)n", root, citation, gramFS)
'''
root = anal[1]
citation = anal[2]
fs = anal[3]
deverbal = fs and fs.get('v')
POS = '?POS: ' if '?' in anal[0] else 'POS: '
s = POS
if deverbal:
if deverbal == 'agt':
s += 'agentive noun'
elif deverbal == 'man':
s += 'manner noun'
elif deverbal == 'inf':
s += 'infinitive'
else:
s += 'instrumental noun'
if root:
s += ', root: <' + root + '>'
if citation:
s += ', citation: ' + citation
else:
s += 'noun'
if citation:
s += ', stem: ' + citation
elif root:
s += ', stem: ' + root
s += '\n'
if fs:
poss = fs.get('poss')
if poss and poss.get('expl'):
s += ' possessor:'
s += arg2string(poss, True)
gram = ''
# For agent, infinitive, instrumental, give aspect and voice unless both are simple
asp = fs.get('as')
vc = fs.get('vc')
rl = fs.get('rl')
any_gram = False
if deverbal and asp == 'it':
gram += ' iterative'
any_gram = True
elif deverbal and asp == 'rc':
if any_gram: gram += ','
gram += ' reciprocal'
any_gram = True
if deverbal and vc == 'ps':
if any_gram: gram += ','
gram += ' passive'
any_gram = True
elif vc == 'tr':
if any_gram: gram += ','
gram += ' transitive'
any_gram = True
elif vc == 'cs':
if any_gram: gram += ','
gram += ' causative'
any_gram = True
if fs.get('neg'):
# Only possible for infinitive
if any_gram: gram += ','
gram += ' negative'
any_gram = True
if fs.get('plr'):
if any_gram: gram += ','
gram += ' plural'
any_gram = True
if fs.get('def'):
if any_gram: gram += ','
any_gram = True
gram += ' definite'
if fs.get('dis'):
if any_gram: gram += ','
any_gram = True
gram += ' distrib(Iyye-)'
if rl and rl.get('acc'):
if any_gram: gram += ','
any_gram = True
gram += ' accusative'
if rl and rl.get('gen'):
if any_gram: gram += ','
any_gram = True
gram += ' genitive'
# der = fs.get('der')
# if der and der.get('ass'):
# if any_gram: gram += ','
# any_gram = True
# gram += ' assoc(-awi)'
if any_gram:
s += ' grammar:' + gram + '\n'
pp = fs.get('pp')
cnj = fs.get('cnj')
if pp or cnj:
if pp:
s += ' preposition: ' + pp
if cnj:
if pp: s += ','
s += ' conjunctive suffix: ' + cnj
s += '\n'
return s
def vb_anal2string(anal):
'''Convert a verb analysis to a string.
anal is ("(*)v", root, citation, gramFS)
'''
pos = 'verb'
root = anal[1]
citation = anal[2]
fs = anal[3]
POS = '?POS: ' if '?' in anal[0] else 'POS: '
s = POS + pos
if root:
if '{' in root:
# Segmented form; not root
s += ', segmentation: ' + root
else:
s += ', root: <' + root + '>'
if citation:
s += ', citation: ' + citation
s += '\n'
if fs:
sb = fs['sb']
s += ' subject:'
s += arg2string(sb)
ob = fs.get('ob')
if ob and ob.get('expl'):
s += ' object:'
s += arg2string(ob, True)
s += ' grammar:'
rl = fs.get('rl')
tm = fs.get('tm')
if tm == 'prf':
s += ' perfective'
elif tm == 'imf':
s += ' imperfective'
elif tm == 'j_i':
s += ' jussive/imperative'
elif tm == 'ger':
s += ' gerundive'
else:
s += ' present'
if fs.get('ax'):
s += ', aux:alle'
asp = fs['as']
if asp == 'it':
s += ', iterative'
elif asp == 'rc':
s += ', reciprocal'
vc = fs['vc']
if vc == 'ps':
s += ', passive'
elif vc == 'tr':
s += ', transitive'
elif vc == 'cs':
s += ', causative'
if fs.get('rel') or fs.get('neg'):
if fs.get('rel'):
s += ', relative'
if rl and rl.get('acc'):
s += ', accusative'
if fs.get('def'):
s += ', definite'
if fs.get('neg'):
s += ', negative'
s += '\n'
cj1 = fs.get('cj1')
cj2 = fs.get('cj2')
prep = fs.get('pp')
if cj1 or cj2 or prep:
any_affix = False
if prep:
any_affix = True
s += ' preposition: ' + prep
if cj1:
if any_affix: s += ','
s += ' conjunctive prefix: ' + cj1
if cj2:
if any_affix: s += ','
s += ' conjunctive suffix: ' + cj2
s += '\n'
return s
def arg2string(fs, obj=False):
'''Convert an argument Feature Structure to a string.'''
s = ''
if fs.get('p1'):
s += ' 1'
elif fs.get('p2'):
s += ' 2'
else:
s += ' 3'
if fs.get('plr'):
s += ', plur'
else:
s += ', sing'
if not fs.get('plr') and (fs.get('p2') or not fs.get('p1')):
if fs.get('fem'):
s += ', fem'
elif not fs.get('frm'):
s += ', masc'
if obj:
if fs.get('p2'):
if fs.get('frm'):
s += ', formal'
if fs.get('prp'):
if fs.get('l'):
s += ', prep: -l-'
else:
s += ', prep: -b-'
s += '\n'
return s
def vb_anal_to_dict(root, fs):
'''Convert a verb analysis Feature Structure to a dict.'''
args = []
# List of features that are true
bools = []
strings = {}
gram = {}
gram['root'] = root
sbj = fs['sb']
obj = fs.get('ob', None)
vc = fs['vc']
asp = fs['as']
tm = fs['tm']
cj1 = fs.get('cj1', None)
cj2 = fs.get('cj2', None)
prp = fs.get('pp', None)
rl = fs.get('rl', {})
# Subject and object
prep = False
formal = False
labels = ['person', 'number', 'gender']
    if obj and obj.get('expl'):
if obj.get('p2'):
formal = True
labels.append('formality')
prep = True
labels.append('prepositional')
args.append(labels)
args1 = []
args1.append(agr_to_list(sbj, 'subject', formal))
    if obj and obj.get('expl'):
args1.append(agr_to_list(obj, 'object', formal))
args.append(args1)
# TAM
if tm == 'imf':
strings['tense/mood'] = 'imperfective'
elif tm == 'prf':
strings['tense/mood'] = 'perfective'
elif tm == 'ger':
strings['tense/mood'] = 'gerundive'
else:
strings['tense/mood'] = 'jussive/imperative'
# DERIVATIONAL STUFF
if vc == 'ps':
strings['voice'] = 'passive'
elif vc == 'tr':
strings['voice'] = 'transitive'
elif vc == 'cs':
strings['voice'] = 'causative'
if asp == 'it':
strings['aspect'] = 'iterative'
elif asp == 'rc':
strings['aspect'] = 'reciprocal'
# NEGATION
if fs.get('neg'):
bools.append('negative')
# RELATIVIZATION
if fs.get('rel'):
bools.append('relative')
# CASE
if rl and rl.get('acc'):
bools.append('accusative')
# CONJUNCTIONS AND PREPOSITIONS
if cj1:
strings['prefix conjunction'] = cj1
if cj2:
strings['suffix conjunction'] = cj2
if prp:
strings['preposition'] = prp
gram['args'] = args
gram['strings'] = strings
gram['bools'] = bools
return gram
def vb_dict_to_anal(root, dct, freeze=True):
'''Convert a verb analysis dict to a Feature Structure.'''
fs = FeatStruct()
root = root or dct['root']
# Arguments
sbj = list_to_arg(dct, 'sbj')
if dct.get('obj'):
obj = list_to_arg(dct, 'obj')
else:
obj = FeatStruct()
obj['expl'] = False
fs['sb'] = sbj
fs['ob'] = obj
# TAM: labels are the same as FS values
fs['tm'] = dct.get('tam', 'prf')
# DERIVATIONAL STUFF
fs['as'] = dct.get('asp', 'smp')
fs['vc'] = dct.get('voice_am', 'smp')
# OTHER GRAMMAR
fs['neg'] = dct.get('neg', False)
fs['rel'] = dct.get('rel', False)
fs['acc'] = dct.get('acc', False)
if dct.get('aux'):
    # The feature is named 'ax' elsewhere (see defaultFS and vb_anal2string)
    fs['ax'] = 'al'
else:
    fs['ax'] = None
# PREPOSITIONS and CONJUNCTIONS
fs['pp'] = dct.get('prep_am')
if fs['pp']:
fs['sub'] = True
fs['cj1'] = dct.get('preconj_am')
if fs['cj1']:
fs['sub'] = True
fs['cj2'] = dct.get('sufconj_am')
return [root, FSSet(fs)]
def agr_to_list(agr, cat, formal=False):
'''Convert an agreement Feature Structure to a list.
Category, then person, number, gender, formality (2nd prs), prepositional.
'''
gram = [cat]
if agr.get('p1'):
gram.append('1')
elif agr.get('p2'):
gram.append('2')
else:
gram.append('3')
if agr.get('plr'):
gram.append('plural')
else:
gram.append('singular')
if not agr.get('p1') and not agr.get('plr'):
# Gender only for 2nd and 3rd person singular
if agr.get('fem'):
gram.append('feminine')
else:
gram.append('masculine')
else:
gram.append('')
if formal:
if cat == 'object' and agr.get('p2'):
if agr.get('frm'):
gram.append('formal')
else:
gram.append('informal')
if agr.get('prp'):
if agr.get('b'):
gram.append('b-')
else:
gram.append('l-')
elif cat == 'object':
gram.append('no')
return gram
def list_to_arg(dct, prefix):
'''Convert a dict to an argument Feature Structure.'''
arg = FeatStruct()
person = dct.get(prefix + '_pers')
number = dct.get(prefix + '_num')
gender = dct.get(prefix + '_gen')
arg['expl'] = True
# Person
if person == '1':
arg['p1'] = True
arg['p2'] = False
elif person == '2':
arg['p2'] = True
arg['p1'] = False
else:
# 3rd person the default
arg['p1'] = False
arg['p2'] = False
# Number
if number == 'plur':
arg['plr'] = True
else:
# Singular the default
arg['plr'] = False
# Gender
if person != '1':
if gender == 'fem':
arg['fem'] = True
else:
arg['fem'] = False
# 2nd person: formality
if person == '2':
formality = dct.get(prefix + '_form')
if formality == 'form':
arg['frm'] = True
else:
# Informal the default
arg['frm'] = False
# Prepositional (object only)
if prefix == 'obj':
prep = dct.get(prefix + '_prep_am')
if prep == 'l':
arg['prp'] = 'l'
elif prep == 'b':
arg['prp'] = 'b'
else:
arg['prp'] = None
return arg
def root_postproc(root, geez=False):
'''Postprocess a root, with or without converting to Geez.'''
if geez:
return root2geez(GEEZ_SERA['am'][1], root, lang='am')
else:
# # Irregular
# if root == "al_e":
# return '<al_e>'
return '<' + root + '>'
def n_postproc(analysis):
'''Postprocess a noun, replacing the root, if deverbal with postprocessed form.'''
gram1 = list(analysis[1])[0]
if analysis[0]:
if not gram1.get('v'):
# This is not deverbal; convert the "root" (really the stem) to Geez
analysis[0] = sera2geez(GEEZ_SERA['am'][1], analysis[0], lang='am')
## Create Language object for Amharic, including preprocessing, postprocessing,
## and segmentation units (phones).
AM = language.Language("Amharic", 'am',
postproc=lambda form: sera2geez(GEEZ_SERA['am'][1], form, lang='am'),
preproc=lambda form: geez2sera(GEEZ_SERA['am'][0], form, lang='am', simp=True),
postpostproc=lambda form: ta_convert(form),
stat_root_feats=['vc', 'as'],
stat_feats=[['poss', 'expl'], ['cnj'], ['cj1'], ['cj2'], ['pp'], ['rel']],
seg_units=[["a", "e", "E", "i", "I", "o", "u", "H", "w", "y", "'", "`", "_", "|", "*"],
{"b": ["b", "bW"], "c": ["c", "cW"], "C": ["C", "CW"],
"d": ["d", "dW"], "f": ["f", "fW"], "g": ["g", "gW"],
"h": ["h", "hW"], "j": ["j", "jW"], "k": ["k", "kW"],
"l": ["l", "lW"], "m": ["m", "mW"], "n": ["n", "nW"],
"p": ["p", "pW"], "P": ["P", "PW"],
"N": ["N", "NW"], "q": ["q", "qW"], "r": ["r", "rW"],
"s": ["s", "sW"], "S": ["S", "SW"], "t": ["t", "tW"],
"T": ["T", "TW"], "v": ["v", "vW"], "x": ["x", "xW"],
"z": ["z", "zW"], "Z": ["Z", "ZW"],
"^": ["^s", "^S", "^h", "^hW", "^sW", "^SW"]}])
## Create Morphology object and noun, verb, and copula POSMorphology objects for Amharic,
## including punctuation and ASCII characters that are part of the romanization.
AM.set_morphology(language.Morphology((),
pos_morphs=[('cop',), ('n',), ('v',)],
# Exclude ^ and - (the hyphen can appear in compounds)
punctuation=r'[“‘”’–—:;/,<>?.!%$()[\]{}|#@&*\_+=\"፡።፣፤፥፦፧፨]',
# Include digits?
characters=r'[a-zA-Zሀ-ፚ\'`^]'))
### Assign various attributes to Morphology and POSMorphology objects
# Functions that simplify and orthographize Amharic words
AM.morphology.simplify = lambda word: simplify(word)
AM.morphology.orthographize = lambda word: orthographize(word)
# Function that performs trivial analysis on forms that don't require romanization
AM.morphology.triv_anal = lambda form: no_convert(form)
## Functions converting between feature structures and simple dicts
AM.morphology['v'].anal_to_dict = lambda root, anal: vb_anal_to_dict(root, anal)
AM.morphology['v'].dict_to_anal = lambda root, anal: vb_dict_to_anal(root, anal)
## Default feature structures for POSMorphology objects
## Used in generation and production of citation form
AM.morphology['v'].defaultFS = \
language.FeatStruct("[pos=v,tm=prf,as=smp,vc=smp,sb=[-p1,-p2,-plr,-fem],ob=[-expl,-p1,-p2,-plr,-fem,-b,-l,-prp,-frm],cj1=None,cj2=None,pp=None,ax=None,-neg,-rel,-sub,-def,-acc,-ye,rl=[-p,-acc]]")
AM.morphology['v'].FS_implic = {'rel': ['def', 'sub'],
'cj1': ['sub'],
'pp': ['rel', 'sub'],
('pp', ('be', 'le', 'ke', 'wede', 'Inde', 'sIle', 'Iske', 'Iyye')): [['rl', ['p']]],
'def': ['rel', 'sub'],
'l': ['prp'],
'b': ['prp'],
'ob': [['expl']]}
# defaultFS with voice and aspect unspecified
AM.morphology['v'].citationFS = language.FeatStruct("[pos=v,tm=prf,sb=[-p1,-p2,-plr,-fem],ob=[-expl],cj1=None,cj2=None,pp=None,ax=None,-neg,-rel,-sub,-def,-ye,-acc,rl=[-p,-acc]]")
AM.morphology['n'].defaultFS = \
language.FeatStruct("[pos=n,-acc,-def,-neg,-fem,-itu,as=smp,cnj=None,-dis,-gen,-plr,poss=[-expl,-p1,-p2,-plr,-fem,-frm],pp=None,v=None,vc=smp,rl=[-p,-gen,-acc]]")
AM.morphology['n'].FS_implic = {'poss': [['expl'], 'def'],
('pp', ('be', 'le', 'ke', 'wede', 'Inde', 'sIle', 'Iske')): [['rl', ['p']]],
('gen', True): [['rl', ['gen']]],
('acc', True): [['rl', ['acc']]]}
# defaultFS with voice and aspect unspecified
AM.morphology['n'].citationFS = language.FeatStruct("[-acc,-def,-neg,cnj=None,-dis,-gen,-plr,poss=[-expl],pp=None,v=inf]")
AM.morphology['cop'].defaultFS = language.FeatStruct("[cj2=None,-neg,ob=[-expl],-rel,sb=[-fem,-p1,-p2,-plr,-frm],-sub,tm=prs]")
## Functions that return the citation forms for words
AM.morphology['v'].citation = lambda root, fss, simplified, guess, vc_as: vb_get_citation(root, fss, simplified, guess, vc_as)
AM.morphology['n'].citation = lambda root, fss, simplified, guess, vc_as: n_get_citation(root, fss, simplified, guess, vc_as)
## Functions that convert analyses to strings
AM.morphology['v'].anal2string = lambda fss: vb_anal2string(fss)
AM.morphology['n'].anal2string = lambda fss: n_anal2string(fss)
AM.morphology['cop'].anal2string = lambda fss: cop_anal2string(fss)
## Postprocessing function for nouns (treats roots differently)
# AM.morphology['v'].postproc = lambda analysis: vb_postproc(analysis)
AM.morphology['n'].postproc = lambda analysis: n_postproc(analysis)
# AM.morphology['cop'].postproc = lambda analysis: cop_postproc(analysis)
def load_anal(pos='v', lex=True, guess=False):
if lex:
AM.morphology[pos].load_fst(True, verbose=True)
if guess:
AM.morphology[pos].load_fst(True, guess=True, verbose=True)
def load_gen(pos='v', lex=True, guess=False):
if lex:
AM.morphology[pos].load_fst(True, generate=True, invert=True, verbose=True)
if guess:
AM.morphology[pos].load_fst(True, generate=True, invert=True, guess=True, verbose=True)
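## Usage sketch (an assumption: the compiled analysis/generation FST files
## must be available on the loader's data path; signatures follow the calls above):
# load_anal('v')            # lexical analyzer for verbs
# load_gen('v', guess=True) # generator FSTs, including the guesser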
|
nilq/baby-python
|
python
|
# This program lets you mark a square on the map using a two-digit input.
# The first digit is the column number and the second digit is the row number.
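# For example, entering "23" selects column 2 and row 3, so the middle
# square of row3 is marked with an "X".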
row1 = ["⬜️", "⬜️", "⬜️"]
row2 = ["⬜️", "⬜️", "⬜️"]
row3 = ["⬜️", "⬜️", "⬜️"]
map = [row1, row2, row3]
print(f"{row1}\n{row2}\n{row3}")
position = input("Where do you want to put the treasure? ")
column = int(position[0]) - 1
row = int(position[1]) - 1
map[row][column] = "X"
print(f"{row1}\n{row2}\n{row3}")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django import template
import datetime
# import timedelta
register = template.Library()
def nice_repr(timedelta, display="long", sep=", "):
"""
Turns a datetime.timedelta object into a nice string repr.
display can be "minimal", "short" or "long" [default].
>>> from datetime import timedelta as td
>>> nice_repr(td(days=1, hours=2, minutes=3, seconds=4))
'1 day, 2 hours, 3 minutes, 4 seconds'
>>> nice_repr(td(days=1, seconds=1), "minimal")
'1d, 1s'
"""
assert isinstance(timedelta, datetime.timedelta), "First argument must be a timedelta."
result = []
weeks = timedelta.days // 7
days = timedelta.days % 7
hours = timedelta.seconds // 3600
minutes = (timedelta.seconds % 3600) // 60
seconds = timedelta.seconds % 60
if display == "sql":
days += weeks * 7
return "%i %02i:%02i:%02i" % (days, hours, minutes, seconds)
elif display == 'minimal':
words = ["w", "d", "h", "m", "s"]
elif display == 'short':
words = [" wks", " days", " hrs", " min", " sec"]
else:
words = [" weeks", " days", " hours", " minutes", " seconds"]
values = [weeks, days, hours, minutes, seconds]
for i in range(len(values)):
if values[i]:
if values[i] == 1 and len(words[i]) > 1:
result.append("%i%s" % (values[i], words[i].rstrip('s')))
else:
result.append("%i%s" % (values[i], words[i]))
return sep.join(result)
def iso8601_repr(timedelta):
"""
Represent a timedelta as an ISO8601 duration.
http://en.wikipedia.org/wiki/ISO_8601#Durations
>>> from datetime import timedelta as td
>>> iso8601_repr(td(days=1, hours=2, minutes=3, seconds=4))
'P1DT2H3M4S'
"""
years = timedelta.days // 365
weeks = (timedelta.days % 365) // 7
days = (timedelta.days % 365) % 7
hours = timedelta.seconds // 3600
minutes = (timedelta.seconds % 3600) // 60
seconds = timedelta.seconds % 60
formatting = (
('P', (
('Y', years),
('W', weeks),
('D', days),
)),
('T', (
('H', hours),
('M', minutes),
('S', seconds),
)),
)
result = []
for category, subcats in formatting:
result += category
for format, value in subcats:
if value:
result.append('%d%c' % (value, format))
return "".join(result)
@register.filter(name='timedelta')
def timedelta(value, display="long"):
if value is None:
return value
return nice_repr(value, display)
@register.filter(name='iso8601')
def iso8601(value):
if value is None:
return value
return iso8601_repr(value)
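# A quick self-check for the helpers above (a sketch; assumes Django is
# importable). The doctests in nice_repr and iso8601_repr carry the expected
# outputs. In a template the filters read {{ value|timedelta:"short" }} or
# {{ value|iso8601 }}.
if __name__ == "__main__":
    import doctest
    doctest.testmod()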
|
nilq/baby-python
|
python
|
'''
Write a program that reads a temperature
in °C and converts it to °F.
'''
c = float(input('Enter the temperature in °C: '))
f = (9*c + 160)/5
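# Sanity check: (9*c + 160)/5 is algebraically 9*c/5 + 32, so c = 25 gives
# (225 + 160)/5 = 77.0 °F.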
print(f'A temperature of {c}°C is {f}°F!')
|
nilq/baby-python
|
python
|
import logging
import numpy as np
import pandas as pd
import scipy.special
import scipy.stats
def encode_array(vals, sep=',', fmt='{:.6g}'):
return sep.join(map(fmt.format, vals))
def decode_array(vals, sep=','):
return np.asarray(list(map(float, vals.split(','))))
def encode_matrix(vals, sep1=',', sep2=';', fmt='{:.6g}'):
return sep2.join(encode_array(vals1, sep=sep1, fmt=fmt) for vals1 in vals)
def decode_matrix(vals, sep1=',', sep2=';'):
return np.asarray([decode_array(vals1, sep=sep1) for vals1 in vals.split(';')])
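# Round-trip sketch for the codecs above (hypothetical values):
# encode_matrix(np.array([[0.25, 0.75], [0.5, 0.5]])) -> '0.25,0.75;0.5,0.5'
# decode_matrix('0.25,0.75;0.5,0.5') -> array([[0.25, 0.75], [0.5, 0.5]])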
def load(path):
cands = [
MCAlphaPrediction,
AlphaPrediction,
WMCProbPrediction,
MCProbPrediction,
ProbPrediction,
]
errors = []
for cls in cands:
try:
return cls.load(path)
except KeyError as e:
errors.append(e)
for e in errors:
logging.error(e)
raise NotImplementedError
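# Usage sketch: load('preds.tsv') tries each candidate class in order and
# returns the first whose required column ('mc_alpha', 'alpha', 'mc_prob' +
# 'mc_weight', 'mc_prob', or 'prob') parses from the TSV header.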
class Prediction:
@property
def ids(self):
return self._ids
def get_probs(self): # (N, K)
return self._probs
@classmethod
def load(cls, path):
raise NotImplementedError
def save(self, path, ids):
raise NotImplementedError
def get_posterior(self, hists):
raise NotImplementedError
def hist_likelihood(hists, probs):  # (..., K), (..., K) -> (...,)
    # Multinomial likelihood up to a constant: prod_k p_k ** n_k
    return (probs ** hists).prod(axis=-1)
def get_posterior_dirichlet0(pred, hists, alpha0=1.):
    K = hists.shape[1]  # (N, K)
    alpha = alpha0 * np.ones(K) / K
    post_alpha = hists + alpha  # (N, K) + (K,) broadcasts across rows
    return AlphaPrediction(post_alpha, pred.ids)
def get_posterior_dirichlet(pred, hists, alpha0=1.):
probs = pred.get_probs()
alpha = alpha0 * probs
assert hists.shape == probs.shape # (N, K)
post_alpha = hists + alpha
return AlphaPrediction(post_alpha, pred.ids)
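# Both helpers apply standard Dirichlet-multinomial conjugacy: with a
# Dirichlet(alpha) prior and observed counts n, the posterior is
# Dirichlet(alpha + n). E.g. alpha = (0.5, 0.5) and a hists row of (3, 1)
# give post_alpha = (3.5, 1.5).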
class ProbPrediction(Prediction):
def __init__(self, probs, ids):
self._probs = np.asarray(probs) # (N, K)
assert len(self._probs.shape) == 2
self._ids = ids
def get_agreement_probs(self): # (N,)
return (self._probs ** 2).sum(axis=1)
@classmethod
def load(cls, path):
tab = pd.read_csv(path, sep='\t')
probs = np.asarray(list(map(decode_array, tab['prob'])))
return cls(probs, tab['id'])
def save(self, path):
columns = ['id', 'prob']
tab = pd.DataFrame({
'id': self._ids,
'prob': list(map(encode_array, self._probs)),
}, columns=columns)
tab.to_csv(path, sep='\t', index=False)
class MCProbPrediction(Prediction):
def __init__(self, mc_probs, ids):
self._mc_probs = np.asarray(mc_probs) # (N, S, K)
assert len(self._mc_probs.shape) == 3
self._probs = self._mc_probs.mean(axis=1) # (N, K)
self._ids = ids
def get_agreement_probs(self): # (N,)
mc_agree_probs = (self._mc_probs ** 2).sum(axis=2) # (N, S)
return mc_agree_probs.mean(axis=1)
@classmethod
def load(cls, path):
tab = pd.read_csv(path, sep='\t')
mc_probs = np.asarray(list(map(decode_matrix, tab['mc_prob'])))
return cls(mc_probs, tab['id'])
def save(self, path):
columns = ['id', 'mc_prob']
tab = pd.DataFrame({
'id': self._ids,
'mc_prob': list(map(encode_matrix, self._mc_probs)),
}, columns=columns)
tab.to_csv(path, sep='\t', index=False)
def get_posterior(self, hists):
hl = hist_likelihood(hists[:, None, :], self._mc_probs) # (N, S, K) -> (N, S)
weights = hl / hl.sum(axis=-1, keepdims=True) # normalized -> (N, S)
logging.info(weights)
wmc_pred = WMCProbPrediction(self._mc_probs, weights, ids=self.ids) # (N, S, K), (N, S)
return wmc_pred
class WMCProbPrediction(Prediction):
def __init__(self, mc_probs, mc_weights, ids):
self._mc_probs = np.asarray(mc_probs) # (N, S, K)
self._mc_weights = np.asarray(mc_weights) # (N, S) or (1, S)
assert len(self._mc_probs.shape) == 3
assert self._mc_weights.shape == self._mc_probs.shape[:2]
self._probs = (self._mc_probs * self._mc_weights[:, :, None]).sum(axis=1) # (N, K)
self._ids = ids
@classmethod
def load(cls, path):
tab = pd.read_csv(path, sep='\t')
mc_probs = np.asarray(list(map(decode_matrix, tab['mc_prob'])))
mc_weights = np.asarray(list(map(decode_array, tab['mc_weight'])))
return cls(mc_probs, mc_weights, tab['id'])
def save(self, path):
columns = ['id', 'mc_prob', 'mc_weight']
tab = pd.DataFrame({
'id': self._ids,
'mc_prob': list(map(encode_matrix, self._mc_probs)),
'mc_weight': list(map(encode_array, self._mc_weights)),
}, columns=columns)
tab.to_csv(path, sep='\t', index=False)
class AlphaPrediction(Prediction):
eps = clip_min = np.finfo(float).eps
clip_max = 1./np.finfo(float).eps
def __init__(self, alphas, ids):
self._alphas = np.asarray(alphas) # (N, K)
self._alphas[np.isnan(self._alphas)] = self.clip_min # Repair underflowed values
self._alphas = np.clip(self._alphas, self.clip_min, self.clip_max)
assert len(self._alphas.shape) == 2
self._alpha0s = self._alphas.sum(axis=1)
self._probs = self._alphas / self._alpha0s[:,None]
self._ids = ids
def get_alphas(self):
return self._alphas
def get_agreement_probs(self): # (N,)
denom = self._alpha0s * (self._alpha0s + 1)
square_moments = self._alphas * (self._alphas + 1) / denom[:, None] # (N, K)
agree_probs = square_moments.sum(axis=1) # (N,)
return agree_probs
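# Rationale: for p ~ Dirichlet(alpha) with alpha0 = sum(alpha), the second
# moment is E[p_k^2] = alpha_k * (alpha_k + 1) / (alpha0 * (alpha0 + 1)),
# so the sum above is E[sum_k p_k^2], the probability that two independent
# draws from p agree.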
@classmethod
def load(cls, path):
tab = pd.read_csv(path, sep='\t')
alphas = np.asarray(list(map(decode_array, tab['alpha'])))
return cls(alphas, tab['id'])
def save(self, path):
columns = ['id', 'alpha']
tab = pd.DataFrame({
'id': self._ids,
'alpha': list(map(encode_array, self._alphas)),
}, columns=columns)
tab.to_csv(path, sep='\t', index=False)
def get_posterior(self, hists):
alpha = self._alphas
assert hists.shape == alpha.shape # (N, K)
post_alpha = hists + alpha
return AlphaPrediction(post_alpha, self.ids)
class MCAlphaPrediction(Prediction):
eps = clip_min = np.finfo(float).eps
clip_max = 1./np.finfo(float).eps
def __init__(self, mc_alphas, ids):
self._mc_alphas = np.asarray(mc_alphas) # (N, S, K)
self._mc_alphas[np.isnan(self._mc_alphas)] = self.clip_min # repair underflowed values
self._mc_alphas = np.clip(self._mc_alphas, self.clip_min, self.clip_max)
assert len(self._mc_alphas.shape) == 3
self._alphas = self._mc_alphas.mean(axis=1) # (N, K)
self._mc_alpha0s = self._mc_alphas.sum(axis=2) # (N, S)
self._mc_mean_probs = self._mc_alphas / self._mc_alpha0s[:, :, None] #(N, S, K)
self._probs = self._mc_mean_probs.mean(axis=1) #(N, K)
self._ids = ids
def get_alphas(self):
return self._alphas
def get_agreement_probs(self): # (N,)
mc_square_moments = self._mc_alphas * (self._mc_alphas + 1) / (self._mc_alpha0s * (self._mc_alpha0s + 1))[:, :, None] # (N, S, K)
mc_agree_probs = mc_square_moments.sum(axis=2) # (N, S)
return mc_agree_probs.mean(axis=1)
@classmethod
def load(cls, path):
tab = pd.read_csv(path, sep='\t')
mc_alphas = np.asarray(list(map(decode_matrix, tab['mc_alpha'])))
return cls(mc_alphas, tab['id'])
def save(self, path):
columns = ['id', 'mc_alpha']
tab = pd.DataFrame({
'id': self._ids,
'mc_alpha': list(map(encode_matrix, self._mc_alphas)),
}, columns=columns)
tab.to_csv(path, sep='\t', index=False)
|
nilq/baby-python
|
python
|
import unittest
import sys
sys.path.insert(0, '../')
from view_header import Route, PresentView, Flash, MSG_TYPE
class TestRoute(unittest.TestCase):
r1 = Route(True, 'test', {})
r2 = Route(True, 'test', {0:1, 1:'obj'})
def test_is_redirect(self):
self.assertEqual(self.r1.is_redirect(), True)
def test_get_name(self):
self.assertEqual(self.r1.get_name(), 'test')
def test_get_args1(self):
self.assertEqual(len(self.r1.get_args()), 0)
def test_get_args2(self):
self.assertEqual(len(self.r2.get_args()), 2)
self.assertEqual(self.r2.get_args()[0], 1)
self.assertEqual(self.r2.get_args()[1], 'obj')
def test_equals1(self): #basic
self.assertFalse(self.r1.equals(self.r2))
def test_equals2(self):
r2_copy = Route(True, 'test', {0:1, 1:'obj'})
self.assertTrue(self.r2.equals(r2_copy))
def test_equals3(self):
r1_copy = Route(True, 'test', {})
self.assertTrue(self.r1.equals(r1_copy))
def test_equals4(self):
temp = Route(True, 'test_', {})
self.assertFalse(self.r1.equals(temp))
def test_equals5(self):
temp = Route(False, 'test', {})
self.assertFalse(self.r1.equals(temp))
def test_equals6(self): #testing the isinstance
self.assertFalse(self.r1.equals(2))
self.assertFalse(self.r1.equals('asdf'))
self.assertFalse(self.r1.equals({}))
self.assertFalse(self.r1.equals([2]))
class TestPresentView(unittest.TestCase):
r1 = Route(True, 'test', {})
r2 = Route(True, 'test', {0:1, 1:'obj'})
f1 = Flash("test", MSG_TYPE.SUCCESS)
f2 = Flash("test", MSG_TYPE.FAIL)
v11 = PresentView(r1, f1)
v12 = PresentView(r1, f2)
v21 = PresentView(r2, f1)
v22 = PresentView(r2, f2)
def test_get_route1(self): #deep equality
temp = Route(True, 'test', {})
self.assertTrue(self.v11.get_route().equals(temp))
def test_get_route2(self): # pointer equality
self.assertEqual(self.v11.get_route(), self.r1)
def test_get_route3(self): # pointer equality
temp = Route(True, 'test', {})
self.assertNotEqual(self.v11.get_route(), temp)
def test_get_flash1(self):
temp = Flash("test", MSG_TYPE.FAIL)
self.assertTrue(self.v22.get_flash().equals(temp))
def test_get_flash2(self): # pointer equality
self.assertEqual(self.v11.get_flash(), self.f1)
def test_get_flash3(self): # pointer equality
temp = Flash("test", MSG_TYPE.SUCCESS)
self.assertNotEqual(self.v11.get_flash(), temp)
def test_get_flash4(self): # pointer equality
temp = PresentView(self.r2)
self.assertEqual(temp.get_flash(), None)
#
def test_equals1(self):
self.assertFalse(self.v11.equals(self.v12))
def test_equals2(self):
self.assertFalse(self.v11.equals(2))
def test_equals3(self):
self.assertTrue(self.v11.equals(self.v11))
def test_equals4(self):
temp = PresentView(self.r2, self.f1)
self.assertTrue(self.v21.equals(temp))
def test_equals5(self):
temp = Flash("FAILLL", MSG_TYPE.SUCCESS)
temp = PresentView(self.r2, temp)
self.assertFalse(self.v21.equals(temp))
def test_equals6(self): # None flash
temp = PresentView(self.r2)
self.assertFalse(temp.equals(self.v11))
def test_equals7(self): # None flash
temp = PresentView(self.r2)
self.assertFalse(self.v22.equals(temp))
def test_equals8(self): # None flash
temp = PresentView(self.r2)
temp2 = PresentView(self.r2)
self.assertFalse(temp.equals(temp2))
def test_equals9(self): # None flash
temp = PresentView(self.r2)
self.assertFalse(temp.equals(temp))
class TestFlash(unittest.TestCase):
f1 = Flash("test", MSG_TYPE.SUCCESS)
f2 = Flash("test", MSG_TYPE.FAIL)
f3 = Flash(1, MSG_TYPE.FAIL)
def test_equals1(self):
f1_copy = Flash("test", MSG_TYPE.SUCCESS)
self.assertTrue(self.f1.equals(f1_copy))
def test_equals2(self):
f1_copy = Flash("test 2", MSG_TYPE.SUCCESS)
self.assertFalse(self.f1.equals(f1_copy))
def test_equals3(self): #testing the isinstance
self.assertFalse(self.f1.equals(2))
self.assertFalse(self.f1.equals('asdf'))
self.assertFalse(self.f1.equals({}))
self.assertFalse(self.f1.equals([2]))
def test_equals4(self):
self.assertFalse(self.f1.equals(self.f2))
def test_gm1(self):
self.assertEqual(self.f1.get_msg(), 'test')
def test_gmt2(self):
self.assertEqual(self.f1.get_msg_type(), MSG_TYPE.SUCCESS)
def test_gmt3(self):
self.assertEqual(self.f2.get_msg_type(), MSG_TYPE.FAIL)
class Test_MSG_TYPE(unittest.TestCase):
s = MSG_TYPE.SUCCESS
f = MSG_TYPE.FAIL
def test_success(self):
self.assertEqual(self.s.value, 'success')
self.assertEqual(self.s.name, 'SUCCESS')
def test_fail(self):
self.assertEqual(self.f.value, 'danger')
self.assertEqual(self.f.name, 'FAIL')
# FAIL = 'danger'
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from datetime import datetime
def from_iso8601(date):
return datetime.fromisoformat(date)
def to_iso8601(year, month, day, hour, minute, second):
return datetime(year, month, day, hour,
minute, second, 0).isoformat()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-17 06:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0007_auto_20171005_1713'),
]
operations = [
migrations.CreateModel(
name='Column',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('table_name', models.CharField(max_length=100)),
('column_name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Columns',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('name_id', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='No_Relation_Columns',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
],
),
migrations.CreateModel(
name='No_Relation_Options',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('grep_strings', models.CharField(max_length=100)),
('no_relation_column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.No_Relation_Columns')),
],
),
migrations.CreateModel(
name='No_Relation_Table',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.IntegerField()),
('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
('columns', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Columns')),
],
),
migrations.CreateModel(
name='Relation_Columns',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
],
),
migrations.CreateModel(
name='Relation_Options',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('condition', models.CharField(max_length=100)),
('relation_column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Relation_Columns')),
],
),
migrations.CreateModel(
name='Relation_Table',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.IntegerField()),
('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Column')),
('columns', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Columns')),
],
),
migrations.CreateModel(
name='Tables',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('name_id', models.CharField(max_length=100)),
],
),
migrations.RemoveField(
model_name='skill',
name='category',
),
migrations.DeleteModel(
name='Skill',
),
migrations.DeleteModel(
name='SkillCategory',
),
migrations.AddField(
model_name='relation_columns',
name='relation_table',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Relation_Table'),
),
migrations.AddField(
model_name='no_relation_columns',
name='no_relation_table',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.No_Relation_Table'),
),
]
|
nilq/baby-python
|
python
|
#
# Copyright Bernhard Firner, 2019-2020
#
# Ship class and supporting classes
import csv
from collections import OrderedDict
from enum import Enum
import torch
from dice import ArmadaDice
from game_constants import (
ArmadaDimensions,
ArmadaTypes
)
class UpgradeType(Enum):
commander = 1
officer = 2
weapons_team = 3
support_team = 4
offensive_retrofit = 5
defensive_retrofit = 6
turbolasers = 7
ion_cannons = 8
ordnance = 9
fleet_support = 10
experimental_retrofit = 11
boarding_team = 12
title = 13
class Armament:
def __init__(self, redCount, blueCount, blackCount):
self.red = redCount
self.blue = blueCount
self.black = blackCount
class ShipType:
def __init__(self, name, attributes):
self.name = name
self.attributes = attributes
class Ship:
def __init__(self, name, player_number, template=None, upgrades=None, encoding=None, device=None):
"""Contsruct a specific instance of a ship.
Args:
name (str) : Name for this vessel.
player_number (int) : The player who controls this ship.
template (ShipType) : Ship template to copy.
upgrades (table str->str) : Upgrades to equip.
encoding (torch.Tensor) : An existing encoding to copy (if template and upgrades
are None)
device (str) : Default Tensor type ('cuda' or 'cpu'). Automatic if None.
"""
if (template is None or upgrades is None) and encoding is None:
raise RuntimeError("Ship requires either template and updrades or encoding.")
self.name = name
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.encoding = torch.zeros(Ship.encodeSize()).to(device)
if encoding is not None:
self.encoding.copy_(encoding)
else:
self.encoding.fill_(0.)
# Initialize attributes of this specific ship instance
self.set('player', player_number)
self.set('hull', int(template["Hull"]))
self.set("ship", 0.)
self.set("size", ArmadaDimensions.size_names.index(template['Size'].lower()))
idx, length = Ship.get_index("defense_tokens")
self.encoding[idx:idx + length] = 0.
for ttype in ArmadaTypes.defense_tokens:
tname = "Defense Token {}".format(ttype.capitalize())
token_idx = idx + ArmadaTypes.defense_tokens.index(ttype)
if tname in template:
if 0 == len(template[tname]):
self.encoding[token_idx] = 0
else:
self.encoding[token_idx] = int(template[tname])
# Max shields (current shields will be filled in the reset function)
idx = Ship.get_index("max_shields")[0]
for zone in ['left', 'right', 'front', 'rear']:
name = "Shields {}".format(zone.capitalize())
self.encoding[idx + ArmadaTypes.hull_zones.index(zone)] = int(template[name])
if 'Huge' == template['Size']:
for zone in ['left-auxiliary', 'right-auxiliary']:
name = "Shields {} {}".format(zone.capitalize())
self.encoding[idx + ArmadaTypes.hull_zones.index(zone)] = int(template[name])
# Presence of hull zones/firing arcs
idx, length = Ship.get_index("hull_zones")
self.encoding[idx:idx + length] = 0.
# Set the hull zones to indicate which are present
idx = Ship.get_index("hull_zones")[0]
for zone in ['left', 'right', 'front', 'rear']:
self.encoding[idx + ArmadaTypes.hull_zones.index(zone)] = 1.
if 'Huge' == template['Size']:
for zone in ['left-auxiliary', 'right-auxiliary']:
self.encoding[idx + ArmadaTypes.hull_zones.index(zone)] = 1.
# Initialize the armaments
idx = Ship.get_index("dice")[0]
for i, zone in enumerate(['left', 'right', 'front', 'rear']):
for j, color in enumerate(ArmadaDice.die_colors):
name = "Armament {} {}".format(zone.capitalize(), color.capitalize())
hull_offset = ArmadaTypes.hull_zones.index(zone)
if 0 < len(template[name]):
number = int(template[name])
else:
number = 0
self.encoding[idx + hull_offset * len(ArmadaDice.die_colors) + j] = number
if 'Huge' == template['Size']:
for i, zone in enumerate(['left-auxiliary', 'right-auxiliary']):
for j, color in enumerate(ArmadaDice.die_colors):
name = "Armament {} {}".format(zone.capitalize(), color.capitalize())
hull_offset = ArmadaTypes.hull_zones.index(zone)
number = int(template[name])
self.encoding[idx + hull_offset * len(ArmadaDice.die_colors) + j] = number
self.reset()
# TODO Check for legality and actually handle
self.width, self.height = ArmadaDimensions.ship_bases_feet[
ArmadaDimensions.size_names[int(self.get('size'))]]
self.upgrades = upgrades
@staticmethod
def _initialize_encoding():
"""Initialize the _enc_index and _enc_len variables."""
Ship._enc_index = OrderedDict()
Ship._enc_len = OrderedDict()
def addEntry(name, length, cur_idx):
Ship._enc_index[name] = cur_idx
Ship._enc_len[name] = length
return Ship._enc_index[name] + Ship._enc_len[name]
cur_idx = addEntry(name='player', length=1, cur_idx=0)
cur_idx = addEntry(name='hull', length=1, cur_idx=cur_idx)
cur_idx = addEntry(name='damage', length=1, cur_idx=cur_idx)
# TODO Face up damage card effects
cur_idx = addEntry(name='speed', length=1, cur_idx=cur_idx)
cur_idx = addEntry(name='ship', length=1, cur_idx=cur_idx)
cur_idx = addEntry(name='size', length=1, cur_idx=cur_idx)
# Defense tokens and state belong here, whether the token has been spent during this
# attack step is stored in the attack state
cur_idx = addEntry(name='defense_tokens', length=len(ArmadaTypes.defense_tokens), cur_idx=cur_idx)
cur_idx = addEntry(name='green_defense_tokens', length=len(ArmadaTypes.defense_tokens), cur_idx=cur_idx)
cur_idx = addEntry(name='red_defense_tokens', length=len(ArmadaTypes.defense_tokens), cur_idx=cur_idx)
cur_idx = addEntry(name='max_shields', length=len(ArmadaTypes.hull_zones), cur_idx=cur_idx)
cur_idx = addEntry(name='shields', length=len(ArmadaTypes.hull_zones), cur_idx=cur_idx)
# Presence of particular hull zones
cur_idx = addEntry(name='hull_zones', length=len(ArmadaTypes.hull_zones), cur_idx=cur_idx)
# Armament for each zone
cur_idx = addEntry(
name='dice',
length=len(ArmadaTypes.hull_zones) * len(ArmadaDice.die_colors), cur_idx=cur_idx)
# TODO Line of sight marker locations and firing arc locations
# TODO Upgrades
# TODO Ignition arc
cur_idx = addEntry(name='commands', length=ArmadaTypes.max_command_dials, cur_idx=cur_idx)
# Location is a pair of x and y coordinates in feet (since that is the range ruler size).
cur_idx = addEntry(name='location', length=2, cur_idx=cur_idx)
# The heading is the clockwise rotation of the ship in radians
cur_idx = addEntry(name='heading', length=1, cur_idx=cur_idx)
@staticmethod
def encodeSize():
"""Get the size of the ship encoding.
Returns:
int: Size of the ship encoding (number of Tensor elements)
"""
# Programmatically initialize the index lookup if it doesn't exist
if not hasattr(Ship, '_enc_index'):
Ship._initialize_encoding()
last_key = list(Ship._enc_index.keys())[-1]
size = Ship._enc_index[last_key] + Ship._enc_len[last_key]
return size
@staticmethod
def get_index(data_name):
"""Get the index of a data element.
Arguments:
data_name(str): Name of the data element.
Returns:
(int, int): Tuple of the beginning of the data and the length.
"""
# Programmatically initialize the index lookup if it doesn't exist
if not hasattr(Ship, '_enc_index'):
Ship._initialize_encoding()
if data_name not in Ship._enc_index:
raise RuntimeError("Ship has no attribute named {}".format(data_name))
return (Ship._enc_index[data_name], Ship._enc_len[data_name])
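    # Usage sketch for the accessors above ('shields' is a field defined in
    # _initialize_encoding; `ship` is a hypothetical instance):
    #   idx, length = Ship.get_index('shields')
    #   shields_view = ship.encoding[idx:idx + length]  # a view, not a copy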
def base_size(self):
"""Get the ship width and length.
Returns:
tuple(int, int): width and length
"""
index = int(self.encoding[Ship._enc_index['size']].item())
return ArmadaDimensions.ship_bases[ArmadaDimensions.size_names[index]]
def token_count(self, index):
"""Get the number of green and red tokens at the given index.
The index corresponds to a particular type of token as defined in
ArmadaTypes.defense_tokens.
Returns:
tuple(int, int): The number of green and red tokens.
"""
green_idx = Ship._enc_index["green_defense_tokens"]
red_idx = Ship._enc_index["red_defense_tokens"]
return self.encoding[green_idx + index], self.encoding[red_idx + index]
def ready_defense_tokens(self):
"""Replace all red tokens with green versions."""
with torch.no_grad():
# Add the red tokens to the green tokens and set red tokens to 0
green_idx = Ship._enc_index["green_defense_tokens"]
red_idx = Ship._enc_index["red_defense_tokens"]
token_len = Ship._enc_len['green_defense_tokens']
self.encoding[green_idx:green_idx + token_len] += self.encoding[red_idx:red_idx + token_len]
self.encoding[red_idx:red_idx + token_len] = 0.
def spend_token(self, token_type, color_type):
"""Spend a token of the given type and color.
Args:
token_type (str): Token type to spend.
color_type (int): 0 for green, 1 for red
"""
red_idx = Ship._enc_index["red_defense_tokens"]
type_offset = ArmadaTypes.defense_tokens.index(token_type)
if 0 == color_type:
green_idx = Ship._enc_index["green_defense_tokens"]
self.encoding[green_idx + type_offset] -= 1
self.encoding[red_idx + type_offset] += 1
else:
self.encoding[red_idx + type_offset] -= 1
def ready_upgrade_cards(self):
"""Unexhaust upgrade cards."""
# Not implemented yet
pass
def adjacent_zones(self, zone):
"""Return hull zones adjacent to the given zone."""
index = int(self.encoding[Ship._enc_index['size']].item())
size = ArmadaDimensions.size_names[index]
if size == 'huge':
if zone not in ArmadaTypes.adjacent_huge_hull_zones:
raise RuntimeError("Unrecognized hull zone {}".format(zone))
return ArmadaTypes.adjacent_huge_hull_zones[zone]
else:
if zone not in ArmadaTypes.adjacent_hull_zones:
raise RuntimeError("Unrecognized hull zone {}".format(zone))
return ArmadaTypes.adjacent_hull_zones[zone]
def get(self, name):
"""Get a value from the encoding.
Arguments:
name (str): Name of the encoding field.
Returns:
value (float): The value of the encoding with the given name.
"""
index, length = Ship.get_index(name)
if 1 == length:
return self.encoding[index].item()
else:
raise RuntimeError("Use Ship.get_range for multi-element data.")
def get_range(self, name):
"""Get a view of the encoding of a field with multiple elements.
Arguments:
name (str): Name of the encoding field.
Returns:
value (torch.Tensor): The tensor is a view of the original data, clone or convert to a
list to avoid modification.
"""
index, length = Ship.get_index(name)
if 1 == length:
raise RuntimeError("Use Ship.get for single element data.")
else:
return self.encoding[index:index + length]
def set(self, name, value):
"""Set a value in encoding.
Arguments:
name (str): Name of the encoding field.
value (numeric, List, or torch.Tensor): A value assignable to a tensor.
"""
vtype = type(value)
if vtype is not int and vtype is not float and vtype is not list and vtype is not torch.Tensor:
raise RuntimeError('Ship.set does not have data type "{}"'.format(vtype))
index, length = Ship.get_index(name)
if 1 == length:
self.encoding[index] = value
else:
if type(value) is int or type(value) is float:
raise RuntimeError("Attempt to assign a scalar value to an encoding range.")
# Convert a list to a tensor to assign a range
if type(value) is list:
self.encoding[index:index + length] = torch.tensor(value)
else:
self.encoding[index:index + length] = value
def set_range(self, name, value):
"""Set a range in the encoding to a value.
Arguments:
name (str): Name of the encoding field.
value (numeric): Value to set.
"""
vtype = type(value)
if vtype is not int and vtype is not float:
raise RuntimeError('Ship.set_range does not support data type "{}"'.format(vtype))
index, length = Ship.get_index(name)
self.encoding[index:index + length] = value
def reset(self):
"""Resets shields, hull, and defense tokens and initialize values in the encoding."""
self.set("damage", 0.)
self.set("speed", 0.)
self.set_range("commands", 0.)
# Set defense tokens, and shields
# Initialize all tokens as green
self.set('green_defense_tokens', self.get_range('defense_tokens'))
self.set_range('red_defense_tokens', 0.)
self.set('shields', self.get_range('max_shields'))
# Set a location off of the board. Lump each player's ships together.
self.set("location", [-1., self.get('player') * -1.])
self.set("heading", 0.)
def roll(self, zone, distance):
"""
return an attack roll for the given arc at the given range.
Args:
zone (str) : One of front, left, right, and rear
distance (str) : short, medium, or long
Returns an array of colors and faces
"""
colors = []
faces = []
# TODO Extreme range
# Roll red dice at all valid ranges
die_offset = Ship._enc_index['dice']
hull_offset = die_offset + ArmadaTypes.hull_zones.index(zone) * len(ArmadaDice.die_colors)
if distance in ["short", "medium", "long"]:
red_offset = ArmadaDice.die_colors.index("red")
num_dice = int(self.encoding[hull_offset + red_offset].item())
colors = colors + ["red"] * num_dice
# Roll blue dice at all short to medium
if distance in ["short", "medium"]:
blue_offset = ArmadaDice.die_colors.index("blue")
num_dice = int(self.encoding[hull_offset + blue_offset].item())
colors = colors + ["blue"] * num_dice
# Roll black dice at short range
if distance in ["short"]:
black_offset = ArmadaDice.die_colors.index("black")
num_dice = int(self.encoding[hull_offset + black_offset].item())
colors = colors + ["black"] * num_dice
# TODO FIXME Only gathering should happen in the ship, rolling should follow in a different
# area of code
for color in colors:
faces.append(ArmadaDice.random_roll(color))
return colors, faces
def shield_damage(self, zone, amount):
"""
Deal damage to a hull zone but only deplete the shields, don't assign hull damage. Return
the amount of damage that is in excess of the shields.
Args:
zone (str): One of ArmadaTypes.hull_zones
amount (int): Amount of damage
Returns:
(int): Amount of damage that will be assigned to the hull.
"""
damage = amount
if "hull" != zone:
shield_offset = Ship._enc_index['shields'] + ArmadaTypes.hull_zones.index(zone)
shields = int(self.encoding[shield_offset].item())
if shields >= damage:
shields -= damage
damage = 0
else:
damage -= shields
shields = 0
self.encoding[shield_offset] = shields
return damage
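    # Worked example: with 2 shields on 'left', shield_damage('left', 5)
    # zeroes that zone's shields and returns 3, the excess to assign to the
    # hull via damage().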
def damage(self, zone, amount):
"""
Deal damage to a hull zone.
Args:
zone (str): One of ArmadaTypes.hull_zones or "hull"
amount (int): Amount of damage
"""
damage = amount
if "hull" != zone:
shield_offset = Ship._enc_index['shields'] + ArmadaTypes.hull_zones.index(zone)
shields = int(self.encoding[shield_offset].item())
if shields >= damage:
shields -= damage
damage = 0
else:
damage -= shields
shields = 0
self.encoding[shield_offset] = shields
# TODO FIXME This would be the correct time to handle the standard critical (or XX-9)
self.set('damage', self.get('damage') + damage)
def hull(self):
hull_offset = Ship._enc_index['hull']
hull = int(self.encoding[hull_offset].item())
return hull
def damage_cards(self):
return int(self.get('damage'))
def stringify(self):
"""Return a string version of the ship."""
shield_offset = Ship._enc_index['shields']
shield_length = Ship._enc_len['shields']
shields = self.encoding[shield_offset:shield_offset + shield_length]
green_def_idx = Ship._enc_index['green_defense_tokens']
green_def_len = Ship._enc_len['green_defense_tokens']
green_tokens = self.encoding[green_def_idx:green_def_idx + green_def_len]
red_def_idx = Ship._enc_index['red_defense_tokens']
red_def_len = Ship._enc_len['red_defense_tokens']
red_tokens = self.encoding[red_def_idx:red_def_idx + red_def_len]
return str(
"{}: hull ({}/{}), shields {}, green defense tokens {}, red defense tokens {}".format(
self.name, self.hull()-self.damage_cards(), self.hull(), shields, green_tokens, red_tokens))
def __str__(self):
return self.stringify()
def __repr__(self):
return self.stringify()
def parseShips(filename):
""" Returns a list of ships."""
keys = {}
ship_templates = {}
with open(filename, newline='') as ships:
shipreader = csv.reader(ships, delimiter=',', quotechar='|')
rowcount = 0
for row in shipreader:
# parse the header first to find the column keys
if ( 0 == rowcount ):
count = 0
for key in row:
count = count + 1
keys[count] = key
else:
newship = {}
count = 0
# Fill in all of the information on this vessel
for key in row:
count = count + 1
newship[keys[count]] = key
# Create a new ship template
ship_templates[newship['Ship Name']] = newship
rowcount = rowcount + 1
ship_types = {}
for name, attributes in ship_templates.items():
ship_types[name] = ShipType(name, attributes)
#print("{}:".format(name))
#for a_name, a_value in attributes.items():
# print(" {} : {}".format(a_name, a_value))
return ship_types
|
nilq/baby-python
|
python
|
import os
import sys
import time
import wave
import numpy as np
from datetime import datetime
from pyaudio import PyAudio, paInt16
class GenAudio(object):
def __init__(self):
self.num_samples = 2000  # pyaudio internal buffer size
self.sampling_rate = 8000  # sampling rate
self.level = 1500  # threshold above which a sample counts as sound
self.count_num = 20  # record when more than count_num samples in a block exceed level
self.save_length = 8  # minimum recording length: save_length * num_samples samples
self.time_count = 1000  # recording time limit, counted in read blocks
self.voice_string = []
# Save the recorded frames to a WAV file
def save_wav(self, filename):
wf = wave.open(filename, 'wb')
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(self.sampling_rate)
wf.writeframes(np.array(self.voice_string).tobytes())
wf.close()
def read_audio(self):
pa = PyAudio()
stream = pa.open(format=paInt16, channels=1, rate=self.sampling_rate, input=True,
frames_per_buffer=self.num_samples)
save_count = 0
save_buffer = []
time_count = self.time_count
while True:
time_count -= 1
# Read num_samples samples
string_audio_data = stream.read(self.num_samples)
# Convert the raw bytes into a numpy array
audio_data = np.frombuffer(string_audio_data, dtype=np.short)
# Count the samples that exceed level
large_sample_count = np.sum(audio_data > self.level)
print(np.max(audio_data), "large_sample_count=>", large_sample_count)
# If the count exceeds count_num, keep at least save_length blocks
if large_sample_count > self.count_num:
save_count = self.save_length
else:
save_count -= 1
if save_count < 0:
save_count = 0
if save_count > 0:
save_buffer.append(string_audio_data)
else:
if len(save_buffer) > 0:
self.voice_string = save_buffer
save_buffer = []
print("Recode a piece of voice successfully!")
return True
if time_count == 0:
if len(save_buffer) > 0:
self.voice_string = save_buffer
save_buffer = []
print("Recode a piece of voice successfully!")
return True
else:
return False
return True
def saveVoice():
r = GenAudio()
r.read_audio()
if os.path.exists("voice.wav"): # 如果文件存在
# 删除文件,可使用以下两种方法。
os.remove("voice.wav")
time.sleep(1)
r.save_wav("voice.wav")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 00:13:05 2018
@author: Gireesh Sundaram
"""
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, AdaBoostClassifier
from sklearn.metrics import f1_score, recall_score, precision_score, confusion_matrix
from imblearn.over_sampling import SMOTE
import xgboost as xgb
#%%
data = pd.read_csv("D:\\Hackathons\\Amex\\Datasets\\train.csv")
test = pd.read_csv("D:\\Hackathons\\Amex\\Datasets\\test.csv")
train = data.sample(frac = 0.9)
historic = pd.read_csv("D:\\Hackathons\\Amex\\Datasets\\historic_restruct.csv")
#%%
train['hour'] = pd.to_numeric(train['DateTime'].str.slice(11,13))
train["time"] = np.where(train['hour'].between(0, 4), "Midnight",
np.where(train['hour'].between(5, 8), "Early Morning",
np.where(train['hour'].between(9, 12), "Morning",
np.where(train['hour'].between(13, 16), "Afternoon",
np.where(train['hour'].between(17, 20), "Evening", "Night")))))
#%%
train = train.merge(historic, on = ['user_id', 'product'], how='left')
interest_view = train[['view', 'interest']]
interest_view = interest_view.fillna(value = 0)
#%%
selectedfeatures = ['product', 'campaign_id', 'webpage_id', 'product_category_1', 'gender', 'user_group_id', 'age_level', 'user_depth']
selectedcols = train[selectedfeatures]
#%%
# Trying to see whether any row has missing values, but none does!
navaluecols = ['user_group_id', 'age_level', 'user_depth', 'city_development_index']
handlingna = data[navaluecols]
handlingna["user_id"] = train["user_id"]
handlingna = handlingna.drop_duplicates()
user_id = handlingna[handlingna["user_id"].duplicated(keep=False)]
#%%
selectedcols['gender'] = selectedcols['gender'].fillna(value = "Female")
selectedcols['age_level'] = selectedcols['age_level'].fillna(value = 2)
selectedcols['user_depth'] = selectedcols['user_depth'].fillna(value = 1)
#selectedcols['city_development_index'] = selectedcols['city_development_index'].fillna(value = 3)
selectedcols = selectedcols.fillna(value = -99)
LE = LabelEncoder()
selectedcols_1 = selectedcols.apply(LE.fit_transform)
#%%
OHE = OneHotEncoder()
selectedcols_2 = OHE.fit_transform(selectedcols_1).toarray()
selectedcols_2 = pd.DataFrame(selectedcols_2)
selectedcols_2['is_click'] = train['is_click'].reset_index(drop=True)
#selectedcols_2['interest'] = interest_view['interest']
#selectedcols_2['view'] = interest_view['view']
#%%
x_train, x_test, y_train, y_test = train_test_split(selectedcols_2.drop(columns = ['is_click']), selectedcols_2['is_click'])
sm = SMOTE()
train_ip_new, train_op_new = sm.fit_sample(x_train, y_train)
#%%
model = DecisionTreeClassifier()
model.fit(train_ip_new, train_op_new)
prediction = model.predict(x_test)
score = f1_score(y_test, prediction)
recall = recall_score(y_test, prediction)
precision = precision_score(y_test, prediction)
cm = confusion_matrix(y_test, prediction)
#%%
def featureselection(dataframe):
dataframe['hour'] = pd.to_numeric(dataframe['DateTime'].str.slice(11,13))
selectedcols = dataframe[selectedfeatures]
selectedcols['gender'] = selectedcols['gender'].fillna(value = "Female")
selectedcols['age_level'] = selectedcols['age_level'].fillna(value = 2)
selectedcols['user_depth'] = selectedcols['user_depth'].fillna(value = 1)
#selectedcols['city_development_index'] = selectedcols['city_development_index'].fillna(value = 3)
selectedcols = selectedcols.fillna(value = -99)
selectedcols_1 = selectedcols.apply(LE.fit_transform)
selectedcols_2 = OHE.fit_transform(selectedcols_1).toarray()
selectedcols_2 = pd.DataFrame(selectedcols_2)
return selectedcols_2
#%%
preprocessed = featureselection(test)
output = model.predict(preprocessed)
#%%
final_submission = pd.DataFrame()
final_submission["session_id"] = test['session_id']
final_submission["is_click"] = output
final_submission.to_csv("D:\\Hackathons\\Amex\\Datasets\\submission_10_DT_improving_features.csv", index = False)
#%%
for items in selectedfeatures:
print(items)
print(data[items].unique())
print(test[items].unique())
#%%
time_by_day = train[["hour", 'is_click']].groupby(["hour"]).sum()
count_gender = data.groupby(['product', 'gender']).size().reset_index(name='count')
count_age = data.groupby(['product', 'age_level']).size().reset_index(name='count')
count_depth = data.groupby(['product', 'user_depth']).size().reset_index(name='count')
count_city = data.groupby(['product', 'city_development_index']).size().reset_index(name='count')
#%%
interest = pd.read_csv("D:\\Hackathons\\Amex\\Datasets\\historical_user_logs.csv")
#%%
view = interest.groupby(['user_id', 'product', 'action']).size().reset_index(name='count')
view_p = view.pivot_table(index = ['user_id', 'product'], columns = 'action', values = 'count').reset_index().fillna(value = 0)
view_p.to_csv("D:\\Hackathons\\Amex\\Datasets\\historic_restruct.csv", index = False)
preprocessed.to_csv("D:\\Hackathons\\Amex\\Datasets\\preprocessed_op.csv", index = False)
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
platform = "microblaze"
procs = ["microblaze"]
serial_port = "serial"
arch = "microblaze"
linux_compiler = "microblazeel-xilinx-linux-gnu-"
dtb_loadaddr = 0x81E00000
dtb_arch = "microblaze"
dtb_dtg = "microblaze-generic"
dtb_defconfig = "microblaze-generic_defconfig"
dtb_compiler = "microblazeel-xilinx-linux-gnu-"
kernel_loadaddr = 0x80000000
kernel_defconfig = "mmu_defconfig"
kernel_artifacts = ["arch/microblaze/boot/simpleImage.system.ub"]
kernel_image = "simpleImage.system.ub"
uboot_defconfig = "microblaze-generic_defconfig"
uboot_artifacts = ["u-boot"]
boot_scr_loadaddr = 0xBF200000
rootfs_loadaddr = 0x82E00000
overrides = ["microblaze"]
system_dtb = "microblaze-generic.dtb"
uboot_devicetree = "microblaze-generic"
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from terminaltables import AsciiTable
from colorclass import Color
class CostAnalysis:
def __init__(self, db):
self.db = db
def draw(self, market, symbol, args):
if len(args) != 0:
raise Exception('no argument required for {}'.format(CostAnalysis.__name__))
cb_cols = (
'耗用原料',
'耗用物料',
'直接人工',
'製造費用',
'製造成本',
'銷貨成本',
)
cb_data = self.db.cost_breakdown().query(
market, symbol, cb_cols,
)
me_cols = (
'薪資支出',
'保險費',
'修繕費',
'水電費',
'折舊',
'燃料費',
'包裝費',
'其他費用',
)
me_data = self.db.manufacturing_expense2().query(
market, symbol, me_cols,
)
data = dict()
for d in cb_data:
v = []
for c in cb_cols:
v.append(d[c])
data[d['year']] = v
for d in me_data:
if d['year'] not in data:
    # no cost-breakdown row for this year; pad with None placeholders
    data[d['year']] = [None] * 6
v = []
for c in me_cols:
v.append(d[c])
data[d['year']] += v
# Arrange them for terminaltables.
table_data = [('year',) + cb_cols + me_cols]
for year in sorted(data.keys()):
dd = data[year]
if len(dd) == 6:
dd += (None,)*8
row1 = (year,)
for d in dd:
row1 += (int(d/1000),) if d is not None else (' ',)
table_data.append(row1)
row2 = (' ',)
for d in dd[:6]:
t = "{:03.2f}%".format(d/dd[4]*100) if d is not None else ' '
row2 += (Color("{autogreen}" + t + "{/autogreen}"),)
for d in dd[6:]:
t = "{:03.2f}%".format(d/dd[3]*100) if d is not None else ' '
row2 += (Color("{autogreen}" + t + "{/autogreen}"),)
table_data.append(row2)
table = AsciiTable(table_data)
print(table.table)
|
nilq/baby-python
|
python
|
# Size of program memory (bytes)
MAX_PGM_MEM = 4096
# Size of context memory (bytes)
MAX_DATA_MEM = 2048
# Max stack size (bytes)
MAX_STACK = 512
# Number of registers
MAX_REGS = 11
# Default output indentation for some debug messages
IND = " " * 8
# Maximum values for various unsigned integers
MAX_UINT8 = 0xff
MAX_UINT16 = 0xffff
MAX_UINT32 = 0xffffffff
MAX_UINT64 = 0xffffffffffffffff
# +----------------+--------+--------------------+
# | 4 bits | 1 bit | 3 bits |
# | operation code | source | instruction class |
# +----------------+--------+--------------------+
# (MSB) (LSB)
# OpCode Classes
OPC_LD = 0x00 # load from immediate
OPC_LDX = 0x01 # load from register
OPC_ST = 0x02 # store immediate
OPC_STX = 0x03 # store value from register
OPC_ALU = 0x04 # 32 bits arithmetic operation
OPC_JMP = 0x05 # jump
OPC_RES = 0x06 # unused, reserved for future use
OPC_ALU64 = 0x07 # 64 bits arithmetic operation
# Operation codes (OPC_ALU or OPC_ALU64).
ALU_ADD = 0x00 # addition
ALU_SUB = 0x01 # subtraction
ALU_MUL = 0x02 # multiplication
ALU_DIV = 0x03 # division
ALU_OR = 0x04 # or
ALU_AND = 0x05 # and
ALU_LSH = 0x06 # left shift
ALU_RSH = 0x07 # right shift
ALU_NEG = 0x08 # negation
ALU_MOD = 0x09 # modulus
ALU_XOR = 0x0a # exclusive or
ALU_MOV = 0x0b # move
ALU_ARSH = 0x0c # sign extending right shift
ALU_ENDC = 0x0d # endianness conversion
# +--------+--------+-------------------+
# | 3 bits | 2 bits | 3 bits |
# | mode | size | instruction class |
# +--------+--------+-------------------+
# (MSB) (LSB)
# Load/Store Modes
LDST_IMM = 0x00 # immediate value
LDST_ABS = 0x01 # absolute
LDST_IND = 0x02 # indirect
LDST_MEM = 0x03 # load from / store to memory
# 0x04 # reserved
# 0x05 # reserved
LDST_XADD = 0x06 # exclusive add
# Sizes
LEN_W = 0x00 # word (4 bytes)
LEN_H = 0x01 # half-word (2 bytes)
LEN_B = 0x02 # byte (1 byte)
LEN_DW = 0x03 # double word (8 bytes)
# Operation codes (OPC_JMP)
JMP_JA = 0x00 # jump
JMP_JEQ = 0x01 # jump if equal
JMP_JGT = 0x02 # jump if greater than
JMP_JGE = 0x03 # jump if greater or equal
JMP_JSET = 0x04 # jump if dst & src (bitwise AND test)
JMP_JNE = 0x05 # jump if not equal
JMP_JSGT = 0x06 # jump if greater than (signed)
JMP_JSGE = 0x07 # jump if greater or equal (signed)
JMP_CALL = 0x08 # helper function call
JMP_EXIT = 0x09 # return from program
JMP_JLT = 0x0a # jump if lower than
JMP_JLE = 0x0b # jump if lower or equal
JMP_JSLT = 0x0c # jump if lower than (signed)
JMP_JSLE = 0x0d # jump if lower or equal (signed)
# Sources
JMP_K = 0x00 # 32-bit immediate value
JMP_X = 0x01 # `src` register
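# Example (a sketch): decoding the 8-bit opcode 0xb7 ("mov64 dst, imm"):
#   class  = 0xb7 & 0x07        -> 0x07 (OPC_ALU64)
#   source = (0xb7 >> 3) & 0x01 -> 0x00 (immediate source)
#   op     = 0xb7 >> 4          -> 0x0b (ALU_MOV)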
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStrings(Koan):
# https://docs.python.org/3/library/stdtypes.html#textseq
# https://docs.python.org/3/library/unittest.html#assert-methods
# https://docs.python.org/3/library/functions.html#isinstance
def test_double_quoted_strings_are_strings(self):
string = "Hello, world."
self.assertEqual(True, isinstance(string, str))
def test_single_quoted_strings_are_also_strings(self):
string = 'Goodbye, world.'
self.assertEqual(True, isinstance(string, str))
def test_triple_quote_strings_are_also_strings(self):
string = """Howdy, world!"""
self.assertEqual(True, isinstance(string, str))
def test_triple_single_quotes_work_too(self):
string = '''Bonjour tout le monde!'''
self.assertEqual(True, isinstance(string, str))
# Both string and bytes literals may optionally be prefixed with a letter 'r'
# or 'R'; such strings are called raw strings and treat backslashes as literal
# characters. As a result, in string literals, '\U' and '\u' escapes in raw
# strings are not treated specially. Given that Python 2.x’s raw unicode
# literals behave differently than Python 3.x’s the 'ur' syntax is not supported.
def test_raw_strings_are_also_strings(self):
string = r"Konnichi wa, world!"
self.assertEqual(True, isinstance(string, str))
def test_use_single_quotes_to_create_string_with_double_quotes(self):
string = 'He said, "Go Away."'
self.assertEqual('He said, "Go Away."', string)
def test_use_double_quotes_to_create_strings_with_single_quotes(self):
string = "Don't"
self.assertEqual(r"Don't", string)
def test_use_backslash_for_escaping_quotes_in_strings(self):
a = "He said, \"Don't\""
b = 'He said, "Don\'t"'
self.assertEqual(True, (a == b))
# https://docs.python.org/3/library/functions.html#len
def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
string = "It was the best of times,\n\
It was the worst of times."
self.assertEqual(52, len(string))
def test_triple_quoted_strings_can_span_lines(self):
string = """
Howdy,
world!
"""
self.assertEqual(15, len(string))
def test_triple_quoted_strings_need_less_escaping(self):
a = "Hello \"world\"."
b = """Hello "world"."""
self.assertEqual(True, (a == b))
def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
string = """Hello "world\""""
self.assertEqual('Hello "world"', string)
def test_plus_concatenates_strings(self):
string = "Hello, " + "world"
self.assertEqual("Hello, world", string)
def test_adjacent_strings_are_concatenated_automatically(self):
string = "Hello" ", " "world"
self.assertEqual("Hello, world", string)
def test_plus_will_not_modify_original_strings(self):
hi = "Hello, "
there = "world"
string = hi + there
self.assertEqual("Hello, ", hi)
self.assertEqual("world", there)
def test_plus_equals_will_append_to_end_of_string(self):
hi = "Hello, "
there = "world"
hi += there
self.assertEqual("Hello, world", hi)
def test_plus_equals_also_leaves_original_string_unmodified(self):
original = "Hello, "
hi = original
there = "world"
hi += there
self.assertEqual("Hello, ", original)
def test_most_strings_interpret_escape_characters(self):
string = "\n"
self.assertEqual('\n', string)
self.assertEqual("""\n""", string)
self.assertEqual(1, len(string))
|
nilq/baby-python
|
python
|
# Mount RPC client -- RFC 1094 (NFS), Appendix A
# This module demonstrates how to write your own RPC client in Python.
# When this example was written, there was no RPC compiler for
# Python. Without such a compiler, you must first create classes
# derived from Packer and Unpacker to handle the data types for the
# server you want to interface to. You then write the client class.
# If you want to support both the TCP and the UDP version of a
# protocol, use multiple inheritance as shown below.
import rpc
from rpc import Packer, Unpacker, TCPClient, UDPClient
# Program number and version for the mount protocol
MOUNTPROG = 100005
MOUNTVERS = 1
# Size of the 'fhandle' opaque structure
FHSIZE = 32
# Packer derived class for Mount protocol clients.
# The only thing we need to pack beyond basic types is an 'fhandle'
class MountPacker(Packer):
def pack_fhandle(self, fhandle):
self.pack_fopaque(FHSIZE, fhandle)
# Unpacker derived class for Mount protocol clients.
# The important types we need to unpack are fhandle, fhstatus,
# mountlist and exportlist; mountstruct, exportstruct and groups are
# used to unpack components of mountlist and exportlist and the
# corresponding functions are passed as function argument to the
# generic unpack_list function.
class MountUnpacker(Unpacker):
def unpack_fhandle(self):
return self.unpack_fopaque(FHSIZE)
def unpack_fhstatus(self):
status = self.unpack_uint()
if status == 0:
fh = self.unpack_fhandle()
else:
fh = None
return status, fh
def unpack_mountlist(self):
return self.unpack_list(self.unpack_mountstruct)
def unpack_mountstruct(self):
hostname = self.unpack_string()
directory = self.unpack_string()
return (hostname, directory)
def unpack_exportlist(self):
return self.unpack_list(self.unpack_exportstruct)
def unpack_exportstruct(self):
filesys = self.unpack_string()
groups = self.unpack_groups()
return (filesys, groups)
def unpack_groups(self):
return self.unpack_list(self.unpack_string)
# These are the procedures specific to the Mount client class.
# Think of this as a derived class of either TCPClient or UDPClient.
class PartialMountClient:
# This method is called by Client.__init__ to initialize
# self.packer and self.unpacker
def addpackers(self):
self.packer = MountPacker()
self.unpacker = MountUnpacker('')
# This method is called by Client.__init__ to bind the socket
# to a particular network interface and port. We use the
# default network interface, but if we're running as root,
# we want to bind to a reserved port
def bindsocket(self):
import os
try:
uid = os.getuid()
except AttributeError:
uid = 1
if uid == 0:
port = rpc.bindresvport(self.sock, '')
# 'port' is not used
else:
self.sock.bind(('', 0))
# This function is called to cough up a suitable
# authentication object for a call to procedure 'proc'.
def mkcred(self):
if self.cred is None:
self.cred = rpc.AUTH_UNIX, rpc.make_auth_unix_default()
return self.cred
# The methods Mnt, Dump etc. each implement one Remote
# Procedure Call. This is done by calling self.make_call()
# with as arguments:
#
# - the procedure number
# - the arguments (or None)
# - the "packer" function for the arguments (or None)
# - the "unpacker" function for the return value (or None)
#
# The packer and unpacker function, if not None, *must* be
# methods of self.packer and self.unpacker, respectively.
# A value of None means that there are no arguments or no
# return value, respectively.
#
# The return value from make_call() is the return value from
# the remote procedure call, as unpacked by the "unpacker"
# function, or None if the unpacker function is None.
#
# (Even if you expect a result of None, you should still
# return the return value from make_call(), since this may be
# needed by a broadcasting version of the class.)
#
# If the call fails, make_call() raises an exception
# (this includes time-outs and invalid results).
#
# Note that (at least with the UDP protocol) there is no
# guarantee that a call is executed at most once. When you do
# get a reply, you know it has been executed at least once;
# when you don't get a reply, you know nothing.
def Mnt(self, directory):
return self.make_call(1, directory, \
self.packer.pack_string, \
self.unpacker.unpack_fhstatus)
def Dump(self):
return self.make_call(2, None, \
None, self.unpacker.unpack_mountlist)
def Umnt(self, directory):
return self.make_call(3, directory, \
self.packer.pack_string, None)
def Umntall(self):
return self.make_call(4, None, None, None)
def Export(self):
return self.make_call(5, None, \
None, self.unpacker.unpack_exportlist)
# We turn the partial Mount client into a full one for either protocol
# by use of multiple inheritance. (In general, when class C has base
# classes B1...Bn, if x is an instance of class C, methods of x are
# searched first in C, then in B1, then in B2, ..., finally in Bn.)
class TCPMountClient(PartialMountClient, TCPClient):
def __init__(self, host):
TCPClient.__init__(self, host, MOUNTPROG, MOUNTVERS)
class UDPMountClient(PartialMountClient, UDPClient):
def __init__(self, host):
UDPClient.__init__(self, host, MOUNTPROG, MOUNTVERS)
# A little test program for the Mount client. This takes a host as
# command line argument (default the local machine), prints its export
# list, and attempts to mount and unmount each exported file system.
# An optional first argument of -t or -u specifies the protocol to use
# (TCP or UDP), default is UDP.
def test():
import sys
if sys.argv[1:] and sys.argv[1] == '-t':
C = TCPMountClient
del sys.argv[1]
elif sys.argv[1:] and sys.argv[1] == '-u':
C = UDPMountClient
del sys.argv[1]
else:
C = UDPMountClient
if sys.argv[1:]: host = sys.argv[1]
else: host = ''
mcl = C(host)
list = mcl.Export()
for item in list:
print item
try:
mcl.Mnt(item[0])
except:
print 'Sorry'
continue
mcl.Umnt(item[0])
|
nilq/baby-python
|
python
|
import os
import librosa
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn.preprocessing import LabelEncoder
def get_feature_label(row, directory):
file_name = os.path.join(directory, str(row.ID) + '.wav')
# file_name = os.path.join("data_pipeline", "urban_sound_files", str(row.ID) + '.wav')
# handle exception to check if there isn't a file which is corrupted
try:
# here kaiser_fast is a technique used for faster extraction
X, sample_rate = librosa.load(file_name, res_type='kaiser_fast')
# extract mfcc feature from data
mfccs = librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40)
mfccs_scaled: np.ndarray = np.mean(mfccs.T, axis=0)
except Exception as e:
print("Error encountered while parsing file:", file_name, "-", e)
return None, None
feature: np.ndarray = mfccs_scaled
label = row.Class
return feature, label
def get_data_labels(features_df: DataFrame):
"""
Convert features and corresponding classification labels into numpy arrays so that they can be fed into a
neural network.
:param features_df: DataFrame with 'feature' and 'label' columns
:return: X (feature matrix) and y (target labels)
"""
X: np.ndarray = np.array(features_df.feature.tolist())
y: np.ndarray = np.array(features_df.label.tolist())
# encode label classification
le = LabelEncoder()
# one hot encoded labels
# yy = to_categorical(le.fit_transform(y))
return X, y  # or yy once one-hot encoding is enabled
def get_features_and_labels(data_in, directory):
"""
"""
# function to load files and extract features
train_temp: DataFrame = pd.DataFrame(columns=['feature', 'label'])
for idx, row in data_in.iterrows():
feature, label = get_feature_label(row, directory)
train_temp = train_temp.append({'feature': feature, 'label': label}, ignore_index=True)
train_temp.columns = ['feature', 'label']
x_train, y_train = get_data_labels(train_temp)
return x_train, y_train
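# Hypothetical usage sketch, not part of the original module: assumes a
# metadata CSV with an 'ID' column matching <ID>.wav filenames and a 'Class'
# label column; the paths below are illustrative only.
if __name__ == '__main__':
    metadata = pd.read_csv('data_pipeline/urban_sound.csv')
    x_train, y_train = get_features_and_labels(metadata, 'data_pipeline/urban_sound_files')
    print(x_train.shape, y_train.shape)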
|
nilq/baby-python
|
python
|
from urllib.parse import urlencode,parse_qs,unquote
def stringify(d,u=False):
qs = urlencode(d)
if u:
qs = unquote(qs)
return qs
def parse(url):
d = dict( (k, v if len(v)>1 else v[0] )
for k, v in parse_qs(url).items() )
return d
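# Quick usage sketch: stringify() URL-encodes a dict (optionally unquoting
# percent escapes), and parse() inverts it, flattening single-item lists.
if __name__ == '__main__':
    print(stringify({'path': 'a/b', 'page': 2}))          # path=a%2Fb&page=2
    print(stringify({'path': 'a/b', 'page': 2}, u=True))  # path=a/b&page=2
    print(parse('path=a%2Fb&page=2'))                     # {'path': 'a/b', 'page': '2'}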
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#coding:utf-8
import json
import copy
import time
import os
endpoint = "bind9"
name_stats_path = "/var/named/data/named_stats.txt"
def main():
if os.path.isfile(name_stats_path):
os.remove(name_stats_path)
os.system("rndc stats")
ts = int(time.time())
payload = []
data = {"endpoint":endpoint,"metric":"","timestamp":ts,"step":60,"value":"","counterType":"COUNTER","tags":""}
f = open(name_stats_path)
for line in f:
if "++ Incoming Requests ++" in line:
data["tags"] = "tag=Incoming_Requests"
continue
elif "++ Incoming Queries ++" in line:
data["tags"] = "tag=Incoming_Queries"
continue
elif "++ Outgoing Queries ++" in line:
data["tags"] = "tag=Outgoing_Queries"
continue
elif "++ Name Server Statistics ++" in line:
data["tags"] = "tag=Name_Server_Statistics"
continue
elif "++ Zone Maintenance Statistics ++" in line:
data["tags"] = "tag=Zone_Maintenance_Statistics"
continue
elif "++ Resolver Statistics ++" in line:
data["tags"] = "tag=Resolver_Statistics"
continue
elif "++ Cache DB RRsets ++" in line:
data["tags"] = "tag=Cache DB RRsets"
continue
elif "++ Socket I/O Statistics ++" in line:
data["tags"] = "tag=Socket_I/O_Statistics"
continue
named_stats = line.strip().split(' ')
if not named_stats[0].isdigit():
continue
data["value"] = named_stats[0]
data["metric"] = string_join(named_stats)
payload.append(copy.copy(data))
f.close()
os.remove(name_stats_path)
print json.dumps(payload,indent=4)
def string_join(split_list):
num = 0
join_str = split_list[1]
for string in split_list:
num = num + 1
if num <= 2:
continue
join_str = join_str + "_" + string
return join_str
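# Worked example: string_join(['5', 'queries', 'resulted', 'in', 'NXDOMAIN'])
# seeds the result with the second token, skips the first two, and returns
# 'queries_resulted_in_NXDOMAIN' as the metric name.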
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from plugins.database import db
class BaseModel:
def save(self):
try:
db.session.add(self)
db.session.commit()
return True
except Exception:
db.session.rollback()
return False
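# Hypothetical usage sketch (the 'User' model and its columns are illustrative,
# assuming 'db' is a Flask-SQLAlchemy instance):
# class User(db.Model, BaseModel):
#     id = db.Column(db.Integer, primary_key=True)
#     name = db.Column(db.String(80))
# User(name='alice').save()  # True on success, False (rolled back) on error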
|
nilq/baby-python
|
python
|
"""Creates a custom kinematics body with two links and one joint
"""
from openravepy import *
from numpy import eye, array, zeros
env = Environment() # create openrave environment
env.SetViewer('qtcoin') # attach viewer (optional)
with env:
robot=RaveCreateRobot(env,'')
robot.SetName('camera')
linkinfo=KinBody.LinkInfo()
linkinfo._name='camerabase'
ginfo=KinBody.GeometryInfo()
ginfo._type=GeometryType.Box
ginfo._vGeomData=[0.1,0.1,0.1] # box extents
ginfo._vDiffuseColor=[0,0,1]
ginfo._t = eye(4)
linkinfo._vgeometryinfos = [ginfo]
camera1info=Robot.AttachedSensorInfo()
camera1info._linkname='camerabase'
camera1info._name = 'ensenson10'
camera1info._sensorname = 'base_pinhole_camera'
camera1info._trelative = eye(4)
camera1info._trelative[0:3,3] = [0,0,0.1]
camera1info._sensorgeometry = CameraGeomData()
camera1info._sensorgeometry.width = 640
camera1info._sensorgeometry.height = 480
camera1info._sensorgeometry.intrinsics.K = array([[640.0,0,320],[0,640,240],[0,0,1]])
camera1info._sensorgeometry.intrinsics.distortion_coeffs = zeros(5)
camera1info._sensorgeometry.intrinsics.distortion_model = 'opencv'
camera1info._sensorgeometry.intrinsics.focal_length = 0.05
robot.Init([linkinfo],[],[],[])
env.Add(robot)
robot.AddAttachedSensor(camera1info,True)
|
nilq/baby-python
|
python
|
import itertools
from surprise import accuracy
from collections import defaultdict
class RecommenderMetrics:
@staticmethod
def mae(predictions):
return accuracy.mae(predictions, verbose=False)
@staticmethod
def rmse(predictions):
return accuracy.rmse(predictions, verbose=False)
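# Hypothetical usage sketch ('algo' and 'testset' are illustrative; predictions
# would come from a fitted surprise algorithm):
# predictions = algo.test(testset)
# print(RecommenderMetrics.rmse(predictions), RecommenderMetrics.mae(predictions))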
|
nilq/baby-python
|
python
|
from setuptools import setup
install_requires = (
'beautifulsoup4==4.6.3',
)
tests_require = (
'pytest',
'pytest-cov',
'mock',
)
setup_requires = (
'pytest-runner',
'flake8',
)
setup(
name='tracking-id-injector',
version='1.0.1',
url='https://github.com/msufa/tracking-id-injector',
author='Maciek Sufa',
description=('Console script for injecting Google Analytics tracking IDs '
'into HTML files.'),
license='Apache 2.0',
packages=['tridinjector'],
install_requires=install_requires,
tests_require=tests_require,
setup_requires=setup_requires,
entry_points={
'console_scripts': [
'tracking-id-injector = tridinjector.injector:main'
]
},
)
|
nilq/baby-python
|
python
|
import argparse
import time
import math
import numpy as np
import sklearn.metrics as sk
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import data
import model
from utils_lm import batchify, get_batch, repackage_hidden
# Go through some rigmarole to do "from ..utils.display_results import show_performance"
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from utils.display_results import show_performance
from utils.log_sum_exp import log_sum_exp
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='data/penn/',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (LSTM, QRNN, GRU)')
parser.add_argument('--emsize', type=int, default=400,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1150,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=3,
help='number of layers')
parser.add_argument('--lr', type=float, default=30,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=80, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=70,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.4,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.3,
help='dropout for rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.65,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.5,
help='amount of weight dropout to apply to the RNN hidden to hidden matrix')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
help='non-monotone interval')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
randomhash = ''.join(str(time.time()).split('.'))
parser.add_argument('--save', type=str, default=randomhash+'.pt',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=2,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1,
help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=1.2e-6,
help='weight decay applied to all weights')
parser.add_argument('--resume', type=str, default='',
help='path of model to resume')
parser.add_argument('--optimizer', type=str, default='sgd',
help='optimizer to use (sgd, adam)')
parser.add_argument('--when', nargs="+", type=int, default=[-1],
help='When (which epochs) to divide the learning rate by 10 - accepts multiple')
parser.add_argument('--character_level', action='store_true', help="Use this flag to evaluate character-level models.")
args = parser.parse_args()
args.tied = True
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
def model_save(fn):
with open(fn, 'wb') as f:
torch.save([model, criterion, optimizer], f)
def model_load(fn):
global model, criterion, optimizer
with open(fn, 'rb') as f:
model, criterion, optimizer = torch.load(f)
import os
import hashlib
fn = 'corpus.{}.data'.format(hashlib.md5(args.data.encode()).hexdigest())
if os.path.exists(fn):
print('Loading cached dataset...')
corpus = torch.load(fn)
else:
print('Producing dataset...')
corpus = data.Corpus(args.data)
torch.save(corpus, fn)
eval_batch_size = 10
test_batch_size = 1 # DON'T CHANGE THIS
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
print('Producing ood datasets...')
answers_corpus = data.OODCorpus('eng_web_tbk/answers/conll/answers_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
answers_data = batchify(answers_corpus.data, test_batch_size, args)
email_corpus = data.OODCorpus('eng_web_tbk/email/conll/email_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
email_data = batchify(email_corpus.data, test_batch_size, args)
newsgroup_corpus = data.OODCorpus('eng_web_tbk/newsgroup/conll/newsgroup_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
newsgroup_data = batchify(newsgroup_corpus.data, test_batch_size, args)
reviews_corpus = data.OODCorpus('eng_web_tbk/reviews/conll/reviews_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
reviews_data = batchify(reviews_corpus.data, test_batch_size, args)
weblog_corpus = data.OODCorpus('eng_web_tbk/weblog/conll/weblog_penntrees.dev.conll', corpus.dictionary, char=args.character_level)
weblog_data = batchify(weblog_corpus.data, test_batch_size, args)
###############################################################################
# Build the model
###############################################################################
from splitcross import SplitCrossEntropyLoss
criterion = None
ntokens = len(corpus.dictionary)
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop, args.tied)
###
assert args.resume, 'must provide a --resume argument'
print('Resuming model ...')
model_load(args.resume)
optimizer.param_groups[0]['lr'] = args.lr
model.dropouti, model.dropouth, model.dropout, args.dropoute = args.dropouti, args.dropouth, args.dropout, args.dropoute
if args.wdrop:
from weight_drop import WeightDrop
for rnn in model.rnns:
if type(rnn) == WeightDrop: rnn.dropout = args.wdrop
elif rnn.zoneout > 0: rnn.zoneout = args.wdrop
###
if not criterion:
splits = []
if ntokens > 500000:
# One Billion
# This produces fairly even matrix mults for the buckets:
# 0: 11723136, 1: 10854630, 2: 11270961, 3: 11219422
splits = [4200, 35000, 180000]
elif ntokens > 75000:
# WikiText-103
splits = [2800, 20000, 76000]
print('Using', splits)
criterion = SplitCrossEntropyLoss(args.emsize, splits=splits, verbose=False)
###
if args.cuda:
model = model.cuda()
criterion = criterion.cuda()
###
params = list(model.parameters()) + list(criterion.parameters())
total_params = sum(x.size()[0] * x.size()[1] if len(x.size()) > 1 else x.size()[0] for x in params if x.size())
print('Args:', args)
print('Model total parameters:', total_params)
###############################################################################
# Eval code
###############################################################################
ood_num_examples = test_data.size(0) // 5
expected_ap = ood_num_examples / (ood_num_examples + test_data.size(0))
recall_level = 0.9
def get_base_rates():
batch, i = 0, 0
seq_len = args.bptt
ntokens = len(corpus.dictionary)
token_counts = np.zeros(ntokens)
total_count = 0
for i in range(0, train_data.size(0), args.bptt): # Assume OE dataset is larger. It is, because we're using wikitext-2.
data, targets = get_batch(train_data, i, args, seq_len=seq_len)
for j in range(targets.numel()):
token_counts[targets[j].data.cpu().numpy()[0]] += 1
total_count += 1
batch += 1
return token_counts / total_count
print('Getting base rates...')
# base_rates = get_base_rates()
# np.save('./base_rates.npy', base_rates)
base_rates = Variable(torch.from_numpy(np.load('./base_rates.npy').astype(np.float32))).cuda().float().squeeze()  # squeeze to a flat float tensor
uniform_base_rates = Variable(torch.from_numpy(np.ones(len(corpus.dictionary)).astype(np.float32))).cuda().float().squeeze()
uniform_base_rates /= uniform_base_rates.numel()
print('Done.')
def evaluate(data_source, corpus, batch_size=10, ood=False):
# Turn on evaluation mode which disables dropout.
model.eval()
if args.model == 'QRNN': model.reset()
loss_accum = 0
losses = []
ntokens = len(corpus.dictionary)
for i in range(0, data_source.size(0) - 1, args.bptt):
if (i >= ood_num_examples // test_batch_size) and (ood is True):
break
hidden = model.init_hidden(batch_size)
hidden = repackage_hidden(hidden)
data, targets = get_batch(data_source, i, args, evaluation=True)
output, hidden = model(data, hidden)
logits = model.decoder(output)
smaxes = F.softmax(logits - torch.max(logits, dim=1, keepdim=True)[0], dim=1)
tmp = smaxes[range(targets.size(0)), targets]
log_prob = torch.log(tmp).mean(0) # divided by seq len, so this is the negative nats per char
loss = -log_prob.data.cpu().numpy()[0]
loss_accum += loss
# losses.append(loss)
# Experimental!
# anomaly_score = -torch.max(smaxes, dim=1)[0].mean() # negative MSP
anomaly_score = ((smaxes).add(1e-18).log() * uniform_base_rates.unsqueeze(0)).sum(1).mean(0) # negative KL to uniform
losses.append(anomaly_score.data.cpu().numpy()[0])
#
return loss_accum / (len(data_source) // args.bptt), losses
# Run on test data.
print('\nPTB')
test_loss, test_losses = evaluate(test_data, corpus, test_batch_size)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(
test_loss, math.exp(test_loss), test_loss / math.log(2)))
print('=' * 89)
print('\nAnswers (OOD)')
ood_loss, ood_losses = evaluate(answers_data, answers_corpus, test_batch_size, ood=True)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(
ood_loss, math.exp(ood_loss), ood_loss / math.log(2)))
print('=' * 89)
show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level)
print('\nEmail (OOD)')
ood_loss, ood_losses = evaluate(email_data, email_corpus, test_batch_size, ood=True)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(
ood_loss, math.exp(ood_loss), ood_loss / math.log(2)))
print('=' * 89)
show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level)
print('\nNewsgroup (OOD)')
ood_loss, ood_losses = evaluate(newsgroup_data, newsgroup_corpus, test_batch_size, ood=True)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(
ood_loss, math.exp(ood_loss), ood_loss / math.log(2)))
print('=' * 89)
show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level)
print('\nReviews (OOD)')
ood_loss, ood_losses = evaluate(reviews_data, reviews_corpus, test_batch_size, ood=True)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(
ood_loss, math.exp(ood_loss), ood_loss / math.log(2)))
print('=' * 89)
show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level)
print('\nWeblog (OOD)')
ood_loss, ood_losses = evaluate(weblog_data, weblog_corpus, test_batch_size, ood=True)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(
ood_loss, math.exp(ood_loss), ood_loss / math.log(2)))
print('=' * 89)
show_performance(ood_losses, test_losses, expected_ap, recall_level=recall_level)
|
nilq/baby-python
|
python
|
from typing import Optional
from openslides_backend.action.actions.user.user_scope_permission_check_mixin import (
UserScope,
)
from openslides_backend.permissions.management_levels import (
CommitteeManagementLevel,
OrganizationManagementLevel,
)
from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class ScopePermissionsTestMixin(BaseActionTestCase):
def setup_admin_scope_permissions(self, scope: Optional[UserScope]) -> None:
"""
Helper function to set up permissions for different scopes for user 1. If no scope is given, the user has no permissions.
"""
if scope is None:
self.set_organization_management_level(None)
elif scope == UserScope.Organization:
self.set_organization_management_level(
OrganizationManagementLevel.CAN_MANAGE_USERS
)
elif scope == UserScope.Committee:
self.update_model(
"user/1",
{
"organization_management_level": None,
"committee_$1_management_level": CommitteeManagementLevel.CAN_MANAGE,
},
)
elif scope == UserScope.Meeting:
self.create_meeting()
self.set_organization_management_level(None)
self.set_user_groups(1, [3])
self.set_group_permissions(3, [Permissions.User.CAN_MANAGE])
def setup_scoped_user(self, scope: UserScope) -> None:
"""
Helper function to set up user 111 in different scopes.
"""
if scope == UserScope.Organization:
self.set_models(
{
"committee/1": {"meeting_ids": [1]},
"committee/2": {"meeting_ids": [2]},
"meeting/1": {
"user_ids": [111],
"committee_id": 1,
"group_ids": [11],
"is_active_in_organization_id": 1,
},
"meeting/2": {
"user_ids": [111],
"committee_id": 2,
"group_ids": [22],
"is_active_in_organization_id": 1,
},
"user/111": {
"meeting_ids": [1, 2],
"committee_ids": [1, 2],
"group_$_ids": ["1", "2"],
"group_$1_ids": [11],
"group_$2_ids": [22],
},
"group/11": {"meeting_id": 1, "user_ids": [111]},
"group/22": {"meeting_id": 2, "user_ids": [111]},
}
)
elif scope == UserScope.Committee:
self.set_models(
{
"committee/1": {"meeting_ids": [1, 2]},
"meeting/1": {
"user_ids": [111],
"committee_id": 1,
"group_ids": [11],
"is_active_in_organization_id": 1,
},
"meeting/2": {
"user_ids": [111],
"committee_id": 1,
"group_ids": [11],
"is_active_in_organization_id": 1,
},
"user/111": {
"meeting_ids": [1, 2],
"committee_ids": [1],
"group_$_ids": ["1", "2"],
"group_$1_ids": [11],
"group_$2_ids": [22],
},
"group/11": {"meeting_id": 1, "user_ids": [111]},
"group/22": {"meeting_id": 2, "user_ids": [111]},
}
)
elif scope == UserScope.Meeting:
self.set_models(
{
"meeting/1": {"committee_id": 1, "is_active_in_organization_id": 1},
"user/111": {"meeting_ids": [1], "committee_ids": [1]},
}
)
|
nilq/baby-python
|
python
|
"""
GUI layout that allows free positioning of children.
@author Ben Giacalone
"""
from tools.envedit.gui.gui_layout import GUILayout
class GUIFreeLayout(GUILayout):
def __init__(self):
GUILayout.__init__(self)
self.children = []
# Adds a child to the layout
def add_child(self, child):
if self.rendering:
child.add_render()
self.children.append(child)
self.update()
# Removes a child from the layout
def remove_child(self, child):
child.stop_render()
self.children.remove(child)
self.update()
# Removes all children from the layout
def clear(self):
for _ in range(len(self.children)):
self.remove_child(self.children[0])
# Checks if this component contains a point in screen space, then propagates to children
# Note: this layout cannot respond to events
def get_selected_component(self, x, y):
if self.bbox.point_inside(x, y):
for child in self.children:
child_component = child.get_selected_component(x, y)
if child_component is not None:
return child_component
return None
def update(self):
for child in self.children:
# If child is outside bounds, reposition it back in
if child.bbox.x + child.bbox.width > self.bbox.x + self.bbox.width:
child.bbox.x -= (child.bbox.x + child.bbox.width) - (self.bbox.x + self.bbox.width)
if child.bbox.y + child.bbox.height > self.bbox.y + self.bbox.height:
child.bbox.y -= (child.bbox.y + child.bbox.height) - (self.bbox.y + self.bbox.height)
child.set_clip_region(self.clip_region.get_intersection(self.bbox))
child.update()
def add_render(self):
self.rendering = True
for child in self.children:
child.add_render()
def stop_render(self):
self.rendering = False
for child in self.children:
child.stop_render()
|
nilq/baby-python
|
python
|
#!/bin/python
#
# File: test-all.py
# Authors: Leonid Shamis (leonid.shamis@gmail.com)
# Keith Schwarz (htiek@cs.stanford.edu)
#
# A test harness that automatically runs your compiler on all of the tests
# in the 'samples' directory. This should help you diagnose errors in your
# compiler and will help you gauge your progress as you're going. It also
# will help catch any regressions you accidentally introduce later on in
# the project.
#
# That said, this test script is not designed to catch all errors and you
# will need to do your own testing. Be sure to look over these tests
# carefully and to think over what cases are covered and, more importantly,
# what cases are not.
import os
from subprocess import *
TEST_DIRECTORY = 'samples'
for _, _, files in os.walk(TEST_DIRECTORY):
for file in files:
if not (file.endswith('.decaf') or file.endswith('.frag')):
continue
refName = os.path.join(TEST_DIRECTORY, '%s.out' % file.split('.')[0])
testName = os.path.join(TEST_DIRECTORY, file)
result = Popen('./dcc < ' + testName, shell = True, stderr = STDOUT, stdout = PIPE)
result = Popen('diff -w - ' + refName, shell = True, stdin = result.stdout, stdout = PIPE)
print 'Executing test "%s"' % testName
print ''.join(result.stdout.readlines())
|
nilq/baby-python
|
python
|
number_1 = int(input('Enter your first number:'))
number_2 = int(input('Enter your second number:'))
operator = input('Enter your operator: ')
if operator=='+':
print(number_1 + number_2)
elif operator=='-':
print(number_1 - number_2)
elif operator=='*':
print(number_1 * number_2)
elif operator=='/':
print(number_1 / number_2)
else:
print('Invalid operator')
|
nilq/baby-python
|
python
|
# Winston Peng
# SoftDev1 pd9
# K10 -- Jinja Tuning
# 2019-9-23
from flask import Flask, render_template
import static.script as script
app = Flask(__name__)
@app.route('/occupyflaskst')
def occupations():
return render_template(
'occ.html',
team = 'Connor Oh, Nahi Khan, Winston Peng -- Team Beaker',
# <h1>
header = 'Jinja Tuning -- Occupations',
# <title>
title = 'Job Occupations',
# This gets the random job
randOcc = script.randJob(),
# Dictionary of the list
occ = script.csvDict
)
if __name__ == '__main__':
app.debug = True
app.run()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# macro_avg.py v1.0 9-19-2012 Jeff Doak jeff.w.doak@gmail.com
from chargedensity import *
import numpy as np
import sys
if len(sys.argv) > 1:
if str(sys.argv[1]) == "CHG":
a = ChargeDensity(str(sys.argv[1]),format_="chgcar")
else:
a = ChargeDensity(str(sys.argv[1]))
else:
a = ChargeDensity("LOCPOT")
avg1 = a.avg_density_vol()
avg2 = np.average(a.density)
A = np.linalg.norm(a.unitcell.cell_vec[0])
B = np.linalg.norm(a.unitcell.cell_vec[1])
C = np.linalg.norm(a.unitcell.cell_vec[2])
area = A*B
print "avg1",avg1
print "avg2",avg2
print area
print A,B,C
sys.exit()
a.unitcell.scale = 1.0
den_z = a.integrate_z_density()
z_pos = np.linspace(0,a.unitcell.cell_vec[2,2],len(den_z))
macro_z = a.macro_avg_z(p1)
for i in range(len(den_z)):
print z_pos[i],den_z[i],macro_z[i]
# Calculate bulk and vacuum average, assuming that the bulk is located in the
# 1st half of the cell (along z) and the vacuum is in the second half of the
# cell.
bulk_start = 0.2
bulk_stop = 0.3
vac_start = 0.7
vac_stop = 0.8
bi = int(np.floor(bulk_start*len(den_z)))
bf = int(np.floor(bulk_stop*len(den_z)))
vi = int(np.floor(vac_start*len(den_z)))
vf = int(np.floor(vac_stop*len(den_z)))
bulk_avg = np.average(macro_z[bi:bf])
bulk_std = np.std(macro_z[bi:bf])
#bulk_center = macro_z[int(np.floor(0.25*len(den_z)))]
vac_avg = np.average(macro_z[vi:vf])
vac_std = np.std(macro_z[vi:vf])
#vac_center = macro_z[int(np.floor(0.75*len(den_z)))]
print
print "Bulk_avg_(eV) Bulk_std_(eV) Vac_avg_(eV) Vac_std_(eV)"
print bulk_avg,bulk_std,vac_avg,vac_std
#print "Bulk_avg_(eV) Bulk_center_(eV) Vac_avg_(eV) Vac_center_(eV)"
#print bulk_avg,bulk_center,vac_avg,vac_center
|
nilq/baby-python
|
python
|
"""
A Python module to communicate with the Electrolux Connectivity Platform
"""
__all__ = [
'Error',
'LoginError',
'RequestError',
'ResponseError',
'Session'
]
from .Session import (
Error,
LoginError,
RequestError,
ResponseError,
Session
)
|
nilq/baby-python
|
python
|
from enum import Enum
class Transition(Enum):
"""
Enumeration of the transitions a job can go through.
"""
ACQUIRE = 0
RELEASE = 1
START = 2
PROGRESS = 3
FINISH = 4
ERROR = 5
RESET = 6
ABORT = 7
CANCEL = 8
@property
def json_property_name(self) -> str:
"""
Gets the name of the JSON property for this transition.
"""
return f"on_{self.name.lower()}"
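# Quick usage sketch: each transition maps to an "on_<name>" JSON property.
if __name__ == '__main__':
    assert Transition.START.json_property_name == "on_start"
    assert Transition.PROGRESS.json_property_name == "on_progress"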
|
nilq/baby-python
|
python
|
from ..models.box_daily_square import BoxDailySquare
class BoxDailySquareManager(object):
def create_box(self, data):
box, created = BoxDailySquare.objects.get_or_create(
user=data['user'],
office=data['office']
)
return box
|
nilq/baby-python
|
python
|
import os
import shutil
import subprocess
CONNECT_REPORTS_REPO_URL = 'https://github.com/cloudblue/connect-reports.git'
BASE_DIR = os.path.abspath(
os.path.normpath(
os.path.join(
os.path.dirname(__file__),
'..',
),
),
)
REPO_EMBED_DIR = os.path.join(
BASE_DIR,
'connect/.data/connect_reports',
)
def get_latest_reports():
if os.path.exists(REPO_EMBED_DIR):
shutil.rmtree(REPO_EMBED_DIR)
print(f'Cloning {CONNECT_REPORTS_REPO_URL}...')
subprocess.check_call(
[
'git',
'clone',
CONNECT_REPORTS_REPO_URL,
REPO_EMBED_DIR,
],
)
result = subprocess.run(
[
'git', '-C', REPO_EMBED_DIR,
'rev-list', '--tags', '--max-count=1',
],
capture_output=True,
stdin=subprocess.DEVNULL,
start_new_session=True,
)
result.check_returncode()
commit_id = result.stdout.decode().replace('\n', '')
print(f'Checkout latest tag ({commit_id})...')
subprocess.check_call(
[
'git',
'-C',
REPO_EMBED_DIR,
'checkout',
commit_id,
],
)
print(f'Latest reports saved in {REPO_EMBED_DIR}')
if __name__ == '__main__':
get_latest_reports()
|
nilq/baby-python
|
python
|
from scipy import integrate
def integrand(x0, x1, x2):
return x2 * x1**2 + x0
x2_lim = (0.0, 0.5)
x1_lim = lambda x2:(0.0, 1.0-2.0*x2)
x0_lim = lambda x1,x2:(-1.0, 1.0+2.0*x2-x1)
# int_{x2=0}^{0.5} int_{x1=0}^{1-2x2} int_{x0=-1}^{1+2x2-x1} (x2 x1**2 + x0) dx0 dx1 dx2
integral,error = integrate.nquad(integrand, [x0_lim, x1_lim, x2_lim])
print(integral, error)
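# Note on ordering: integrate.nquad expects the ranges listed from the
# innermost integration variable outward, and each range callable receives the
# remaining (outer) variables -- hence x0_lim(x1, x2), x1_lim(x2), and the
# list [x0_lim, x1_lim, x2_lim] above.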
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
import unittest
import platform
import warnings
import os
from xmlschema import XMLSchemaParseError, XMLSchemaIncludeWarning, XMLSchemaImportWarning
from xmlschema.etree import etree_element
from xmlschema.namespaces import SCHEMAS_DIR
from xmlschema.qnames import XSD_ELEMENT, XSI_TYPE
from xmlschema.validators import XMLSchema11
from xmlschema.testing import SKIP_REMOTE_TESTS, XsdValidatorTestCase, print_test_header
class TestXMLSchema10(XsdValidatorTestCase):
TEST_CASES_DIR = os.path.join(os.path.dirname(__file__), '../test_cases')
def test_schema_validation(self):
schema = self.schema_class(self.vh_xsd_file)
self.assertEqual(schema.validation, 'strict')
schema = self.schema_class(self.vh_xsd_file, validation='lax')
self.assertEqual(schema.validation, 'lax')
schema = self.schema_class(self.vh_xsd_file, validation='skip')
self.assertEqual(schema.validation, 'skip')
with self.assertRaises(ValueError):
self.schema_class(self.vh_xsd_file, validation='none')
def test_schema_string_repr(self):
schema = self.schema_class(self.vh_xsd_file)
tmpl = "%s(basename='vehicles.xsd', namespace='http://example.com/vehicles')"
self.assertEqual(str(schema), tmpl % self.schema_class.__name__)
def test_schema_copy(self):
schema = self.vh_schema.copy()
self.assertNotEqual(id(self.vh_schema), id(schema))
self.assertNotEqual(id(self.vh_schema.namespaces), id(schema.namespaces))
self.assertNotEqual(id(self.vh_schema.maps), id(schema.maps))
def test_resolve_qname(self):
schema = self.schema_class("""<xs:schema
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<xs:element name="root" />
</xs:schema>""")
self.assertEqual(schema.resolve_qname('xs:element'), XSD_ELEMENT)
self.assertEqual(schema.resolve_qname('xsi:type'), XSI_TYPE)
self.assertEqual(schema.resolve_qname(XSI_TYPE), XSI_TYPE)
self.assertEqual(schema.resolve_qname('element'), 'element')
self.assertRaises(ValueError, schema.resolve_qname, '')
self.assertRaises(ValueError, schema.resolve_qname, 'xsi:a type ')
self.assertRaises(ValueError, schema.resolve_qname, 'xml::lang')
def test_global_group_definitions(self):
schema = self.check_schema("""
<xs:group name="wrong_child">
<xs:element name="foo"/>
</xs:group>""", validation='lax')
self.assertEqual(len(schema.errors), 1)
self.check_schema('<xs:group name="empty" />', XMLSchemaParseError)
self.check_schema('<xs:group name="empty"><xs:annotation/></xs:group>', XMLSchemaParseError)
def test_wrong_includes_and_imports(self):
with warnings.catch_warnings(record=True) as context:
warnings.simplefilter("always")
self.check_schema("""
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" targetNamespace="ns">
<xs:include schemaLocation="example.xsd" />
<xs:import schemaLocation="example.xsd" />
<xs:redefine schemaLocation="example.xsd"/>
<xs:import namespace="http://missing.example.test/" />
<xs:import/>
</xs:schema>
""")
self.assertEqual(len(context), 3, "Wrong number of include/import warnings")
self.assertEqual(context[0].category, XMLSchemaIncludeWarning)
self.assertEqual(context[1].category, XMLSchemaIncludeWarning)
self.assertEqual(context[2].category, XMLSchemaImportWarning)
self.assertTrue(str(context[0].message).startswith("Include"))
self.assertTrue(str(context[1].message).startswith("Redefine"))
self.assertTrue(str(context[2].message).startswith("Import of namespace"))
def test_wrong_references(self):
# Wrong namespace for element type's reference
self.check_schema("""
<xs:element name="dimension" type="xs:dimensionType"/>
<xs:simpleType name="dimensionType">
<xs:restriction base="xs:short"/>
</xs:simpleType>
""", XMLSchemaParseError)
def test_annotations(self):
schema = self.check_schema("""
<xs:element name='foo'>
<xs:annotation />
</xs:element>""")
self.assertIsNotNone(schema.elements['foo'].annotation)
schema = self.check_schema("""
<xs:simpleType name='Magic'>
<xs:annotation>
<xs:documentation> stuff </xs:documentation>
</xs:annotation>
<xs:restriction base='xs:string'>
<xs:enumeration value='A'/>
</xs:restriction>
</xs:simpleType>""")
self.assertIsNotNone(schema.types["Magic"].annotation)
self.check_schema("""
<xs:simpleType name='Magic'>
<xs:annotation />
<xs:annotation />
<xs:restriction base='xs:string'>
<xs:enumeration value='A'/>
</xs:restriction>
</xs:simpleType>""", XMLSchemaParseError)
def test_base_schemas(self):
self.schema_class(os.path.join(SCHEMAS_DIR, 'xml_minimal.xsd'))
def test_root_elements(self):
# Test issue #107 fix
schema = self.schema_class("""<?xml version="1.0" encoding="utf-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root1" type="root"/>
<xs:element name="root2" type="root"/>
<xs:complexType name="root">
<xs:sequence>
<xs:element name="elementWithNoType"/>
</xs:sequence>
</xs:complexType>
</xs:schema>""")
self.assertEqual(set(schema.root_elements), {schema.elements['root1'], schema.elements['root2']})
def test_is_restriction_method(self):
# Test issue #111 fix
schema = self.schema_class(source=self.casepath('issues/issue_111/issue_111.xsd'))
extended_header_def = schema.types['extendedHeaderDef']
self.assertTrue(extended_header_def.is_derived(schema.types['blockDef']))
@unittest.skipIf(SKIP_REMOTE_TESTS or platform.system() == 'Windows',
"Remote networks are not accessible or avoid SSL verification error on Windows.")
def test_remote_schemas_loading(self):
col_schema = self.schema_class("https://raw.githubusercontent.com/brunato/xmlschema/master/"
"tests/test_cases/examples/collection/collection.xsd",
timeout=300)
self.assertTrue(isinstance(col_schema, self.schema_class))
vh_schema = self.schema_class("https://raw.githubusercontent.com/brunato/xmlschema/master/"
"tests/test_cases/examples/vehicles/vehicles.xsd",
timeout=300)
self.assertTrue(isinstance(vh_schema, self.schema_class))
def test_schema_defuse(self):
vh_schema = self.schema_class(self.vh_xsd_file, defuse='always')
self.assertIsInstance(vh_schema.root, etree_element)
for schema in vh_schema.maps.iter_schemas():
self.assertIsInstance(schema.root, etree_element)
class TestXMLSchema11(TestXMLSchema10):
schema_class = XMLSchema11
if __name__ == '__main__':
print_test_header()
unittest.main()
|
nilq/baby-python
|
python
|
"""Dyson new v2 pure Hot+Cool device."""
import logging
from .const import HeatMode
from .dyson_pure_cool import DysonPureCool
from .utils import printable_fields
_LOGGER = logging.getLogger(__name__)
class DysonPureHotCool(DysonPureCool):
"""Dyson new Pure Hot+Cool device."""
def _parse_command_args(self, **kwargs):
"""Parse command arguments.
:param kwargs: Arguments
:return: payload dictionary
"""
data = super()._parse_command_args(**kwargs)
heat_target = kwargs.get('heat_target')
heat_mode = kwargs.get('heat_mode')
f_heat_target = heat_target if heat_target \
else self._current_state.heat_target
f_heat_mode = heat_mode.value if heat_mode \
else self._current_state.heat_mode
data["hmax"] = f_heat_target
data["hmod"] = f_heat_mode
return data
def enable_heat_mode(self):
"""Turn on heat mode."""
data = {
"hmod": HeatMode.HEAT_ON.value
}
self.set_fan_configuration(data)
def disable_heat_mode(self):
"""Turn off heat mode."""
data = {
"hmod": HeatMode.HEAT_OFF.value
}
self.set_fan_configuration(data)
def set_heat_target(self, heat_target):
"""Set temperature target.
Use either const.HeatTarget.celsius or const.HeatTarget.fahrenheit
to get a string representation of the target temperature in kelvins.
ex. set_heat_target(const.HeatTarget.celsius(24))
:param heat_target: target temperature in Kelvin
"""
data = {
"hmax": heat_target
}
self.set_fan_configuration(data)
def __repr__(self):
"""Return a String representation."""
fields = self._fields()
return 'DysonPureHotCool(' + ",".join(
printable_fields(fields)) + ')'
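# Hypothetical usage sketch ('device' stands for an already-connected
# DysonPureHotCool instance; connection setup is omitted):
# from .const import HeatTarget
# device.set_heat_target(HeatTarget.celsius(24))
# device.enable_heat_mode()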
|
nilq/baby-python
|
python
|
from .data import COVID19India
from .mongo_db import get_data, upload_data
from .data_processing import get_daily_data, get_state_daily, get_interval_data
from .inshorts_news import InshortsNews
|
nilq/baby-python
|
python
|
from .csr import skeleton_to_csgraph, branch_statistics, summarize, Skeleton
__version__ = '0.10.0-dev'
__all__ = ['skeleton_to_csgraph',
'branch_statistics',
'summarize',
'Skeleton']
|
nilq/baby-python
|
python
|
from typing import List
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
if len(matrix) == 0:
self.dp = []
return
width, height = len(matrix[0]), len(matrix)
self.dp = [[0] * (width + 1) for _ in range(height + 1)]
for i in range(1, height+1):
for j in range(1, width+1):
# dp[i][j] = top + left + current matrix cell - top-left (double-counted)
self.dp[i][j] = self.dp[i-1][j] + self.dp[i][j-1] + matrix[i-1][j-1] - self.dp[i-1][j-1]
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
if len(self.dp) == 0:
return 0
s = self.dp[row2+1][col2+1] - self.dp[row2+1][col1] - self.dp[row1][col2+1] + self.dp[row1][col1]
return s
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
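# Worked example (illustrative):
# m = [[1, 2, 3],
#      [4, 5, 6]]
# NumMatrix(m).sumRegion(0, 1, 1, 2) == 2 + 3 + 5 + 6 == 16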
|
nilq/baby-python
|
python
|
# ############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# ############################################################################
from __future__ import print_function
from __future__ import absolute_import
import logging
import time
from datetime import datetime
from yardstick.benchmark.scenarios import base
from yardstick.common import openstack_utils
LOG = logging.getLogger(__name__)
class Resize(base.Scenario):
"""Execute a cold migration for two hosts
Parameters
server_id - ID of the server
type: string
unit: N/A
default: null
server- dict of the server
type: dict
unit: N/A
default: null
Either server_id or server is required.
flavor_id - ID of the flavor
type: string
unit: N/A
default: null
flavor- dict of the flavor
type: dict
unit: N/A
default: null
Either flavor_id or flavor is required.
Outputs
rc - response code of resize operation
0 for success
1 for failure
type: int
unit: N/A
resize_time - the duration time resize operation used
type: float
unit: N/A
default: null
error_message - the error message(only if fail to resize)
type: string
unit: N/A
default: null
"""
__scenario_type__ = "RESIZE"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.options = self.scenario_cfg.get('options', {})
self.nova_client = openstack_utils.get_nova_client()
def run(self, result):
default_server_id = self.options.get('server', {}).get('id', '')
server_id = self.options.get('server_id', default_server_id)
default_flavor_id = self.options.get('flavor', {}).get('id', '')
flavor_id = self.options.get('flavor_id', default_flavor_id)
LOG.debug('Server id is %s, Flavor id is %s', server_id, flavor_id)
keys = self.scenario_cfg.get('output', '').split()
LOG.info('Start to resize')
try:
self.nova_client.servers.resize(server_id, flavor_id)
except Exception as e:
values = [1, str(e).split('.')[0]]
else:
start_time = datetime.now()
self._wait_check_status(server_id, 'verify_resize')
LOG.info('Server status change to VERIFY_RESIZE')
LOG.info('Start to confirm resize')
self.nova_client.servers.confirm_resize(server_id)
self._wait_check_status(server_id, 'active')
LOG.info('Server status change to ACTIVE')
end_time = datetime.now()
LOG.info('Resize successful')
duration = end_time - start_time
resize_time = duration.seconds + duration.microseconds * 1.0 / 1e6
values = [0, resize_time]
return self._push_to_outputs(keys, values)
def _wait_check_status(self, server_id, wait_status):
while True:
status = self.nova_client.servers.get(server_id).status.lower()
if status == wait_status:
break
time.sleep(1)
|
nilq/baby-python
|
python
|
import os, sys, re, time
import urllib, urllib2
from BeautifulSoup import BeautifulSoup
#import beautifulsoup4
import gzip
from StringIO import StringIO
import MySQLdb
import simplejson as json
import datetime
import pandas as pd
import pymongo
#from cassandra.cluster import Cluster
import conf.config as config
from cryptocurry.crypto_settings import *
from requests import Request, Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
sleeptime = config.SLEEPTIME
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
infourl.code = code
return infourl
http_error_300 = http_error_302
http_error_301 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def decodeGzippedContent(encoded_content):
response_stream = StringIO(encoded_content)
decoded_content = ""
try:
gzipper = gzip.GzipFile(fileobj=response_stream)
decoded_content = gzipper.read()
except: # Maybe this isn't gzipped content after all....
decoded_content = encoded_content
return(decoded_content)
def getmongoclient():
client = pymongo.MongoClient(port=config.MONGO_PORT)
return client
def scrapeFromInvest():
url = "https://www.investing.com/crypto/"
opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler(), NoRedirectHandler())
http_headers = { 'User-Agent' : r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.110 Safari/537.36', 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' : 'en-US,en;q=0.8', 'Accept-Encoding' : 'gzip,deflate,sdch', 'Connection' : 'keep-alive', 'Host' : 'www.investing.com', 'Referer' : 'https://www.google.com' }
investing_request = urllib2.Request(url, None, http_headers)
investing_response = None
try:
investing_response = opener.open(investing_request)
except:
print "Could not get the raw cryptocurrency data - Error: %s\n"%sys.exc_info()[1].__str__()
return False
if not investing_response:
print "Could not retrieve response from the request to https://www.investing.com/crypto/"
return False
investing_data_enc = investing_response.read()
investing_data = decodeGzippedContent(investing_data_enc)
#print investing_data
soup = BeautifulSoup(investing_data)
datatds = soup.findAll("td", {'class' : 'flag'})
mongoconn = pymongo.MongoClient("mongodb://%s:%s@localhost:%s/cryptocurrency"%(config.MONGO_USER, config.MONGO_PASSWD, config.MONGO_PORT))
db = mongoconn.cryptocurrency
for td in datatds:
currnametd = td.findNext('td')
currname = currnametd['title']
currnametd = currnametd.findNext('td')
currsymbol = currnametd['title']
currnametd = currnametd.findNext('td')
currprice = currnametd.getText()
currprice = currprice.replace("$", "")
currprice = currprice.replace(",", "")
currnametd = currnametd.findNext('td')
market_cap = currnametd.getText()
market_cap = market_cap.replace("$", "")
currnametd = currnametd.findNext('td')
vol24h = currnametd.getText()
vol24h = vol24h.replace("$", "")
currnametd = currnametd.findNext('td')
totalvol = currnametd.getText()
totalvol = totalvol.replace('%', '')
currnametd = currnametd.findNext('td')
chg24h = currnametd.getText()
chg24h = chg24h.replace('+', "")
chg24h = chg24h.replace('%', "")
currnametd = currnametd.findNext('td')
chg7d = currnametd.getText()
chg7d = chg7d.replace('+', "")
chg7d = chg7d.replace('%', "")
mongodata = {'currency_name' : currname, 'currency_symbol' : currsymbol, 'currency_price' : currprice, 'market_cap' : market_cap, 'volume_24hr' : vol24h, 'total_volume' : totalvol, 'change_24hr' : chg24h, 'change_7days' : chg7d, 'entrydatetime' : str(datetime.datetime.now())}
try:
result = db.investdata.insert_one(mongodata)
except:
print "Could not enter data in mongo db. Error: %s\n"%sys.exc_info()[1].__str__()
print "Done collecting data from investing at %s...\n"%str(datetime.datetime.now())
return True
def getDataFromCoinMarket():
coinmarketapikey = "edc74898-5367-43bf-b3cb-2af1ab8b42b7"
opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler(), NoRedirectHandler())
http_headers = { 'User-Agent' : r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.110 Safari/537.36', 'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language' : 'en-US,en;q=0.8', 'Accept-Encoding' : 'gzip,deflate,sdch', 'Connection' : 'keep-alive', 'Host' : 'pro-api.coinmarketcap.com', 'X-CMC_PRO_API_KEY' : coinmarketapikey }
listings_latest_url = "https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?sort=market_cap&start=1&limit=50&convert=USD&cryptocurrency_type=coins"
listings_request = urllib2.Request(listings_latest_url, None, http_headers)
listings_response = None
try:
listings_response = opener.open(listings_request)
except:
print "Could not get the cryptocurrency listings data - Error: %s\n"%sys.exc_info()[1].__str__()
return False
if not listings_response:
print "Could not retrieve response from the request to https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest"
return False
listings_data_enc = listings_response.read()
listings_data = decodeGzippedContent(listings_data_enc)
#print listings_data
listings_dict = json.loads(listings_data)
listings_data_list = listings_dict['data']
curr_data_map = {}
mongoconn = pymongo.MongoClient("mongodb://%s:%s@localhost:%s/cryptocurrency"%(config.MONGO_USER, config.MONGO_PASSWD, config.MONGO_PORT))
db = mongoconn.cryptocurrency
for elemdict in listings_data_list:
idno = elemdict['id']
name = elemdict['name']
volume_24h = elemdict['quote']['USD']['volume_24h']
price = elemdict['quote']['USD']['price']
percent_change_1h = elemdict['quote']['USD']['percent_change_1h']
percent_change_24h = elemdict['quote']['USD']['percent_change_24h']
percent_change_7d = elemdict['quote']['USD']['percent_change_7d']
last_updated = elemdict['quote']['USD']['last_updated']
mongodata = {'idno' : str(idno), 'currency_name' : name, 'currency_price' : price, 'volume_24hr' : volume_24h, 'percent_change_1hr' : percent_change_1h, 'percent_change_24hr' : percent_change_24h, 'percent_change_7days' : percent_change_7d, 'last_updated' : last_updated, 'entrydatetime' : str(datetime.datetime.now())}
try:
result = db.coinmarketdata.insert_one(mongodata)
except:
print "Could not enter data in mongo db. Error: %s\n"%sys.exc_info()[1].__str__()
print "Collected data from coinmarket at %s...\n"%str(datetime.datetime.now())
return curr_data_map
"""
This uses the coinmarketcap API - Basic Plan (Free).
"""
def coinmarketcap():
url = COIN_MARKET_CAP_DOMAIN + '/v1/cryptocurrency/listings/latest'
parameters = {
'start':'1',
'limit':'100',
'convert':'USD'
}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': COIN_MARKET_CAP_API_KEY,
}
session = Session()
session.headers.update(headers)
try:
response = session.get(url, params=parameters)
data = json.loads(response.text)
except (ConnectionError, Timeout, TooManyRedirects) as e:
print(e)
print "Could not collect data from CoinMarketCap. Returning."
return 0
infolist = []
mongoconn = pymongo.MongoClient("mongodb://%s:%s@localhost:%s/cryptocurrency"%(config.MONGO_USER, config.MONGO_PASSWD, config.MONGO_PORT))
db = mongoconn.cryptocurrency
cryptocurrencydatalist = data[u'data']
for cryptodict in cryptocurrencydatalist:
last_updated, entrydatetime, cryptocurrname, cryptosymbol, marketcap,price, supply, volume, percent_change_1h, percent_change_24h, percent_change_7d = "", "", "", "", "", "", "", "", "", "", ""
entrydatetime = str(datetime.datetime.now())
if cryptodict.has_key('last_updated'):
last_updated = cryptodict['last_updated']
else:
last_updated = entrydatetime
if cryptodict.has_key(u'name'):
cryptocurrname = cryptodict[u'name']
else:
continue # If no name is found, then it is not of much use to us.
if cryptodict.has_key(u'symbol'):
cryptosymbol = cryptodict[u'symbol']
else:
cryptosymbol = cryptocurrname
if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'market_cap'):
marketcap = cryptodict[u'quote'][u'USD'][u'market_cap']
else:
marketcap = 0.00
if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'price'):
price = cryptodict[u'quote'][u'USD'][u'price']
else:
price = 0.00
if cryptodict.has_key(u'total_supply'):
supply = cryptodict['total_supply']
else:
supply = 0
if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'volume_24h'):
volume = cryptodict[u'quote'][u'USD'][u'volume_24h']
else:
volume = 0.00
if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'percent_change_1h'):
percent_change_1h = cryptodict[u'quote'][u'USD'][u'percent_change_1h']
else:
percent_change_1h = 0.00
if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'percent_change_24h'):
percent_change_24h = cryptodict[u'quote'][u'USD'][u'percent_change_24h']
else:
percent_change_24h = 0.00
if cryptodict.has_key(u'quote') and cryptodict[u'quote'].has_key('USD') and cryptodict[u'quote'][u'USD'].has_key(u'percent_change_7d'):
percent_change_7d = cryptodict[u'quote'][u'USD'][u'percent_change_7d']
else:
percent_change_7d = 0.00
valdict = {'currency_name' : cryptocurrname, 'currency_symbol' : cryptosymbol, 'marketcap' : marketcap, 'currency_price' : price, 'supply' : supply, 'volume' : volume, 'percent1hr' : percent_change_1h, 'percent24hr' : percent_change_24h, 'percent7d' : percent_change_7d, 'entrydatetime' : str(last_updated)}
infolist.append(valdict)
try:
result = db.coinmarketcapdata.insert_one(valdict)
#print valdict,"\n\n"
except:
print "Could not enter data in mongo db. Error: %s\n"%sys.exc_info()[1].__str__()
print "Collected data from coinmarketcap website.\n"
return infolist
"""
This is an index for 30 cryptocurrencies combined on some mathematical basis. This
information is useful to those who want to invest in cryptocurrencies and hedge
their risks by putting various sums in the 30 selected cryptocurrencies. In order to
know more, please refer to the explanation at https://cci30.com/
"""
def cci30index():
cci30url = "https://cci30.com/ajax/getIndexHistory.php"
opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler(), NoRedirectHandler())
    http_headers = {
        'User-Agent' : r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.110 Safari/537.36',
        'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language' : 'en-US,en;q=0.8',
        'Accept-Encoding' : 'gzip,deflate,sdch',
        'Connection' : 'keep-alive',
        'Host' : 'cci30.com',
        'Referer' : 'https://www.google.com'
    }
cci30_request = urllib2.Request(cci30url, None, http_headers)
cci30_response = None
try:
cci30_response = opener.open(cci30_request)
except:
print "Could not get the raw cryptocurrency data - Error: %s\n"%sys.exc_info()[1].__str__()
return False
content = decodeGzippedContent(cci30_response.read())
# content is a csv formatted data set
mongoconn = pymongo.MongoClient("mongodb://%s:%s@localhost:%s/cryptocurrency"%(config.MONGO_USER, config.MONGO_PASSWD, config.MONGO_PORT))
db = mongoconn.cryptocurrency
headers = []
records = []
alldata = []
datarecs = content.split("\n")
headers = datarecs[0].split(",")
    for i in range(len(headers)):
        headers[i] = headers[i].strip() # Remove whitespace characters
    for datastr in datarecs:
        datalist = datastr.split(",")
        for i in range(len(datalist)):
            datalist[i] = datalist[i].strip()
        records.append(datalist)
for recdata in records[1:]:
ictr = 0
datadict = {}
for rdata in recdata:
datadict[headers[ictr]] = rdata
ictr += 1
            if ictr == len(headers):
break
try:
result = db.cci30data.insert_one(datadict)
alldata.append(datadict)
except:
print "Error: ", sys.exc_info()[1].__str__(), "\n"
print "collected data from cci30 index at %s"%datetime.datetime.now()
return alldata
"""
There doesn't seem to be any fucking location that provides a feed, either as an
API or as some screen data. How do I get the data from this asshole? Don't say I
have to pay to get it, 'cause if that is so, then they are going to get troubled
by illegal means.... Accidents happen all the time, buildings collapse for
no apparent reason, fire breaks out for myriad reasons, bank accounts get hacked,
footage of senior executives in a compromizing situations come out of nowhere,
people show up at the wrong place at the wrong time, and then they vanish...
Hmmmmm.... your actions route your life.
"""
def bloombergcryptoindex():
url = "https://www.bloomberg.com/professional/product/indices/bloomberg-galaxy-crypto-index/"
def collectionEventLoop(scraper_functions_list):
lasttime = 0
while True:
currtime = time.time()
if currtime - lasttime < sleeptime: # if we scraped within the last 'sleeptime', we go to sleep
time.sleep(sleeptime)
continue
        for scraper in scraper_functions_list:
            scraper()
lasttime = currtime
if __name__ == "__main__":
scraperslist = [scrapeFromInvest, getDataFromCoinMarket, coinmarketcap, cci30index,] # Add scraper functions here.
# scraperslist = [scrapeFromInvest, getDataFromCoinMarket, cci30index,] # Add scraper functions here.
collectionEventLoop(scraperslist)
n = input('Type something: ')
print('The primitive type of what was typed is: {}'.format(type(n)))
print('Is it numeric? {}'.format(n.isnumeric())) # True if every character is a digit
print('Is it alphabetic? {}'.format(n.isalpha())) # True if every character is a letter
print('Is it alphanumeric? {}'.format(n.isalnum())) # True if every character is a letter or a digit
print('Is it all upper case? {}'.format(n.isupper())) # True if all cased characters are upper case
print('Is it within the ASCII table? {}'.format(n.isascii())) # True if every character is ASCII
print('Is it only spaces? {}'.format(n.isspace()))
import sys
from rpython.tool.pairtype import pairtype
from rpython.flowspace.model import Constant
from rpython.rtyper.rdict import AbstractDictRepr, AbstractDictIteratorRepr
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rlib import objectmodel, jit, rgc, types
from rpython.rlib.signature import signature
from rpython.rlib.objectmodel import specialize, likely
from rpython.rtyper.debug import ll_assert
from rpython.rlib.rarithmetic import r_uint, intmask
from rpython.rtyper import rmodel
from rpython.rtyper.error import TyperError
from rpython.rtyper.annlowlevel import llhelper
# ____________________________________________________________
#
# generic implementation of RPython dictionary, with parametric DICTKEY and
# DICTVALUE types. The basic implementation is a sparse array of indexes
# plus a dense array of structs that contain keys and values. struct looks
# like that:
#
#
# struct dictentry {
# DICTKEY key;
# DICTVALUE value;
# long f_hash; # (optional) key hash, if hard to recompute
# bool f_valid; # (optional) the entry is filled
# }
#
# struct dicttable {
# int num_live_items;
# int num_ever_used_items;
# int resize_counter;
# {byte, short, int, long} *indexes;
# dictentry *entries;
# lookup_function_no; # one of the four possible functions for different
# # size dicts; the rest of the word is a counter for how
# # many 'entries' at the start are known to be deleted
# (Function DICTKEY, DICTKEY -> bool) *fnkeyeq;
# (Function DICTKEY -> int) *fnkeyhash;
# }
#
#
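# A minimal pure-Python sketch of this layout (illustrative only, not part of
# the RPython implementation; it assumes the FREE and VALID_OFFSET encoding
# defined further down, and uses naive linear probing instead of the real
# perturb-based probing):
#
#     def _toy_insert(indexes, entries, key, value):
#         FREE, VALID_OFFSET = 0, 2
#         mask = len(indexes) - 1
#         slot = hash(key) & mask
#         while indexes[slot] != FREE:         # probe until a free slot
#             slot = (slot + 1) & mask
#         indexes[slot] = len(entries) + VALID_OFFSET
#         entries.append((key, value))         # dense array keeps insertion order
#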
@jit.look_inside_iff(lambda d, key, hash, flag: jit.isvirtual(d))
@jit.oopspec('ordereddict.lookup(d, key, hash, flag)')
def ll_call_lookup_function(d, key, hash, flag):
fun = d.lookup_function_no & FUNC_MASK
# This likely() here forces gcc to compile the check for fun == FUNC_BYTE
# first. Otherwise, this is a regular switch and gcc (at least 4.7)
# compiles this as a series of checks, with the FUNC_BYTE case last.
# It sounds minor, but it is worth 6-7% on a PyPy microbenchmark.
if likely(fun == FUNC_BYTE):
return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE)
elif fun == FUNC_SHORT:
return ll_dict_lookup(d, key, hash, flag, TYPE_SHORT)
elif IS_64BIT and fun == FUNC_INT:
return ll_dict_lookup(d, key, hash, flag, TYPE_INT)
elif fun == FUNC_LONG:
return ll_dict_lookup(d, key, hash, flag, TYPE_LONG)
assert False
def get_ll_dict(DICTKEY, DICTVALUE, get_custom_eq_hash=None, DICT=None,
ll_fasthash_function=None, ll_hash_function=None,
ll_eq_function=None, method_cache={},
dummykeyobj=None, dummyvalueobj=None, rtyper=None):
# get the actual DICT type. if DICT is None, it's created, otherwise
# forward reference is becoming DICT
if DICT is None:
DICT = lltype.GcForwardReference()
# compute the shape of the DICTENTRY structure
entryfields = []
entrymeths = {
'allocate': lltype.typeMethod(_ll_malloc_entries),
'delete': _ll_free_entries,
'must_clear_key': (isinstance(DICTKEY, lltype.Ptr)
and DICTKEY._needsgc()),
'must_clear_value': (isinstance(DICTVALUE, lltype.Ptr)
and DICTVALUE._needsgc()),
}
if getattr(ll_eq_function, 'no_direct_compare', False):
entrymeths['no_direct_compare'] = True
# * the key
entryfields.append(("key", DICTKEY))
# * the state of the entry - trying to encode it as dummy objects
if dummykeyobj:
# all the state can be encoded in the key
entrymeths['dummy_obj'] = dummykeyobj
entrymeths['valid'] = ll_valid_from_key
entrymeths['mark_deleted'] = ll_mark_deleted_in_key
# the key is overwritten by 'dummy' when the entry is deleted
entrymeths['must_clear_key'] = False
elif dummyvalueobj:
# all the state can be encoded in the value
entrymeths['dummy_obj'] = dummyvalueobj
entrymeths['valid'] = ll_valid_from_value
entrymeths['mark_deleted'] = ll_mark_deleted_in_value
# value is overwritten by 'dummy' when entry is deleted
entrymeths['must_clear_value'] = False
else:
# we need a flag to know if the entry was ever used
entryfields.append(("f_valid", lltype.Bool))
entrymeths['valid'] = ll_valid_from_flag
entrymeths['mark_deleted'] = ll_mark_deleted_in_flag
# * the value
entryfields.append(("value", DICTVALUE))
if ll_fasthash_function is None:
entryfields.append(("f_hash", lltype.Signed))
entrymeths['hash'] = ll_hash_from_cache
else:
entrymeths['hash'] = ll_hash_recomputed
entrymeths['fasthashfn'] = ll_fasthash_function
# Build the lltype data structures
DICTENTRY = lltype.Struct("odictentry", *entryfields)
DICTENTRYARRAY = lltype.GcArray(DICTENTRY,
adtmeths=entrymeths)
fields = [ ("num_live_items", lltype.Signed),
("num_ever_used_items", lltype.Signed),
("resize_counter", lltype.Signed),
("indexes", llmemory.GCREF),
("lookup_function_no", lltype.Signed),
("entries", lltype.Ptr(DICTENTRYARRAY)) ]
if get_custom_eq_hash is not None:
r_rdict_eqfn, r_rdict_hashfn = get_custom_eq_hash()
fields.extend([ ("fnkeyeq", r_rdict_eqfn.lowleveltype),
("fnkeyhash", r_rdict_hashfn.lowleveltype) ])
adtmeths = {
'keyhash': ll_keyhash_custom,
'keyeq': ll_keyeq_custom,
'r_rdict_eqfn': r_rdict_eqfn,
'r_rdict_hashfn': r_rdict_hashfn,
'paranoia': True,
}
else:
# figure out which functions must be used to hash and compare
ll_keyhash = ll_hash_function
ll_keyeq = ll_eq_function
ll_keyhash = lltype.staticAdtMethod(ll_keyhash)
if ll_keyeq is not None:
ll_keyeq = lltype.staticAdtMethod(ll_keyeq)
adtmeths = {
'keyhash': ll_keyhash,
'keyeq': ll_keyeq,
'paranoia': False,
}
adtmeths['KEY'] = DICTKEY
adtmeths['VALUE'] = DICTVALUE
adtmeths['lookup_function'] = lltype.staticAdtMethod(ll_call_lookup_function)
adtmeths['allocate'] = lltype.typeMethod(_ll_malloc_dict)
DICT.become(lltype.GcStruct("dicttable", adtmeths=adtmeths,
*fields))
return DICT
class OrderedDictRepr(AbstractDictRepr):
def __init__(self, rtyper, key_repr, value_repr, dictkey, dictvalue,
custom_eq_hash=None, force_non_null=False):
#assert not force_non_null
self.rtyper = rtyper
self.finalized = False
self.DICT = lltype.GcForwardReference()
self.lowleveltype = lltype.Ptr(self.DICT)
self.custom_eq_hash = custom_eq_hash is not None
if not isinstance(key_repr, rmodel.Repr): # not computed yet, done by setup()
assert callable(key_repr)
self._key_repr_computer = key_repr
else:
self.external_key_repr, self.key_repr = self.pickkeyrepr(key_repr)
if not isinstance(value_repr, rmodel.Repr): # not computed yet, done by setup()
assert callable(value_repr)
self._value_repr_computer = value_repr
else:
self.external_value_repr, self.value_repr = self.pickrepr(value_repr)
self.dictkey = dictkey
self.dictvalue = dictvalue
self.dict_cache = {}
self._custom_eq_hash_repr = custom_eq_hash
# setup() needs to be called to finish this initialization
def _externalvsinternal(self, rtyper, item_repr):
return rmodel.externalvsinternal(self.rtyper, item_repr)
def _setup_repr(self):
if 'key_repr' not in self.__dict__:
key_repr = self._key_repr_computer()
self.external_key_repr, self.key_repr = self.pickkeyrepr(key_repr)
if 'value_repr' not in self.__dict__:
self.external_value_repr, self.value_repr = self.pickrepr(self._value_repr_computer())
if isinstance(self.DICT, lltype.GcForwardReference):
DICTKEY = self.key_repr.lowleveltype
DICTVALUE = self.value_repr.lowleveltype
# * we need an explicit flag if the key and the value is not
# able to store dummy values
s_key = self.dictkey.s_value
s_value = self.dictvalue.s_value
kwd = {}
if self.custom_eq_hash:
self.r_rdict_eqfn, self.r_rdict_hashfn = (
self._custom_eq_hash_repr())
kwd['get_custom_eq_hash'] = self._custom_eq_hash_repr
else:
kwd['ll_hash_function'] = self.key_repr.get_ll_hash_function()
kwd['ll_eq_function'] = self.key_repr.get_ll_eq_function()
kwd['ll_fasthash_function'] = self.key_repr.get_ll_fasthash_function()
kwd['dummykeyobj'] = self.key_repr.get_ll_dummyval_obj(self.rtyper,
s_key)
kwd['dummyvalueobj'] = self.value_repr.get_ll_dummyval_obj(
self.rtyper, s_value)
get_ll_dict(DICTKEY, DICTVALUE, DICT=self.DICT,
rtyper=self.rtyper, **kwd)
def convert_const(self, dictobj):
from rpython.rtyper.lltypesystem import llmemory
# get object from bound dict methods
#dictobj = getattr(dictobj, '__self__', dictobj)
if dictobj is None:
return lltype.nullptr(self.DICT)
if not isinstance(dictobj, (dict, objectmodel.r_dict)):
raise TypeError("expected a dict: %r" % (dictobj,))
try:
key = Constant(dictobj)
return self.dict_cache[key]
except KeyError:
self.setup()
self.setup_final()
l_dict = ll_newdict_size(self.DICT, len(dictobj))
self.dict_cache[key] = l_dict
r_key = self.key_repr
if r_key.lowleveltype == llmemory.Address:
raise TypeError("No prebuilt dicts of address keys")
r_value = self.value_repr
if isinstance(dictobj, objectmodel.r_dict):
if self.r_rdict_eqfn.lowleveltype != lltype.Void:
l_fn = self.r_rdict_eqfn.convert_const(dictobj.key_eq)
l_dict.fnkeyeq = l_fn
if self.r_rdict_hashfn.lowleveltype != lltype.Void:
l_fn = self.r_rdict_hashfn.convert_const(dictobj.key_hash)
l_dict.fnkeyhash = l_fn
for dictkeycontainer, dictvalue in dictobj._dict.items():
llkey = r_key.convert_const(dictkeycontainer.key)
llvalue = r_value.convert_const(dictvalue)
_ll_dict_insertclean(l_dict, llkey, llvalue,
dictkeycontainer.hash)
return l_dict
else:
for dictkey, dictvalue in dictobj.items():
llkey = r_key.convert_const(dictkey)
llvalue = r_value.convert_const(dictvalue)
_ll_dict_insertclean(l_dict, llkey, llvalue,
l_dict.keyhash(llkey))
return l_dict
def rtype_len(self, hop):
v_dict, = hop.inputargs(self)
return hop.gendirectcall(ll_dict_len, v_dict)
def rtype_bool(self, hop):
v_dict, = hop.inputargs(self)
return hop.gendirectcall(ll_dict_bool, v_dict)
def make_iterator_repr(self, *variant):
return DictIteratorRepr(self, *variant)
def rtype_method_get(self, hop):
v_dict, v_key, v_default = hop.inputargs(self, self.key_repr,
self.value_repr)
hop.exception_cannot_occur()
v_res = hop.gendirectcall(ll_dict_get, v_dict, v_key, v_default)
return self.recast_value(hop.llops, v_res)
def rtype_method_setdefault(self, hop):
v_dict, v_key, v_default = hop.inputargs(self, self.key_repr,
self.value_repr)
hop.exception_cannot_occur()
v_res = hop.gendirectcall(ll_dict_setdefault, v_dict, v_key, v_default)
return self.recast_value(hop.llops, v_res)
def rtype_method_copy(self, hop):
v_dict, = hop.inputargs(self)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_dict_copy, v_dict)
def rtype_method_update(self, hop):
v_dic1, v_dic2 = hop.inputargs(self, self)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_dict_update, v_dic1, v_dic2)
def rtype_method__prepare_dict_update(self, hop):
v_dict, v_num = hop.inputargs(self, lltype.Signed)
hop.exception_cannot_occur()
hop.gendirectcall(ll_prepare_dict_update, v_dict, v_num)
def _rtype_method_kvi(self, hop, ll_func):
v_dic, = hop.inputargs(self)
r_list = hop.r_result
cLIST = hop.inputconst(lltype.Void, r_list.lowleveltype.TO)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_func, cLIST, v_dic)
def rtype_method_keys(self, hop):
return self._rtype_method_kvi(hop, ll_dict_keys)
def rtype_method_values(self, hop):
return self._rtype_method_kvi(hop, ll_dict_values)
def rtype_method_items(self, hop):
return self._rtype_method_kvi(hop, ll_dict_items)
def rtype_bltn_list(self, hop):
return self._rtype_method_kvi(hop, ll_dict_keys)
def rtype_method_iterkeys(self, hop):
hop.exception_cannot_occur()
return DictIteratorRepr(self, "keys").newiter(hop)
def rtype_method_itervalues(self, hop):
hop.exception_cannot_occur()
return DictIteratorRepr(self, "values").newiter(hop)
def rtype_method_iteritems(self, hop):
hop.exception_cannot_occur()
return DictIteratorRepr(self, "items").newiter(hop)
def rtype_method_iterkeys_with_hash(self, hop):
hop.exception_cannot_occur()
return DictIteratorRepr(self, "keys_with_hash").newiter(hop)
def rtype_method_iteritems_with_hash(self, hop):
hop.exception_cannot_occur()
return DictIteratorRepr(self, "items_with_hash").newiter(hop)
def rtype_method_clear(self, hop):
v_dict, = hop.inputargs(self)
hop.exception_cannot_occur()
return hop.gendirectcall(ll_dict_clear, v_dict)
def rtype_method_popitem(self, hop):
v_dict, = hop.inputargs(self)
r_tuple = hop.r_result
cTUPLE = hop.inputconst(lltype.Void, r_tuple.lowleveltype)
hop.exception_is_here()
return hop.gendirectcall(ll_dict_popitem, cTUPLE, v_dict)
def rtype_method_pop(self, hop):
if hop.nb_args == 2:
v_args = hop.inputargs(self, self.key_repr)
target = ll_dict_pop
elif hop.nb_args == 3:
v_args = hop.inputargs(self, self.key_repr, self.value_repr)
target = ll_dict_pop_default
hop.exception_is_here()
v_res = hop.gendirectcall(target, *v_args)
return self.recast_value(hop.llops, v_res)
def rtype_method_contains_with_hash(self, hop):
v_dict, v_key, v_hash = hop.inputargs(self, self.key_repr,
lltype.Signed)
hop.exception_is_here()
return hop.gendirectcall(ll_dict_contains_with_hash,
v_dict, v_key, v_hash)
def rtype_method_setitem_with_hash(self, hop):
v_dict, v_key, v_hash, v_value = hop.inputargs(
self, self.key_repr, lltype.Signed, self.value_repr)
if self.custom_eq_hash:
hop.exception_is_here()
else:
hop.exception_cannot_occur()
hop.gendirectcall(ll_dict_setitem_with_hash,
v_dict, v_key, v_hash, v_value)
def rtype_method_getitem_with_hash(self, hop):
v_dict, v_key, v_hash = hop.inputargs(
self, self.key_repr, lltype.Signed)
if not self.custom_eq_hash:
hop.has_implicit_exception(KeyError) # record that we know about it
hop.exception_is_here()
v_res = hop.gendirectcall(ll_dict_getitem_with_hash,
v_dict, v_key, v_hash)
return self.recast_value(hop.llops, v_res)
def rtype_method_delitem_with_hash(self, hop):
v_dict, v_key, v_hash = hop.inputargs(
self, self.key_repr, lltype.Signed)
if not self.custom_eq_hash:
hop.has_implicit_exception(KeyError) # record that we know about it
hop.exception_is_here()
hop.gendirectcall(ll_dict_delitem_with_hash, v_dict, v_key, v_hash)
class __extend__(pairtype(OrderedDictRepr, rmodel.Repr)):
def rtype_getitem((r_dict, r_key), hop):
v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr)
if not r_dict.custom_eq_hash:
hop.has_implicit_exception(KeyError) # record that we know about it
hop.exception_is_here()
v_res = hop.gendirectcall(ll_dict_getitem, v_dict, v_key)
return r_dict.recast_value(hop.llops, v_res)
def rtype_delitem((r_dict, r_key), hop):
v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr)
if not r_dict.custom_eq_hash:
hop.has_implicit_exception(KeyError) # record that we know about it
hop.exception_is_here()
hop.gendirectcall(ll_dict_delitem, v_dict, v_key)
def rtype_setitem((r_dict, r_key), hop):
v_dict, v_key, v_value = hop.inputargs(r_dict, r_dict.key_repr, r_dict.value_repr)
if r_dict.custom_eq_hash:
hop.exception_is_here()
else:
hop.exception_cannot_occur()
hop.gendirectcall(ll_dict_setitem, v_dict, v_key, v_value)
def rtype_contains((r_dict, r_key), hop):
v_dict, v_key = hop.inputargs(r_dict, r_dict.key_repr)
hop.exception_is_here()
return hop.gendirectcall(ll_dict_contains, v_dict, v_key)
class __extend__(pairtype(OrderedDictRepr, OrderedDictRepr)):
def convert_from_to((r_dict1, r_dict2), v, llops):
# check that we don't convert from Dicts with
# different key/value types
if r_dict1.dictkey is None or r_dict2.dictkey is None:
return NotImplemented
if r_dict1.dictkey is not r_dict2.dictkey:
return NotImplemented
if r_dict1.dictvalue is None or r_dict2.dictvalue is None:
return NotImplemented
if r_dict1.dictvalue is not r_dict2.dictvalue:
return NotImplemented
return v
# ____________________________________________________________
#
# Low-level methods. These can be run for testing, but are meant to
# be direct_call'ed from rtyped flow graphs, which means that they will
# get flowed and annotated, mostly with SomePtr.
DICTINDEX_LONG = lltype.Ptr(lltype.GcArray(lltype.Unsigned))
DICTINDEX_INT = lltype.Ptr(lltype.GcArray(rffi.UINT))
DICTINDEX_SHORT = lltype.Ptr(lltype.GcArray(rffi.USHORT))
DICTINDEX_BYTE = lltype.Ptr(lltype.GcArray(rffi.UCHAR))
IS_64BIT = sys.maxint != 2 ** 31 - 1
FUNC_SHIFT = 2
FUNC_MASK = 0x03 # two bits
if IS_64BIT:
FUNC_BYTE, FUNC_SHORT, FUNC_INT, FUNC_LONG = range(4)
else:
FUNC_BYTE, FUNC_SHORT, FUNC_LONG = range(3)
TYPE_BYTE = rffi.UCHAR
TYPE_SHORT = rffi.USHORT
TYPE_INT = rffi.UINT
TYPE_LONG = lltype.Unsigned
def ll_malloc_indexes_and_choose_lookup(d, n):
# keep in sync with ll_clear_indexes() below
if n <= 256:
d.indexes = lltype.cast_opaque_ptr(llmemory.GCREF,
lltype.malloc(DICTINDEX_BYTE.TO, n,
zero=True))
d.lookup_function_no = FUNC_BYTE
elif n <= 65536:
d.indexes = lltype.cast_opaque_ptr(llmemory.GCREF,
lltype.malloc(DICTINDEX_SHORT.TO, n,
zero=True))
d.lookup_function_no = FUNC_SHORT
elif IS_64BIT and n <= 2 ** 32:
d.indexes = lltype.cast_opaque_ptr(llmemory.GCREF,
lltype.malloc(DICTINDEX_INT.TO, n,
zero=True))
d.lookup_function_no = FUNC_INT
else:
d.indexes = lltype.cast_opaque_ptr(llmemory.GCREF,
lltype.malloc(DICTINDEX_LONG.TO, n,
zero=True))
d.lookup_function_no = FUNC_LONG
def ll_clear_indexes(d, n):
fun = d.lookup_function_no & FUNC_MASK
d.lookup_function_no = fun
if fun == FUNC_BYTE:
rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_BYTE, d.indexes))
elif fun == FUNC_SHORT:
rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_SHORT, d.indexes))
elif IS_64BIT and fun == FUNC_INT:
rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_INT, d.indexes))
elif fun == FUNC_LONG:
rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes))
else:
assert False
@jit.dont_look_inside
def ll_call_insert_clean_function(d, hash, i):
fun = d.lookup_function_no & FUNC_MASK
if fun == FUNC_BYTE:
ll_dict_store_clean(d, hash, i, TYPE_BYTE)
elif fun == FUNC_SHORT:
ll_dict_store_clean(d, hash, i, TYPE_SHORT)
elif IS_64BIT and fun == FUNC_INT:
ll_dict_store_clean(d, hash, i, TYPE_INT)
elif fun == FUNC_LONG:
ll_dict_store_clean(d, hash, i, TYPE_LONG)
else:
assert False
def ll_call_delete_by_entry_index(d, hash, i):
fun = d.lookup_function_no & FUNC_MASK
if fun == FUNC_BYTE:
ll_dict_delete_by_entry_index(d, hash, i, TYPE_BYTE)
elif fun == FUNC_SHORT:
ll_dict_delete_by_entry_index(d, hash, i, TYPE_SHORT)
elif IS_64BIT and fun == FUNC_INT:
ll_dict_delete_by_entry_index(d, hash, i, TYPE_INT)
elif fun == FUNC_LONG:
ll_dict_delete_by_entry_index(d, hash, i, TYPE_LONG)
else:
assert False
def ll_valid_from_flag(entries, i):
return entries[i].f_valid
def ll_valid_from_key(entries, i):
ENTRIES = lltype.typeOf(entries).TO
dummy = ENTRIES.dummy_obj.ll_dummy_value
return entries[i].key != dummy
def ll_valid_from_value(entries, i):
ENTRIES = lltype.typeOf(entries).TO
dummy = ENTRIES.dummy_obj.ll_dummy_value
return entries[i].value != dummy
def ll_mark_deleted_in_flag(entries, i):
entries[i].f_valid = False
def ll_mark_deleted_in_key(entries, i):
ENTRIES = lltype.typeOf(entries).TO
dummy = ENTRIES.dummy_obj.ll_dummy_value
entries[i].key = dummy
def ll_mark_deleted_in_value(entries, i):
ENTRIES = lltype.typeOf(entries).TO
dummy = ENTRIES.dummy_obj.ll_dummy_value
entries[i].value = dummy
@signature(types.any(), types.int(), returns=types.any())
def ll_hash_from_cache(entries, i):
return entries[i].f_hash
@signature(types.any(), types.int(), returns=types.any())
def ll_hash_recomputed(entries, i):
ENTRIES = lltype.typeOf(entries).TO
return ENTRIES.fasthashfn(entries[i].key)
def ll_keyhash_custom(d, key):
DICT = lltype.typeOf(d).TO
return objectmodel.hlinvoke(DICT.r_rdict_hashfn, d.fnkeyhash, key)
def ll_keyeq_custom(d, key1, key2):
DICT = lltype.typeOf(d).TO
return objectmodel.hlinvoke(DICT.r_rdict_eqfn, d.fnkeyeq, key1, key2)
def ll_dict_len(d):
return d.num_live_items
def ll_dict_bool(d):
# check if a dict is True, allowing for None
return bool(d) and d.num_live_items != 0
def ll_dict_getitem(d, key):
return ll_dict_getitem_with_hash(d, key, d.keyhash(key))
def ll_dict_getitem_with_hash(d, key, hash):
index = d.lookup_function(d, key, hash, FLAG_LOOKUP)
if index >= 0:
return d.entries[index].value
else:
raise KeyError
def ll_dict_setitem(d, key, value):
ll_dict_setitem_with_hash(d, key, d.keyhash(key), value)
def ll_dict_setitem_with_hash(d, key, hash, value):
index = d.lookup_function(d, key, hash, FLAG_STORE)
_ll_dict_setitem_lookup_done(d, key, value, hash, index)
# It may be safe to look inside always, it has a few branches though, and their
# frequencies needs to be investigated.
@jit.look_inside_iff(lambda d, key, value, hash, i: jit.isvirtual(d) and jit.isconstant(key))
def _ll_dict_setitem_lookup_done(d, key, value, hash, i):
ENTRY = lltype.typeOf(d.entries).TO.OF
if i >= 0:
entry = d.entries[i]
entry.value = value
else:
reindexed = False
if len(d.entries) == d.num_ever_used_items:
try:
reindexed = ll_dict_grow(d)
except:
_ll_dict_rescue(d)
raise
rc = d.resize_counter - 3
if rc <= 0:
try:
ll_dict_resize(d)
reindexed = True
except:
_ll_dict_rescue(d)
raise
rc = d.resize_counter - 3
ll_assert(rc > 0, "ll_dict_resize failed?")
if reindexed:
ll_call_insert_clean_function(d, hash, d.num_ever_used_items)
#
d.resize_counter = rc
entry = d.entries[d.num_ever_used_items]
entry.key = key
entry.value = value
if hasattr(ENTRY, 'f_hash'):
entry.f_hash = hash
if hasattr(ENTRY, 'f_valid'):
entry.f_valid = True
d.num_ever_used_items += 1
d.num_live_items += 1
@jit.dont_look_inside
def _ll_dict_rescue(d):
# MemoryError situation! The 'indexes' contains an invalid entry
# at this point. But we can call ll_dict_reindex() with the
# following arguments, ensuring no further malloc occurs.
ll_dict_reindex(d, _ll_len_of_d_indexes(d))
_ll_dict_rescue._dont_inline_ = True
def _ll_dict_insertclean(d, key, value, hash):
# never translated
ENTRY = lltype.typeOf(d.entries).TO.OF
ll_call_insert_clean_function(d, hash, d.num_ever_used_items)
entry = d.entries[d.num_ever_used_items]
entry.key = key
entry.value = value
if hasattr(ENTRY, 'f_hash'):
entry.f_hash = hash
if hasattr(ENTRY, 'f_valid'):
entry.f_valid = True
d.num_ever_used_items += 1
d.num_live_items += 1
rc = d.resize_counter - 3
d.resize_counter = rc
def _ll_len_of_d_indexes(d):
# xxx Haaaack: returns len(d.indexes). Works independently of
# the exact type pointed to by d, using a forced cast...
# Must only be called by @jit.dont_look_inside functions.
return lltype.length_of_simple_gcarray_from_opaque(d.indexes)
def _overallocate_entries_len(baselen):
# This over-allocates proportional to the list size, making room
# for additional growth. This over-allocates slightly more eagerly
# than with regular lists. The idea is that there are many more
# lists than dicts around in PyPy, and dicts of 5 to 8 items are
# not that rare (so a single jump from 0 to 8 is a good idea).
# The growth pattern is: 0, 8, 17, 27, 38, 50, 64, 80, 98, ...
newsize = baselen + (baselen >> 3)
return newsize + 8
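# Worked example of the pattern above: 0 -> 0 + (0 >> 3) + 8 = 8,
# 8 -> 8 + (8 >> 3) + 8 = 17, 17 -> 17 + 2 + 8 = 27, 27 -> 27 + 3 + 8 = 38.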
@jit.look_inside_iff(lambda d: jit.isvirtual(d))
def ll_dict_grow(d):
# note: this @jit.look_inside_iff is here to inline the three lines
# at the end of this function. It's important because dicts start
# with a length-zero 'd.entries' which must be grown as soon as we
# insert an element.
if d.num_live_items < d.num_ever_used_items // 2:
# At least 50% of the allocated entries are dead, so perform a
# compaction. If ll_dict_remove_deleted_items detects that over
# 75% of allocated entries are dead, then it will also shrink the
# memory allocated at the same time as doing a compaction.
ll_dict_remove_deleted_items(d)
return True
new_allocated = _overallocate_entries_len(len(d.entries))
# Detect a relatively rare case where the indexes numeric type is too
# small to store all the entry indexes: there would be 'new_allocated'
# entries, which may in corner cases be larger than 253 even though we
# have single bytes in 'd.indexes' (and the same for the larger
# boundaries). The 'd.indexes' hashtable is never more than 2/3rd
# full, so we know that 'd.num_live_items' should be at most 2/3 * 256
# (or 65536 or etc.) so after the ll_dict_remove_deleted_items() below
# at least 1/3rd items in 'd.entries' are free.
fun = d.lookup_function_no & FUNC_MASK
toobig = False
if fun == FUNC_BYTE:
assert d.num_live_items < ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES)
toobig = new_allocated > ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES)
elif fun == FUNC_SHORT:
assert d.num_live_items < ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES)
toobig = new_allocated > ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES)
elif IS_64BIT and fun == FUNC_INT:
assert d.num_live_items < ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES)
toobig = new_allocated > ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES)
#
if toobig:
ll_dict_remove_deleted_items(d)
assert d.num_live_items == d.num_ever_used_items
return True
newitems = lltype.malloc(lltype.typeOf(d).TO.entries.TO, new_allocated)
rgc.ll_arraycopy(d.entries, newitems, 0, 0, len(d.entries))
d.entries = newitems
return False
@jit.dont_look_inside
def ll_dict_remove_deleted_items(d):
if d.num_live_items < len(d.entries) // 4:
# At least 75% of the allocated entries are dead, so shrink the memory
# allocated as well as doing a compaction.
new_allocated = _overallocate_entries_len(d.num_live_items)
newitems = lltype.malloc(lltype.typeOf(d).TO.entries.TO, new_allocated)
else:
newitems = d.entries
# The loop below does a lot of writes into 'newitems'. It's a better
# idea to do a single gc_writebarrier rather than activating the
# card-by-card logic (worth 11% in microbenchmarks).
from rpython.rtyper.lltypesystem.lloperation import llop
llop.gc_writebarrier(lltype.Void, newitems)
#
ENTRIES = lltype.typeOf(d).TO.entries.TO
ENTRY = ENTRIES.OF
isrc = 0
idst = 0
isrclimit = d.num_ever_used_items
while isrc < isrclimit:
if d.entries.valid(isrc):
src = d.entries[isrc]
dst = newitems[idst]
dst.key = src.key
dst.value = src.value
if hasattr(ENTRY, 'f_hash'):
dst.f_hash = src.f_hash
if hasattr(ENTRY, 'f_valid'):
assert src.f_valid
dst.f_valid = True
idst += 1
isrc += 1
assert d.num_live_items == idst
d.num_ever_used_items = idst
if ((ENTRIES.must_clear_key or ENTRIES.must_clear_value) and
d.entries == newitems):
# must clear the extra entries: they may contain valid pointers
# which would create a temporary memory leak
while idst < isrclimit:
entry = newitems[idst]
if ENTRIES.must_clear_key:
entry.key = lltype.nullptr(ENTRY.key.TO)
if ENTRIES.must_clear_value:
entry.value = lltype.nullptr(ENTRY.value.TO)
idst += 1
else:
d.entries = newitems
ll_dict_reindex(d, _ll_len_of_d_indexes(d))
def ll_dict_delitem(d, key):
ll_dict_delitem_with_hash(d, key, d.keyhash(key))
def ll_dict_delitem_with_hash(d, key, hash):
index = d.lookup_function(d, key, hash, FLAG_DELETE)
if index < 0:
raise KeyError
_ll_dict_del(d, index)
@jit.look_inside_iff(lambda d, i: jit.isvirtual(d) and jit.isconstant(i))
def _ll_dict_del(d, index):
d.entries.mark_deleted(index)
d.num_live_items -= 1
# clear the key and the value if they are GC pointers
ENTRIES = lltype.typeOf(d.entries).TO
ENTRY = ENTRIES.OF
entry = d.entries[index]
if ENTRIES.must_clear_key:
entry.key = lltype.nullptr(ENTRY.key.TO)
if ENTRIES.must_clear_value:
entry.value = lltype.nullptr(ENTRY.value.TO)
if d.num_live_items == 0:
# Dict is now empty. Reset these fields.
d.num_ever_used_items = 0
d.lookup_function_no &= FUNC_MASK
elif index == d.num_ever_used_items - 1:
# The last element of the ordereddict has been deleted. Instead of
# simply marking the item as dead, we can safely reuse it. Since it's
# also possible that there are more dead items immediately behind the
# last one, we reclaim all the dead items at the end of the ordereditem
# at the same point.
i = d.num_ever_used_items - 2
while i >= 0 and not d.entries.valid(i):
i -= 1
j = i + 1
assert j >= 0
d.num_ever_used_items = j
# If the dictionary is at least 87.5% dead items, then consider shrinking
# it.
if d.num_live_items + DICT_INITSIZE <= len(d.entries) / 8:
ll_dict_resize(d)
def ll_dict_resize(d):
# make a 'new_size' estimate and shrink it if there are many
# deleted entry markers. See CPython for why it is a good idea to
# quadruple the dictionary size as long as it's not too big.
# (Quadrupling comes from '(d.num_live_items + d.num_live_items + 1) * 2'
# as long as num_live_items is not too large.)
num_extra = min(d.num_live_items + 1, 30000)
_ll_dict_resize_to(d, num_extra)
ll_dict_resize.oopspec = 'odict.resize(d)'
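# Worked example (illustrative): with num_live_items = 5, num_extra is 6,
# new_estimate = (5 + 6) * 2 = 22, and _ll_dict_resize_to doubles new_size
# from DICT_INITSIZE past the estimate: 16 -> 32.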
def _ll_dict_resize_to(d, num_extra):
new_estimate = (d.num_live_items + num_extra) * 2
new_size = DICT_INITSIZE
while new_size <= new_estimate:
new_size *= 2
if new_size < _ll_len_of_d_indexes(d):
ll_dict_remove_deleted_items(d)
else:
ll_dict_reindex(d, new_size)
def ll_dict_reindex(d, new_size):
if bool(d.indexes) and _ll_len_of_d_indexes(d) == new_size:
ll_clear_indexes(d, new_size) # hack: we can reuse the same array
else:
ll_malloc_indexes_and_choose_lookup(d, new_size)
d.resize_counter = new_size * 2 - d.num_live_items * 3
ll_assert(d.resize_counter > 0, "reindex: resize_counter <= 0")
ll_assert((d.lookup_function_no >> FUNC_SHIFT) == 0,
"reindex: lookup_fun >> SHIFT")
#
entries = d.entries
i = 0
ibound = d.num_ever_used_items
while i < ibound:
if entries.valid(i):
hash = entries.hash(i)
ll_call_insert_clean_function(d, hash, i)
i += 1
#old_entries.delete() XXXX!
# ------- a port of CPython's dictobject.c's lookdict implementation -------
PERTURB_SHIFT = 5
FREE = 0
DELETED = 1
VALID_OFFSET = 2
MIN_INDEXES_MINUS_ENTRIES = VALID_OFFSET + 1
FLAG_LOOKUP = 0
FLAG_STORE = 1
FLAG_DELETE = 2
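# Probe-sequence sketch (illustrative numbers): the lookups below start at
# i = hash & mask and on each miss recompute
#     i = ((i << 2) + i + perturb + 1) & mask;  perturb >>= PERTURB_SHIFT
# e.g. for hash = 22 and mask = 7 the visited slots are 6, 5, 2, ...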
@specialize.memo()
def _ll_ptr_to_array_of(T):
return lltype.Ptr(lltype.GcArray(T))
@jit.look_inside_iff(lambda d, key, hash, store_flag, T:
jit.isvirtual(d) and jit.isconstant(key))
@jit.oopspec('ordereddict.lookup(d, key, hash, store_flag, T)')
def ll_dict_lookup(d, key, hash, store_flag, T):
INDEXES = _ll_ptr_to_array_of(T)
entries = d.entries
indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes)
mask = len(indexes) - 1
i = r_uint(hash & mask)
# do the first try before any looping
ENTRIES = lltype.typeOf(entries).TO
direct_compare = not hasattr(ENTRIES, 'no_direct_compare')
index = rffi.cast(lltype.Signed, indexes[intmask(i)])
if index >= VALID_OFFSET:
checkingkey = entries[index - VALID_OFFSET].key
if direct_compare and checkingkey == key:
if store_flag == FLAG_DELETE:
indexes[i] = rffi.cast(T, DELETED)
return index - VALID_OFFSET # found the entry
if d.keyeq is not None and entries.hash(index - VALID_OFFSET) == hash:
# correct hash, maybe the key is e.g. a different pointer to
# an equal object
found = d.keyeq(checkingkey, key)
#llop.debug_print(lltype.Void, "comparing keys", ll_debugrepr(checkingkey), ll_debugrepr(key), found)
if d.paranoia:
if (entries != d.entries or lltype.cast_opaque_ptr(llmemory.GCREF, indexes) != d.indexes or
not entries.valid(index - VALID_OFFSET) or
entries[index - VALID_OFFSET].key != checkingkey):
# the compare did major nasty stuff to the dict: start over
return ll_dict_lookup(d, key, hash, store_flag, T)
if found:
if store_flag == FLAG_DELETE:
indexes[i] = rffi.cast(T, DELETED)
return index - VALID_OFFSET
deletedslot = -1
elif index == DELETED:
deletedslot = intmask(i)
else:
# pristine entry -- lookup failed
if store_flag == FLAG_STORE:
indexes[i] = rffi.cast(T, d.num_ever_used_items + VALID_OFFSET)
return -1
# In the loop, a deleted entry (everused and not valid) is by far
# (factor of 100s) the least likely outcome, so test for that last.
perturb = r_uint(hash)
while 1:
# compute the next index using unsigned arithmetic
i = (i << 2) + i + perturb + 1
i = i & mask
index = rffi.cast(lltype.Signed, indexes[intmask(i)])
if index == FREE:
if store_flag == FLAG_STORE:
if deletedslot == -1:
deletedslot = intmask(i)
indexes[deletedslot] = rffi.cast(T, d.num_ever_used_items +
VALID_OFFSET)
return -1
elif index >= VALID_OFFSET:
checkingkey = entries[index - VALID_OFFSET].key
if direct_compare and checkingkey == key:
if store_flag == FLAG_DELETE:
indexes[i] = rffi.cast(T, DELETED)
return index - VALID_OFFSET # found the entry
if d.keyeq is not None and entries.hash(index - VALID_OFFSET) == hash:
# correct hash, maybe the key is e.g. a different pointer to
# an equal object
found = d.keyeq(checkingkey, key)
if d.paranoia:
if (entries != d.entries or lltype.cast_opaque_ptr(llmemory.GCREF, indexes) != d.indexes or
not entries.valid(index - VALID_OFFSET) or
entries[index - VALID_OFFSET].key != checkingkey):
# the compare did major nasty stuff to the dict: start over
return ll_dict_lookup(d, key, hash, store_flag, T)
if found:
if store_flag == FLAG_DELETE:
indexes[i] = rffi.cast(T, DELETED)
return index - VALID_OFFSET
elif deletedslot == -1:
deletedslot = intmask(i)
perturb >>= PERTURB_SHIFT
def ll_dict_store_clean(d, hash, index, T):
# a simplified version of ll_dict_lookup() which assumes that the
# key is new, and the dictionary doesn't contain deleted entries.
# It only finds the next free slot for the given hash.
INDEXES = _ll_ptr_to_array_of(T)
indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes)
mask = len(indexes) - 1
i = r_uint(hash & mask)
perturb = r_uint(hash)
while rffi.cast(lltype.Signed, indexes[i]) != FREE:
i = (i << 2) + i + perturb + 1
i = i & mask
perturb >>= PERTURB_SHIFT
indexes[i] = rffi.cast(T, index + VALID_OFFSET)
def ll_dict_delete_by_entry_index(d, hash, locate_index, T):
# Another simplified version of ll_dict_lookup() which locates a
# hashtable entry with the given 'index' stored in it, and deletes it.
# This *should* be safe against evil user-level __eq__/__hash__
# functions because the 'hash' argument here should be the one stored
# into the directory, which is correct.
INDEXES = _ll_ptr_to_array_of(T)
indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes)
mask = len(indexes) - 1
i = r_uint(hash & mask)
perturb = r_uint(hash)
locate_value = locate_index + VALID_OFFSET
while rffi.cast(lltype.Signed, indexes[i]) != locate_value:
assert rffi.cast(lltype.Signed, indexes[i]) != FREE
i = (i << 2) + i + perturb + 1
i = i & mask
perturb >>= PERTURB_SHIFT
indexes[i] = rffi.cast(T, DELETED)
# ____________________________________________________________
#
# Irregular operations.
# Start the hashtable size at 16 rather than 8, as with rdict.py, because
# it is only an array of bytes
DICT_INITSIZE = 16
@specialize.memo()
def _ll_empty_array(DICT):
"""Memo function: cache a single prebuilt allocated empty array."""
return DICT.entries.TO.allocate(0)
def ll_newdict(DICT):
d = DICT.allocate()
d.entries = _ll_empty_array(DICT)
ll_malloc_indexes_and_choose_lookup(d, DICT_INITSIZE)
d.num_live_items = 0
d.num_ever_used_items = 0
d.resize_counter = DICT_INITSIZE * 2
return d
OrderedDictRepr.ll_newdict = staticmethod(ll_newdict)
def ll_newdict_size(DICT, orig_length_estimate):
length_estimate = (orig_length_estimate // 2) * 3
n = DICT_INITSIZE
while n < length_estimate:
n *= 2
d = DICT.allocate()
d.entries = DICT.entries.TO.allocate(orig_length_estimate)
ll_malloc_indexes_and_choose_lookup(d, n)
d.num_live_items = 0
d.num_ever_used_items = 0
d.resize_counter = n * 2
return d
# rpython.memory.lldict uses a dict based on Struct and Array
# instead of GcStruct and GcArray, which is done by using different
# 'allocate' and 'delete' adtmethod implementations than the ones below
def _ll_malloc_dict(DICT):
return lltype.malloc(DICT)
def _ll_malloc_entries(ENTRIES, n):
return lltype.malloc(ENTRIES, n, zero=True)
def _ll_free_entries(entries):
pass
# ____________________________________________________________
#
# Iteration.
def get_ll_dictiter(DICTPTR):
return lltype.Ptr(lltype.GcStruct('dictiter',
('dict', DICTPTR),
('index', lltype.Signed)))
class DictIteratorRepr(AbstractDictIteratorRepr):
def __init__(self, r_dict, variant="keys"):
self.r_dict = r_dict
self.variant = variant
self.lowleveltype = get_ll_dictiter(r_dict.lowleveltype)
if variant == 'reversed':
self.ll_dictiter = ll_dictiter_reversed
self._ll_dictnext = _ll_dictnext_reversed
else:
self.ll_dictiter = ll_dictiter
self._ll_dictnext = _ll_dictnext
def ll_dictiter(ITERPTR, d):
iter = lltype.malloc(ITERPTR.TO)
iter.dict = d
# initialize the index with usually 0, but occasionally a larger value
iter.index = d.lookup_function_no >> FUNC_SHIFT
return iter
@jit.look_inside_iff(lambda iter: jit.isvirtual(iter)
and (iter.dict is None or
jit.isvirtual(iter.dict)))
@jit.oopspec("odictiter.next(iter)")
def _ll_dictnext(iter):
dict = iter.dict
if dict:
entries = dict.entries
index = iter.index
assert index >= 0
entries_len = dict.num_ever_used_items
while index < entries_len:
nextindex = index + 1
if entries.valid(index):
iter.index = nextindex
return index
else:
# In case of repeated iteration over the start of
# a dict where the items get removed, like
# collections.OrderedDict.popitem(last=False),
# the hack below will increase the value stored in
# the high bits of lookup_function_no and so the
# next iteration will start at a higher value.
# We should carefully reset these high bits to zero
# as soon as we do something like ll_dict_reindex().
if index == (dict.lookup_function_no >> FUNC_SHIFT):
dict.lookup_function_no += (1 << FUNC_SHIFT)
index = nextindex
# clear the reference to the dict and prevent restarts
iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO)
raise StopIteration
def ll_dictiter_reversed(ITERPTR, d):
iter = lltype.malloc(ITERPTR.TO)
iter.dict = d
iter.index = d.num_ever_used_items
return iter
def _ll_dictnext_reversed(iter):
dict = iter.dict
if dict:
entries = dict.entries
index = iter.index - 1
while index >= 0:
if entries.valid(index):
iter.index = index
return index
index = index - 1
# clear the reference to the dict and prevent restarts
iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO)
raise StopIteration
# _____________________________________________________________
# methods
def ll_dict_get(dict, key, default):
index = dict.lookup_function(dict, key, dict.keyhash(key), FLAG_LOOKUP)
if index < 0:
return default
else:
return dict.entries[index].value
def ll_dict_setdefault(dict, key, default):
hash = dict.keyhash(key)
index = dict.lookup_function(dict, key, hash, FLAG_STORE)
if index < 0:
_ll_dict_setitem_lookup_done(dict, key, default, hash, -1)
return default
else:
return dict.entries[index].value
def ll_dict_copy(dict):
DICT = lltype.typeOf(dict).TO
newdict = DICT.allocate()
newdict.entries = DICT.entries.TO.allocate(len(dict.entries))
newdict.num_live_items = dict.num_live_items
newdict.num_ever_used_items = dict.num_ever_used_items
if hasattr(DICT, 'fnkeyeq'):
newdict.fnkeyeq = dict.fnkeyeq
if hasattr(DICT, 'fnkeyhash'):
newdict.fnkeyhash = dict.fnkeyhash
i = 0
while i < newdict.num_ever_used_items:
d_entry = newdict.entries[i]
entry = dict.entries[i]
ENTRY = lltype.typeOf(newdict.entries).TO.OF
d_entry.key = entry.key
if hasattr(ENTRY, 'f_valid'):
d_entry.f_valid = entry.f_valid
d_entry.value = entry.value
if hasattr(ENTRY, 'f_hash'):
d_entry.f_hash = entry.f_hash
i += 1
ll_dict_reindex(newdict, _ll_len_of_d_indexes(dict))
return newdict
ll_dict_copy.oopspec = 'odict.copy(dict)'
def ll_dict_clear(d):
if d.num_ever_used_items == 0:
return
DICT = lltype.typeOf(d).TO
old_entries = d.entries
d.entries = _ll_empty_array(DICT)
ll_malloc_indexes_and_choose_lookup(d, DICT_INITSIZE)
d.num_live_items = 0
d.num_ever_used_items = 0
d.resize_counter = DICT_INITSIZE * 2
# old_entries.delete() XXX
ll_dict_clear.oopspec = 'odict.clear(d)'
def ll_dict_update(dic1, dic2):
if dic1 == dic2:
return
ll_prepare_dict_update(dic1, dic2.num_live_items)
i = 0
while i < dic2.num_ever_used_items:
entries = dic2.entries
if entries.valid(i):
entry = entries[i]
hash = entries.hash(i)
key = entry.key
value = entry.value
index = dic1.lookup_function(dic1, key, hash, FLAG_STORE)
_ll_dict_setitem_lookup_done(dic1, key, value, hash, index)
i += 1
ll_dict_update.oopspec = 'odict.update(dic1, dic2)'
def ll_prepare_dict_update(d, num_extra):
# Prescale 'd' for 'num_extra' items, assuming that most items don't
# collide. If this assumption is false, 'd' becomes too large by at
# most 'num_extra'. The logic is based on:
# (d.resize_counter - 1) // 3 = room left in d
# so, if num_extra == 1, we need d.resize_counter > 3
# if num_extra == 2, we need d.resize_counter > 6 etc.
# Note however a further hack: if num_extra <= d.num_live_items,
# we avoid calling _ll_dict_resize_to here. This is to handle
# the case where dict.update() actually has a lot of collisions.
# If num_extra is much greater than d.num_live_items the conditional_call
# will trigger anyway, which is really the goal.
x = num_extra - d.num_live_items
jit.conditional_call(d.resize_counter <= x * 3,
_ll_dict_resize_to, d, num_extra)
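# Worked example (illustrative): with d.num_live_items = 10 and num_extra = 25,
# x = 15, so the resize runs only when d.resize_counter <= 45, i.e. when the
# room left, (d.resize_counter - 1) // 3, is at most 14.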
# this is an implementation of keys(), values() and items()
# in a single function.
# note that by specialization on func, three different
# and very efficient functions are created.
def recast(P, v):
if isinstance(P, lltype.Ptr):
return lltype.cast_pointer(P, v)
else:
return v
def _make_ll_keys_values_items(kind):
def ll_kvi(LIST, dic):
res = LIST.ll_newlist(dic.num_live_items)
entries = dic.entries
dlen = dic.num_ever_used_items
items = res.ll_items()
i = 0
p = 0
while i < dlen:
if entries.valid(i):
ELEM = lltype.typeOf(items).TO.OF
if ELEM is not lltype.Void:
entry = entries[i]
if kind == 'items':
r = lltype.malloc(ELEM.TO)
r.item0 = recast(ELEM.TO.item0, entry.key)
r.item1 = recast(ELEM.TO.item1, entry.value)
items[p] = r
elif kind == 'keys':
items[p] = recast(ELEM, entry.key)
elif kind == 'values':
items[p] = recast(ELEM, entry.value)
p += 1
i += 1
assert p == res.ll_length()
return res
ll_kvi.oopspec = 'odict.%s(dic)' % kind
return ll_kvi
ll_dict_keys = _make_ll_keys_values_items('keys')
ll_dict_values = _make_ll_keys_values_items('values')
ll_dict_items = _make_ll_keys_values_items('items')
def ll_dict_contains(d, key):
return ll_dict_contains_with_hash(d, key, d.keyhash(key))
def ll_dict_contains_with_hash(d, key, hash):
i = d.lookup_function(d, key, hash, FLAG_LOOKUP)
return i >= 0
def _ll_getnextitem(dic):
if dic.num_live_items == 0:
raise KeyError
entries = dic.entries
# find the last entry. It's unclear if the loop below is still
# needed nowadays, because 'num_ever_used_items - 1' should always
# point to the last active item (we decrease it as needed in
# _ll_dict_del). Better safe than sorry.
while True:
i = dic.num_ever_used_items - 1
if entries.valid(i):
break
dic.num_ever_used_items -= 1
# we must remove the precise entry in the hashtable that points to 'i'
ll_call_delete_by_entry_index(dic, entries.hash(i), i)
return i
def ll_dict_popitem(ELEM, dic):
i = _ll_getnextitem(dic)
entry = dic.entries[i]
r = lltype.malloc(ELEM.TO)
r.item0 = recast(ELEM.TO.item0, entry.key)
r.item1 = recast(ELEM.TO.item1, entry.value)
_ll_dict_del(dic, i)
return r
def ll_dict_pop(dic, key):
index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE)
if index < 0:
raise KeyError
value = dic.entries[index].value
_ll_dict_del(dic, index)
return value
def ll_dict_pop_default(dic, key, dfl):
index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE)
if index < 0:
return dfl
value = dic.entries[index].value
_ll_dict_del(dic, index)
return value
import numpy as np
i8 = np.int64()
i4 = np.int32()
u8 = np.uint64()
b_ = np.bool_()
i = int()
f8 = np.float64()
b_ >> f8 # E: No overload variant
i8 << f8 # E: No overload variant
i | f8 # E: Unsupported operand types
i8 ^ f8 # E: No overload variant
u8 & f8 # E: No overload variant
~f8 # E: Unsupported operand type
# mypy's error message for `NoReturn` is unfortunately pretty bad
# TODO: Reenable this once we add support for numerical precision for `number`s
# a = u8 | 0 # E: Need type annotation
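# The `# E: ...` suffixes above are expected-error markers: the usual way such
# files are consumed (and the convention numpy's own typing test suite uses)
# is a harness that runs mypy over the file and checks each reported error
# message against the comment on that line.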
# -*- coding: utf-8 -*-
import datetime, json, logging, os, subprocess
from fast_reconcile_app import settings_app
from django.conf import settings
# from django.core.urlresolvers import reverse
log = logging.getLogger(__name__)
def get_commit():
""" Returns commit-string.
Called by views.info() """
original_directory = os.getcwd()
log.debug( 'BASE_DIR, ```%s```' % settings.BASE_DIR )
git_dir = settings.BASE_DIR
log.debug( 'git_dir, ```%s```' % git_dir )
os.chdir( git_dir )
output_utf8 = subprocess.check_output( ['git', 'log'], stderr=subprocess.STDOUT )
output = output_utf8.decode( 'utf-8' )
os.chdir( original_directory )
lines = output.split( '\n' )
commit = lines[0]
return commit
def get_branch():
""" Returns branch.
Called by views.info() """
original_directory = os.getcwd()
git_dir = settings.BASE_DIR
os.chdir( git_dir )
output_utf8 = subprocess.check_output( ['git', 'branch'], stderr=subprocess.STDOUT )
output = output_utf8.decode( 'utf-8' )
os.chdir( original_directory )
lines = output.split( '\n' )
branch = 'init'
for line in lines:
if line[0:1] == '*':
branch = line[2:]
break
return branch
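# A one-call alternative sketch (assumes a reasonably modern git; not used
# above): the branch name can also be read without parsing `git branch` output:
#
#     subprocess.check_output(
#         ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
#         cwd=git_dir, stderr=subprocess.STDOUT ).decode('utf-8').strip()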
def make_context( request, rq_now, info_txt, taken ):
""" Builds and returns context.
Called by views.info() """
cntxt = {
'request': {
'url': '%s://%s%s' % ( request.scheme,
request.META.get( 'HTTP_HOST', '127.0.0.1' ), # HTTP_HOST doesn't exist for client-tests
request.META.get('REQUEST_URI', request.META['PATH_INFO'])
),
'timestamp': str( rq_now )
},
'response': {
'documentation': settings_app.README_URL,
'version': info_txt,
'elapsed_time': str( taken )
}
}
return cntxt
# Copyright 2019-2021 Foreseeti AB <https://foreseeti.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
from typing import Any
CONFIG_SCHEMA: dict[str, Any] = {
"definitions": {
"nonEmptyString": {"type": "string", "minLength": 1},
"nonEmptyStringArray": {
"type": "array",
"items": {"$ref": "#/definitions/nonEmptyString"},
"minItems": 1,
},
"account": {
"type": "object",
"oneOf": [
{
"properties": {
"access_key": {"$ref": "#/definitions/nonEmptyString"},
"secret_key": {"$ref": "#/definitions/nonEmptyString"},
"session_token": {"$ref": "#/definitions/nonEmptyString"},
"role": {"$ref": "#/definitions/nonEmptyString"},
"regions": {"$ref": "#/definitions/nonEmptyStringArray"},
"endpoint_url": {"$ref": "#/definitions/nonEmptyString"},
},
"required": ["access_key", "secret_key", "regions"],
"additionalProperties": False,
},
{
"properties": {
"role": {"$ref": "#/definitions/nonEmptyString"},
"regions": {"$ref": "#/definitions/nonEmptyStringArray"},
"profile": {"$ref": "#/definitions/nonEmptyString"},
"endpoint_url": {"$ref": "#/definitions/nonEmptyString"},
},
"additionalProperties": False,
},
],
},
"nonEmptyAccountArray": {
"type": "array",
"items": {"$ref": "#/definitions/account"},
"minItems": 1,
},
},
"type": "object",
"properties": {"accounts": {"$ref": "#/definitions/nonEmptyAccountArray"}},
"additionalProperties": False,
"required": ["accounts"],
}
DATA_SCHEMA: dict[str, Any] = {
"definitions": {
"nonEmptyString": {"type": "string", "minLength": 1},
"stringArray": {
"type": "array",
"items": {"$ref": "#/definitions/nonEmptyString"},
},
"globalServices": {
"type": "object",
"properties": {},
"additionalProperties": True,
"required": [],
},
"regionServices": {
"type": "object",
"properties": {"region_name": {"$ref": "#/definitions/nonEmptyString"}},
"additionalProperties": True,
"required": ["region_name"],
},
"nonEmptyRegionServicesArray": {
"type": "array",
"items": {"$ref": "#/definitions/regionServices"},
"minItems": 1,
},
"account": {
"type": "object",
"properties": {
"account_id": {"$ref": "#/definitions/nonEmptyString"},
"account_aliases": {"$ref": "#/definitions/stringArray"},
"global": {"$ref": "#/definitions/globalServices"},
"regions": {"$ref": "#/definitions/nonEmptyRegionServicesArray"},
},
"additionalProperties": False,
"required": ["account_id", "account_aliases", "global", "regions"],
},
"nonEmptyAccountArray": {
"type": "array",
"items": {"$ref": "#/definitions/account"},
"minItems": 1,
},
},
"type": "object",
"properties": {"accounts": {"$ref": "#/definitions/nonEmptyAccountArray"}},
"additionalProperties": False,
"required": ["accounts"],
}
def get_config_schema() -> dict[str, Any]:
config_schema = copy.deepcopy(CONFIG_SCHEMA)
return config_schema
def get_data_schema() -> dict[str, Any]:
# pylint: disable=import-outside-toplevel, cyclic-import
from securicad.aws_collector import PARSER_VERSION, PARSER_VERSION_FIELD
data_schema = copy.deepcopy(DATA_SCHEMA)
data_schema["properties"][PARSER_VERSION_FIELD] = {"const": PARSER_VERSION}
data_schema["required"].append(PARSER_VERSION_FIELD)
return data_schema
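# Minimal validation sketch (assumes the `jsonschema` package is installed;
# the account values are made up):
#
#     from jsonschema import validate
#     validate(
#         instance={"accounts": [{"access_key": "AKIA...", "secret_key": "...",
#                                 "regions": ["us-east-1"]}]},
#         schema=get_config_schema(),
#     )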
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.dllib.utils.common import *
def init_fl_context(target="localhost:8980"):
callBigDlFunc("float", "initFLContext", target)
class FLClientClosable(JavaValue):
def __init__(self, jvalue=None, bigdl_type="float", *args):
super().__init__(jvalue, bigdl_type, *args)
def set_fl_client(self, fl_client):
return callBigDlFunc(self.bigdl_type, "flClientClosableSetFLClient", self.value, fl_client)
import unittest
import socket
import logging
from bigdl.dllib.utils.log4Error import invalidOperationError
class FLTest(unittest.TestCase):
def __init__(self, methodName='FLTest') -> None:
super().__init__(methodName)
self.port = 8980
self.port = self.get_available_port(self.port, self.port + 10)
self.target = f"localhost:{self.port}"
def get_available_port(self, port_start, port_end):
        def is_available(p):
            # connect_ex returns 0 when the connect succeeds, i.e. when
            # something is already listening on the port, so non-zero means free
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            result = sock.connect_ex(('127.0.0.1', p))
            sock.close()
            return result != 0
for p in range(port_start, port_end):
if is_available(p):
return p
else:
logging.info(f"port {p} is not avaible, trying another...")
invalidOperationError(False,
f"can not find available port in range [{port_start}, {port_end}]")
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module contains collection of classes which implement
collate functionalities for various tasks.
Collaters should know what data to expect for each sample
and they should pack / collate them into batches
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
class Seq2SeqCollater(object):
"""
Implements collate function mainly for seq2seq tasks
This expects each sample to contain feature (src_tokens) and
targets.
This collator is also used for aligned training task.
"""
def __init__(
self,
feature_index=0,
label_index=1,
pad_index=1,
eos_index=2,
move_eos_to_beginning=True,
):
self.feature_index = feature_index
self.label_index = label_index
self.pad_index = pad_index
self.eos_index = eos_index
self.move_eos_to_beginning = move_eos_to_beginning
def _collate_frames(self, frames):
"""Convert a list of 2d frames into a padded 3d tensor
Args:
frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
len_max = max(frame.size(0) for frame in frames)
f_dim = frames[0].size(1)
res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)
for i, v in enumerate(frames):
res[i, : v.size(0)] = v
return res
def collate(self, samples):
"""
utility function to collate samples into batch for speech recognition.
"""
if len(samples) == 0:
return {}
# parse samples into torch tensors
parsed_samples = []
for s in samples:
# skip invalid samples
if s["data"][self.feature_index] is None:
continue
source = s["data"][self.feature_index]
if isinstance(source, (np.ndarray, np.generic)):
source = torch.from_numpy(source)
target = s["data"][self.label_index]
if isinstance(target, (np.ndarray, np.generic)):
target = torch.from_numpy(target).long()
elif isinstance(target, list):
target = torch.LongTensor(target)
parsed_sample = {"id": s["id"], "source": source, "target": target}
parsed_samples.append(parsed_sample)
samples = parsed_samples
id = torch.LongTensor([s["id"] for s in samples])
frames = self._collate_frames([s["source"] for s in samples])
# sort samples by descending number of frames
frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples])
frames_lengths, sort_order = frames_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
frames = frames.index_select(0, sort_order)
target = None
target_lengths = None
prev_output_tokens = None
if samples[0].get("target", None) is not None:
ntokens = sum(len(s["target"]) for s in samples)
target = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, sort_order)
target_lengths = torch.LongTensor(
[s["target"].size(0) for s in samples]
).index_select(0, sort_order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=self.move_eos_to_beginning,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {"src_tokens": frames, "src_lengths": frames_lengths},
"target": target,
"target_lengths": target_lengths,
"nsentences": len(samples),
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
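# Minimal usage sketch (illustrative, not part of the original file): two fake
# samples in the layout Seq2SeqCollater.collate expects -- s["data"][0] is an
# (L, f_dim) float feature matrix and s["data"][1] a list of target token ids
# ending with the eos index (2 by default).
def _collater_demo():
    collater = Seq2SeqCollater(feature_index=0, label_index=1)
    samples = [
        {"id": 0, "data": [np.random.rand(7, 4).astype(np.float32), [5, 6, 2]]},
        {"id": 1, "data": [np.random.rand(5, 4).astype(np.float32), [7, 2]]},
    ]
    batch = collater.collate(samples)
    # frames are padded to the longest sample in the batch: (2, 7, 4)
    assert batch["net_input"]["src_tokens"].shape == (2, 7, 4)
    assert batch["ntokens"] == 5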
|
nilq/baby-python
|
python
|
# coding: utf-8
import clinica.engine as ce
class PetSurfaceLongitudinalCLI(ce.CmdParser):
def define_name(self):
"""Define the sub-command name to run this pipeline."""
self._name = "pet-surface-longitudinal"
def define_description(self):
"""Define a description of this pipeline."""
self._description = (
"Longitudinal surface-based processing of PET images:\n"
"https://aramislab.paris.inria.fr/clinica/docs/public/latest/Pipelines/PET_Surface_Longitudinal/"
)
def define_options(self):
"""Define the sub-command arguments."""
from clinica.engine.cmdparser import PIPELINE_CATEGORIES
# Clinica compulsory arguments (e.g. BIDS, CAPS, group_label)
clinica_comp = self._args.add_argument_group(
PIPELINE_CATEGORIES["CLINICA_COMPULSORY"]
)
clinica_comp.add_argument("bids_directory", help="Path to the BIDS directory.")
clinica_comp.add_argument(
"caps_directory",
help="Path to the CAPS directory. (Filled with results from t1-freesurfer-longitudinal pipeline",
)
clinica_comp.add_argument(
"acq_label",
type=str,
help="Name of the PET tracer label in the acquisition entity "
"(acq-<acq_label>).",
)
clinica_comp.add_argument(
"suvr_reference_region",
choices=["cerebellumPons", "pons"],
help="Intensity normalization using the average PET uptake in reference regions "
"resulting in a standardized uptake value ratio (SUVR) map. It can be "
"cerebellumPons (used for amyloid tracers) or pons (used for 18F-FDG tracers).",
)
clinica_comp.add_argument(
"pvc_psf_tsv",
help="TSV file containing for each PET image its point spread function (PSF) measured "
"in mm at x, y & z coordinates. Columns must contain: "
"participant_id, session_id, acq_label, psf_x, psf_y and psf_z.",
)
# Clinica standard arguments (e.g. --n_procs)
self.add_clinica_standard_arguments()
def run_command(self, args):
"""Run the pipeline with defined args."""
from networkx import Graph
from clinica.utils.ux import print_crash_files_and_exit, print_end_pipeline
from .pet_surface_pipeline import PetSurface
parameters = {
"acq_label": args.acq_label,
"suvr_reference_region": args.suvr_reference_region,
"pvc_psf_tsv": self.absolute_path(args.pvc_psf_tsv),
"longitudinal": True,
}
pipeline = PetSurface(
bids_directory=self.absolute_path(args.bids_directory),
caps_directory=self.absolute_path(args.caps_directory),
tsv_file=self.absolute_path(args.subjects_sessions_tsv),
base_dir=self.absolute_path(args.working_directory),
parameters=parameters,
name=self.name,
)
if args.n_procs:
exec_pipeline = pipeline.run(
plugin="MultiProc", plugin_args={"n_procs": args.n_procs}
)
else:
exec_pipeline = pipeline.run()
if isinstance(exec_pipeline, Graph):
print_end_pipeline(
self.name, pipeline.base_dir, pipeline.base_dir_was_specified
)
else:
print_crash_files_and_exit(args.logname, pipeline.base_dir)
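# Illustrative invocation of this sub-command (paths and tracer label are
# hypothetical; consult the clinica documentation linked above for the exact
# CLI syntax):
#   clinica run pet-surface-longitudinal ./bids ./caps 18FFDG pons psf.tsv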
|
nilq/baby-python
|
python
|
import time
from threading import Thread
from cassandra import ConsistencyLevel
from ccmlib.node import ToolError
from dtest import Tester, debug
from tools import insert_c1c2, query_c1c2, since
class TestRebuild(Tester):
def __init__(self, *args, **kwargs):
kwargs['cluster_options'] = {'start_rpc': 'true'}
# Ignore these log patterns:
self.ignore_log_patterns = [
# This one occurs when trying to send the migration to a
# node that hasn't started yet, and when it does, it gets
# replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
# ignore streaming error during bootstrap
r'Exception encountered during startup',
r'Streaming error occurred'
]
Tester.__init__(self, *args, **kwargs)
def simple_rebuild_test(self):
"""
@jira_ticket CASSANDRA-9119
        Test that rebuild from another dc works as expected.
"""
keys = 1000
cluster = self.cluster
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
node1 = cluster.create_node('node1', False,
('127.0.0.1', 9160),
('127.0.0.1', 7000),
'7100', '2000', None,
binary_interface=('127.0.0.1', 9042))
cluster.add(node1, True, data_center='dc1')
# start node in dc1
node1.start(wait_for_binary_proto=True)
# populate data in dc1
session = self.patient_exclusive_cql_connection(node1)
self.create_ks(session, 'ks', {'dc1': 1})
self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=keys, consistency=ConsistencyLevel.LOCAL_ONE)
# check data
for i in xrange(0, keys):
query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
session.shutdown()
# Bootstrapping a new node in dc2 with auto_bootstrap: false
node2 = cluster.create_node('node2', False,
('127.0.0.2', 9160),
('127.0.0.2', 7000),
'7200', '2001', None,
binary_interface=('127.0.0.2', 9042))
cluster.add(node2, False, data_center='dc2')
node2.start(wait_other_notice=True, wait_for_binary_proto=True)
# wait for snitch to reload
time.sleep(60)
# alter keyspace to replicate to dc2
session = self.patient_exclusive_cql_connection(node2)
session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
# alter system_auth -- rebuilding it no longer possible after
# CASSANDRA-11848 prevented local node from being considered a source
session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
session.execute('USE ks')
self.rebuild_errors = 0
# rebuild dc2 from dc1
def rebuild():
try:
node2.nodetool('rebuild dc1')
except ToolError as e:
if 'Node is still rebuilding' in e.stdout:
self.rebuild_errors += 1
else:
raise e
class Runner(Thread):
def __init__(self, func):
Thread.__init__(self)
self.func = func
self.thread_exc_info = None
def run(self):
"""
                Closes over self to catch any exceptions raised by func and
                stores them in self.thread_exc_info
Based on http://stackoverflow.com/a/1854263
"""
try:
self.func()
except Exception:
import sys
self.thread_exc_info = sys.exc_info()
cmd1 = Runner(rebuild)
cmd1.start()
# concurrent rebuild should not be allowed (CASSANDRA-9119)
# (following sleep is needed to avoid conflict in 'nodetool()' method setting up env.)
time.sleep(.1)
        # we don't need to manually raise exceptions here -- already handled
rebuild()
cmd1.join()
# manually raise exception from cmd1 thread
# see http://stackoverflow.com/a/1854263
if cmd1.thread_exc_info is not None:
raise cmd1.thread_exc_info[1], None, cmd1.thread_exc_info[2]
# exactly 1 of the two nodetool calls should fail
# usually it will be the one in the main thread,
# but occasionally it wins the race with the one in the secondary thread,
# so we check that one succeeded and the other failed
self.assertEqual(self.rebuild_errors, 1,
msg='rebuild errors should be 1, but found {}. Concurrent rebuild should not be allowed, but one rebuild command should have succeeded.'.format(self.rebuild_errors))
# check data
for i in xrange(0, keys):
query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
@since('2.2')
def resumable_rebuild_test(self):
"""
@jira_ticket CASSANDRA-10810
Test rebuild operation is resumable
"""
self.ignore_log_patterns = self.ignore_log_patterns[:] + [r'Error while rebuilding node',
r'Streaming error occurred on session with peer 127.0.0.3',
r'Remote peer 127.0.0.3 failed stream session']
cluster = self.cluster
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
# Create 2 nodes on dc1
node1 = cluster.create_node('node1', False,
('127.0.0.1', 9160),
('127.0.0.1', 7000),
'7100', '2000', None,
binary_interface=('127.0.0.1', 9042))
node2 = cluster.create_node('node2', False,
('127.0.0.2', 9160),
('127.0.0.2', 7000),
'7200', '2001', None,
binary_interface=('127.0.0.2', 9042))
cluster.add(node1, True, data_center='dc1')
cluster.add(node2, True, data_center='dc1')
node1.start(wait_for_binary_proto=True)
node2.start(wait_for_binary_proto=True)
# Insert data into node1 and node2
session = self.patient_exclusive_cql_connection(node1)
self.create_ks(session, 'ks', {'dc1': 1})
self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)
key = list(range(10000, 20000))
session = self.patient_exclusive_cql_connection(node2)
session.execute('USE ks')
insert_c1c2(session, keys=key, consistency=ConsistencyLevel.ALL)
session.shutdown()
# Create a new node3 on dc2
node3 = cluster.create_node('node3', False,
('127.0.0.3', 9160),
('127.0.0.3', 7000),
'7300', '2002', None,
binary_interface=('127.0.0.3', 9042),
byteman_port='8300')
cluster.add(node3, False, data_center='dc2')
node3.start(wait_other_notice=False, wait_for_binary_proto=True)
# Wait for snitch to be refreshed
time.sleep(5)
# Alter necessary keyspace for rebuild operation
session = self.patient_exclusive_cql_connection(node3)
session.execute("ALTER KEYSPACE ks WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
session.execute("ALTER KEYSPACE system_auth WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
# Path to byteman script which makes node2 throw an exception making rebuild fail
script = ['./rebuild_failure_inject.btm']
node3.byteman_submit(script)
# First rebuild must fail and data must be incomplete
with self.assertRaises(ToolError, msg='Unexpected: SUCCEED'):
debug('Executing first rebuild -> '),
node3.nodetool('rebuild dc1')
debug('Expected: FAILED')
session.execute('USE ks')
with self.assertRaises(AssertionError, msg='Unexpected: COMPLETE'):
debug('Checking data is complete -> '),
for i in xrange(0, 20000):
query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
debug('Expected: INCOMPLETE')
debug('Executing second rebuild -> '),
node3.nodetool('rebuild dc1')
debug('Expected: SUCCEED')
# Check all streaming sessions completed, streamed ranges are skipped and verify streamed data
node3.watch_log_for('All sessions completed')
node3.watch_log_for('Skipping streaming those ranges.')
debug('Checking data is complete -> '),
for i in xrange(0, 20000):
query_c1c2(session, i, ConsistencyLevel.LOCAL_ONE)
debug('Expected: COMPLETE')
@since('3.6')
def rebuild_ranges_test(self):
"""
@jira_ticket CASSANDRA-10406
"""
keys = 1000
cluster = self.cluster
tokens = cluster.balanced_tokens_across_dcs(['dc1', 'dc2'])
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
cluster.set_configuration_options(values={'num_tokens': 1})
node1 = cluster.create_node('node1', False,
('127.0.0.1', 9160),
('127.0.0.1', 7000),
'7100', '2000', tokens[0],
binary_interface=('127.0.0.1', 9042))
node1.set_configuration_options(values={'initial_token': tokens[0]})
cluster.add(node1, True, data_center='dc1')
node1 = cluster.nodelist()[0]
# start node in dc1
node1.start(wait_for_binary_proto=True)
# populate data in dc1
session = self.patient_exclusive_cql_connection(node1)
# ks1 will be rebuilt in node2
self.create_ks(session, 'ks1', {'dc1': 1})
self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
# ks2 will not be rebuilt in node2
self.create_ks(session, 'ks2', {'dc1': 1})
self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
session.shutdown()
        # Bootstrapping a new node in dc2 with auto_bootstrap: false
node2 = cluster.create_node('node2', False,
('127.0.0.2', 9160),
('127.0.0.2', 7000),
'7200', '2001', tokens[1],
binary_interface=('127.0.0.2', 9042))
node2.set_configuration_options(values={'initial_token': tokens[1]})
cluster.add(node2, False, data_center='dc2')
node2.start(wait_other_notice=True, wait_for_binary_proto=True)
# wait for snitch to reload
time.sleep(60)
# alter keyspace to replicate to dc2
session = self.patient_exclusive_cql_connection(node2)
session.execute("ALTER KEYSPACE ks1 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
session.execute("ALTER KEYSPACE ks2 WITH REPLICATION = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1};")
session.execute('USE ks1')
# rebuild only ks1 with range that is node1's replica
node2.nodetool('rebuild -ks ks1 -ts (%s,%s] dc1' % (tokens[1], str(pow(2, 63) - 1)))
# check data is sent by stopping node1
node1.stop()
for i in xrange(0, keys):
query_c1c2(session, i, ConsistencyLevel.ONE)
# ks2 should not be streamed
session.execute('USE ks2')
for i in xrange(0, keys):
query_c1c2(session, i, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
|
nilq/baby-python
|
python
|
from django.core.management.base import BaseCommand
from schedule import models
from django.utils import timezone
from django.conf import settings
import requests
import requests.auth
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
scheduled = models.ScheduledPublication.objects.filter(scheduled__lt=timezone.now(), published=False)
for schedule in scheduled:
self.publish_article(schedule)
@staticmethod
def publish_article(schedule):
try:
message = {
"articles": [{
"id": schedule.article_identifier
}]
}
service = settings.DASHBOARD_PUBLISHING_SERVICE
auth = requests.auth.HTTPBasicAuth(settings.PUBLISHING_SERVICE_USER,
settings.PUBLISHING_SERVICE_PASSWORD)
response = requests.post(service, json=message, auth=auth)
            if response.status_code == 200:
schedule.published = True
schedule.save()
else:
logger.error("response returned %s", response.status_code)
        except Exception as e:
            logger.error("An error has occurred. Exception: %s", e)
|
nilq/baby-python
|
python
|
######################################
#
# Nikolai Rozanov (C) 2017-Present
#
# nikolai.rozanov@gmail.com
#
#####################################
import numpy as np
#
# This file is a way of learning the kernel and performing a hypothesis test, by computing the test statistic
#
class TEST(object):
    '''
    Main class.
    The wrapped test object needs to provide:
        get_tstat()
        get_estimate()
        reset(params1, params2)
        get_threshold()
        get_power()
    '''
def __init__(self,test):
self.__test = test
# #######################################
# Optimise over the following parameters
def learn_kernel(self,params_vec1,params_vec2,method='power'):
        '''
        Finds the optimal kernel with respect to the chosen criterion
        (power, or the test statistic itself; others maybe later).
        params_vec1 and params_vec2 must be the same length.
        '''
if method=='power':
vec = self.__learn_kernel_power(params_vec1,params_vec2)
elif method=='tstat':
vec = self.__learn_kernel_tstat(params_vec1,params_vec2)
else:
vec = []
        amax = np.argmax(vec)
        vmax = np.max(vec)
        return vmax, amax, vec
def __learn_kernel_power(self,params1,params2):
        '''
        Computes the test power for each pair of kernel parameters.
        '''
num_ker = len(params1)
powers = np.zeros(num_ker)
for idx in range(num_ker):
self.__test.reset(params1[idx],params2[idx])
powers[idx] = self.__test.get_power()
return powers
def __learn_kernel_tstat(self,params1,params2):
        '''
        Computes the test statistic for each pair of kernel parameters.
        '''
num_ker = len(params1)
powers = np.zeros(num_ker)
for idx in range(num_ker):
self.__test.reset(params1[idx],params2[idx])
powers[idx] = self.__test.get_tstat()
return powers
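# Illustrative sketch (hypothetical _DummyTest, not part of the original): any
# object exposing reset/get_power/get_tstat can be wrapped by TEST.
class _DummyTest(object):
    def __init__(self):
        self.param = None
    def reset(self, p1, p2):
        self.param = p1 + p2
    def get_power(self):
        return float(self.param)
    def get_tstat(self):
        return float(self.param) ** 2

# best, best_idx, all_powers = TEST(_DummyTest()).learn_kernel([1, 2], [3, 1], method='power')
# -> (4.0, 0, array([4., 3.]))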
|
nilq/baby-python
|
python
|
import os
from PIL import Image
import tensorflow as tf
from Fishnet import FishNets
import numpy as np
import json
def onehot(label):
n_sample = len(label)
# n_class=max(label)+1
onehot_labels = np.zeros((n_sample, 6))
onehot_labels[np.arange(n_sample), label] = 1
return onehot_labels
def read(file_list):
    # build the filename queue
x = tf.placeholder(tf.float32, [None, 224,224,3])
file_queue=tf.train.string_input_producer(file_list)
    # read and decode
reader=tf.WholeFileReader()
_,value=reader.read(file_queue)
image_a=tf.image.decode_jpeg(value,channels=3)
image=tf.image.resize_images(image_a,[224,224])
image=tf.cast(image,tf.float32)
image=tf.reshape(image,shape=[224,224,3])
    # batching
inputs=tf.train.batch([image],batch_size=22,num_threads=1,capacity=1)
network_planes = [64, 128, 256, 512, 512, 512, 384, 256, 320, 832, 1600]
num_res_blks = [2, 2, 6, 2, 1, 1, 1, 1, 2, 2]
num_trans_blks = [1, 1, 1, 1, 1, 4]
mode = FishNets(6, network_planes, num_res_blks, num_trans_blks)
value = mode(x, training=True)
va=tf.argmax(value,1)
# saver = tf.train.import_meta_graph("./tmp/train_model.ckpt")
saver=tf.train.Saver()
with tf.Session() as sess:
#model = tf.train.latest_checkpoint("./tmp")
#print(model)
# saver.recover_last_checkpoints("./tmp/train_model.ckpt")
        # initialize variables first, then restore the checkpoint; running the
        # initializers after restore would overwrite the restored weights
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        saver.restore(sess, save_path="./tmp/train_model.ckpt")
        cood = tf.train.Coordinator()
        thread = tf.train.start_queue_runners(sess=sess, coord=cood)
inputs=sess.run(inputs)
prediction,values=sess.run([va,value],feed_dict={x:inputs})
for i in range(len(file_list)):
print(prediction[i])
# result=[]
# for i in range(len(file_list)): # file_list图片地址
# disease_dict={}
# pic_file=file_list[i]
# pic_file=pic_file[8:]
# disease_dict["image_id"] = pic_file
# disease_dict["disease_class"]=int(prediction[i])+1
# result.append(disease_dict)
# with open ("./danyi.json",'w') as f:
# f.write(json.dumps(result))
# print("done")
cood.request_stop()
cood.join(thread)
filename=os.listdir("./image")
file_list=[os.path.join("./image/",file) for file in filename]
print(file_list)
a=read(file_list)
# def per_calss(imagefile):
# image=Image.open(imagefile)
# image=image.resize([227,227])
# image_array=np.array(image)
# image=tf.cast(image_array,tf.float32)
# image=tf.image.per_image_standardization(image)
# image=tf.reshape(image,shape=[1,227,227,3])
# saver=tf.train.Saver()
# with tf.Session() as sess:
# save_model=tf.train.latest_checkpoint("./tmp")
# saver.restore(sess,save_model)
# image=sess.run(image)
# prediction=sess.run(fc3,feed_dict={x:image})
# max_index=np.argmax(prediction)
# print(max_index)
# filename=os.listdir("./IDADP-PRCV2019-training/1")
# print(filename)
# file_list=[os.path.join("./dog/",file) for file in filename]
# a=per_calss(file_list)
# inputs=tf.nn.batch_normalization(inputs)
# inputs_shape = inputs.get_shape().as_list()
# batchsize, height, width, C = inputs_shape[0], inputs_shape[1], inputs_shape[2], inputs_shape[3]
# filter = tf.Variable(tf.truncated_normal([1, 1, C, 1], dtype=tf.float32, stddev=0.1), name='weights')
# filter1 = tf.Variable(tf.truncated_normal([1, 1, C, C], dtype=tf.float32, stddev=0.1), name='weights1')
# query_conv = tf.nn.conv2d(inputs, filter, strides=[1, 1, 1, 1], padding='VALID')
# print(query_conv)
# key_conv = tf.nn.conv2d(inputs, filter, strides=[1, 1, 1, 1], padding='VALID')
# print(key_conv)
# value_conv = tf.nn.conv2d(inputs, filter1, strides=[1, 1, 1, 1], padding='VALID')
# print(value_conv)
# proj_query = tf.reshape(query_conv, [batchsize, width * height, -1])
# print(proj_query)
# proj_key = tf.transpose((tf.reshape(key_conv, [batchsize, width * height, -1])), perm=[0, 2, 1])
# print(proj_key)
# energy = tf.matmul(proj_query, proj_key)
# print(energy)
# attention = tf.nn.softmax(energy)
# print(attention)
# proj_value = tf.reshape(value_conv, [batchsize, width * height, -1])
# print(proj_value)
# out = tf.matmul(attention, proj_value)
# print(out)
# out = tf.reshape(out, [batchsize, height, width, C])
# print(out)
# # out = out + inputs
|
nilq/baby-python
|
python
|
from numpy import dot, diag, ones, zeros, sqrt
from openopt.kernel.ooMisc import norm
def amsg2p(f, df, x0, epsilon, f_opt, gamma, callback = lambda x, f: False):
    # returns the optimal point and the iteration number
f0 = f(x0)
if f0 - f_opt <= epsilon: return x0, 0
x, n = x0.copy(), x0.size
df0 = df(x0)
ndf = norm(df0)
h, dzeta, p, B = gamma * (f0 - f_opt) / ndf, df0 / ndf, zeros(n), diag(ones(n, 'float64')) # TODO: add possibility to create B of type float128
k = 0
while True:
k += 1
x -= h * dot(B, dzeta)
F = f(x)
r = callback(x, F)
if r not in (0, False, None):
break # user-demanded stop
if F - f_opt <= epsilon: break
DF = df(x)
DF_dilated = dot(B.T, DF)
nDF_dilated = norm(DF_dilated)
dzeta_new, h = DF_dilated / nDF_dilated, gamma * (F-f_opt) / nDF_dilated
lambda1, lambda2 = -dot(p, dzeta_new), -dot(dzeta, dzeta_new)
c1, c2 = lambda1>0, lambda2>0
p = (lambda1 * p + lambda2 * dzeta)/sqrt(lambda1**2+lambda2**2) if c1 and c2 else dzeta if c2 and not c1 else zeros(n) if not c1 and not c2 else p
mu = dot(p, dzeta_new)
if -1 < mu < 0:
S = sqrt(1-mu**2)
nu = (1/S-1) * dzeta_new - (mu/S) * p
B += dot(dot(B, nu.reshape(n, 1)), dzeta_new.reshape(1, n))
h /= S
p = (p - mu * dzeta_new) / S
else:
p = zeros(n)
dzeta = dzeta_new
return x, k
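# Minimal self-check sketch (illustrative, not part of the original): minimize
# f(x) = <x, x>, whose optimal value f_opt = 0 is known in advance, as the
# method requires. gamma = 1.0 is an arbitrary choice for this demo.
if __name__ == '__main__':
    from numpy import array
    f = lambda x: float(dot(x, x))
    df = lambda x: 2.0 * x
    x_opt, n_iters = amsg2p(f, df, array([3.0, -4.0]), epsilon=1e-8, f_opt=0.0, gamma=1.0)
    print(x_opt, n_iters)  # x_opt should be close to the origin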
|
nilq/baby-python
|
python
|
import torch as t
import torch.nn as nn
import torch.nn.functional as f
from config import config
from torch.optim import Adam, SGD, Adagrad
from torch.autograd import Variable
from data_utils import batch_by_num
from base_model import BaseModel, BaseModule
import logging
import os
class RotatEModule(BaseModule):
def __init__(self, n_ent, n_rel, config):
super(RotatEModule, self).__init__()
sigma = 0.2
self.gamma = nn.Parameter(
t.Tensor([12.0]),
requires_grad=False
)
self.rel_re_embed = nn.Embedding(n_rel, config.dim)
self.rel_im_embed = nn.Embedding(n_rel, config.dim)
self.ent_re_embed = nn.Embedding(n_ent, config.dim)
self.ent_im_embed = nn.Embedding(n_ent, config.dim)
for param in self.parameters():
param.data.div_((config.dim / sigma ** 2) ** (1 / 6))
def forward(self, src, rel, dst):
head_ie = self.ent_im_embed(src)
head_re = self.ent_re_embed(src)
relation_ie = self.rel_im_embed(rel)
relation_re = self.rel_re_embed(rel)
tail_ie = self.ent_im_embed(dst)
tail_re = self.ent_re_embed(dst)
        re_score = head_re * relation_re - head_ie * relation_ie  # * is the element-wise (Hadamard) product
        im_score = head_re * relation_ie + head_ie * relation_re  # complex multiplication: (a+bj)*(c+dj) = (ac-bd)+(bc+ad)j
        re_score = re_score - tail_re
        im_score = im_score - tail_ie
        score = t.stack([re_score, im_score], dim = 0)  # stack real and imaginary parts along a new dim 0
        score = score.norm(dim = 0)  # L2 norm over dim 0: the modulus of the complex residual per dimension
        score = self.gamma.item() - score.sum(dim = 2)  # scalar margin gamma minus the summed distances
return score
def score(self, src, rel, dst):
return -self.forward(src, rel, dst)
def dist(self, src, rel, dst):
return -self.forward(src, rel, dst)
def prob_logit(self, src, rel, dst):
return self.forward(src, rel, dst)
class RotatE(BaseModel):
def __init__(self, n_ent, n_rel, config):
super(RotatE, self).__init__()
self.mdl = RotatEModule(n_ent, n_rel, config)
self.mdl#.cuda()
self.config = config
self.weight_decay = config.lam / config.n_batch
def pretrain(self, train_data, corrupter, tester):
src, rel, dst = train_data
n_train = len(src)
n_epoch = self.config.n_epoch
n_batch = self.config.n_batch
optimizer = Adam(self.mdl.parameters(), weight_decay=self.weight_decay)
best_perf = 0
for epoch in range(n_epoch):
epoch_loss = 0
if epoch % self.config.sample_freq == 0:
rand_idx = t.randperm(n_train)
src = src[rand_idx]
rel = rel[rand_idx]
dst = dst[rand_idx]
src_corrupted, rel_corrupted, dst_corrupted = corrupter.corrupt(src, rel, dst)
src_corrupted = src_corrupted#.cuda()
rel_corrupted = rel_corrupted#.cuda()
dst_corrupted = dst_corrupted#.cuda()
for ss, rs, ts in batch_by_num(n_batch, src_corrupted, rel_corrupted, dst_corrupted, n_sample=n_train):
self.mdl.zero_grad()
label = t.zeros(len(ss)).type(t.LongTensor)#.cuda()
loss = t.sum(self.mdl.softmax_loss(Variable(ss), Variable(rs), Variable(ts), label))
loss.backward()
optimizer.step()
epoch_loss += loss.data[0]
logging.info('Epoch %d/%d, Loss=%f', epoch + 1, n_epoch, epoch_loss / n_train)
if (epoch + 1) % self.config.epoch_per_test == 0:
test_perf = tester()
if test_perf > best_perf:
self.save(os.path.join(config().task.dir, self.config.model_file))
best_perf = test_perf
return best_perf
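# Illustrative sketch (not part of the original classes): the complex-
# multiplication scoring used in RotatEModule.forward, shown standalone on
# random embeddings (batch of 3 triples, embedding dim 4).
def _complex_score_demo(batch=3, dim=4):
    h_re, h_im = t.randn(batch, dim), t.randn(batch, dim)
    r_re, r_im = t.randn(batch, dim), t.randn(batch, dim)
    tl_re, tl_im = t.randn(batch, dim), t.randn(batch, dim)
    re = h_re * r_re - h_im * r_im - tl_re  # real part of h*r - tail
    im = h_re * r_im + h_im * r_re - tl_im  # imaginary part of h*r - tail
    dist = t.stack([re, im], dim=0).norm(dim=0)  # modulus per dimension
    return 12.0 - dist.sum(dim=-1)  # margin minus distance; higher = better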
|
nilq/baby-python
|
python
|
import math
import numpy as np
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from src.datasets.utility import find_sub_list
from src.scripts.tools.utility import get_device
class Embedder(torch.nn.Module):
def __init__(self, vocab, config):
super().__init__()
self.scale_grad = config['scale_emb_grad_by_freq']
self.embedding_dim = vocab.vectors.shape[1]
self.embeddings = torch.nn.Embedding(len(vocab), self.embedding_dim, scale_grad_by_freq=self.scale_grad)
self.embeddings.weight.data.copy_(vocab.vectors)
self.embeddings.weight.requires_grad = False
self.vocab = vocab
logging.info(f"Optimize embeddings = {config['optimize_embeddings']}")
logging.info(f"Scale grad by freq: {self.scale_grad}")
logging.info(f"Vocabulary size = {len(vocab.vectors)}")
def forward(self, input):
return self.embeddings(input)
class CharEmbedder(nn.Module):
def __init__(self, config, vocab):
super().__init__()
self.embeddings = nn.Embedding(len(vocab), config["char_embedding_size"], padding_idx=1)
self.embeddings.weight.data.uniform_(-0.001, 0.001)
self.dropout = nn.Dropout(p=config["dropout_rate"])
self.vocab = vocab
self.char_conv = nn.Conv2d(1, # input channels
config["char_channel_size"], # output channels
(config["char_embedding_size"], config["char_channel_width"]) # kernel size
)
def forward(self, input):
"""
:param x: (batch, seq_len, word_len)
:return: (batch, seq_len, char_channel_size)
"""
batch_size = input.size(0)
word_len = input.shape[-1]
# (batch, seq_len, word_len, char_dim)
x = self.dropout(self.embeddings(input))
char_dim = x.shape[-1]
# (batch * seq_len, 1, char_dim, word_len)
x = x.view(-1, char_dim, word_len).unsqueeze(1)
# (batch * seq_len, char_channel_size, conv_len)
x = self.char_conv(x).squeeze(-2)
# (batch * seq_len, char_channel_size)
x = F.max_pool1d(x, x.size(2)).squeeze(-1)
# (batch, seq_len, char_channel_size)
x = x.view(batch_size, -1, x.shape[-1])
return x
class HighwayNetwork(nn.Module):
def __init__(self, config):
super().__init__()
self.layers = config["highway_layers"]
dim = config["highway_dim1"] + config["highway_dim2"]
for i in range(self.layers):
setattr(self, f'highway_linear{i}',
nn.Sequential(nn.Linear(dim, dim),
nn.ReLU()))
gate = nn.Linear(dim, dim)
# We should bias the highway layer to just carry its input forward when training starts.
# We do that by setting the bias on gate affine transformation to be positive, because
# that means `g` will be biased to be high, so we will carry the input forward.
# The bias on `B(x)` is the second half of the bias vector in each Linear layer.
gate.bias.data.fill_(1)
setattr(self, f'highway_gate{i}',
nn.Sequential(gate,
nn.Sigmoid()))
def forward(self, x1, x2):
x = torch.cat([x1, x2], dim=-1)
for i in range(self.layers):
h = getattr(self, f'highway_linear{i}')(x)
g = getattr(self, f'highway_gate{i}')(x)
x = (1 - g) * h + g * x
return x
class Encoder(torch.nn.Module):
def __init__(self, config):
super(Encoder, self).__init__()
self.config = config
def get_output_dim(self):
raise NotImplementedError("Objects need to implement this method!")
class RNN(Encoder):
def __init__(self, config):
super(RNN, self).__init__(config)
self.rnn = None
def forward(self, inp, lengths=None, padding_value=0., batch_first=True):
"""
:param inp: Shape BATCH_SIZE x LEN x H_DIM
"""
if lengths is None:
outp = self.rnn(inp)[0]
else:
sequence_len = inp.shape[1]
inp_packed = pack_padded_sequence(inp, lengths, batch_first=batch_first, enforce_sorted=False)
outp_packed = self.rnn(inp_packed)[0]
outp, output_lengths = pad_packed_sequence(outp_packed, batch_first=batch_first,
padding_value=padding_value, total_length=sequence_len)
return outp
def get_output_dim(self):
return self.output_dim
class BiLSTM(RNN):
def __init__(self, config):
super().__init__(config)
self.hidden_size = config['RNN_nhidden']
self.layers = config['RNN_layers']
self.rnn = torch.nn.LSTM(
config["RNN_input_dim"],
self.hidden_size, self.layers,
dropout=float(config['dropout_rate']),
batch_first=True,
bidirectional=True)
self.output_dim = config['RNN_nhidden'] * 2
class LSTM(RNN):
def __init__(self, config, init_hidden=None):
super().__init__(config)
self.hidden_size = config['RNN_nhidden']
self.layers = config['RNN_layers']
self.rnn = torch.nn.LSTM(
config["RNN_input_dim"],
self.hidden_size, self.layers,
dropout=config['dropout_rate'],
batch_first=True,
bidirectional=False)
self.output_dim = config['RNN_nhidden']
# @profile
def combine_surface_forms(valid_span_probabilities, batch_size, hacks, p_to_rerank, passage_length, score, pad_token=0):
if score == "logprobs":
        # NOTE: a sentinel token is automatically assumed in this case
        # pre-softmax class score = log(P_class) + K
        # save K, then turn the scores into probabilities
K = torch.FloatTensor(
np.nanmax((valid_span_probabilities - torch.log_softmax(valid_span_probabilities, -1)).cpu().numpy(), -1)) \
.to(
valid_span_probabilities.get_device() if valid_span_probabilities.get_device() >= 0 else torch.device(
"cpu"))
valid_span_probabilities = F.softmax(valid_span_probabilities, dim=-1)
valid_span_probabilities = valid_span_probabilities.view(batch_size, passage_length, passage_length)
valid_document_probabilities = valid_span_probabilities[:, 1:, 1:]
valid_document_probabilities = valid_document_probabilities.reshape(batch_size, -1)
passage_length -= 1
else:
valid_document_probabilities = valid_span_probabilities
# Re-ranking top-N based on surface form
sorted_scores, indices = torch.sort(valid_document_probabilities, dim=-1, descending=True)
span_start_indices = indices // (passage_length)
span_end_indices = indices % (passage_length)
N = p_to_rerank # top-N surface form reranking
sorted_scores, span_start_indices, span_end_indices = sorted_scores[:, :N], \
span_start_indices[:, :N], \
span_end_indices[:, :N]
if type(hacks["combine_surface_forms"][1]) == torch.Tensor:
hacks["combine_surface_forms"] = hacks["combine_surface_forms"][0], \
hacks["combine_surface_forms"][1].tolist()
### Casting to python floats may produce slightly different results, due to FP instability, e.g.:
# 28.7.2020, changed to pytorch vectorized addition
# ---------------------------------------------------------------------------------------
# Python floats
# 3.158890103804879e-05 + 2.225152506696304e-09
# returns 3.1591126190555485e-05
# ---------------------------------------------------------------------------------------
# Pytorch vectorized addition of floats
# (torch.Tensor([3.158890103804879e-05]) + torch.Tensor([2.225152506696304e-09]) ).item()
# returns 3.159112748107873e-05
# valid_document_probabilities_list = valid_document_probabilities.tolist()
valid_document_probabilities_list = valid_document_probabilities
for i in range(len(span_start_indices)):
bool_arr_processed = [[False for _ in range(passage_length)] for _ in range(passage_length)]
for a, e in zip(span_start_indices[i].tolist(), span_end_indices[i].tolist()):
if bool_arr_processed[a][e]:
continue
# HERE assuming 0 in the pad token
if hacks["combine_surface_forms"][1][i][a:e + 1] == [pad_token]:
continue
# OLD
# processed.append((a, e)) # do not adjust value of other spans with this span
bool_arr_processed[a][e] = True
span_occurences = find_sub_list(hacks["combine_surface_forms"][1][i][a:e + 1],
hacks["combine_surface_forms"][1][i])
if len(span_occurences) > 1:
for span in span_occurences:
if bool_arr_processed[span[0]][span[1]]:
continue
bool_arr_processed[span[0]][span[1]] = True
valid_document_probabilities_list[i][a * passage_length + e] += \
valid_document_probabilities_list[i][span[0] * passage_length + span[1]]
valid_document_probabilities_list[i][span[0] * passage_length + span[1]] = 0.
# valid_document_probabilities = torch.FloatTensor(valid_document_probabilities_list)
valid_document_probabilities = valid_document_probabilities_list
if score == "logprobs":
# turn back into pre-softmax scores
valid_span_probabilities[:, 1:, 1:] = valid_document_probabilities.view(batch_size, passage_length,
passage_length)
valid_span_probabilities = valid_span_probabilities.view(batch_size, -1)
valid_span_probabilities += K.unsqueeze(-1)
return valid_span_probabilities
class SpanPredictionModule(nn.Module):
def predict(self, batch):
start_pred_logits, end_pred_logits = self(batch)
start_pred, end_pred = torch.nn.functional.softmax(start_pred_logits, dim=1), torch.nn.functional.softmax(
end_pred_logits, dim=1)
return self.decode(start_pred, end_pred)
@staticmethod
def decode(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor, has_sentinel=False, score="logprobs") -> \
(torch.Tensor, torch.Tensor):
"""
This method has been borrowed from AllenNLP
:param span_start_logits:
:param span_end_logits:
:return:
"""
# We call the inputs "logits" - they could either be unnormalized logits or normalized log
# probabilities. A log_softmax operation is a constant shifting of the entire logit
# vector, so taking an argmax over either one gives the same result.
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
device = span_start_logits.device
# (batch_size, passage_length, passage_length)
span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
# if first token is sentinel, class, combinations (0,x) and (x,0); x!=0 are invalid
# mask these
if has_sentinel:
span_log_probs[:, 1:, 0] = -math.inf
span_log_probs[:, 0, 1:] = -math.inf
# Only the upper triangle of the span matrix is valid; the lower triangle has entries where
# the span ends before it starts.
span_log_mask = torch.triu(torch.ones((passage_length, passage_length),
device=device)).log().unsqueeze(0)
valid_span_log_probs = span_log_probs + span_log_mask
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
valid_span_log_probs = valid_span_log_probs.view(batch_size, -1)
if score == "probs":
valid_span_scores = F.softmax(valid_span_log_probs, dim=-1)
elif score == "logprobs":
valid_span_scores = valid_span_log_probs
else:
            raise NotImplementedError(f"Unknown score type \"{score}\"")
best_span_scores, best_spans = valid_span_scores.max(-1)
span_start_indices = best_spans // passage_length
span_end_indices = best_spans % passage_length
return best_span_scores, (span_start_indices, span_end_indices)
@staticmethod
def decode_wth_hacks(span_start_logits: torch.Tensor,
span_end_logits: torch.Tensor,
score="logprobs",
p_to_rerank=100,
has_sentinel=False,
hacks={
"max_answer_length": 30,
"combine_surface_forms": (False, None)
}) -> \
(torch.Tensor, torch.Tensor):
"""
This method has been borrowed from AllenNLP
:param span_start_logits:
:param span_end_logits:
:return:
"""
# We call the inputs "logits" - they could either be unnormalized logits or normalized log
# probabilities. A log_softmax operation is a constant shifting of the entire logit
# vector, so taking an argmax over either one gives the same result.
if "combine_surface_forms" not in hacks:
hacks["combine_surface_forms"] = (False, None)
if hacks["combine_surface_forms"][0]:
assert hacks["combine_surface_forms"][1] is not None
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, passage_length)")
batch_size, passage_length = span_start_logits.size()
device = span_start_logits.device
# (batch_size, passage_length, passage_length)
span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
# if first token is sentinel, class, combinations (0,x) and (x,0); x!=0 are invalid
# mask these
if has_sentinel:
span_log_probs[:, 1:, 0] = -math.inf
span_log_probs[:, 0, 1:] = -math.inf
# Only the upper triangle of the span matrix is valid; the lower triangle has entries where
# the span ends before it starts.
span_log_mask = torch.triu(torch.ones((passage_length, passage_length),
device=device)).log().unsqueeze(0)
valid_span_log_probs = span_log_probs + span_log_mask
spans_longer_than_maxlen_mask = torch.Tensor([[j - i + 1 > hacks["max_answer_length"]
for j in range(passage_length)] for i in range(passage_length)]) \
.to(valid_span_log_probs.get_device() if valid_span_log_probs.get_device() >= 0 else torch.device("cpu"))
valid_span_log_probs.masked_fill_(spans_longer_than_maxlen_mask.unsqueeze(0).bool(), -math.inf)
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
valid_span_log_probs = valid_span_log_probs.view(batch_size, -1)
if score == "probs":
valid_span_scores = F.softmax(valid_span_log_probs, dim=-1)
elif score == "logprobs":
valid_span_scores = valid_span_log_probs
else:
            raise NotImplementedError(f"Unknown score type \"{score}\"")
if hacks["combine_surface_forms"][0]:
assert not (score == "probs" and has_sentinel), \
"Not a supported variant - probability decoding + has_sentinel"
pad_token_id = 0
if len(hacks["combine_surface_forms"]) == 3:
pad_token_id = hacks["combine_surface_forms"][-1]
valid_span_scores = combine_surface_forms(valid_span_scores,
batch_size, hacks,
p_to_rerank, passage_length,
score, pad_token=pad_token_id)
best_span_scores, best_spans = valid_span_scores.max(-1)
span_start_indices = best_spans // passage_length
span_end_indices = best_spans % passage_length
return best_span_scores, (span_start_indices, span_end_indices)
@staticmethod
def decode_topN_joint(valid_span_log_probs: torch.Tensor, N: int = 100) -> \
Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
batch_size = valid_span_log_probs.shape[0]
passage_length = valid_span_log_probs.shape[1]
# Addition in log-domain = multiplication in real domain
# This will create a matrix containing addition of each span_start_logit with span_end_logit
# (batch_size, passage_length, passage_length)
span_log_probs = valid_span_log_probs
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
# valid_span_log_probs is a vector [s_00,s_01,...,s_0n,s10,s11,...,s1n, ... , sn0,sn1,..., snn] of span scores
# e.g. s_01 is a score of answer span from token 0 to token 1
valid_span_log_probs = valid_span_log_probs.view(batch_size, -1) # see image above, part 2.
# Turn all the log-probabilities into probabilities
valid_span_probs = F.softmax(valid_span_log_probs, dim=-1)
sorted_probs, indices = torch.sort(valid_span_probs, dim=-1, descending=True)
# best_span_probs of shape batch_size now contains all probabilities for each best span in the batch
# best_spans of shape batch_size now contains argmaxes of each answer from unrolled sequence valid_span_log_probs
span_start_indices = indices // passage_length
span_end_indices = indices % passage_length
# return just N best
return sorted_probs[:, :N], (span_start_indices[:, :N], span_end_indices[:, :N])
@staticmethod
def decode_topN_joint_wth_hacks(valid_span_log_probs: torch.Tensor, N: int = 100, score="probs", p_to_rerank=100,
has_sentinel=False,
hacks={
"max_answer_length": 30,
"combine_surface_forms": (False, None)
}) -> \
Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
This method has been borrowed from AllenNLP
:param valid_span_log_probs:
:return:
"""
if "combine_surface_forms" not in hacks:
hacks["combine_surface_forms"] = (False, None)
if hacks["combine_surface_forms"][0]:
assert hacks["combine_surface_forms"][1] is not None
batch_size = valid_span_log_probs.shape[0]
passage_length = valid_span_log_probs.shape[1]
if has_sentinel:
valid_span_log_probs[:, 1:, 0] = -math.inf
valid_span_log_probs[:, 0, 1:] = -math.inf
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
spans_longer_than_maxlen_mask = torch.Tensor([[j - i + 1 > hacks["max_answer_length"]
for j in range(passage_length)] for i in range(passage_length)]) \
.to(get_device(valid_span_log_probs))
valid_span_log_probs.masked_fill_(spans_longer_than_maxlen_mask.unsqueeze(0).bool(), -math.inf)
valid_span_log_probs = valid_span_log_probs.view(batch_size, -1)
if score == "probs":
valid_span_scores = F.softmax(valid_span_log_probs, dim=-1)
elif score == "logprobs":
valid_span_scores = valid_span_log_probs
else:
            raise NotImplementedError(f"Unknown score type \"{score}\"")
if hacks["combine_surface_forms"][0]:
            assert not (score == "probs" and has_sentinel), \
                "Not a supported variant - probability decoding + has_sentinel"
pad_token_id = 0
if len(hacks["combine_surface_forms"]) == 3:
pad_token_id = hacks["combine_surface_forms"][-1]
valid_span_scores = combine_surface_forms(valid_span_scores,
batch_size, hacks,
p_to_rerank, passage_length,
score, pad_token=pad_token_id)
sorted_probs, indices = torch.topk(valid_span_scores, k=N, dim=-1, largest=True)
# best_span_probs of shape batch_size now contains topk probabilities for each best span in the batch
# best_spans of shape batch_size now contains argmaxes of topk answers from unrolled sequence valid_span_log_probs
span_start_indices = indices // passage_length
span_end_indices = indices % passage_length
return sorted_probs, (span_start_indices, span_end_indices)
@staticmethod
def decode_topN(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor, N: int = 100) -> \
Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
This method has been borrowed from AllenNLP
:param span_start_logits: unnormalized start log probabilities
:param span_end_logits: unnormalized end log probabilities
:return:
"""
# We call the inputs "logits" - they could either be unnormalized logits or normalized log
# probabilities. A log_softmax operation is a constant shifting of the entire logit
# vector, so taking an argmax over either one gives the same result.
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, document_length)")
batch_size, passage_length = span_start_logits.size()
device = span_start_logits.device
# span_start_logits.unsqueeze(2) has shape:
# (batch_size, passage_length, 1)
# span_end_logits.unsqueeze(1) has shape:
# (batch_size, 1, passage_length)
# Addition in log-domain = multiplication in real domain
# This will create a matrix containing addition of each span_start_logit with span_end_logit
# (batch_size, passage_length, passage_length)
span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
# Only the upper triangle of the span matrix is valid; the lower triangle has entries where
# the span ends before it starts. We will mask these values out
span_log_mask = torch.triu(torch.ones((passage_length, passage_length),
device=device)).log().unsqueeze(0)
# The mask will look like this
# 0000000
# X000000
# XX00000
# XXX0000
# XXXX000
# XXXXX00
# XXXXXX0
# where X are -infinity
valid_span_log_probs = span_log_probs + span_log_mask # see image above, part 1.
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
# valid_span_log_probs is a vector [s_00,s_01,...,s_0n,s10,s11,...,s1n, ... , sn0,sn1,..., snn] of span scores
# e.g. s_01 is a score of answer span from token 0 to token 1
valid_span_log_probs = valid_span_log_probs.view(batch_size, -1) # see image above, part 2.
# Turn all the log-probabilities into probabilities
valid_span_probs = F.softmax(valid_span_log_probs, dim=-1)
sorted_probs, indices = torch.sort(valid_span_probs, dim=-1, descending=True)
# best_span_probs of shape batch_size now contains all probabilities for each best span in the batch
# best_spans of shape batch_size now contains argmaxes of each answer from unrolled sequence valid_span_log_probs
span_start_indices = indices // passage_length
span_end_indices = indices % passage_length
# return just N best
return sorted_probs[:, :N], (span_start_indices[:, :N], span_end_indices[:, :N])
@staticmethod
def decode_topN_with_hacks(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor, *args, **kwargs):
if span_start_logits.dim() != 2 or span_end_logits.dim() != 2:
raise ValueError("Input shapes must be (batch_size, document_length)")
batch_size, passage_length = span_start_logits.size()
device = span_start_logits.device
# span_start_logits.unsqueeze(2) has shape:
# (batch_size, passage_length, 1)
# span_end_logits.unsqueeze(1) has shape:
# (batch_size, 1, passage_length)
# Addition in log-domain = multiplication in real domain
# This will create a matrix containing addition of each span_start_logit with span_end_logit
# (batch_size, passage_length, passage_length)
span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1)
# Only the upper triangle of the span matrix is valid; the lower triangle has entries where
# the span ends before it starts. We will mask these values out
span_log_mask = torch.triu(torch.ones((passage_length, passage_length),
device=device)).log().unsqueeze(0)
valid_span_log_probs = span_log_probs + span_log_mask
return SpanPredictionModule.decode_topN_joint_wth_hacks(valid_span_log_probs, *args, **kwargs)
@staticmethod
def decode_joint(valid_span_log_probs: torch.Tensor, score="probs", has_sentinel=False) -> \
(torch.Tensor, torch.Tensor):
batch_size = valid_span_log_probs.shape[0]
passage_length = valid_span_log_probs.shape[1]
# if first token is sentinel, class, combinations (0,x) and (x,0); x!=0 are invalid
# mask these
if has_sentinel:
valid_span_log_probs[:, 1:, 0] = -math.inf
valid_span_log_probs[:, 0, 1:] = -math.inf
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
valid_span_log_probs = valid_span_log_probs.view(batch_size, -1)
if score == "probs":
valid_span_scores = F.softmax(valid_span_log_probs, dim=-1)
elif score == "logprobs":
valid_span_scores = valid_span_log_probs
else:
            raise NotImplementedError(f"Unknown score type \"{score}\"")
best_span_scores, best_spans = valid_span_scores.max(-1)
span_start_indices = best_spans // passage_length
span_end_indices = best_spans % passage_length
return best_span_scores, (span_start_indices, span_end_indices)
@staticmethod
def decode_joint_with_hacks(valid_span_log_probs: torch.Tensor, score="probs", p_to_rerank=100, has_sentinel=False,
hacks={
"max_answer_length": 30,
"combine_surface_forms": (False, None)
}) -> (torch.Tensor, torch.Tensor):
"""
This method has been borrowed from AllenNLP
:param valid_span_log_probs:
:return:
"""
if "combine_surface_forms" not in hacks:
hacks["combine_surface_forms"] = (False, None)
if hacks["combine_surface_forms"][0]:
assert hacks["combine_surface_forms"][1] is not None
batch_size = valid_span_log_probs.shape[0]
passage_length = valid_span_log_probs.shape[1]
# if first token is sentinel, class, combinations (0,x) and (x,0); x!=0 are invalid
# mask these
if has_sentinel:
valid_span_log_probs[:, 1:, 0] = -math.inf
valid_span_log_probs[:, 0, 1:] = -math.inf
# Here we take the span matrix and flatten it, then find the best span using argmax. We
# can recover the start and end indices from this flattened list using simple modular
# arithmetic.
# (batch_size, passage_length * passage_length)
spans_longer_than_maxlen_mask = torch.Tensor([[j - i + 1 > hacks["max_answer_length"]
for j in range(passage_length)] for i in range(passage_length)]) \
.to(valid_span_log_probs.get_device() if valid_span_log_probs.get_device() >= 0 else torch.device("cpu"))
valid_span_log_probs.masked_fill_(spans_longer_than_maxlen_mask.unsqueeze(0).bool(), -math.inf)
valid_span_log_probs = valid_span_log_probs.view(batch_size, -1)
if score == "probs":
valid_span_scores = F.softmax(valid_span_log_probs, dim=-1)
elif score == "logprobs":
valid_span_scores = valid_span_log_probs
else:
            raise NotImplementedError(f"Unknown score type \"{score}\"")
if hacks["combine_surface_forms"][0]:
            assert not (score == "probs" and has_sentinel), \
                "Not a supported variant - probability decoding + has_sentinel"
pad_token_id = 0
if len(hacks["combine_surface_forms"]) == 3:
pad_token_id = hacks["combine_surface_forms"][-1]
valid_span_scores = combine_surface_forms(valid_span_scores,
batch_size, hacks,
p_to_rerank, passage_length,
score, pad_token=pad_token_id)
best_span_scores, best_spans = valid_span_scores.max(-1)
span_start_indices = best_spans // passage_length
span_end_indices = best_spans % passage_length
return best_span_scores, (span_start_indices, span_end_indices)
@staticmethod
def decode_conditional(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor, top_k_start_positions,
beam_search_bestn, max_answer_length) -> \
(torch.Tensor, torch.Tensor):
best_starts = []
best_ends = []
span_scores = []
max_n = []
for i, batch in enumerate(span_end_logits):
best_starts_for_b = torch.empty([beam_search_bestn, beam_search_bestn], dtype=torch.int)
best_ends_for_b = torch.empty([beam_search_bestn, beam_search_bestn], dtype=torch.int)
span_scores_for_b = torch.empty([beam_search_bestn, beam_search_bestn], dtype=torch.float)
# iteration over top n start logits
max_prob = float("-inf")
max_n.append(0)
for n, option in enumerate(span_end_logits[i]):
end_logits_softmax = torch.nn.functional.softmax(span_end_logits[i][n], dim=-1)
try:
start_logits_softmax = torch.nn.functional.softmax(span_start_logits[i], dim=-1)[
top_k_start_positions[i][n]]
except IndexError as e:
print(e)
break
total_prob = end_logits_softmax.max(-1)[0] + start_logits_softmax
if total_prob > max_prob:
max_prob = total_prob
max_n[i] = n
best_starts_for_b[n] = top_k_start_positions[i][n].repeat(beam_search_bestn)
best_ends_for_b[n] = torch.topk(span_end_logits[i][n], beam_search_bestn).indices
for j, be in enumerate(best_ends_for_b[n]):
span_scores_for_b[j] = torch.topk(end_logits_softmax, beam_search_bestn).values[
j] + start_logits_softmax
span_scores.append([float(s) for s in torch.flatten(span_scores_for_b)])
best_starts.append([int(s) for s in torch.flatten(best_starts_for_b)])
best_ends.append([int(e) for e in torch.flatten(best_ends_for_b)])
start_indexes, end_indexes, best_span_scores, logprobs_S, logprobs_E0, logprobs_Emax = \
best_starts, best_ends, span_scores, span_start_logits, span_end_logits[:, 0, :], \
span_end_logits[torch.arange(span_end_logits.size(0)), max_n, :]
best_scores_f = []
start_indexes_f = []
end_indexes_f = []
for sib, eib, ssb in zip(start_indexes, end_indexes, best_span_scores):
scores_l = []
end_l = []
start_l = []
for si, ei, ss in zip(sib, eib, ssb):
if ei - si <= max_answer_length and ei >= si:
scores_l.append(ss)
end_l.append(ei)
start_l.append(si)
best_scores_f.append(scores_l)
start_indexes_f.append(start_l)
end_indexes_f.append(end_l)
padded_S = torch.zeros(logprobs_E0.shape)
padded_S[:logprobs_S.shape[0], :] = logprobs_S
logprobs_S = padded_S
return logprobs_S, logprobs_E0, logprobs_Emax, best_scores_f, start_indexes_f, end_indexes_f
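# Illustrative self-check (not part of the original class): decode() over
# random logits returns one best span per batch element; the upper-triangular
# mask guarantees start <= end.
if __name__ == '__main__':
    _starts, _ends = torch.randn(2, 12), torch.randn(2, 12)
    _scores, (_s_idx, _e_idx) = SpanPredictionModule.decode(_starts, _ends, score="probs")
    assert bool((_s_idx <= _e_idx).all())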
|
nilq/baby-python
|
python
|
# run tests to check coverage
import os
import asyncio
import discord as dpy
import prettify_exceptions
prettify_exceptions.hook()
import viper
from viper.exts import discord
basic_test = os.path.join("tests", "test_script.vp")
discordpy_test = os.path.join("tests", "discordpy_script_test.vp")
loop = asyncio.get_event_loop()
loop.run_until_complete(viper.eval_file(basic_test)) # run the basic script
class MockDpyObject:
def __init__(self, **kwargs):
for name, item in kwargs.items():
setattr(self, name, item)
class MockDpyContext:
def __init__(self):
async def error(*args):
print("SENDS: ", *args)
return self.message
self.send = error
self.author = usr = MockDpyObject(
name="Danny",
nick=None,
discriminator="0007",
id=123456,
send=error,
mention="<@!123456>"
)
self.me = MockDpyObject(
name="OAuth2 Sucks",
nick=None,
discriminator="3136",
id=168930860739985410,
send=error,
mention="<@!168930860739985410>"
)
self.guild = guild = MockDpyObject(
name="Discord.py",
member_count=123,
description="Discord.py Guild",
id=336642139381301249,
owner=usr,
get_member = lambda i: None,
get_member_name = lambda n: None
)
self.author.guild = guild
self.me.guild = guild
self.channel = channel = MockDpyObject(
id=336642776609456130,
name="General",
guild=guild,
is_nsfw=lambda: False,
is_news=lambda: False,
mention="<#336642776609456130>",
topic="Ahhh",
send=error
)
self.guild.text_channels = [channel]
self.guild.get_channel = lambda i: channel
self.message = MockDpyObject(
content="Hi there",
guild=guild,
channel=channel,
clean_content="Hi there",
flags=None,
jump_url="discord.com/url",
author=usr
)
runner = viper.Runtime()
loop.run_until_complete(viper.eval_file(discordpy_test, injected={"ctx": discord.SafeAccessContext(runner, MockDpyContext())}, runtime=runner))
|
nilq/baby-python
|
python
|
from flask import Flask
from flask_sslify import SSLify
app = Flask(__name__)
app.config.from_object('config')
sslify = SSLify(app)
from jawfish import views
|
nilq/baby-python
|
python
|
"""
A script that processes the Qualitivity XML files and creates CSV files of extracted data.
"""
import argparse
import os
import sys
from xml.etree import ElementTree
import numpy as np
import pandas as pd
# data frame columns
columns = ['Record ID', 'Segment ID', 'Total pause duration_300', 'Pause count_300',
'Total pause duration_500', 'Pause count_500', 'Total pause duration_1s', 'Pause count_1s',
'Keystrokes', 'Active ms', 'Record duration', 'Total duration']
# date time format used in the XML
DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
def normalize_attribute(root):
""" Make all the attributes lower case since the source XML files are not consistent. """
    # iterate over a copy of the items: we mutate root.attrib inside the loop
    for attr, value in list(root.attrib.items()):
        norm_attr = attr.lower()
        if norm_attr != attr:
            root.set(norm_attr, value)
            root.attrib.pop(attr)
for child in root:
normalize_attribute(child)
def create_pause_counts_dict():
""" Dictionary that will hold our pause count and duration value for a <Record/> element in the XML."""
return {
'duration_300': 0,
'count_300': 0,
'duration_500': 0,
'count_500': 0,
'duration_1000': 0,
'count_1000': 0,
'total_pause_ms': 0,
'total_duration': 0
}
def ms(val):
""" Turn a float value into milliseconds as an integer. """
return int(val * 1000)
def categorize_pause(counts, pause_ms):
"""
The method that updates the count and duration values.
:param counts: the dict that holds our pause count and duration values.
:param pause_ms: the pause in milliseconds
:return: None.
"""
if pause_ms >= 300:
counts['duration_300'] += pause_ms
counts['count_300'] += 1
if pause_ms >= 500:
counts['duration_500'] += pause_ms
counts['count_500'] += 1
if pause_ms >= 1000:
counts['duration_1000'] += pause_ms
counts['count_1000'] += 1
counts['total_duration'] += pause_ms
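def _categorize_pause_demo():
    """ Illustrative self-check (not in the original): pauses fall into
    overlapping >=300ms / >=500ms / >=1s buckets. """
    counts = create_pause_counts_dict()
    for pause_ms in (250, 400, 800, 1500):
        categorize_pause(counts, pause_ms)
    # 400/800/1500 are >=300ms, 800/1500 are >=500ms, only 1500 is >=1s
    assert (counts['count_300'], counts['count_500'], counts['count_1000']) == (3, 2, 1)
    assert counts['total_duration'] == 250 + 400 + 800 + 1500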
def valid_keystroke(keystroke):
""" Are we dealing with a valid keystroke? False if its a 'system' keystroke. """
if keystroke.attrib['origin'] and keystroke.attrib['system'] and not keystroke.attrib['key']:
return False
elif not keystroke.attrib['selection'] and not keystroke.attrib['text'] and not keystroke.attrib['key'] and \
keystroke.attrib['shift'] == 'False' and keystroke.attrib['ctrl'] == 'False' \
and keystroke.attrib['alt'] == 'False':
return False
else:
return True
def process_file(xml_input):
"""
The method that updates the count and duration values.
:param xml_input: the XML file to be processes.
:return: a pandas data frame of data extracted from the xml.
"""
# empty data structure for the data
categorized_data = []
# keep track of all pauses
all_pauses_data = []
if not os.path.isfile(xml_input):
raise ValueError('{} is not a file'.format(xml_input))
# parse the document and get the root element
doc_tree = ElementTree.parse(xml_input)
root = doc_tree.getroot()
# make attributes lower case - source XML not consistent
normalize_attribute(root)
# find all the <Record/> elements
records = root.findall('.//Document/Record')
    # go through the records, each will be a row in the CSV file
for record in records:
# get the date/time that the record data started
record_started = record.attrib['started']
record_started_dt = np.datetime64(record_started)
# get the date/time that the record data stopped
record_ended = record.attrib['stopped']
record_ended_dt = np.datetime64(record_ended)
# calculate the duration of the work on the record in milliseconds
duration_dt = record_ended_dt - record_started_dt
duration_ms = duration_dt.astype(int)
# we track 'milestones', i.e. where the last operation ended
last_milestone = record_started_dt
# values we want from the <Record/> attribute
record_id = record.attrib['id']
segment_id = record.attrib['segmentid']
active_ms = record.attrib['activemiliseconds']
# calculate pauses
pause_counts = create_pause_counts_dict()
# get all the keystrokes for a record
keystrokes = record.findall('.//ks')
# count all the keystrokes
keystrokes_count = len(keystrokes)
valid_keystroke_count = 0
if keystrokes_count == 0:
categorize_pause(pause_counts, duration_ms)
all_pauses_data.append([record_id, segment_id, duration_ms, 'No ks'])
elif keystrokes_count == 1 and not valid_keystroke(keystrokes[0]):
categorize_pause(pause_counts, duration_ms)
all_pauses_data.append([record_id, segment_id, duration_ms, '1 system ks omitted'])
keystrokes_count = 0
else:
# iterate over the keystrokes to calculate pauses
for ks in keystrokes:
# filter out 'system' keystrokes
if valid_keystroke(ks):
# keep track of valid keystrokes
valid_keystroke_count += 1
created = ks.attrib['created']
created_dt = np.datetime64(created)
diff = created_dt - last_milestone
diff_ms = diff.astype(int)
last_milestone = created_dt
# categorise
categorize_pause(pause_counts, diff_ms)
# not categorised, for the audit
all_pauses_data.append([record_id, segment_id, diff_ms, ''])
else:
all_pauses_data.append([record_id, segment_id, None, 'Omitted ks'])
if valid_keystroke_count > 0:
# calculate the pause between the last keystroke and when the record stopped.
last_pause_dt = record_ended_dt - last_milestone
last_pause_ms = last_pause_dt.astype(int)
categorize_pause(pause_counts, last_pause_ms)
all_pauses_data.append([record_id, segment_id, last_pause_ms, ''])
keystrokes_count = valid_keystroke_count
# create a row of data
row = [record_id, segment_id, pause_counts['duration_300'], pause_counts['count_300'],
pause_counts['duration_500'], pause_counts['count_500'], pause_counts['duration_1000'],
pause_counts['count_1000'], keystrokes_count, active_ms, duration_ms,
pause_counts['total_duration']]
# append to 2d array
categorized_data.append(row)
# create pandas data frames
df = pd.DataFrame(data=categorized_data, columns=columns)
all_df = pd.DataFrame(data=all_pauses_data, columns=['Record ID', 'Segment ID', 'Pause durations', 'Notes'])
return df, all_df
def process(input_dir, output_dir, combine):
"""
    Process a folder of XML files and create a folder of CSV files, or a single file with the combined results.
    :param input_dir: input directory with the source XML files.
    :param output_dir: output directory to save the CSV files.
    :param combine: boolean, True to combine the results into combined CSV files, False to create separate CSV files
    for each XML file.
    :return: None. The CSV files are written to output_dir.
"""
# holds data frames if we are combining
# into a single output file
combine_df = []
all_data_combined_df = []
omitted_combined_df = []
# check we have an input folder
if not os.path.isdir(input_dir):
print('Input is not a folder. Exiting')
sys.exit(1)
# check we have an output folder
if not os.path.isdir(output_dir):
print('Output is not a folder, creating it.')
os.makedirs(output_dir)
# walk the directory looking for files
for root, dirs, files in os.walk(input_dir):
# iterate the files
for file in files:
# we are interested in xml files
if file.endswith('.xml'):
# process the file and create a data frame
input_file = os.path.join(root, file)
df, all_df = process_file(input_file)
# if we are combining, we want the filename in the data (first column).
# add the data frame to our temporary array
if combine:
df.insert(0, 'File', file)
all_df.insert(0, 'File', file)
combine_df.append(df)
all_data_combined_df.append(all_df)
else:
# not combining, so create a CSV file for each xml file
output_file = os.path.join(output_dir, file.replace('.xml', '.csv'))
all_output_file = os.path.join(output_dir, file.replace('.xml', '-audit.csv'))
df.to_csv(output_file, index=False)
all_df.to_csv(all_output_file, index=False)
# if we are combining, combine output into two files
if combine:
df = pd.concat(combine_df, ignore_index=True)
df.to_csv(os.path.join(output_dir, 'combined.csv'), index=False)
all_df = pd.concat(all_data_combined_df, ignore_index=True)
all_df.to_csv(os.path.join(output_dir, 'combined-audit.csv'), index=False)
if __name__ == "__main__":
""" Main method that will get arguments on the command line. """
# define the command line parameters and switches
parser = argparse.ArgumentParser(description='Process Qualitivity XML files.')
parser.add_argument('input', type=str, help='folder with the source XML files')
parser.add_argument('output', type=str, help='folder for the output CSV files')
parser.add_argument('--combine', required=False, action='store_true',
help='Combine the output into a single CSV file')
# parse and process
args = parser.parse_args()
process(args.input, args.output, args.combine)
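# Example invocation (the script filename is illustrative):
#   python process_qualitivity.py ./xml_input ./csv_output --combine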
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import cv2
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
from tkinter import filedialog
from tkinter import *
root = Tk()
root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("jpeg files","*.jpg"),("all files","*.*")))
print(root.filename)
img=cv2.imread(root.filename)
text=pytesseract.image_to_string(img)
print(text)
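# Note: cv2.imread returns BGR channel order while Tesseract generally expects
# RGB; a conversion step (a sketch, not part of the original script) can help:
#   text = pytesseract.image_to_string(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))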
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 18:00:19 2021
@author: dipu
"""
from rico import *
from utils import *
from moka import *
from datasets import *
from scipy.optimize import linear_sum_assignment
import os
import time
import sys
import shutil
import random
from time import strftime
import argparse
import numpy as np
import torch
import torch.utils.data
from config_rico import add_eval_args
#from data import PartNetDataset, Tree
from rico import Hierarchy
from datasets import RicoFlatDataset, RicoHierDataset
import utils
from utils import mkdir_if_missing
from scipy.spatial.distance import cdist
def vis_fn(q_uxid, q_o, r1_id, r1_o, r2_id, r2_o, r3_id, r3_o, r4_id, r4_o, r5_id, r5_o):
return dict(
q_id = q_uxid,
query = q_o.to_string(render='html', labeled=True),
query_layout = q_o.plot(),
rank1_id = r1_id,
rank1 = r1_o.to_string(render='html', labeled=r1_o.is_labeled),
rank1_layout = r1_o.plot(),
rank2_id = r2_id,
rank2 = r2_o.to_string(render='html', labeled=r2_o.is_labeled),
rank2_layout = r2_o.plot(),
rank3_id = r3_id,
rank3 = r3_o.to_string(render='html', labeled=r3_o.is_labeled),
rank3_layout = r3_o.plot(),
rank4_id = r4_id,
rank4 = r4_o.to_string(render='html', labeled=r4_o.is_labeled),
rank4_layout = r4_o.plot(),
rank5_id = r5_id,
rank5 = r5_o.to_string(render='html', labeled=r5_o.is_labeled),
rank5_layout = r5_o.plot()
)
def test_vis_fn(q_uxid, q_o, r1_id, r1_o, r2_id, r2_o, r3_id, r3_o, r4_id, r4_o, r5_id, r5_o):
aa = [q_uxid, q_o, r1_id, r1_o, r2_id, r2_o, r3_id, r3_o, r4_id, r4_o, r5_id, r5_o]
return aa
def extract_features(conf, dataset, encoder):
device = torch.device(conf.device)
with torch.no_grad():
objects = []
for i, (uxid, o_gt) in enumerate(tqdm(dataset)):
o_gt = o_gt.to(device)
root_code = encoder.encode_structure(obj=o_gt)
if not conf.non_variational:
z, obj_kldiv_loss = torch.chunk(root_code, 2, 1)
else:
z = root_code
z = z.detach().cpu().numpy()
objects.append([uxid, o_gt, z])
return objects
def main():
parser = argparse.ArgumentParser()
parser = add_eval_args(parser)
eval_conf = parser.parse_args()
    # Write settings for debugging here
eval_conf.category = 'rico'
eval_conf.exp_name = 'rico_hier_exp_AE_sem_wt_1_nnemb'
eval_conf.semantics = 'rico_plus'
eval_conf.test_dataset = '/home/dipu/dipu_ps/codes/UIGeneration/prj-ux-layout-copy/codes/scripts/rico_gen_data/rico_mtn_50_geq2_mcpn_10_V2/train_uxid.txt'
eval_conf.model_epoch = None
eval_conf.num_gen = 100
eval_conf.web_dir = './www'
eval_conf.semantic_representation = 'nn_embedding'
eval_conf.device = 'cuda:3'
# load train config
conf = torch.load(os.path.join(eval_conf.model_path, eval_conf.exp_name, 'conf.pth'))
eval_conf.data_path = conf.data_path
    # merge training and evaluation configurations, giving evaluation parameters precedence
conf.__dict__.update(eval_conf.__dict__)
# load object category information
if conf.semantics:
Hierarchy.set_semantics(conf.semantics)
if conf.extract_hier:
assert conf.semantics == 'rico_plus'
# load model
models = utils.get_model_module(conf.model_version)
# set up device
device = torch.device(conf.device)
print(f'Using device: {conf.device}')
# check if eval results already exist. If so, delete it.
# if os.path.exists(os.path.join(conf.result_path, conf.exp_name)):
# response = input('Eval results for "%s" already exists, overwrite? (y/n) ' % (conf.exp_name))
# if response != 'y':
# sys.exit()
# shutil.rmtree(os.path.join(conf.result_path, conf.exp_name))
# create a new directory to store eval results
# result_dir = os.path.join(conf.result_path, conf.exp_name)
# mkdir_if_missing()
# os.makedirs(os.path.join(conf.result_path, conf.exp_name))
# result_dir = os.path.join(conf.result_path, conf.exp_name)
# create models
encoder = models.RecursiveEncoder(conf, variational=True, probabilistic=not conf.non_variational)
decoder = models.RecursiveDecoder(conf)
models = [encoder, decoder]
model_names = ['encoder', 'decoder']
print('\n\n')
#print(f'non_probabilistic: {conf.non_probabilistic}')
print(f'non_variational: {conf.non_variational}')
# load pretrained model
__ = utils.load_checkpoint(
models=models, model_names=model_names,
dirname=os.path.join(conf.model_path, conf.exp_name),
epoch=conf.model_epoch,
strict=True)
# send to device
for m in models:
m.to(device)
# set models to evaluation mode
for m in models:
m.eval()
# create dataset and data loader
data_features = ['uxid', 'object']
DatasetClass = globals()[conf.DatasetClass]
print('Using dataset:', DatasetClass)
test_dataset = DatasetClass(conf.data_path, conf.test_dataset, ['uxid', 'object'],
is_train=False, permute=False, n_permutes=1)
#dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, collate_fn=lambda x: list(zip(*x)))
# visualize(P, conf, conf.exp_name, test_dataset, encoder, decoder, result_dir, conf.web_dir, show=False)
feats_objects = extract_features(conf, test_dataset, encoder)
feats = np.concatenate([x[-1] for x in feats_objects])
uxids = [x[0] for x in feats_objects]
hiers = [x[1] for x in feats_objects]
uxid2hier = dict((k,v) for k,v in zip(uxids, hiers))
    distances = cdist(feats, feats, metric='euclidean')
sort_inds = np.argsort(distances)
sample_retrievals = []
for ii in range(100):
q_uxid = uxids[ii]
ranked_uxids = []
ranked_hiers = []
for yy in sort_inds[ii,:5]:
ranked_uxids.append(uxids[yy])
ranked_hiers.append(uxid2hier[uxids[yy]])
# ranked_uxids = [uxids[yy] for yy in sort_inds[ii,:5]]
# ranked_hiers = [uxid2hier[id] for id in ranked_uxids ]
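        # interleave: even slots hold the uxids, odd slots the matching hierarchies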
ranked = [None] * (len(ranked_uxids) + len(ranked_hiers))
ranked[::2] = ranked_uxids
ranked[1::2] = ranked_hiers
sample_retrievals.append([q_uxid, uxid2hier[q_uxid]] + ranked)
visualize_retrieved_images(conf, sample_retrievals, web_dir = 'www', show=False )
def visualize_retrieved_images(conf, sample_retrievals, web_dir='www', show=False, refresh=False):
split = 'train' if 'train' in conf.test_dataset else 'val'
if conf.model_epoch is None:
html = HTML(f'/retrieval_{split}@{conf.exp_name}', conf.exp_name, base_url=web_dir, inverted=True, overwrite=True, refresh=int(refresh))
else:
        html = HTML(f'/retrieval_{split}@{conf.exp_name}_epoch_{conf.model_epoch}', conf.exp_name, base_url=web_dir, inverted=True, overwrite=True, refresh=int(refresh))
html.add_table().add([vis_fn(*_) for _ in tqdm(sample_retrievals)])
html.save()
domain = conf.domain if hasattr(conf, 'domain') else None
if show: html.show(domain)
#else: P.print(html.url(domain))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import numpy as np
import os
import cv2
def make_image_noisy(image, noise_typ):
if noise_typ == "gauss":
row, col, ch = image.shape
mean = 0
var = 40
sigma = var**0.5
gauss = np.random.normal(mean, sigma, (row, col, ch))
gauss = gauss.reshape((row, col, ch))
noisy_image = image + gauss
return noisy_image.clip(0, 255)
elif noise_typ == "zero":
amount = 0.05 # percentage of zero pixels
out = np.copy(image)
num_zeros = np.ceil(amount * image.shape[0]*image.shape[1])
coords = [np.random.randint(0, i - 1, int(num_zeros))
for i in image.shape[:2]]
out[:, :, 0][coords] = 0
out[:, :, 1][coords] = 0
out[:, :, 2][coords] = 0
return out.astype(np.uint8)
elif noise_typ == "s&p":
raise RuntimeError("Test it properly before using!")
row, col, ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords] = 0
return out
elif noise_typ == "poisson":
raise RuntimeError("Test it properly before using!")
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy_image = np.random.poisson(image * vals) / float(vals)
return noisy_image
elif noise_typ == "speckle":
raise RuntimeError("Test it properly before using!")
row, col, ch = image.shape
gauss = np.random.randn(row, col, ch)
gauss = gauss.reshape((row, col, ch))
noisy_image = image + image * gauss
return noisy_image
else:
raise RuntimeError(f"Unknown noisy_type: {noise_typ}")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
import urllib, requests, json
from timetable.models import Course
from ngram import NGram
class SearchConfig(AppConfig):
name = 'curso'
class SearchOb(object):
"""docstring for SearchOb"""
def __init__(self, uri=None):
from pymongo import MongoClient
self.client = MongoClient(uri)
self.db = self.client['timetable']
self.SrchCollect = self.db['CourseSearch']
self.cursoNgram = NGram((i['key'] for i in self.SrchCollect.find({}, {'key':1, '_id':False})))
    def search(self, keyword, school):
        cursor = self.SrchCollect.find({'key': keyword}, {school: 1, '_id': False}).limit(1)
        if cursor.count() == 0:
            # no exact match: fall back to the closest NGram match, if any
            keyword = self.cursoNgram.find(keyword)
            if not keyword:
                return []
            cursor = self.SrchCollect.find({'key': keyword}, {school: 1, '_id': False}).limit(1)
        return cursor[0][school]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""The setup script."""
# setuptools is required here: distutils.core has no find_packages, so a
# distutils fallback import cannot work for this setup script.
from setuptools import find_packages, setup
setup(name='hyperscan-python',
version='0.1',
description='Simple Python bindings for the Hyperscan project.',
author='Andreas Moser',
author_email='grrrrrrrrr@surfsup.at',
license='Apache License, Version 2.0',
packages=find_packages('.', exclude=[
'tests'
]))
|
nilq/baby-python
|
python
|
def test_geoadd(judge_command):
judge_command(
'GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"',
{
"command": "GEOADD",
"key": "Sicily",
"longitude": "15.087269",
"latitude": "37.502669",
"member": '"Catania"',
},
)
def test_georadiusbymember(judge_command):
judge_command(
"GEORADIUSBYMEMBER Sicily Agrigento 100 km",
{
"command": "GEORADIUSBYMEMBER",
"key": "Sicily",
"member": "Agrigento",
"float": "100",
"distunit": "km",
},
)
def test_georadius(judge_command):
judge_command(
"GEORADIUS Sicily 15 37 200 km WITHDIST WITHCOORD ",
{
"command": "GEORADIUS",
"key": "Sicily",
"longitude": "15",
"latitude": "37",
"float": "200",
"distunit": "km",
"geochoice": "WITHCOORD",
},
)
|
nilq/baby-python
|
python
|
import jpype
jpype.startJVM()
from asposecells.api import Workbook, PdfSaveOptions, ImageOrPrintOptions, SheetRender
import cv2
import numpy as np
DEBUG_MODE = False
def excel2imgs(excel_path):
workbook = Workbook(excel_path)
''' Excel to PDF '''
# pdfOptions = PdfSaveOptions()
# pdfOptions.setOnePagePerSheet(True)
# workbook.save("../test_images/example.pdf", pdfOptions)
imgOptions = ImageOrPrintOptions()
imgOptions.setHorizontalResolution(300)
imgOptions.setVerticalResolution(300)
imgOptions.setCellAutoFit(True)
imgOptions.setOnePagePerSheet(True)
img_datasets = []
sheet_Count = workbook.getWorksheets().getCount()
for i in range(sheet_Count):
sheet = workbook.getWorksheets().get(i)
sr = SheetRender(sheet, imgOptions)
imgbytes_content = sr.toImageBytes(0)
img = cv2.imdecode(np.frombuffer(imgbytes_content, np.uint8), cv2.IMREAD_COLOR)
img_datasets.append(img)
if DEBUG_MODE:
cv2.imwrite("../test_results/Excel2Image/bytes2cvimg_" + str(i) + ".png", img)
# sr.toImage(i, "../test_results/Excel2Image/excel2img_" + str(i) +".png")
# jpype.shutdownJVM()
return img_datasets, sheet_Count
###############################
if __name__ == "__main__":
excel_path = "/home/elimen/Data/helloflask/FlaskTutorial/rewrite.xls"
    # excel2imgs returns a (images, sheet_count) tuple; unpack both
    img_datasets, sheet_count = excel2imgs(excel_path)
    print(" Number of images: {}".format(len(img_datasets)))
    print(" Type of image: {}".format(type(img_datasets[0])))
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
import cv2 as cv
import os
import argparse
import numpy as np
import pandas as pd
import time
from utils import choose_run_mode, load_pretrain_model, set_video_writer
from Pose.pose_visualizer import TfPoseVisualizer
from Action.recognizer import load_action_premodel, framewise_recognize
parser = argparse.ArgumentParser(description='Action Recognition by OpenPose')
parser.add_argument('-img', '--image', required=True, help='Path to image folder.')
args = parser.parse_args()
# imported related models
estimator = load_pretrain_model('VGG_origin')
action_classifier = load_action_premodel('Action/framewise_recognition.h5')
# parameter initialization
realtime_fps = '0.0000'
start_time = time.time()
fps_interval = 1
fps_count = 0
run_timer = 0
frame_count = 0
folder_path = args.image
# create df for saving joints
columns = ["nose_x", "nose_y", "neck_x", "neck_y", "Rshoulder_x", "Rshoulder_y", "Relbow_x",
"Relbow_y", "Rwrist_x", "RWrist_y", "LShoulder_x", "LShoulder_y", "LElbow_x", "LElbow_y",
"LWrist_x", "LWrist_y", "RHip_x", "RHip_y", "RKnee_x", "RKnee_y", "RAnkle_x", "RAnkle_y",
"LHip_x", "LHip_y", "LKnee_x", "LKnee_y", "LAnkle_x", "LAnkle_y", "REye_x", "REye_y",
"LEye_x", "LEye_y", "REar_x", "REar_y", "LEar_x", "LEar_y", "class"]
df = pd.DataFrame(columns=columns)
for f_name in os.listdir(folder_path):
sub_f = folder_path + "/" + f_name
# folder_out = "test_out" + "/" + f_name
print("f_name: " + f_name)
# if not os.path.isdir(folder_out):
# os.mkdir(folder_out)
for img in os.listdir(sub_f):
print("image name: " + img)
show = cv.imread(sub_f + "/" + img)
fps_count += 1
frame_count += 1
# pose estimation
humans = estimator.inference(show)
# print(len(humans))
# print(humans[0].uidx_list)
# print(humans[0].body_parts)
# get pose info
pose = TfPoseVisualizer.draw_pose_rgb(show, humans) # return frame, joints, bboxes, xcenter
# recognize the action framewise
show = framewise_recognize(pose, action_classifier)
# height, width = show.shape[:2]
# # Display real-time FPS values
# if (time.time() - start_time) > fps_interval:
        # # Calculate the number of frames in this interval. If the interval is 1 second, it is FPS.
# realtime_fps = fps_count / (time.time() - start_time)
# fps_count = 0 # Clear the number of frames
# start_time = time.time()
# fps_label = 'FPS:{0:.2f}'.format(realtime_fps)
# cv.putText(show, fps_label, (width-160, 25), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
# # Show the number of people detected
# num_label = "Human: {0}".format(len(humans))
# cv.putText(show, num_label, (5, height-45), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
# # Show current run time and total frames
# if frame_count == 1:
# run_timer = time.time()
# run_time = time.time() - run_timer
# time_frame_label = '[Time:{0:.2f} | Frame:{1}]'.format(run_time, frame_count)
# cv.putText(show, time_frame_label, (5, height-15), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
# cv.imshow('Action Recognition based on OpenPose', show)
# img_out = img.split(".")[0] + "_out_" + ".png"
# cv.imwrite(folder_out + "/" + img, show)
# video_writer.write(show)
# # Collect data for training process (for training)
        joints_norm_per_frame = np.array(pose[-1]).astype(str)  # np.str was removed from NumPy; use builtin str
# print("length of joints frames: " + str(len(joints_norm_per_frame)))
# only select joints_norm_per_frame with 1 human
if len(joints_norm_per_frame) == 36:
row = np.append(joints_norm_per_frame, f_name)
            series = pd.Series(dict(zip(df.columns, row)))
            df = pd.concat([df, series.to_frame().T], ignore_index=True)  # DataFrame.append is gone in pandas 2
# saving df to csv
df.to_csv("Action/training/human_keypoint.csv", index=False)
|
nilq/baby-python
|
python
|
from typing import Optional, List
from reconbot.notificationprinters.embedformat import EmbedFormat
class NotificationFormat(object):
def __init__(self, content: Optional[str], embeds: Optional[List[EmbedFormat]] = None):
self.content = content
        self.embeds = embeds if embeds is not None else []
|
nilq/baby-python
|
python
|
from typing import List, Dict, Union
from sse_starlette.sse import EventSourceResponse
from fastapi import Depends, FastAPI, Request
from fastapi_users import FastAPIUsers, BaseUserManager
from fastapi_users.authentication import JWTAuthentication
from sqlalchemy.orm import Session
from . import crud, schemas
from .argo import get_argo_router
from .database import SessionLocal
from .adapter import SQLAlchemyORMUserDatabase
from .schemas import User, UserCreate, UserUpdate, UserDB
from .utils import incident_event_generator
db_session = SessionLocal()
SECRET = "OpenSOAR@11042018"
auth_backends = []
jwt_authentication = JWTAuthentication(
secret=SECRET, lifetime_seconds=3600, tokenUrl="auth/jwt/login"
)
auth_backends.append(jwt_authentication)
class UserManager(BaseUserManager[UserCreate, UserDB]):
user_db_model = UserDB
reset_password_token_secret = SECRET
verification_token_secret = SECRET
def get_user_db():
yield SQLAlchemyORMUserDatabase(UserDB, db_session)
def get_user_manager(user_db=Depends(get_user_db)):
yield UserManager(user_db)
fastapi_users = FastAPIUsers(
get_user_manager,
auth_backends,
User,
UserCreate,
UserUpdate,
UserDB,
)
app = FastAPI(root_path="/api")
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
@app.get("/")
def read_root():
return {}
app.include_router(
fastapi_users.get_auth_router(jwt_authentication), prefix="/auth/jwt", tags=["auth"]
)
app.include_router(
fastapi_users.get_register_router(),
prefix="/auth",
tags=["auth"],
)
app.include_router(
fastapi_users.get_users_router(),
prefix="/users",
tags=["users"],
)
app.include_router(
get_argo_router(fastapi_users),
prefix="/argo",
)
@app.get("/users", response_model=List[User])
def read_users(
db: Session = Depends(get_db),
user: User = Depends(fastapi_users.current_user(active=True)),
):
return crud.read_users(db)
@app.get("/incidents", response_model=Dict[str, Union[List[schemas.IncidentRead], int]])
def read_incidents(
skip: int = 0,
limit: int = 10,
query_filter: str = None,
db: Session = Depends(get_db),
user: User = Depends(fastapi_users.current_user(active=True)),
):
return crud.get_incidents(db, skip=skip, limit=limit, query_filter=query_filter)
@app.post("/incidents", response_model=schemas.Incident)
def create_incident(
incident: schemas.IncidentCreate,
db: Session = Depends(get_db),
user: User = Depends(fastapi_users.current_user(active=True)),
):
return crud.create_incident(db, incident)
@app.get("/incidents/stream")
async def read_incidents_from_stream(
request: Request,
db: Session = Depends(get_db),
user: User = Depends(fastapi_users.current_user(active=True)),
):
incident_generator = incident_event_generator(db, request)
return EventSourceResponse(incident_generator)
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
# Copyright 2015-2020 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Same as :mod:`lino_book.projects.noi1e`, but using :ref:`react` as front end.
This uses :ref:`hosting.multiple_frontends`.
.. autosummary::
:toctree:
settings
tests
"""
|
nilq/baby-python
|
python
|
from mongoengine import signals
__author__ = 'Enis Simsar'
import json
import re
import threading
from datetime import datetime
from decouple import config
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
from models.Tweet import Tweet
from models.Topic import Topic
def get_info(topic_dic):
keywords = []
topics = []
lang = []
for key in topic_dic:
topic = topic_dic[key]
topics = topics + [topic['id']]
keywords = keywords + topic['keywords']
lang = lang + topic['languages']
lang = list(set(lang))
lang = [str(l) for l in lang]
keywords = list(set(keywords))
keywords = [str(keyword) for keyword in keywords]
result = {
'topics': sorted(topics),
'keywords': keywords,
'lang': lang
}
return result
def create_tweet(topic_id, tweet):
topic = Topic.objects.get(id=topic_id)
tweet_obj = Tweet()
tweet_obj.topic_id = topic.id
tweet_obj.published_at = datetime.fromtimestamp(int(tweet['timestamp_ms']) / 1e3)
tweet_obj.entry = tweet
tweet_obj.save()
    topic.last_tweet_at = datetime.now()
topic.save()
def separates_tweet(topic_dic, tweet):
for key in topic_dic:
topic = topic_dic[key]
if tweet['lang'] in topic['languages']:
for keyword in topic['keywords']:
keyword = re.compile(keyword.replace(" ", "(.?)"), re.IGNORECASE)
if 'extended_tweet' in tweet and 'full_text' in tweet['extended_tweet']:
if re.search(keyword, str(tweet['extended_tweet']['full_text'])):
create_tweet(key, tweet)
break
else:
if re.search(keyword, str(tweet['text'])):
create_tweet(key, tweet)
break
# Accessing Twitter API
consumer_key = config("TWITTER_CONSUMER_KEY") # API key
consumer_secret = config("TWITTER_CONSUMER_SECRET") # API secret
access_token = config("TWITTER_ACCESS_TOKEN")
access_secret = config("TWITTER_ACCESS_SECRET")
# This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
def __init__(self, topic_dic):
self.topic_dic = topic_dic
self.terminate = False
self.connection = True
super(StdOutListener, self).__init__()
def on_data(self, data):
if not self.terminate:
tweet = json.loads(data)
separates_tweet(self.topic_dic, tweet)
return True
else:
return False
def on_disconnect(self, notice):
self.connection = False
return True
def on_error(self, status):
print(status)
if status == 420:
return False
def stop(self):
self.terminate = True
def on_timeout(self):
return True # To continue listening
class StreamCreator():
def __init__(self, topic_dic):
        # This handles Twitter authentication and the connection to Twitter Streaming API
self.l = StdOutListener(topic_dic)
signals.post_save.connect(Tweet.post_save, sender=Tweet)
self.info = get_info(topic_dic=topic_dic)
self.keywords = self.info['keywords']
self.lang = self.info['lang']
self.topics = self.info['topics']
print(self.topics)
print(self.keywords)
print(self.lang)
self.auth = OAuthHandler(consumer_key, consumer_secret)
self.auth.set_access_token(access_token, access_secret)
self.stream = Stream(self.auth, self.l)
self.t = threading.Thread(target=self.stream.filter,
kwargs={'track': self.keywords, 'languages': self.lang, 'stall_warnings': True})
def start(self):
        self.t.daemon = True
self.t.start()
def terminate(self):
self.l.running = False
self.l.stop()
self.l.terminate = True
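# A minimal usage sketch; the topic dict shape mirrors what get_info() expects,
# and the id/keywords below are made-up placeholders.
def _example_stream():
    topics = {'1': {'id': 1, 'keywords': ['python'], 'languages': ['en']}}
    creator = StreamCreator(topics)
    creator.start()
    return creator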
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import argparse, os
"""
Trenco Module for arguments
"""
def txn_args(parser):
parser.add_argument('--annotation-file',
dest = 'annotfname',
default = '',
help="Genode annotations file in gtf format (overwrites --annotation-version and --organism")
parser.add_argument('--annotation-version',
dest="annotations",
default="vM4",
help="The Gencode annotations file in gtf format. (Default: vM4) (WARNING: All entries are indexed to this version)")
parser.add_argument('--organism',
default="mouse",
help="Organism gencode to download (Default: mouse)"
)
parser.add_argument('-b', '--biotypes',
help="The biotypes to get transcript TSS. (default: protein)",
nargs='+',
default=['protein_coding'])
def enh_bound_args(parser, tot = True):
if tot:
parser.add_argument('-t', '--tss',
help="The Gencode TSS file.",
required=True)
parser.add_argument('-s', '--sizes',
help="The chrome sizes file or genome number (ie mm10)",
required=True)
parser.add_argument('-p', '--peaks',
help="The full path to peak files in bed format",
nargs='+',
required=True)
#parser.add_argument('--geneGTF',
# help="GTF file of genes from Gencode (Default gene_txn.gtf from get_trancript script)",
# default = "gene_txn.gtf")
parser.add_argument('-r', '--region',
help="The number of bases pairs to exclude around TSS (Default: 2500)",
type=int,
default=2500)
parser.add_argument('-q', '--promoter-range',
help="Range of numbers before TSS and after TSS to consider as Promoter (Default: 1000-200)",
type=str,
default="1000-200")
parser.add_argument('-d', '--distance',
help="The number of bases pairs to merge between adjacent peaks (Default: 150)",
type=int,
default=150)
def merge_txn_args(parser):
parser.add_argument('-e', '--expression',
help="The full path to peak files in tsv format",
nargs='+',
required=True)
def merge_enh_args(parser):
parser.add_argument('-e', '--enhancers',
help="The universe of enhancer files.",
required=True)
parser.add_argument("-t", "--enhMarks",
dest='target',
type=str,
default="H3K27ac",
help="Mark for enchancers: Default H3K27ac")
parser.add_argument('-a', '--alignments',
help="The full path to sorted alignment files in bam format.",
nargs='+',
required=True)
def full_trenco_args(parser):
path = '/'.join(os.path.realpath(__file__).split('/')[:-2])
parser.add_argument("--design",
type=str,
required=True,
help="Design file containing link information to samples.")
parser.add_argument("--alignment",
nargs='+',
required=True,
help="Full path to ChIP alingment files in bam format")
parser.add_argument("--expression",
nargs='+',
required=True,
help="Full path to transcript expression table in tsv format")
parser.add_argument("--enhMarks",
dest='target',
type=str,
default="H3K27ac",
help="Mark for enchancers: Default H3K27ac")
parser.add_argument("--tadBED",
type=str,
default="%s/common_files/TAD_positions_mm10_sorted.bed" % path,
help="TAD file: Default - mm10 TAD in common_files")
def tf_mtx_args(parser, spec = True):
parser.add_argument("--meme-db",
type=str,
default="cis-bp",
help="MEME database to use (Default: cis-bp)")
parser.add_argument("--db",
type=str,
help="Motif database name if different from species (ex JASPER CORE 2014 for jasper)")
if spec:
parser.add_argument('-s', '--species',
dest='refID',
nargs='+',
required=True,
help = "Scientific name of organism (can use these names for ease: human, mouse, fly, worm)")
parser.add_argument('-g', '--genome-version',
dest='gvers',
type=str,
help = "Version of genome to use. Default is newest")
parser.add_argument('--bed',
dest='bed',
type=str,
help = "ChIP and Promoter bed file for getting motifs (ex enh.bed,promoter.bed)")
def enh_gene_nw_args(parser):
parser.add_argument("-e", "--enh", help="enhancer by samples log2 TPM quantification matrix", type=str)
parser.add_argument("-g", "--gene", help="gene by samples log2 TPM quantification matrix", type=str)
parser.add_argument("-ta", "--tadBED", help='sorted tad annotation in bed file format', type=str)
parser.add_argument("-ga", "--geneBED", help='gene annotation in bed file format', type=str)
parser.add_argument("-ea", "--enhBED", help='enh annotation in bed file format')
parser.add_argument("-s", "--sample", help='sample to construct the network', type=str)
parser.add_argument("-o", "--output", help="output directory", type=str)
parser.add_argument("-p", "--threads", help="Threads", type=int, default=30)
def tis_gene_networks(parser):
parser.add_argument("-d", "--dir", help="directory containing the output of get_enh_gene_networks.py", type=str)
parser.add_argument("-s", "--sample", help='sample to construct the network', type=str)
parser.add_argument("-p", "--threads", help='Number of threads to use', type=int, default=30)
parser.add_argument("-x1", "--matrix1", help='TF by enchancer matrix file path', type=str)
parser.add_argument("-x2", "--matrix2", help="TF by gene promoter matrix file path", type=str)
parser.add_argument("-v", "--vector", help="Expression vector for the sample from RNA-seq", type=str)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import argparse
import json
import sys
from splinter import Browser
from tvlist_loader import xlparser
from tvlist_loader import scraper
from tvlist_loader import projects_parser as pp
def main():
# Parse cli arguments
parser = argparse.ArgumentParser()
parser.add_argument("FILE", help="Файл программы передач в формате Excel")
parser.add_argument(
"-s", "--sheet", help="Имя листа с программой передач. По умолчанию 'Лист1'")
parser.add_argument("-a", "--auth", help="Файл с адресом сайта, логином и паролем в формате JSON")
parser.add_argument("-b", "--browser", help="Браузер, который будет использоваться для открывания ссылок. Доступные значения 'firefox' (по умолчанию), 'chrome'.")
parser.add_argument("-H", "--headless", action="store_true", default=False, help="Запустить браузер без графического интерфейса.")
args = vars(parser.parse_args())
# Set sheet to read
if args["sheet"]:
sheet = args["sheet"]
else:
sheet = "Лист1"
if args["auth"]:
file_client = args["auth"]
else:
file_client = "client_id.json"
try:
with open(file_client, "r") as file_json:
client = json.load(file_json)
except FileNotFoundError:
print(f"Не удалось открыть {file_client}. Поместите файл 'client_id.json' в папку запуска программы или укажите другой файл с помощью параметра -a")
sys.exit(1)
except json.decoder.JSONDecodeError:
print(f"Файл {file_client} не является корректным JSON.")
sys.exit(1)
if args["browser"] == "firefox" or args["browser"] == "chrome":
browse_with = args["browser"]
else:
browse_with = "firefox"
site = client['site']
table = xlparser.get_table(args["FILE"], sheet)
week = xlparser.get_dates(table)
with Browser(browse_with, headless=args["headless"]) as browser:
projects = pp.get_projects(browser, site)
for day, value in week.items():
week[day]["programs"] = xlparser.get_program(table, value["id"], projects)
with open("schedule.json", "w", encoding="utf-8") as file_json:
json.dump(week, file_json, indent=4, ensure_ascii=False)
scraper.login(browser, site, client['login'], client['password'])
scraper.open_schedule(browser, site)
for days in week.values():
scraper.add_day(browser, days["day"], days["date"])
for programs in days["programs"].values():
scraper.add_program(
browser, programs["name"], programs["time"], programs["age"], programs["project"], programs["project_name"])
scraper.commit(browser)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
import shutil
# =========== HYPERPARAMETERS ==========
UNIVARIATE_DISTRIBUTIONS = ['chi_square_9', 'exp_9']
NUM_SAMPLES = 20000
NUM_TRIALS = 5
# ========== OUTPUT DIRECTORIES ==========
OUTPUT_DIR = 'examples/power_analyses/univariate_output/'
MODELS_OUTPUT_DIR = OUTPUT_DIR + 'MODELS/'
SYN_DATA_OUTPUT_DIR = OUTPUT_DIR + 'SYN_DATA/'
REAL_DATA_OUTPUT_DIR = OUTPUT_DIR + 'REAL_DATA/'
POWER_OUTPUT_DIR = OUTPUT_DIR + 'POWER/'
RESULTS_DIR = 'RESULTS/'
# shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
# os.makedirs(MODELS_OUTPUT_DIR)
# os.makedirs(SYN_DATA_OUTPUT_DIR)
# os.makedirs(REAL_DATA_OUTPUT_DIR)
# os.makedirs(POWER_OUTPUT_DIR)
os.makedirs(RESULTS_DIR, exist_ok=True)
# ========== RUN PIPELINE ==========
def generate_real_cmd(dist, num_samples, output_dir):
return 'python3 sample_prob_dist.py {0} {1} {2}/'.format(dist, num_samples, output_dir)
def train_gan_cmd(real_data_dir, output_dir):
return 'python3 train_prob_gan.py {0}data.npy {1}'.format(real_data_dir, output_dir)
def generate_syn_cmd(gen_dir, num_samples, output_dir):
return 'python3 generate_prob_gan.py {0}generator {1} {2}'.format(gen_dir, num_samples, output_dir)
def power_analysis_cmd(real_data_1_dir, real_data_2_dir, syn_data_1_dir, syn_data_2_dir, output_dir):
return 'python3 univariate_power_analysis.py {0}data.npy {1}data.npy {2}data.npy {3}data.npy {4}'.format(real_data_1_dir, syn_data_1_dir, real_data_2_dir, syn_data_2_dir, output_dir)
def output_dirs(dist, k):
model_tag_base = '[{0}]_[k={1}]'.format(dist, k)
model_1_tag = model_tag_base + '_[v=1]'
model_2_tag = model_tag_base + '_[v=2]'
real_data_1_dir = '{0}{1}/'.format(REAL_DATA_OUTPUT_DIR, model_1_tag)
real_data_2_dir = '{0}{1}/'.format(REAL_DATA_OUTPUT_DIR, model_2_tag)
model_1_dir = '{0}{1}/'.format(MODELS_OUTPUT_DIR, model_1_tag)
model_2_dir = '{0}{1}/'.format(MODELS_OUTPUT_DIR, model_2_tag)
syn_data_1_dir = '{0}{1}/'.format(SYN_DATA_OUTPUT_DIR, model_1_tag)
syn_data_2_dir = '{0}{1}/'.format(SYN_DATA_OUTPUT_DIR, model_2_tag)
return real_data_1_dir, real_data_2_dir, model_1_dir, model_2_dir, syn_data_1_dir, syn_data_2_dir
def run_cmd_sequence(cmds):
for cmd in cmds:
os.system(cmd)
def generate_real_data_samples():
for i in range(len(UNIVARIATE_DISTRIBUTIONS)):
for k in range(NUM_TRIALS):
dist_i = UNIVARIATE_DISTRIBUTIONS[i]
real_data_1_dir, real_data_2_dir, _, _, _, _ = output_dirs(dist_i, k)
sample_real_1 = generate_real_cmd(dist_i, NUM_SAMPLES, real_data_1_dir)
sample_real_2 = generate_real_cmd(dist_i, NUM_SAMPLES, real_data_2_dir)
run_cmd_sequence([sample_real_1, sample_real_2])
def train_gans():
for i in range(len(UNIVARIATE_DISTRIBUTIONS)):
for k in range(NUM_TRIALS):
dist_i = UNIVARIATE_DISTRIBUTIONS[i]
real_data_1_dir, real_data_2_dir, model_1_dir, model_2_dir, _, _ = output_dirs(dist_i, k)
train_gan_1 = train_gan_cmd(real_data_1_dir, model_1_dir)
train_gan_2 = train_gan_cmd(real_data_2_dir, model_2_dir)
run_cmd_sequence([train_gan_1, train_gan_2])
def generate_syn_data_samples():
for i in range(len(UNIVARIATE_DISTRIBUTIONS)):
for k in range(NUM_TRIALS):
dist_i = UNIVARIATE_DISTRIBUTIONS[i]
_, _, model_1_dir, model_2_dir, syn_data_1_dir, syn_data_2_dir = output_dirs(dist_i, k)
sample_syn_1 = generate_syn_cmd(model_1_dir, NUM_SAMPLES, syn_data_1_dir)
sample_syn_2 = generate_syn_cmd(model_2_dir, NUM_SAMPLES, syn_data_2_dir)
run_cmd_sequence([sample_syn_1, sample_syn_2])
def run_power_analyses():
for i in range(len(UNIVARIATE_DISTRIBUTIONS)):
for j in range(i, len(UNIVARIATE_DISTRIBUTIONS)):
for k in range(NUM_TRIALS):
dist_i = UNIVARIATE_DISTRIBUTIONS[i]
dist_j = UNIVARIATE_DISTRIBUTIONS[j]
real_data_1_dir_i, real_data_2_dir_i, _, _, syn_data_1_dir_i, syn_data_2_dir_i = output_dirs(dist_i, k)
real_data_1_dir_j, real_data_2_dir_j, _, _, syn_data_1_dir_j, syn_data_2_dir_j = output_dirs(dist_j, k)
output_dir = '{0}[{1}_VS_{2}]_[k={3}]/'.format(POWER_OUTPUT_DIR, dist_i, dist_j, k)
cmd = power_analysis_cmd(real_data_1_dir_i, real_data_2_dir_j, syn_data_1_dir_i, syn_data_2_dir_j, output_dir)
run_cmd_sequence([cmd])
def visualize():
for i in range(len(UNIVARIATE_DISTRIBUTIONS)):
for j in range(i, len(UNIVARIATE_DISTRIBUTIONS)):
figure, axes = plt.subplots(nrows=1, ncols=1)
n = None
t_test_real_power = []
mmd_test_real_power = []
t_test_syn_power = []
mmd_test_syn_power = []
for k in range(NUM_TRIALS):
dist_i = UNIVARIATE_DISTRIBUTIONS[i]
dist_j = UNIVARIATE_DISTRIBUTIONS[j]
power_dir_k = '{0}[{1}_VS_{2}]_[k={3}]/'.format(POWER_OUTPUT_DIR, dist_i, dist_j, k)
if n is None:
n = np.load(power_dir_k+'n.npy')
t_test_real_power.append(np.load(power_dir_k+'t_test_real_power.npy'))
mmd_test_real_power.append(np.load(power_dir_k+'mmd_test_real_power.npy'))
t_test_syn_power.append(np.load(power_dir_k+'t_test_syn_power.npy'))
mmd_test_syn_power.append(np.load(power_dir_k+'mmd_test_syn_power.npy'))
n = np.array(n)
t_test_real_power = np.array(t_test_real_power)
mmd_test_real_power = np.array(mmd_test_real_power)
t_test_syn_power = np.array(t_test_syn_power)
mmd_test_syn_power = np.array(mmd_test_syn_power)
# Plot curve of n vs power
# sns.tsplot(data=t_test_real_power, time=n, ci=[68, 95], color='blue', condition='Real', ax=axes[0])
# sns.tsplot(data=t_test_syn_power, time=n, ci=[68, 95], color='orange', condition='Synthetic', ax=axes[0])
# axes[0].set_title('Sample Size vs T Test Power')
# axes[0].set_xlabel('Sample Size')
# axes[0].set_ylabel('Power')
# axes[0].set_ylim([-0.1, 1.1])
# axes[0].legend(loc="upper right")
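            # NOTE: sns.tsplot was removed in seaborn 0.9; on newer versions the
            # two calls below would need porting to sns.lineplot with error bands.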
sns.tsplot(data=mmd_test_real_power, time=n, ci=[68, 95], color='blue', condition='Real', ax=axes)
sns.tsplot(data=mmd_test_syn_power, time=n, ci=[68, 95], color='orange', condition='Synthetic', ax=axes)
axes.set_title('Sample Size vs MMD Test Power')
axes.set_xlabel('Sample Size')
axes.set_ylabel('Power')
axes.set_ylim([-0.1, 1.1])
axes.legend(loc="upper right")
# Save results
figure.tight_layout()
figure.savefig('{0}{1}_VS_{2}'.format(RESULTS_DIR, dist_i, dist_j), format='eps')
# ========== MAIN ==========
# generate_real_data_samples()
# train_gans()
# generate_syn_data_samples()
# run_power_analyses()
visualize()
|
nilq/baby-python
|
python
|
import scrapy
import re
from locations.items import GeojsonPointItem
DAY_MAPPING = {
"Mon": "Mo",
"Tues": "Tu",
"Wed": "We",
"Thur": "Th",
"Fri": "Fr",
"Sat": "Sa",
"Sun": "Su"
}
class KoppsSpider(scrapy.Spider):
name = "kopps"
item_attributes = { 'brand': "Kopps" }
allowed_domains = ["www.kopps.com"]
download_delay = 1.5
start_urls = (
'https://www.kopps.com/',
)
    def parse_day(self, day):
        if re.search('-', day):
            days = day.split('-')
            osm_days = []
            if len(days) == 2:
                for day in days:
                    osm_day = DAY_MAPPING[day.strip()]
                    osm_days.append(osm_day)
                return "-".join(osm_days)
        # single day such as "Sat" maps directly
        return DAY_MAPPING.get(day.strip(), day.strip())
def parse_times(self, times):
if times.strip() == 'Open 24 hours':
return '24/7'
hours_to = [x.strip() for x in times.split('-')]
cleaned_times = []
for hour in hours_to:
if re.search('pm$', hour):
hour = re.sub('pm', '', hour).strip()
hour_min = hour.split(":")
if int(hour_min[0]) < 12:
hour_min[0] = str(12 + int(hour_min[0]))
cleaned_times.append(":".join(hour_min))
            if re.search('am$', hour):
                hour = re.sub('am', '', hour).strip()
                hour_min = hour.split(":")
                if hour_min[0] == '12':
                    # 12am is 00:xx in 24-hour time
                    hour_min[0] = '00'
                else:
                    hour_min[0] = hour_min[0].zfill(2)
                cleaned_times.append(":".join(hour_min))
return "-".join(cleaned_times)
def parse_hours(self, lis):
hours = []
for li in lis:
day_times = li.xpath('normalize-space(./text())').extract_first()
day = re.findall(r"^[a-zA-Z-]+" , day_times)
if(len(day)>0):
day = day[0]
else:
day = 'Mon-Sun'
            times = re.findall(r"[0-9]{2}:[0-9]{2}[a|p]m - [0-9]{2}:[0-9]{2}[a|p]m", day_times)
            times = times[0] if times else None
if times and day:
parsed_time = self.parse_times(times)
parsed_day = self.parse_day(day)
hours.append(parsed_day + ' ' + parsed_time)
return "; ".join(hours)
def parse(self, response):
locations = response.xpath('//div[@id="locations"]/div/div')
for location in locations:
properties = {
'addr_full': location.xpath('normalize-space(./div/address/a/text())').extract_first(),
'phone': location.xpath('normalize-space(./div/ul/li/span/a/text())').extract_first(),
'city': location.xpath('./div/address/a/text()').extract()[1].replace(' ' ,'').split(',')[0].replace('\r\n' ,''),
'state': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[1],
'postcode': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[2].replace('\r\n' ,''),
'ref': location.xpath('normalize-space(./div/address/a/@href)').extract_first(),
'website': response.url,
'lat':re.findall(r"\/[0-9]{2}[^(\/)]+z",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[0],
'lon': re.findall(r"\/[0-9]{2}[^(\/)]+z",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[1],
}
hours = self.parse_hours(location.xpath('./div/ul/li[3]/span'))
if hours:
properties['opening_hours'] = hours
yield GeojsonPointItem(**properties)
|
nilq/baby-python
|
python
|
import smtplib
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email import encoders
from user import User
from mail import Mail
class ImportantUser(User):
'''
    ImportantUser inherits from User and is a more complex version of it. It lets the user add an attachment
    to a mail, paired with a signature image (e.g. a logo).
Input:
sender, password, smtp_name - str
smtp_port - int
attachment_name, signature_img_name - is a str name of a file with the extension
attachment_path, signature_img_path - is a str absolute path to the folder with image
'''
def __init__(self, sender, password, smtp_name, smtp_port, signature_img_name, signature_img_path, attachment_name,
attachment_path):
super().__init__(sender, password, smtp_name, smtp_port)
self.signature_img_name = signature_img_name
self.signature_img_path = signature_img_path
self.attachment_name = attachment_name
self.attachment_path = attachment_path
    def create_signature_image_object(self):
        # use a context manager so the image file handle is closed promptly
        with open(self.signature_img_path + self.signature_img_name, 'rb') as img:
            sgn_image = MIMEImage(img.read())
        sgn_image.add_header('Content-ID', '<signature_image>')
        return sgn_image
    def create_attachment_object(self):
        with open(self.attachment_path + self.attachment_name, 'rb') as binary:
            payload = MIMEBase('application', 'octet-stream', Name=self.attachment_name)
            payload.set_payload(binary.read())
        encoders.encode_base64(payload)
        payload.add_header('Content-Disposition', 'attachment', filename=self.attachment_name)
        return payload
def send_mail_with_attachment(self, receiver, subject, body):
'''
In body, please notice that signature img is denoted by a tag:
<img src="cid:signature_image">
it has to be at the end of html body of mail.
Method calls other class methods to create objects as image and payload to use in mail.
Image is a signature image.
Payload is any attachment to the mail.
'''
attachment_mail = Mail(self, receiver, subject, body)
image = self.create_signature_image_object()
payload = self.create_attachment_object()
attachment_mail.message.attach(image)
attachment_mail.message.attach(payload)
attachment_mail.create_session()
attachment_mail.attach_message()
attachment_mail.send_mail()
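# A minimal usage sketch; every value below is a made-up placeholder.
# sender = ImportantUser('me@example.com', 'password', 'smtp.example.com', 587,
#                        'logo.png', '/img/', 'report.pdf', '/docs/')
# sender.send_mail_with_attachment('you@example.com', 'Report',
#                                  '<p>Hello</p><img src="cid:signature_image">')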
|
nilq/baby-python
|
python
|
/Users/NikhilArora/anaconda3/lib/python3.6/imp.py
|
nilq/baby-python
|
python
|
# coding: utf-8
"""Everythong related to parsing tracker responses"""
import urlparse
from lxml import etree
class BaseParser(object):
"""Abstract base class for tracker response parser"""
def parse_index(self, html):
"""Parse index html and return list of dicts"""
raise NotImplementedError()
def parse_torrent_page(self, html):
"""Parse torrent page and return dict"""
raise NotImplementedError()
def btih_from_href(url):
"""Extracts infohash from magnet link"""
parsed = urlparse.urlparse(url)
params = urlparse.parse_qs(parsed.query)
xt = params['xt'][0]
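    # the xt value looks like 'urn:btih:<infohash>'; drop the 9-character prefix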
return xt[9:]
def make_tree(html):
"""Make lxml.etree from html"""
htmlparser = etree.HTMLParser(encoding='utf-8')
return etree.fromstring(html, parser=htmlparser)
class Error(RuntimeError):
"""Parse error"""
pass
|
nilq/baby-python
|
python
|
from compas.datastructures import Network
def test_add_vertex():
network = Network()
assert network.add_vertex() == 0
assert network.add_vertex(x=0, y=0, z=0) == 1
assert network.add_vertex(key=2) == 2
assert network.add_vertex(key=0, x=1) == 0
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""
%prog [options] pair_1.fastq pair_2.fastq
filter reads from paired fastq so that no unmatching reads remain.
output files are pair_1.fastq.trim and pair_2.fastq.trim
see: http://hackmap.blogspot.com/2010/09/filtering-paired-end-reads-high.html
"""
__version__ = "0.1.0"
from subprocess import Popen, PIPE
import sys
FASTX_CLIPPER="fastx_clipper"
FASTQ_QUALITY_TRIMMER="fastq_quality_trimmer"
def gen_pairs(fha, fhb, min_len, fastq):
def gen_headers(fastq):
fq = open(fastq)
r = fq.readline().rstrip("\r\n")
while r:
fq.readline()
fq.readline()
fq.readline()
yield r[:-2]
r = fq.readline().rstrip("\r\n")
aread, bread = fha.readline, fhb.readline
get_a = lambda: [aread().rstrip("\r\n") for i in range(4)]
get_b = lambda: [bread().rstrip("\r\n") for i in range(4)]
ah, bh = None, None
header_gen = gen_headers(fastq)
for header in header_gen:
a = get_a()
ah = a[0][:-2]
b = get_b()
bh = b[0][:-2]
while not header in (ah, bh):
header = header_gen.next()
if bh != header:
while ah != bh and ah:
a = get_a()
ah = a[0][:-2]
while header != bh:
header = header_gen.next()
if ah != header:
while ah != bh and bh:
b = get_b()
bh = b[0][:-2]
while header != bh:
header = header_gen.next()
if not ah and bh:
raise StopIteration
assert ah == bh
if len(a[1]) < min_len or len(b[1]) < min_len: continue
yield a, b
def main(adaptors, M, t, min_len, fastqs, sanger=False):
cmds = []
for fastq in fastqs:
cmd = []
for i, a in enumerate(adaptors):
if M == 0:
matches = len(a)
else:
matches = min(M, len(a))
cmd.append("%s -a %s -M %i %s -l 0" \
% (FASTX_CLIPPER, a, matches, "-Q 33" if sanger else "")) #, min_len))
trim_cmd = "%s -t %i -l 0" % (FASTQ_QUALITY_TRIMMER, t) #, min_len)
if sanger: trim_cmd += " -Q 33"
cmd.append(trim_cmd)
cmd[0] += " < %s" % fastq
cmds.append(" | ".join(cmd))
print "[running]:", cmds[-1]
procs = [Popen(cmd, stdout=PIPE, shell=True) for cmd in cmds]
trima = open("%s.trim" % fastqs[0], 'w')
trimb = open("%s.trim" % fastqs[1], 'w')
print >>sys.stderr, "writing %s and %s" % (trima.name, trimb.name)
# no temporary file, just read from stdouts.
for ra, rb in gen_pairs(procs[0].stdout, procs[1].stdout, min_len,
fastqs[0]):
print >>trima, "\n".join(ra)
print >>trimb, "\n".join(rb)
returncode = 0
for p in procs:
p.wait()
returncode |= p.returncode
if returncode != 0:
print >>sys.stderr, "ERROR: non-zero returncode from fastx toolkit"
sys.exit(returncode)
if __name__ == "__main__":
import optparse
p = optparse.OptionParser(__doc__)
p.add_option("-a", dest="a", help="adaptor sequence to clip seperate multiples with ','", default="")
p.add_option("-M", dest="M", help="require minimum adapter alignment length of N."
" If less than N nucleotides aligned with the adapter - don't clip it."
" default 0 means to require the full length of the adaptor to match. ",
default=0, type='int')
p.add_option("-t", dest="t", help="Quality threshold - nucleotides with lower"
" quality will be trimmed (from the end of the sequence ",
type='int', default=0)
p.add_option("-l", dest="l", help="Minimum length - sequences shorter than this (after trimming)"
"will be discarded. Default = 0 = no minimum length.",
type="int", default=0)
p.add_option("--sanger", dest="sanger", help="quality scores are ascii 33 sanger encoded (default is 64)", action="store_true")
opts, fastqs = p.parse_args()
fastqs[-1] = fastqs[-1].rstrip()
    if len(fastqs) != 2:
sys.exit(p.print_help())
adaptors = [ad.strip() for ad in opts.a.split(",") if ad.strip()]
main(adaptors, opts.M, opts.t, opts.l, fastqs, opts.sanger)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# debris.db -- database-related operations for debris
import sqlite3
import time
from . import common
from .common import run_process
from .common import getconfig
from .common import log
class DebrisDB(object):
"""Object that can represent the database connection.
We are using sqlite3 as db.
"""
conn = None
def __init__(self, dbpath: str = None):
"""Init the DebrisDB object.
By default, the dbpath is given by loading config.
"""
if dbpath:
my_dbpath = dbpath
else:
my_dbpath = getconfig('DEBRIS_DB_FILE')
        log.debug('connecting to sqlite db: {}'.format(my_dbpath))
self.conn = sqlite3.connect(my_dbpath)
self._sanity_check()
# TODO: Complete me
def _sanity_check(self):
"""Run a sanity check.
If there are any missing tables, create them.
"""
c = self.conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS `builtpkg` (`package` TEXT NOT NULL, `version` TEXT NOT NULL);')
c.execute('CREATE TABLE IF NOT EXISTS `command_history` (`timestamp` INTEGER NOT NULL, `CMDTYPE` TEXT NOT NULL, `OPERATION` TEXT);')
        c.execute('CREATE TABLE IF NOT EXISTS `build_history` (`timestamp` INTEGER NOT NULL, `package` TEXT NOT NULL, `version` TEXT NOT NULL, `status` INTEGER NOT NULL, `stdout` BLOB, `stderr` BLOB);')
# TODO: recheck this
pass
def get_builtlist(self) -> list:
"""Retrieve a list for previously built packages.
:example::
[{'package': 'nixnote2', 'version': '2.0~beta9-1'},
{'package': 'qevercloud', 'version': '3.0.3+ds-1'}]
"""
builtlist = []
c = self.conn.cursor()
result = c.execute('SELECT `package`, `version` FROM `builtpkg`;').fetchall()
for i in result:
builtlist.append(dict(package=i[0], version=i[1]))
return builtlist
def log_transaction(
self,
package: str,
version: str,
status: bool,
stdout: bytes = None,
stderr: bytes = None,
):
"""Log one building attempt into the database.
"""
log.debug('logging build attempt...')
_current_time = int(time.time())
c = self.conn.cursor()
c.execute('INSERT INTO `build_history` (`timestamp`, `package`, `version`, `status`, `stdout`, `stderr`) VALUES (?, ?, ?, ?, ?, ?)', (_current_time, package, version, int(status), stdout, stderr,))
self.conn.commit()
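# A minimal usage sketch (the path is a made-up placeholder):
# db = DebrisDB('/tmp/debris.sqlite3')
# db.log_transaction('nixnote2', '2.0~beta9-1', True)
# print(db.get_builtlist())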
|
nilq/baby-python
|
python
|
#AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"extract_tag": "om2.ipynb",
"contains_tag": "om2.ipynb",
"is_nbx": "om2.ipynb",
"is_nbx_cell": "om2.ipynb",
"is_magic_or_shell": "om2.ipynb",
"": "om2.ipynb",
"strip": "om2.ipynb",
"parse_xarg": "om2.ipynb",
"get_imports_from_src": "om.ipynb",
"Import": "om.ipynb",
"create_import_statement": "om.ipynb",
"extract_imports_from": "om.ipynb",
"Bunch": "om2.ipynb",
"load_nb": "om2.ipynb",
"parse_src": "om.ipynb",
"parse_nbx_cell": "om.ipynb",
"concat": "om2.ipynb",
"unzip": "om2.ipynb",
"negate": "om2.ipynb",
"is_constarg": "om2.ipynb",
"get_item": "om2.ipynb",
"get_items": "om2.ipynb",
"not_constarg": "om2.ipynb",
"parse_nb": "om.ipynb",
"get_arrays": "om2.ipynb",
"init_job": "om2.ipynb",
"cont_job": "om2.ipynb",
"chain_jobs": "om2.ipynb",
"check_parsed_nb": "om.ipynb",
"NbxBundle": "om.ipynb",
"BUNDLE_SUMMARY": "om.ipynb",
"regex_tag": "om2.ipynb",
"regex_magic": "om2.ipynb",
"parse_xarg_expr": "om2.ipynb",
"regex_xarg": "om2.ipynb",
"parse_src_with_parse_dict": "om2.ipynb",
"parse_none": "om2.ipynb",
"parse_nbx": "om2.ipynb",
"parse_xuse": "om2.ipynb",
"consume_line_below": "om2.ipynb",
"parse_nbx_cell_with_parse_dict": "om2.ipynb",
"PARSE_DICT": "om2.ipynb",
"parse_nb_with_parse_dict": "om2.ipynb",
"get_arrays_2": "om2.ipynb",
"chain_jobs_2": "om2.ipynb",
"add_if_necessary": "om2.ipynb",
"create_script": "om2.ipynb",
"create_om_files": "om2.ipynb",
"create_folders": "om2.ipynb",
"create_run_and_job_script": "om2.ipynb",
"create_job_script": "om2.ipynb",
"check_nb": "om2.ipynb",
"create_experiment_script": "om2.ipynb",
"create_raw_experiment": "om2.ipynb",
"tpath": "om2.ipynb",
"INSTRUCTIONS": "om2.ipynb",
"Axis": "pspace.ipynb",
"ParameterSpace": "pspace.ipynb",
"get_templ_args": "templ.ipynb",
"render_templ": "templ.ipynb",
"create_file_from_template": "templ.ipynb",
"render_template_from_string": "templ.ipynb"}
modules = ["om.py",
"om2.py",
"pspace.py",
"templ.py"]
git_url = "https://github.com/mirkoklukas/nbx/tree/master/"
def custom_doc_links(name): return None
|
nilq/baby-python
|
python
|
# Create your own Shooter!
from pygame import *
from random import randint
from time import time as timer
mixer.init()
mixer.music.load('Fonk.ogg')
mixer.music.play(-1)
mixer.music.set_volume(0.2)
fire_sound = mixer.Sound('blaster.ogg')
fire_sound.set_volume(0.1)
font.init()
font1 = font.SysFont('Arial',80)
win = font1.render('YOU WIN!!!', True,(255,255,255))
lose = font1.render('YOU LOSE!!!', True,(255,0,0))
font2 = font.SysFont('Arial',36)
img_back = 'galaxy.jpg'
img_hero = 'rrocket.png'
img_enemy = 'ufo.png'
img_bullet = 'bullet.png'
img_rocket = 'oruzhie.png'
img_kunai = 'kunai.png'
img_ast = 'asteroid.png'
score = 0
goal = 20
lost = 0
max_lost = 10
life = 3
class GameSprite(sprite.Sprite):
def __init__(self,player_image,player_x,player_y,size_x,size_y,player_speed):
sprite.Sprite.__init__(self)
self.image = transform.scale(image.load(player_image), (size_x,size_y))
self.speed = player_speed
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
def reset(self):
window.blit(self.image, (self.rect.x, self.rect.y))
class Player(GameSprite):
def update(self):
keys = key.get_pressed()
if keys[K_LEFT] and self.rect.x > 5:
self.rect.x-=self.speed
if keys[K_RIGHT] and self.rect.x < win_width - 80:
self.rect.x+=self.speed
def fire(self):
bullet = Bullet(img_bullet,self.rect.centerx,self.rect.top,15, 20, -15)
bullets.add(bullet)
def kunai(self):
kunai = Bullet(img_kunai,self.rect.centerx,self.rect.top,15, 20, -15)
bullets.add(kunai)
def rocket(self):
rocket = Bullet(img_rocket,self.rect.centerx,self.rect.top,15, 20, -15)
bullets.add(rocket)
class Bullet(GameSprite):
def update (self):
self.rect.y += self.speed
if self.rect.y < 0:
self.kill()
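# Enemy sprite: falls down the screen; once it passes the bottom edge it
# respawns at the top and the miss counter (lost) goes up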
class Enemy(GameSprite):
def update(self):
self.rect.y += self.speed
global lost
        if self.rect.y > win_height:
self.rect.x = randint(80, win_width-80)
self.rect.y = 0
lost = lost + 1
win_width = 700
win_height = 500
window = display.set_mode((win_width, win_height))
display.set_caption("Shooter")
background = transform.scale(image.load(img_back), (win_width, win_height))
ship = Player(img_hero, 5, win_height - 100, 80, 100, 17)
monsters = sprite.Group()
bullets = sprite.Group()
asteroids = sprite.Group()
for i in range(1,6):
monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(3,5))
monsters.add(monster)
for i in range(1,3):
asteroid = Enemy(img_ast, randint(30, win_width - 30), -40, 80, 50, randint(3,5))
asteroids.add(asteroid)
finish = False
run = True
game = True
rel_time = False
num_fire = 0
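# Main game loop: handle input (SPACE/TAB/LCTRL fire from a 5-shot magazine with
# a 3-second reload), move and draw the sprites, resolve collisions, and restart
# the round a few seconds after a win or a loss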
while game:
for e in event.get():
if e.type == QUIT:
game = False
elif e.type == KEYDOWN:
if e.key == K_SPACE:
if num_fire < 5 and rel_time == False:
num_fire = num_fire + 1
fire_sound.play()
ship.fire()
if num_fire >= 5 and rel_time == False:
last_time = timer()
rel_time = True
elif e.key == K_TAB:
if num_fire < 5 and rel_time == False:
num_fire = num_fire + 1
fire_sound.play()
ship.kunai()
elif e.key == K_LCTRL:
if num_fire < 5 and rel_time == False:
num_fire = num_fire + 1
fire_sound.play()
ship.rocket()
if not finish:
        window.blit(background, (0, 0))
ship.reset()
ship.update()
monsters.update()
monsters.draw(window)
bullets.update()
bullets.draw(window)
asteroids.update()
asteroids.draw(window)
if rel_time == True:
now_time = timer()
if now_time - last_time < 3:
reload = font2.render('Wait, reload...', 1, (150,0,0))
window.blit(reload, (260, 460))
else:
num_fire = 0
rel_time = False
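        # Destroy every monster hit by a bullet, count the kill and spawn a replacement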
collides = sprite.groupcollide(monsters,bullets, True, True)
for c in collides:
score = score + 1
monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(2,4))
monsters.add(monster)
if sprite.spritecollide(ship, monsters, False) or sprite.spritecollide(ship, asteroids, False):
sprite.spritecollide(ship, monsters, True)
sprite.spritecollide(ship, asteroids, True)
life = life - 1
if life == 0 or lost >= max_lost:
finish = True
window.blit(lose, (200,200))
if score >= goal:
finish = True
window.blit(win, (200,200))
        text = font2.render('Shot down: ' + str(score), 1, (255,255,255))
window.blit(text, (10,20))
        text_lose = font2.render('Missed: ' + str(lost), 1, (255,255,255))
window.blit(text_lose, (10,50))
if life == 3:
life_color = (0, 255, 0)
if life == 2:
life_color = (255, 255, 0)
if life == 1:
life_color = (255, 0, 0)
text_life = font1.render(str(life), 1, life_color)
window.blit(text_life, (650,10))
display.update()
else:
finish = False
score = 0
lost = 0
num_fire = 0
life = 3
for b in bullets:
b.kill()
for m in monsters:
m.kill()
time.delay(3000)
for i in range(1,6):
monster = Enemy(img_enemy, randint(80, win_width - 80), -40, 80, 50, randint(2,4))
monsters.add(monster)
for i in range(1,3):
asteroid = Enemy(img_ast, randint(30, win_width - 30), -40, 80, 50, randint(3,5))
asteroids.add(asteroid)
time.delay(50)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# kcri.bap.shims.cgMLSTFinder - service shim to the cgMLSTFinder backend
#
import os, json, tempfile, logging
from pico.workflow.executor import Task
from pico.jobcontrol.job import JobSpec, Job
from .base import ServiceExecution, UserException
from .versions import BACKEND_VERSIONS
# Our service name and current backend version
SERVICE, VERSION = "cgMLSTFinder", BACKEND_VERSIONS['cgmlstfinder']
# Backend resource parameters: cpu, memory, run time requirements
MAX_CPU = 1
MAX_MEM = 1
MAX_TIM = 10 * 60
class cgMLSTFinderShim:
'''Service shim that executes the backend.'''
def execute(self, sid, xid, blackboard, scheduler):
'''Invoked by the executor. Creates, starts and returns the Task.'''
# Check whether running is applicable, else throw to SKIP execution
scheme_lst = list(filter(None, blackboard.get_user_input('cq_s','').split(',')))
species_lst = blackboard.get_species(list())
if not (scheme_lst or species_lst):
raise UserException("no species is known and no cgMLST scheme specified")
execution = cgMLSTExecution(SERVICE, VERSION, sid, xid, blackboard, scheduler)
# From here run the execution, and FAIL it on exception
try:
db_dir = execution.get_db_path('cgmlstfinder')
db_cfg = os.path.join(db_dir, 'config')
            # Note: only the first fastq (or contigs) file is used
fname = execution.get_fastqs_or_contigs_paths([])[0]
schemes = self.determine_schemes(db_cfg, scheme_lst, species_lst)
execution.start(schemes, fname, db_dir)
# Failing inputs will throw UserException
except UserException as e:
execution.fail(str(e))
# Deeper errors additionally dump stack
except Exception as e:
logging.exception(e)
execution.fail(str(e))
return execution
def determine_schemes(self, db_cfg, scheme_lst, species_lst):
        '''Reads the database config to find out which schemes to run for the
           given scheme and species lists. Returns a list of scheme names,
           or raises a user-interpretable error.'''
schemes = list()
spc_db = dict()
if not os.path.exists(db_cfg):
raise UserException("no database config file: %s" % db_cfg)
with open(db_cfg, 'r') as f:
for l in f:
l = l.strip()
if not l or l.startswith('#'): continue
r = l.split('\t')
if not len(r) == 3: continue
spc_db[r[1].strip()] = r[0].strip()
for db in scheme_lst:
            if db not in spc_db.values():
raise UserException("unknown scheme: %s; valid schemes are: %s" %
(db, ', '.join(spc_db.values())))
            elif db not in schemes:
schemes.append(db)
for s in species_lst:
if s.startswith('Shigella'): s = 'Escherichia coli' # argh: should be fixed in config
db = spc_db.get(s.split(' ')[0], spc_db.get(s))
            if db and db not in schemes:
schemes.append(db)
if not schemes:
raise UserException("no applicable cgMLST scheme")
return schemes
class cgMLSTExecution(ServiceExecution):
'''A single execution of the service, returned by the shim's execute().'''
    _jobs = list()
    def start(self, schemes, fname, db_dir):
        # Keep the job list per instance, so executions do not share the class attribute
        self._jobs = list()
        # Schedule a backend job for every scheme if all is good
        if self.state == Task.State.STARTED:
            for scheme in schemes:
                self.run_scheme(scheme, fname, db_dir)
def run_scheme(self, scheme, fname, db_dir):
'''Spawn cgMLST for one scheme.'''
# Create a command line for the job
tmpdir = tempfile.TemporaryDirectory()
params = [
'-db', db_dir,
'-t', tmpdir.name,
# '-o', '.',
'-s', scheme,
fname ]
# Spawn the job and hold a record in the jobs table
job_spec = JobSpec('cgMLST.py', params, MAX_CPU, MAX_MEM, MAX_TIM)
job = self._scheduler.schedule_job('cgmlst_%s' % scheme, job_spec, os.path.join(SERVICE,scheme))
self._jobs.append((job, scheme, tmpdir))
def report(self):
        '''Implements WorkflowService.Task.report(): updates the blackboard
           if we are done, and returns our current state.'''
# If our outward state is STARTED check the jobs
if self.state == Task.State.STARTED:
# We may be running no jobs at all if no scheme applied
if len(self._jobs) == 0:
self.add_warning("no cgMLST scheme was found for the species")
self.store_results(list())
self.done()
# Else we report only once all our jobs are done
elif all(j[0].state in [ Job.State.COMPLETED, Job.State.FAILED ] for j in self._jobs):
typings = list()
for job, scheme, tmpdir in self._jobs:
if job.state == Job.State.COMPLETED:
typings.append(self.collect_output(job, scheme))
elif job.state == Job.State.FAILED:
self.add_error('%s: %s' % (job.name, job.error))
tmpdir.cleanup()
# Store result
self.store_results(typings)
# Report fail if none of the runs succeeded
if any(j[0].state == Job.State.COMPLETED for j in self._jobs):
self.done()
else:
self.fail('no successful cgMLSTFinder job')
return self.state
def collect_output(self, job, scheme):
typing = dict({'scheme': scheme })
try:
with open(job.file_path('data.json'), 'r') as f:
j = json.load(f)
d = j.get('cgMLSTFinder').get('results')
if d: # There should be at most one, as we have 1 FA or 1 fastq
hit = list(d.values())[0]
typing.update(hit)
self._blackboard.add_cgmlst(scheme, hit.get('cgST', 'NA'), hit.get('perc_allele_matches', 'NA'))
except Exception as e:
typing['error'] = "cgMLSTFinder ran successfully but output could not be parsed: %s" % str(e)
return typing
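# Illustrative, standalone sketch (not part of the original module): the
# tab-separated layout that determine_schemes() expects in the database
# `config` file, parsed the same way as the method above. The scheme prefixes
# and species names below are made up for the example.
if __name__ == '__main__':
    example_cfg = (
        "#db_prefix\tspecies\tdescription\n"
        "ecoli\tEscherichia coli\tE. coli cgMLST scheme\n"
        "salmonella\tSalmonella enterica\tSalmonella cgMLST scheme\n")
    spc_db = dict()
    for l in example_cfg.splitlines():
        l = l.strip()
        if not l or l.startswith('#'): continue
        r = l.split('\t')
        if len(r) != 3: continue
        spc_db[r[1].strip()] = r[0].strip()
    print(spc_db)  # {'Escherichia coli': 'ecoli', 'Salmonella enterica': 'salmonella'}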
|
nilq/baby-python
|
python
|