text string | size int64 | token_count int64 |
|---|---|---|
#!/usr/bin/env python3
# file://mkpy3_finder_chart_survey_fits_image_get_v1.py
# Kenneth Mighell
# SETI Institute
# =============================================================================
def mkpy3_finder_chart_survey_fits_image_get_v1(
    ra_deg=None,
    dec_deg=None,
    radius_arcmin=None,
    survey=None,
    cframe=None,
    verbose=None,
):
    """
    Fetch sky-survey image data centered on a position on the sky.

    Parameters
    ----------
    ra_deg : float (optional)
        right ascension [deg] (default: Kepler-93b)
    dec_deg : float (optional)
        declination [deg] (default: Kepler-93b)
    radius_arcmin : float (optional)
        radius (halfwidth and halfheight of image) [arcmin]
    survey : str (optional)
        survey string name [e.g., '2MASS-J', 'DSS2 Red', etc.]
    cframe : str (optional)
        coordinate frame name [e.g., 'fk5', 'icrs', etc.]
    verbose : bool (optional)
        if True, print the resolved input values

    Returns
    -------
    hdu :
        Header/Data Unit (HDU) of the survey FITS file
    hdr :
        header associated with hdu
    data :
        data associated with hdu
    wcs :
        World Coordinate System from hdu
    cframe :
        coordinate frame of the survey data

    Kenneth Mighell
    SETI Institute
    """
    import astropy.units as u
    from astropy.coordinates import SkyCoord
    from astroquery.skyview import SkyView
    from astropy.wcs import WCS
    #
    # Fall back to the Kepler-93b field with 2MASS-J defaults.
    ra_deg = 291.41829 if ra_deg is None else ra_deg  # Kepler-93b
    dec_deg = 38.67236 if dec_deg is None else dec_deg  # Kepler-93b
    radius_arcmin = 1.99 if radius_arcmin is None else radius_arcmin
    survey = "2MASS-J" if survey is None else survey  # alternate: 'DSS2 Red'
    # ^--- to see all surveys: astroquery.skyview.SkyView.list_surveys()
    cframe = "fk5" if cframe is None else cframe  # N.B.: '2MASS-J' uses 'fk5'
    verbose = False if verbose is None else verbose
    if verbose:
        print(ra_deg, "=ra_deg")
        print(dec_deg, "=dec_deg")
        print(radius_arcmin, "=radius_arcmin")
        print("'%s' =survey" % (survey))
        print("'%s' =cframe" % (cframe))
        print(verbose, "=verbose")
        print()
    #
    # astropy sky coordinate of the target position
    position = SkyCoord(ra=ra_deg * u.degree, dec=dec_deg * u.degree, frame=cframe)
    # SkyView returns a list of images; a single image is expected here
    image_list = SkyView.get_images(
        position=position, survey=survey, radius=radius_arcmin * u.arcmin
    )
    #
    # outputs:
    hdu = image_list[0]  # Header/Data Unit of the FITS image
    hdr = hdu[0].header  # header associated with the HDU
    data = hdu[0].data  # data associated with the HDU
    wcs = WCS(hdr)  # World Coordinate System from the survey FITS header
    #
    return hdu, hdr, data, wcs, cframe
# fed
def xmkpy3_finder_chart_survey_fits_image_get_v1():
    """Self-test/demo: plot a 2MASS-J finder chart for Kepler-138b.

    Downloads the Kepler target pixel file for Kepler-138b (quarter 10),
    fetches a 3-arcmin 2MASS-J survey image at the target position, shows
    it in a WCS projection with the target circled in yellow, and writes
    the figure to 'mkpy3_plot.png'.
    """
    import lightkurve as lk
    lk.log.setLevel("INFO")
    import matplotlib.pyplot as plt
    import astropy.units as u
    from astropy.visualization import ImageNormalize, PercentileInterval, SqrtStretch
    import os
    import ntpath
    # Exoplanet Kepler-138b is "KIC 7603200":
    tpf = lk.search_targetpixelfile(
        target="kepler-138b", mission="kepler", cadence="long", quarter=10
    ).download(quality_bitmask=0)
    print("TPF filename:", ntpath.basename(tpf.path))
    print("TPF dirname: ", os.path.dirname(tpf.path))
    target = "Kepler-138b"
    ra_deg = tpf.ra
    dec_deg = tpf.dec
    # get survey image data
    width_height_arcmin = 3.00
    survey = "2MASS-J"
    (
        survey_hdu,
        survey_hdr,
        survey_data,
        survey_wcs,
        survey_cframe,
    ) = mkpy3_finder_chart_survey_fits_image_get_v1(
        ra_deg, dec_deg, radius_arcmin=width_height_arcmin, survey=survey, verbose=True
    )
    # create a matplotlib figure object
    fig = plt.figure(figsize=(12, 12))
    # create a matplotlib axis object with right ascension and declination axes
    ax = plt.subplot(projection=survey_wcs)
    # sqrt stretch over the central 99% of pixel values
    norm = ImageNormalize(
        survey_data, interval=PercentileInterval(99.0), stretch=SqrtStretch()
    )
    ax.imshow(survey_data, origin="lower", norm=norm, cmap="gray_r")
    ax.set_xlabel("Right Ascension (J2000)")
    ax.set_ylabel("Declination (J2000)")
    ax.set_title("")
    plt.suptitle(target)
    # put a yellow circle at the target position
    ax.scatter(
        ra_deg * u.deg,
        dec_deg * u.deg,
        transform=ax.get_transform(survey_cframe),
        s=600,
        edgecolor="yellow",
        facecolor="None",
        lw=3,
        zorder=100,
    )
    pname = "mkpy3_plot.png"
    if pname != "":
        plt.savefig(pname, bbox_inches="tight")
        print(pname, " <--- plot filename has been written! :-)\n")
    # fi
    return None
# fed
# =============================================================================
if __name__ == "__main__":
    # Run the self-test/demo when executed as a script.
    xmkpy3_finder_chart_survey_fits_image_get_v1()
# fi
# EOF
| 4,910 | 1,775 |
"""
marathontcp.py
Author: Steven Gantz
Date: 11/22/2016
These two classes are used as custom TCP Servers and its accompanying
handler that defines each request. These class are what forward the data
from the preset /metrics endpoints in the scaled marathon instances directly
to the TCP servers running from this application.
"""
# Official Imports
import socketserver
import urllib.request
class MarathonRedirectTCPServer(socketserver.TCPServer):
    """TCP server that carries a Marathon instance's metrics address.

    The extra ``api_url`` ("host:port" of a scaled Marathon instance) is
    stored on the server so each request handler can reach it through
    ``self.server.api_url``.
    """
    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True, api_url="Empty Request"):
        # As per http://stackoverflow.com/questions/15889241/send-a-variable-to-a-tcphandler-in-python
        self.api_url = api_url
        # BUG FIX: the original always passed bind_and_activate=True to the
        # base class, silently ignoring the caller's argument.
        socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
class MarathonRedirectTCPHandler(socketserver.BaseRequestHandler):
    """ Makes a metrics request and forwards to preset ports through the application"""
    def handle(self):
        """Fetch /metrics from the configured instance and relay it to the client."""
        print("Retrieving metrics from http://" + self.server.api_url + "/metrics")
        # Make a request to the api_url metrics and fwd to page
        encoded_response = urllib.request.urlopen("http://" + self.server.api_url + "/metrics")
        # Change encoded response in to simple string
        # NOTE(review): minimal hand-built HTTP response; the trailing space
        # after "OK" is tolerated by clients but not strictly standard.
        header = "HTTP/1.0 200 OK \r\n"
        content_type = "Content-Type: text/plain\r\n\r\n"
        text_response = header + content_type + encoded_response.read().decode()
        # self.request is the TCP socket connected to the client
        self.request.sendall(text_response.encode())
        # Read Response to close request
        # NOTE(review): blocks until the client sends data or closes the
        # connection — presumably drains the request before teardown; confirm.
        res = self.request.recv(1024)
| 1,737 | 505 |
import pytest
import io
import random
from copy import deepcopy
from horcrux import io as hio
from horcrux.hrcx_pb2 import StreamBlock
from horcrux.sss import Share, Point
@pytest.fixture()
def hx():
    # Fresh Horcrux wrapping an empty in-memory stream.
    return hio.Horcrux(io.BytesIO())
@pytest.fixture()
def share():
    # Sample threshold-2 SSS share with a fixed id and point.
    return Share(b'0123456789abcdef', 2, Point(0, b'123'))
@pytest.fixture()
def two_block_hrcx():
    # Pre-serialized horcrux stream: share/stream header plus two data blocks.
    return io.BytesIO(b'\x1b\n\x100123456789ABCDEF\x10\x04\x1a\x05\x12\x03123\x08\n\x06'
                      b'566784\x00\x08\x12\x06abcdef\x02\x08\x01\n\x12\x08ghijklmn')
def test_init_horcrux():
    # Constructing a Horcrux over an empty stream must not raise.
    # FIX: dropped the unused local binding (flake8 F841).
    hio.Horcrux(io.BytesIO())
def test_horcrux__write_bytes(hx):
    # _write_bytes must length-prefix the payload before writing it.
    hx._write_bytes(b'123')
    assert hx.stream.getvalue() == b'\x03123'
def test_horcurx__read_message_bytes_small(hx):
    # Round-trip two short messages through a reopened stream.
    # NOTE(review): "horcurx" typo in the name is kept to preserve test ids.
    hx._write_bytes(b'123')
    hx._write_bytes(b'4567890')
    stream = hx.stream
    del hx
    stream.seek(0)
    hx = hio.Horcrux(stream)
    m1 = hx._read_message_bytes()
    assert m1 == b'123'
    m2 = hx._read_message_bytes()
    assert m2 == b'4567890'
def test_horcrux__read_message_bytes_large(hx):
    # Round-trip messages whose lengths need more than a single prefix byte.
    m1 = bytes(255 for _ in range(500))
    m2 = bytes(random.getrandbits(8) for _ in range(4))
    m3 = bytes(random.getrandbits(8) for _ in range(4096))
    for m in (m1, m2, m3):
        hx._write_bytes(m)
    stream = hx.stream
    del hx
    stream.seek(0)
    hx = hio.Horcrux(stream)
    assert hx._read_message_bytes() == m1
    assert hx._read_message_bytes() == m2
    assert hx._read_message_bytes() == m3
def test_horcrux_write_data_block(hx):
    # A data block serializes as: length prefix, id field, data field.
    _id = 1
    data = b'my data'
    hx.write_data_block(_id, data)
    out = hx.stream.getvalue()
    print(out)
    assert out == b'\x02\x08\x01\t\x12\x07my data'
def test_horcrux_write_share_header(hx, share):
    # The share header encodes the share id, threshold, and point.
    hx._write_share_header(share)
    stream = hx.stream
    del hx
    stream.seek(0)
    print(stream.getvalue())
    assert stream.getvalue() == b'\x1b\n\x100123456789abcdef\x10\x02\x1a\x05\x12\x03123'
def test_horcrux_write_stream_header(hx):
    # The stream header carries the crypto header and, optionally, the
    # encrypted filename.
    header = b'u\x14Op\xa3\x13\x01Jt\xa8'
    hx._write_stream_header(header)
    hx._write_stream_header(header, encrypted_filename=b'testname')
    stream = hx.stream
    del hx
    stream.seek(0)
    hx = hio.Horcrux(stream)
    h1 = hx._read_message_bytes()
    assert h1 == b'\n\nu\x14Op\xa3\x13\x01Jt\xa8'
    h2 = hx._read_message_bytes()
    assert h2 == b'\n\nu\x14Op\xa3\x13\x01Jt\xa8\x1a\x08testname'
def test_horcrux_init_write(hx, share):
    # init_write emits the share header then the stream header and assigns
    # horcrux id 0.
    cryptoheader = b'u\x14Op\xa3\x13\x01Jt\xa8'
    hx.init_write(share, cryptoheader, encrypted_filename=b'slkfjwnfa;')
    assert hx.hrcx_id == 0
    stream = hx.stream
    del hx
    stream.seek(0)
    headers = stream.getvalue()
    print(headers)
    assert headers == (
        b'\x1b\n\x100123456789abcdef\x10\x02\x1a'
        b'\x05\x12\x03123\x18\n\nu\x14Op\xa3\x13\x01Jt\xa8\x1a\nslkfjwnfa;')
def test_horcrux_init_read(share):
    # init_read must recover the share, horcrux id and filename, and leave
    # next_block_id unset.
    stream = io.BytesIO(
        b'\x1b\n\x100123456789abcdef\x10\x02\x1a'
        b'\x05\x12\x03123\x18\n\nu\x14Op\xa3\x13\x01Jt\xa8\x1a\nslkfjwnfa;')
    stream.seek(0)
    hx = hio.Horcrux(stream)
    hx.init_read()
    assert hx.share == share
    assert hx.hrcx_id == 0
    assert hx.encrypted_filename == b'slkfjwnfa;'
    # FIX: compare to None with `is`, not `==` (PEP 8 / E711).
    assert hx.next_block_id is None
def test_horcrux_read_block(hx):
    # Blocks written with write_data_block must read back in order with ids.
    data1 = bytes(random.getrandbits(8) for _ in range(30))
    data2 = bytes(random.getrandbits(8) for _ in range(30))
    hx.write_data_block(33, data1)
    hx.write_data_block(45, data2)
    stream = hx.stream
    stream.seek(0)
    del hx
    hx = hio.Horcrux(stream)
    hx._read_next_block_id()
    _id, d = hx.read_block()
    assert d == data1
    assert _id == 33
    _id, d = hx.read_block()
    assert d == data2
    assert _id == 45
def test_horcrux_skip_block(hx):
    # skip_block must advance past the first block without returning it.
    data1 = bytes(255 for _ in range(30))
    data2 = bytes(255 for _ in range(30))
    hx.write_data_block(33, data1)
    hx.write_data_block(45, data2)
    stream = hx.stream
    stream.seek(0)
    del hx
    hx = hio.Horcrux(stream)
    hx._read_next_block_id()
    hx.skip_block()
    _id, d = hx.read_block()
    assert d == data2
    assert _id == 45
def test_get_horcrux_files(tmpdir, share):
    # Every generated horcrux file must contain the share header followed by
    # the crypto header.
    fn = 'test_horcrux'
    shares = [deepcopy(share) for _ in range(4)]
    crypto_header = b'1234567'
    expected = b'\x1b\n\x100123456789abcdef\x10\x02\x1a\x05\x12\x03123\t\n\x071234567'
    hxs = hio.get_horcrux_files(fn, shares, crypto_header, outdir=tmpdir)
    assert len(hxs) == 4
    for h in hxs:
        h.stream.close()
        with open(h.stream.name, 'rb') as fin:
            assert fin.read() == expected
| 4,600 | 2,214 |
#!/usr/bin/env python3
"""
A Pandoc filter to create non-code diffs. `add` and `rm` are the classes that
can be added to a `Div` or a `Span`. `add` colors the text green, and `rm`
colors the text red. For HTML, `add` also underlines the text, and `rm` also
strikes out the text.
# Example
## `Div`
Unchanged portion
::: add
New paragraph
> Quotes
More new paragraphs
:::
## `Span`
> The return type is `decltype(`_e_(`m`)`)` [for the first form]{.add}.
"""
import panflute as pf
def action(elem, doc):
    """Wrap `add`/`rm` Divs and Spans in diff markup.

    `add` -> HTML <ins> + TeX color `addcolor`; `rm` -> HTML <del> + TeX
    color `rmcolor`.  Other elements, or elements carrying neither class,
    are returned unchanged (None).
    """
    if not isinstance(elem, pf.Div) and not isinstance(elem, pf.Span):
        return None
    color_name = None
    tag_name = None
    for cls in elem.classes:
        # BUG FIX: the original set color_name from *every* class, so an
        # unrelated trailing class (e.g. {.add .wide}) clobbered the color.
        # Derive the color only from the matched diff class.
        if cls == 'add':
            tag_name = 'ins'
            color_name = cls + 'color'
        elif cls == 'rm':
            tag_name = 'del'
            color_name = cls + 'color'
    if tag_name is None:
        return None
    open_tag = pf.RawInline('<{}>'.format(tag_name), 'html')
    open_color = pf.RawInline('{{\\color{{{}}}'.format(color_name), 'tex')
    close_color = pf.RawInline('}', 'tex')
    close_tag = pf.RawInline('</{}>'.format(tag_name), 'html')
    # Optional HTML hex color taken from document metadata (addcolor/rmcolor).
    color = doc.get_metadata(color_name)
    attributes = {} if color is None else {'style': 'color: #{}'.format(color)}
    if isinstance(elem, pf.Div):
        return pf.Div(pf.Plain(open_tag),
                      pf.Plain(open_color),
                      elem,
                      pf.Plain(close_color),
                      pf.Plain(close_tag),
                      attributes=attributes)
    elif isinstance(elem, pf.Span):
        return pf.Span(open_tag,
                       open_color,
                       elem,
                       close_color,
                       close_tag,
                       attributes=attributes)
if __name__ == '__main__':
    # Run as a Pandoc JSON filter on stdin/stdout.
    pf.run_filter(action)
| 1,817 | 608 |
from __future__ import absolute_import, unicode_literals
class PaxfulError(Exception):
    """Base (catch-all) exception for all errors raised by this client."""
class RequestError(PaxfulError):
    """Raised when an API request fails.

    :ivar message: Error message.
    :vartype message: str | unicode
    :ivar url: API endpoint that was called.
    :vartype url: str | unicode
    :ivar body: Raw response body from Pax.
    :vartype body: str | unicode
    :ivar headers: Response headers.
    :vartype headers: requests.structures.CaseInsensitiveDict
    :ivar http_code: HTTP status code.
    :vartype http_code: int
    :ivar error_code: Error code from Pax.
    :vartype error_code: int
    :ivar response: Response object.
    :vartype response: requests.Response
    """
    def __init__(self, response, message, error_code=None):
        super(RequestError, self).__init__(message)
        # Keep the full exchange around for callers that need to inspect it.
        self.response = response
        self.message = message
        self.error_code = error_code
        self.url = response.url
        self.body = response.text
        self.headers = response.headers
        self.http_code = response.status_code
# Input-validation errors: raised before any HTTP request is made.
class InvalidCurrencyError(PaxfulError):
    """Raised when an invalid major currency is given."""
class InvalidOrderBookError(PaxfulError):
    """Raised when an invalid order book is given."""
| 1,304 | 390 |
"""
Displays anatomical data from the mesoscope
"""
from skimage.io import imread
from napari import Viewer, gui_qt
# Load the anatomical z-stack from disk as a numpy array.
stack = imread('data/mesoscope/anatomical/volume_zoomed.tif')
with gui_qt():
    # create an empty viewer
    viewer = Viewer()
    # add the image
    # NOTE(review): `clim`/`clim_range` are keyword names from older napari
    # releases (newer versions use `contrast_limits`) — confirm against the
    # pinned napari version before upgrading.
    layer = viewer.add_image(stack, name='stack', clim=(0.0, 3000.0), clim_range=(0.0, 6000.0), colormap='gray')
| 382 | 150 |
from .. import utils
from ..config import table
class Paper():
    """Thin data-access wrapper around the `paper` Mongo collection."""
    def __init__(self):
        # Collection name comes from the shared config table.
        self.collection = table.paper
        self.conn = utils.mongo.db.get_collection(self.collection)
    def list(self):
        # TODO: not implemented yet; currently returns None.
        pass
# Module-level singleton used by importers.
paper = Paper()
| 246 | 78 |
import discord
from discord.ext import commands
from utils.database import sqlite, create_tables
class Events(commands.Cog):
    """Listeners that mirror message activity into a guild's log channel."""
    def __init__(self, bot):
        self.bot = bot
        self.db = sqlite.Database()
    def logs(self, guild_id):
        """Return the configured log-channel id for *guild_id*, or None."""
        data = self.db.fetchrow("SELECT * FROM Logging WHERE guild_id=?", (guild_id,))
        return data["logs_id"] if data else None
    @commands.Cog.listener()
    async def on_message_delete(self, message):
        # ROBUSTNESS FIX: DMs have no guild (message.guild is None), which
        # previously raised AttributeError here.  Bot authors are skipped
        # for consistency with on_message_edit.
        if message.guild is None or message.author.bot:
            return
        log_channel = self.bot.get_channel(self.logs(message.guild.id))
        if log_channel:
            embed = discord.Embed(
                title="Message Deleted 📝",
                description=f"**Deleted in:** `#{message.channel}`\n**Author:** `{message.author}`\n**Message:** ```{message.content}```",
                color=0x2F3136
            )
            embed.timestamp = message.created_at
            await log_channel.send(embed=embed)
    @commands.Cog.listener()
    async def on_message_edit(self, before, after):
        # Same guards as on_message_delete, applied before touching the DB.
        if before.guild is None or before.author.bot:
            return
        log_channel = self.bot.get_channel(self.logs(before.guild.id))
        if log_channel:
            embed = discord.Embed(
                title="Message Edited 📝",
                description=f"**Edited in:** `#{before.channel}`\n**Author:** `{before.author}`\n**Before:** ```{before.content}```\n**Now:** ```{after.content}```",
                color=0x2F3136
            )
            embed.timestamp = before.created_at
            await log_channel.send(embed=embed)
def setup(bot):
    # discord.py extension entry point: register the cog on the bot.
    bot.add_cog(Events(bot))
| 1,459 | 510 |
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import h5py
# In[24]:
# Windowing parameters: each sample is `input_step_size` consecutive prices
# used to predict the following `output_size` prices.
input_step_size = 50
output_size = 30
sliding_window = False
file_name = 'bitcoin2012_2017_50_30_prediction.h5'

df = pd.read_csv('data/bitstampUSD_1-min_data_2012-01-01_to_2017-05-31.csv').dropna().tail(1000000)
df['Datetime'] = pd.to_datetime(df['Timestamp'], unit='s')
df.head()

prices = df.loc[:, 'Close'].values
# BUG FIX: the original read the 'Close' column here too, so the stored
# "times" arrays were just a copy of the prices.  Use the numeric Unix
# timestamps instead (h5py cannot store datetime64, so keep raw seconds).
times = df.loc[:, 'Timestamp'].values
prices.shape

# Build (input window, output window) pairs; a sliding window advances one
# sample at a time, otherwise windows tile the series without overlap.
outputs = []
inputs = []
output_times = []
input_times = []
step = 1 if sliding_window else input_step_size
for i in range(0, len(prices) - input_step_size - output_size, step):
    inputs.append(prices[i:i + input_step_size])
    input_times.append(times[i:i + input_step_size])
    outputs.append(prices[i + input_step_size: i + input_step_size + output_size])
    output_times.append(times[i + input_step_size: i + input_step_size + output_size])
inputs = np.array(inputs)
outputs = np.array(outputs)
output_times = np.array(output_times)
input_times = np.array(input_times)
# In[34]:
# Persist the windowed samples so training scripts can load them from HDF5.
with h5py.File(file_name, 'w') as f:
    f.create_dataset("inputs", data = inputs)
    f.create_dataset('outputs', data = outputs)
    f.create_dataset("input_times", data = input_times)
    f.create_dataset('output_times', data = output_times)
| 1,679 | 675 |
import requests
def wip():
    # Placeholder ("work in progress"): just prints a blank line.
    print()
def apiToDictionary(url, *args):
    """GET *url* and return the decoded JSON payload as a dict.

    Extra positional arguments are accepted for call-site compatibility
    but are ignored.
    """
    response = requests.get(url)
    payload = response.json()
    response.close()
    return dict(payload)
def main():
    # Earlier experiments kept for reference: fetching a GitHub gist and a
    # Wikipedia API page through apiToDictionary().
    # docDict = {"text":"592da8d73b39d3e1f54304fedf7456b1", "markdown":"6a4cccf1c66c780e72264a9fbcb9d5fe"}
    # resultDict = apiToDictionary("https://api.github.com/gists/" + docDict.get("markdown"))
    # print(dict(dict(resultDict.get('files')).get('MineCTC: Rules.md')).get('content'))
    # resultDict = apiToDictionary("https://en.wikipedia.org/w/api.php?action=query&titles=Hebrew_alphabet&prop=revisions&rvprop=content&format=json&formatversion=2")
    # print(dict(resultDict["query"]))
    wip()
if __name__ == '__main__':
    # Script entry point.
    main()
| 776 | 303 |
# Generated by Django 3.1.12 on 2021-09-18 12:55
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the `tag` field from the elibrary Book model."""
    dependencies = [
        ('elibrary', '0010_auto_20210918_1434'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='book',
            name='tag',
        ),
    ]
| 325 | 125 |
#!/usr/bin/python
import requests
import json
import random
from sets import Set
#
# Creates client-side connectivity json
#
def tapi_client_input(sip_uuids):
    """Return the connectivity-service creation body for a client-side
    service between the first two SIP uuids in sip_uuids."""
    end_points = []
    for uuid in sip_uuids[:2]:
        end_points.append({
            "local-id": uuid,
            "service-interface-point": {
                "service-interface-point-uuid": uuid
            }
        })
    return {"tapi-connectivity:input": {"end-point": end_points}}
#
# Creates line-side connectivity json
#
def tapi_line_input(sip_uuids):
    """Return the connectivity-service creation body for a line-side
    (PHOTONIC_MEDIA / NMC) service between sip_uuids[0] and sip_uuids[1]."""
    def end_point(local_id, sip_uuid):
        # Both endpoints share every field except their local id and SIP.
        return {
            "layer-protocol-qualifier": "tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC",
            "role": "UNKNOWN",
            "local-id": local_id,
            "direction": "BIDIRECTIONAL",
            "service-interface-point": {
                "service-interface-point-uuid": sip_uuid
            },
            "protection-role": "WORK",
            "layer-protocol-name": "PHOTONIC_MEDIA"
        }
    return {
        "tapi-connectivity:input": {
            "end-point": [
                end_point("Src_end_point", sip_uuids[0]),
                end_point("Dst_end_point", sip_uuids[1])
            ]
        }
    }
#
# Obtains TAPI context through restconf
#
def get_context(url_context):
    """GET the full TAPI context from the ONOS RESTCONF endpoint and
    return it as parsed JSON."""
    resp = requests.get(url_context, auth=('onos', 'rocks'))
    if resp.status_code != 200:
        raise Exception('GET {}'.format(resp.status_code))
    return resp.json()
#
# Check if the node is transponder.
# True - transponder
# False - OLS
#
def is_transponder_node(node):
    """Return True when *node* is a transponder, False when it is an OLS.

    Transponder node-edge-points expose a mapped-service-interface-point;
    OLS node-edge-points do not.
    """
    neps = node["owned-node-edge-point"]
    return bool(neps) and "mapped-service-interface-point" in neps[0]
#
# Parse src and dst sip-uuids of specific link from topo.
#
def parse_src_dst(topo, link_index=-1):
    """Pick a link from *topo* (randomly unless link_index is given) and
    resolve its transponder-side oneps plus their SIP uuids.

    Returns (src_onep, dst_onep, src_sip_uuid, dst_sip_uuid).
    """
    if link_index == -1:
        # select a link randomly from all links of topo
        link_index = random.randint(0, len(topo["link"]) - 1)
    nep_pair = topo["link"][link_index]["node-edge-point"]
    assert topo["uuid"] == nep_pair[0]["topology-uuid"]
    assert topo["uuid"] == nep_pair[1]["topology-uuid"]
    src_onep, dst_onep = (find_line_onep(nep_pair[0], topo["node"]),
                          find_line_onep(nep_pair[1], topo["node"]))
    if src_onep is not None and dst_onep is not None:
        # If the link is between two transponders directly
        pass
    elif src_onep is None and dst_onep is None:
        raise AssertionError("Impossible for that both two ports are OLS port")
    else:
        # If one of src_onep and dst_onep is None, then make src_onep not None,
        # and find a new dst_onep with same connection id.
        if src_onep is None:
            src_onep = dst_onep
            dst_onep = None
        conn_id = parse_value(src_onep["name"])["odtn-connection-id"]
        for node in topo["node"]:
            cep = src_onep["tapi-connectivity:cep-list"]["connection-end-point"]
            assert len(cep) == 1
            if cep[0]["parent-node-edge-point"]["node-uuid"] != node["uuid"] and is_transponder_node(node):
                # If this node is not the node that includes src_onep, and not a OLS node
                for onep in node["owned-node-edge-point"]:
                    if parse_value(onep["name"])["odtn-connection-id"] == conn_id:
                        dst_onep = onep
                        break
                if dst_onep is not None:
                    break
    src_sip_uuid, dst_sip_uuid = \
        (src_onep["mapped-service-interface-point"][0]["service-interface-point-uuid"],
         dst_onep["mapped-service-interface-point"][0]["service-interface-point-uuid"])
    return src_onep, dst_onep, src_sip_uuid, dst_sip_uuid
#
# Check whether the sip uuid is used in other existed services.
#
def is_port_used(sip_uuid, conn_context):
    # Returns True when any existing connectivity-service endpoint already
    # references sip_uuid; a missing "connectivity-service" key means no
    # line-side services exist yet.
    try:
        for service in conn_context["connectivity-service"]:
            for id in [0, 1]:
                if service["end-point"][id]["service-interface-point"]["service-interface-point-uuid"] == sip_uuid:
                    return True
    except KeyError:
        print "There is no line-side service in ONOS now."
    return False
#
# Requests a connectivity service
#
def request_connection(url_connectivity, context):
    # Builds a client-side (DSR) request between the first and last DSR
    # SIPs (sorted by name) and POSTs it to ONOS.
    # All Context SIPs
    sips = context["tapi-common:context"]["service-interface-point"]
    # Sorted Photonic Media SIPs. filter is an iterable
    esips = list(filter(is_dsr_media, sorted(sips, key=lambda sip: sip["name"][0]["value"])))
    endpoints = [esips[0], esips[-1]]
    sip_uuids = []
    for sip in endpoints:
        sip_uuids.append(sip["uuid"])
    for uuid in sip_uuids:
        print(uuid)
    create_input_json = json.dumps(tapi_client_input(sip_uuids))
    print (create_input_json)
    headers = {'Content-type': 'application/json'}
    resp = requests.post(url_connectivity, data=create_input_json, headers=headers, auth=('onos', 'rocks'))
    if resp.status_code != 200:
        raise Exception('POST {}'.format(resp.status_code))
    return resp
#
# Filter method used to keep only SIPs that are photonic_media
#
def is_photonic_media(sip):
    return sip["layer-protocol-name"] == "PHOTONIC_MEDIA"
#
# Filter method used to keep only SIPs that are DSR
#
def is_dsr_media(sip):
    return sip["layer-protocol-name"] == "DSR"
#
# Processes the topology to verify the correctness
#
def process_topology():
    """Placeholder for topology verification logic (not yet implemented).

    BUG FIX: the original body was the bare name ``noop``, which is
    undefined and raised NameError whenever the function was called; the
    no-op is now explicit.
    """
    # TODO use method to parse topology
    # Getting the Topology
    # topology = context["tapi-common:context"]["tapi-topology:topology-context"]["topology"][0]
    # nodes = topology["node"];
    # links = topology["link"];
    return None
#
# Find mapped client-side sip_uuid according to a line-side sip_uuid.
# connection-ids of these two owned-node-edge-point should be the same.
#
def find_mapped_client_sip_uuid(line_sip_uuid, nodes):
    """Given a line-side SIP uuid, locate its transponder onep and return
    the client-side SIP uuid sharing the same odtn-connection-id together
    with the parsed client-onep name dict, or None when no client port
    matches."""
    line_node = None
    line_onep = None
    for node in nodes:
        if is_transponder_node(node):
            for onep in node["owned-node-edge-point"]:
                if onep["mapped-service-interface-point"][0]["service-interface-point-uuid"] == line_sip_uuid:
                    line_node = node
                    line_onep = onep
                    break
    if line_node is None:
        raise AssertionError("Cannot match line-side sip uuid in topology.")
    conn_id = parse_value(line_onep["name"])["odtn-connection-id"]
    for onep in line_node["owned-node-edge-point"]:
        vals = parse_value(onep["name"])
        if vals["odtn-connection-id"] == conn_id and vals["odtn-port-type"] == "client":
            return onep["mapped-service-interface-point"][0]["service-interface-point-uuid"], vals
    return None
#
# Create a client-side connection. Firstly, get the context, parsing for SIPs that connect
# with each other in line-side; Secondly, issue the request
#
def create_client_connection(url_context, url_connectivity):
    """Create one client-side connectivity service.

    Walks every link in the first topology; where a line-side service
    already exists, creates the missing mapped client-side service.  If
    no usable line-side service is found, falls back to creating one
    client-to-client service across the last inspected idle link.
    """
    headers = {'Content-type': 'application/json'}
    context = get_context(url_context)
    # select the first topo from all topologies
    topo = context["tapi-common:context"]["tapi-topology:topology-context"]["topology"][0]
    # Gather all current used sip_uuids
    used_sip_uuids = Set()
    try:
        services = context["tapi-common:context"]["tapi-connectivity:connectivity-context"]["connectivity-service"]
        for service in services:
            used_sip_uuids.add(service["end-point"][0]["service-interface-point"]["service-interface-point-uuid"])
            used_sip_uuids.add(service["end-point"][1]["service-interface-point"]["service-interface-point-uuid"])
    except KeyError:
        print "There is no existed connectivity service inside ONOS."
    # select the first available line-side service as bridge. If there is no available line-side service,
    # then only create a client-to-client service for src and dst node.
    empty_client_src_sip_uuid, empty_client_dst_sip_uuid = None, None
    empty_src_name, empty_dst_name, empty_client_src_name, empty_client_dst_name = None, None, None, None
    for link_index in range(0, len(topo["link"])):
        src_onep, dst_onep, src_sip_uuid, dst_sip_uuid = parse_src_dst(topo, link_index)
        client_src_sip_uuid, client_src_name = find_mapped_client_sip_uuid(src_sip_uuid, topo["node"])
        client_dst_sip_uuid, client_dst_name = find_mapped_client_sip_uuid(dst_sip_uuid, topo["node"])
        # firstly, check if line-side service exists
        # If line-side service exists
        if src_sip_uuid in used_sip_uuids and dst_sip_uuid in used_sip_uuids:
            # secondly, check if mapped client-side service exists
            if (client_src_sip_uuid not in used_sip_uuids) and (client_dst_sip_uuid not in used_sip_uuids):
                # If there is no such client-side connection exists
                # Create new client-side connection directly
                print "Create client-side connection between %s and %s." % \
                    (client_src_name["onos-cp"], client_dst_name["onos-cp"])
                create_input_json = json.dumps(tapi_client_input((client_src_sip_uuid, client_dst_sip_uuid)))
                resp = requests.post(url_connectivity, data=create_input_json, headers=headers,
                                     auth=('onos', 'rocks'))
                if resp.status_code != 200:
                    raise Exception('POST {}'.format(resp.status_code))
                return resp
            else:
                # If there exists such client-side connection
                # Do nothing, just continue
                pass
        else:
            # If line-side service doesn't exist
            # save 4 sip uuids, and continue
            empty_client_src_sip_uuid = client_src_sip_uuid
            empty_client_dst_sip_uuid = client_dst_sip_uuid
            empty_client_src_name = client_src_name
            empty_client_dst_name = client_dst_name
            empty_src_name = parse_value(src_onep["name"])
            empty_dst_name = parse_value(dst_onep["name"])
            pass
    # After FOR loop, if this method doesn't return, there is no available line-side
    # service for mapped client-side service creation.
    # So, we need to create two client-side services.
    if empty_client_src_sip_uuid is None:
        # None case means all client-side services exist.
        raise AssertionError("There is no available client-side service could be created.")
    else:
        print "Create client-side services:"
        print "\t- from %s to %s." % (empty_client_src_name["onos-cp"], empty_client_dst_name["onos-cp"])
        print "This service should go through:"
        print "\t- %s and %s." % (empty_src_name["onos-cp"], empty_dst_name["onos-cp"])
        create_input_json = json.dumps(tapi_client_input((empty_client_src_sip_uuid, empty_client_dst_sip_uuid)))
        resp = requests.post(url_connectivity, data=create_input_json, headers=headers,
                             auth=('onos', 'rocks'))
        if resp.status_code != 200:
            raise Exception('POST {}'.format(resp.status_code))
        return resp
#
# Parse array structure "name" under structure "owned node edge point"
#
def parse_value(arr):
    """Flatten a TAPI "name" list of {value-name, value} entries into a
    plain dict keyed by value-name."""
    return {entry["value-name"]: entry["value"] for entry in arr}
#
# Find node edge point of node structure in topology with client-side port, by using nep with line-side port.
# The odtn-connection-id should be the same in both line-side nep and client-side nep
#
def find_client_onep(line_nep_in_link, nodes):
    """Return the client-side onep on the same node as the given line-side
    nep (matched via odtn-connection-id), or None when absent."""
    for node in nodes:
        if node["uuid"] == line_nep_in_link["node-uuid"]:
            conn_id = None
            # First pass: find the line-side port's connection id.
            for onep in node["owned-node-edge-point"]:
                if onep["uuid"] == line_nep_in_link["node-edge-point-uuid"]:
                    name = parse_value(onep["name"])
                    if name["odtn-port-type"] == "line":
                        conn_id = name["odtn-connection-id"]
                        break
            if conn_id is None:
                raise AssertionError("Cannot find owned node edge point with node id %s and nep id %s."
                                     % (line_nep_in_link["node-uuid"], line_nep_in_link["node-edge-point-uuid"], ))
            # Second pass: find the client port sharing that connection id.
            for onep in node["owned-node-edge-point"]:
                name = parse_value(onep["name"])
                if name["odtn-port-type"] == "client" and name["odtn-connection-id"] == conn_id:
                    return onep
    return None
#
# Create a line-side connection. Firstly, get the context, parsing for SIPs with photonic_media type,
# and select one pair of them; Secondly, issue the request
#
def create_line_connection(url_context, url_connectivity):
    """Create one line-side (PHOTONIC_MEDIA) connectivity service over a
    randomly chosen, not-yet-used link, and POST it to ONOS."""
    context = get_context(url_context)
    # select the first topo from all topologies
    topo = context["tapi-common:context"]["tapi-topology:topology-context"]["topology"][0]
    # select randomly the src_sip_uuid and dst_sip_uuid with same connection id.
    src_onep, dst_onep, src_sip_uuid, dst_sip_uuid = parse_src_dst(topo)
    # Re-draw until the selected link does not collide with an existing service.
    while is_port_used(src_sip_uuid, context["tapi-common:context"]["tapi-connectivity:connectivity-context"]):
        print "Conflict occurs between randomly selected line-side link and existed ones."
        src_onep, dst_onep, src_sip_uuid, dst_sip_uuid = parse_src_dst(topo)
    print "\nBuild line-side connectivity:\n|Item|SRC|DST|\n|:--|:--|:--|\n|onos-cp|%s|%s|\n|connection id|%s|%s|\n|sip uuid|%s|%s|" % \
        (src_onep["name"][2]["value"], dst_onep["name"][2]["value"],
         src_onep["name"][1]["value"], dst_onep["name"][1]["value"],
         src_sip_uuid, dst_sip_uuid)
    create_input_json = json.dumps(tapi_line_input((src_sip_uuid, dst_sip_uuid)))
    print "\nThe json content of creation operation for line-side connectivity service is \n\t\t%s." % \
        create_input_json
    headers = {'Content-type': 'application/json'}
    resp = requests.post(url_connectivity, data=create_input_json, headers=headers, auth=('onos', 'rocks'))
    if resp.status_code != 200:
        raise Exception('POST {}'.format(resp.status_code))
    return resp
#
# find owned-node-edge-point from all nodes according to line_nep_in_links
#
def find_line_onep(line_nep_in_link, nodes):
    """Resolve a link's node-edge-point reference to the owning node's
    onep.  Returns None when the owning node is an OLS."""
    target_node_uuid = line_nep_in_link["node-uuid"]
    target_nep_uuid = line_nep_in_link["node-edge-point-uuid"]
    for node in nodes:
        if node["uuid"] != target_node_uuid:
            continue
        if not is_transponder_node(node):
            break
        for onep in node["owned-node-edge-point"]:
            if onep["uuid"] == target_nep_uuid:
                # check the length equals 1 to verify the 1-to-1 mapping relationship
                assert len(onep["mapped-service-interface-point"]) == 1
                return onep
    # When node is OLS (or nothing matched), return None
    return None
#
# Obtains existing connectivity services
#
def get_connection(url_connectivity, uuid):
    """POST an empty body to the connectivity endpoint to list existing
    connectivity services.  (*uuid* is currently unused.)"""
    # uuid is useless for this method
    # NOTE(review): this local string shadows the imported `json` module
    # within this function body.
    json = '{}'
    headers = {'Content-type': 'application/json'}
    resp = requests.post(url_connectivity, data=json, headers=headers, auth=('onos', 'rocks'))
    if resp.status_code != 200:
        raise Exception('POST {}'.format(resp.status_code))
    return resp
| 15,933 | 5,048 |
"""
The outfile structure is the following:
diameter density
birth lifetime
is_captured stuck_to_geometry theta
(blank line)
Re Ur
(blank line)
n_trajectory
x1 y1 up1 vp1 Uf1 Vf1 gradpx1 gradpy1 ap_x1 ap_y1 af_x1 af_y1
x2 y2 up2 vp2 Uf2 Vf2 gradpx2 gradpy2 ap_x2 ap_y2 af_x2 af_y2
...
xNt yNt upNt vpNt UfNt VfNt gradpxNt gradpyNt ap_xN ap_yN af_xN af_yN
"""
import sys
sys.path.append('..')
import numpy as np
from particle import Particle
#==============================================================================
def floatIt(l):
    """Parse an iterable of string tokens into a numpy array of floats."""
    return np.array(list(map(float, l)))
def intIt(l):
    """Parse an iterable of string tokens into a numpy array of ints."""
    return np.array(list(map(int, l)))
def write_particle(p, f):
    """Serialize one particle to the open text file *f*.

    Record layout (matches the module docstring): diameter density /
    birth lifetime / captured stuck theta, blank line, Re Ur, blank line,
    Nt, then Nt rows of 12 float columns.
    """
    f.write('%2.3f %1.3f\n' % (p.diameter, p.density))
    f.write('%d %d\n' % (p.birth, p.lifetime))
    f.write('%s %s %s\n' % (p.captured, p.stuck_to_geometry, p.theta))
    f.write('\n') # blank line
    f.write('%d %.1f\n' % (p.Re, p.Ur))
    f.write('\n')
    Nt = len(p.trajectory)
    f.write('%d\n' % Nt)
    for n in range(Nt):
        # 12 columns: x y up vp Uf Vf gradpx gradpy ap_x ap_y af_x af_y
        f.write('%e '*12 % \
            (p.trajectory[n,0],
             p.trajectory[n,1],
             p.velocities[n,0],
             p.velocities[n,1],
             p.fluid_velocities[n,0],
             p.fluid_velocities[n,1],
             p.pressure_gradients[n,0],
             p.pressure_gradients[n,1],
             p.accelerations[n,0],
             p.accelerations[n,1],
             p.fluid_accelerations[n,0],
             p.fluid_accelerations[n,1]))
        f.write('\n')
def write_particles(particles, outfile):
    """Write every particle in *particles* to the path *outfile*.

    Layout: particle count, blank line, then one `write_particle` record
    followed by a blank separator line per particle.
    """
    # FIX: use a context manager so the file is closed even if a
    # write_particle call raises (the original leaked the handle).
    with open(outfile, 'w') as f:
        f.write('%d\n' % len(particles))
        f.write('\n') # blank line
        for p in particles:
            write_particle(p, f)
            f.write('\n')
def read_particle(f, old_version=False):
    """Parse one particle record from the open stream *f*.

    Inverse of write_particle(). With old_version=True the capture line has
    no theta field and the trajectory rows have 8 instead of 12 columns.
    Returns a Particle (project class) with its arrays filled in.
    """
    # I kept old_version because I had many particles saved before the final
    # update of this function.
    diameter, density = floatIt(f.readline().strip().split())
    birth, lifetime = intIt(f.readline().strip().split())
    if not(old_version):
        str_captured, str_stuck, str_theta = f.readline().strip().split()
        theta = float(str_theta)
    else:
        str_captured, str_stuck = f.readline().strip().split()
    # The flags were written with '%s', so they come back as their repr text.
    captured = False if str_captured == 'False' else True
    stuck = None if str_stuck == 'None' else int(str_stuck)
    f.readline()  # read the blank line
    Re, Ur = floatIt(f.readline().strip().split())
    f.readline()
    Nt = int(f.readline().strip())
    trajectory = []
    velocities = []
    fluid_velocities = []
    pressure_gradients = []
    accelerations = []
    fluid_accelerations = []
    for n in range(Nt):
        if old_version:
            # Old rows: position, velocity, fluid velocity, pressure gradient.
            x, y, u, v, U, V, gradpx, gradpy \
                = floatIt(f.readline().strip().split())
        else:
            # New rows additionally carry particle and fluid accelerations.
            x, y, u, v, U, V, gradpx, gradpy, ap_x, ap_y, af_x, af_y \
                = floatIt(f.readline().strip().split())
        trajectory.append([x, y])
        velocities.append([u, v])
        fluid_velocities.append([U, V])
        pressure_gradients.append([gradpx, gradpy])
        if not(old_version):
            accelerations.append([ap_x, ap_y])
            fluid_accelerations.append([af_x, af_y])
    # NOTE(review): assumes Nt >= 1 — an empty trajectory would raise
    # IndexError here; confirm writers never emit Nt == 0.
    pos0 = trajectory[0]
    u0 = velocities[0]
    p = Particle(diameter, density, birth, lifetime, pos0, u0)
    p.captured, p.stuck_to_geometry = captured, stuck
    p.Re, p.Ur = Re, Ur
    p.trajectory = np.array(trajectory)
    p.velocities = np.array(velocities)
    p.fluid_velocities = np.array(fluid_velocities)
    p.pressure_gradients = np.array(pressure_gradients)
    if not(old_version):
        p.accelerations = np.array(accelerations)
        p.fluid_accelerations = np.array(fluid_accelerations)
        p.theta = theta
    return p
def read_particles(infile, old_version=False):
    """Read every particle stored in *infile*.

    Inverse of write_particles(). Returns a numpy object array of Particle
    instances.
    """
    # 'with' closes the file even if parsing fails mid-way; the original
    # leaked the handle on any exception.
    with open(infile, 'r') as f:
        Np = int(f.readline())
        f.readline()  # read a blank line
        particles = []
        for i in range(Np):
            particles.append(read_particle(f, old_version))
            f.readline()  # blank separator after each record
    return np.array(particles)
| 4,151 | 1,578 |
########## 1.2.3. Formulação matemática de redução de dimensionalidade LDA ##########
# Primeiro, note que as K médias \mu_k são vetores em \mathcal{R}^d e que elas estão em um subespaço afim H de dimensão no máximo K - 1 (2 pontos estão em uma linha, 3 pontos estão em um plano, etc.).
# Como mencionado acima, podemos interpretar LDA como a atribuição de x à classe cuja média \mu_k é a mais próxima em termos de distância de Mahalanobis, enquanto também leva em conta as probabilidades anteriores da classe. Alternativamente, LDA é equivalente a primeiro esferificar os dados de modo que a matriz de covariância seja a identidade e, em seguida, atribuir x à média mais próxima em termos de distância euclidiana (ainda contabilizando as classes prioritárias).
# Calcular distâncias euclidianas neste espaço d-dimensional é equivalente a primeiro projetar os pontos de dados em H e calcular as distâncias lá (já que as outras dimensões contribuirão igualmente para cada classe em termos de distância). Em outras palavras, se x estiver mais próximo de \mu_k no espaço original, também será o caso de H. Isso mostra que, implícito no classificador LDA, há uma redução de dimensionalidade por projeção linear em um espaço dimensional K-1 .
# Podemos reduzir ainda mais a dimensão, para um L escolhido, projetando no subespaço linear H_L que maximiza a variância do \mu^*_k após a projeção (na verdade, estamos fazendo uma forma de PCA para as médias de classe transformadas \mu^*_k). Este L corresponde ao parâmetro n_components usado no método de transformação. Veja [1] para mais detalhes. | 1,610 | 501 |
"""A collection of cards."""
import random
from csrv.model import cards
from csrv.model.cards import card_info
# This import is just to pull in all the card definitions
import csrv.model.cards.corp
import csrv.model.cards.runner
class Deck(object):
  """A collection of cards plus validation helpers shared by both sides."""

  def __init__(self, identity_name, card_names):
    """Look up the identity and every card by name in the registry.

    Unknown card names are silently skipped.
    """
    self.identity = cards.Registry.get(identity_name)
    self.cards = []
    self.is_valid = True
    for name in card_names:
      c = cards.Registry.get(name)
      if c:
        self.cards.append(c)

  def _verify_less_than_three_copies(self):
    """Make sure we have no more than 3 copies of a single cards"""
    # Built with a comprehension instead of filter() so the offender list is
    # a real list on Python 3 as well (len(filter(...)) raises there).
    card_list = {}
    for c in self.cards:
      card_list[c.NAME] = card_list.setdefault(c.NAME, 0) + 1
    invalid_cards = [name for name in card_list if card_list[name] > 3]
    if invalid_cards:
      return "Deck contains more than 3 copies of the following cards: {}".format(', '.join(invalid_cards))

  def _verify_min_deck_size(self):
    """Make sure deck meets minimum deck size limit"""
    if len(self.cards) < self.identity.MIN_DECK_SIZE:
      self.is_valid = False
      return "Deck does not meet minimum deck size requirement"

  def _verify_influence_points(self):
    """Make sure deck doesnt exceed maximum influence points"""
    # sum() replaces reduce(), which is not a builtin on Python 3.
    influence_spent = sum(c.influence_cost(self.identity.FACTION) for c in self.cards)
    if influence_spent > self.identity.MAX_INFLUENCE:
      return "Deck contains {} influence but only {} allowed".format(influence_spent, self.identity.MAX_INFLUENCE)

  def _verify_side_only(self, side):
    """Make sure we only have cards belonging to the correct side"""
    # any() works identically on Python 2 and 3 and short-circuits.
    if any(c.SIDE != side for c in self.cards):
      return "Deck contains cards from the other side (corp/runner)"
class CorpDeck(Deck):
  """A deck for a corp."""

  def validate(self):
    """Return a list of errors with the deck."""
    # A list comprehension (rather than filter(None, ...)) returns a real
    # list on Python 3 as well, so callers can take len() of the result.
    checks = [
        self._verify_min_deck_size(),
        self._verify_influence_points(),
        self._verify_less_than_three_copies(),
        self._verify_in_faction_agendas(),
        self._verify_agenda_points(),
        self._verify_side_only(card_info.CORP)
    ]
    return [error for error in checks if error]

  def _verify_agenda_points(self):
    """Make sure deck has required agenda points based on deck size"""
    agenda_points = sum(c.AGENDA_POINTS for c in self.cards)
    deck_size = len(self.cards)
    # Guard the empty deck: the old float division raised ZeroDivisionError.
    if deck_size == 0 or agenda_points / float(deck_size) < 2.0 / 5.0:
      self.is_valid = False
      return "Only {} Agenda Points in deck of {} cards".format(agenda_points, deck_size)

  def _verify_in_faction_agendas(self):
    """Make sure deck only contains in faction agendas"""
    agendas = [c for c in self.cards if c.TYPE == card_info.AGENDA]
    if any(a.FACTION not in [card_info.NEUTRAL, self.identity.FACTION] for a in agendas):
      return "Deck contains out-of-faction Agendas"
class RunnerDeck(Deck):
  """A deck for a runner."""

  def validate(self):
    """Return a list of errors with the deck."""
    # List comprehension instead of filter(None, ...) so the result is a
    # list on both Python 2 and Python 3.
    checks = [
        self._verify_min_deck_size(),
        self._verify_influence_points(),
        self._verify_less_than_three_copies(),
        self._verify_side_only(card_info.RUNNER)
    ]
    return [error for error in checks if error]
| 3,195 | 1,069 |
# Simple interactive student-record menu. The loop runs while `ans` is
# truthy; choice 4 (and any empty input) ends it.
ans = True
while ans:
    print("""
    1.Add a Student
    2.Delete a Student
    3.Look Up Student Record
    4.Exit/Quit
    """)
    ans = input("What would you like to do? ")
    if ans == "4":
        print("\n Goodbye")
        ans = None  # falsy -> terminates the while loop
    elif ans == "1":
        print("\nStudent Added")
    elif ans == "2":
        print("\n Student Deleted")
    elif ans == "3":
        print("\n Student Record Found")
    else:
        print("\n Not Valid Choice Try again")
| 476 | 149 |
from __future__ import print_function, division
import sys
sys.dont_write_bytecode = True
from lib import *
@ok
def _rseed():
    # rseed/shuffle come from the project's `lib` module (star-imported
    # above); presumably rseed(1) makes shuffle deterministic — the exact
    # permutation below pins that behaviour. TODO confirm against lib.
    rseed(1)
    one = list('abcdefghijklm')
    assert shuffle(one) == ['m', 'h', 'j', 'f', 'a',
        'g', 'l', 'd', 'e', 'c', 'i', 'k', 'b']
@ok
def _defDict():
    # DefaultDict (project wrapper; presumably like collections.defaultdict)
    # must hand out a fresh list for each missing key.
    d = DefaultDict(lambda: [])
    for n,c in enumerate(list('tobeornottobe')):
        d[c].append(n)
    # Each letter maps to every index at which it occurs in the phrase.
    assert d == {'b': [2, 11], 'e': [3, 12],
        'o': [1, 4, 7, 10], 'n': [6],
        'r': [5], 't': [0, 8, 9]}
| 525 | 236 |
from setuptools import setup
import pyhap.const as pyhap_const
PROJECT_NAME = 'HAP-python'
URL = 'https://github.com/ikalchev/{}'.format(PROJECT_NAME)
# Auxiliary links shown on the PyPI project page.
PROJECT_URLS = {
    'Bug Reports': '{}/issues'.format(URL),
    'Documentation': 'http://hap-python.readthedocs.io/en/latest/',
    'Source': '{}/tree/master'.format(URL),
}
PYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_NAME)
# GitHub source archive for the tag matching the package version.
DOWNLOAD_URL = '{}/archive/{}.zip'.format(URL, pyhap_const.__version__)
# Dotted minimum interpreter version built from the version tuple.
MIN_PY_VERSION = '.'.join(map(str, pyhap_const.REQUIRED_PYTHON_VER))
setup(
    name=PROJECT_NAME,
    version=pyhap_const.__version__,
    url=URL,
    project_urls=PROJECT_URLS,
    download_url=DOWNLOAD_URL,
    python_requires='>={}'.format(MIN_PY_VERSION),
)
| 734 | 294 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Scrape the MMWR morbidity tables at http://wonder.cdc.gov/mmwr/mmwrmorb.asp. No processing is done;
we simply save the files for potential offline processing.
"""
# Copyright (c) Los Alamos National Security, LLC and others.
from __future__ import print_function, division
import requests
import codecs
import os
mmwr_table_url = 'http://wonder.cdc.gov/mmwr/mmwr_reps.asp?mmwr_year=%d&mmwr_week=%02d&mmwr_table=%s&request=Submit'
mmwr_file = '../data/mmwr/%d-%02d-%s.html'
tables = {'1', '2A', '2B', '2C', '2D', '2E', '2F', '2G', '2H', '2I', '2J', '2K', '3A', '3B', '4'}
error_messages = {'Data are not available for the week requested.', 'No records found.', 'does not exist before the week ending'}
# Walk every (year, week, table) combination, saving any table not already
# present locally. Responses carrying a known error message are skipped.
for year in range(1996, 2015):
    for week in range(1, 54):
        for table in tables:
            out_path = mmwr_file % (year, week, table)
            if os.path.exists(out_path):
                continue  # already downloaded on an earlier run
            response = requests.get(mmwr_table_url % (year, week, table))
            if any(message in response.text for message in error_messages):
                continue  # no data for this week/table combination
            with codecs.open(out_path, 'w', 'utf-8') as output:
                output.write(response.text)
            print('saved %s' % out_path)
| 1,291 | 529 |
import json
import logging
import os
import traceback
from boto3.dynamodb.conditions import Attr, Key
import storage
from util import lambda_result
logger = logging.getLogger('cluster_status')
# Verbose logging is opt-in via the DEBUG environment variable.
if os.environ.get('DEBUG'):
    logger.setLevel(logging.DEBUG)
def set_cluster_status(event, context):
    """Set the status of a cluster, ie active, inactive,
    maintainance_mode, etc"""
    # API Gateway proxy events carry queryStringParameters as None (not a
    # missing key) when the request has no query string, so `or {}` is
    # needed; `.get('queryStringParameters', {})` alone can still yield None.
    query_string_params = event.get('queryStringParameters') or {}
    cluster_status = query_string_params.get('cluster_status')
    if cluster_status is None:
        return lambda_result(
            {"message": 'Must provide a status variable in uri query string'},
            status_code=500)
    cluster_name = query_string_params.get('cluster_name')
    if cluster_name is None:
        return lambda_result(
            {"message": ('Must provide a cluster_name '
                         'variable in uri query string')},
            status_code=500)
    # Fetch the table only after the request has been validated.
    CLUSTER_TABLE = storage.get_cluster_table()
    try:
        CLUSTER_TABLE.update_item(
            Key={
                'id': cluster_name,
            },
            UpdateExpression="SET cluster_status = :r",
            ExpressionAttributeValues={
                ':r': cluster_status
            },
            ReturnValues="UPDATED_NEW"
        )
        return lambda_result(
            {"message": (f'Updated cluster status for {cluster_name} '
                         f'to {cluster_status}')})
    except Exception:
        failed_txt = f'Failed to update cluster status for {cluster_name}'
        logger.exception(failed_txt)
        return lambda_result({"message": failed_txt}, status_code=500)
def set_cluster_environment(event, context):
    """Set the environment of a cluster, ie dev, stage, prod"""
    # Guard against queryStringParameters being explicitly None (API Gateway
    # sends null, not a missing key, when there is no query string).
    query_string_params = event.get('queryStringParameters') or {}
    environment = query_string_params.get('environment')
    if environment is None:
        return lambda_result(
            {"message":
             'Must provide an environment param in uri query string'},
            status_code=500)
    cluster_name = query_string_params.get('cluster_name')
    if cluster_name is None:
        return lambda_result(
            {"message": ('Must provide a cluster_name '
                         'variable in uri query string')},
            status_code=500)
    CLUSTER_TABLE = storage.get_cluster_table()
    try:
        # ADD on a set attribute appends the value, so a cluster may belong
        # to several environments at once.
        CLUSTER_TABLE.update_item(
            Key={
                'id': cluster_name,
            },
            UpdateExpression="ADD environment :e",
            ExpressionAttributeValues={
                ':e': set([environment])
            },
            ReturnValues="UPDATED_NEW"
        )
        msg = (f'Updated cluster environment for {cluster_name} '
               f'to {environment}')
        # NOTE(review): sibling handlers wrap the success payload as
        # {"message": ...}; kept as a bare string here to preserve the
        # existing response shape — confirm whether callers rely on it.
        return lambda_result(msg)
    except Exception:
        failed_txt = f'Failed to update cluster environment for {cluster_name}'
        # Log the stack trace (consistent with set_cluster_status) instead of
        # returning it to the caller, which leaked internals.
        logger.exception(failed_txt)
        return lambda_result({"message": failed_txt}, status_code=500)
def clusters_per_environment(event, context):
    """Query cluster status attribute for given environment,
    requires 'environment' query param, or defaults to all clusters"""
    # `or {}` guards against queryStringParameters being explicitly None.
    environment = (event.get('queryStringParameters') or {}).get('environment')
    items = _query_dynamodb(environment)
    return lambda_result([cluster['id'] for cluster in items])
def cluster_status(event, context):
    """Query cluster status attribute for given environment,
    requires 'environment' query param, or defaults to all clusters"""
    # `or {}` guards against queryStringParameters being explicitly None.
    query_string_params = event.get('queryStringParameters') or {}
    environment = query_string_params.get('environment')
    cluster_status = query_string_params.get('cluster_status')
    items = _query_dynamodb(environment, cluster_status)
    return lambda_result([cluster['id'] for cluster in items])
def set_cluster_metadata(event, context):
    """Set the metadata of a cluster.
    metadata is a json blob use for describing extra details about a cluster.
    The document comes from the request body (JSON string or parsed dict).
    """
    # `or {}` guards against queryStringParameters being explicitly None.
    query_string_params = event.get('queryStringParameters') or {}
    metadata = event.get('body', {})
    cluster_name = query_string_params.get('cluster_name')
    if cluster_name is None:
        return lambda_result(
            {"message": ('Must provide a cluster_name '
                         'variable in uri query string')},
            status_code=500)
    # Fetch the table only after the request has been validated.
    CLUSTER_TABLE = storage.get_cluster_table()
    try:
        if isinstance(metadata, str):
            metadata = json.loads(metadata)
        CLUSTER_TABLE.update_item(
            Key={
                'id': cluster_name,
            },
            UpdateExpression="set metadata = :md",
            ExpressionAttributeValues={
                ':md': metadata
            },
            ReturnValues="UPDATED_NEW"
        )
        return lambda_result(
            {"message": f'Updated cluster metadata for {cluster_name}'}
        )
    except Exception:
        failed_txt = f'Failed to update cluster metadata for {cluster_name}'
        logger.exception(failed_txt)
        logger.error(json.dumps(event))
        return lambda_result({"message": failed_txt}, status_code=500)
def get_cluster_metadata(event, context):
    """Get the metadata of a cluster.
    metadata is a json blob use for describing extra details about a cluster.
    Returns 404 when the cluster is unknown.
    """
    # `or {}` guards against queryStringParameters being explicitly None.
    query_string_params = event.get('queryStringParameters') or {}
    cluster_name = query_string_params.get('cluster_name')
    if cluster_name is None:
        # Use lambda_result like every other handler in this module instead
        # of hand-building the response dict.
        return lambda_result(
            {"message": ('Must provide a cluster_name '
                         'variable in uri query string')},
            status_code=500)
    CLUSTER_TABLE = storage.get_cluster_table()
    status_code = 404
    db_response = CLUSTER_TABLE.get_item(
        Key={
            'id': cluster_name,
        }
    )
    metadata = {}
    if 'Item' in db_response:
        status_code = 200
        metadata = db_response['Item'].get('metadata', {})
        if isinstance(metadata, str):
            metadata = json.loads(metadata)
        metadata['environment'] = db_response['Item'].get('environment')
        # BUG FIX: set_cluster_status stores the attribute as
        # 'cluster_status'; reading 'status' always returned None.
        metadata['status'] = db_response['Item'].get('cluster_status')
        metadata['id'] = cluster_name
    return lambda_result(metadata, status_code=status_code)
def _query_dynamodb(environment, status=None, metadata=False):
    """Scan the cluster table for matching cluster ids.

    environment=None means no environment filter (callers document
    "defaults to all clusters"); the old code built contains(None), which
    never matches. `metadata` is unused but kept for interface
    compatibility.
    """
    CLUSTER_TABLE = storage.get_cluster_table()
    scan_kwargs = {'ProjectionExpression': "id"}
    fkey = None
    if environment is not None:
        fkey = Attr('environment').contains(environment)
    if status is not None:
        status_cond = Key('cluster_status').eq(status)
        fkey = status_cond if fkey is None else fkey & status_cond
    if fkey is not None:
        scan_kwargs['FilterExpression'] = fkey
    response = CLUSTER_TABLE.scan(**scan_kwargs)
    return response.get('Items', [])
| 6,970 | 1,905 |
"""
This module implements utilities for labels
"""
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from typing import List, Optional
from ote_sdk.entities.label import LabelEntity
from ote_sdk.entities.label_schema import LabelSchemaEntity
from ote_sdk.entities.scored_label import ScoredLabel
def get_empty_label(label_schema: LabelSchemaEntity) -> Optional[LabelEntity]:
    """
    Get first empty label from label_schema
    """
    # Empty labels are exactly those present only when include_empty=True.
    with_empty = set(label_schema.get_labels(include_empty=True))
    without_empty = set(label_schema.get_labels(include_empty=False))
    for candidate in with_empty - without_empty:
        return candidate
    return None
def get_leaf_labels(label_schema: LabelSchemaEntity) -> List[LabelEntity]:
    """
    Get leafs from label tree
    """
    # A label is a leaf exactly when it has no children in the schema.
    return [
        label
        for label in label_schema.get_labels(False)
        if not label_schema.get_children(label)
    ]
def get_ancestors_by_prediction(
    label_schema: LabelSchemaEntity, prediction: ScoredLabel
) -> List[ScoredLabel]:
    """
    Get all the ancestors for a given label node
    """
    # Each ancestor inherits the probability of the predicted label.
    probability = prediction.probability
    return [
        ScoredLabel(ancestor, probability)
        for ancestor in label_schema.get_ancestors(prediction.get_label())
    ]
| 1,366 | 433 |
import unittest
from typing import List
from dsa.lib.math.nums.three_sum import ThreeSum
class ThreeSumTest(unittest.TestCase):
    """Unit tests for ThreeSum.threeSum."""

    def test_case1(self):
        # Duplicate inputs must not produce duplicate triplets.
        self.assertEqual([[0, 0, 0]], self.three_sum([0, 0, 0, 0]))
        self.assertEqual([[-2, 0, 2]], self.three_sum([-2, 0, 0, 2, 2]))
        self.assertEqual([[-1, 0, 1]], self.three_sum([-1, 0, 1, 0]))

    def test_case2(self):
        # BUG FIX: the original asserted membership between two hard-coded
        # literals and never invoked the code under test; call three_sum on
        # the classic input so the expected triplets are actually checked.
        result = self.three_sum([-1, 0, 1, 2, -1, -4])
        self.assertIn([-1, -1, 2], result)
        self.assertIn([-1, 0, 1], result)

    @staticmethod
    def three_sum(nums: List[int]):
        return ThreeSum().threeSum(nums)
| 614 | 271 |
""" Contains functions for plot spectrum action
"""
import mne
import numpy as np
import matplotlib.pyplot as plt
from meggie.utilities.plotting import color_cycle
from meggie.utilities.plotting import create_channel_average_plot
from meggie.utilities.channels import average_to_channel_groups
from meggie.utilities.channels import iterate_topography
from meggie.utilities.units import get_power_unit
def plot_spectrum_averages(subject, channel_groups, name, log_transformed=True):
    """ Plots spectrum averages.

    For each channel type, draws one axes per channel group with one curve
    per condition, averaged over the group's channels. Shows the figure.
    """
    # NOTE(review): subject_name is unused — possibly kept for parity with
    # sibling actions; confirm before removing.
    subject_name = subject.name
    spectrum = subject.spectrum.get(name)
    data = spectrum.content
    freqs = spectrum.freqs
    ch_names = spectrum.ch_names
    info = spectrum.info
    colors = color_cycle(len(data))
    conditions = spectrum.content.keys()
    # averages maps (ch_type, ch_group) -> list of (condition, curve),
    # built in sorted-condition order.
    averages = {}
    for key, psd in sorted(data.items()):
        data_labels, averaged_data = average_to_channel_groups(
            psd, info, ch_names, channel_groups)
        for label_idx, label in enumerate(data_labels):
            if not label in averages:
                averages[label] = []
            averages[label].append((key, averaged_data[label_idx]))
    ch_types = sorted(set([label[0] for label in averages.keys()]))
    for ch_type in ch_types:
        ch_groups = sorted([label[1] for label in averages.keys()
                            if label[0] == ch_type])
        def plot_fun(ax_idx, ax):
            # One axes per channel group; curves are colored by their index
            # in the sorted-condition list.
            ch_group = ch_groups[ax_idx]
            ax.set_title(ch_group)
            ax.set_xlabel('Frequency (Hz)')
            ax.set_ylabel('Power ({})'.format(
                get_power_unit(ch_type, log_transformed)))
            for color_idx, (key, curve) in enumerate(averages[(ch_type, ch_group)]):
                if log_transformed:
                    curve = 10 * np.log10(curve)
                ax.plot(freqs, curve, color=colors[color_idx])
        title = ' '.join([name, ch_type])
        # NOTE(review): the legend zips `conditions` (dict insertion order)
        # with colors, while curves are colored in sorted-key order; these
        # disagree if the condition dict is not insertion-sorted — confirm.
        legend = list(zip(conditions, colors))
        create_channel_average_plot(len(ch_groups), plot_fun, title, legend)
    plt.show()
def plot_spectrum_topo(subject, name, log_transformed=True, ch_type='meg'):
    """ Plots spectrum topography.

    Draws a small spectrum per sensor laid out topographically; clicking a
    sensor (via iterate_topography's callback) opens a detailed single-
    channel plot with a legend.
    """
    # NOTE(review): subject_name is unused — confirm before removing.
    subject_name = subject.name
    spectrum = subject.spectrum.get(name)
    data = spectrum.content
    freqs = spectrum.freqs
    ch_names = spectrum.ch_names
    info = spectrum.info
    # Restrict the layout to MEG or EEG channels only.
    if ch_type == 'meg':
        picked_channels = [ch_name for ch_idx, ch_name in enumerate(info['ch_names'])
                           if ch_idx in mne.pick_types(info, meg=True, eeg=False)]
    else:
        picked_channels = [ch_name for ch_idx, ch_name in enumerate(info['ch_names'])
                           if ch_idx in mne.pick_types(info, eeg=True, meg=False)]
    info = info.copy().pick_channels(picked_channels)
    colors = color_cycle(len(data))
    def individual_plot(ax, info_idx, names_idx):
        """Detailed plot for a single clicked channel (one curve per
        condition, proper frequency axis and legend)."""
        ch_name = ch_names[names_idx]
        for color_idx, (key, psd) in enumerate(sorted(data.items())):
            if log_transformed:
                curve = 10 * np.log10(psd[names_idx])
            else:
                curve = psd[names_idx]
            ax.plot(freqs, curve, color=colors[color_idx],
                    label=key)
        title = ' '.join([name, ch_name])
        # NOTE(review): canvas.set_window_title was deprecated/removed in
        # newer Matplotlib (use canvas.manager.set_window_title) — confirm
        # the Matplotlib version this project targets.
        ax.figure.canvas.set_window_title(title.replace(' ', '_'))
        ax.figure.suptitle(title)
        ax.set_title('')
        ax.legend()
        ax.set_xlabel('Frequency (Hz)')
        ax.set_ylabel('Power ({})'.format(get_power_unit(
            mne.io.pick.channel_type(info, info_idx),
            log_transformed
        )))
        plt.show()
    fig = plt.figure()
    # Thumbnail curves in the topography; note these are plotted against
    # sample index (no freqs argument), unlike the detailed plot above.
    for ax, info_idx, names_idx in iterate_topography(
            fig, info, ch_names, individual_plot):
        handles = []
        for color_idx, (key, psd) in enumerate(sorted(data.items())):
            if log_transformed:
                curve = 10 * np.log10(psd[names_idx])
            else:
                curve = psd[names_idx]
            handles.append(ax.plot(curve, color=colors[color_idx],
                                   linewidth=0.5, label=key)[0])
        if not handles:
            return
    # `handles` from the last drawn sensor provides the figure legend.
    fig.legend(handles=handles)
    title = '{0}_{1}'.format(name, ch_type)
    fig.canvas.set_window_title(title)
    plt.show()
| 4,329 | 1,382 |
from .base_runner import BaseRunner
from .gym_runner import GymRunner
| 70 | 21 |
"""
DataGenerator for CyclesPS Dataset
This file use substantial portion of code from the original CNN-PS repository https://github.com/satoshi-ikehata/CNN-PS/
"""
import numpy as np
import cv2
import os
import gc
from data.datagenerator import DataGenerator
from data.utils import rotate_images
from misc.projections import standard_proj
from tensorflow.keras.models import Model
class CyclesDataGenerator(DataGenerator):
    """DataGenerator for the CyclesPS photometric-stereo dataset.

    Each object contributes three renderings (diffuse, specular, metallic);
    the diffuse set is used at full scale, the other two at half scale.
    """

    def __init__(self, datapath, objlist=None, batch_size=256,
                 spatial_patch_size=5, obs_map_size=32, shuffle=False, random_illums=False,
                 keep_axis=True, validation_split=None, nr_rotations=1, rotation_start=0, rotation_end=2 * np.pi,
                 projection=standard_proj, add_raw=False, images=None, normals=None, masks=None, illum_dirs=None,
                 order=2, divide_maps=False, round_nearest=True, rot_2D=False, verbose=False):
        # Default object list: every directory under <datapath>/PRPS.
        self.datapath = datapath
        self.objlist = objlist if objlist is not None else sorted(os.listdir(datapath + '/PRPS'))
        self.verbose = verbose
        super(CyclesDataGenerator, self).__init__(
            batch_size=batch_size,
            spatial_patch_size=spatial_patch_size,
            obs_map_size=obs_map_size,
            shuffle=shuffle,
            random_illums=random_illums,
            keep_axis=keep_axis,
            validation_split=validation_split,
            nr_rotations=nr_rotations,
            rotation_start=rotation_start,
            rotation_end=rotation_end,
            projection=projection,
            add_raw=add_raw,
            images=images,
            normals=normals,
            masks=masks,
            illum_dirs=illum_dirs,
            order=order,
            divide_maps=divide_maps,
            round_nearest=round_nearest,
            rot_2D=rot_2D)

    def load_data(self):
        """Load all three renderings of every object into the generator."""
        objid = 0
        for obj in self.objlist:
            # (base dir, image subdir, resize scale) per rendering variant.
            for dirb, dirn, scale in zip(['PRPS_Diffuse/' + '%s' % obj, 'PRPS/' + '%s' % obj, 'PRPS/' + '%s' % obj],
                                         ['images_diffuse', 'images_specular', 'images_metallic'],
                                         [1, 0.5, 0.5]):
                if self.verbose:
                    print("\rPre-loading image ({:}/{:}) {:} ".format(objid + 1, self.nr_objects, dirb), end="")
                # 3 channels when raw RGB is kept, otherwise grayscale.
                nr_ch = 3 if self.add_raw else 1
                sample_path = os.path.join(self.datapath, dirb, dirn)
                imgs, nmls, msks, light_dirs = self.load_sample(sample_path, scale, -1, nr_ch)
                self.fill_data(imgs, nmls, msks, light_dirs, objid)
                if self.verbose:
                    print("", end="\x1b[1K\r")
                objid += 1
        if self.verbose:
            print()

    def get_max_shape(self, rotations=None):
        """
        Returns a shape of an array (height, width, channels) which all images of various sizes under all rotations fit
        :param rotations: List of rotation angles (in radians)
        :return: max_shape [nr_objects, height, width, channels]
        """
        max_shape = [0, 0, 0, 0]
        for obj in self.objlist:
            for p, scale in zip(['PRPS_Diffuse/' + '%s' % obj,
                                 'PRPS/' + '%s' % obj,
                                 'PRPS/' + '%s' % obj], [1, 0.5, 0.5]):
                max_shape[0] += 1
                normal_path = os.path.join(self.datapath, p, 'gt_normal.tif')
                if not os.path.exists(normal_path):
                    raise ValueError("Path\"{:}\"does not exists.".format(normal_path))
                normals = cv2.imread(normal_path, -1)
                normals = cv2.resize(normals, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
                # The illumination count comes from light.txt's line count.
                f = open(os.path.join(self.datapath, p, 'light.txt'))
                data = f.read()
                f.close()
                lines = data.split('\n')
                nr_illums = len(lines) - 1 # the last line is empty (how to fix it?)
                if nr_illums > max_shape[3]:
                    max_shape[3] = nr_illums
                if rotations is not None:
                    # In case of rotations, the width and height might be larger
                    for angle in rotations:
                        img_shape = rotate_images(2 * np.pi - angle, normals[..., 0], axes=(0, 1), order=0).shape
                        for k in range(2):
                            if img_shape[k] > max_shape[k+1]:
                                max_shape[k+1] = img_shape[k]
                else:
                    for k in range(2):
                        if normals.shape[k] > max_shape[k+1]:
                            max_shape[k+1] = normals.shape[k]
        gc.collect()
        return max_shape

    @staticmethod
    def load_sample(dirpath, scale, illum_ids=-1, nr_channels=1):
        """Load one rendering: images, GT normals, mask and light directions.

        *dirpath* points at the images subdirectory; normals/mask/light.txt
        live one level up. Only illum_ids == -1 (all illuminations) is
        supported.
        """
        assert illum_ids == -1
        normal_path = os.path.join(dirpath, '../gt_normal.tif')
        inboundary_path = os.path.join(dirpath, '../inboundary.png')
        onboundary_path = os.path.join(dirpath, '../onboundary.png')
        if not os.path.exists(normal_path):
            raise ValueError("Path\"{:}\"does not exists.".format(normal_path))
        # read ground truth surface normal
        normals = np.float32(cv2.imread(normal_path, -1)) / 65535.0 # [-1,1]
        normals = normals[:, :, ::-1]
        normals = 2 * normals - 1
        normals = cv2.resize(normals, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
        normals = normals / np.sqrt(np.sum(normals**2, axis=-1, keepdims=True))
        height, width = np.shape(normals)[:2]
        # read mask images_metallic
        if os.path.exists(inboundary_path) and os.path.exists(onboundary_path):
            inboundary = cv2.imread(inboundary_path, -1)
            inboundary = cv2.resize(inboundary, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
            inboundary = inboundary > 0
            onboundary = cv2.imread(onboundary_path, -1)
            onboundary = cv2.resize(onboundary, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
            onboundary = onboundary > 0
            masks = inboundary | onboundary
        else:
            # Fall back to "normal points toward the camera" as the mask.
            masks = normals[..., 2] > 0
        masks = masks[..., None]
        # read light filenames
        f = open(os.path.join(dirpath, '../light.txt'))
        data = f.read()
        f.close()
        lines = data.split('\n')
        nr_illums = len(lines) - 1 # the last line is empty (how to fix it?)
        light_directions = np.zeros((nr_illums, 3), np.float32)
        for i, l in enumerate(lines):
            s = l.split(' ')
            if len(s) == 3:
                light_directions[i, 0] = float(s[0])
                light_directions[i, 1] = float(s[1])
                light_directions[i, 2] = float(s[2])
        # read images
        images = np.zeros((height, width, nr_illums, nr_channels), np.float32)
        for i in range(nr_illums):
            # NOTE(review): with fewer than 10 illuminations this modulus is
            # 0.0 (numpy yields nan rather than raising) and the progress
            # dots never print — confirm minimum illumination count.
            if i % np.floor(nr_illums / 10) == 0:
                print('.', end='')
            image_path = os.path.join(dirpath, '%05d.tif' % i)
            cv2_im = cv2.imread(image_path, -1) / 65535.0
            # NOTE(review): cv2.resize takes dsize as (width, height);
            # passing (height, width) swaps dimensions for non-square
            # images — confirm all CyclesPS renders are square.
            cv2_im = cv2.resize(cv2_im, (height, width), interpolation=cv2.INTER_NEAREST)
            if nr_channels == 1:
                # Average RGB to a single grayscale channel.
                cv2_im = (cv2_im[:, :, 0:1] + cv2_im[:, :, 1:2] + cv2_im[:, :, 2:3]) / 3
            images[:, :, i] = cv2_im
        return images, normals, masks, light_directions

    @staticmethod
    def load_sample_test(dir_path, obj_path, scale, index=-1):
        assert index == -1
        obj, dirn = obj_path.split("/")
        # NOTE(review): this passes (dir_path + obj, dirn, scale), but
        # load_sample's signature is (dirpath, scale, illum_ids, nr_channels),
        # so the string `dirn` lands in `scale` — this looks broken; confirm
        # the intended call before relying on it.
        return CyclesDataGenerator.load_sample(dir_path + obj, dirn, scale)
| 7,733 | 2,546 |
import os
class ExistsError(Exception):
    """Raised when the target file path does not exist."""
    pass
class KeyInvalidError(Exception):
    """Raised when the lookup key (prefix) is empty."""
    pass
def new_func(path, prev):
    """
    Look in the file at *path* for a line whose prefix is *prev* and return
    its data to the caller.

    Result codes in the returned dict:
        1000: success
        1001: file does not exist
        1002: key (prefix) is empty
        1003: unknown error
    :return: dict with 'code' and 'data' keys
    """
    response = {'code': 1000, 'data': None}
    try:
        # Validation raises dedicated exception types; the except clauses
        # below translate each one into its result code.
        if not os.path.exists(path):
            raise ExistsError()
        if not prev:
            raise KeyInvalidError()
        # NOTE: the actual line lookup is not implemented yet (placeholder).
        pass
    except ExistsError as e:
        response['code'] = 1001
        response['data'] = '文件不存在'
    except KeyInvalidError as e:
        response['code'] = 1002
        response['data'] = '关键字为空'
    except Exception as e:
        response['code'] = 1003
        response['data'] = '未知错误'
    return response
# def func(path, prev):
# """
# 去path路径的文件中,找到前缀为prev的一行数据,获取数据并返回给调用者。
# 1000,成功
# 1001,文件不存在
# 1002,关键字为空
# 1003,未知错误
# ...
# :return:
# """
# response = {'code': 1000, 'data': None}
# try:
# if not os.path.exists(path):
# response['code'] = 1001
# response['data'] = '文件不存在'
# return response
# if not prev:
# response['code'] = 1002
# response['data'] = '关键字为空'
# return response
# pass
# except Exception as e:
# response['code'] = 1003
# response['data'] = '未知错误'
# return response
def show():
    """Return the demo value 8."""
    return 8
def run():
    """Placeholder; does nothing yet."""
    pass
# #############自定义异常############
# class MyException(Exception):
# def __init__(self, code, msg):
# self.code = code
# self.msg = msg
# try:
# raise MyException(1000, '操作异常')
#
# except MyException as obj:
# print(obj.code, obj.msg)
# Knowledge point: how to define a custom exception class?
class MyException(Exception):
    def __init__(self, code, msg):
        # Carry an application error code alongside the message.
        self.code = code
        self.msg = msg
try:
    # Knowledge point: actively (manually) raising an exception
    raise MyException(1000, '操作异常')
except KeyError as obj:
    print(obj, 1111)
except MyException as obj:  # Knowledge point: catching the custom exception
    # Matches here: the first clause whose type fits the raised exception.
    print(obj, 2222)
except Exception as obj:
print(obj, 3333) | 2,105 | 880 |
import re
# Read a line count, then scan each following line for CSS hex color codes.
for _ in range(0, int(input())):
    # (#(?:[\da-f]{3}){1,2})  '#' followed by exactly 3 or 6 hex digits
    # (?!\w)                  not followed by a further word character
    # (?=.*;)                 only on lines that also contain a ';'
    #                         (property lines, not selector lines)
    matches = re.findall(r'(#(?:[\da-f]{3}){1,2})(?!\w)(?=.*;)', input(), re.IGNORECASE)
    if matches:
        print(*matches, sep='\n')
| 183 | 78 |
from talon import Context, Module
from .user_settings import get_list_from_csv
mod = Module()
ctx = Context()
# Declare the list so .talon files can reference {user.vocabulary}.
mod.list("vocabulary", desc="additional vocabulary words")
# Default words that will need to be capitalized (particularly under w2l).
# NB. These defaults and those later in this file are ONLY used when
# auto-creating the corresponding settings/*.csv files. Those csv files
# determine the contents of user.vocabulary and dictate.word_map. Once they
# exist, the contents of the lists/dictionaries below are irrelevant.
_capitalize_defaults = [
    "I",
    "I'm",
    "I've",
    "I'll",
    "I'd",
    "Monday",
    "Mondays",
    "Tuesday",
    "Tuesdays",
    "Wednesday",
    "Wednesdays",
    "Thursday",
    "Thursdays",
    "Friday",
    "Fridays",
    "Saturday",
    "Saturdays",
    "Sunday",
    "Sundays",
    "January",
    "February",
    # March omitted because it's a regular word too
    "April",
    # May omitted because it's a regular word too
    "June",
    "July",
    "August",
    "September",
    "October",
    "November",
    "December",
]
# Default words that need to be remapped.
_word_map_defaults = {
    # E.g:
    # "cash": "cache",
    # This is the opposite ordering to words_to_replace.csv (the latter has the target word first)
}
# Every capitalized default is also a remap from its lowercase form.
_word_map_defaults.update({word.lower(): word for word in _capitalize_defaults})
# "dictate.word_map" is used by `actions.dictate.replace_words` to rewrite words
# Talon recognized. Entries in word_map don't change the priority with which
# Talon recognizes some words over others.
ctx.settings["dictate.word_map"] = get_list_from_csv(
    "words_to_replace.csv",
    headers=("Replacement", "Original"),
    default=_word_map_defaults,
)
# Default words that should be added to Talon's vocabulary.
_simple_vocab_default = ["nmap", "admin", "Cisco", "Citrix", "VPN", "DNS", "Minecraft", "Ferran", "Angelos", "storageos"]
# Defaults for different pronounciations of words that need to be added to
# Talon's vocabulary.
_default_vocabulary = {
"N map": "nmap",
"under documented": "under-documented",
"koob control": "kubectl",
"cube control": "kubectl",
"keep control": "kubectl",
"chang pod": "pod",
"chang pods": "pods",
"chang node": "node",
"chang nodes": "nodes",
"chang kubernetes": "kubernetes",
"chang git": "git",
"chang pull": "pull",
"chang com": "com",
"chang delete": "delete",
"trying to lead": "delete",
"replica set": "replicaset",
"change delete": "delete",
"name space": "namespace",
"at it": "edit",
"chang sudo": "sudo",
"diagnostic yew till": "diagnosticutil",
"stateful set": "statefulset",
"in flux": "influx",
"you control": "kubectl",
"check out": "checkout",
"make directory": "mkdir",
"demon set": "daemonset",
"demon sets": "daemonsets",
"chang log": "log",
"chang logs": "log",
"koob control create from file": "kubectl create -f",
"cube control create from file": "kubectl create -f",
"keep control create from file": "kubectl create -f",
"chang seff": "ceph",
"ray doss": "RADOS",
"raydos": "RADOS",
"open sauce": "open-source",
"all namespaces": "--all-namespaces",
"output wide": "-o wide",
"etsy dee": "etcd",
"at city": "etcd",
"at cd": "etcd",
"cube system": "kube-system",
"from file": " - f ",
"with namespace": " - n ",
"chang log": "log",
"chang logs": "logs",
"change directory": "cd",
"storage class": "storageclass",
"my sequel": "mysql",
"dee bench": "dbench",
"chang hay": "hey",
"elastic search": "elasticsearch",
"elastic such": "elasticsearch",
"storage oh ess": "storageos",
"store to us": "storageos",
"store ous": "storageos",
"store joes": "store joes"
}
_default_vocabulary.update({word: word for word in _simple_vocab_default})
# "user.vocabulary" is used to explicitly add words/phrases that Talon doesn't
# recognize. Words in user.vocabulary (or other lists and captures) are
# "command-like" and their recognition is prioritized over ordinary words.
ctx.lists["user.vocabulary"] = get_list_from_csv(
"additional_words.csv",
headers=("Word(s)", "Spoken Form (If Different)"),
default=_default_vocabulary,
)
# for quick verification of the reload
# print(str(ctx.settings["dictate.word_map"]))
# print(str(ctx.lists["user.vocabulary"]))
| 4,429 | 1,580 |
import json
from bson import ObjectId
from datetime import datetime, date
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that also serializes bson ObjectId and datetime/date.

    ObjectId values become their string form; datetime and date values
    become ISO-8601 strings.  Everything else defers to the base class.
    """

    def default(self, o):
        if isinstance(o, ObjectId):
            return str(o)
        if isinstance(o, (datetime, date)):
            return o.isoformat()
        # Unknown type: let the base implementation raise TypeError.
        return super().default(o)
class DatetimeConverter:
    """Helpers for turning Mongo extended-JSON date documents into datetimes."""

    @staticmethod
    def strToDatetime(o):
        """Parse ``{"$date": "YYYY-MM-DDTHH:MM:SS.fffZ"}`` into a naive datetime.

        Fixes: the original was defined as an instance method without
        ``self``, so it could not be called on an instance; it is now a
        staticmethod (class-level calls keep working).  The format also
        accepted only a literal ``.000`` fractional part; ``%f`` now parses
        any fractional-second value while still accepting ``.000``.
        """
        return datetime.strptime(o["$date"], "%Y-%m-%dT%H:%M:%S.%fZ")
| 451 | 146 |
# Reraising last exception with raise w/o args
# NOTE(review): this reads like a language-conformance test (MicroPython
# style) — the printed "PASS"/"FAIL" strings are the test output, so the
# control flow below is intentional and must not be "cleaned up".
def f():
    try:
        raise ValueError("val", 3)
        # Unreachable: the raise above always fires.
        print("FAIL")
        raise SystemExit
    except:
        # Bare re-raise propagates the active ValueError unchanged.
        raise
try:
    f()
    # Unreachable if f() re-raised as expected.
    print("FAIL")
    raise SystemExit
except ValueError as e:
    pass
# Can reraise only in except block
try:
    # A bare raise with no active exception raises RuntimeError.
    raise
    print("FAIL")
    raise SystemExit
except RuntimeError:
    print("PASS")
| 390 | 124 |
import re
def test_version(host):
version = host.check_output('git-credential-manager-core --version')
pattern = r'[0-9\.]+(\.[0-9\.]+){2}'
assert re.search(pattern, version)
def test_git_config(host):
config = host.check_output('git config --system credential.helper')
assert config == '/usr/local/share/gcm-core/git-credential-manager-core'
config = host.check_output(
'git config --system credential.credentialStore')
assert config == 'secretservice'
| 494 | 160 |
# coding=utf-8
from datetime import datetime, timedelta
import dateutil.parser
import pytz
import urllib
TZ = pytz.timezone('Europe/Kiev')
def adapt_data(data):
    """Normalise a tender payload in place for the test environment.

    Rewrites the procuring entity's name, region and locality, converts
    every item's unit name / region / locality / delivery dates, and trims
    the contact telephone to 13 characters.  Returns the mutated dict.
    """
    data['data']['procuringEntity']['name'] = 'testuser_tender_owner'
    for x in data['data']['items']:
        x['unit']['name'] = get_unit_name(x['unit']['name'])
        x['deliveryAddress']['region'] = get_delivery_region(x['deliveryAddress']['region'])
        x['deliveryAddress']['locality'] = convert_locality(x['deliveryAddress']['locality'])
        # Delivery dates are clamped to midnight (see adapt_delivery_date).
        x['deliveryDate']['startDate'] = adapt_delivery_date(x['deliveryDate']['startDate'])
        x['deliveryDate']['endDate'] = adapt_delivery_date(x['deliveryDate']['endDate'])
    data['data']['procuringEntity']['address']['region'] = get_delivery_region(data['data']['procuringEntity']['address']['region'])
    data['data']['procuringEntity']['address']['locality'] = convert_locality(data['data']['procuringEntity']['address']['locality'])
    # 13 chars fits '+380XXXXXXXXX'; longer numbers are truncated.
    data['data']['procuringEntity']['contactPoint']['telephone'] = data['data']['procuringEntity']['contactPoint']['telephone'][:13]
    return data
def adapt_step(data, new_step):
    """Set the tender's and the first lot's minimal step to *new_step* (2 dp)."""
    amount = round(new_step, 2)
    data['data']['minimalStep']['amount'] = amount
    data['data']['lots'][0]['minimalStep']['amount'] = amount
def adapt_unit_name(data):
    """Expand a unit abbreviation to its full name; unknown values pass through."""
    full_names = {
        u"наб.": u"набір",
        u"шт.": u"штуки",
        u"упак.": u"упаковка",
    }
    return full_names.get(data, data)
def adapt_data_view(data):
    """Clamp every item's delivery start/end dates to midnight, in place.

    Returns the same (mutated) payload dict.
    """
    for x in data['data']['items']:
        x['deliveryDate']['startDate'] = adapt_delivery_date(x['deliveryDate']['startDate'])
        x['deliveryDate']['endDate'] = adapt_delivery_date(x['deliveryDate']['endDate'])
    return data
def download_file(url, file_name, output_dir):
    """Download *url* into ``<output_dir>/<file_name>``.

    NOTE(review): uses the Python 2 ``urllib.urlretrieve`` API; under
    Python 3 this would need ``urllib.request.urlretrieve``.
    """
    urllib.urlretrieve(url, ('{}/{}'.format(output_dir, file_name)))
def get_type_field(field):
    """Classify a tender field path as a 'value' or 'text' field.

    Returns 'value' for numeric/boolean/date fields and 'text' for
    free-text ones.  Fix: an unknown *field* previously fell through both
    branches and raised UnboundLocalError on the return; it now raises a
    descriptive ValueError instead.
    """
    value = ['deliveryDate.startDate', 'deliveryDate.endDate', 'deliveryAddress.postalCode', 'deliveryAddress.region',
             'deliveryAddress.streetAddress',
             'additionalClassifications.id', 'classification.id', 'unit.name', 'unit.code', 'deliveryLocation.latitude',
             'deliveryLocation.longitude', 'quantity', 'deliveryAddress.locality',
             'title', 'value.amount', 'value.valueAddedTaxIncluded', 'minimalStep.amount',
             'minimalStep.valueAddedTaxIncluded']
    text = ['description', 'deliveryAddress.countryName', 'classification.scheme', 'classification.description',
            'additionalClassifications.scheme', 'additionalClassifications.description',
            'value.currency', 'minimalStep.currency', 'featureOf', 'status', 'resolutionType', 'resolution', 'satisfied', 'complaintID', 'cancellationReason']
    if field in value:
        return 'value'
    if field in text:
        return 'text'
    raise ValueError('unknown field: %s' % field)
def get_delivery_region(region):
    """Map a region name to its portal spelling (Latin 'i' replaces Cyrillic 'і').

    Regions without a special spelling are returned unchanged.
    """
    special_spellings = {
        u"місто Київ": u"м.Київ",
        u"Дніпропетровська область": u"Днiпропетровська область",
        u"Рівненська область": u"Рiвненська область",
        u"Чернігівська область": u"Чернiгiвська область",
    }
    return special_spellings.get(region, region)
def convert_float_to_string(number):
    """Render *number* with exactly two decimal places."""
    return '%.2f' % number
def convert_coordinates_to_string(number):
    """Render a coordinate using the value's default string formatting."""
    return '{}'.format(number)
def adapt_delivery_date(date):
    """Clamp an ISO datetime string's time-of-day to 00:00:00, keeping its offset.

    Expects a 'T' separator and a '+HH:MM' timezone suffix to be present.
    """
    time_starts = date.index('T') + 1
    offset_starts = date.index('+')
    return date[:time_starts] + '00:00:00' + date[offset_starts:]
def parse_date(date_str):
    """Parse 'DD.MM.YYYY HH:MM', localize to Europe/Kiev, return ISO string.

    (strptime already yields a complete naive datetime, so no field-by-field
    reconstruction is needed.)
    """
    parsed = datetime.strptime(date_str, "%d.%m.%Y %H:%M")
    return TZ.localize(parsed).isoformat()
def parse_item_date(date_str):
    """Parse 'DD.MM.YYYY' (midnight), localize to Europe/Kiev, return ISO string."""
    parsed = datetime.strptime(date_str, "%d.%m.%Y")
    return TZ.localize(parsed).isoformat()
def convert_date_to_string(date):
    """Format any dateutil-parsable datetime string as 'DD.MM.YYYY HH:MM'."""
    return dateutil.parser.parse(date).strftime("%d.%m.%Y %H:%M")
def convert_item_date_to_string(date):
    """Format any dateutil-parsable datetime string as 'DD.MM.YYYY'."""
    return dateutil.parser.parse(date).strftime("%d.%m.%Y")
def parse_complaintPeriod_date(date_string):
    """Parse 'DD.MM.YYYY HH:MM', step back 5 minutes, localize, return ISO string."""
    parsed = datetime.strptime(date_string, "%d.%m.%Y %H:%M") - timedelta(minutes=5)
    return TZ.localize(parsed).isoformat()
def parse_complaintPeriod_endDate(date_str):
    """Parse an end date in either ISO-like or local form and return ISO string.

    A '-' in the input selects 'YYYY-MM-DD HH:MM:SS'; otherwise
    'DD.MM.YYYY HH:MM' is assumed.  The result is localized to Europe/Kiev.
    """
    fmt = "%Y-%m-%d %H:%M:%S" if '-' in date_str else "%d.%m.%Y %H:%M"
    parsed = datetime.strptime(date_str, fmt)
    return TZ.localize(parsed).isoformat()
def capitalize_first_letter(string):
    """Apply str.capitalize: first character upper-cased, the REST lower-cased.

    NOTE: despite the name, characters after the first are forced to
    lower case — that is str.capitalize's documented behaviour.
    """
    return string.capitalize()
def get_unit_name(name):
    """Abbreviate a full unit name; unknown names are returned unchanged."""
    abbreviations = {
        u'штуки': u'шт.',
        u'упаковка': u'упак.',
        u'набір': u'наб.',
        u'кілограми': u'кг.',
        u'лот': u'лот',
        u'флакон': u'флак.',
        u'Флакон': u'флак.',
    }
    return abbreviations.get(name, name)
def convert_locality(name):
    """Map special localities to their portal form and upper-case the result."""
    special = {
        u"Київ": u"М.КИЇВ",
        u"Дніпропетровськ": u"ДНІПРОПЕТРОВСЬКА ОБЛАСТЬ/М.ДНІПРО",
    }
    # .upper() is a no-op for the already-uppercase special values but
    # normalises every other locality, matching the original behaviour.
    return special.get(name, name).upper()
def convert_status(tender_status):
    """Translate a UI tender-status label to its API status code.

    Raises KeyError for labels not in the mapping (as before).
    """
    return {
        u'Очікування пропозицій': u'active.tendering',
        u'Період аукціону': u'active.auction',
        u'Період уточнень': u'active.enquiries',
        u'Перед-кваліфікаційний період': u'active.pre-qualification',
        u'Період оскарження': u'active.pre-qualification.stand-still',
    }[tender_status]
def get_claim_status(claim_status, test_name):
    """Translate a UI claim-status label to its API code.

    *test_name* is unused but kept for caller compatibility.  Raises
    KeyError for unknown labels (as before).
    """
    return {
        u'Вимога': 'claim',
        u'Розглянуто': 'answered',
        u'Вирішена': 'resolved',
        u'Відхилено': 'cancelled',
        u'Відхилена': 'declined',
        u'Обробляється': 'pending',
        u'Недійсна': 'invalid',
        u'Проігнорована': 'ignored',
    }[claim_status]
def get_resolution_type(resolution):
    """Translate a UI resolution label to its API type (KeyError if unknown)."""
    return {
        u'Вирішено': 'resolved',
        u'Задоволено': 'resolved',
        u'Відхилено': 'declined',
        u'Недійсно': 'invalid',
    }[resolution]
def convert_satisfied(value):
    """Return True exactly when the UI answer is u'Так' ('Yes')."""
    return value == u'Так'
def get_unit(field, unit_data):
    """Extract 'unit.code' or 'unit.name' from a 'CODE NAME' string.

    The name token is expanded via adapt_unit_name.  Raises KeyError for
    any other *field* (as before).
    """
    tokens = unit_data.split()
    parsed = {
        'unit.code': tokens[0],
        'unit.name': adapt_unit_name(tokens[1]),
    }
    return parsed[field]
def convert_type_tender(key):
    """Translate a UI tender-type label to its API procurement method type."""
    return {
        u'Відкриті торги': 'aboveThresholdUA',
        u'Відкриті торги з публікацією англ.мовою': 'aboveThresholdEU',
        u'Переговорна процедура': 'reporting',
    }[key]
def convert_data_lot(key):
    """Translate a UI currency label to its ISO code (KeyError if unknown)."""
    return {
        u'грн.': 'UAH',
    }[key]
def convert_data_feature(key):
    """Translate a UI feature-target label to its API featureOf value."""
    return {
        u'Закупівлі': 'tenderer',
        u'Лоту': 'lot',
        u'Предмету лоту': 'item',
    }[key]
def convert_complaintID(tender_uaid, type_complaint):
    """Build a sequential complaint id of the form '<uaid>.a<n>'.

    A module-level counter ``complaint_number`` is lazily created on first
    use and incremented per call.  *type_complaint* is unused but kept for
    interface compatibility.

    Fix: the original declared ``global complaint_number`` AFTER the name
    was first assigned in the function, which is a SyntaxError in Python 3
    ("name is assigned to before global declaration"); the declaration now
    comes first.
    """
    global complaint_number
    if 'complaint_number' not in globals():
        complaint_number = 1
    value = '%s.a%s' % (tender_uaid, complaint_number)
    complaint_number += 1
    return value
def get_pos(featureOf):
    """Return the 1-based UI row position for a feature-target label.

    Fix: an unknown label used to fall through every branch and raise
    UnboundLocalError on the return; it now raises a descriptive
    ValueError instead.
    """
    positions = {
        u'Закупівлі': 1,
        u'Лоту': 2,
        u'Предмету лоту': 1,
    }
    try:
        return positions[featureOf]
    except KeyError:
        raise ValueError('unknown featureOf label: %s' % featureOf)
def get_value_feature(value):
    """Render a fractional feature value as a truncated integer percentage."""
    percent = int(value * 100)  # truncation, not rounding, as before
    return '{}%'.format(percent)
def get_feature_xpath(field_name, feature_id):
    """Build the XPath locating a feature field inside the feature's table row."""
    anchor = "//*[contains(@value, '{}')]".format(feature_id)
    paths = {
        'title': anchor,
        'description': anchor + "/ancestor::tbody/tr[2]/td[2]/textarea",
        'featureOf': anchor + "/ancestor::tbody/tr[3]/td[2]//td[2]/div[1]/label",
    }
    return paths[field_name]
def convert_bid_status(value):
    """Translate a UI bid-status label to its API code (KeyError if unknown)."""
    return {
        u'Недійсна пропозиція': 'invalid',
    }[value]
def get_all_dates(initial_tender_data, key):
    """Return a tender-period boundary named *key* as 'DD.MM.YYYY HH:MM'.

    Unknown keys yield ''.  NOTE(review): 'EndPeriod' is formatted from the
    period's *startDate* — looks suspicious; confirm against callers.
    """
    tender_period = initial_tender_data.data.tenderPeriod
    start_dt = dateutil.parser.parse(tender_period['startDate'])
    end_dt = dateutil.parser.parse(tender_period['endDate'])
    data = {
        'EndPeriod': start_dt.strftime("%d.%m.%Y %H:%M"),
        'StartDate': start_dt.strftime("%d.%m.%Y %H:%M"),
        'EndDate': end_dt.strftime("%d.%m.%Y %H:%M"),
    }
    return data.get(key, '')
def increment_identifier(data):
    """Increment the procuring entity's numeric identifier in place (kept as str)."""
    identifier = data['data']['procuringEntity']['identifier']
    identifier['id'] = str(int(identifier['id']) + 1)
def convert_cause_type(key):
    """Translate a numeric UI cause selector to its API cause code.

    Raises KeyError for unmapped selectors (note: '3' is intentionally
    absent, matching the original mapping).
    """
    return {
        '1': 'artContestIP',
        '2': 'noCompetition',
        '4': 'twiceUnsuccessful',
        '5': 'additionalPurchase',
        '6': 'additionalConstruction',
        '7': 'stateLegalServices',
    }[key]
| 9,605 | 3,432 |
#!/usr/bin/env python3
""" Show split layout indicator
Usage:
./tiling-indicator.py
Supposed to be used inside waybar or polybar.
Config example:
Waybar:
"custom/ws": {
"exec": "python -u $HOME/.config/sway/scripts/tiling-indicator-2.py 2> /dev/null
}
Polybar:
[module/layout]
type = custom/script
exec = PYTHONPATH=${XDG_CONFIG_HOME}/i3 python -u -m scripts.tiling-indicator.py 2> /dev/null
interval = 0
format = "<label>"
tail = true
label-font = 6
github :: https://github.com/iziGor
year :: 2021
"""
import i3ipc
# Single IPC connection to the running sway/i3 instance.
i3 = i3ipc.Connection()
# Last layout printed; used to avoid emitting duplicate indicator lines.
last = ''
# Font Awesome 5 Free:style=Solid
# layouts = { "tabbed": ("61bbf6", "\uf24d")
#           , "stacked": ("00AA00", "\uf5fd")
#           , "splitv": ("82B8DF", "\uf103")
#           , "splith": ("CF4F88", "\uf101")
#           }
# Active mapping: layout name -> (hex color, indicator glyph).
layouts = { "tabbed": ("61bbf6", "\uf24d")
          , "stacked": ("00AA00", "\uf5fd")
          , "splitv": ("82B8DF", "\u2b9f")
          , "splith": ("CF4F88", "\u2b9e")
          }
# Material Icons
# layouts = {"tabbed":"\ue8d8", "stacked":"\ue3c7", "splitv":"\ue947", "splith":"\ue949"}
def on_event(sway, _):
    """Print the focused container's parent layout indicator when it changes.

    Output is pango markup for waybar; the cached ``last`` value suppresses
    duplicate lines between events.
    """
    global last
    layout = sway.get_tree().find_focused().parent.layout
    if layout != last:
        color, glyph = layouts.get(layout, ("888800", "?"))
        ## polybar format output
        # print("%{{F#{}}}{}%{{F-}}".format(color, glyph))
        ## waybar format output
        print("<span color='#{}'>{}</span>".format(color, glyph))
        last = layout
# Subscribe to events
# Window-focus changes and keybindings are the two triggers that can
# change which container (and therefore which layout) is focused.
i3.on("window::focus", on_event)
i3.on("binding", on_event)
# Start the main loop and wait for events to come in.
i3.main()
| 1,638 | 671 |
from typing import Optional
import discord
import asyncpg
from discord.ext import commands
from .utils.pagination import create_paginated_embed
class Tags(commands.Cog):
    """Productivity's tag system."""

    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot
        self.emoji = "🏷️ "  # cog emoji, presumably shown by a help command

    async def delete_check(self, ctx: commands.Context, tag_name) -> bool:
        # Permission gate for deletion: tag owner or Manage Messages.
        # FIX: the original compared fetched['user_id'] (an int) with
        # ctx.author (a Member object) — never equal, so owners could not
        # delete their own tags.  Compare against ctx.author.id instead.
        # FIX: a nonexistent tag made fetchrow() return None and raised
        # TypeError on subscripting; treat it as "not allowed" instead.
        query = """
        SELECT * FROM tags
        WHERE tag_name = $1 AND guild_id = $2;
        """
        async with self.bot.db.acquire() as connection:
            async with connection.transaction():
                fetched = await connection.fetchrow(query, tag_name, ctx.guild.id)
        if fetched is None:
            return False
        return fetched['user_id'] == ctx.author.id or ctx.author.guild_permissions.manage_messages

    @commands.group(invoke_without_command=True)
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def tag(self, ctx, *, tag: str):
        """A tag system!"""
        # Bare `tag <name>` lookup; subcommands (create/delete/...) are
        # dispatched by discord.py before this body runs.
        async with self.bot.db.acquire() as connection:
            async with connection.transaction():
                try:
                    query = """
                    SELECT * FROM tags
                    WHERE tag_name = $1 AND guild_id = $2;
                    """
                    tag = await connection.fetchrow(query, tag, ctx.guild.id)
                    # fetchrow() returns None for a missing tag; the
                    # subscript below then raises TypeError, handled here.
                    return await ctx.send(tag['tag_content'])
                except TypeError:
                    return await ctx.send("Tag not found.")

    @tag.command(description="Create a tag!", aliases=['add'])
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def create(self, ctx, name, *, content):
        # Insert a new (guild, name) tag owned by the invoker.
        try:
            query = """
            INSERT INTO tags (user_id, guild_id, tag_name, tag_content)
            VALUES ($1, $2, $3, $4);
            """
            await self.bot.db.execute(query, ctx.author.id, ctx.guild.id, name, content)
            await ctx.send("Succesfully created the tag!")
        except Exception as e:
            await ctx.send(e)
            await ctx.send("An error has occurred whilst creating the tag")

    @tag.command(description="Start your use of creating tags")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def start(self, ctx):
        # Register the invoker in tag_users; a duplicate insert raises and
        # is reported as "already in our database".
        try:
            query = """
            INSERT INTO tag_users (user_id, username)
            VALUES ($1, $2);
            """
            await self.bot.db.execute(query, ctx.author.id, ctx.author.name)
            await ctx.send("Successfully started your use of our tag system!")
        except Exception:
            await ctx.send("You are already in our database!")

    @tag.command(description="Delete a tag!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def delete(self, ctx, *, tag: str):
        # Deletion is gated by delete_check (owner or Manage Messages).
        check = await self.delete_check(ctx, tag)
        if check:
            try:
                query = """
                DELETE FROM tags
                WHERE tag_name = $1 AND guild_id = $2;
                """
                await self.bot.db.execute(query, tag, ctx.guild.id)
                await ctx.send("Successfully deleted tag!")
            except Exception:
                await ctx.send("An error has occurred while attempting to delete the tag.")
        else:
            await ctx.send("You do not have permission to delete this tag!")

    @commands.command(description="Look at all of the tags a member has!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def tags(self, ctx, member: Optional[discord.Member] = None):
        # Defaults to the invoker when no member is given.
        member = member or ctx.author
        async with self.bot.db.acquire() as connection:
            async with connection.transaction():
                query = """
                SELECT * FROM tags
                WHERE user_id = $1 AND guild_id = $2;
                """
                tags = await connection.fetch(query, member.id, ctx.guild.id)
        paginate = create_paginated_embed(ctx, tags, 'tag_name', f"{member}'s tags", member.avatar_url, member.name)
        await paginate.start(ctx)

    @tag.command(description="Edit a tag!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def edit(self, ctx, old_tag, new_name, *, new_content):
        # Only rows owned by the invoker match, so editing someone else's
        # tag silently updates nothing.
        query = """
        UPDATE tags
        SET tag_name = $1, tag_content = $2
        WHERE user_id = $3 AND tag_name = $4 AND guild_id = $5;
        """
        try:
            await self.bot.db.execute(query, new_name, new_content, ctx.author.id, old_tag, ctx.guild.id)
            return await ctx.send("Successfully edited tag!")
        except Exception:
            return await ctx.send(
                """
                An error occurred while editing the tag,
                this is likely because u dont own this tag or it doesnt exist.
                """
            )

    @tag.command(description="View information about a tag!")
    @commands.cooldown(1, 5, commands.BucketType.user)
    async def info(self, ctx, *, tag: str):
        # Show the tag's name and owner in an embed.
        async with self.bot.db.acquire() as connection:
            async with connection.transaction():
                query = """
                SELECT * FROM tags
                WHERE guild_id = $1 AND tag_name = $2;
                """
                try:
                    tag_info = await connection.fetchrow(query, ctx.guild.id, tag)
                    # Missing tag -> None subscript -> TypeError, handled below.
                    owner = ctx.guild.get_member(tag_info['user_id'])
                    embed = discord.Embed(title=tag_info['tag_name'])
                    embed.add_field(name="Owner", value=owner.mention)
                    embed.set_author(name=owner, icon_url=owner.avatar_url)
                    return await ctx.send(embed=embed)
                except TypeError:
                    return await ctx.send("Tag not found.")
return await ctx.send("Tag not found.")
def setup(bot: commands.Bot):
    """Extension entry point used by discord.py's load_extension."""
    cog = Tags(bot)
    bot.add_cog(cog)
import sys
import mapping.organization as org
import mapping.contact as contact
import mapping.worship as worship
import mapping.central as central
import mapping.national as national
import mapping.codelist as codelist
import mapping.vocabulary as vocab
import mapping.location as location
import mapping.local_admin_unit as local_admin_unit
import mapping.nationality as nationality
def main(*args):
    """Dispatch to the mapping module selected by args[0].

    Two-argument mappers receive args[1] and args[2]; vocab receives
    args[1]; the rest take no arguments.  Unknown selectors are ignored,
    and missing positional arguments raise IndexError at call time —
    both exactly as before.
    """
    dispatch = {
        'org': lambda: org.main(args[1], args[2]),
        'contact': lambda: contact.main(args[1], args[2]),
        'worship': lambda: worship.main(args[1], args[2]),
        'central': lambda: central.main(args[1], args[2]),
        'national': lambda: national.main(args[1], args[2]),
        'codelist': codelist.main,
        'vocab': lambda: vocab.main(args[1]),
        'location': location.main,
        'local_admin_unit': local_admin_unit.main,
        'nationality': nationality.main,
    }
    handler = dispatch.get(args[0])
    if handler is not None:
        handler()
if __name__ == '__main__':
    # CLI entry: forward all shell arguments; do nothing when none given.
    args = sys.argv[1:]
    if len(args) > 0:
        main(*args)
| 1,078 | 380 |
import numpy as np
import os
import matplotlib.pyplot as plt
from print_values import *
from plot_data_all_phonemes import *
from plot_data import *
# File that contains the data
data_npy_file = 'data/PB_data.npy'
# Loading data from .npy file
# allow_pickle=True is required because the file stores a pickled dict.
data = np.load(data_npy_file, allow_pickle=True)
data = np.ndarray.tolist(data)
# Make a folder to save the figures
figures_folder = os.path.join(os.getcwd(), 'figures')
if not os.path.exists(figures_folder):
    os.makedirs(figures_folder, exist_ok=True)
# Array that contains the phoneme ID (1-10) of each sample
phoneme_id = data['phoneme_id']
print(phoneme_id)
# frequencies f1 and f2
f1 = data['f1']
f2 = data['f2']
print('f1 statistics:')
print_values(f1)
print('f2 statistics:')
print_values(f2)
# Initialize array containing f1 & f2, of all phonemes.
X_full = np.zeros((len(f1), 2))
#########################################
# Write your code here
# Store f1 in the first column of X_full, and f2 in the second column of X_full
X_full[:, 0] = f1
X_full[:, 1] = f2
########################################/
X_full = X_full.astype(np.float32)
# you can use the p_id variable, to store the ID of the chosen phoneme that will be used (e.g. phoneme 1, or phoneme 2)
p_id = 1
#########################################
# Select only the samples of the chosen phoneme.
# Fix: the original scaffold allocated zeros of the right shape but never
# copied the samples in, so the later plot showed only zeros.  A boolean
# mask over phoneme_id picks the matching rows of X_full; the result keeps
# the (n_samples_of_phoneme, 2) layout the zeros placeholder had.
X_phoneme_1 = X_full[np.asarray(phoneme_id) == p_id, :]
########################################
# Plot array containing all phonemes
# Create a figure and a subplot
fig, ax1 = plt.subplots()
# plot the full dataset (f1 & f2, all phonemes)
plot_data_all_phonemes(X=X_full, phoneme_id=phoneme_id, ax=ax1)
# save the plotted dataset as a figure
plot_filename = os.path.join(os.getcwd(), 'figures', 'dataset_full.png')
plt.savefig(plot_filename)
################################################
# Plot array containing phoneme 1
# Create a figure and a subplot
fig, ax2 = plt.subplots()
title_string = 'Phoneme 1'
# plot the samples of the dataset, belonging to phoneme 1 (f1 & f2, phoneme 1)
plot_data(X=X_phoneme_1, title_string=title_string, ax=ax2)
# save the plotted points of phoneme 1 as a figure
plot_filename = os.path.join(os.getcwd(), 'figures', 'dataset_phoneme_1.png')
plt.savefig(plot_filename)
# enter non-interactive mode of matplotlib, to keep figures open
plt.ioff()
plt.show()
# Generated by Django 2.2.6 on 2020-04-03 20:55
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the Subject/Paper models.  The
    # RemoveField must run before DeleteModel('Paper') so the M2M/FK
    # 'papers' relation is gone before its target table is deleted.

    dependencies = [
        ('students', '0005_subject'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='subject',
            name='papers',
        ),
        migrations.DeleteModel(
            name='Paper',
        ),
        migrations.DeleteModel(
            name='Subject',
        ),
    ]
| 459 | 146 |
import curses
from contextlib import contextmanager
from time import sleep
from typing import List, Tuple, TYPE_CHECKING
from .actions import Action
from .properties import Property
if TYPE_CHECKING:
from .object import Object
class World:
    # Singleton facade: all state lives in the inner __World instance;
    # attribute access is forwarded via __getattr__ below.
    class __World:
        def __init__(self, fps: int, render: bool = True):
            """Game world: holds objects, a draw queue and an optional curses window."""
            self.fps = fps
            self._objects: List["Object"] = []
            # Pending (x, y, char) cells to paint on the next draw().
            self._draw_queue: List[Tuple[int, int, str]] = []
            self.running = True
            self._window = None
            self._height, self._width = 50, 80 # s.getmaxyx()
            self._key = None
            if render:
                _ = curses.initscr()
                curses.curs_set(0)
                self._window = curses.newwin(self._height, self._width, 0, 0)
                self._window.keypad(True)
                # getch() blocks at most one frame, pacing the loop at fps.
                self._window.timeout(1000 // self.fps)
        def register(self, obj: "Object"):
            """Add a new object and queue its initial cell for drawing."""
            assert obj not in self._objects
            self._objects.append(obj)
            self._draw_queue.append((obj.x, obj.y, obj.sign))
        def get_properties(self, x: int, y: int) -> List[Property]:
            """Return a copy of the properties of the live object at (x, y), if any."""
            for o in self._objects:
                if o.xy == (x, y) and not o.is_destroyed:
                    return o.properties[:]
            return []
        def draw(self):
            """Flush the draw queue to the curses window (or stdout when headless)."""
            while self._draw_queue:
                x, y, s = self._draw_queue.pop()
                if self._window:
                    # Silently clip cells outside the window bounds.
                    if x in range(0, self.width) and y in range(0, self.height):
                        self._window.addch(y, x, s)
                else:
                    print(x, y, s)
        def tick(self):
            """Advance one frame: input, movement, redraw, cleanup, next key."""
            # Handle keypress mapping
            key = self.keypress
            if key:
                for obj in (o for o in self._objects if o.mapping):
                    if key in obj.mapping:
                        new_x, new_y = obj.x, obj.y
                        action = obj.mapping[key]
                        if action == Action.MOVE_UP:
                            new_y -= 1
                        if action == Action.MOVE_DOWN:
                            new_y += 1
                        if action == Action.MOVE_LEFT:
                            new_x -= 1
                        if action == Action.MOVE_RIGHT:
                            new_x += 1
                        # Movement is blocked by SOLID occupants.
                        if Property.SOLID not in self.get_properties(new_x, new_y):
                            obj.x, obj.y = new_x, new_y
            # Update draw queue
            for obj in self._objects:
                if obj.is_destroyed:
                    # Erase the cell the object last occupied.
                    self._draw_queue.append((obj._oldx, obj._oldy, " "))
                elif obj.has_moved:
                    self._draw_queue.append((obj._oldx, obj._oldy, " "))
                    self._draw_queue.append((obj.x, obj.y, obj.sign))
                obj.tick()
            # Render
            self.draw()
            # Remove destroyed objects
            self._objects = [o for o in self._objects if not o.is_destroyed]
            # Get keypress
            if self._window:
                # Blocks up to the window timeout set in __init__.
                self._key = self._window.getch()
            else:
                sleep(1.0 / self.fps)
                self._key = None
            return self.running
        def quit(self):
            """Stop the loop and restore the terminal if curses was active."""
            self.running = False
            if self._window:
                curses.endwin()
                self._window = False
        @property
        def width(self):
            return self._width
        @property
        def height(self):
            return self._height
        @property
        def keypress(self):
            # curses getch() returns -1 on timeout; normalise to None.
            return self._key if self._key != -1 else None
    # Shared singleton instance (class attribute).
    instance = None
    def __init__(self, *args, **kwargs):
        # Only the first World(...) may construct the inner instance.
        assert not World.instance
        World.instance = World.__World(*args, **kwargs)
    def __getattr__(self, name):
        # Delegate everything else to the singleton.
        return getattr(self.instance, name)
    @contextmanager
    def renderer(self):
        """Context manager guaranteeing terminal restoration via quit()."""
        try:
            yield self
        finally:
            self.quit()
| 4,027 | 1,116 |
import factory
from projects.tests.factories.project import ProjectFactory
class DeckFactory(factory.DjangoModelFactory):
    # Builds projects.Deck instances; each deck gets a fresh project from
    # ProjectFactory unless one is passed in explicitly.
    class Meta:
        model = "projects.Deck"
    project = factory.SubFactory(ProjectFactory)
| 223 | 61 |
# Registry of ETL jobs: each entry names a dataset, the module directory
# holding its ETL code, and the ETL class to instantiate.
DATASET_LIST = [
    {
        "name": "tree_equity_score",
        "module_dir": "tree_equity_score",
        "class_name": "TreeEquityScoreETL",
    },
    {
        "name": "census_acs",
        "module_dir": "census_acs",
        "class_name": "CensusACSETL",
    },
    {
        "name": "ejscreen",
        "module_dir": "ejscreen",
        "class_name": "EJScreenETL",
    },
    {
        "name": "housing_and_transportation",
        "module_dir": "housing_and_transportation",
        "class_name": "HousingTransportationETL",
    },
    {
        "name": "hud_housing",
        "module_dir": "hud_housing",
        "class_name": "HudHousingETL",
    },
    {
        "name": "calenviroscreen",
        "module_dir": "calenviroscreen",
        "class_name": "CalEnviroScreenETL",
    },
    {
        "name": "hud_recap",
        "module_dir": "hud_recap",
        "class_name": "HudRecapETL",
    },
]
# Census is kept out of DATASET_LIST — presumably run separately; confirm
# against the runner that consumes these constants.
CENSUS_INFO = {
    "name": "census",
    "module_dir": "census",
    "class_name": "CensusETL",
}
| 1,012 | 389 |
from .group import Group
from .star import (STAR_PARAMETERS_NAMES,
GalacticDiskType,
Star)
| 129 | 38 |
# Generated by Django 3.0.8 on 2020-10-24 01:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: marks several Hash fields non-editable
    # (editable=False only affects forms/admin, not the database schema).

    dependencies = [
        ('cfc_app', '0004_auto_20201024_0133'),
    ]

    operations = [
        migrations.AlterField(
            model_name='hash',
            name='fob_method',
            field=models.CharField(editable=False, max_length=6),
        ),
        migrations.AlterField(
            model_name='hash',
            name='generated_date',
            field=models.DateField(editable=False),
        ),
        migrations.AlterField(
            model_name='hash',
            name='hashcode',
            field=models.CharField(editable=False, max_length=32),
        ),
        migrations.AlterField(
            model_name='hash',
            name='item_name',
            field=models.CharField(editable=False, max_length=255),
        ),
        migrations.AlterField(
            model_name='hash',
            name='size',
            field=models.PositiveIntegerField(editable=False),
        ),
    ]
| 1,064 | 331 |
from e2e import DockerTest
class TestGet(DockerTest):
    # End-to-end tests for `guet get`: each test queues guet commands in a
    # container, runs them via execute(), then asserts on the log lines by
    # index.
    def test_get_current_prints_currently_set_committers(self):
        self.guet_init()
        self.git_init()
        self.guet_add('initials1', 'name1', 'email1')
        self.guet_add('initials2', 'name2', 'email2')
        self.guet_start()
        self.guet_set(['initials1', 'initials2'])
        self.guet_get_current()
        self.save_file_content('.guet/errors')
        self.execute()
        # Lines 0-4 are output from the setup commands above.
        self.assert_text_in_logs(5, 'Currently set committers')
        self.assert_text_in_logs(6, 'initials1 - name1 <email1>')
        self.assert_text_in_logs(7, 'initials2 - name2 <email2>')
    def test_get_committers_prints_all_committers_on_the_system(self):
        self.guet_init()
        self.guet_add('initials1', 'name1', 'email1')
        self.guet_add('initials2', 'name2', 'email2')
        self.guet_get_committers()
        self.save_file_content('.guet/errors')
        self.execute()
        self.assert_text_in_logs(0, 'All committers')
        self.assert_text_in_logs(1, 'initials1 - name1 <email1>')
        self.assert_text_in_logs(2, 'initials2 - name2 <email2>')
    def test_get_prints_error_message_if_trying_to_run_before_guet_init(self):
        # No guet_init() on purpose: the command must refuse to run.
        self.guet_get_committers()
        self.execute()
        self.assert_text_in_logs(0, ('guet has not been initialized yet! ' +
                                     'Please do so by running the command "guet init".'))
    def test_prints_help_message(self):
        self.guet_init()
        self.guet_get_committers(help=True)
        self.execute()
        self.assert_text_in_logs(0, 'usage: guet get <identifier> [-flag, ...]')
        self.assert_text_in_logs(2, 'Get currently set information.')
        self.assert_text_in_logs(4, 'Valid Identifier')
        self.assert_text_in_logs(6, '\tcurrent - lists currently set committers')
        self.assert_text_in_logs(7, '\tcommitters - lists all committers')
| 1,955 | 677 |
import numpy as np
import pandas as pd
import h5py
# docker run -it kagglegym
# python
# >>> import kagglegym
# >>> kagglegym.test()
# Load the training DataFrame from the HDF5 dump (pandas reads it via
# PyTables; the h5py import above is presumably for direct inspection).
train = pd.read_hdf("../data/train.h5")
| 175 | 67 |
# 50. Write a Python program to print without newline or space.
# Question:
# Input:
# Output:
# Solution: https://www.w3resource.com/python-exercises/python-basic-exercise-50.php
# Ideas:
"""
1. Badly phrased question. It should ask: use a for loop to print ten "*" characters on one line.
"""
# Steps:
"""
"""
# Notes:
"""
"""
# Code:
# Emit ten asterisks on one line, then a blank line (the "\n" literal
# plus print's own newline).
print('*' * 10, end="")
print("\n")
# Testing:
"""
"""
| 442 | 178 |
from django.db import models
import django_filters
from django_filters.rest_framework import FilterSet
# Service identifiers recognised by this app; only PACS is registered so far.
REGISTERED_SERVICES = ['PACS']
class Service(models.Model):
    # A named external service (e.g. 'PACS') that files are attributed to.
    identifier = models.CharField(max_length=20, unique=True)
    def __str__(self):
        return self.identifier
class ServiceFile(models.Model):
    # A file registered under a Service; newest-by-name first by default.
    creation_date = models.DateTimeField(auto_now_add=True)
    fname = models.FileField(max_length=512, unique=True)
    service = models.ForeignKey(Service, db_index=True, on_delete=models.CASCADE)
    class Meta:
        ordering = ('-fname',)
    def __str__(self):
        return self.fname.name
class ServiceFileFilter(FilterSet):
    # Query-string filters for ServiceFile list endpoints: date range,
    # filename prefix/exact match, and service by identifier or pk.
    min_creation_date = django_filters.DateFilter(field_name='creation_date',
                                                  lookup_expr='gte')
    max_creation_date = django_filters.DateFilter(field_name='creation_date',
                                                  lookup_expr='lte')
    fname = django_filters.CharFilter(field_name='fname', lookup_expr='startswith')
    fname_exact = django_filters.CharFilter(field_name='fname', lookup_expr='exact')
    service_identifier = django_filters.CharFilter(field_name='service__identifier',
                                                   lookup_expr='exact')
    service_id = django_filters.CharFilter(field_name='service_id', lookup_expr='exact')
    class Meta:
        model = ServiceFile
        fields = ['id', 'min_creation_date', 'max_creation_date', 'fname', 'fname_exact',
                  'service_identifier', 'service_id']
| 1,559 | 463 |
#!/usr/bin/env python
"""
"""
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
import xml.dom.minidom
import re
import sys
import getopt
import os
from time import gmtime, strftime
from nipype import config, logging
from nighres.lesion_tool.lesion_pipeline import Lesion_extractor
def main():
    """Parse CLI options and run the lesion-extraction pipeline.

    Options: -n workflow name, -d base directory, -s subject, -f
    FreeSurfer dir, -a atlas, -l labels.
    NOTE(review): only "at least 4 options" is enforced; if any of the six
    is missing, the corresponding local is never assigned and the wf call
    below raises UnboundLocalError — confirm intended.
    """
    try:
        o, a = getopt.getopt(sys.argv[1:], "n:d:s:f:a:l:")
    except getopt.GetoptError as err:
        print(err)
        print('waimea.py -n <directory> -d <base_directory> -s <subject> -f <freesurfer dir> -a <atlas> -l <labels>')
        sys.exit(2)
    if len(o) < 4:
        print('waimea.py -n <directory> -d <base_directory> -s <subject> -f <freesurfer dir> -a <atlas> -l <labels>')
        sys.exit(2)
    for opt, arg in o:
        if opt == '-n':
            wf_name = arg
        elif opt == '-d':
            base_dir = arg
        elif opt == '-s':
            sub = arg
        elif opt == '-f':
            fsdir = arg
        elif opt == '-a':
            atlas = arg
        elif opt == '-l':
            labels = arg
    wf = Lesion_extractor(wf_name=wf_name,
                          base_dir=base_dir,
                          subjects=[sub],
                          #main=main,
                          #acc=acc,
                          atlas=atlas,
                          fs_subjects_dir=fsdir,
                          labels=labels)
    # Log to the workflow's base dir and cap job-finish polling timeouts.
    config.update_config({'logging': {'log_directory': wf.base_dir,'log_to_file': True}})
    logging.update_logging(config)
    config.set('execution','job_finished_timeout','20.0')
    wf.config['execution'] = {'job_finished_timeout': '10.0'}
    try:
        wf.run()
    except:
        print('Error! Pipeline exited ')
        raise
if __name__ == "__main__":
    main()
| 1,813 | 599 |
# **********************************************************************
# Copyright (C) 2020 Johns Hopkins University Applied Physics Laboratory
#
# All Rights Reserved.
# For any other permission, please contact the Legal Office at JHU/APL.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# **********************************************************************
from flask import Flask
from shared.config import config
from shared.log import logger
from system.extensions import FlaskExtensions, JobManagerClient, DockerClient
from system.job_queue_manager import job_queue_watchdog
# Module-level aliases for the shared Flask extension singletons,
# initialised against the app in FlaskApp.__init__ below.
cors = FlaskExtensions.cors
mail = FlaskExtensions.mail
mongodb = FlaskExtensions.mongodb
jwt = FlaskExtensions.jwt
bcrypt = FlaskExtensions.bcrypt
class FlaskApp(object):
    # Application factory: builds the Flask app, binds every extension,
    # starts the job-queue watchdog and registers all blueprints.
    def __init__(self):
        self.app = Flask(__name__, static_folder=config.STATIC_DIR, static_url_path="")
        self.app.config.update(config.dict())
        # Extensions must be bound before any route touches them.
        bcrypt.init_app(self.app)
        jwt.init_app(self.app)
        mongodb.init_app(self.app)
        mail.init_app(self.app)
        cors.init_app(self.app)
        # Construct the docker/job-manager clients for their side effects;
        # presumably they register global singletons — confirm.
        DockerClient()
        JobManagerClient()
        job_queue_watchdog()
        self.register_routes()
    def register_routes(self):
        # Imports are local to avoid circular imports at module load time.
        from system.api.web import web_bp
        self.app.register_blueprint(web_bp)
        from system.api.info import info_bp
        self.app.register_blueprint(info_bp)
        from system.api.database import database_bp
        self.app.register_blueprint(database_bp)
        from system.api.jobs import jobs_bp
        self.app.register_blueprint(jobs_bp)
        from system.api.results import results_bp
        self.app.register_blueprint(results_bp)
| 2,208 | 648 |
# Constants
DEFAULT_TITLE = 'Khan Academy'  # fallback page/window title
HOME_DOMAIN = 'www.khanacademy.org'  # canonical site host
| 80 | 35 |
# ------------------------------------------------------------------------------
# Copyright (c) Abdurasul Rakhimov 24.2.2021.
# ------------------------------------------------------------------------------
import numpy as np
from overrides import overrides
class Dataset:
    """A float32 numpy point set loaded from an ``.npy`` file.

    Tracks its own size and dimensionality, and whether data is resident
    in memory.
    """

    def __init__(self, name, dataset_path):
        self.name = name
        self.dataset_path = dataset_path
        self.dimensions = 0
        self.size = 0
        self.data = None
        self.loaded = False

    def get_data(self):
        """Return the resident array (None when unloaded)."""
        return self.data

    def get_size(self):
        """Number of points currently resident."""
        return self.size

    def get_dimensions(self):
        """Dimensionality of each point."""
        return self.dimensions

    def set_size(self, size):
        self.size = size

    def set_dimensions(self, dimensions):
        self.dimensions = dimensions

    def load_dataset(self, amount=-1):
        """Load up to *amount* rows from ``dataset_path`` (-1 means all rows)."""
        if self.loaded:
            return self.data
        raw = np.array(np.load(self.dataset_path), dtype=np.float32)
        limit = raw.shape[0] if amount == -1 else amount
        # Clamp the requested row count into [0, total rows].
        self.data = raw[:max(0, min(raw.shape[0], limit))]
        self.dimensions = self.data.shape[1]
        self.size = self.data.shape[0]
        self.loaded = True
        return self.data

    def load_dataset_from_numpy(self, numpy_array):
        """Adopt an in-memory 2-D array as the dataset (no file access)."""
        self.data = numpy_array
        self.dimensions = self.data.shape[1]
        self.size = self.data.shape[0]
        self.loaded = True
        return self.data

    def unload_dataset(self):
        """Drop the resident array and mark the dataset unloaded."""
        self.data = None
        self.loaded = False

    def get_exact_query_results(self, queries, k, distance_function):
        """Brute-force k-NN: per query, the k closest (distance, index) pairs.

        Ties on distance break on the smaller index, as tuple ordering
        dictates.
        """
        answers = []
        for query in queries:
            ranked = sorted(
                (distance_function(self.data[row], query), row)
                for row in range(self.data.shape[0])
            )
            answers.append(ranked[:k])
        return answers
class BasicDataset(Dataset):
    # Thin concrete subclass of Dataset; adds no behavior of its own.
    def __init__(self, name, dataset_path):
        super().__init__(name, dataset_path)
| 1,950 | 584 |
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/master/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass
from typing import *
from omegaconf import MISSING
@dataclass
class UserConf:
    # Hydra structured config targeting configen.samples.user.User.
    # (File is configen-generated -- regenerate rather than hand-edit.)
    _target_: str = "configen.samples.user.User"
    age: int = MISSING
    name: str = MISSING
| 369 | 132 |
import time
import numpy as np
from freenect import sync_get_depth as get_depth, sync_get_video as get_video
class Kinect(object):
    """Offers access to rgb and depth from the real Kinect."""

    def __init__(self):
        pass

    def get_frame(self, record=False):
        """Grab one fresh (rgb, depth) frame pair from the device.

        If ``record`` is True the pair is also saved to disk via snapshot().
        format=4 -- presumably freenect's registered-depth mode; confirm
        against the freenect constants.
        """
        (depth, _) = get_depth(format=4)
        (rgb, _) = get_video()
        if record:
            self.snapshot(rgb, depth)
        return (rgb, depth)

    def snapshot(self, rgb, depth):
        """Persist an rgb/depth pair as .npy files under frames/.

        Filenames carry a millisecond timestamp so successive frames
        never collide.
        """
        import os
        # np.save would raise FileNotFoundError if frames/ does not exist.
        os.makedirs("frames", exist_ok=True)
        filename = "frames/frame-%d" % int(time.time() * 1000)
        filename_rgb = filename + "-rgb"
        filename_depth = filename + "-depth"
        np.save(filename_rgb, rgb)
        np.save(filename_depth, depth)
#!/usr/bin/env python
import flask
import requests
import lxml.html
import logging
app = flask.Flask(__name__)
# Module-level logger; logging is configured in the __main__ guard.
LOGGER = logging.getLogger(__name__)
# Base URL used both for fetching the front page and building comment links.
HN_BASE_URL = 'https://news.ycombinator.com/'
def has_virus(url):
    """Return True if the page at ``url`` mentions covid/virus or can't be vetted.

    Non-http(s) URLs, download failures, and timeouts are conservatively
    treated as "infected" so the caller filters them out.
    """
    if not url.startswith(('http://', 'https://')):
        return True
    try:
        # A timeout keeps one slow link from hanging the whole page render.
        page = requests.get(url, timeout=10).text.lower()
    except requests.RequestException:
        return True
    return any(word in page for word in ('covid', 'virus'))
@app.route('/')
def main():
    """Render the HN front page as an HTML list, dropping any story whose
    article or comment page mentions a virus (see has_virus)."""
    h = lxml.html.fromstring(requests.get(HN_BASE_URL).text)
    ret = '<ol>'
    # Each story row is a <tr id=...>; the first match is skipped.
    # NOTE(review): the "storylink" class is HN's old markup -- confirm it
    # still matches the current front page.
    for n, row in enumerate(h.xpath('//tr [@id]')[1:]):
        story = row.xpath('.//a [@class="storylink"]').pop()
        LOGGER.info('%d: %s', n, story.get('href'))
        # The comments link lives in the row immediately after the title row.
        c_row = row.getnext()
        comments = c_row.xpath('.//a [contains(@href, "item?id=")]')[-1]
        comments_url = HN_BASE_URL + comments.get('href')
        # Skip the story if either the article or its comment page is flagged.
        if has_virus(story.get('href')) or has_virus(comments_url):
            continue
        ret += f'''
        <li>
            <a href="{story.get("href")}">{story.text}</a>
            (<a href="{comments_url}">{comments.text}</a>)
        </li>'''
    return ret
if __name__ == '__main__':
    # Dev entry point: INFO logging, listen on all interfaces (default port).
    logging.basicConfig(level='INFO')
    app.run(host='0.0.0.0')
| 1,262 | 441 |
# Functions for project: NormativeNeuroDev_Longitudinal
# Linden Parkes, 2019
# lindenmp@seas.upenn.edu
from IPython.display import clear_output
import numpy as np
import scipy as sp
from scipy import stats
import pandas as pd
from statsmodels.stats import multitest
def get_cmap(which_type = 'qual1', num_classes = 8):
    """Return an (n, 3) array of RGB colors in [0, 1].

    Parameters
    ----------
    which_type : str
        Name of the palette (colorbrewer2-derived and custom sets).
    num_classes : int
        Maximum number of colors to return; palettes with fewer colors
        are returned whole.

    Raises
    ------
    ValueError
        If ``which_type`` is not a known palette name (the original code
        fell through to a NameError).

    The online tool, colorbrewer2, is copyright Cynthia Brewer, Mark Harrower
    and The Pennsylvania State University (http://colorbrewer2.org/).
    """
    if which_type == 'linden':
        cmap_base = np.array([[255,105,97],[97,168,255],[178,223,138],[117,112,179],[255,179,71]])
    elif which_type == 'pair':
        cmap_base = np.array([[124,230,199],[255,169,132]])
    elif which_type == 'qual1':
        cmap_base = np.array([[166,206,227],[31,120,180],[178,223,138],[51,160,44],[251,154,153],[227,26,28],
                            [253,191,111],[255,127,0],[202,178,214],[106,61,154],[255,255,153],[177,89,40]])
    elif which_type == 'qual2':
        cmap_base = np.array([[141,211,199],[255,255,179],[190,186,218],[251,128,114],[128,177,211],[253,180,98],
                            [179,222,105],[252,205,229],[217,217,217],[188,128,189],[204,235,197],[255,237,111]])
    elif which_type == 'seq_red':
        cmap_base = np.array([[255,245,240],[254,224,210],[252,187,161],[252,146,114],[251,106,74],
                            [239,59,44],[203,24,29],[165,15,21],[103,0,13]])
    elif which_type == 'seq_blu':
        cmap_base = np.array([[247,251,255],[222,235,247],[198,219,239],[158,202,225],[107,174,214],
                            [66,146,198],[33,113,181],[8,81,156],[8,48,107]])
    elif which_type == 'redblu_pair':
        cmap_base = np.array([[222,45,38],[49,130,189]])
    elif which_type == 'yeo17':
        cmap_base = np.array([[97,38,107], # VisCent
                            [194,33,39], # VisPeri
                            [79,130,165], # SomMotA
                            [44,181,140], # SomMotB
                            [75,148,72], # DorsAttnA
                            [23,116,62], # DorsAttnB
                            [149,77,158], # SalVentAttnA
                            [222,130,177], # SalVentAttnB
                            [75,87,61], # LimbicA
                            [149,166,110], # LimbicB
                            [210,135,47], # ContA
                            [132,48,73], # ContB
                            [92,107,131], # ContC
                            [218,221,50], # DefaultA
                            [175,49,69], # DefaultB
                            [41,38,99], # DefaultC
                            [53,75,158] # TempPar
                            ])
    elif which_type == 'yeo17_downsampled':
        cmap_base = np.array([[97,38,107], # VisCent
                            [79,130,165], # SomMotA
                            [75,148,72], # DorsAttnA
                            [149,77,158], # SalVentAttnA
                            [75,87,61], # LimbicA
                            [210,135,47], # ContA
                            [218,221,50], # DefaultA
                            [53,75,158] # TempPar
                            ])
    else:
        # Previously an unknown name crashed with NameError; fail clearly.
        raise ValueError("unknown which_type: %r" % (which_type,))
    # Truncate palettes that have more colors than requested.
    if cmap_base.shape[0] > num_classes: cmap = cmap_base[0:num_classes]
    else: cmap = cmap_base
    cmap = cmap / 255
    return cmap
def update_progress(progress, my_str = ''):
    """Print a 20-character text progress bar, clearing the previous output.

    ``progress`` is a fraction in [0, 1]; ints are coerced to float and any
    non-numeric value is treated as 0.
    """
    bar_length = 20
    # Coerce ints to float; anything still non-float is treated as "no progress".
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
    # Clamp into [0, 1].
    if progress < 0:
        progress = 0
    if progress >= 1:
        progress = 1
    filled = int(round(bar_length * progress))
    bar = "#" * filled + "-" * (bar_length - filled)
    clear_output(wait = True)
    print(my_str + " Progress: [{0}] {1:.1f}%".format(bar, progress * 100))
def get_synth_cov(df, cov = 'scanageYears', stp = 1):
    """Build a synthetic covariate column spanning the observed range of df[cov].

    Returns an (n, 1) array of values from min to max (exclusive) in steps
    of ``stp``, suitable as model input for normative predictions.
    """
    lower, upper = np.min(df[cov]), np.max(df[cov])
    return np.arange(lower, upper, stp).reshape(-1, 1)
def run_corr(df_X, df_y, typ = 'spearmanr'):
    """Correlate df_X against each column of df_y.

    Returns a DataFrame indexed by df_y's columns with 'coef' and 'p'
    columns. Unknown ``typ`` values leave the row as NaN (as before).
    """
    df_corr = pd.DataFrame(index = df_y.columns, columns = ['coef', 'p'])
    corr_funcs = {'spearmanr': sp.stats.spearmanr, 'pearsonr': sp.stats.pearsonr}
    corr_func = corr_funcs.get(typ)
    for col in df_y.columns:
        if corr_func is not None:
            df_corr.loc[col] = corr_func(df_X, df_y[col])
    return df_corr
def get_fdr_p(p_vals):
    """Benjamini-Hochberg FDR-correct a flat array of p-values (alpha=0.05)."""
    # multipletests returns (reject, pvals_corrected, alphacSidak, alphacBonf);
    # only the corrected p-values are needed here.
    _, p_fdr, _, _ = multitest.multipletests(p_vals, alpha = 0.05, method = 'fdr_bh')
    return p_fdr
def get_fdr_p_df(p_vals):
    """FDR-correct a DataFrame of p-values, preserving its index and columns."""
    corrected = get_fdr_p(p_vals.values.flatten()).reshape(p_vals.shape)
    return pd.DataFrame(corrected, index = p_vals.index, columns = p_vals.columns)
def mark_outliers(x, thresh = 3, c = 1.4826):
    """Flag elements whose deviation from the median exceeds thresh scaled MADs.

    ``c`` = 1.4826 rescales the MAD to be consistent with the standard
    deviation under normality. Returns a boolean array.
    """
    center = np.median(x)
    mad = np.median(np.abs(x - center)) / c
    bound = mad * thresh
    return np.logical_or(x > center + bound, x < center - bound)
def perc_dev(Z, thr = 2.6, sign = 'abs'):
    """Percentage of supra-threshold deviations per subject.

    Parameters
    ----------
    Z : array, shape (n_subjects, n_features)
        Deviation (z) scores; features on dim 1, subjects on dim 0.
    thr : float
        Threshold applied to the scores.
    sign : {'abs', 'pos', 'neg'}
        Which tail(s) to count.

    Raises
    ------
    ValueError
        On an unknown ``sign`` (previously this fell through to NameError).
    """
    if sign == 'abs':
        bol = np.abs(Z) > thr
    elif sign == 'pos':
        bol = Z > thr
    elif sign == 'neg':
        bol = Z < -thr
    else:
        raise ValueError("sign must be 'abs', 'pos' or 'neg', got %r" % (sign,))
    # count the number that have supra-threshold z-stats and store as percentage
    Z_perc = np.sum(bol, axis = 1) / Z.shape[1] * 100
    return Z_perc
def evd(Z, thr = 0.01, sign = 'abs'):
    """Extreme value statistic: 10%-trimmed mean of each subject's most
    deviant ``thr`` fraction of features.

    Z : array, shape (n_subjects, n_features); features on dim 1.
    sign : {'abs', 'pos', 'neg'} selects which tail is considered extreme.
    """
    m = Z.shape
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int() is the documented replacement.
    l = int(m[1] * thr)  # assumes features are on dim 1, subjs on dim 0
    if sign == 'abs':
        T = np.sort(np.abs(Z), axis = 1)[:, m[1] - l:m[1]]
    elif sign == 'pos':
        T = np.sort(Z, axis = 1)[:, m[1] - l:m[1]]
    elif sign == 'neg':
        T = np.sort(Z, axis = 1)[:, :l]
    else:
        raise ValueError("sign must be 'abs', 'pos' or 'neg', got %r" % (sign,))
    E = sp.stats.trim_mean(T, 0.1, axis = 1)
    return E
def summarise_network(df, roi_loc, network_idx, metrics = ('ct',), method = 'mean'):
    """Aggregate region-level columns of df into network-level summaries.

    For each metric prefix in ``metrics``, the matching columns are grouped
    by their network assignment (``network_idx``) and reduced with ``method``
    ('mean', 'median' or 'max'). For 'ct', only regions where roi_loc == 1
    are grouped -- presumably the cortical subset; confirm against the atlas
    coding. Output columns are named '<metric>_<network id>'.

    NOTE(review): groupby(..., axis=1) is deprecated in pandas >= 2.1;
    an unknown ``method`` would leave df_tmp unset (NameError) -- confirm
    callers only pass the three supported values.
    """
    df_out = pd.DataFrame()
    for metric in metrics:
        if metric == 'ct':
            # Restrict grouping to roi_loc == 1 regions for this metric.
            if method == 'median': df_tmp = df.filter(regex = metric).groupby(network_idx[roi_loc == 1], axis = 1).median()
            if method == 'mean': df_tmp = df.filter(regex = metric).groupby(network_idx[roi_loc == 1], axis = 1).mean()
            if method == 'max': df_tmp = df.filter(regex = metric).groupby(network_idx[roi_loc == 1], axis = 1).max()
            my_list = [metric + '_' + str(i) for i in np.unique(network_idx[roi_loc == 1]).astype(int)]
            df_tmp.columns = my_list
        else:
            # All regions participate for other metrics.
            if method == 'median': df_tmp = df.filter(regex = metric).groupby(network_idx, axis = 1).median()
            if method == 'mean': df_tmp = df.filter(regex = metric).groupby(network_idx, axis = 1).mean()
            if method == 'max': df_tmp = df.filter(regex = metric).groupby(network_idx, axis = 1).max()
            my_list = [metric + '_' + str(i) for i in np.unique(network_idx).astype(int)]
            df_tmp.columns = my_list
        df_out = pd.concat((df_out, df_tmp), axis = 1)
    return df_out
| 7,134 | 2,977 |
from sympy.functions import sqrt, sign, root
from sympy.core import S, Wild, sympify, Mul, Add, Expr
from sympy.core.function import expand_multinomial, expand_mul
from sympy.core.symbol import Dummy
from sympy.polys import Poly, PolynomialError
from sympy.core.function import count_ops
def _mexpand(expr):
    """Fully expand ``expr``: multinomial powers first, then products over sums."""
    partially_expanded = expand_multinomial(expr)
    return expand_mul(partially_expanded)
def is_sqrt(expr):
    """Return True if expr is a sqrt, otherwise False."""
    if not expr.is_Pow:
        return False
    exponent = expr.exp
    # S.Half is a singleton, so an identity check is safe here.
    return exponent.is_Rational and abs(exponent) is S.Half
def sqrt_depth(p):
    """Return the maximum depth of any square root argument of p.
    >>> from sympy.functions.elementary.miscellaneous import sqrt
    >>> from sympy.simplify.sqrtdenest import sqrt_depth
    Neither of these square roots contains any other square roots
    so the depth is 1:
    >>> sqrt_depth(1 + sqrt(2)*(1 + sqrt(3)))
    1
    The sqrt(3) is contained within a square root so the depth is
    2:
    >>> sqrt_depth(1 + sqrt(2)*sqrt(1 + sqrt(3)))
    2
    """
    if p.is_Atom:
        return 0
    # A sqrt is a Pow, never an Add/Mul, so checking it first is safe.
    if is_sqrt(p):
        return sqrt_depth(p.base) + 1
    if p.is_Add or p.is_Mul:
        return max(sqrt_depth(arg) for arg in p.args)
    return 0
def is_algebraic(p):
    """Return True if p is comprised of only Rationals or square roots
    of Rationals and algebraic operations.
    Examples
    ========
    >>> from sympy.functions.elementary.miscellaneous import sqrt
    >>> from sympy.simplify.sqrtdenest import is_algebraic
    >>> from sympy import cos
    >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*sqrt(2))))
    True
    >>> is_algebraic(sqrt(2)*(3/(sqrt(7) + sqrt(5)*cos(2))))
    False
    """
    if p.is_Rational:
        return True
    if p.is_Atom:
        return False
    # Add/Mul are never Pow, so the order of these checks is interchangeable.
    if p.is_Add or p.is_Mul:
        return all(is_algebraic(arg) for arg in p.args)
    if is_sqrt(p) or (p.is_Pow and p.exp.is_Integer):
        return is_algebraic(p.base)
    return False
def subsets(n):
    """
    Returns all possible subsets of the set (0, 1, ..., n-1) except the
    empty set, listed in reversed lexicographical order according to binary
    representation, so that the case of the fourth root is treated last.
    Examples
    ========
    >>> from sympy.simplify.sqrtdenest import subsets
    >>> subsets(2)
    [[1, 0], [0, 1], [1, 1]]
    """
    # The original special-cased n == 2 and n == 3, but the recursion below
    # produces exactly the same lists from the n == 1 base case, so those
    # branches were redundant and have been removed.
    if n == 1:
        return [[1]]
    smaller = subsets(n - 1)
    # Subsets excluding element n-1, then {n-1} alone, then subsets including it.
    with_zero = [s + [0] for s in smaller]
    with_one = [s + [1] for s in smaller]
    return with_zero + [[0] * (n - 1) + [1]] + with_one
def sqrtdenest(expr, max_iter=3):
    """Denests sqrts in an expression that contain other square roots
    if possible, otherwise returns the expr unchanged. This is based on the
    algorithms of [1].
    Examples
    ========
    >>> from sympy.simplify.sqrtdenest import sqrtdenest
    >>> from sympy import sqrt
    >>> sqrtdenest(sqrt(5 + 2 * sqrt(6)))
    sqrt(2) + sqrt(3)
    See Also
    ========
    sympy.solvers.solvers.unrad
    References
    ==========
    [1] http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf
    [2] D. J. Jeffrey and A. D. Rich, 'Symplifying Square Roots of Square Roots
    by Denesting' (available at http://www.cybertester.com/data/denest.pdf)
    """
    expr = expand_mul(sympify(expr))
    # Iterate until a fixed point or the iteration budget is exhausted.
    for _ in range(max_iter):
        denested = _sqrtdenest0(expr)
        if denested == expr:
            break
        expr = denested
    return expr
def _sqrt_match(p):
    """Return [a, b, r] for p.match(a + b*sqrt(r)) where, in addition to
    matching, sqrt(r) also has the maximal sqrt_depth among addends of p.

    Returns the empty list when no such decomposition exists (e.g. all
    addends are depth 0, or a non-Add p has no sqrt factor).
    Examples
    ========
    >>> from sympy.functions.elementary.miscellaneous import sqrt
    >>> from sympy.simplify.sqrtdenest import _sqrt_match
    >>> _sqrt_match(1 + sqrt(2) + sqrt(2)*sqrt(3) + 2*sqrt(1+sqrt(5)))
    [1 + sqrt(2) + sqrt(6), 2, 1 + sqrt(5)]
    """
    p = _mexpand(p)
    if p.is_Number:
        res = (p, S.Zero, S.Zero)
    elif p.is_Add:
        pargs = list(p.args)
        # to make the process canonical, the argument is included in the tuple
        # so when the max is selected, it will be the largest arg having a
        # given depth
        v = [(sqrt_depth(x), x, i) for i, x in enumerate(pargs)]
        nmax = max(v)
        if nmax[0] == 0:
            # No addend contains a sqrt at all: nothing to match.
            res = []
        else:
            # Pull out the deepest addend; the rest becomes 'a'.
            depth, _, i = nmax
            r = pargs.pop(i)
            a = Add._from_args(pargs)
            b = S.One
            if r.is_Mul:
                # Split the selected term into shallow factors (-> b) and
                # the maximal-depth sqrt factors (-> r).
                bv = []
                rv = []
                for x in r.args:
                    if sqrt_depth(x) < depth:
                        bv.append(x)
                    else:
                        rv.append(x)
                b = Mul._from_args(bv)
                r = Mul._from_args(rv)
            res = (a, b, r**2)
    else:
        # Non-Add: match b*sqrt(r) with a = 0.
        b, r = p.as_coeff_Mul()
        if is_sqrt(r):
            res = (S.Zero, b, r**2)
        else:
            res = []
    return list(res)
# Internal control-flow exception: raised by _sqrtdenest_rec when a
# denesting attempt cannot make progress, caught by its callers.
class SqrtdenestStopIteration(StopIteration):
    pass
def _sqrtdenest0(expr):
    """Returns expr after denesting its arguments."""
    if is_sqrt(expr):
        n, d = expr.as_numer_denom()
        if d is S.One:  # n is a square root
            if n.base.is_Add:
                args = n.base.args
                # Sums of three or more surds with integer squares get the
                # dedicated recursive treatment first.
                if len(args) > 2 and all((x**2).is_Integer for x in args):
                    try:
                        return _sqrtdenest_rec(n)
                    except SqrtdenestStopIteration:
                        pass
                # Denest each addend, rebuild the sqrt, then try the
                # simpler single-step denesters.
                expr = sqrt(_mexpand(Add(*[_sqrtdenest0(x) for x in args])))
            return _sqrtdenest1(expr)
        else:
            # Denest numerator and denominator independently.
            n, d = [_sqrtdenest0(i) for i in (n, d)]
            return n/d
    if isinstance(expr, Expr):
        args = expr.args
        if args:
            # Recurse into the arguments of any other expression node.
            return expr.func(*[_sqrtdenest0(a) for a in args])
    return expr
def _sqrtdenest_rec(expr):
    """Helper that denests the square root of three or more surds.
    It returns the denested expression; if it cannot be denested it
    throws SqrtdenestStopIteration
    Algorithm: expr.base is in the extension Q_m = Q(sqrt(r_1),..,sqrt(r_k));
    split expr.base = a + b*sqrt(r_k), where `a` and `b` are on
    Q_(m-1) = Q(sqrt(r_1),..,sqrt(r_(k-1))); then a**2 - b**2*r_k is
    on Q_(m-1); denest sqrt(a**2 - b**2*r_k) and so on.
    See [1], section 6.
    Examples
    ========
    >>> from sympy import sqrt
    >>> from sympy.simplify.sqrtdenest import _sqrtdenest_rec
    >>> _sqrtdenest_rec(sqrt(-72*sqrt(2) + 158*sqrt(5) + 498))
    -sqrt(10) + sqrt(2) + 9 + 9*sqrt(5)
    >>> w=-6*sqrt(55)-6*sqrt(35)-2*sqrt(22)-2*sqrt(14)+2*sqrt(77)+6*sqrt(10)+65
    >>> _sqrtdenest_rec(sqrt(w))
    -sqrt(11) - sqrt(7) + sqrt(2) + 3*sqrt(5)
    """
    from sympy.simplify.simplify import radsimp, split_surds, rad_rationalize
    # Factor out I for a negative radicand and recurse on the positive part.
    if expr.base < 0:
        return sqrt(-1)*_sqrtdenest_rec(sqrt(-expr.base))
    a, b = split_surds(expr.base)
    if a < b:
        a, b = b, a
    c2 = _mexpand(a**2 - b**2)
    if len(c2.args) > 2:
        # c2 is itself a sum of several surds: recurse one level deeper.
        a1, b1 = split_surds(c2)
        if a1 < b1:
            a1, b1 = b1, a1
        c2_1 = _mexpand(a1**2 - b1**2)
        c_1 = _sqrtdenest_rec(sqrt(c2_1))
        d_1 = _sqrtdenest_rec(sqrt(a1 + c_1))
        num, den = rad_rationalize(b1, d_1)
        c = _mexpand(d_1/sqrt(2) + num/(den*sqrt(2)))
    else:
        c = _sqrtdenest1(sqrt(c2))
    # If the intermediate still nests, this path cannot denest the input.
    if sqrt_depth(c) > 1:
        raise SqrtdenestStopIteration
    ac = a + c
    # Give up when the candidate is no simpler than what we started with.
    if len(ac.args) >= len(expr.args):
        if count_ops(ac) >= count_ops(expr.base):
            raise SqrtdenestStopIteration
    d = sqrtdenest(sqrt(ac))
    if sqrt_depth(d) > 1:
        raise SqrtdenestStopIteration
    num, den = rad_rationalize(b, d)
    r = d/sqrt(2) + num/(den*sqrt(2))
    r = radsimp(r)
    return _mexpand(r)
def _sqrtdenest1(expr):
    """Return denested expr after denesting with simpler methods or, that
    failing, using the denester."""
    from sympy.simplify.simplify import radsimp
    if not is_sqrt(expr):
        return expr
    a = expr.base
    if a.is_Atom:
        return expr
    val = _sqrt_match(a)
    if not val:
        return expr
    a, b, r = val
    # try a quick numeric denesting
    d2 = _mexpand(a**2 - b**2*r)
    if d2.is_Rational:
        if d2.is_positive:
            z = _sqrt_numeric_denest(a, b, r, d2)
            if z is not None:
                return z
        else:
            # fourth root case
            # sqrtdenest(sqrt(3 + 2*sqrt(3))) =
            # sqrt(2)*3**(1/4)/2 + sqrt(2)*3**(3/4)/2
            dr2 = _mexpand(-d2*r)
            dr = sqrt(dr2)
            if dr.is_Rational:
                z = _sqrt_numeric_denest(_mexpand(b*r), a, r, dr2)
                if z is not None:
                    return z/root(r, 4)
    else:
        # Symbolic (non-rational discriminant) attempt.
        z = _sqrt_symbolic_denest(a, b, r)
        if z is not None:
            return z
    # The general denester only applies to algebraic expressions.
    if not is_algebraic(expr):
        return expr
    # now call to the denester
    av0 = [a, b, r, d2]
    z = _denester([radsimp(expr**2)], av0, 0, sqrt_depth(expr) - 1)[0]
    # av0[1] is set to None by _denester to signal "cannot denest".
    if av0[1] is None:
        return expr
    if z is not None:
        return z
    return expr
def _sqrt_symbolic_denest(a, b, r):
    """Given an expression, sqrt(a + b*sqrt(r)), return the denested
    expression or None.
    Algorithm:
    If r = ra + rb*sqrt(rr), try replacing sqrt(rr) in ``a`` with
    (y**2 - ra)/rb, and if the result is a quadratic, ca*y**2 + cb*y + cc, and
    (cb + b)**2 - 4*ca*cc is 0, then sqrt(a + b*sqrt(r)) can be rewritten as
    sqrt(ca*(sqrt(r) + (cb + b)/(2*ca))**2).
    Examples
    ========
    >>> from sympy.simplify.sqrtdenest import _sqrt_symbolic_denest, sqrtdenest
    >>> from sympy import sqrt, Symbol, Poly
    >>> from sympy.abc import x
    >>> a, b, r = 16 - 2*sqrt(29), 2, -10*sqrt(29) + 55
    >>> _sqrt_symbolic_denest(a, b, r)
    sqrt(-2*sqrt(29) + 11) + sqrt(5)
    If the expression is numeric, it will be simplified:
    >>> w = sqrt(sqrt(sqrt(3) + 1) + 1) + 1 + sqrt(2)
    >>> sqrtdenest(sqrt((w**2).expand()))
    1 + sqrt(2) + sqrt(1 + sqrt(1 + sqrt(3)))
    Otherwise, it will only be simplified if assumptions allow:
    >>> w = w.subs(sqrt(3), sqrt(x + 3))
    >>> sqrtdenest(sqrt((w**2).expand()))
    sqrt((sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2))**2)
    Notice that the argument of the sqrt is a square. If x is made positive
    then the sqrt of the square is resolved:
    >>> _.subs(x, Symbol('x', positive=True))
    sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2)
    """
    a, b, r = sympify([a, b, r])
    rval = _sqrt_match(r)
    if not rval:
        return None
    ra, rb, rr = rval
    if rb:
        # Substitute sqrt(rr) -> (y**2 - ra)/rb and check for a perfect
        # square quadratic in y, per the algorithm above.
        y = Dummy('y', positive=True)
        try:
            newa = Poly(a.subs(sqrt(rr), (y**2 - ra)/rb), y)
        except PolynomialError:
            return None
        if newa.degree() == 2:
            ca, cb, cc = newa.all_coeffs()
            cb += b
            if _mexpand(cb**2 - 4*ca*cc).equals(0):
                z = sqrt(ca*(sqrt(r) + cb/(2*ca))**2)
                if z.is_number:
                    z = _mexpand(Mul._from_args(z.as_content_primitive()))
                return z
def _sqrt_numeric_denest(a, b, r, d2):
    """Helper that denest expr = a + b*sqrt(r), with d2 = a**2 - b**2*r > 0
    or returns None if not denested.
    """
    from sympy.simplify.simplify import radsimp
    depthr = sqrt_depth(r)
    d = sqrt(d2)
    vad = a + d
    # sqrt_depth(res) <= sqrt_depth(vad) + 1
    # sqrt_depth(expr) = depthr + 2
    # there is denesting if sqrt_depth(vad)+1 < depthr + 2
    # if vad**2 is Number there is a fourth root
    if sqrt_depth(vad) < depthr + 1 or (vad**2).is_Rational:
        vad1 = radsimp(1/vad)
        # sqrt(a + b*sqrt(r)) = sqrt((a+d)/2) + sign(b)*sqrt(b**2*r/(2*(a+d)))
        return (sqrt(vad/2) + sign(b)*sqrt((b**2*r*vad1/2).expand())).expand()
def _denester(nested, av0, h, max_depth_level):
    """Denests a list of expressions that contain nested square roots.
    Algorithm based on <http://www.almaden.ibm.com/cs/people/fagin/symb85.pdf>.
    It is assumed that all of the elements of 'nested' share the same
    bottom-level radicand. (This is stated in the paper, on page 177, in
    the paragraph immediately preceding the algorithm.)
    When evaluating all of the arguments in parallel, the bottom-level
    radicand only needs to be denested once. This means that calling
    _denester with x arguments results in a recursive invocation with x+1
    arguments; hence _denester has polynomial complexity.
    However, if the arguments were evaluated separately, each call would
    result in two recursive invocations, and the algorithm would have
    exponential complexity.
    This is discussed in the paper in the middle paragraph of page 179.

    Setting av0[1] = None signals the caller that denesting is impossible.
    """
    from sympy.simplify.simplify import radsimp
    if h > max_depth_level:
        return None, None
    if av0[1] is None:
        return None, None
    if (av0[0] is None and
            all(n.is_Number for n in nested)):  # no arguments are nested
        for f in subsets(len(nested)):  # test subset 'f' of nested
            p = _mexpand(Mul(*[nested[i] for i in range(len(f)) if f[i]]))
            if f.count(1) > 1 and f[-1]:
                p = -p
            sqp = sqrt(p)
            if sqp.is_Rational:
                return sqp, f  # got a perfect square so return its square root.
        # Otherwise, return the radicand from the previous invocation.
        return sqrt(nested[-1]), [0]*len(nested)
    else:
        R = None
        if av0[0] is not None:
            values = [av0[:2]]
            R = av0[2]
            nested2 = [av0[3], R]
            av0[0] = None
        else:
            # BUG FIX: materialize into a list. On Python 3, filter()
            # returns a one-shot iterator; the original code iterated
            # `values` here, iterated it again to build nested2 (getting
            # an empty sequence), and later indexed values[-1].
            values = list(filter(None, [_sqrt_match(expr) for expr in nested]))
            for v in values:
                if v[2]:  # Since if b=0, r is not defined
                    if R is not None:
                        if R != v[2]:
                            av0[1] = None
                            return None, None
                    else:
                        R = v[2]
            if R is None:
                # return the radicand from the previous invocation
                return sqrt(nested[-1]), [0]*len(nested)
            nested2 = [_mexpand(v[0]**2) -
                       _mexpand(R*v[1]**2) for v in values] + [R]
        d, f = _denester(nested2, av0, h + 1, max_depth_level)
        if not f:
            return None, None
        if not any(f[i] for i in range(len(nested))):
            v = values[-1]
            return sqrt(v[0] + v[1]*d), f
        else:
            p = Mul(*[nested[i] for i in range(len(nested)) if f[i]])
            v = _sqrt_match(p)
            if 1 in f and f.index(1) < len(nested) - 1 and f[len(nested) - 1]:
                v[0] = -v[0]
                v[1] = -v[1]
            if not f[len(nested)]:  # Solution denests with square roots
                vad = _mexpand(v[0] + d)
                if vad <= 0:
                    # return the radicand from the previous invocation.
                    return sqrt(nested[-1]), [0]*len(nested)
                if not(sqrt_depth(vad) < sqrt_depth(R) + 1 or
                       (vad**2).is_Number):
                    av0[1] = None
                    return None, None
                vad1 = radsimp(1/vad)
                return _mexpand(sqrt(vad/2) +
                                sign(v[1])*sqrt(_mexpand(v[1]**2*R*vad1/2))), f
            else:  # Solution requires a fourth root
                s2 = _mexpand(v[1]*R) + d
                if s2 <= 0:
                    return sqrt(nested[-1]), [0]*len(nested)
                FR, s = root(_mexpand(R), 4), sqrt(s2)
                return _mexpand(s/(sqrt(2)*FR) + v[0]*FR/(sqrt(2)*s)), f
| 15,808 | 5,730 |
import datetime
import os
import shutil
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.db import transaction, IntegrityError
from django.db.models import Q, F
from django.http import FileResponse
from django.utils.encoding import escape_uri_path
from django.utils.translation import gettext as _
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet, GenericViewSet
from constents import DocAvailableChoices, RepoTypeChoices, UserTypeChoices
from modules.account.serializers import UserInfoSerializer
from modules.doc.models import Doc, DocVersion, DocCollaborator, Comment
from modules.doc.permissions import DocManagePermission, DocCommonPermission
from modules.doc.serializers import (
DocCommonSerializer,
DocListSerializer,
DocUpdateSerializer,
DocVersionSerializer,
DocPublishChartSerializer,
)
from modules.repo.models import Repo, RepoUser
from modules.repo.serializers import RepoSerializer
from utils.authenticators import SessionAuthenticate
from utils.exceptions import Error404, ParamsNotFound, UserNotExist, OperationError
from utils.paginations import NumPagination
from utils.throttlers import DocSearchThrottle
from utils.viewsets import ThrottleAPIView
USER_MODEL = get_user_model()
class DocManageView(ModelViewSet):
    """Article management entry point: owner-side CRUD, collaborator
    management, edit locking and Markdown export."""
    queryset = Doc.objects.filter(is_deleted=False)
    serializer_class = DocCommonSerializer
    permission_classes = [
        DocManagePermission,
    ]
    def perform_create(self, serializer):
        # Return the saved instance so create() can read its id.
        return serializer.save()
    def list(self, request, *args, **kwargs):
        """List the requesting user's own articles."""
        self.serializer_class = DocListSerializer
        # Fetch all articles created by the current user
        sql = (
            "SELECT d.*, r.name 'repo_name' FROM `doc_doc` d "
            "JOIN `repo_repo` r ON d.repo_id=r.id "
            "JOIN `auth_user` au ON au.uid=d.creator "
            "WHERE d.creator=%s AND NOT d.is_deleted "
            "{} "
            "ORDER BY d.id DESC;"
        )
        # Optional keyword search on the title
        search_key = request.GET.get("searchKey", "")
        if search_key:
            sql = sql.format("AND d.title like %s")
            # NOTE(review): the value becomes a literal "%%key%%" LIKE
            # pattern; "%%" matches like "%", so this works but a single
            # "%" on each side would be equivalent.
            search_key = f"%%{search_key}%%"
            self.queryset = self.queryset.raw(sql, [request.user.uid, search_key])
        else:
            sql = sql.format("")
            self.queryset = self.queryset.raw(sql, [request.user.uid])
        return super().list(request, *args, **kwargs)
    def create(self, request, *args, **kwargs):
        """Create a new article and snapshot its first version atomically."""
        request.data["creator"] = request.user.uid
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        with transaction.atomic():
            instance = self.perform_create(serializer)
            DocVersion.objects.create(**DocVersionSerializer(instance).data)
        return Response({"id": instance.id})
    def update(self, request, *args, **kwargs):
        """Update an article and store a new version snapshot atomically."""
        partial = kwargs.pop("partial", False)
        instance = self.get_object()
        serializer = DocUpdateSerializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        with transaction.atomic():
            serializer.save(update_by=request.user.uid)
            DocVersion.objects.create(**DocVersionSerializer(instance).data)
        return Response({"id": instance.id})
    def destroy(self, request, *args, **kwargs):
        # NOTE(review): delegates to perform_destroy; whether this is a
        # hard delete or a soft delete depends on Doc.delete() -- confirm.
        instance = self.get_object()
        self.perform_destroy(instance)
        return Response()
    @action(detail=True, methods=["GET"])
    def list_collaborator(self, request, *args, **kwargs):
        """List the collaborators of an article."""
        instance = self.get_object()
        sql = (
            "SELECT au.* "
            "FROM `doc_collaborator` dc "
            "JOIN `doc_doc` dd ON dd.id = dc.doc_id AND dd.id = %s "
            "JOIN `auth_user` au on dc.uid = au.uid;"
        )
        collaborators = USER_MODEL.objects.raw(sql, [instance.id])
        serializer = UserInfoSerializer(collaborators, many=True)
        return Response(serializer.data)
    @action(detail=True, methods=["POST"])
    def add_collaborator(self, request, *args, **kwargs):
        """Add a collaborator to an article."""
        instance = self.get_object()
        uid = request.data.get("uid")
        # Missing uid is invalid; the owner cannot add themselves.
        if not uid or uid == request.user.uid:
            raise OperationError()
        try:
            DocCollaborator.objects.create(doc_id=instance.id, uid=uid)
        except IntegrityError:
            # Unique constraint hit: already a collaborator.
            raise OperationError(_("已添加该用户为协作者,请勿重复添加"))
        return Response()
    @action(detail=True, methods=["POST"])
    def remove_collaborator(self, request, *args, **kwargs):
        """Remove a collaborator from an article."""
        instance = self.get_object()
        uid = request.data.get("uid")
        if not uid or uid == request.user.uid:
            raise OperationError()
        DocCollaborator.objects.filter(doc_id=instance.id, uid=uid).delete()
        return Response()
    @action(detail=True, methods=["GET"])
    def edit_status(self, request, *args, **kwargs):
        """Claim or refresh the 60-second "being edited" lock for an article."""
        instance = self.get_object()
        cache_key = f"{self.__class__.__name__}:{self.action}:{instance.id}"
        uid = cache.get(cache_key)
        # Grant when the lock is free or already held by this user.
        if uid is None or uid == request.user.uid:
            cache.set(cache_key, request.user.uid, 60)
            return Response(True)
        else:
            return Response(False)
    @action(detail=True, methods=["GET"])
    def export(self, request, *args, **kwargs):
        """Export an article (with its comments appended) as a Markdown download."""
        instance = self.get_object()
        sql = (
            "SELECT dc.*, au.username FROM `doc_comment` dc "
            "JOIN `auth_user` au ON au.uid=dc.creator "
            "WHERE dc.doc_id=%s AND NOT dc.is_deleted "
            "ORDER BY dc.id DESC;"
        )
        comments = Comment.objects.raw(sql, [instance.id])
        # Per-user, per-doc scratch directory, recreated from scratch.
        file_dir = os.path.join(
            settings.BASE_DIR, "tmp", "doc", request.user.uid, str(instance.id)
        )
        if os.path.exists(file_dir):
            shutil.rmtree(file_dir)
        os.makedirs(file_dir)
        # Strip characters that would break the filename/path.
        filename = "{}.md".format(instance.title.replace(" ", "").replace("/", ""))
        file_path = os.path.join(file_dir, filename)
        with open(file_path, "w", encoding="utf-8") as file:
            file.write(instance.content)
            # Append each comment separated by a horizontal rule.
            for comment in comments:
                file.write("\n\n---\n\n")
                file.write(comment.content)
        # FileResponse takes ownership of the handle and closes it.
        file = open(file_path, "rb")
        response = FileResponse(file)
        response["Content-Type"] = "application/octet-stream"
        response[
            "Content-Disposition"
        ] = f"attachment; filename={escape_uri_path(filename)}"
        return response
class DocCommonView(GenericViewSet):
    """Common read-side article entry point: repo listing, article detail,
    collaborator check and pinned articles."""
    queryset = Doc.objects.filter(is_deleted=False, is_publish=True)
    serializer_class = DocListSerializer
    permission_classes = [DocCommonPermission]
    authentication_classes = [SessionAuthenticate]
    def list(self, request, *args, **kwargs):
        """List a repo's articles visible to the requesting user."""
        repo_id = request.GET.get("repo_id", None)
        # Bail out immediately when the parameter is missing
        if repo_id is None:
            raise Error404()
        # Make sure the target repo exists (and is not soft-deleted)
        try:
            Repo.objects.get(id=repo_id, is_deleted=False)
        except Repo.DoesNotExist:
            raise Error404()
        # The repo's articles that are public or owned by the requester;
        # pinned articles are excluded here (served by load_pin_doc instead).
        sql = (
            "SELECT d.*, au.username creator_name, r.name repo_name "
            "FROM `doc_doc` d "
            "JOIN `repo_repo` r ON r.id=d.repo_id "
            "LEFT JOIN `doc_pin` dp ON dp.doc_id=d.id AND dp.in_use "
            "LEFT JOIN `auth_user` au ON au.uid=d.creator "
            "WHERE NOT d.`is_deleted` AND d.`is_publish` "
            "AND dp.in_use IS NULL "
            "AND d.repo_id = %s "
            "AND (d.available = %s OR d.creator = %s) "
            "AND d.title like %s "
            "ORDER BY d.id DESC"
        )
        # Empty search key degenerates to a match-everything LIKE pattern.
        search_key = request.GET.get("searchKey")
        search_key = f"%%{search_key}%%" if search_key else "%%"
        queryset = self.queryset.raw(
            sql, [repo_id, DocAvailableChoices.PUBLIC, request.user.uid, search_key]
        )
        page = self.paginate_queryset(queryset)
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
    def retrieve(self, request, *args, **kwargs):
        """Fetch article detail, bumping the page-view counter."""
        instance = self.get_object()
        # F() expression avoids a read-modify-write race on the counter.
        Doc.objects.filter(id=instance.id).update(pv=F("pv") + 1)
        instance.pv += 1  # keep the in-memory copy consistent for serialization
        serializer = DocCommonSerializer(instance)
        return Response(serializer.data)
    @action(detail=True, methods=["GET"])
    def is_collaborator(self, request, *args, **kwargs):
        """Check whether the requester is a collaborator on the article."""
        instance = self.get_object()
        # NOTE(review): success returns an empty body while failure returns
        # {"result": False}; the client presumably keys off "result" -- confirm.
        try:
            DocCollaborator.objects.get(doc_id=instance.id, uid=request.user.uid)
            return Response()
        except DocCollaborator.DoesNotExist:
            return Response({"result": False})
    @action(detail=False, methods=["GET"])
    def load_pin_doc(self, request, *args, **kwargs):
        """Fetch the repo's pinned public articles."""
        repo_id = request.GET.get("repo_id", None)
        # Bail out immediately when the parameter is missing
        if repo_id is None:
            raise Error404()
        # Make sure the target repo exists (and is not soft-deleted)
        try:
            Repo.objects.get(id=repo_id, is_deleted=False)
        except Repo.DoesNotExist:
            raise Error404()
        sql = (
            "SELECT distinct dd.*, au.username creator_name, rr.name repo_name "
            "FROM `doc_doc` dd "
            "JOIN `auth_user` au ON dd.creator=au.uid "
            "JOIN `repo_repo` rr ON rr.id=dd.repo_id "
            "JOIN `doc_pin` dp ON dp.doc_id=dd.id AND dp.in_use "
            "WHERE rr.id=%s AND dd.available=%s "
            "AND dd.is_publish AND NOT dd.is_deleted; "
        )
        queryset = Doc.objects.raw(sql, [repo_id, DocAvailableChoices.PUBLIC])
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)
class DocPublicView(GenericViewSet):
    """Public entry point: global feed, hot articles/repos, per-user article
    lists and publish-activity chart data."""
    queryset = Doc.objects.filter(
        is_deleted=False, is_publish=True, available=DocAvailableChoices.PUBLIC
    )
    authentication_classes = [SessionAuthenticate]
    def list(self, request, *args, **kwargs):
        # Articles that are public-or-own, from repos that are public or
        # where the requester is a (non-visitor) member
        sql = (
            "SELECT d.*, au.username creator_name, r.name repo_name "
            "FROM `repo_repo` r "
            "JOIN `repo_user` ru ON r.id=ru.repo_id AND ru.u_type!=%s "
            "JOIN `doc_doc` d ON r.id=d.repo_id "
            "JOIN `auth_user` au ON au.uid=d.creator "
            "WHERE NOT r.is_deleted AND (ru.uid=%s OR r.r_type=%s) "
            "AND (d.available = %s OR d.creator = %s) AND NOT d.`is_deleted` AND d.`is_publish` "
            "GROUP BY d.id "
            "ORDER BY d.id DESC;"
        )
        docs = Doc.objects.raw(
            sql,
            [
                UserTypeChoices.VISITOR,
                request.user.uid,
                RepoTypeChoices.PUBLIC,
                DocAvailableChoices.PUBLIC,
                request.user.uid,
            ],
        )
        page = NumPagination()
        queryset = page.paginate_queryset(docs, request, self)
        serializer = DocListSerializer(queryset, many=True)
        return page.get_paginated_response(serializer.data)
    @action(detail=False, methods=["GET"])
    def recent(self, request, *args, **kwargs):
        """Hot articles: top 10 by page views in public repos (cached 30 min)."""
        cache_key = f"{self.__class__.__name__}:{self.action}"
        cache_data = cache.get(cache_key)
        if cache_data is not None:
            return Response(cache_data)
        # Recent popular articles from public repos
        public_repo_ids = Repo.objects.filter(
            r_type=RepoTypeChoices.PUBLIC, is_deleted=False
        ).values("id")
        queryset = self.queryset.filter(repo_id__in=public_repo_ids, pv__gt=0).order_by(
            "-pv"
        )[:10]
        serializer = DocListSerializer(queryset, many=True)
        cache.set(cache_key, serializer.data, 1800)
        return Response(serializer.data)
    @action(detail=False, methods=["GET"])
    def hot_repo(self, request, *args, **kwargs):
        """Hot repos: top 10 by article count among the last 100 docs (cached 30 min)."""
        cache_key = f"{self.__class__.__name__}:{self.action}"
        cache_data = cache.get(cache_key)
        if cache_data is not None:
            return Response(cache_data)
        sql = (
            "SELECT rr.*, dd.repo_id, COUNT(1) 'count' "
            "FROM `doc_doc` dd "
            "JOIN (SELECT MIN(dd2.id) 'min_id' from `doc_doc` dd2 ORDER BY dd2.id DESC LIMIT 100) dd3 "
            "JOIN `repo_repo` rr ON rr.id=dd.repo_id "
            "WHERE dd.id>=dd3.min_id "
            "GROUP BY dd.repo_id "
            "ORDER BY count DESC "
            "LIMIT 10"
        )
        repos = Repo.objects.raw(sql)
        serializer = RepoSerializer(repos, many=True)
        cache.set(cache_key, serializer.data, 1800)
        return Response(serializer.data)
    @action(detail=False, methods=["GET"])
    def user_doc(self, request, *args, **kwargs):
        """Articles published by a given user, visible to the requester."""
        username = request.GET.get("username")
        if not username:
            raise ParamsNotFound(_("用户名不能为空"))
        try:
            user = USER_MODEL.objects.get(username=username)
        except USER_MODEL.DoesNotExist:
            raise UserNotExist()
        # Public articles from repos that are public or shared with requester
        union_repo_ids = RepoUser.objects.filter(
            Q(uid=request.user.uid) & ~Q(u_type=UserTypeChoices.VISITOR)
        ).values("repo_id")
        allowed_repo_ids = Repo.objects.filter(
            Q(r_type=RepoTypeChoices.PUBLIC) | Q(id__in=union_repo_ids)
        ).values("id")
        docs = self.queryset.filter(
            creator=user.uid, repo_id__in=allowed_repo_ids
        ).order_by("-id")
        page = NumPagination()
        queryset = page.paginate_queryset(docs, request, self)
        serializer = DocListSerializer(queryset, many=True)
        return page.get_paginated_response(serializer.data)
    @action(detail=False, methods=["GET"])
    def recent_chart(self, request, *args, **kwargs):
        """Chart data: public-article update counts per day over the last 30 days."""
        cache_key = f"{self.__class__.__name__}:{self.action}"
        cache_data = cache.get(cache_key)
        if cache_data is not None:
            return Response(cache_data)
        today = datetime.datetime.today()
        last_day = today - datetime.timedelta(days=30)
        # NOTE(review): values are interpolated with str.format rather than
        # query params; both are internal (a datetime and an enum), so no
        # injection surface, but params would be more consistent.
        sql = (
            "SELECT dd.id, DATE_FORMAT(dd.update_at, \"%%m-%%d\") 'date', COUNT(1) 'count' "
            "FROM `doc_doc` dd "
            "WHERE dd.update_at>='{}' AND NOT dd.is_deleted AND dd.available = '{}' "
            "GROUP BY DATE(dd.update_at); "
        ).format(last_day, DocAvailableChoices.PUBLIC)
        docs_count = Doc.objects.raw(sql)
        serializer = DocPublishChartSerializer(docs_count, many=True)
        data = {item["date"]: item["count"] for item in serializer.data}
        cache.set(cache_key, data, 1800)
        return Response(data)
class SearchDocView(ThrottleAPIView):
    """Doc search entry point.

    Searches title and content over docs the caller may read: public docs
    (or the caller's own) inside public repos or repos where the caller is
    a non-visitor member.
    """

    throttle_classes = [
        DocSearchThrottle,
    ]

    def post(self, request, *args, **kwargs):
        search_key = request.data.get("searchKey")
        if not search_key:
            raise ParamsNotFound(_("搜索关键字不能为空"))
        # Public-or-personal docs within public-or-member repos.
        sql = (
            "SELECT dd.*, au.username creator_name, rr.name repo_name "
            "FROM `repo_repo` rr "
            "JOIN `repo_user` ru ON ru.repo_id=rr.id AND ru.u_type!=%s "
            "JOIN `doc_doc` dd ON rr.id = dd.repo_id "
            "JOIN `auth_user` au ON au.uid = dd.creator "
            "WHERE NOT rr.is_deleted AND (ru.uid = %s OR rr.r_type = %s) "
            "AND NOT dd.is_deleted AND dd.is_publish AND (dd.available = %s OR dd.creator = %s) "
            "AND (({}) OR ({})) "
            "GROUP BY dd.id "
            "ORDER BY dd.id DESC;"
        )
        # One LIKE clause per non-empty keyword; a doc matches when either
        # its title or its content contains every keyword.
        extend_title_sqls = []
        extend_content_sqls = []
        params_keys = []
        for key in search_key:
            if key:
                extend_title_sqls.append(" dd.title like %s ")
                extend_content_sqls.append(" dd.content like %s ")
                params_keys.append(f"%{key}%")
        # Guard: if every submitted keyword was empty the placeholders
        # would render as "(() OR ())", which is invalid SQL.
        if not params_keys:
            raise ParamsNotFound(_("搜索关键字不能为空"))
        extend_title_sql = "AND".join(extend_title_sqls)
        extend_content_sql = "AND".join(extend_content_sqls)
        sql = sql.format(extend_title_sql, extend_content_sql)
        docs = Doc.objects.raw(
            sql,
            [
                UserTypeChoices.VISITOR,
                request.user.uid,
                RepoTypeChoices.PUBLIC,
                DocAvailableChoices.PUBLIC,
                request.user.uid,
                *params_keys,  # bound to the title LIKE placeholders
                *params_keys,  # bound to the content LIKE placeholders
            ],
        )
        page = NumPagination()
        queryset = page.paginate_queryset(docs, request, self)
        serializer = DocListSerializer(queryset, many=True)
        return page.get_paginated_response(serializer.data)
| 17,074 | 5,597 |
#!/bin/env python3
import sys
import argparse
import configparser
import docx
from docx import Document
from docx.shared import Pt, Inches
from docx.enum.dml import MSO_THEME_COLOR_INDEX
from docx.enum.section import WD_ORIENT, WD_SECTION
from datetime import datetime
from mediumroast.api.high_level import Auth as authenticate
from mediumroast.api.high_level import Studies as study
from mediumroast.api.high_level import Interactions as interaction
### General utilities
def parse_cli_args(program_name='report_study', desc='A mediumroast.io utility that generates a Microsoft Word formatted report for a study.'):
    """Define and parse the command line arguments for the report generator."""
    ap = argparse.ArgumentParser(prog=program_name, description=desc)
    ap.add_argument(
        '--exclude_substudies', dest='exclude_substudies', type=str, default=None,
        help="The names for the substudies to exclude in a comma separated list")
    ap.add_argument(
        '--rest_url', dest='rest_url', type=str, default='http://mr-01:3000',
        help="The URL of the target REST server")
    ap.add_argument(
        '--guid', dest='guid', type=str, required=True,
        help="The GUID for the study to be reported on.")
    ap.add_argument(
        '--org', dest='org', type=str, required=True,
        help="The organization name for the report.")
    ap.add_argument('--user', dest='user', type=str, default='foo', help="User name")
    ap.add_argument('--secret', dest='secret', type=str, default='bar', help="Secret or password")
    ap.add_argument(
        '--config_file', dest='config_file', type=str, default='./reports.ini',
        help="The location to the configuration files")
    return ap.parse_args()
def read_config(conf_file='./reports.ini'):
    """Load and return the INI configuration used to format the report."""
    parsed = configparser.ConfigParser()
    parsed.read(conf_file)
    return parsed
def get_interaction_name(guid):
    """Look up an interaction's display name from its GUID.

    Relies on the module-level ``credential`` established in ``__main__``.
    """
    ctl = interaction(credential)
    return ctl.get_name_by_guid(guid)[1]['interactionName']
def _create_header(doc_obj, conf, font_size=7):
    """Stamp the first section's header with the org name and creation time."""
    stamp = f'{datetime.now():%Y-%m-%d %H:%M}'
    para = doc_obj.sections[0].header.paragraphs[0]
    para.text = conf['org'] + "\t | \t Created on: " + stamp
    # Restyle the shared 'Header' style, then apply it to the paragraph.
    hdr_style = doc_obj.styles['Header']
    hdr_style.font.name = conf['font']
    hdr_style.font.size = Pt(font_size)
    para.style = hdr_style
def _create_footer(doc_obj, conf, font_size=7):
    """Stamp the first section's footer with confidentiality and copyright notices.

    (The old version computed a ``date_string`` it never used; removed.)
    """
    para = doc_obj.sections[0].footer.paragraphs[0]
    para.text = conf['confidentiality'] + "\t | \t" + conf['copyright']
    # Restyle the shared 'Footer' style, then apply it to the paragraph.
    ftr_style = doc_obj.styles['Footer']
    ftr_style.font.name = conf['font']
    ftr_style.font.size = Pt(font_size)
    para.style = ftr_style
def _create_cover_page(doc_obj, study, conf, logo_size=60, font_size=30):
    """Build the report cover: logo, title, subtitle, author, creation date.

    NOTE(review): the ``logo_size`` argument is immediately overwritten
    below (the logo is always scaled from ``font_size``), so callers cannot
    actually control it — confirm before relying on that parameter.
    """
    title_pt = Pt(font_size)
    logo_size = Pt(font_size * 2.5)  # overrides the parameter (see NOTE above)
    # Organization logo at the top of the page
    logo_run = doc_obj.add_paragraph().add_run()
    logo_run.add_picture(conf['logo'], height=logo_size)
    # Cover title, styled with the document 'Title' style
    org = conf['org']
    cover_title = doc_obj.add_paragraph("\n\nTitle: " + study['studyName'])
    title_style = doc_obj.styles['Title']
    title_style.font.name = conf['font']
    title_style.font.size = title_pt
    cover_title.style = title_style
    # Subtitle (bold)
    subtitle_text = "A " + org + " study report enabling attributable market insights."
    subtitle_run = doc_obj.add_paragraph("").add_run(subtitle_text)
    subtitle_run.font.bold = True
    # Author (bold)
    author_run = doc_obj.add_paragraph("\nAuthor: ").add_run("Mediumroast Barrista Robot")
    author_run.font.bold = True
    # Creation date (bold)
    date_run = doc_obj.add_paragraph("Creation Date: ").add_run(f'{datetime.now():%Y-%m-%d %H:%M}')
    date_run.font.bold = True
    # The cover ends with a page break
    doc_obj.add_page_break()
def _create_summary(doc_obj, study_doc, conf):
    """Write the Findings section: Introduction, Opportunity and Actions.

    NOTE: mutates ``study_doc`` — the 'text' entries of Opportunity/Action
    are deleted so the remaining keys can be rendered as list items.
    """
    def _flatten(text):
        # Collapse embedded newlines into single spaces.
        return " ".join(text.split("\n"))

    findings_title = doc_obj.add_paragraph('Findings')
    findings_title.style = doc_obj.styles['Title']
    doc_obj.add_heading('Introduction')
    doc_obj.add_paragraph(_flatten(study_doc['Introduction']))
    # Opportunity: lead-in text followed by bulleted highlights
    doc_obj.add_heading('Opportunity')
    doc_obj.add_paragraph(_flatten(study_doc['Opportunity']['text']))
    del study_doc['Opportunity']['text']
    for opp_key in study_doc['Opportunity']:
        doc_obj.add_paragraph(_flatten(study_doc['Opportunity'][opp_key]), style='List Bullet')
    # Actions: lead-in text followed by a numbered list
    doc_obj.add_heading('Actions')
    doc_obj.add_paragraph(_flatten(study_doc['Action']['text']))
    del study_doc['Action']['text']
    for act_key in study_doc['Action']:
        doc_obj.add_paragraph(_flatten(study_doc['Action'][act_key]), style='List Number')
    doc_obj.add_page_break()
def _add_hyperlink(paragraph, text, url):
    """Append a clickable hyperlink run showing *text*, pointing at *url*.

    python-docx has no public hyperlink API, so this builds the WordprocessingML
    elements by hand. Adapted from
    https://stackoverflow.com/questions/47666642/adding-an-hyperlink-in-msword-by-using-python-docx

    Returns the created ``w:hyperlink`` element.
    """
    # Register the target URL in document.xml.rels and get a relationship id
    part = paragraph.part
    r_id = part.relate_to(
        url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)
    # Create the w:hyperlink tag referencing that relationship id
    hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')
    hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )
    # Create a w:r run element with an (empty) w:rPr properties child
    new_run = docx.oxml.shared.OxmlElement('w:r')
    rPr = docx.oxml.shared.OxmlElement('w:rPr')
    # rPr must precede the run text inside w:r per the OOXML schema
    new_run.append(rPr)
    new_run.text = text
    hyperlink.append(new_run)
    # Attach the hyperlink element to a fresh run on the paragraph
    r = paragraph.add_run()
    r._r.append(hyperlink)
    # Manual link look (blue + underline) because this document template has
    # no 'Hyperlink' character style; visited links won't turn purple.
    # Delete this if using a template that has the hyperlink style in it.
    r.font.color.theme_color = MSO_THEME_COLOR_INDEX.HYPERLINK
    r.font.underline = True
    return hyperlink
def _create_reference(interaction_guid, substudy, doc_obj, conf, char_limit=500):
    """Append one interaction reference: heading, metadata, abstract and link.

    char_limit caps how much of the abstract is shown.
    """
    ctl = interaction(credential)
    ok, data = ctl.get_by_guid(interaction_guid)
    if not ok:
        print(
            'Something went wrong obtaining the interaction data for [' + interaction_guid + ']')
        return
    doc_obj.add_heading(data['interactionName'], 2)
    # 'time' looks like HHMM and 'date' like YYYYMMDD; slice into readable forms.
    hhmm = str(data['time'][0:2]) + ':' + str(data['time'][2:4])
    ymd = (str(data['date'][0:4]) + '-' + str(data['date'][4:6]) + '-'
           + str(data['date'][6:8]))
    meta = "\t\t|\t".join(
        ['Date: ' + ymd + "\t" + hhmm, 'Sub-Study Identifier: ' + substudy])
    doc_obj.add_paragraph(meta)
    doc_obj.add_paragraph(data['abstract'][0:char_limit] + '...')
    link_para = doc_obj.add_paragraph('Interaction Resource: ')
    _add_hyperlink(link_para, data['interactionName'], data['url'].replace('s3', 'http'))
def _create_references(doc_obj, substudy_list, conf):
    """Append the References section: one entry per interaction per sub-study."""
    ref_title = doc_obj.add_paragraph('References')
    ref_title.style = doc_obj.styles['Title']
    # Loop variable named so it does not shadow the imported `interaction`
    # class that _create_reference instantiates.
    for ss_name in substudy_list:
        for item_key in substudy_list[ss_name]['interactions']:
            guid = substudy_list[ss_name]['interactions'][item_key]['GUID']
            _create_reference(guid, ss_name, doc_obj, conf)
def _create_quote(doc_obj, quote, indent, font_size):
    """Append a single indented bullet containing the given quote text."""
    bullet_para = doc_obj.add_paragraph(style='List Bullet')
    bullet_para.paragraph_format.left_indent = Pt(1.5 * indent)
    run = bullet_para.add_run(quote)
    run.font.size = Pt(font_size)
    # Tighten the vertical spacing between successive quote bullets.
    bullet_para.paragraph_format.space_after = Pt(3)
def _create_quotes(doc_obj, quotes, indent, font_size, location='quotes'):
    """Append one indented bullet per entry of *quotes*.

    Each value of *quotes* is a mapping whose *location* key holds the quote
    text. Delegates rendering to _create_quote so the bullet formatting
    (indent, font size, spacing) is defined in exactly one place instead of
    being duplicated here.
    """
    for quote_key in quotes:
        _create_quote(doc_obj, quotes[quote_key][location], indent, font_size)
def _create_subsection(doc_obj, start_text, body_text, indent, font_size, to_bold=False, to_italics=False):
    """Append an indented 'Label: body' paragraph with optional bold/italic runs."""
    para = doc_obj.add_paragraph()
    para.paragraph_format.left_indent = Pt(indent)
    lead = para.add_run(start_text)
    lead.font.bold = to_bold
    lead.font.size = Pt(font_size)
    body = para.add_run(body_text)
    body.font.size = Pt(font_size)
    # Only touch the italic flag when requested; leave the style default otherwise.
    if to_italics:
        body.font.italic = to_italics
def _create_intro(doc_obj, intro_name, intro_body, heading_level=2):
    """Append a heading (*intro_name*) followed by its introductory paragraph."""
    doc_obj.add_heading(intro_name, level=heading_level)
    doc_obj.add_paragraph(intro_body)
def _create_key_theme(doc_obj, themes, quotes, conf, include_fortune=True):
    """Render one sub-study's themes: the summary theme, then each detailed theme.

    themes : mapping with 'summary_theme' and 'discrete_themes' entries
    quotes : mapping with 'summary' and 'discrete' quote collections
    include_fortune : also emit the system-generated 'fortune' line
    """
    theme_conf = conf['themes']
    indent = int(theme_conf['indent'])
    size = int(theme_conf['font_size'])
    ### Summary theme
    _create_intro(doc_obj,
                  'Summary Theme',
                  theme_conf['summary_intro'].replace("\n", " "))
    summary = themes['summary_theme']
    _create_subsection(doc_obj, 'Definition: ', summary['description'],
                       indent, font_size=size, to_bold=True)
    if include_fortune:
        fortune_text = summary['fortune'][0].upper() + summary['fortune'][1:] + ' [system generated]'
        _create_subsection(doc_obj, 'Fortune: ', fortune_text,
                           indent, font_size=size, to_bold=True)
    _create_subsection(doc_obj, 'Tags: ', " | ".join(summary['tags'].keys()),
                       indent, font_size=size, to_bold=True, to_italics=True)
    doc_obj.add_heading('Theme Quotes', level=3)
    _create_quotes(doc_obj, quotes['summary'], indent, font_size=size)
    ### Detailed (discrete) themes
    _create_intro(doc_obj,
                  'Detailed Themes',
                  theme_conf['discrete_intro'].replace("\n", " "))
    detailed = themes['discrete_themes']
    for theme_id in detailed:
        _create_intro(doc_obj,
                      'Detailed Theme Identifier: ' + theme_id,
                      theme_conf['discrete_theme_intro'].replace("\n", " "),
                      heading_level=3)
        _create_subsection(doc_obj, 'Definition: ', detailed[theme_id]['description'],
                           indent, font_size=size, to_bold=True)
        if include_fortune:
            fortune_text = detailed[theme_id]['fortune'][0].upper() + detailed[theme_id]['fortune'][1:] + ' [system generated]'
            _create_subsection(doc_obj, 'Fortune: ', fortune_text,
                               indent, font_size=size, to_bold=True)
        _create_subsection(doc_obj, 'Tags: ', " | ".join(detailed[theme_id]['tags'].keys()),
                           indent, font_size=size, to_bold=True, to_italics=True)
        doc_obj.add_heading('Theme Quotes by Interaction', level=4)
        if theme_id in quotes['discrete']:
            for inter_guid in quotes['discrete'][theme_id]:
                doc_obj.add_heading(get_interaction_name(inter_guid), level=5)
                the_quotes = quotes['discrete'][theme_id][inter_guid]['quotes']
                # Fallback message when no quote was found for this theme.
                if not the_quotes:
                    the_quotes = [['mediumroast.io was unable to find a relevant quote or text snippet for this theme.']]
                for one_quote in the_quotes:
                    _create_quote(doc_obj, one_quote[0], indent, font_size=size)
                _create_subsection(doc_obj, 'Frequency: ',
                                   str(quotes['discrete'][theme_id][inter_guid]['frequency']),
                                   indent, font_size=size, to_bold=True, to_italics=True)
    doc_obj.add_page_break()
def _create_key_themes(doc_obj, substudies, conf, substudy_excludes=list()):
    """Append the 'Key Themes by Sub-Study' section, one block per included sub-study."""
    themes_title = doc_obj.add_paragraph('Key Themes by Sub-Study')
    themes_title.style = doc_obj.styles['Title']
    doc_obj.add_paragraph(conf['themes']['intro'].replace("\n", " "))
    for ss_id in substudies:
        if ss_id in substudy_excludes:
            continue
        heading = 'Sub-Study Identifier: ' + ss_id + ' — ' + substudies[ss_id]['description']
        doc_obj.add_heading(heading, 1)
        _create_key_theme(
            doc_obj, substudies[ss_id]['keyThemes'], substudies[ss_id]['keyThemeQuotes'], conf)
def change_orientation(doc_obj):
    """Start a new section with width and height swapped (portrait <-> landscape).

    The previous version always set WD_ORIENT.LANDSCAPE, so flipping back
    left the orientation flag inconsistent with the swapped page size; the
    flag is now derived from the new dimensions.

    Returns the newly created section.
    """
    current_section = doc_obj.sections[-1]
    new_width, new_height = current_section.page_height, current_section.page_width
    new_section = doc_obj.add_section(WD_SECTION.NEW_PAGE)
    # Landscape iff the new page is wider than it is tall.
    if new_width > new_height:
        new_section.orientation = WD_ORIENT.LANDSCAPE
    else:
        new_section.orientation = WD_ORIENT.PORTRAIT
    new_section.page_width = new_width
    new_section.page_height = new_height
    return new_section
def _create_row(the_row, id, type,freq, src, snip):
ID = 0
TYPE = 1
FREQ = 2
SNIP = 4
SRC = 3
the_row[ID].text = str(id)
the_row[TYPE].text = str(type)
the_row[FREQ].text = str(freq)
the_row[SNIP].text = str(snip)
the_row[SRC].text = str(src)
def _create_rows():
    """Placeholder — not implemented.

    Intended plan:
        summary theme  -> create a single row
        discrete theme -> one row per theme
    Currently a no-op; _create_summary_theme_tables builds its rows inline.
    """
    pass
def _create_summary_theme_tables(doc_obj, substudies, conf, substudy_excludes=list()):
    """Append the 'Key Theme Summary Tables' section in landscape orientation.

    One 5-column table per included sub-study: the summary theme row first,
    then one row per detailed theme that has quotes.

    NOTE(review): ``my_widths`` is computed but never applied to the table —
    confirm whether column widths were meant to be set.
    NOTE(review): the mutable default ``substudy_excludes=list()`` is never
    mutated here, so it is harmless in practice.
    """
    change_orientation(doc_obj)  # Flip to landscape mode
    my_widths = [Inches(1.5), Inches(0.75), Inches(0.75), Inches(1.5), Inches(3.5)]
    section_title = doc_obj.add_paragraph(
        'Key Theme Summary Tables')  # Section title
    section_title.style = doc_obj.styles['Title']
    for substudy in substudies:
        if substudy in substudy_excludes:
            continue
        doc_obj.add_heading('Sub-Study Identifier: ' + substudy + ' — ' + substudies[substudy]['description'], 1)
        my_table = doc_obj.add_table(rows=1, cols=5)
        my_table.style = 'Colorful Grid'
        header_row = my_table.rows[0].cells
        header_row[0].text = 'Identifier'
        header_row[1].text = 'Type'
        header_row[2].text = 'Frequency'
        header_row[3].text = 'Source'
        header_row[4].text = 'Snippet'
        my_row = my_table.add_row().cells
        # Row 1: the summary theme (no frequency available)
        my_theme = 'Summary Theme'
        my_type = 'Summary'
        my_frequency = 'N/A'
        my_interaction = list(substudies[substudy]['keyThemeQuotes']['summary'].keys())[0]
        my_snippet = substudies[substudy]['keyThemeQuotes']['summary'][my_interaction]['quotes'][0]
        my_source = get_interaction_name(my_interaction)
        _create_row(my_row, my_theme, my_type, my_frequency, my_source, my_snippet)
        # Remaining rows: one per detailed (discrete) theme with quotes
        theme_loc = 'discrete_themes'
        quotes_loc = 'discrete'
        my_themes = substudies[substudy]['keyThemes'][theme_loc]
        my_quotes = substudies[substudy]['keyThemeQuotes'][quotes_loc]
        my_type = 'Detailed'
        for my_theme in my_themes:
            if my_theme in my_quotes:
                my_row = my_table.add_row().cells
                # Snippet/source come from the theme's first interaction
                my_interaction = list(my_quotes[my_theme].keys())[0]
                my_source = get_interaction_name(my_interaction)
                the_quotes = my_quotes[my_theme][my_interaction]['quotes']
                # Fallback message when no quote was found for this theme
                if not the_quotes: the_quotes=[['mediumroast.io was unable to find a relevant quote or text snippet for this theme.']]
                my_snippet = the_quotes[0][0]
                my_frequency = my_themes[my_theme]['frequency']
                _create_row(my_row, my_theme, my_type, my_frequency, my_source, my_snippet)
    doc_obj.add_page_break()
    change_orientation(doc_obj)  # Flip back to portrait page size
def report(study, conf, substudy_excludes):
    """Assemble the full study report and return the Document object."""
    doc = Document()
    # Base typography for the whole document.
    normal = doc.styles['Normal']
    normal.font.name = conf['font']
    normal.font.size = Pt(int(conf['font_size']))
    # Front matter
    _create_cover_page(doc, study, conf)
    _create_header(doc, conf)
    _create_footer(doc, conf)
    # Findings: introduction, opportunity and actions
    _create_summary(doc, study['document'], conf)
    # Key themes: summary tables first, then the detailed write-up
    _create_summary_theme_tables(doc, study['substudies'], conf, substudy_excludes)
    _create_key_themes(doc, study['substudies'], conf, substudy_excludes)
    # Source interaction references
    _create_references(doc, study['substudies'], conf)
    return doc
if __name__ == "__main__":
my_args = parse_cli_args()
configurator = read_config(conf_file=my_args.config_file)
my_org = my_args.org.upper()
# Set default items from the configuration file for the report
report_conf = {
'org': configurator[my_org]['organization_name'],
'logo': configurator[my_org]['logo_image'],
'font': configurator[my_org]['font_type'],
'font_size': configurator[my_org]['font_size'],
'font_measure': configurator[my_org]['font_measure'],
'copyright': configurator[my_org]['copyright_notice'],
'confidentiality': configurator[my_org]['confidential_notice'],
'themes': {
'font_size': configurator['THEME_FORMAT']['font_size'],
'intro': configurator['THEME_FORMAT']['key_theme_intro'],
'summary_intro': configurator['THEME_FORMAT']['summary_theme_intro'],
'discrete_intro': configurator['THEME_FORMAT']['discrete_themes_intro'],
'discrete_theme_intro': configurator['THEME_FORMAT']['discrete_theme_intro'],
'indent': configurator['THEME_FORMAT']['indent'],
}
}
auth_ctl = authenticate(
user_name=my_args.user, secret=my_args.secret, rest_server_url=my_args.rest_url)
credential = auth_ctl.login()
substudy_excludes = my_args.exclude_substudies.split(',') if my_args.exclude_substudies else list()
study_ctl = study(credential)
success, study_obj = study_ctl.get_by_guid(my_args.guid)
if success:
doc_name = study_obj['studyName'].replace(
' ', '_') + "_study_report.docx"
document = report(study_obj, report_conf, substudy_excludes)
document.save(doc_name)
else:
print('CLI ERROR: This is a generic error message, as something went wrong.')
sys.exit(-1)
| 21,175 | 6,874 |
import pprint
import logging
import datetime
from selenium import webdriver
import hubcheck.conf
# block websites that make linkcheck slow
# these are usually blocked by the workspace firewall
# mozillalabs comes from using a nightly version of firefox browser
# many of the others are from login authentication sites
# Regex patterns (not globs) — installed via proxy_client.blacklist() in
# Browser.start_proxy_client(), which answers matches with a fixed 200.
PROXY_BLACKLIST = [
    "http(s)?://.*mozillalabs\\.com/?.*",        # testpilot.mozillalabs.com
    "http(s)?://.*google-analytics\\.com/.*",    # ssl.google-analytics.com
    'http(s)?://.*facebook\\.com/?.*',           # www.facebook.com/login.php
    'http(s)?://.*fbcdn\\.com/?.*',              # www.facebook.com/login.php
    'http(s)?://.*accounts\\.google\\.com/?.*',  # accounts.google.com
    'http(s)?://.*linkedin\\.com/?.*',           # linkedin.com
    'http(s)?://.*twitter\\.com/?.*',            # api.twitter.com
    # 'http(s)?://.*purdue\\.edu/apps/account/cas/?.*', # purdue cas (disabled)
]
# MIME types that should be treated as downloads rather than rendered in the
# browser; suitable for the ``mimetypes`` argument of Browser subclasses.
# The trailing comment on each line notes the usual file extension(s).
MIMETYPES = [
    "appl/text",  # .doc
    "application/acad",  # .dwg
    "application/acrobat",  # .pdf
    "application/autocad_dwg",  # .dwg
    "application/doc",  # .doc, .rtf
    "application/dwg",  # .dwg
    "application/eps",  # .eps
    "application/futuresplash",  # .swf
    "application/gzip",  # .gz
    "application/gzipped",  # .gz
    "application/gzip-compressed",  # .gz
    "application/jpg",  # .jpg
    "application/ms-powerpoint",  # .ppt
    "application/msexcel",  # .xls
    "application/mspowerpnt",  # .ppt
    "application/mspowerpoint",  # .ppt
    "application/msword",  # .doc, .rtf
    "application/octet-stream",  # .gz, .zip
    "application/pdf",  # .pdf
    "application/photoshop",  # .psd
    "application/postscript",  # .ps, .avi, .eps
    "application/powerpoint",  # .ppt
    "application/psd",  # .psd
    "application/rss+xml",  # .rss
    "application/rtf",  # .rtf
    "application/tar",  # .tar
    "application/vnd.ms-excel",  # .xls, .xlt, .xla
    "application/vnd.ms-excel.addin.macroEnabled.12",  # .xlam
    "application/vnd.ms-excel.sheet.binary.macroEnabled.12",  # .xlsb
    "application/vnd.ms-excel.sheet.macroEnabled.12",  # .xlsm
    "application/vnd.ms-excel.template.macroEnabled.12",  # .xltm
    "application/vnd.ms-powerpoint",  # .pps, .ppt, .pot, .ppa
    "application/vnd.ms-powerpoint.addin.macroEnabled.12",  # .ppam
    "application/vnd.ms-powerpoint.presentation.macroEnabled.12",  # .pptm
    "application/vnd.ms-powerpoint.slideshow.macroEnabled.12",  # .ppsm
    "application/vnd.ms-powerpoint.template.macroEnabled.12",  # .potm
    "application/vnd.ms-word",  # .doc
    "application/vnd.ms-word.document.macroEnabled.12",  # .docm
    "application/vnd.ms-word.template.macroEnabled.12",  # .dotm
    "application/vnd.msexcel",  # .xls
    "application/vnd.mspowerpoint",  # .ppt
    "application/vnd.msword",  # .doc
    "application/vnd.openxmlformats-officedocument.presentationml.presentation",  # .pptx
    "application/vnd.openxmlformats-officedocument.presentationml.template",  # .potx
    "application/vnd.openxmlformats-officedocument.presentationml.slideshow",  # .ppsx
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",  # .xlsx
    "application/vnd.openxmlformats-officedocument.spreadsheetml.template",  # .xltx
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",  # .docx
    "application/vnd.openxmlformats-officedocument.wordprocessingml.template",  # .dotx
    "application/vnd.pdf",  # .pdf
    "application/vnd-mspowerpoint",  # .ppt
    "application/winword",  # .doc
    "application/word",  # .doc
    "application/x-acad",  # .dwg
    "application/x-apple-diskimage",  # .dmg
    "application/x-autocad",  # .dwg
    "application/x-bibtex",  # .bib
    "application/x-compress",  # .gz, .tar, .zip
    "application/x-compressed",  # .gz, .tar, .zip
    "application/x-dos_ms_excel",  # .xls
    "application/x-dwg",  # .dwg
    "application/x-endnote-refer",  # .enw
    "application/x-eps",  # .eps
    "application/x-excel",  # .xls
    "application/x-gtar",  # .tar
    "application/x-gunzip",  # .gz
    "application/x-gzip",  # .gz
    "application/x-jpg",  # .jpg
    "application/x-m",  # .ppt
    "application/x-ms-excel",  # .xls
    "application/x-msexcel",  # .xls
    "application/x-mspublisher",  # .pub
    "application/x-msw6",  # .doc
    "application/x-msword",  # .doc
    "application/x-ole-storage",  # .msi
    "application/x-pdf",  # .pdf
    "application/x-powerpoint",  # .ppt
    "application/x-rtf",  # .rtf
    "application/x-shockwave-flash",  # .swf
    "application/x-shockwave-flash2-preview",  # .swf
    "application/x-tar",  # .tar
    "application/x-troff-msvideo",  # .avi
    "application/x-soffice",  # .rtf
    "application/x-xml",  # .xml, .pub
    "application/x-zip",  # .zip
    "application/x-zip-compressed",  # .zip
    "application/xls",  # .xls
    "application/xml",  # .xml, .pub
    "application/zip",  # .zip
    "audio/aiff",  # .avi, .mov
    "audio/avi",  # .avi
    "audio/mp3",  # .mp3
    "audio/mp4",  # .mp4
    "audio/mpg",  # .mp3
    "audio/mpeg",  # .mp3
    "audio/mpeg3",  # .mp3
    "audio/x-midi",  # .mov
    "audio/x-mp3",  # .mp3
    "audio/x-mpg",  # .mp3
    "audio/x-mpeg",  # .mp3
    "audio/x-mpeg3",  # .mp3
    "audio/x-mpegaudio",  # .mp3
    "audio/x-wav",  # .mov
    "drawing/dwg",  # .dwg
    "gzip/document",  # .gz
    "image/avi",  # .avi
    "image/eps",  # .eps
    "image/gi_",  # .gif
    "image/gif",  # .eps, .gif
    "image/jpeg",  # .jpg, .jpeg
    "image/jpg",  # .jpg
    "image/jp_",  # .jpg
    "image/mpeg",  # .mpeg
    "image/mov",  # .mov
    "image/photoshop",  # .psd
    "image/pipeg",  # .jpg
    "image/pjpeg",  # .jpg
    "image/png",  # .png
    "image/psd",  # .psd
    "image/vnd.dwg",  # .dwg
    "image/vnd.rn-realflash",  # .swf
    "image/vnd.swiftview-jpeg",  # .jpg
    "image/x-eps",  # .eps
    "image/x-dwg",  # .dwg
    "image/x-photoshop",  # .psd
    "image/x-xbitmap",  # .gif, .jpg
    "multipart/x-tar",  # .tar
    "multipart/x-zip",  # .zip
    "octet-stream",  # possibly some .ppt files
    "text/csv",  # .csv
    "text/mspg-legacyinfo",  # .msi
    "text/pdf",  # .pdf
    "text/richtext",  # .rtf
    "text/rtf",  # .rtf
    "text/x-pdf",  # .pdf
    "text/xml",  # .xml, .rss
    "video/avi",  # .avi, .mov
    "video/mp4v-es",  # .mp4
    "video/msvideo",  # .avi
    "video/quicktime",  # .mov
    "video/x-flv",  # .flv
    "video/x-m4v",  # .m4v
    "video/x-msvideo",  # .avi
    "video/x-quicktime",  # .mov
    "video/xmpg2",  # .avi
    "zz-application/zz-winassoc-psd",  # .psd
]
class Browser(object):
    """hubcheck webdriver interface.

    Lazily launches a selenium webdriver (via the subclass's launch()) and
    optionally records traffic through a proxy client so page loads can be
    inspected as HAR entries.
    """

    def __init__(self, mimetypes=None, downloaddir='/tmp'):
        """Initialize browser state; no browser process is started yet.

        mimetypes   : MIME types to auto-download rather than render.
                      Defaults to a fresh empty list — the old ``=[]``
                      default object was shared across instances.
        downloaddir : directory where downloads are stored.
        """
        self.logger = logging.getLogger(__name__)
        self.logger.info("setting up a web browser")
        self._browser = None        # selenium webdriver, created by launch()
        self.wait_time = 2          # implicit wait (seconds) for element lookup
        self.marker = 0             # counter backing next_marker()
        self.proxy_client = None
        self.proxy_blacklist = PROXY_BLACKLIST
        self.profile = None         # browser profile, populated by subclasses
        self.downloaddir = downloaddir
        # Fix the mutable-default-argument pitfall: never share one list
        # object between Browser instances.
        self.mimetypes = [] if mimetypes is None else mimetypes

    def __del__(self):
        # Best-effort cleanup; close() is a no-op when nothing was launched.
        self.close()

    def setup_browser_preferences(self):
        """Browser preferences should be set up by subclasses."""
        pass

    def start_proxy_client(self):
        """Create the recording proxy client and install the URL blacklist."""
        # setup proxy if needed
        if hubcheck.conf.settings.proxy is None:
            self.logger.info("proxy not started, not starting client")
            return
        # start the client
        self.proxy_client = hubcheck.conf.settings.proxy.create_client()
        # setup the proxy website blacklist
        if self.proxy_client is not None:
            self.logger.info("setting up proxy blacklist")
            for url_re in self.proxy_blacklist:
                self.logger.debug("blacklisting %s" % url_re)
                self.proxy_client.blacklist(url_re, 200)

    def stop_proxy_client(self):
        """Shut down the proxy client if one is running."""
        if self.proxy_client is not None:
            self.logger.info("stopping proxy client")
            self.proxy_client.close()
            self.proxy_client = None

    def setup_browser_size_and_position(self):
        """Apply the implicit wait and place/resize the browser window."""
        # set the amount of time to wait for an element to appear on the page
        self._browser.implicitly_wait(self.wait_time)
        # place the browser window in the upper left corner of the screen
        self._browser.set_window_position(0, 0)
        # resize the window to just shy of our 1024x768 screen
        self._browser.set_window_size(1070, 700)

    def launch(self):
        """Subclasses add the code required to launch the browser."""
        pass

    def get(self, url):
        """Navigate to *url*, launching the browser on first use."""
        if self._browser is None:
            self.launch()
        self.logger.debug("retrieving url: %s" % (url))
        self._browser.get(url)

    def close(self):
        """Quit the browser, release the profile and stop the proxy client."""
        if self._browser is None:
            return
        self.logger.info("closing browser")
        self._browser.quit()
        self._browser = None
        # Bug fix: this used to read bare ``self.profile`` — a no-op
        # expression; release the profile reference as evidently intended.
        self.profile = None
        self.stop_proxy_client()

    def error_loading_page(self, har_entry):
        """Return True when the HAR entry's HTTP status indicates a failed load.

        1xx informational, 2xx success and 3xx redirect statuses count as
        success; 4xx/5xx and any out-of-range status count as errors.
        """
        harurl = har_entry['request']['url']
        harstatus = har_entry['response']['status']
        self.logger.debug("%s returned status %s" % (harurl, harstatus))
        return not (100 <= harstatus <= 399)

    def page_load_details(self, url=None, follow_redirects=True):
        """Return the HAR entry for the last page loaded.

        Follows redirects so the returned entry describes the page that was
        eventually loaded. Returns None when no proxy client is recording
        or no matching entry exists.
        """
        if not self.proxy_client:
            return None
        if url is None:
            url = self._browser.current_url
        self.logger.debug("processing har for %s" % (url))
        har = self.proxy_client.har
        self.logger.debug("har entry = %s" % (pprint.pformat(har)))
        return_entry = None
        for entry in har['log']['entries']:
            harurl = entry['request']['url']
            harstatus = entry['response']['status']
            if url is None:
                # we are following a redirect from below
                return_entry = entry
            elif url == harurl:
                # the original url matches this har entry's url exactly
                return_entry = entry
            elif (not url.endswith('/')) and (url + '/' == harurl):
                # the original url matches modulo a trailing slash
                return_entry = entry
            if return_entry is not None:
                if follow_redirects and (harstatus >= 300) and (harstatus <= 399):
                    # follow the redirect (should be the next har entry)
                    url = None
                    continue
                else:
                    # found our match
                    break
        self.logger.debug("har for url = %s" % (pprint.pformat(return_entry)))
        return return_entry

    def take_screenshot(self, filename=None):
        """Save a PNG screenshot; auto-name it hcss_<timestamp>.png if needed."""
        if self._browser is None:
            return
        if filename is None:
            dts = datetime.datetime.today().strftime("%Y%m%d%H%M%S")
            filename = 'hcss_%s.png' % dts
        self.logger.debug("screenshot filename: %s" % (filename))
        self._browser.save_screenshot(filename)

    def next_marker(self):
        """Return the next value of the monotonically increasing marker."""
        self.marker += 1
        return self.marker
| 17,388 | 4,725 |
from topdown import *
from caty.jsontools import stdjson
from caty.jsontools import xjson
from caty.jsontools.selector import stm as default_factory
from caty.core.spectypes import UNDEFINED
from caty.core.language import name_token
class JSONPathSelectorParser(Parser):
def __init__(self, empty_when_error=False, ignore_rest=False, factory=None):
Parser.__init__(self)
self.empty_when_error = empty_when_error
self.ignore_rest = ignore_rest
self.factory = factory if factory else default_factory
def __call__(self, seq):
o = chainl([self.all,
self.tag,
self.exp_tag,
self.untagged,
self.length,
self.it,
self.name,
self.index,
self.namewildcard,
self.itemwildcard,
try_(self.oldtag),
], self.dot)(seq)
o = self.factory.SelectorWrapper(o)
optional = option(u'?')(seq)
if optional and option('=')(seq):
d = xjson.parse(seq)
else:
d = UNDEFINED
if not seq.eof and not self.ignore_rest:
raise ParseFailed(seq, self)
o.set_optional(optional)
o.set_default(d)
return o
def apply_option(self, stm):
stm.empty_when_error = self.empty_when_error
return stm
def dot(self, seq):
seq.parse('.')
def _(a, b):
#self.apply_option(a)
#self.apply_option(b)
return a.chain(b)
return _
def all(self, seq):
seq.parse('$')
return self.factory.AllSelector()
def name(self, seq):
key = seq.parse([self.namestr, lambda s:self.quoted(s, '"'), lambda s: self.quoted(s, "'")])
optional = False
#optional = option(u'?')(seq)
return self.factory.PropertySelector(key, optional)
def namestr(self, seq):
return seq.parse(name_token)
def quoted(self, seq, qc):
def series_of_escape(s):
import itertools
return len(list(itertools.takewhile(lambda c: c=='\\', reversed(s))))
try:
seq.ignore_hook = True
st = [seq.parse(qc)]
s = seq.parse(until(qc))
while True:
if series_of_escape(s) % 2 == 0:
st.append(s)
break
else:
st.append(s)
s = seq.parse(Regex(r'%s[^%s]*' % (qc, qc)))
st.append(seq.parse(qc))
return stdjson.loads('"%s"'%''.join(st[1:-1]))
except EndOfBuffer, e:
raise ParseFailed(seq, string)
finally:
seq.ignore_hook = False
def index(self, seq):
idx = int(seq.parse(Regex(r'([0-9]+)')))
optional = False
#optional = option(u'?')(seq)
return self.factory.ItemSelector(idx, optional)
def namewildcard(self, seq):
seq.parse('*')
return self.factory.NameWildcardSelector()
def itemwildcard(self, seq):
seq.parse('#')
return self.factory.ItemWildcardSelector()
def oldtag(self, seq):
seq.parse('^')
name = seq.parse(option([self.namestr, lambda s:self.quoted(s, '"'), lambda s: self.quoted(s, "'")], None))
if name is not None:
return TagSelector(name, False)
t = seq.parse(['*', '^'])
if t == '*':
e = seq.parse(option('!', None))
return self.factory.TagSelector(None, bool(e))
else:
e = seq.parse(option('!', None))
return self.factory.TagReplacer(None, bool(e))
def tag(self, seq):
seq.parse('tag()')
return self.factory.TagNameSelector(False)
def exp_tag(self, seq):
seq.parse('exp-tag()')
return self.factory.TagNameSelector(True)
def untagged(self, seq):
v = seq.parse(choice('untagged()', 'content()'))
return self.factory.TagContentSelector(v)
def length(self, seq):
seq.parse('length()')
return self.factory.LengthSelector()
def it(self, seq):
seq.parse('it()')
return self.factory.ItSelector()
| 4,278 | 1,320 |
# -*- coding: utf8 -*-
import datetime
import mock
import os
import unittest
import webapp2
from google.appengine.ext import testbed, deferred
from google.appengine.api import queueinfo
from . import models
from .handler import application
from .wrapper import defer
TESTCONFIG_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "testconfig")
def noop(*args, **kwargs):
    """Deferred target that accepts any arguments and does nothing."""
    return None
def noop_fail(*args, **kwargs):
    """Deferred target that always raises a generic Exception."""
    raise Exception()
def noop_permanent_fail(*args, **kwargs):
    """Deferred target raising PermanentTaskFailure, so the task queue never retries it."""
    raise deferred.PermanentTaskFailure
class Foo(object):
    """Minimal callable object with a bound method, used to exercise the
    function-repr bookkeeping in the defer() tests."""

    def bar(self):
        return None

    def __call__(self):
        return None
class BaseTest(unittest.TestCase):
    """Common App Engine testbed setup for the deferred-manager tests.

    Activates datastore and task-queue stubs; the task-queue stub reads
    its queue configuration from the testconfig directory.
    """
    def setUp(self):
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_taskqueue_stub(root_path=TESTCONFIG_DIR)
        self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
        super(BaseTest, self).setUp()

    def tearDown(self):
        # BUG FIX: the testbed was never deactivated, so stub state could
        # leak between test cases; the GAE testbed docs require deactivate()
        # after each test.
        self.testbed.deactivate()
        super(BaseTest, self).tearDown()

    def reload(self, obj):
        """Re-fetch *obj* from the datastore and return the fresh entity."""
        return obj.get(obj.key())
class DeferTaskTests(BaseTest):
    """Tests for the defer() wrapper's TaskState bookkeeping."""

    def test_creates_state(self):
        # Deferring a task should create its parent QueueState entity.
        task_state = defer(noop)
        queue_state = models.QueueState.get_by_key_name("default")
        self.assertTrue(queue_state)
        self.assertEqual(task_state.parent().key(), queue_state.key())

    def test_unique_task_ref(self):
        # unique_until without a task_reference is rejected; a duplicate
        # reference inside the uniqueness window returns a falsy result.
        unique_until = datetime.datetime.utcnow() + datetime.timedelta(days=1)
        self.assertRaises(AssertionError, defer, noop, unique_until=unique_until)
        self.assertTrue(defer(noop, task_reference="project1", unique_until=unique_until))
        self.assertFalse(defer(noop, task_reference="project1", unique_until=unique_until))

    def test_args_repr(self):
        # Positional args are recorded as their Python-2 repr.
        task_state = defer(noop, 2, u"bår")
        self.assertEqual(task_state.deferred_args, u"(2, u'b\\xe5r')")

    def test_kwargs_repr(self):
        # The recorded kwargs repr contains only 'foo'; the underscore
        # kwarg '_bar' does not appear in it.
        task_state = defer(noop, foo="bår", _bar="foo")
        self.assertEqual(task_state.deferred_kwargs, u"{'foo': 'b\\xc3\\xa5r'}")

    def test_class_method_repr(self):
        task_state = defer(Foo().bar)
        self.assertEqual(task_state.deferred_function, u"<class 'deferred_manager.tests.Foo'>.bar")

    def test_module_func_repr(self):
        task_state = defer(noop)
        self.assertEqual(task_state.deferred_function, u"deferred_manager.tests.noop")

    def test_builtin_func_repr(self):
        task_state = defer(map)
        self.assertEqual(task_state.deferred_function, u"map")

    def test_callable_obj_func_repr(self):
        task_state = defer(Foo)
        self.assertEqual(task_state.deferred_function, u"deferred_manager.tests.Foo")

    def test_builtin_method_repr(self):
        task_state = defer(datetime.datetime.utcnow)
        self.assertEqual(task_state.deferred_function, u"<type 'datetime.datetime'>.utcnow")
class ModelTaskTests(unittest.TestCase):
    """Tests for QueueState model defaults."""

    def test_queue_state(self):
        # A freshly-constructed QueueState carries the default limits.
        queue_state = models.QueueState(name="default")
        self.assertEqual(queue_state.retry_limit, 7)
        self.assertEqual(queue_state.age_limit, 2*24*3600) # 2 days
class HandlerTests(BaseTest):
    """Tests driving the deferred-task WSGI handler end to end."""

    def make_request(self, path, task_name, queue_name, headers=None, environ=None, **kwargs):
        # Build a request shaped like an App Engine task-queue push:
        # task metadata travels in X-AppEngine-* headers, and
        # SERVER_SOFTWARE=Development marks the dev environment.
        # NOTE(review): the *path* parameter is currently ignored — requests
        # are always built for '/'; confirm whether that is intentional.
        request_headers = {
            "X-AppEngine-TaskName": task_name,
            "X-AppEngine-QueueName": queue_name,
            'X-AppEngine-TaskExecutionCount': kwargs.pop('retries', 0)
        }
        if headers:
            request_headers.update(headers)
        request_environ = {
            "SERVER_SOFTWARE": "Development"
        }
        if environ:
            request_environ.update(environ)
        return webapp2.Request.blank('/', environ=request_environ, headers=request_headers, **kwargs)

    def test_success(self):
        # A successful run returns 200 and the state is marked complete.
        task_state = defer(noop)
        noop_pickle = deferred.serialize(noop)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 200)
        task_state = self.reload(task_state)
        self.assertTrue(task_state.task_name)
        self.assertTrue(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertFalse(task_state.is_permanently_failed)

    def test_failure(self):
        # A failing task returns 500 and stays incomplete.
        task_state = defer(noop_fail)
        noop_pickle = deferred.serialize(noop_fail)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 500)
        task_state = self.reload(task_state)
        self.assertFalse(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertFalse(task_state.is_permanently_failed)

    def test_retry_success(self):
        # The execution-count header is recorded as retry_count.
        task_state = defer(noop)
        noop_pickle = deferred.serialize(noop)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle, retries=2)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 200)
        task_state = self.reload(task_state)
        self.assertEqual(task_state.retry_count, 2)
        self.assertTrue(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertFalse(task_state.is_permanently_failed)

    def test_retry_max_retries(self):
        task_state = defer(noop_fail)
        # give the task an old age. tasks must fail both the retry and age conditions (if specified)
        task_state.first_run = datetime.datetime.utcnow() - datetime.timedelta(days=2)
        task_state.put()
        noop_pickle = deferred.serialize(noop_fail)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle, retries=8)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 500)
        task_state = self.reload(task_state)
        self.assertEqual(task_state.retry_count, 8)
        self.assertTrue(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertTrue(task_state.is_permanently_failed)

    def test_permanent_failure(self):
        # PermanentTaskFailure returns 200 (no further retries) while
        # flagging the state as permanently failed.
        task_state = defer(noop_permanent_fail)
        noop_pickle = deferred.serialize(noop_permanent_fail)
        request = self.make_request("/", task_state.task_name, 'default', POST=noop_pickle)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 200)
        task_state = self.reload(task_state)
        self.assertEqual(task_state.retry_count, 0)
        self.assertTrue(task_state.is_complete)
        self.assertFalse(task_state.is_running)
        self.assertTrue(task_state.is_permanently_failed)

    def test_no_task_state(self):
        # A task with no recorded TaskState still executes and returns 200.
        noop_pickle = deferred.serialize(noop)
        request = self.make_request("/", 'task1', 'default', POST=noop_pickle)
        response = request.get_response(application)
        self.assertEqual(response.status_int, 200)
| 7,057 | 2,349 |
from django.db import models
class Category(models.Model):
    """Category model."""

    # Human-readable category name; uniqueness is enforced at the DB level.
    name = models.CharField(max_length=100, unique=True)

    class Meta:
        # Default queryset ordering: alphabetical by name.
        ordering = ('name',)

    def __str__(self):
        return self.name
| 240 | 75 |
"""MIT License
Copyright (c) 2018 rongjiewang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import sys
import argparse
from bucket import encodeBucketClass, decodeBucketClass
from deBruijnGraph import encodeGraphClass, decodeGraphClass
def args_check(args):
    """Validate the parsed command-line options.

    Exits with an explanatory message when a required option (mode,
    input, or output) is missing; returns None when everything needed
    is present.
    """
    if not (args.encode or args.decode):
        sys.exit("you must give a -e or -d for encode/decode")
    if not (args.input or args.paired):
        sys.exit("you must give a file input with -i input for single end data or -p -1 input1 -2 input2 for paired-end data")
    if not args.output:
        sys.exit("you must give a file output with -o output")
    return
def main(args):
    """Entry point: validate options, then run the encoder or decoder
    pipeline and exit."""
    args_check(args)
    if args.encode:
        # Encoding: bucket the reads first, then encode the de Bruijn graph.
        bucket = encodeBucketClass(args.input, args.output, args.paired,
                                   args.input1, args.input2, args.kmer,
                                   args.lossless, args.verbose)
        bucket.encode()
        graph = encodeGraphClass(args.output, args.paired, args.kmer,
                                 args.verbose, bucket.sequenceTableSave)
        del bucket
        graph.encode()
        del graph
        sys.exit()
    # Decoding: restore bucket metadata, then walk the graph to emit reads.
    bucket = decodeBucketClass(args.input, args.output, args.verbose)
    bucket.decode()
    graph = decodeGraphClass(args.input, args.output, bucket.paired,
                             bucket.readNum, bucket.bucketIndexLen,
                             bucket.lossless, bucket.verbose)
    graph.loadBucktData(bucket.bucketIndex, bucket.bucketCov,
                        bucket.readIndexPos, bucket.readrc, bucket.readN,
                        bucket.readLen, bucket.readOrder)
    del bucket
    graph.decode()
    del graph
    sys.exit()
if __name__ == '__main__':
    # Command-line interface for the BdBG encoder/decoder.
    parser = argparse.ArgumentParser(description = 'BdBG')
    parser.add_argument("-e", "--encode",
        help="encoding",action="store_true")
    parser.add_argument("-d", "--decode",
        help="decoding",action="store_true")
    parser.add_argument("-i", "--input",type=str,
        help="inputFile")
    parser.add_argument("-o", "--output",
        help="outputFile")
    parser.add_argument("-p", "--paired",
        help="paired-end flag",action="store_true")
    parser.add_argument("-1", "--input1",
        help="paired-end file1")
    parser.add_argument("-2", "--input2",
        help="paired-end file2")
    # BUG FIX: help text said "default:ture" (typo for "true").
    parser.add_argument("-l", "--lossless",
        help="keep the reads orders, default:false, \
                if encode paired-end files, default:true ",action="store_true")
    parser.add_argument("-k", "--kmer",type=int, default=15,
        help="kmer size for bucket and de Bruijn graph, default=15")
    parser.add_argument("-v","--verbose", action="store_true",
        help="verbose information")
    args = parser.parse_args()
    main(args)
| 4,023 | 1,251 |
"""
This module provides the interactive Python console.
"""
import sys
import traceback
from browser import window
class Console:
    """
    A class providing a console widget. The constructor accepts
    a domnode which should be a textarea and it takes it over
    and turns it into a python interactive console.
    """

    # Canned text shown by the credits()/copyright()/license() helpers.
    _credits = """ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.
"""
    _copyright = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com
All Rights Reserved.
Copyright (c) 2001-2013 Python Software Foundation.
All Rights Reserved.
Copyright (c) 2000 BeOpen.com.
All Rights Reserved.
Copyright (c) 1995-2001 Corporation for National Research Initiatives.
All Rights Reserved.
Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.
All Rights Reserved.
"""
    _license = """Copyright (c) 2012, Pierre Quentel pierre.quentel@gmail.com
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer. Redistributions in binary
form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided
with the distribution.
Neither the name of the <ORGANIZATION> nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""

    def __init__(self, elem):
        """Take over the textarea *elem* and turn it into an interactive
        Python console bound to key and click events."""
        self._elem = elem
        # NOTE(review): assigning __repr__ on bound methods works under
        # Brython; CPython bound methods reject attribute assignment.
        self.credits.__repr__ = lambda: Console._credits
        self.copyright.__repr__ = lambda: Console._copyright
        self.license.__repr__ = lambda: Console._license
        # stdout/stderr redirection bookkeeping (see _redirect_out).
        self._redirected = False
        self._oldstdout = None
        self._oldstderr = None
        # Command history and cursor into it (for up/down arrows).
        self.history = []
        self.current = 0
        self._status = "main" # or "block" if typing inside a block
        self.current_line = ""
        # execution namespace
        self.editor_ns = {
            'credits': self.credits,
            'copyright': self.copyright,
            'license': self.license,
            '__name__': '__console__',
        }
        self._elem.bind('keypress', self.my_key_press)
        self._elem.bind('keydown', self.my_key_down)
        self._elem.bind('click', self.cursor_to_end)
        # Banner: interpreter version plus browser identification.
        version = sys.implementation.version
        self._elem.value = "Brython %s.%s.%s on %s %s\n%s\n>>> " % (version[0],
            version[1],
            version[2],
            window.navigator.appName,
            window.navigator.appVersion,
            'Type "copyright()", "credits()" or "license()" for more information.')
        self._elem.focus()
        self.cursor_to_end()

    def add_to_ns(self, key, value):
        """
        Adds key to the console's local scope. Think:
        ```
        key=value
        ```
        """
        self.editor_ns[key] = value

    def _redirect_out(self):
        # Toggle: swap sys.stdout/sys.stderr between this console and the
        # originals. Called once before and once after each execution.
        if self._redirected:
            sys.__console__ = False
            sys.stdout = self._oldstdout
            sys.stderr = self._oldstderr
            self._redirected = False
        else:
            sys.__console__ = True
            self._oldstdout = sys.stdout
            self._oldstderr = sys.stderr
            sys.stdout = self
            sys.stderr = self
            self._redirected = True

    def credits(self):
        """Print the standard Python credits text."""
        self.write(self._credits)

    def copyright(self):
        """Print the copyright text."""
        self.write(self._copyright)

    def license(self):
        """Print the license text."""
        self.write(self._license)

    def write(self, data):
        # File-like sink: append text to the textarea (used as stdout/stderr).
        self._elem.value += str(data)

    def cursor_to_end(self, *_args):
        # Move the caret to the very end and keep it visible.
        pos = len(self._elem.value)
        self._elem.setSelectionRange(pos, pos)
        self._elem.scrollTop = self._elem.scrollHeight

    def get_col(self, _area):
        """
        returns the column position of the cursor
        """
        sel = self._elem.selectionStart
        lines = self._elem.value.split('\n')
        for line in lines[:-1]:
            sel -= len(line) + 1
        return sel

    def my_key_press(self, event):
        """Handle printable-key events: Tab inserts spaces, Return parses
        and executes the current line or block."""
        if event.keyCode == 9: # tab key
            event.preventDefault()
            self._elem.value += "    "
        elif event.keyCode == 13: # return
            src = self._elem.value
            # Extract the text typed after the last prompt.
            if self._status == "main":
                self.current_line = src[src.rfind('>>>') + 4:]
            elif self._status == "3string":
                self.current_line = src[src.rfind('>>>') + 4:]
                self.current_line = self.current_line.replace('\n... ', '\n')
            else:
                self.current_line = src[src.rfind('...') + 4:]
            if self._status == 'main' and not self.current_line.strip():
                # Empty line at the main prompt: just issue a new prompt.
                self._elem.value += '\n>>> '
                event.preventDefault()
                return
            self._elem.value += '\n'
            self.history.append(self.current_line)
            self.current = len(self.history)
            if self._status == "main" or self._status == "3string":
                try:
                    self._redirect_out()
                    # Try as an expression first so the result can be echoed
                    # and bound to '_'.
                    _ = self.editor_ns['_'] = eval(self.current_line, self.editor_ns)
                    if _ is not None:
                        self.write(repr(_) + '\n')
                    self._elem.value += '>>> '
                    self._status = "main"
                except IndentationError:
                    # Start of an indented block: switch to block mode.
                    self._elem.value += '... '
                    self._status = "block"
                except SyntaxError as msg:
                    if str(msg) == 'invalid syntax : triple string end not found' or \
                            str(msg).startswith('Unbalanced bracket'):
                        # Unfinished triple-quoted string or bracket: keep
                        # collecting continuation lines.
                        self._elem.value += '... '
                        self._status = "3string"
                    elif str(msg) == 'eval() argument must be an expression':
                        # A statement, not an expression: run it with exec.
                        try:
                            self._redirect_out()
                            exec(self.current_line, self.editor_ns)
                        except:
                            # pylint: disable=bare-except; any exception can happen here
                            traceback.print_exc(self)
                        finally:
                            self._redirect_out()
                        self._elem.value += '>>> '
                        self._status = "main"
                    elif str(msg) == 'decorator expects function':
                        self._elem.value += '... '
                        self._status = "block"
                    else:
                        traceback.print_exc(self)
                        self._elem.value += '>>> '
                        self._status = "main"
                # pylint: disable=bare-except; any exception can happen here
                except:
                    traceback.print_exc(self)
                    self._elem.value += '>>> '
                    self._status = "main"
                finally:
                    self._redirect_out()
            elif self.current_line == "": # end of block
                # Blank line ends the block: strip the '... ' continuation
                # prefixes and execute the whole block.
                block = src[src.rfind('>>>') + 4:].splitlines()
                block = [block[0]] + [b[4:] for b in block[1:]]
                block_src = '\n'.join(block)
                # status must be set before executing code in globals()
                self._status = "main"
                try:
                    self._redirect_out()
                    _ = exec(block_src, self.editor_ns)
                    if _ is not None:
                        print(repr(_))
                # pylint: disable=bare-except; any exception can happen here
                except:
                    traceback.print_exc(self)
                finally:
                    self._redirect_out()
                self._elem.value += '>>> '
            else:
                # Still inside a block: prompt for the next line.
                self._elem.value += '... '
            self.cursor_to_end()
            event.preventDefault()

    def my_key_down(self, event):
        """Handle control keys: protect the prompt from cursor movement
        and backspace, and walk the history with the arrow keys."""
        if event.keyCode == 37: # left arrow
            sel = self.get_col(self._elem)
            if sel < 5:
                # Do not allow moving into the 4-char prompt.
                event.preventDefault()
                event.stopPropagation()
        elif event.keyCode == 36: # line start
            # Home jumps to just after the prompt, not column 0.
            pos = self._elem.selectionStart
            col = self.get_col(self._elem)
            self._elem.setSelectionRange(pos - col + 4, pos - col + 4)
            event.preventDefault()
        elif event.keyCode == 38: # up
            if self.current > 0:
                pos = self._elem.selectionStart
                col = self.get_col(self._elem)
                # remove self.current line
                self._elem.value = self._elem.value[:pos - col + 4]
                self.current -= 1
                self._elem.value += self.history[self.current]
            event.preventDefault()
        elif event.keyCode == 40: # down
            if self.current < len(self.history) - 1:
                pos = self._elem.selectionStart
                col = self.get_col(self._elem)
                # remove self.current line
                self._elem.value = self._elem.value[:pos - col + 4]
                self.current += 1
                self._elem.value += self.history[self.current]
            event.preventDefault()
        elif event.keyCode == 8: # backspace
            src = self._elem.value
            lstart = src.rfind('\n')
            if (lstart == -1 and len(src) < 5) or (len(src) - lstart < 6):
                # Would delete into the prompt: block it.
                event.preventDefault()
                event.stopPropagation()
| 10,820 | 3,013 |
"""
Test setting a breakpoint on an overloaded function by name.
"""
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBreakpointOnOverload(TestBase):
    """Set breakpoints on each overload of a function by signature name."""

    mydir = TestBase.compute_mydir(__file__)

    def check_breakpoint(self, name):
        """Create a breakpoint for *name* and verify it resolves to exactly
        one location whose function name contains *name*."""
        bkpt = self.target.BreakpointCreateByName(name)
        self.assertEqual(bkpt.num_locations, 1, "Got one location")
        location_address = bkpt.locations[0].GetAddress()
        self.assertTrue(location_address.function.IsValid(), "Got a real function")
        # On Windows the resolved function name includes the return value,
        # so the breakpoint name is checked by containment rather than
        # strict equality.
        self.assertIn(name, location_address.function.name, "Got the right name")

    def test_break_on_overload(self):
        """Each overload of a_function should be individually breakable."""
        self.build()
        self.target = lldbutil.run_to_breakpoint_make_target(self)
        for signature in ("a_function(int)",
                          "a_function(double)",
                          "a_function(int, double)",
                          "a_function(double, int)"):
            self.check_breakpoint(signature)
| 1,333 | 405 |
from tkinter import *
# ---------------------------- CONSTANTS ------------------------------- #
PINK = "#e2979c"        # title colour during short breaks
RED = "#e7305b"         # title colour during the long break
GREEN = "#9bdeac"       # title colour during work, and check-mark colour
YELLOW = "#f7f5dd"      # window/canvas background
FONT_NAME = "Courier"
WORK_MIN = 25           # work session length, minutes
SHORT_BREAK_MIN = 5     # short break length, minutes
LONG_BREAK_MIN = 20     # long break length (after every 4th work session), minutes
is_counting = False     # True while a countdown is in progress
reps = 0                # number of started work/break phases
timer = None            # handle from window.after(), kept for cancellation
# ---------------------------- TIMER RESET ------------------------------- #
def reset_timer():
    """Cancel any pending countdown and put the UI back in its idle state."""
    global timer, reps, is_counting
    pending = timer
    timer = None
    reps = 0
    is_counting = False
    if pending is not None:
        window.after_cancel(pending)
    # Restore the idle title, the clock face, and clear the check marks.
    lb_title.config(text="Timer", fg=GREEN)
    canvas.itemconfig(count_text, text="00:00")
    lb_checks["text"] = ""
# ---------------------------- TIMER MECHANISM ------------------------------- #
# ---------------------------- TIMER MECHANISM ------------------------------- #
def start_timer():
    """Begin the next pomodoro phase (work, short break, or long break).

    Phases alternate work/break; every 8th phase is the long break.
    A click while a countdown is already running is ignored.
    """
    global is_counting, reps
    if is_counting:
        # BUG FIX: the original used 'pass' here, which fell through and
        # stacked an extra countdown on every Start click, making the
        # clock tick faster than once per second.
        return
    reps += 1
    if reps % 8 == 0:
        lb_title.config(text="Break", fg=RED)
        minutes = LONG_BREAK_MIN
    elif reps % 2 == 0:
        lb_title.config(text="Break", fg=PINK)
        minutes = SHORT_BREAK_MIN
    else:
        lb_title.config(text="Work", fg=GREEN)
        minutes = WORK_MIN
    is_counting = True
    count_down(minutes * 60)


# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #
def count_down(count):
    """Update the clock face and reschedule itself once per second until
    *count* reaches zero, then finish the current phase."""
    global reps, timer, is_counting
    minutes, seconds = divmod(count, 60)
    canvas.itemconfig(count_text, text=f"{minutes}:{seconds:02d}")
    if count > 0:
        timer = window.after(1000, count_down, count - 1)
        return
    # Phase finished: allow start_timer() to run again (required by the
    # re-entry guard above, both for the auto-started break and for a
    # manual Start click after a break ends).
    is_counting = False
    if reps % 2 == 1:
        # A work session just ended: record a check mark and start the break.
        lb_checks["text"] += "✅"
        start_timer()  # Break
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Pomodoro")
window.config(padx=100, pady=50, bg=YELLOW)
# Tomato image with the countdown text drawn on top of it.
canvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)
image = PhotoImage(file="tomato.png")  # NOTE: must exist in the working directory
canvas.create_image(100, 112, image=image)
count_text = canvas.create_text(100, 130, text="00:00", fill="white", font=(FONT_NAME, 35, "bold"))
bt_start = Button(text="Start", highlightthickness=0, command=start_timer)
bt_reset = Button(text="Reset", highlightthickness=0, command=reset_timer)
lb_checks = Label(text="", fg=GREEN, bg=YELLOW)  # one check mark per finished work session
lb_title = Label(text="Timer", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 30, "bold"))
# Grid layout: title / tomato canvas / Start+Reset buttons / check marks.
lb_title.grid(column=1, row=0)
canvas.grid(column=1, row=1)
bt_start.grid(column=0, row=2)
bt_reset.grid(column=2, row=2)
lb_checks.grid(column=1, row=3)
window.mainloop()
| 2,611 | 998 |
# tests for narps code
# - currently these are all just smoke tests
import pytest
import os
import pandas
from narps import Narps
from AnalyzeMaps import mk_overlap_maps,\
mk_range_maps, mk_std_maps,\
mk_correlation_maps_unthresh, analyze_clusters,\
plot_distance_from_mean, get_thresh_similarity
from MetaAnalysis import get_thresholded_Z_maps
from ThreshVoxelStatistics import get_thresh_voxel_stats,\
get_zstat_diagnostics
from GetMeanSimilarity import get_similarity_summary
# Use a fixed base dir so that we can
# access the results as a circleci artifact
@pytest.fixture(scope="session")
def narps():
    """Session-scoped Narps object with data and metadata loaded from the
    fixed /tmp/data base dir (kept fixed for circleci artifact access)."""
    basedir = '/tmp/data'
    assert os.path.exists(basedir)
    narps = Narps(basedir)
    narps.load_data()
    narps.metadata = pandas.read_csv(
        os.path.join(narps.dirs.dirs['metadata'], 'all_metadata.csv'))
    return(narps)
# tests
# AnalyzeMaps
def test_mk_overlap_maps(narps):
    """Smoke test for mk_overlap_maps."""
    # create maps showing overlap of thresholded images
    mk_overlap_maps(narps)
def test_mk_range_maps(narps):
    """Smoke test for mk_range_maps."""
    mk_range_maps(narps)
def test_mk_std_maps(narps):
    """Smoke test for mk_std_maps."""
    mk_std_maps(narps)
def test_unthresh_correlation_analysis(narps):
    """Smoke test for the unthresholded correlation/cluster analysis."""
    # combine these into a single test
    # since they share data
    corr_type = 'spearman'
    dendrograms, membership = mk_correlation_maps_unthresh(
        narps, corr_type=corr_type)
    _ = analyze_clusters(
        narps,
        dendrograms,
        membership,
        corr_type=corr_type)
def test_plot_distance_from_mean(narps):
    """Smoke test for plot_distance_from_mean."""
    plot_distance_from_mean(narps)
def test_get_thresh_similarity(narps):
    """Smoke test for get_thresh_similarity."""
    get_thresh_similarity(narps)
# this was created for ALE but we do it earlier here
def test_thresh_zmap(narps):
    """Smoke test: create thresholded versions of the Z maps."""
    # create thresholded versions of Z maps
    narps = get_thresholded_Z_maps(
        narps)
def test_thresh_voxel_stats(narps):
    """Smoke test for zstat diagnostics and thresholded voxel statistics."""
    get_zstat_diagnostics(narps)
    get_thresh_voxel_stats(narps.basedir)
def test_mean_similarity(narps):
    """Smoke test for get_similarity_summary."""
    _ = get_similarity_summary(narps)
| 1,983 | 705 |
"""
Convenience functions for plotting DTCWT-related objects.
"""
from __future__ import absolute_import
import numpy as np
from matplotlib.pyplot import *
# Public API of this module.
__all__ = (
    'overlay_quiver',
)
def overlay_quiver(image, vectorField, level, offset):
    """Overlays nicely coloured quiver plot of complex coefficients over original full-size image,
    providing a useful phase visualisation.

    :param image: array holding grayscale values on the interval [0, 255] to display
    :param vectorField: a single [MxNx6] numpy array of DTCWT coefficients
    :param level: the transform level (1-indexed) of *vectorField*.
    :param offset: Offset for DTCWT coefficients (typically 0.5)

    .. note::
        The *level* parameter is 1-indexed meaning that the third level has
        index "3". This is unusual in Python but is kept for compatibility
        with similar MATLAB routines.

    Should also work with other types of complex arrays (e.g., SLP
    coefficients), as long as the format is the same.

    .. codeauthor:: R. Anderson, 2005 (MATLAB)
    .. codeauthor:: S. C. Forshaw, 2014 (Python)
    """
    # Make sure imshow() uses the full range of greyscale values.
    # NOTE: the deprecated hold(True)/hold(False) calls were removed; since
    # matplotlib 2.0 artists accumulate on the current axes by default and
    # pyplot.hold was removed entirely in matplotlib 3.0.
    imshow(image, cmap=cm.gray, clim=(0, 255))

    # Set up the grid for the quiver plot: g1 holds row indices, g2 column
    # indices, both with the shape of one subband.
    g1 = np.kron(np.arange(0, vectorField[:,:,0].shape[0]).T, np.ones((1, vectorField[:,:,0].shape[1])))
    g2 = np.kron(np.ones((vectorField[:,:,0].shape[0], 1)), np.arange(0, vectorField[:,:,0].shape[1]))

    # Choose a colourmap. 'spectral' was removed in matplotlib 2.2; fall back
    # to its renamed replacement 'nipy_spectral' on modern versions.
    cmap = getattr(cm, 'spectral', None) or cm.nipy_spectral

    # Global maximum coefficient magnitude (single reduction replaces the
    # former quadruple-nested np.max calls).
    scalefactor = np.max(np.abs(vectorField))
    vectorField[-1, -1, :] = scalefactor

    for sb in range(0, vectorField.shape[2]):
        thiscolour = cmap(sb / float(vectorField.shape[2]))  # Select colour for this subband
        hq = quiver(g2*(2**level) + offset*(2**level), g1*(2**level) + offset*(2**level), np.real(vectorField[:,:,sb]), \
            np.imag(vectorField[:,:,sb]), color=thiscolour, scale=scalefactor*2**level)
        # str() replaces np.str, which was removed from NumPy (deprecated in
        # 1.20, removed in 1.24).
        quiverkey(hq, 1.05, 1.00-0.035*sb, 0, "subband " + str(sb), coordinates='axes', color=thiscolour, labelcolor=thiscolour, labelpos='E')

    return hq
| 2,529 | 892 |
import grokcore.component as grok
from grokcore.component.interfaces import IContext
import grokcore.view as view
from zope.interface import Interface
from grokcore.registries.tests.registries.interfaces import IExample
class MyExample(grok.GlobalUtility):
    """Global utility registered under the name 'global', providing IExample."""
    grok.name('global')
    grok.implements(IExample)
class Page(view.View):
    """View registered for any IContext object; renders a fixed marker string."""
    grok.context(IContext)

    def render(self):
        return u"I Am grabbed from GSM"
| 428 | 138 |
from graphdata.shared.shared1D import AuxPlotLabelLL1D
from graphdata.shared.shared1D import ProcessData1D
from graphdata.shared.shared1D import LoadData1D
from graphdata.shared.figsizes import LogLogSize
from graphdata.shared.shared import ExtendDictionary
from graphdata.shared.shared import ProcessComplex
from graphdata import plt
from graphdata import np
from graphdata import configs
def loglog(filename, figsize=None, decades=None, xlim=None, ylim=None,
           complex_op=None, overwrite=False, **kwargs):
    """
    Loglog graph of 1D data file using Matplotlib plt.loglog

    INPUTS:
        filename: string
            name of file containing 1D data to be plotted
        figsize: tuple (width,height)
            size of figure to be displayed
        xlim: np.array
            x-axis limits of graph
        ylim: np.array
            y-axis limits of graph
        decades: int
            number of decades of data below maximum to plot
        complex_op: string
            operation applied to complex-valued data before plotting
        overwrite: bool
            add lines to an existing plt.loglog graph if it exists
            (default is False which will create graph on a new figure)
        **kwargs: dictionary
            (optional) arguments to be passed onto plt.loglog plot

    OUTPUTS:
        fig : result of plt.loglog (list of matplotlib Line2D handles
            for the plotted line)
    """
    x, y, auxDict = LoadData1D(filename)
    if complex_op is not None:
        y = ProcessComplex(complex_op, y)
    # Fill unspecified options from the global config / the data itself.
    if decades is None:
        decades = configs._G['decades']
    if xlim is None:
        xlim = [x[0], x[-1]]
    if ylim is None:
        ylim = [np.min(y), np.max(y)]
    figsize = LogLogSize(figsize)
    ExtendDictionary(auxDict, figsize=figsize, decades=decades,
                     xlim=xlim, ylim=ylim, overwrite=overwrite)
    x, y, auxDict = ProcessData1D(x, y, auxDict)
    # BUG FIX: figsize was redundantly recomputed here a second time even
    # though LogLogSize had already been applied above.
    if overwrite:
        # Reuse the shared "LogLog" figure, cycling the line style when it
        # already exists so successive curves are distinguishable.
        labs = plt.get_figlabels()
        if "LogLog" not in labs:
            configs.defaultLS()
        else:
            configs.toggleLS()
        plt.figure("LogLog", figsize=figsize)
    else:
        configs.defaultLS()
        plt.figure(figsize=figsize)
    fig = plt.loglog(x, y, configs.LS, **kwargs)
    plt.grid(True)
    AuxPlotLabelLL1D(auxDict)
    if xlim:
        plt.xlim(xlim)
    if ylim:
        plt.ylim(ylim)
    plt.ion()
    plt.show()
    return fig
| 2,481 | 794 |
import datetime
import json
import requests
import pytest
import random
from tests.suites.test_payment import TestPayment
from tests.utilities.settings import get_settings, get_test_data, setup_access_data
@pytest.mark.incremental
@pytest.mark.parametrize('login_session', setup_access_data('PREMIUM', ['BCSC']), indirect=True, scope='class')
@pytest.mark.usefixtures('setup_data')
class TestPremiumAccount:
    """Incremental end-to-end API flow for a PREMIUM account logged in via BCSC.

    Each test builds on state stored on ``testing_config`` by earlier tests
    (user id, terms version, org id), so the definition order matters.
    """

    __test__ = True

    def test_get_user_profile(self, testing_config, logger):
        """Test get user profile. After login, the user should be created in db."""
        response = requests.get(f'{testing_config.auth_api_url}/users/@me',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200
        response_json = response.json()
        # Remember the keycloak GUID for the settings/notifications tests below.
        testing_config.user_id = response_json.get('keycloakGuid')

    def test_get_last_terms(self, testing_config, logger):
        """Get the latest version of the terms of use."""
        response = requests.get(f'{testing_config.auth_api_url}/documents/termsofuse',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200
        response_json = response.json()
        testing_config.terms_version = response_json.get('versionId')

    def test_accept_terms(self, testing_config, logger):
        """Accept the latest terms of use fetched by the previous test."""
        input_data = json.dumps({'termsversion': testing_config.terms_version, 'istermsaccepted': True})
        response = requests.patch(f'{testing_config.auth_api_url}/users/@me',
                                  headers={'Authorization': f'Bearer {testing_config.keycloak_token}',
                                           'Content-Type': 'application/json'},
                                  data=input_data)
        assert response.status_code == 200

    def test_get_user_profile_after_terms(self, testing_config, logger):
        """Re-fetch the user profile after accepting the terms.

        Renamed: this was previously a second ``test_get_user_profile``
        definition, which silently shadowed the first one in the class body
        so only one of the two ever ran.
        """
        response = requests.get(f'{testing_config.auth_api_url}/users/@me',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200
        response_json = response.json()
        testing_config.user_id = response_json.get('keycloakGuid')

    @pytest.mark.skip_login_as('bcsc_member')
    def test_link_bcol_account(self, testing_config, logger):
        """Test link bcol account."""
        load_data = random.sample(get_settings().BCOL_USERS, 1)[0]
        input_data = json.dumps({
            'userId': load_data.username,
            'password': load_data.password
        })
        response = requests.post(f'{testing_config.auth_api_url}/bcol-profiles',
                                 headers={'Authorization': f'Bearer {testing_config.keycloak_token}',
                                          'Content-Type': 'application/json'},
                                 data=input_data)
        assert response.status_code == 200
        # Previously assigned to an unused variable; the bare call still
        # verifies the response body parses as JSON.
        response.json()

    @pytest.mark.skip_login_as('bcsc_member')
    def test_create_account(self, testing_config, logger):
        """Test create account."""
        input_data = json.dumps(get_test_data(testing_config.test_data['org']))
        response = requests.post(f'{testing_config.auth_api_url}/orgs',
                                 headers={'Authorization': f'Bearer {testing_config.keycloak_token}',
                                          'Content-Type': 'application/json'},
                                 data=input_data)
        assert response.status_code == 201
        response_json = response.json()
        testing_config.org_id = response_json.get('id')

    def test_create_user_profile(self, testing_config, logger):
        """Test create user profile (contact information)."""
        input_data = json.dumps(get_test_data(testing_config.test_data['user_profile']))
        response = requests.post(f'{testing_config.auth_api_url}/users/contacts',
                                 headers={'Authorization': f'Bearer {testing_config.keycloak_token}',
                                          'Content-Type': 'application/json'},
                                 data=input_data)
        assert response.status_code == 201

    def test_get_account(self, testing_config, logger):
        """Test get account."""
        response = requests.get(f'{testing_config.auth_api_url}/orgs/{testing_config.org_id}',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200

    def test_get_user_settings(self, testing_config, logger):
        """Test get user settings."""
        response = requests.get(f'{testing_config.auth_api_url}/users/{testing_config.user_id}/settings',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200

    def test_get_user_notifications(self, testing_config, logger):
        """Test get user notifications."""
        response = requests.get(f'{testing_config.auth_api_url}/users/{testing_config.user_id}/org/{testing_config.org_id}/notifications',
                                headers={'Authorization': f'Bearer {testing_config.keycloak_token}'})
        assert response.status_code == 200
| 5,387 | 1,529 |
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.workrecord_1_0 import models as dingtalkworkrecord__1__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
    """Auto-generated DingTalk ``workrecord_1.0`` API client.

    Thin wrapper over the Alibaba Cloud OpenAPI Tea runtime. This file is
    generated — do not edit it by hand.
    """
    def __init__(
        self,
        config: open_api_models.Config,
    ):
        super().__init__(config)
        # No regional endpoint rule; default to the global DingTalk host
        # when the caller did not supply an endpoint.
        self._endpoint_rule = ''
        if UtilClient.empty(self._endpoint):
            self._endpoint = 'api.dingtalk.com'
    def count_work_record(
        self,
        request: dingtalkworkrecord__1__0_models.CountWorkRecordRequest,
    ) -> dingtalkworkrecord__1__0_models.CountWorkRecordResponse:
        # Convenience overload: default runtime options and empty headers.
        runtime = util_models.RuntimeOptions()
        headers = dingtalkworkrecord__1__0_models.CountWorkRecordHeaders()
        return self.count_work_record_with_options(request, headers, runtime)
    async def count_work_record_async(
        self,
        request: dingtalkworkrecord__1__0_models.CountWorkRecordRequest,
    ) -> dingtalkworkrecord__1__0_models.CountWorkRecordResponse:
        # Async counterpart of count_work_record with default options.
        runtime = util_models.RuntimeOptions()
        headers = dingtalkworkrecord__1__0_models.CountWorkRecordHeaders()
        return await self.count_work_record_with_options_async(request, headers, runtime)
    def count_work_record_with_options(
        self,
        request: dingtalkworkrecord__1__0_models.CountWorkRecordRequest,
        headers: dingtalkworkrecord__1__0_models.CountWorkRecordHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkworkrecord__1__0_models.CountWorkRecordResponse:
        """GET /v1.0/workrecord/counts with explicit headers and runtime options."""
        UtilClient.validate_model(request)
        # Only set query parameters that the caller actually provided.
        query = {}
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        # Merge caller-supplied common headers with the access token header.
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        # Deserialize the raw ROA response into the typed response model.
        return TeaCore.from_map(
            dingtalkworkrecord__1__0_models.CountWorkRecordResponse(),
            self.do_roarequest('CountWorkRecord', 'workrecord_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/workrecord/counts', 'json', req, runtime)
        )
    async def count_work_record_with_options_async(
        self,
        request: dingtalkworkrecord__1__0_models.CountWorkRecordRequest,
        headers: dingtalkworkrecord__1__0_models.CountWorkRecordHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkworkrecord__1__0_models.CountWorkRecordResponse:
        """Async variant of count_work_record_with_options (same request shape)."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.user_id):
            query['userId'] = request.user_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkworkrecord__1__0_models.CountWorkRecordResponse(),
            await self.do_roarequest_async('CountWorkRecord', 'workrecord_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/workrecord/counts', 'json', req, runtime)
        )
| 3,960 | 1,211 |
from __future__ import annotations
import argparse
import pickle
import numpy as np
import json
import logging
import math
import glob
import random
import os
import sys
import datetime
import time
from functools import partial
from pathlib import Path
from collections import defaultdict
from shutil import copyfile
from tqdm import tqdm
import sklearn.model_selection
import sklearn.metrics
import torch
from ..extension.clmbr import *
from .. import timeline
from .. import ontology
from .. import labeler
from .dataset import DataLoader, convert_patient_data
from .prediction_model import CLMBR
from .trainer import Trainer
from .utils import read_config, read_info, device_from_config
from ..featurizer import ColumnValue, Featurizer
from ..splits import read_time_split
from ..utils import OnlineStatistics, set_up_logging
from .opt import OpenAIAdam
from typing import Mapping, Any, Dict, Optional, Tuple
def check_dir_for_overwrite(dirname: str) -> bool:
    """Return True if *dirname* already holds model artifacts.

    A directory counts as non-empty when it contains any ``*.json`` file or
    a ``checkpoints`` entry — the files written by the create-info / train
    entry points.
    """
    json_artifacts = glob.glob(os.path.join(dirname, "*.json"))
    checkpoint_dir = glob.glob(os.path.join(dirname, "checkpoints"))
    return bool(json_artifacts) or bool(checkpoint_dir)
def create_info_program() -> None:
    """Entry point for ``clmbr_create_info``.

    Parses CLI arguments, computes dataset summary statistics via
    ``create_info`` (provided by the native clmbr extension), removes
    excluded patients, logs code-frequency statistics, and writes
    ``info.json`` into the save directory.
    """
    parser = argparse.ArgumentParser(
        description="Precompute training data summary statistics etc for CLMBR experiments"
    )
    parser.add_argument(
        "input_data_dir",
        type=str,
        help="Location of the dataset extract to be used for CLMBR training",
    )
    parser.add_argument(
        "save_dir", type=str, help="Location where model info is to be saved",
    )
    parser.add_argument(
        "train_end_date", type=str, help="The end date for training"
    )
    parser.add_argument(
        "val_end_date",
        type=str,
        help="The end date for validation. Should be later than the end date for training",
    )
    parser.add_argument(
        "--min_patient_count",
        type=int,
        default=100,
        help="Only keep statistics on codes/terms that appear for this many patients (default 100)",
    )
    parser.add_argument(
        "--excluded_patient_file",
        type=str,
        help="A file containing a list of patients to exclude from training. "
        "Any patient ID you plan to use for finetuning / evaluation should be "
        "listed in this file. If not provided, exclude_patient_ratio must be specified.",
        default=None,
    )
    parser.add_argument(
        "--exclude_patient_ratio",
        type=float,
        default=None,
        help="Ratio of patients to exclude from pre-training between 0 and 1."
        " If provided, excluded patient IDs will "
        "be randomly selected and written out to a file "
        '"excluded_patient_ids.txt" in the save directory. If not '
        "provided, excluded_patient_file must be specified.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=3451235,
        help="Random seed (default 3451235)",
    )
    args = parser.parse_args()
    # save_dir is positional, so argparse guarantees it is set; the guard is
    # kept as a defensive check.
    if args.save_dir is None:
        print("Error - must specify save_dir", file=sys.stderr)
        exit(1)
    else:
        save_dir = args.save_dir
    os.makedirs(save_dir, exist_ok=True)
    set_up_logging(os.path.join(save_dir, "create_info.log"))
    logging.info("Args: %s", str(args))
    # Refuse to clobber a directory that already holds results.
    if check_dir_for_overwrite(save_dir):
        print(
            "Fatal error - model dir {} is not empty".format(save_dir),
            file=sys.stderr,
        )
        logging.info("Fatal error - model dir {} is not empty".format(save_dir))
        exit(1)
    ontologies_path = os.path.join(args.input_data_dir, "ontology.db")
    timelines_path = os.path.join(args.input_data_dir, "extract.db")
    train_end_date = datetime.datetime.fromisoformat(args.train_end_date)
    val_end_date = datetime.datetime.fromisoformat(args.val_end_date)
    # The validation window must end strictly after the training window (see
    # the val_end_date help text). The previous check only rejected exact
    # equality (and had a typo, "creat", in its message).
    if val_end_date <= train_end_date:
        logging.info(
            "Could not create info: validation end date must be later than the train end date"
        )
        exit(1)
    result = json.loads(
        create_info(
            timelines_path,
            ontologies_path,
            train_end_date,
            val_end_date,
            args.min_patient_count,
        )
    )
    # Record provenance and the date windows used for the split.
    result["extract_dir"] = args.input_data_dir
    result["extract_file"] = "extract.db"
    result["train_start_date"] = "1900-01-01"
    result["train_end_date"] = args.train_end_date
    result["val_start_date"] = args.train_end_date
    result["val_end_date"] = args.val_end_date
    result["seed"] = args.seed
    result["min_patient_count"] = args.min_patient_count

    def remove_pids(a, x):
        # Drop (patient_id, length) pairs whose id is in the exclusion set x.
        return [(p, c) for p, c in a if p not in x]

    if args.excluded_patient_file is not None:
        # Exclusions provided explicitly, one integer patient ID per line.
        with open(args.excluded_patient_file) as f:
            pids = {int(a) for a in f}
        result["train_patient_ids_with_length"] = remove_pids(
            result["train_patient_ids_with_length"], pids
        )
        result["val_patient_ids_with_length"] = remove_pids(
            result["val_patient_ids_with_length"], pids
        )
        logging.info(
            "Removed %d patient IDs from file %s"
            % (len(pids), args.excluded_patient_file)
        )
    elif args.exclude_patient_ratio is not None:
        assert 0 < args.exclude_patient_ratio < 1
        train_pids = {x[0] for x in result["train_patient_ids_with_length"]}
        val_pids = {x[0] for x in result["val_patient_ids_with_length"]}
        all_pids = train_pids.union(val_pids)
        excluded_pids = set(
            random.sample(
                list(all_pids),
                int(round(len(all_pids) * args.exclude_patient_ratio)),
            )
        )
        result["train_patient_ids_with_length"] = remove_pids(
            result["train_patient_ids_with_length"], excluded_pids
        )
        result["val_patient_ids_with_length"] = remove_pids(
            result["val_patient_ids_with_length"], excluded_pids
        )
        # Persist the random exclusion so downstream evaluation can reuse it.
        with open(
            os.path.join(args.save_dir, "excluded_patient_ids.txt"), "w"
        ) as f:
            for pid in excluded_pids:
                f.write("%d\n" % pid)
        logging.info(
            "Removed %d patient IDs using ratio %f"
            % (len(excluded_pids), args.exclude_patient_ratio)
        )

    def count_frequent_items(counts: Mapping[Any, int], threshold: int) -> int:
        # Number of distinct items occurring at least `threshold` times.
        return len(
            {item for item, count in counts.items() if count >= threshold}
        )

    # Same statistic at several thresholds (was five copy-pasted calls).
    for threshold in (10, 25, 50, 100, 1000):
        logging.info(
            "Codes with >= {} {}".format(
                threshold, count_frequent_items(result["code_counts"], threshold)
            )
        )
    logging.info("Number codes: {}".format(len(result["code_counts"])))
    logging.info("Number valid codes: {}".format(len(result["valid_code_map"])))
    with open(os.path.join(args.save_dir, "info.json"), "w") as fp:
        json.dump(result, fp)
def train_model() -> None:
    """Entry point for CLMBR pre-training.

    Reads the ``info.json`` produced by ``clmbr_create_info``, writes a
    ``config.json`` into the model directory, builds the timeline dataset,
    and runs the Trainer.
    """
    parser = argparse.ArgumentParser(
        description="Representation Learning Experiments"
    )
    # paths
    parser.add_argument(
        "model_dir",
        type=str,
        help="Location where model logs and weights should be saved",
    )
    parser.add_argument(
        "info_dir",
        type=str,
        help="Location where `clmbr_create_info` results were saved",
    )
    # NOTE(review): named --extract_dir but declared as a boolean flag whose
    # help text mentions the doctorai task, and it is never read below —
    # looks like a copy/paste leftover; confirm intent.
    parser.add_argument(
        "--extract_dir",
        action="store_true",
        help="Use the doctorai task definition",
    )
    # model specification
    parser.add_argument(
        "--size",
        default=768,
        type=int,
        help="Dimensionality of the output embeddings",
    )
    parser.add_argument(
        "--encoder_type",
        default="gru",
        choices=["gru", "lstm", "transformer"],
        help='the sequence encoder module type (default "gru")',
    )
    parser.add_argument("--no_tied_weights", default=False, action="store_true")
    parser.add_argument(
        "--rnn_layers",
        default=1,
        type=int,
        help='number of recurrent layers to use if encoder_type is "gru" or '
        '"lstm" (default 1), not used if encoder_type is "transformer"',
    )
    parser.add_argument(
        "--dropout",
        default=0,
        type=float,
        help="dropout percentage (default 0)",
    )
    # optimization specification
    parser.add_argument(
        "--batch_size", type=int, default=500, help="Batch size (default 500)"
    )
    parser.add_argument(
        "--eval_batch_size",
        type=int,
        default=2000,
        help="Batch size during evaluation (default 2000)",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=50,
        help="Number of training epochs (default 50)",
    )
    parser.add_argument(
        "--warmup_epochs",
        type=int,
        default=2,
        help="Number of warmup epochs (default 2)",
    )
    parser.add_argument(
        "--lr", type=float, default=0.01, help="learning rate (default 0.01)"
    )
    parser.add_argument(
        "--l2",
        default=0.01,
        type=float,
        help="l2 regularization strength (default 0.01)",
    )
    parser.add_argument(
        "--device",
        default="cpu",
        help='Specify whether the model should be run on CPU or GPU. Can specify a specific GPU, e.g. "cuda:0" (default "cpu")',
    )
    parser.add_argument("--code_dropout", type=float, default=0.2)
    # Day dropout added in reference to Lawrence's comment,
    # although Ethan mentioned it should be removed from the API
    parser.add_argument("--day_dropout", type=float, default=0.2)
    args = parser.parse_args()
    model_dir = args.model_dir
    os.makedirs(model_dir, exist_ok=True)
    # Refuse to clobber a directory that already holds a trained model.
    if check_dir_for_overwrite(model_dir):
        print(
            "Fatal error - model dir {} is not empty".format(model_dir),
            file=sys.stderr,
        )
        logging.info(
            "Fatal error - model dir {} is not empty".format(model_dir)
        )
        exit(1)
    # Try to load info.json file; see create_info above for details.
    info = read_info(os.path.join(args.info_dir, "info.json"))
    copyfile(
        os.path.join(args.info_dir, "info.json"),
        os.path.join(model_dir, "info.json"),
    )
    # Split the code vocabulary: indices before the first "rare" code (count
    # below 10x min_patient_count) become num_first, the rest num_second.
    # NOTE(review): if no code is rare this stays float("inf") and is written
    # into the config as num_first — confirm downstream handling.
    first_too_small_index = float("inf")
    for code, index in info["valid_code_map"].items():
        if info["code_counts"][code] < 10 * info["min_patient_count"]:
            first_too_small_index = min(first_too_small_index, index)
    print(len(info["valid_code_map"]), flush=True)
    # Create and save config dictionary
    config = {
        "batch_size": args.batch_size,
        "eval_batch_size": args.eval_batch_size,
        "num_first": first_too_small_index,
        "num_second": len(info["valid_code_map"]) - first_too_small_index,
        "size": args.size,
        "lr": args.lr,
        "dropout": args.dropout,
        "encoder_type": args.encoder_type,
        "rnn_layers": args.rnn_layers,
        "tied_weights": not args.no_tied_weights,
        "l2": args.l2,
        "b1": 0.9,
        "b2": 0.999,
        "e": 1e-8,
        "epochs_per_cycle": args.epochs,
        "warmup_epochs": args.warmup_epochs,
        "code_dropout": args.code_dropout,
        "day_dropout": args.day_dropout,
        "model_dir": os.path.abspath(model_dir),
    }
    with open(os.path.join(model_dir, "config.json"), "w") as outfile:
        json.dump(config, outfile)
    set_up_logging(os.path.join(model_dir, "train.log"))
    logging.info("Args: %s", str(args))
    # PatientTimelineDataset presumably comes from the clmbr extension
    # star-import at the top of the file.
    dataset = PatientTimelineDataset(
        os.path.join(info["extract_dir"], "extract.db"),
        os.path.join(info["extract_dir"], "ontology.db"),
        os.path.join(args.info_dir, "info.json"),
    )
    random.seed(info["seed"])
    model = CLMBR(config, info).to(torch.device(args.device))
    trainer = Trainer(model)
    trainer.train(dataset, use_pbar=False)
def debug_model() -> None:
    """Dump per-day predictions of a trained CLMBR model for inspection.

    Loads the "best" checkpoint from --model_dir and, for each single-patient
    validation batch, prints the patient's source codes, the codes the model
    was given, and the scored prediction targets sorted by label then word.
    """
    parser = argparse.ArgumentParser(
        description="Representation Learning Experiments"
    )
    parser.add_argument(
        "--model_dir", type=str, help="Override where model is saved"
    )
    args = parser.parse_args()
    model_dir = args.model_dir
    config = read_config(os.path.join(model_dir, "config.json"))
    info = read_info(os.path.join(model_dir, "info.json"))
    use_cuda = torch.cuda.is_available()
    model = CLMBR(config, info).to(device_from_config(use_cuda=use_cuda))
    # Checkpoint is loaded onto CPU first, then moved by load_state_dict.
    model_data = torch.load(os.path.join(model_dir, "best"), map_location="cpu")
    model.load_state_dict(model_data)
    # PatientTimelineDataset presumably comes from the clmbr extension
    # star-import at the top of the file.
    loaded_data = PatientTimelineDataset(
        os.path.join(info["extract_dir"], "extract.db"),
        os.path.join(info["extract_dir"], "ontology.db"),
        os.path.join(model_dir, "info.json"),
    )
    ontologies = ontology.OntologyReader(
        os.path.join(info["extract_dir"], "ontology.db")
    )
    timelines = timeline.TimelineReader(
        os.path.join(info["extract_dir"], "extract.db")
    )
    # Invert valid_code_map: model code index -> human-readable word, with a
    # sentinel "None" entry one past the last valid index.
    reverse_map = {}
    for b, a in info["valid_code_map"].items():
        word = ontologies.get_dictionary().get_word(b)
        reverse_map[a] = word
    reverse_map[len(info["valid_code_map"])] = "None"
    # Dropouts are disabled and batch_size=1 so each batch is one patient.
    with DataLoader(
        loaded_data,
        threshold=config["num_first"],
        is_val=True,
        batch_size=1,
        seed=info["seed"],
        day_dropout=0,
        code_dropout=0,
    ) as batches:
        for batch in batches:
            # Skip batches with no prediction targets.
            if batch["task"][0].size()[0] == 0:
                continue
            values, non_text_loss = model(batch)
            values = torch.sigmoid(values)
            patient_id = int(batch["pid"][0])
            patient = timelines.get_patient(patient_id)
            original_day_indices = batch["day_index"][0]
            # Tuple layout of batch["task"] / batch["rnn"] is defined by the
            # extension's DataLoader — unpacked positionally below.
            indices, targets, seen_before, _, _, _ = batch["task"]
            day_indices = indices[:, 0]
            word_indices = indices[:, 1]
            (
                all_non_text_codes,
                all_non_text_offsets,
                all_non_text_codes1,
                all_non_text_offsets1,
                all_day_information,
                all_positional_encoding,
                all_lengths,
            ) = batch["rnn"]
            all_non_text_codes = list(all_non_text_codes)
            # Append a terminal offset so per-day slices below are well formed.
            all_non_text_offsets = list(all_non_text_offsets) + [
                len(all_non_text_codes)
            ]
            print(patient_id, batch["pid"], original_day_indices)
            all_seen = set()
            for i, index in enumerate(original_day_indices):
                day = patient.days[index]
                print("------------------")
                print(patient_id, i, index, day.age / 365, day.date)
                # Words actually recorded for this day (via subword expansion).
                words = set()
                for code in day.observations:
                    for subword in ontologies.get_subwords(code):
                        words.add(ontologies.get_dictionary().get_word(subword))
                        all_seen.add(
                            ontologies.get_dictionary().get_word(subword)
                        )
                print("Source", words)
                # Words the model was fed for this day.
                wordsA = set()
                if (i + 1) < len(all_non_text_offsets):
                    for code in all_non_text_codes[
                        all_non_text_offsets[i] : all_non_text_offsets[i + 1]
                    ]:
                        wordsA.add(reverse_map[code.item()])
                print("Given", wordsA)
                # Select this day's prediction targets and score them.
                day_mask = day_indices == i
                w = word_indices[day_mask]
                p = values[day_mask]
                t = targets[day_mask]
                f = seen_before[day_mask]
                items = [
                    (
                        t_i.item(),
                        reverse_map[w_i.item()],
                        p_i.item(),
                        reverse_map[w_i.item()] in all_seen,
                        w_i.item(),
                        f_i.item(),
                    )
                    for p_i, t_i, w_i, f_i in zip(p, t, w, f)
                ]
                # Positive targets first, then alphabetical by word.
                items.sort(key=lambda x: (-x[0], x[1]))
                for a in items:
                    print(a)
| 16,677 | 5,342 |
# Print the first 10 terms of an arithmetic progression read from stdin.
first = int(input('primeiro termo: '))
step = int(input('razao: '))
tenth = first + 9 * step  # value of the 10th term
for term in range(first, tenth + step, step):
    print(f' {term}', end=' ->')
print('acabou')
| 190 | 81 |
"""
A script that partitions the dataset for transferability scenarios
"""
# basics
import numpy as np
from PIL import Image
# torch...
import torch
# custom libs
import utils
# ------------------------------------------------------------------------------
# Misc. functions
# ------------------------------------------------------------------------------
def update_numpy(acc, term, func):
    """Fold *term* into the accumulator *acc* using *func*.

    On the first call (acc is None) the term itself becomes the accumulator;
    afterwards ``func((acc, term))`` combines them (e.g. np.vstack/np.hstack).
    """
    return term if acc is None else func((acc, term))
def get_class_wise_lists(n_classes_cifar10, return_test=False):
    """Collect CIFAR10 data per class: a list with class k's data at index k.

    Returns a list of (train_data, train_labels) tuples, plus a parallel list
    of (test_data, test_labels) tuples when return_test is True.

    NOTE(review): this module imports ``utils`` but calls ``af.*`` — as
    written ``af`` is undefined here; presumably the import should be
    ``import utils as af``. Confirm before running.
    """
    if not return_test:
        class_wise_dataset = []
        for n_class in range(n_classes_cifar10):
            train_data, train_labels, _, _ = af.get_cifar10_class_data(n_class) # don't use
            class_wise_dataset.append((train_data, train_labels))
        return class_wise_dataset
    else:
        class_wise_dataset = []
        test_class_wise_dataset = []
        for n_class in range(n_classes_cifar10):
            train_data, train_labels, test_data, test_labels = af.get_cifar10_class_data(n_class) # don't use
            class_wise_dataset.append((train_data, train_labels))
            test_class_wise_dataset.append((test_data, test_labels))
        return class_wise_dataset, test_class_wise_dataset
# ------------------------------------------------------------------------------
# Scenario related...
# ------------------------------------------------------------------------------
def scenario_1_split(int_percentages=None):
    """
    Scenario 1) Train CIFAR10 models that use 10%, 25%, 50% of the full training set.
    Chooses p% of data in each class (and corresponding labels)
    Parameter int_percentages contains percentages as integers, NOT FLOATS!
    Returns:
        - percent_loaders (dict): each key p% contains an af.ManualData object containing p% of dataset (p% from each label)
            * Loader data contains p% of images (p% of class 0, ..., p% of class 9) - consecutive
            * Loader labels (np.ndarray): contains p% of labels (p% 0s, ..., p% 9s) - consecutive
    """
    # (Docstring moved above: it previously sat after the seed call, so it
    # was a plain no-op string rather than the function's docstring.)
    # NOTE(review): this module imports ``utils`` but calls ``af.*`` —
    # presumably it should be ``import utils as af``; confirm before running.
    np.random.seed(0)  # deterministic subsampling across runs
    if int_percentages is None:
        int_percentages = [10, 25, 50, 100]
    print('Running scenario_1_split\n')
    n_classes_cifar10 = 10
    # get a list containing CIFAR10 data class by class (class k at index k)
    class_wise_dataset = get_class_wise_lists(n_classes_cifar10)
    percent_loaders = {}
    for p in int_percentages:
        subset_data = None
        subset_labels = None
        for n_class in range(n_classes_cifar10):
            crt_train_data, crt_train_labels = class_wise_dataset[n_class]
            count = crt_train_data.shape[0]
            how_many_2_choose = int(count * p / 100.0)
            # class-balanced random subset without replacement
            indexes = np.random.choice(np.arange(count), how_many_2_choose, replace=False)
            subset_data = update_numpy(acc=subset_data, term=np.copy(crt_train_data[indexes]), func=np.vstack)
            subset_labels = update_numpy(acc=subset_labels, term=np.copy(crt_train_labels[indexes]), func=np.hstack)
        # end for n_class
        print(f'p={p}, data: {subset_data.shape}, labels: {subset_labels.shape}\n')
        percent_loaders[p] = af.ManualData(data=subset_data, labels=subset_labels)
    # end for p
    # restore the project-wide RNG seed used elsewhere
    np.random.seed(af.get_random_seed())
    return percent_loaders
def scenario_2_split(int_classes=None):
    """
    Scenario 2) Split CIFAR10 training set into non-overlapping 5 classes - 5 classes, 6 - 6 and 7 - 7.
    Parameter int_classes_left:
        - each value c is used to generate the two datasets that contain c classes
    Returns:
        - percent_loaders (dict): each key c contains a pair of af.ManualData meaning ( Dataset w c classes, another dataset c classes)
            * Loader data contains p% of images (p% of class 0, ..., p% of class 9) - consecutive
            * Loader labels (np.ndarray): contains p% of labels (p% 0s, ..., p% 9s) - consecutive
    """
    # (Docstring moved above: it previously sat after the seed call, so it
    # was a plain no-op string rather than the function's docstring.)
    # NOTE(review): this module imports ``utils`` but calls ``af.*`` —
    # presumably it should be ``import utils as af``; confirm before running.
    np.random.seed(0)  # deterministic class picks across runs
    if int_classes is None:
        int_classes = [5, 6, 7]
    print('Running scenario_2_split\n')
    n_classes_cifar10 = 10
    # get a list containing CIFAR10 data class by class (class k at index k)
    class_wise_dataset, test_class_wise_dataset = get_class_wise_lists(n_classes_cifar10, return_test=True)
    all_classes = np.arange(n_classes_cifar10)
    class_loaders = {}
    for classes in int_classes:
        # c=5 -> 0 shared classes; c=6 -> 2 shared; c=7 -> 4 shared, so both
        # sides always end up with `classes` classes total.
        num_class_overlap = 2*(classes - 5)
        class_indexes_overlap = np.random.choice(all_classes, num_class_overlap, replace=False)
        left_unique_classes = np.random.choice([x for x in all_classes if x not in class_indexes_overlap], classes-num_class_overlap, replace=False)
        right_unique_classes = [x for x in all_classes if (x not in class_indexes_overlap) and (x not in left_unique_classes)]
        class_indexes_left = np.array(list(left_unique_classes) + list(class_indexes_overlap))
        class_indexes_right = np.array(list(right_unique_classes) + list(class_indexes_overlap))
        print(class_indexes_left)
        print(class_indexes_right)
        subset_data_left, subset_labels_left = None, None
        subset_data_right, subset_labels_right = None, None
        subset_test_data_left, subset_test_labels_left = None, None
        subset_test_data_right, subset_test_labels_right = None, None
        # Fresh consecutive labels (0..c-1) for each side.
        label_left = 0
        label_right = 0
        for n_class in all_classes:
            crt_train_data, crt_train_labels = class_wise_dataset[n_class]
            crt_test_data, crt_test_labels = test_class_wise_dataset[n_class]
            if n_class in class_indexes_left:
                new_train_labels = np.ones(crt_train_labels.shape) * label_left # we have to relabel the dataset because pytorch expects labels as 0,1,2,3,...
                subset_data_left = update_numpy(acc=subset_data_left, term=np.copy(crt_train_data), func=np.vstack)
                subset_labels_left = update_numpy(acc=subset_labels_left, term=np.copy(new_train_labels), func=np.hstack)
                new_test_labels = np.ones(crt_test_labels.shape) * label_left
                subset_test_data_left = update_numpy(acc=subset_test_data_left, term=np.copy(crt_test_data), func=np.vstack)
                subset_test_labels_left = update_numpy(acc=subset_test_labels_left, term=np.copy(new_test_labels), func=np.hstack)
                label_left += 1
            if n_class in class_indexes_right:
                new_train_labels = np.ones(crt_train_labels.shape) * label_right # we have to relabel the dataset because pytorch expects labels as 0,1,2,3,...
                subset_data_right = update_numpy(acc=subset_data_right, term=np.copy(crt_train_data), func=np.vstack)
                subset_labels_right = update_numpy(acc=subset_labels_right, term=np.copy(new_train_labels), func=np.hstack)
                new_test_labels = np.ones(crt_test_labels.shape) * label_right
                subset_test_data_right = update_numpy(acc=subset_test_data_right, term=np.copy(crt_test_data), func=np.vstack)
                subset_test_labels_right = update_numpy(acc=subset_test_labels_right, term=np.copy(new_test_labels), func=np.hstack)
                label_right += 1
        # end for n_class
        print(f'{classes}: train - data-left: {subset_data_left.shape}, labels-left: {subset_labels_left.shape}, data-right: {subset_data_right.shape}, labels-right: {subset_labels_right.shape}\n')
        print(f'{classes}: test - data-left: {subset_test_data_left.shape}, labels-left: {subset_test_labels_left.shape}, data-right: {subset_test_data_right.shape}, labels-right: {subset_test_labels_right.shape}\n')
        loaders_left = (af.ManualData(data=subset_data_left, labels=subset_labels_left), af.ManualData(data=subset_test_data_left, labels=subset_test_labels_left))
        loaders_right = (af.ManualData(data=subset_data_right, labels=subset_labels_right), af.ManualData(data=subset_test_data_right, labels=subset_test_labels_right))
        class_loaders[classes] = (loaders_left, loaders_right)
    np.random.seed(af.get_random_seed())
    # end for class_left, class_right
    return class_loaders
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 17:49:07 2020
@author: Amir
"""
import email
import imaplib
from email.header import decode_header
import re
def gmail_ckeck(email_address, password):
    """Poll a Gmail inbox until a one-time-password (OTP) email arrives.

    Repeatedly logs in over IMAP, fetches unseen messages, and collects all
    6-digit codes found in text/plain bodies; the loop stops once a message
    with the subject 'One-time Password (OTP) Confirmation Email' is seen.

    Parameters
    ----------
    email_address : str
        Gmail address to log into.
    password : str
        Account (or app-specific) password.

    Returns
    -------
    list
        The list of 6-digit matches from the last scanned message body.
        (Raises IndexError if the OTP email contained no text/plain codes —
        kept from the original behavior.)
    """
    # Initial login validates the credentials before entering the poll loop.
    mail = imaplib.IMAP4_SSL('imap.gmail.com')
    (retcode, capabilities) = mail.login(email_address, password)
    mail.list()
    mail.select('inbox')
    flag = False
    # TODO: consider a short time.sleep() per iteration; this loop currently
    # reconnects to Gmail as fast as possible.
    while 1:
        otp_list = []
        mail = imaplib.IMAP4_SSL('imap.gmail.com')
        (retcode, capabilities) = mail.login(email_address, password)
        mail.list()
        mail.select('inbox')
        status, messages = mail.search(None, '(UNSEEN)')
        if messages[0].decode("utf-8") == '':
            flag = False
        else:
            msg_ids = messages[0].decode("utf-8").split(' ')
            for msg_id in msg_ids:
                typ, data = mail.fetch(msg_id, '(RFC822)')
                for response in data:
                    if isinstance(response, tuple):
                        msg = email.message_from_bytes(response[1])
                        subject = decode_header(msg["Subject"])[0][0]
                        if isinstance(subject, bytes):
                            subject = subject.decode()
                        if subject == 'One-time Password (OTP) Confirmation Email':
                            flag = True
                        if msg.is_multipart():
                            for part in msg.walk():  # iterate over email parts
                                content_type = part.get_content_type()
                                # Reset per part: previously a failed decode
                                # left `body` holding the previous part's text
                                # (or unbound on the first part).
                                body = None
                                try:
                                    body = part.get_payload(decode=True).decode()
                                except (AttributeError, UnicodeDecodeError, LookupError):
                                    # Non-text or undecodable part: skip it.
                                    pass
                                if content_type == "text/plain" and body is not None:
                                    otp_list.append(re.findall(r'(\d{6})', body))
                        else:
                            content_type = msg.get_content_type()
                            body = msg.get_payload(decode=True).decode()
                            if content_type == "text/plain":
                                otp_list.append(re.findall(r'(\d{6})', body))
        if flag:
            break
    mail.logout()
    return otp_list[-1]
| 2,878 | 771 |
# coding=utf-8
import numpy as np
import imageio
from gym import spaces
import tkinter as tk
from PIL import Image, ImageTk
import matplotlib.pyplot as plt
import time
CELL, BLOCK, AGENT_GOAL, OPPONENT_GOAL, AGENT, OPPONENT = range(6)
WIN, LOSE = 5, -5
UP, RIGHT, DOWN, LEFT, HOLD = range(5)
UNIT = 40
class Soccer(tk.Tk, object):
    """A 7x7 grid-soccer environment drawn with Tkinter.

    Cell codes in ``playground``: 0 empty, 1 wall, 2 the goal the agent
    scores in (right edge), 3 the goal the opponent scores in (left edge).
    The agent is drawn red, the opponent blue, and the ball as a white oval
    on whichever player currently keeps it. Positions are (row, col).
    """
    playground = [1, 1, 1, 1, 1, 1, 1,
                  1, 0, 0, 0, 0, 0, 1,
                  3, 0, 0, 0, 0, 0, 2,
                  3, 0, 0, 0, 0, 0, 2,
                  3, 0, 0, 0, 0, 0, 2,
                  1, 0, 0, 0, 0, 0, 1,
                  1, 1, 1, 1, 1, 1, 1]
    # action id -> (row delta, col delta)
    action_map = {
        UP: np.array([-1, 0]),
        RIGHT: np.array([0, 1]),
        DOWN: np.array([1, 0]),
        LEFT: np.array([0, -1]),
        HOLD: np.array([0, 0])}

    def __init__(self):
        super(Soccer, self).__init__()
        self.size = 7
        self.agent = np.array([3, 1])
        self.opponent = np.array([3, 5])
        self.grids = np.array(self.playground).reshape(self.size, self.size)
        self.agent_keep_ball = False
        self.action_space = [UP, RIGHT, DOWN, LEFT, HOLD]
        self.n_actions = len(self.action_space)
        self.n_features = 5
        self.visualize()
        # low high to observe
        #self.observation_space = spaces.Discrete(7 * 7 * 2)

    def _get_state(self):
        """Observation: (agent row, agent col, opp row, opp col, flag) / 10.

        The flag is 0 while the agent keeps the ball, 1 otherwise (encoding
        kept from the original; shared by step() and reset()).
        """
        s_ = [self.agent[0], self.agent[1], self.opponent[0], self.opponent[1]]
        s_.append(0 if self.agent_keep_ball else 1)
        return np.array(s_[:5]) / 10

    def step(self, act_a, act_o):
        """Advance one tick given the agent's and opponent's actions.

        Returns (observation, reward, done); reward is WIN/LOSE only on a
        scoring move, otherwise 0.
        """
        new_pos_a = self.agent + self.action_map[act_a]
        new_pos_o = self.opponent + self.action_map[act_o]
        reward, done = 0, False
        # opponent wins by stepping into the left goal while holding the ball
        if self.grids[tuple(new_pos_o)] == 3 and not self.agent_keep_ball:
            reward = LOSE
            done = True
        # agent wins by stepping into the right goal while holding the ball
        if self.grids[tuple(new_pos_a)] == 2 and self.agent_keep_ball:
            reward = WIN
            done = True
        # moves into walls or goal cells are cancelled
        if self.grids[tuple(new_pos_a)] in (1, 2, 3):
            new_pos_a = self.agent
        if self.grids[tuple(new_pos_o)] in (1, 2, 3):
            new_pos_o = self.opponent
        # collision on the same non-wall cell: possession changes hands
        if np.array_equal(new_pos_a, new_pos_o) and self.grids[tuple(new_pos_a)] != 1:
            self.agent_keep_ball = not self.agent_keep_ball
        self.agent = new_pos_a
        self.opponent = new_pos_o
        # redraw both players and the ball at their new cells (x=col, y=row)
        self.canvas.delete(self.agent_rect)
        self.canvas.delete(self.opp_rect)
        self.agent_rect = self.canvas.create_rectangle(self.agent[1] * UNIT, self.agent[0] * UNIT, (self.agent[1] + 1) * UNIT, (self.agent[0] + 1) * UNIT, fill='red')
        self.opp_rect = self.canvas.create_rectangle(self.opponent[1] * UNIT, self.opponent[0] * UNIT, (self.opponent[1] + 1) * UNIT, (self.opponent[0] + 1) * UNIT, fill='blue')
        self.canvas.delete(self.ball_rect)
        if self.agent_keep_ball:
            self.ball_rect = self.canvas.create_oval(self.agent[1] * UNIT, self.agent[0] * UNIT, (self.agent[1] + 1) * UNIT, (self.agent[0] + 1) * UNIT, fill='white')
        else:
            self.ball_rect = self.canvas.create_oval(self.opponent[1] * UNIT, self.opponent[0] * UNIT, (self.opponent[1] + 1) * UNIT, (self.opponent[0] + 1) * UNIT, fill='white')
        return self._get_state(), reward, done

    # reset position and ball
    def reset(self):
        """Restore start positions; the opponent begins with the ball."""
        self.agent = np.array([3, 1])
        self.opponent = np.array([3, 5])
        self.agent_keep_ball = False
        self.update()
        return self._get_state()

    # render array
    def render(self):
        """Return the 49-cell board as a flat array (4/5 = players, +2 = ball)."""
        m = np.copy(self.grids)
        m[tuple(self.agent)] = 4
        m[tuple(self.opponent)] = 5
        if self.agent_keep_ball:
            m[tuple(self.agent)] += 2
        else:
            m[tuple(self.opponent)] += 2
        self.update()
        return m.reshape(49)

    # render img
    def visualize(self):
        """Build the Tk canvas: grid lines, walls, goals, players, ball."""
        self.canvas = tk.Canvas(self, bg='white',
                                height=self.size * UNIT,
                                width=self.size * UNIT)
        # create grids
        for c in range(0, self.size * UNIT, UNIT):
            x0, y0, x1, y1 = c, 0, c, self.size * UNIT
            self.canvas.create_line(x0, y0, x1, y1)
        for r in range(0, self.size * UNIT, UNIT):
            x0, y0, x1, y1 = 0, r, self.size * UNIT, r
            self.canvas.create_line(x0, y0, x1, y1)
        m = np.copy(self.grids)
        m[tuple(self.agent)] = 4
        m[tuple(self.opponent)] = 5
        # walls black, goal cells white, pitch (and players' cells) green
        for j in range(self.size):
            for i in range(self.size):
                if m[j, i] == 1: self.canvas.create_rectangle(i * UNIT, j * UNIT, (i + 1) * UNIT, (j + 1) * UNIT, fill='black')
                elif m[j, i] == 2 or m[j, i] == 3: self.canvas.create_rectangle(i * UNIT, j * UNIT, (i + 1) * UNIT, (j + 1) * UNIT, fill='white')
                elif m[j, i] == 0 or m[j, i] == 4 or m[j, i] == 5: self.canvas.create_rectangle(i * UNIT, j * UNIT, (i + 1) * UNIT, (j + 1) * UNIT, fill='green')
        self.agent_rect = self.canvas.create_rectangle(self.agent[1] * UNIT, self.agent[0] * UNIT, (self.agent[1] + 1) * UNIT, (self.agent[0] + 1) * UNIT, fill='red')
        self.opp_rect = self.canvas.create_rectangle(self.opponent[1] * UNIT, self.opponent[0] * UNIT, (self.opponent[1] + 1) * UNIT, (self.opponent[0] + 1) * UNIT, fill='blue')
        if self.agent_keep_ball:
            # BUG FIX: previously used agent[0] for both corners' x/y
            # (agent[0], agent[0], agent[1]+1, agent[1]+1), drawing the ball
            # off the agent's cell; now (x=col, y=row) matches step().
            self.ball_rect = self.canvas.create_oval(self.agent[1] * UNIT, self.agent[0] * UNIT, (self.agent[1] + 1) * UNIT, (self.agent[0] + 1) * UNIT, fill='white')
        else:
            self.ball_rect = self.canvas.create_oval(self.opponent[1] * UNIT, self.opponent[0] * UNIT, (self.opponent[1] + 1) * UNIT, (self.opponent[0] + 1) * UNIT, fill='white')
        # pack all
        self.canvas.pack()
if __name__ == '__main__':
    # Demo driver: play a fixed 6-step scripted game between agent and opponent.
    env = Soccer()
    env.reset()
    # agent strategy
    agent_actions = [RIGHT, RIGHT, UP, RIGHT, RIGHT, RIGHT]
    # opponent strategy, you can initialize it randomly
    opponent_actions = [UP, LEFT, LEFT, LEFT, LEFT, LEFT, LEFT]
    # zip stops at the shorter list, so only the first 6 opponent moves are used
    for a_a, a_o in zip(agent_actions, opponent_actions):
        env.render()
        env.step(a_a, a_o)
        time.sleep(1)
    #env.after(100, run_maze)
    #env.mainloop()
    # env.render()
| 6,637 | 2,625 |
# Originated from https://github.com/amdegroot/ssd.pytorch
from .augmentations import SSDAugmentation
| 103 | 35 |
"""
Basic moderation utilities for Birb.
"""
from .staff import CheckMods
from .actions import ModActions
def setup(bot):
    """discord.py extension entry point: attach the moderation cogs to *bot*."""
    for cog in (CheckMods(), ModActions()):
        bot.add_cog(cog)
| 184 | 66 |
import os

# Twitter API and database credentials, sourced from the environment so no
# secret is committed to the repository. Unset variables resolve to None.
API_KEY = os.environ.get('API_KEY')
API_SECRET = os.environ.get('API_SECRET')
ACCESS_TOKEN = os.environ.get('ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = os.environ.get('ACCESS_TOKEN_SECRET')
POSTGRES_PASSWORD = os.environ.get('POSTGRES_PASSWORD')
| 227 | 106 |
class FrontEnd(object):
    """View-layer configuration: branding strings plus the Bootswatch theme
    stylesheet CDN link for the active theme."""

    # theme label -> hosted Bootswatch 3.4.1 stylesheet
    _THEME_URLS = {
        "cyborg": "https://stackpath.bootstrapcdn.com/bootswatch/3.4.1/cyborg/bootstrap.min.css",
        "slate": "https://stackpath.bootstrapcdn.com/bootswatch/3.4.1/slate/bootstrap.min.css",
    }

    def __init__(self):
        self.theme = "slate"
        self.logo = ""  # path from static/
        self.app_name = "Open Prose Metrics"
        self.report_title = "Results"
        self.theme_cdn = self.bootswatch_url(self.theme)

    def bootswatch_url(self, label):
        """Return the CDN URL for *label*, or '' for unknown theme names."""
        return self._THEME_URLS.get(label, "")
| 605 | 199 |
# -*- coding: utf-8 -*-
"""
test_jid
----------------------------------
Tests for `vexmpp.xmpp.jid` module.
"""
import unittest
from vexmpp.jid import Jid, _parse, _prep, InvalidJidError, internJid
class TestJidParsing(unittest.TestCase):
    """Tests for the low-level _parse/_prep helpers."""

    def setUp(self):
        pass

    def test_basic(self):
        """_parse splits raw strings into (user, host, resource)."""
        for raw, expected in [
            ("user@host/resource", ("user", "host", "resource")),
            ("user@host", ("user", "host", None)),
            ("host", (None, "host", None)),
            ("host/resource", (None, "host", "resource")),
            ("foo/bar@baz", (None, "foo", "bar@baz")),
            ("boo@foo/bar@baz", ("boo", "foo", "bar@baz")),
            ("boo@foo/bar/baz", ("boo", "foo", "bar/baz")),
            ("boo/foo@bar@baz", (None, "boo", "foo@bar@baz")),
            ("boo/foo/bar", (None, "boo", "foo/bar")),
            ("boo//foo", (None, "boo", "/foo")),
        ]:
            self.assertEqual(_parse(raw), expected)

    def test_noHost(self):
        """A JID with an empty host part is rejected."""
        self.assertRaises(InvalidJidError, _parse, "user@")

    def test_doubleAt(self):
        """Two adjacent '@' signs fail: '@' is not valid in the host part."""
        self.assertRaises(UnicodeError, _parse, "user@@host")

    def test_multipleAt(self):
        """A second '@' later in the string fails for the same reason."""
        self.assertRaises(UnicodeError, _parse, "user@host@host")

    # Basic fallback tests for the case mapping done by the stringprep layer
    # (twisted.words.protocols.jabber.xmpp_stringprep).
    def test_prepCaseMapUser(self):
        """The user part is case-mapped to lowercase."""
        self.assertEqual(_prep("UsEr", "host", "resource"),
                         ("user", "host", "resource"))

    def test_prepCaseMapHost(self):
        """The host part is case-mapped to lowercase."""
        self.assertEqual(_prep("user", "hoST", "resource"),
                         ("user", "host", "resource"))

    def test_prepNoCaseMapResource(self):
        """The resource part keeps its original case."""
        self.assertEqual(_prep("user", "hoST", "resource"),
                         ("user", "host", "resource"))
        self.assertNotEqual(_prep("user", "host", "Resource"),
                            ("user", "host", "resource"))

    def tearDown(self):
        pass
class TestJidObject(unittest.TestCase):
    """Behavioural tests for the Jid class."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_ctor_types(self):
        """bytes are rejected wherever a JID part may appear."""
        for bad in (b"bytes",
                    (b"user", "host", "rsrc"),
                    ("user", b"host", "rsrc"),
                    ("user", "host", b"rsrc")):
            self.assertRaises(ValueError, Jid, bad)

    def test_noneArguments(self):
        """Constructing with no arguments raises."""
        self.assertRaises(TypeError, Jid)

    def test_attributes(self):
        """user/host/resource attributes mirror the parsed parts."""
        jid = Jid("user@host/resource")
        self.assertEqual(jid.user, "user")
        self.assertEqual(jid.host, "host")
        self.assertEqual(jid.resource, "resource")

    def test_userhost(self):
        """bare drops the resource."""
        self.assertEqual("user@host", Jid("user@host/resource").bare)

    def test_userhostOnlyHost(self):
        """bare of the host/resource form is just the host."""
        self.assertEqual("host", Jid("host/resource").bare)

    def test_userhostJID(self):
        """bare_jid is the interned bare-JID object (identity, not equality)."""
        full = Jid("user@host/resource")
        bare = internJid("user@host")
        self.assertEqual(id(bare), id(full.bare_jid))

    def test_userhostJIDNoResource(self):
        """A resource-less JID is its own bare_jid."""
        jid = Jid("user@host")
        self.assertEqual(id(jid), id(jid.bare_jid))

    def test_fullHost(self):
        """String form with only a host part."""
        self.assertEqual('host', Jid((None, 'host', None)).full)

    def test_fullHostResource(self):
        """String form with host and resource."""
        self.assertEqual('host/resource', Jid((None, 'host', 'resource')).full)

    def test_fullUserHost(self):
        """String form with user and host."""
        self.assertEqual('user@host', Jid(('user', 'host', None)).full)

    def test_fullAll(self):
        """String form with all three parts."""
        self.assertEqual('user@host/resource',
                         Jid(('user', 'host', 'resource')).full)

    def test_equality(self):
        """Distinct objects with equal parts compare equal."""
        first = Jid("user@host/resource")
        second = Jid("user@host/resource")
        self.assertNotEqual(id(first), id(second))
        self.assertEqual(first, second)

    def test_equalityWithNonJIDs(self):
        """Comparing a Jid with a string must not silently succeed."""
        jid = Jid("user@host/resource")
        try:
            jid == "user@host/resource"
        except NotImplementedError:
            pass
        else:
            self.assertFalse("Jid and strings should not be comparable")

    def test_inequality(self):
        """Different users make unequal JIDs."""
        self.assertNotEqual(Jid("user1@host/resource"),
                            Jid("user2@host/resource"))

    def test_inequalityWithNonJIDs(self):
        """A Jid never equals its raw string form."""
        self.assertNotEqual(Jid("user@host/resource"), 'user@host/resource')

    def test_hashable(self):
        """Equal JIDs hash equally."""
        self.assertEqual(hash(Jid("user@host/resource")),
                         hash(Jid("user@host/resource")))

    def test_unicode(self):
        """full round-trips the textual representation."""
        self.assertEqual("user@host/resource",
                         Jid(('user', 'host', 'resource')).full)

    def test_repr(self):
        """repr embeds the full JID."""
        self.assertEqual("Jid('user@host/resource')",
                         repr(Jid(('user', 'host', 'resource'))))
| 7,140 | 2,174 |
# -*- coding: utf-8 -*-
"""
.. module:: dbu - context
:platform: Unix, Windows
:synopsis: Contexto Principal por defecto
.. moduleauthor:: Diego Gonzalez <dgonzalez.jim@gmail.com>
"""
from . import configuration
from . import models
def load_context(request):
    """Django context processor exposing the main global template variables.

    :param request: current HttpRequest (required by the processor contract;
        not otherwise used here)
    :return: dict with the test-mode flag, maintenance flag and the active
        languages (empty list if the lookup fails, e.g. before migrations)
    """
    IS_TEST_MODE = configuration.isTESTMode()
    IS_MAINTENANCE = configuration.isMaintenanceMode()
    try:
        LANGUAGES = models.Language.objects.get_active()
    except Exception:
        # was a bare `except:` — keep the best-effort fallback but stop
        # swallowing SystemExit/KeyboardInterrupt
        LANGUAGES = []
    return {
        'IS_TEST_MODE' : IS_TEST_MODE,
        'IS_MAINTENANCE' : IS_MAINTENANCE,
        'LANGUAGES' : LANGUAGES
    }
from tests.trainer.generic import std_trainer_input_1
from knodle.trainer.multi_trainer import MultiTrainer
def test_auto_train(std_trainer_input_1):
    """Smoke test: MultiTrainer trains and evaluates all four trainer flavours
    on the standard fixture without raising."""
    (
        model,
        model_input_x, rule_matches_z, mapping_rules_labels_t,
        y_labels
    ) = std_trainer_input_1
    trainers = ["majority", "snorkel", "knn", "snorkel_knn"]
    trainer = MultiTrainer(
        name=trainers,
        model=model,
        mapping_rules_labels_t=mapping_rules_labels_t,
        model_input_x=model_input_x,
        rule_matches_z=rule_matches_z,
    )
    trainer.train()
    metrics = trainer.test(model_input_x, y_labels)
    # Check whether the code ran up to here; was `assert 2 == 2` (always true)
    # with `metrics` unused — assert on the actual result instead.
    assert metrics is not None
| 689 | 246 |
from django.db import models
from django.contrib.auth.models import User
class City(models.Model):
    """A city linked to any number of users (and users to any number of cities)."""
    # NOTE(review): `default=None` has no effect on a ManyToManyField (the
    # relation lives in a join table, not a column) — candidate for removal,
    # but removing it would alter the field's deconstructed kwargs/migrations.
    user = models.ManyToManyField(User, default=None,)
    name = models.CharField(max_length=255, verbose_name="Nome da cidade")
    def __str__(self):
        # admin/display name is simply the city name
        return self.name
    class Meta:
        # Portuguese labels for the Django admin
        verbose_name = "Cidade"
        verbose_name_plural = 'Cidades'
| 368 | 115 |
import numpy as np
from numba import jit
@jit
def cholesky(in_arr, out_arr, n):
    """Write the Cholesky factor of `in_arr` into `out_arr` in place.

    `n` is unused here — presumably the matrix order, kept for a uniform
    kernel signature; confirm against the call sites.
    NOTE(review): plain `@jit` (no nopython=True) may silently fall back to
    object mode if numba cannot lower np.copyto/np.linalg.cholesky.
    """
    np.copyto(out_arr, np.linalg.cholesky(in_arr))
| 133 | 60 |
# -*- coding: utf-8 -*-
"""
doxieapi
~~~~~~~~
A Python library for the developer API of the Doxie Go Wi-Fi document scanner.
"""
from .api import DoxieScanner
__all__ = ['DoxieScanner']
| 191 | 75 |
from pathlib import Path
from django.core.management.base import BaseCommand
from wagtail.core.models import Page, Site, Locale
from django.core.files.images import ImageFile
from wagtail.images.models import Image
from wagtail_localize.models import Translation
from wagtail_localize.views.submit_translations import TranslationCreator
import home.models as models
import psycopg2
import psycopg2.extras
import json
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--host',
default='0.0.0.0',
help='IoGT V1 database host'
)
parser.add_argument(
'--port',
default='5432',
help='IoGT V1 database port'
)
parser.add_argument(
'--name',
default='postgres',
help='IoGT V1 database name'
)
parser.add_argument(
'--user',
default='postgres',
help='IoGT V1 database user'
)
parser.add_argument(
'--password',
default='',
help='IoGT V1 database password'
)
parser.add_argument(
'--media-dir',
required=True,
help='Path to IoGT v1 media directory'
)
parser.add_argument(
'--skip-locales',
action='store_true',
help='Skip data of locales other than default language'
)
def handle(self, *args, **options):
self.db_connect(options)
self.media_dir = options.get('media_dir')
self.skip_locales = options.get('skip_locales')
self.image_map = {}
self.page_translation_map = {}
self.v1_to_v2_page_map = {}
self.clear()
self.stdout.write('Existing site structure cleared')
root = Page.get_first_root_node()
self.migrate(root)
def clear(self):
models.FooterPage.objects.all().delete()
models.FooterIndexPage.objects.all().delete()
models.BannerPage.objects.all().delete()
models.BannerIndexPage.objects.all().delete()
models.Article.objects.all().delete()
models.Section.objects.all().delete()
models.SectionIndexPage.objects.all().delete()
models.HomePage.objects.all().delete()
Site.objects.all().delete()
Image.objects.all().delete()
def db_connect(self, options):
connection_string = self.create_connection_string(options)
self.stdout.write(f'DB connection string created, string={connection_string}')
self.v1_conn = psycopg2.connect(connection_string)
self.stdout.write('Connected to v1 DB')
    def __del__(self):
        """Best-effort cleanup of the v1 DB connection.

        AttributeError is expected (and deliberately ignored) when
        db_connect() never ran, e.g. the command failed before connecting.
        """
        try:
            self.v1_conn.close()
            self.stdout.write('Closed connection to v1 DB')
        except AttributeError:
            pass
def create_connection_string(self, options):
host = options.get('host', '0.0.0.0')
port = options.get('port', '5432')
name = options.get('name', 'postgres')
user = options.get('user', 'postgres')
password = options.get('password', '')
return f"host={host} port={port} dbname={name} user={user} password={password}"
    def db_query(self, q):
        """Execute *q* on the v1 DB and return an open RealDictCursor.

        The caller is responsible for closing the returned cursor. `q` must
        be trusted: it is executed verbatim (callers interpolate internal
        ids/identifiers directly into the SQL).
        """
        cur = self.v1_conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        cur.execute(q)
        return cur
    def migrate(self, root):
        """Run the full v1 -> v2 migration in dependency order."""
        # images first: pages reference them through self.image_map
        self.migrate_images()
        self.load_page_translation_map()
        home = self.create_home_page(root)
        section_index_page, banner_index_page, footer_index_page = self.create_index_pages(home)
        self.migrate_sections(section_index_page)
        # articles live under the section index too
        self.migrate_articles(section_index_page)
        # banners link to already-migrated pages, so they come after
        self.migrate_banners(banner_index_page)
        self.migrate_footers(footer_index_page)
        self.stop_translations()
        # repair any treebeard inconsistencies introduced by raw path writes
        Page.fix_tree()
def create_home_page(self, root):
sql = 'select * from core_main main join wagtailcore_page page on main.page_ptr_id = page.id'
cur = self.db_query(sql)
main = cur.fetchone()
cur.close()
home = None
if main:
home = models.HomePage(
title=main['title'],
draft_title=main['draft_title'],
seo_title=main['seo_title'],
slug=main['slug'],
live=main['live'],
latest_revision_created_at=main['latest_revision_created_at'],
first_published_at=main['first_published_at'],
last_published_at=main['last_published_at'],
)
root.add_child(instance=home)
else:
raise Exception('Could not find a main page in v1 DB')
cur.close()
cur = self.db_query('select * from wagtailcore_site')
v1_site = cur.fetchone()
cur.close()
if v1_site:
Site.objects.create(
hostname=v1_site['hostname'],
port=v1_site['port'],
root_page=home,
is_default_site=True,
site_name=v1_site['site_name'] if v1_site['site_name'] else 'Internet of Good Things',
)
else:
raise Exception('Could not find site in v1 DB')
return home
def create_index_pages(self, homepage):
section_index_page = models.SectionIndexPage(title='Sections')
homepage.add_child(instance=section_index_page)
banner_index_page = models.BannerIndexPage(title='Banners')
homepage.add_child(instance=banner_index_page)
footer_footer_page = models.FooterIndexPage(title='Footers')
homepage.add_child(instance=footer_footer_page)
return section_index_page, banner_index_page, footer_footer_page
def migrate_images(self):
cur = self.db_query('select * from wagtailimages_image')
content_type = self.find_content_type_id('wagtailimages', 'image')
for row in cur:
image_file = self.open_image_file(row['file'])
if image_file:
image = Image.objects.create(
title=row['title'],
file=ImageFile(image_file, name=row['file'].split('/')[-1]),
focal_point_x=row['focal_point_x'],
focal_point_y=row['focal_point_y'],
focal_point_width=row['focal_point_width'],
focal_point_height=row['focal_point_height'],
# uploaded_by_user='',
)
image.get_file_size()
image.get_file_hash()
tags = self.find_tags(content_type, row['id'])
if tags:
image.tags.add(*tags)
self.image_map.update({ row['id']: image })
cur.close()
self.stdout.write('Images migrated')
    def find_content_type_id(self, app_label, model):
        """Return the django_content_type id for (app_label, model).

        Both values come from code constants, not user input, so the direct
        f-string interpolation into SQL is not an injection risk here.
        """
        cur = self.db_query(f"select id from django_content_type where app_label = '{app_label}' and model = '{model}'")
        content_type = cur.fetchone()
        cur.close()
        return content_type.get('id')
def open_image_file(self, file):
file_path = Path(self.media_dir) / file
try:
return open(file_path, 'rb')
except:
self.stdout.write(f"Image file not found: {file_path}")
    def find_tags(self, content_type, object_id):
        """Return the taggit tag names attached to one object (may be empty)."""
        # both ids are integers from our own queries, so .format into the SQL
        # is not an injection risk here
        tags_query = 'select t.name from taggit_tag t join taggit_taggeditem ti on t.id = ti.tag_id where ti.content_type_id = {} and ti.object_id = {}'
        cur = self.db_query(tags_query.format(content_type, object_id))
        tags = [tag['name'] for tag in cur]
        cur.close()
        return tags
def migrate_sections(self, section_index_page):
sql = "select * " \
"from core_sectionpage csp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
"where csp.page_ptr_id = wcp.id " \
"and wcp.id = clr.page_id " \
"and clr.language_id = csl.id "
if self.skip_locales:
sql += " and locale = 'en' "
sql += 'order by wcp.path'
cur = self.db_query(sql)
section_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
section_page_translations.append(row)
else:
self.create_section(section_index_page, row)
else:
for row in section_page_translations:
section = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=row['locale'])
self.translate_page(locale=locale, page=section)
translated_section = section.get_translation_or_none(locale)
if translated_section:
translated_section.title = row['title']
translated_section.draft_title = row['draft_title']
translated_section.live = row['live']
translated_section.save(update_fields=['title', 'draft_title', 'slug', 'live'])
self.stdout.write(f"Translated section, title={row['title']}")
cur.close()
def create_section(self, section_index_page, row):
section = models.Section(
title=row['title'],
draft_title=row['draft_title'],
show_in_menus=True,
font_color='1CABE2',
slug=row['slug'],
path=section_index_page.path + row['path'][12:],
depth=row['depth'],
numchild=row['numchild'],
live=row['live'],
)
section.save()
self.v1_to_v2_page_map.update({
row['page_ptr_id']: section
})
self.stdout.write(f"saved section, title={section.title}")
def migrate_articles(self, section_index_page):
sql = "select * " \
"from core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
"where cap.page_ptr_id = wcp.id " \
"and wcp.id = clr.page_id " \
"and clr.language_id = csl.id "
if self.skip_locales:
sql += "and locale = 'en' "
sql += " and wcp.path like '000100010002%'order by wcp.path"
cur = self.db_query(sql)
article_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
article_page_translations.append(row)
else:
self.create_article(section_index_page, row)
else:
for row in article_page_translations:
article = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=row['locale'])
self.translate_page(locale=locale, page=article)
translated_article = article.get_translation_or_none(locale)
if translated_article:
translated_article.lead_image = self.image_map.get(row['image_id'])
translated_article.title = row['title']
translated_article.draft_title = row['draft_title']
translated_article.live = row['live']
translated_article.body = self.map_article_body(row['body'])
translated_article.save(update_fields=['lead_image', 'title', 'draft_title', 'slug', 'live', 'body'])
self.stdout.write(f"Translated article, title={row['title']}")
cur.close()
    def create_article(self, section_index_page, row):
        """Create one v2 Article from a v1 row; skips rows whose parent page
        was never migrated (Page.DoesNotExist on save)."""
        article = models.Article(
            lead_image=self.image_map.get(row['image_id']),
            title=row['title'],
            draft_title=row['draft_title'],
            slug=row['slug'],
            # re-root the treebeard path under the new section index
            path=section_index_page.path + row['path'][12:],
            depth=row['depth'],
            numchild=row['numchild'],
            live=row['live'],
            body=self.map_article_body(row['body']),
        )
        try:
            article.save()
            self.v1_to_v2_page_map.update({
                row['page_ptr_id']: article
            })
        except Page.DoesNotExist:
            # parent was skipped (e.g. filtered locale) — skip this article too
            self.stdout.write(f"Skipping page with missing parent: title={row['title']}")
            return
        self.stdout.write(f"saved article, title={article.title}")
def map_article_body(self, v1_body):
v2_body = json.loads(v1_body)
for block in v2_body:
if block['type'] == 'paragraph':
block['type'] = 'markdown'
return json.dumps(v2_body)
def migrate_banners(self, banner_index_page):
sql = "select * " \
"from core_bannerpage cbp, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
"where cbp.page_ptr_id = wcp.id " \
"and wcp.id = clr.page_id " \
"and clr.language_id = csl.id "
if self.skip_locales:
sql += " and locale = 'en' "
sql += ' order by wcp.path'
cur = self.db_query(sql)
banner_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
banner_page_translations.append(row)
else:
self.create_banner(banner_index_page, row)
else:
for row in banner_page_translations:
banner = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=row['locale'])
try:
self.translate_page(locale=locale, page=banner)
except:
continue
translated_banner = banner.get_translation_or_none(locale)
if translated_banner:
translated_banner.banner_image = self.image_map.get(row['banner_id'])
translated_banner.banner_link_page = self.v1_to_v2_page_map.get(row['banner_link_page_id'])
translated_banner.title = row['title']
translated_banner.draft_title = row['draft_title']
translated_banner.live = row['live']
translated_banner.save(update_fields=['banner_image', 'title', 'draft_title', 'slug', 'live'])
self.stdout.write(f"Translated banner, title={row['title']}")
cur.close()
def create_banner(self, banner_index_page, row):
banner = models.BannerPage(
banner_image=self.image_map.get(row['banner_id']),
banner_link_page=self.v1_to_v2_page_map.get(row['banner_link_page_id']),
title=row['title'],
draft_title=row['draft_title'],
slug=row['slug'],
path=banner_index_page.path + row['path'][12:],
depth=row['depth'],
numchild=row['numchild'],
live=row['live'],
banner_description=''
)
banner.save()
self.v1_to_v2_page_map.update({
row['page_ptr_id']: banner
})
self.stdout.write(f"saved banner, title={banner.title}")
def migrate_footers(self, footer_index_page):
sql = "select * " \
"from core_footerpage cfp, core_articlepage cap, wagtailcore_page wcp, core_languagerelation clr, core_sitelanguage csl " \
"where cfp.articlepage_ptr_id = cap.page_ptr_id " \
"and cap.page_ptr_id = wcp.id " \
"and wcp.id = clr.page_id " \
"and clr.language_id = csl.id "
if self.skip_locales:
sql += " and locale = 'en' "
sql += ' order by wcp.path'
cur = self.db_query(sql)
footer_page_translations = []
for row in cur:
if row['page_ptr_id'] in self.page_translation_map:
footer_page_translations.append(row)
else:
self.create_footer(footer_index_page, row)
else:
for row in footer_page_translations:
footer = self.v1_to_v2_page_map.get(self.page_translation_map[row['page_ptr_id']])
locale, __ = Locale.objects.get_or_create(language_code=row['locale'])
self.translate_page(locale=locale, page=footer)
translated_footer = footer.get_translation_or_none(locale)
if translated_footer:
translated_footer.lead_image = self.image_map.get(row['image_id'])
translated_footer.title = row['title']
translated_footer.draft_title = row['draft_title']
translated_footer.live = row['live']
translated_footer.body = self.map_article_body(row['body'])
translated_footer.save(update_fields=['lead_image', 'title', 'draft_title', 'slug', 'live', 'body'])
self.stdout.write(f"Translated footer, title={row['title']}")
cur.close()
def create_footer(self, footer_index_page, row):
footer = models.FooterPage(
lead_image=self.image_map.get(row['image_id']),
title=row['title'],
draft_title=row['draft_title'],
slug=row['slug'],
path=footer_index_page.path + row['path'][12:],
depth=row['depth'],
numchild=row['numchild'],
live=row['live'],
body=self.map_article_body(row['body']),
)
footer.save()
self.v1_to_v2_page_map.update({
row['page_ptr_id']: footer
})
self.stdout.write(f"saved footer, title={footer.title}")
def load_page_translation_map(self):
sql = "select * " \
"from core_pagetranslation"
cur = self.db_query(sql)
for row in cur:
self.page_translation_map.update({
row['translated_page_id']: row['page_id'],
})
cur.close()
self.stdout.write('Page translation map loaded.')
    def translate_page(self, locale, page):
        """Create wagtail-localize translation records for *page* into *locale*
        (no attributing user)."""
        translator = TranslationCreator(user=None, target_locales=[locale])
        translator.create_translations(page)
    def stop_translations(self):
        """Disable every wagtail-localize Translation so the migrated copies
        are not overwritten by automatic syncing."""
        Translation.objects.update(enabled=False)
        self.stdout.write('Translations stopped.')
| 18,447 | 5,552 |
import numpy as np
import libs.contact_inhibition_lib as lib #library for simulation routines
import libs.data as data
import libs.plot as vplt #plotting library
from structure.global_constants import *
import structure.initialisation as init
from structure.cell import Tissue, BasicSpringForceNoGrowth
import matplotlib.pyplot as plt
import os
"""run a single voronoi tessellation model simulation"""
OUTDIR = "CIP_cell_division_relaxation_time2/"
l = 10 # population size N=l*l
timend = 30. # simulation time (hours)
timestep = 1.0 # time intervals to save simulation history
rand = np.random.RandomState()
simulation = lib.simulation_contact_inhibition_area_dependent #simulation routine imported from lib
threshold_area_fraction=1.0
DEATH_RATE = 1./12
rates = (DEATH_RATE,DEATH_RATE/0.4) #death_rate,division_rate
domain_size_multiplier=0.980940
eta,mu,dt=1.,-250,0.001
T_m_init=0.1
def get_relaxation_data(T_m_vals,T_m_init,eta,mu,dt,relaxtime):
    """Run a CIP simulation, force one cell division, then relax copies of the
    tissue under each rest-length relaxation time in T_m_vals.

    For every T_m a text file with rows (time, sibling separation, mean
    sibling area) is written to OUTDIR as "T_m=<value>.txt".
    """
    history = lib.run_simulation(simulation,l,timestep,timend,rand,progress_on=True,
                init_time=None,til_fix=False,save_areas=True,cycle_phase=None,eta=eta,mu=mu,dt=dt,T_m=T_m_init,
                return_events=False,save_cell_histories=True,domain_size_multiplier=domain_size_multiplier,
                rates=rates,threshold_area_fraction=threshold_area_fraction)
    # mechanically relax the final snapshot without further division events
    tissue = lib.run_return_final_tissue(lib.simulation_no_division(history[-1],dt,200,rand,eta),200)
    # pick a random division-competent cell and divide it
    division_ready = lib.check_area_threshold(tissue.mesh,threshold_area_fraction)
    mother = rand.choice(division_ready)
    tissue.add_daughter_cells(mother,rand)
    tissue.remove(mother,True)
    tissue.update(dt)
    # one identical copy of the post-division tissue per T_m value
    init_tissues = [tissue.copy() for T_m in T_m_vals]
    for T_m,tissue in zip(T_m_vals,init_tissues):
        tissue.Force = BasicSpringForceNoGrowth(mu,T_m)
    histories = [lib.run(lib.simulation_no_division(tissue,dt,int(relaxtime/dt),rand,eta),int(relaxtime/dt),1) for tissue in init_tissues]
    for T_m,history in zip(T_m_vals,histories):
        # the two daughters are always the last two cells in the tissue
        cell1,cell2 = len(history[0])-2,len(history[0])-1
        sibling_distance = get_sibling_distance(history,cell1,cell2)
        mean_area = np.array([np.mean(tissue.mesh.areas[-2:]) for tissue in history])
        time = np.arange(0,relaxtime,dt)
        # renamed from `data`, which shadowed the module imported as `data`
        results = np.vstack((time,sibling_distance,mean_area))
        try: np.savetxt(OUTDIR+"T_m=%.3f.txt"%T_m,results)
        except IOError:
            # output directory does not exist yet: create it and retry
            os.makedirs(OUTDIR)
            np.savetxt(OUTDIR+"T_m=%.3f.txt"%T_m,results)
def narg(tissue,i,j):
    """Return the position of cell j inside cell i's neighbour array, or NaN
    when j is not a neighbour (or i is out of range)."""
    try:
        hits = np.where(tissue.mesh.neighbours[i] == j)[0]
        return hits[0]
    except IndexError:
        return np.nan
def get_sibling_distance(history,cell1,cell2):
    """Per-snapshot distance between the two sibling cells; NaN for snapshots
    where they are no longer neighbours (narg returns NaN, and NaN < 100 is
    False, so the NaN branch is taken)."""
    separations = []
    for tissue in history:
        k = narg(tissue, cell1, cell2)
        separations.append(tissue.mesh.distances[cell1][k] if k < 100 else np.nan)
    return np.array(separations)
relaxtime = 2.0  # post-division relaxation window (same time units as timend)
T_m_vals=[0.001,0.01,0.1,0.25,0.5,1.0,2.0]  # rest-length relaxation times to compare
get_relaxation_data(T_m_vals,T_m_init,eta,mu,dt,relaxtime)
from django.contrib import admin
from .models import Invoice
# Register your models here.
# Expose Invoice in the Django admin with the default ModelAdmin options.
admin.site.register(Invoice)
| 120 | 33 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.name_and_value import NameAndValue # noqa: F401,E501
from tapi_server.models.operational_state_pac import OperationalStatePac # noqa: F401,E501
from tapi_server.models.resource_spec import ResourceSpec # noqa: F401,E501
from tapi_server.models.termination_pac import TerminationPac # noqa: F401,E501
from tapi_server import util
class ConnectionEndPoint(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, uuid: str=None, name: List[NameAndValue]=None, operational_state: str=None, lifecycle_state: str=None, termination_direction: str=None, termination_state: str=None, layer_protocol_name: str=None, connectivity_service_end_point: str=None, parent_node_edge_point: List[str]=None, client_node_edge_point: List[str]=None, connection_port_direction: str=None, connection_port_role: str=None): # noqa: E501
"""ConnectionEndPoint - a model defined in Swagger
:param uuid: The uuid of this ConnectionEndPoint. # noqa: E501
:type uuid: str
:param name: The name of this ConnectionEndPoint. # noqa: E501
:type name: List[NameAndValue]
:param operational_state: The operational_state of this ConnectionEndPoint. # noqa: E501
:type operational_state: str
:param lifecycle_state: The lifecycle_state of this ConnectionEndPoint. # noqa: E501
:type lifecycle_state: str
:param termination_direction: The termination_direction of this ConnectionEndPoint. # noqa: E501
:type termination_direction: str
:param termination_state: The termination_state of this ConnectionEndPoint. # noqa: E501
:type termination_state: str
:param layer_protocol_name: The layer_protocol_name of this ConnectionEndPoint. # noqa: E501
:type layer_protocol_name: str
:param connectivity_service_end_point: The connectivity_service_end_point of this ConnectionEndPoint. # noqa: E501
:type connectivity_service_end_point: str
:param parent_node_edge_point: The parent_node_edge_point of this ConnectionEndPoint. # noqa: E501
:type parent_node_edge_point: List[str]
:param client_node_edge_point: The client_node_edge_point of this ConnectionEndPoint. # noqa: E501
:type client_node_edge_point: List[str]
:param connection_port_direction: The connection_port_direction of this ConnectionEndPoint. # noqa: E501
:type connection_port_direction: str
:param connection_port_role: The connection_port_role of this ConnectionEndPoint. # noqa: E501
:type connection_port_role: str
"""
self.swagger_types = {
'uuid': str,
'name': List[NameAndValue],
'operational_state': str,
'lifecycle_state': str,
'termination_direction': str,
'termination_state': str,
'layer_protocol_name': str,
'connectivity_service_end_point': str,
'parent_node_edge_point': List[str],
'client_node_edge_point': List[str],
'connection_port_direction': str,
'connection_port_role': str
}
self.attribute_map = {
'uuid': 'uuid',
'name': 'name',
'operational_state': 'operational-state',
'lifecycle_state': 'lifecycle-state',
'termination_direction': 'termination-direction',
'termination_state': 'termination-state',
'layer_protocol_name': 'layer-protocol-name',
'connectivity_service_end_point': 'connectivity-service-end-point',
'parent_node_edge_point': 'parent-node-edge-point',
'client_node_edge_point': 'client-node-edge-point',
'connection_port_direction': 'connection-port-direction',
'connection_port_role': 'connection-port-role'
}
self._uuid = uuid
self._name = name
self._operational_state = operational_state
self._lifecycle_state = lifecycle_state
self._termination_direction = termination_direction
self._termination_state = termination_state
self._layer_protocol_name = layer_protocol_name
self._connectivity_service_end_point = connectivity_service_end_point
self._parent_node_edge_point = parent_node_edge_point
self._client_node_edge_point = client_node_edge_point
self._connection_port_direction = connection_port_direction
self._connection_port_role = connection_port_role
    @classmethod
    def from_dict(cls, dikt) -> 'ConnectionEndPoint':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The connection-end-point of this ConnectionEndPoint.  # noqa: E501
        :rtype: ConnectionEndPoint
        """
        # delegates to the shared swagger deserializer, which consumes
        # swagger_types/attribute_map set up in __init__
        return util.deserialize_model(dikt, cls)
    @property
    def uuid(self) -> str:
        """Gets the uuid of this ConnectionEndPoint.

        Immutable RFC 4122 UUID in lowercase canonical string form, e.g.
        f81d4fae-7dec-11d0-a765-00a0c91e6bf6.  # noqa: E501

        :return: The uuid of this ConnectionEndPoint.
        :rtype: str
        """
        return self._uuid
    @uuid.setter
    def uuid(self, uuid: str):
        """Sets the uuid of this ConnectionEndPoint.

        Expected to be an RFC 4122 UUID in lowercase canonical string form,
        e.g. f81d4fae-7dec-11d0-a765-00a0c91e6bf6 (not validated here).  # noqa: E501

        :param uuid: The uuid of this ConnectionEndPoint.
        :type uuid: str
        """
        self._uuid = uuid
@property
def name(self) -> List[NameAndValue]:
    """Return the name list of this ConnectionEndPoint.

    Names are unique within some namespace but may change during the life
    of the entity; they carry no semantics about its purpose.

    :return: The name of this ConnectionEndPoint.
    :rtype: List[NameAndValue]
    """
    return self._name
@name.setter
def name(self, name: List[NameAndValue]):
    """Set the name list of this ConnectionEndPoint.

    :param name: The name of this ConnectionEndPoint.
    :type name: List[NameAndValue]
    """
    self._name = name
@property
def operational_state(self) -> str:
    """Return the operational_state of this ConnectionEndPoint.

    :return: The operational_state of this ConnectionEndPoint.
    :rtype: str
    """
    return self._operational_state
@operational_state.setter
def operational_state(self, operational_state: str):
    """Set the operational_state of this ConnectionEndPoint.

    :param operational_state: The operational_state of this ConnectionEndPoint.
    :type operational_state: str
    :raises ValueError: if the value is not one of the allowed states.
    """
    allowed_values = ["DISABLED", "ENABLED"]  # noqa: E501
    if operational_state not in allowed_values:
        msg = "Invalid value for `operational_state` ({0}), must be one of {1}"
        raise ValueError(msg.format(operational_state, allowed_values))
    self._operational_state = operational_state
@property
def lifecycle_state(self) -> str:
    """Return the lifecycle_state of this ConnectionEndPoint.

    :return: The lifecycle_state of this ConnectionEndPoint.
    :rtype: str
    """
    return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state: str):
    """Set the lifecycle_state of this ConnectionEndPoint.

    :param lifecycle_state: The lifecycle_state of this ConnectionEndPoint.
    :type lifecycle_state: str
    :raises ValueError: if the value is not one of the allowed states.
    """
    allowed_values = ["PLANNED", "POTENTIAL_AVAILABLE", "POTENTIAL_BUSY", "INSTALLED", "PENDING_REMOVAL"]  # noqa: E501
    if lifecycle_state not in allowed_values:
        msg = "Invalid value for `lifecycle_state` ({0}), must be one of {1}"
        raise ValueError(msg.format(lifecycle_state, allowed_values))
    self._lifecycle_state = lifecycle_state
@property
def termination_direction(self) -> str:
    """Return the termination_direction of this ConnectionEndPoint.

    The overall directionality of the LP: a BIDIRECTIONAL LP has some SINK
    and/or SOURCE flows; a SINK LP holds only SINK or CONTRA_DIRECTION_SOURCE
    flows; a SOURCE LP holds only SOURCE or CONTRA_DIRECTION_SINK flows.

    :return: The termination_direction of this ConnectionEndPoint.
    :rtype: str
    """
    return self._termination_direction
@termination_direction.setter
def termination_direction(self, termination_direction: str):
    """Set the termination_direction of this ConnectionEndPoint.

    :param termination_direction: The termination_direction of this ConnectionEndPoint.
    :type termination_direction: str
    :raises ValueError: if the value is not one of the allowed directions.
    """
    allowed_values = ["BIDIRECTIONAL", "SINK", "SOURCE", "UNDEFINED_OR_UNKNOWN"]  # noqa: E501
    if termination_direction not in allowed_values:
        msg = "Invalid value for `termination_direction` ({0}), must be one of {1}"
        raise ValueError(msg.format(termination_direction, allowed_values))
    self._termination_direction = termination_direction
@property
def termination_state(self) -> str:
    """Return the termination_state of this ConnectionEndPoint.

    Indicates whether the layer is terminated and, if so, how.

    :return: The termination_state of this ConnectionEndPoint.
    :rtype: str
    """
    return self._termination_state
@termination_state.setter
def termination_state(self, termination_state: str):
    """Set the termination_state of this ConnectionEndPoint.

    Indicates whether the layer is terminated and, if so, how.

    :param termination_state: The termination_state of this ConnectionEndPoint.
    :type termination_state: str
    :raises ValueError: if the value is not one of the allowed states.
    """
    allowed_values = ["LP_CAN_NEVER_TERMINATE", "LT_NOT_TERMINATED", "TERMINATED_SERVER_TO_CLIENT_FLOW", "TERMINATED_CLIENT_TO_SERVER_FLOW", "TERMINATED_BIDIRECTIONAL", "LT_PERMENANTLY_TERMINATED", "TERMINATION_STATE_UNKNOWN"]  # noqa: E501
    if termination_state not in allowed_values:
        msg = "Invalid value for `termination_state` ({0}), must be one of {1}"
        raise ValueError(msg.format(termination_state, allowed_values))
    self._termination_state = termination_state
@property
def layer_protocol_name(self) -> str:
    """Return the layer_protocol_name of this ConnectionEndPoint.

    :return: The layer_protocol_name of this ConnectionEndPoint.
    :rtype: str
    """
    return self._layer_protocol_name
@layer_protocol_name.setter
def layer_protocol_name(self, layer_protocol_name: str):
    """Set the layer_protocol_name of this ConnectionEndPoint.

    :param layer_protocol_name: The layer_protocol_name of this ConnectionEndPoint.
    :type layer_protocol_name: str
    :raises ValueError: if the value is not one of the allowed layer names.
    """
    allowed_values = ["OTSiA", "OCH", "OTU", "ODU", "ETH", "ETY", "DSR"]  # noqa: E501
    if layer_protocol_name not in allowed_values:
        msg = "Invalid value for `layer_protocol_name` ({0}), must be one of {1}"
        raise ValueError(msg.format(layer_protocol_name, allowed_values))
    self._layer_protocol_name = layer_protocol_name
@property
def connectivity_service_end_point(self) -> str:
    """Return the connectivity_service_end_point of this ConnectionEndPoint.

    :return: The connectivity_service_end_point of this ConnectionEndPoint.
    :rtype: str
    """
    return self._connectivity_service_end_point
@connectivity_service_end_point.setter
def connectivity_service_end_point(self, connectivity_service_end_point: str):
    """Set the connectivity_service_end_point of this ConnectionEndPoint.

    :param connectivity_service_end_point: The connectivity_service_end_point of this ConnectionEndPoint.
    :type connectivity_service_end_point: str
    """
    self._connectivity_service_end_point = connectivity_service_end_point
@property
def parent_node_edge_point(self) -> List[str]:
    """Return the parent_node_edge_point list of this ConnectionEndPoint.

    :return: The parent_node_edge_point of this ConnectionEndPoint.
    :rtype: List[str]
    """
    return self._parent_node_edge_point
@parent_node_edge_point.setter
def parent_node_edge_point(self, parent_node_edge_point: List[str]):
    """Set the parent_node_edge_point list of this ConnectionEndPoint.

    :param parent_node_edge_point: The parent_node_edge_point of this ConnectionEndPoint.
    :type parent_node_edge_point: List[str]
    """
    self._parent_node_edge_point = parent_node_edge_point
@property
def client_node_edge_point(self) -> List[str]:
    """Return the client_node_edge_point list of this ConnectionEndPoint.

    :return: The client_node_edge_point of this ConnectionEndPoint.
    :rtype: List[str]
    """
    return self._client_node_edge_point
@client_node_edge_point.setter
def client_node_edge_point(self, client_node_edge_point: List[str]):
    """Set the client_node_edge_point list of this ConnectionEndPoint.

    :param client_node_edge_point: The client_node_edge_point of this ConnectionEndPoint.
    :type client_node_edge_point: List[str]
    """
    self._client_node_edge_point = client_node_edge_point
@property
def connection_port_direction(self) -> str:
    """Return the connection_port_direction of this ConnectionEndPoint.

    The orientation of defined flow at the EndPoint.

    :return: The connection_port_direction of this ConnectionEndPoint.
    :rtype: str
    """
    return self._connection_port_direction
@connection_port_direction.setter
def connection_port_direction(self, connection_port_direction: str):
    """Set the connection_port_direction of this ConnectionEndPoint.

    The orientation of defined flow at the EndPoint.

    :param connection_port_direction: The connection_port_direction of this ConnectionEndPoint.
    :type connection_port_direction: str
    :raises ValueError: if the value is not one of the allowed directions.
    """
    allowed_values = ["BIDIRECTIONAL", "INPUT", "OUTPUT", "UNIDENTIFIED_OR_UNKNOWN"]  # noqa: E501
    if connection_port_direction not in allowed_values:
        msg = "Invalid value for `connection_port_direction` ({0}), must be one of {1}"
        raise ValueError(msg.format(connection_port_direction, allowed_values))
    self._connection_port_direction = connection_port_direction
@property
def connection_port_role(self) -> str:
    """Return the connection_port_role of this ConnectionEndPoint.

    Each EP of the FC has a role (e.g., working, protection, protected,
    symmetric, hub, spoke, leaf, root) in the context of the FC with
    respect to the FC function.

    :return: The connection_port_role of this ConnectionEndPoint.
    :rtype: str
    """
    return self._connection_port_role
@connection_port_role.setter
def connection_port_role(self, connection_port_role: str):
    """Set the connection_port_role of this ConnectionEndPoint.

    :param connection_port_role: The connection_port_role of this ConnectionEndPoint.
    :type connection_port_role: str
    :raises ValueError: if the value is not one of the allowed roles.
    """
    allowed_values = ["SYMMETRIC", "ROOT", "LEAF", "TRUNK", "UNKNOWN"]  # noqa: E501
    if connection_port_role not in allowed_values:
        msg = "Invalid value for `connection_port_role` ({0}), must be one of {1}"
        raise ValueError(msg.format(connection_port_role, allowed_values))
    self._connection_port_role = connection_port_role
| 17,920 | 5,314 |
from urllib.parse import urljoin
from appdirs import user_data_dir
from notebook.notebookapp import NotebookApp
from idom.config import IDOM_WED_MODULES_DIR
from tornado.web import StaticFileHandler
from tornado.web import Application
# Redirect idom's web-module cache into a per-user data directory so the
# Jupyter extension does not write inside the package install location.
IDOM_WED_MODULES_DIR.current = user_data_dir("idom-jupyter", "idom-team")
def _load_jupyter_server_extension(notebook_app: NotebookApp):
    """Register a Tornado route serving idom's web modules.

    Mounts a ``StaticFileHandler`` at ``<base_url>_idom_web_modules/(.*)``
    that serves files from the shared web-modules directory.

    :param notebook_app: the running Jupyter ``NotebookApp`` instance.
    """
    web_app: Application = notebook_app.web_app
    base_url = web_app.settings["base_url"]
    # Plain raw string: the original used rf"..." although the pattern
    # contains no f-string placeholders.
    route_pattern = urljoin(base_url, r"_idom_web_modules/(.*)")
    web_app.add_handlers(
        host_pattern=".*$",
        host_handlers=[
            (
                route_pattern,
                StaticFileHandler,
                {"path": str(IDOM_WED_MODULES_DIR.current.absolute())},
            ),
        ],
    )


# compat for older versions of Jupyter, which look up the un-prefixed name
load_jupyter_server_extension = _load_jupyter_server_extension
| 901 | 297 |
## common class for only dobot with cam
import gym
from gym import utils
from glob import glob
from dobot_gym.utils.dobot_controller import DobotController
from gym.spaces import MultiDiscrete
class DobotRealEnv(gym.Env, utils.EzPickle):
    """Gym environment driving a real Dobot Magician arm (with camera)."""

    def __init__(self):
        super().__init__()
        # Find the serial port on which the Dobot is connected.
        available_ports = glob('/dev/tty*USB*')
        if len(available_ports) == 0:
            print('no port found for Dobot Magician')
            exit(1)
        def_port = available_ports[0]
        self.dobot = DobotController(port=def_port)
        self.observation_space = None
        # One discrete choice {0, 1, 2} per joint, mapped to {-1, 0, +1}
        # angle increments in step().
        self.action_space = MultiDiscrete([3, 3, 3])

    def compute_reward(self):
        """Reward placeholder; always 0 until a task-specific reward exists."""
        return 0

    def step(self, action):
        """Apply an incremental joint move; return (obs, reward, done, info)."""
        # Shift each action component from {0, 1, 2} to {-1, 0, +1}.
        real_action = action - 1
        self.dobot.moveangleinc(*real_action, r=0, q=1)
        # BUG FIX: compute_reward() takes no arguments; the old call passed
        # the undefined names `image` and `centroid` (NameError at runtime).
        reward = self.compute_reward()
        poses = self.dobot.get_dobot_joint()
        done = False
        info = {}  # gym's API expects a dict here, not None
        return poses, reward, done, info
#########################
# Author: Sam Higginbotham
########################
# CRAB3 configuration: run the raw PCC (pixel cluster counting) correction
# workflow over 2017 AlCaLumiPixels AlCaPCCRandom ALCARECO data and collect
# the per-job CSV output.
from WMCore.Configuration import Configuration
config = Configuration()
#name='Pt11to30'
config.section_("General")
config.General.requestName = 'PCC_Run2017E_Corrections'
config.General.workArea = 'RawPCCZeroBias2017'
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# CMSSW configuration executed by each grid job.
config.JobType.psetName = 'raw_corr_Random_cfg.py'
config.JobType.allowUndistributedCMSSW = True
config.JobType.outputFiles = ['rawPCC.csv']
# Corrections sqlite file shipped alongside each job.
config.JobType.inputFiles = ['c.db']
config.section_("Data")
#config.Data.inputDataset = '/AlCaLumiPixels/Run2017E-AlCaPCCZeroBias-PromptReco-v1/ALCARECO'
config.Data.userInputFiles=['/store/data/Run2017E/AlCaLumiPixels/ALCARECO/AlCaPCCRandom-PromptReco-v1/000/303/832/00000/E6B8ACA4-95A4-E711-9AA2-02163E014793.root']
#config.Data.lumiMask = ''
#config.Data.runRange='303382'#,297283,297278,297280,297281,297271,297227,297230,297276,297261,297266'
# Input is a single explicit file, so jobs may run at any site.
config.Data.ignoreLocality = True
#useParent = True
config.Data.inputDBS = 'global'
#config.Data.splitting = 'LumiBased'
config.Data.splitting = 'FileBased'
config.Data.publication = False
config.Data.unitsPerJob = 1000
#config.Data.totalUnits = -1
#config.Data.publishDbsUrl = 'test'
# NOTE(review): tag says Run2017C but the request name and input file are
# Run2017E — confirm which era this output actually belongs to.
config.Data.outputDatasetTag = 'PCC_AlCaLumiPixels_Run2017C_1kLS_NoZeroes'
config.Data.outLFNDirBase = '/store/group/comm_luminosity/PCC/ForLumiComputations/2017/5Feb2018'
config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
config.Site.whitelist=['T2_FR_CCIN2P3','T2_IT_Pisa','T2_UK_London_IC','T2_HU_Budapest']
#config.Site.whitelist=['T2_FR_CCIN2P3']
| 1,609 | 734 |
import cv2
import os
import glob
import numpy as np
from operator import itemgetter
# import matplotlib.pyplot as plt
import math
import scipy.stats as stats
def main():
    """Extract optical-flow frame stacks for every video under ./UCF-101."""
    loaddata(video_dir='./UCF-101', depth=24, dest_forder='./UCF101-OF')
def save_image_to_file(frame_array, folder):
    """Write each frame of `frame_array` to folder/00000.jpg, 00001.jpg, ..."""
    # enumerate + os.path.join instead of range(np.size(...)) and "/" concat.
    for i, frame in enumerate(frame_array):
        cv2.imwrite(os.path.join(folder, format(i, '05d') + '.jpg'), frame)
def loaddata(video_dir, depth, dest_forder):
    """Extract `depth` optical-flow frames from every .avi under video_dir.

    video_dir is expected to contain one level of class sub-directories;
    results are written under dest_forder mirroring the same layout.
    (Alternative extractors exist in this module: video3d_overlap,
    video3d_selected_active_frame, full_selected_active_frame.)
    """
    for class_dir in os.listdir(video_dir):  # renamed: `dir` shadowed a builtin
        pattern = os.path.join(video_dir, class_dir, '*.avi')
        # BUG FIX: the previous sort key was the constant `pattern` path,
        # which left the "sorted" order undefined; sort by filename instead.
        for filename in sorted(glob.glob(pattern)):
            print('Extracting file:', filename)
            frame_array = video3d_opticalflow(filename, depth)
            newdir = class_dir + "/" + os.path.splitext(os.path.basename(filename))[0]
            directory = os.path.join(dest_forder, newdir)
            if not os.path.exists(directory):
                os.makedirs(directory)
            save_image_to_file(frame_array, directory)
def active_frames(frame_array):
    """Fuse a window of frames into one image, weighting frames by activity.

    Each frame's "activity" is the Euclidean distance to its predecessor.
    Frames are ranked by activity, given a rank-based weight by
    normal_distribution(), then summed and min-max rescaled to uint8.

    NOTE(review): d has len(frame_array)-1 entries, so the weighting loop
    covers frame_array[1:-1] and the final frame never enters the sum —
    confirm this is intended.
    """
    d=[] #euclid distance of consecutive frames, as (dist, index, weight) triples
    frames =[]
    for i in range(np.size(frame_array,axis=0)-1):
        d.append((np.linalg.norm(frame_array[i+1]-frame_array[i]),i,0))
    #Sort d[i] ascending under first column of d[i]
    d.sort(key=itemgetter(0)) #get the order of active frame
    d = normal_distribution(d) #assign each d one weight based on its activity rank
    d.sort(key=itemgetter(1)) #re-order back to temporal order
    frames.append(frame_array[0])  # first frame kept unweighted
    for i in range(1,np.size(d,axis=0)):
        temp_frame = frame_array[i] * d[i][2]
        frames.append(temp_frame)
    temp_frame = np.sum(frames, axis = 0)
    # Rescale the accumulated image into the displayable 0-255 uint8 range.
    temp_frame = cv2.normalize(temp_frame, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    return np.array(temp_frame)
def selected_active_frame(frame_array):
    """Return the most "active" frame of a segment.

    The most active frame is the one with the largest Euclidean distance to
    its predecessor; if no distance exceeds 0, the first frame is returned.
    (Fixes shadowing of the builtin `max` by an unused local.)
    """
    best_distance = 0
    best_frame = frame_array[0]  # fallback: first frame of the segment
    for i in range(np.size(frame_array, axis=0) - 1):
        distance = np.linalg.norm(frame_array[i + 1] - frame_array[i])
        if distance > best_distance:
            best_distance = distance
            best_frame = frame_array[i + 1]
    return np.array(best_frame)
# Select the `depth` most active frames of the whole video.
def full_selected_active_frame(filename, depth):
    """Pick the `depth` frames with the largest frame-to-frame Euclidean
    distance, returned in temporal order as a list of frames."""
    cap_images = read_video_from_file(filename)
    # Activity of frame k+1 = distance to frame k.
    activity = [(np.linalg.norm(cap_images[k + 1] - cap_images[k]), k + 1)
                for k in range(np.size(cap_images, axis=0) - 1)]
    # Indices of the `depth` largest activities, restored to temporal order.
    chosen = sorted(idx for _, idx in sorted(activity, key=itemgetter(0))[-depth:])
    return [cap_images[idx] for idx in chosen]
def video3d_selected_active_frame(filename, depth):
    """Split a video into `depth` segments and keep each segment's most
    active frame.

    BUG FIX: the selection/append calls were commented out, so this always
    returned an empty array; segment results are now collected. Also
    replaces np.int (removed in NumPy 1.24) with the builtin int.
    """
    cap_images = read_video_from_file(filename)
    framearray = []
    nframe = np.size(cap_images, axis=0)
    # Segment start indices, evenly spaced over the video.
    frames = [int(x * nframe / depth) for x in range(depth)]
    for i in range(np.size(frames, axis=0)):
        if i < np.size(frames, axis=0) - 1:
            segment = cap_images[frames[i]:frames[i + 1]]
        else:  # final segment runs to the end of the video
            segment = cap_images[frames[i]:nframe]
        framearray.append(selected_active_frame(segment))
    return np.array(framearray)
def video3d_overlap(filename, depth=16, overlap=5):
    """Sample `depth` anchor frames and fuse a ±`overlap` window around each
    into a single activity-weighted frame (see active_frames).

    Replaces np.int (removed in NumPy 1.24) with the builtin int.
    """
    cap_images = read_video_from_file(filename)
    frame_array = []
    nframe = np.size(cap_images, axis=0)
    # Anchor indices, evenly spaced over the video.
    frames = [int(x * nframe / depth) for x in range(depth)]
    for i in range(np.size(frames)):
        # Clamp the window to the valid frame range.
        fromframe = max(frames[i] - overlap, 0)
        toframe = min(frames[i] + overlap, nframe - 1)
        window = cap_images[fromframe:toframe]
        frame_array.append(active_frames(window))
    return np.array(frame_array)
def read_video_from_file(filename):
    """Read all frames of a video into a float array of shape (n, H, W, 3).

    If decoding stops before the reported frame count, the array is
    truncated to the frames actually decoded. Replaces np.int (removed in
    NumPy 1.24) with the builtin int, and releases the capture handle.
    """
    video_cap = cv2.VideoCapture(filename)
    nframe = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_width = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap_images = np.empty((nframe, frame_height, frame_width, 3))
    j = 0
    ret = True
    while j < nframe and ret:
        ret, cap_images[j] = video_cap.read()
        if not ret:
            # NOTE(review): truncating to j-1 also drops the last good frame;
            # kept as-is to preserve existing behavior — confirm intent.
            cap_images = cap_images[0:j - 1]
            break
        j += 1
    video_cap.release()  # free the decoder/file handle
    return cap_images
def normal_distribution(d):
    """Assign a weight to each (distance, index, _) triple of `d` in place.

    The third element of each triple is replaced: 1 when all distances are
    equal (the frames are identical), otherwise the rank-based weight
    alpha(16, i) where i is the triple's position in `d` (which the caller
    has sorted by distance). Returns the mutated list.
    """
    dmax = max(item[0] for item in d)
    dmin = min(item[0] for item in d)
    # Retained for the (currently disabled) normpdf-based weighting variant.
    mean = (dmax - dmin) / 2
    sd = (mean - dmin) / 3
    all_equal = dmax == dmin
    for i in range(np.size(d, axis=0)):
        dist, idx, _ = d[i]
        weight = 1 if all_equal else alpha(16, i)
        d[i] = (dist, idx, weight)
    return d
def video3d_opticalflow(filename, depth):
    """Compute `depth` Farneback optical-flow images for a video.

    For each of `depth` evenly spaced frame pairs, dense flow is encoded as
    an HSV image (hue = direction, value = normalized magnitude) and
    converted to BGR. Replaces np.int (removed in NumPy 1.24) with int.
    """
    framearray = []
    cap_images = read_video_from_file(filename)
    nframe = np.size(cap_images, axis=0)
    frames = [int(x * nframe / depth) for x in range(depth)]
    cap_images = np.asarray(cap_images, dtype=np.float32)
    for i in range(np.size(frames)):
        fromframe = frames[i]
        toframe = frames[i] + 1
        if toframe > nframe - 1:
            # Clamp the final pair to the last two frames of the video.
            fromframe = nframe - 2
            toframe = nframe - 1
        prevframe = cv2.cvtColor(cap_images[fromframe], cv2.COLOR_BGR2GRAY)
        nextframe = cv2.cvtColor(cap_images[toframe], cv2.COLOR_BGR2GRAY)
        hsv_img = np.zeros((np.size(cap_images[fromframe], axis=0),
                            np.size(cap_images[fromframe], axis=1), 3))
        # NOTE(review): zero saturation makes the BGR output grayscale —
        # confirm this is intended.
        hsv_img[..., 1] = 0
        flow = cv2.calcOpticalFlowFarneback(prevframe, nextframe, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv_img[..., 0] = 0.5 * ang * 180 / np.pi  # hue = flow direction
        hsv_img[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)  # value = magnitude
        hsv_img = np.asarray(hsv_img, dtype=np.float32)
        framearray.append(cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR))
    return np.array(framearray)
def normpdf(x, mean, sd):
    """Normal probability density at x.

    See https://en.wikipedia.org/wiki/Normal_distribution#Probability_density_function
    Uses math.pi and math.sqrt instead of a truncated 3.1415926 literal.
    """
    var = float(sd) ** 2
    denom = math.sqrt(2 * math.pi * var)
    num = math.exp(-(float(x) - float(mean)) ** 2 / (2 * var))
    return num / denom
def alpha(T, t):
    """Rank-based frame weight used by normal_distribution()."""
    harmonic_term = Harmonic_number(T) - Harmonic_number(t - 1)
    return 2 * (T - t + 1) - (T + 1) * harmonic_term
def Harmonic_number(n):
    """Return the n-th harmonic number H_n = 1 + 1/2 + ... + 1/n (H_0 = 0)."""
    if n == 0:
        return 0
    total = 0.0
    for k in range(1, n + 1):  # same summation order as the generator version
        total += 1.0 / k
    return total
# Run the extraction pipeline when invoked as a script.
if __name__ == '__main__':
    main()
"""Metrics to assess performance on ite prediction task."""
from typing import Optional
import numpy as np
import pandas as pd
def expected_response(y: np.ndarray, w: np.ndarray, policy: np.ndarray,
                      mu: Optional[np.ndarray]=None, ps: Optional[np.ndarray]=None) -> float:
    """Estimate the expected response of a treatment policy.

    Combines the outcome model `mu` with an inverse-propensity correction on
    the samples where the observed treatment matches the policy.

    Parameters
    ----------
    y: array-like of shape = (n_samples)
        Observed target values.
    w: array-like of shape = (n_samples)
        Treatment assignment variables.
    policy: array-like of shape = (n_samples)
        Estimated treatment policy.
    mu: array-like of shape = (n_samples, n_trts), optional
        Estimated potential outcomes (defaults to zeros).
    ps: array-like of shape = (n_trts,), optional
        Estimated propensity scores, indexed by treatment (defaults to the
        empirical treatment frequencies).

    Returns
    -------
    expected_response: float
        Estimated expected response.
    """
    n_samples = w.shape[0]
    if mu is None:
        mu = np.zeros((n_samples, np.unique(w).shape[0]))
    if ps is None:
        ps = pd.get_dummies(w).mean(axis=0).values
    # 1 on samples where the observed treatment agrees with the policy.
    followed = (w == policy).astype(int)
    mu_policy = mu[np.arange(n_samples), policy]
    return np.mean(mu_policy + (y - mu_policy) * followed / ps[w])
def ips_value(y: np.ndarray, w: np.ndarray, policy: np.ndarray, ps: Optional[np.ndarray]=None) -> float:
    """Decision Value Estimator based on Inverse Propensity Score Weighting method.

    Parameters
    ----------
    y: array-like of shape = (n_samples)
        Observed target values.
    w: array-like of shape = (n_samples)
        Treatment assignment indicators.
    policy: array-like of shape = (n_samples)
        Estimated decision model.
    ps: array-like of shape = (n_samples, n_trts), optional
        Estimated propensity scores; defaults to the empirical treatment
        frequencies broadcast to every sample.

    Returns
    -------
    decision_value: float
        Estimated decision value using Inverse Propensity Score Weighting method.

    References
    ----------
    [1] Y. Zhao, X. Fang, D. S. Levi: Uplift modeling with multiple treatments and general response types, 2017.
    [2] A. Schuler, M. Baiocchi, R. Tibshirani, N. Shah: A comparison of methods for model selection when estimating individual treatment effects, 2018.
    """
    if not isinstance(y, np.ndarray):
        raise TypeError("y must be a numpy.ndarray.")
    if not isinstance(w, np.ndarray):
        raise TypeError("w must be a numpy.ndarray.")
    if not isinstance(policy, np.ndarray):
        raise TypeError("policy must be a numpy.ndarray.")
    if ps is None:
        # Default propensity: marginal treatment frequencies, same for all samples.
        trts_probs = pd.get_dummies(w).mean(axis=0).values
        ps = np.ones((w.shape[0], np.unique(w).shape[0])) * np.expand_dims(trts_probs, axis=0)
    else:
        assert (np.max(ps) < 1) and (np.min(ps) > 0), "ps must be strictly between 0 and 1."
    # One-hot encoding of observed treatments: (n_samples, n_trts).
    treatment_matrix = pd.get_dummies(w).values
    if np.unique(policy).shape[0] == np.unique(w).shape[0]:
        policy = pd.get_dummies(policy).values
    else:
        # The policy never recommends some treatments; insert zero columns so
        # the one-hot policy matrix aligns column-wise with treatment_matrix.
        # NOTE(review): np.insert uses the treatment label as a column index,
        # which assumes treatments are labeled 0..n_trts-1 — confirm.
        diff = np.setdiff1d(np.unique(w), np.unique(policy))
        policy = pd.get_dummies(policy).values
        for _diff in diff:
            policy = np.insert(policy, _diff, 0, axis=1)
    # 1 exactly where the observed treatment equals the recommended one.
    indicator_matrix = policy * treatment_matrix
    outcome_matrix = np.expand_dims(y, axis=1) * treatment_matrix
    decision_value = np.mean(np.sum(indicator_matrix * (outcome_matrix / ps), axis=1))
    return decision_value
def dr_value(y: np.ndarray, w: np.ndarray, policy: np.ndarray,
             mu: np.ndarray, ps: Optional[np.ndarray]=None) -> float:
    """Decision Value Estimator based on Doubly Robust method.

    Parameters
    ----------
    y: array-like of shape = (n_samples)
        Observed target values.
    w: array-like of shape = (n_samples)
        Treatment assignment indicators.
    policy: array-like of shape = (n_samples)
        Estimated decision model.
    mu: array-like of shape = (n_samples, n_treatments)
        Estimated potential outcome for each treatment.
    ps: array-like of shape = (n_samples, n_treatments), optional
        Estimated propensity scores; defaults to the empirical treatment
        frequencies broadcast to every sample.

    Returns
    -------
    decision_value: float
        Estimated decision value using Doubly Robust method.

    References
    ----------
    [1] A. Schuler, M. Baiocchi, R. Tibshirani, N. Shah: A comparison of methods for model selection when estimating individual treatment effects, 2018.
    """
    if not isinstance(y, np.ndarray):
        raise TypeError("y must be a numpy.ndarray.")
    if not isinstance(w, np.ndarray):
        raise TypeError("w must be a numpy.ndarray.")
    if not isinstance(policy, np.ndarray):
        raise TypeError("policy must be a numpy.ndarray.")
    if not isinstance(mu, np.ndarray):
        raise TypeError("mu must be a numpy.ndarray.")
    if ps is None:
        # Default propensity: marginal treatment frequencies, same for all samples.
        trts_probs = pd.get_dummies(w).mean(axis=0).values
        ps = np.ones((w.shape[0], np.unique(w).shape[0])) * np.expand_dims(trts_probs, axis=0)
    else:
        assert (np.max(ps) < 1) and (np.min(ps) > 0), "ps must be strictly between 0 and 1."
    # One-hot encoding of observed treatments: (n_samples, n_treatments).
    treatment_matrix = pd.get_dummies(w).values
    policy = pd.get_dummies(policy).values
    # Pad the policy matrix with zero columns for treatments it never picks.
    # NOTE(review): np.insert uses the treatment label as a column index,
    # which assumes treatments are labeled 0..n_trts-1 — confirm. Unlike
    # ips_value, diff is computed after get_dummies here; behavior preserved.
    diff = np.setdiff1d(np.unique(w), np.unique(policy))
    for _diff in diff:
        policy = np.insert(policy, _diff, 0, axis=1)
    indicator_matrix = policy * treatment_matrix
    outcome_matrix = np.expand_dims(y, axis=1) * treatment_matrix
    # Outcome model plus IPS correction on policy-concordant samples.
    decision_value = np.mean(np.sum(treatment_matrix * mu + indicator_matrix * (outcome_matrix - mu) / ps, axis=1))
    return decision_value
| 5,547 | 1,816 |
from random import randint
class Dataset:
    """Factory for mock 2D point datasets used in clustering demos."""

    def get_mock_scattered_dataset(self, numberOf, x_upper_bound, y_upper_bound):
        """Mock 2D dataset: `numberOf` uniformly scattered [x, y, 'black'] points."""
        return [[randint(0, x_upper_bound), randint(0, y_upper_bound), 'black']
                for _ in range(numberOf)]

    def get_mock_dataset(self, numberOf, x_upper_bound, y_upper_bound):
        """Mock 2D dataset: `numberOf` points grouped around random cluster centres."""
        # Between 2 and 10 cluster centres with random x/y coordinates.
        clusters = [[randint(0, x_upper_bound), randint(0, y_upper_bound)]
                    for _ in range(randint(2, 10))]
        # Each point picks a random centre and offsets it by up to 30 in x and y.
        points = []
        for _ in range(numberOf):
            centre = clusters[randint(0, len(clusters) - 1)]
            points.append([randint(0, 30) + centre[0],
                           randint(0, 30) + centre[1],
                           'black'])
        return points
| 1,133 | 350 |
from openpyxl import load_workbook
def import_msmt_college_registry_xlsx(path, sheet_name):
    """
    Import XLSX from https://regvssp.msmt.cz/registrvssp/cvslist.aspx
    (list of colleges and faculties).

    Parameters:
    path -- path to XLSX file
    sheet_name -- "ExportVS" or "ExportFakulty"

    Returns a list of dicts, one per data row, keyed by the stripped header
    names from the first row. (Fixes reuse of the loop variable `i` as both
    the row index and the comprehension cell variable.)
    """
    workbook = load_workbook(path)
    sheet = workbook[sheet_name]
    # Header row 1: strip whitespace around column names.
    # NOTE(review): assumes no header cell is empty (cell.value is never None).
    columns = [cell.value.strip() for cell in sheet[1]]
    out = []
    # Data rows start at 2; openpyxl rows are 1-indexed.
    for row_idx in range(2, sheet.max_row + 1):
        values = [cell.value for cell in sheet[row_idx]]
        out.append(dict(zip(columns, values)))
    return out
| 623 | 220 |
import unittest
import numpy as np
from mtrack.graphs import G1
from mtrack.evaluation.matching_graph import MatchingGraph
from mtrack.evaluation.voxel_skeleton import VoxelSkeleton
from comatch import match_components
import json
# Output directory for artifacts written by the tests (e.g. exported .nml files).
test_data_dir = "./data"
class ParallelLinesSetUp(unittest.TestCase):
    """Fixture: two parallel straight-line skeletons, 50 units apart in x."""

    def setUp(self):
        """Build a ground-truth and a reconstructed skeleton:

        o o
        | |
        | |
        | |
        | |
        | |
        o o
        | |
        | |
        | |
        | |
        o o
        | |
        | |
        . .
        . .
        . .
        """
        # Both lines consist of 10 vertices spaced 5 apart along z.
        self.gt_vertices = 10
        self.rec_vertices = 10
        self.gt = G1(self.gt_vertices)
        self.rec = G1(self.rec_vertices)
        z = 0
        for v in self.gt.get_vertex_iterator():
            self.gt.set_position(v, np.array([100,100,z]))
            self.gt.set_orientation(v, np.array([1,0,0]))
            z += 5
            # Chain consecutive vertices into a line.
            if int(v)<self.gt_vertices-1:
                self.gt.add_edge(int(v), int(v)+1)
        self.vs_gt = VoxelSkeleton(self.gt, voxel_size=[1.,1.,1.], verbose=True)
        # Different offset: the reconstruction is shifted by +50 in x.
        z = 0
        for v in self.rec.get_vertex_iterator():
            self.rec.set_position(v, np.array([150,100,z]))
            self.rec.set_orientation(v, np.array([1,0,0]))
            z += 5
            if int(v)<self.rec_vertices-1:
                self.rec.add_edge(int(v), int(v)+1)
        self.vs_rec = VoxelSkeleton(self.rec, voxel_size=[1.,1.,1.], verbose=True)
        self.groundtruth_skeletons = [self.vs_gt]
        self.reconstructed_skeletons = [self.vs_rec]
        self.skeletons = {"gt": self.vs_gt, "rec": self.vs_rec}
        # Just above the 50-unit separation, so matching edges can form.
        self.distance_threshold = 50.1
        self.voxel_size = [1.,1.,1.]
class ErrorTestSetUpSameDistance(unittest.TestCase):
    """Fixture: one ground-truth line and two half-length reconstructions,
    each offset from the ground truth by the same distance (60 in x, 50 in y)."""

    def setUp(self):
        # Ground truth: 10 vertices spaced 5 apart along z.
        self.gt_vertices = 10
        self.rec1_vertices = 5
        self.rec2_vertices = 5
        self.gt = G1(self.gt_vertices)
        self.rec1 = G1(self.rec1_vertices)
        self.rec2 = G1(self.rec2_vertices)
        z = 0
        for v in self.gt.get_vertex_iterator():
            self.gt.set_position(v, np.array([100,100,z]))
            self.gt.set_orientation(v, np.array([1,0,0]))
            z += 5
            # Chain consecutive vertices into a line.
            if int(v)<self.gt_vertices-1:
                self.gt.add_edge(int(v), int(v)+1)
        self.vs_gt = VoxelSkeleton(self.gt, voxel_size=[1.,1.,1.], verbose=True, subsample=5)
        # First reconstruction: shifted by +60 in x.
        z = 0
        for v in self.rec1.get_vertex_iterator():
            self.rec1.set_position(v, np.array([160,100, z]))
            self.rec1.set_orientation(v, np.array([1,0,0]))
            z += 5
            if int(v)<self.rec1_vertices-1:
                self.rec1.add_edge(int(v), int(v)+1)
        # Second reconstruction: shifted by +50 in y.
        z = 0
        for v in self.rec2.get_vertex_iterator():
            self.rec2.set_position(v, np.array([100,150, z]))
            self.rec2.set_orientation(v, np.array([1,0,0]))
            z += 5
            if int(v)<self.rec2_vertices-1:
                self.rec2.add_edge(int(v), int(v)+1)
        self.vs_rec1 = VoxelSkeleton(self.rec1, voxel_size=[1.,1.,1.], verbose=True, subsample=5)
        self.vs_rec2 = VoxelSkeleton(self.rec2, voxel_size=[1.,1.,1.], verbose=True, subsample=5)
        self.groundtruth_skeletons = [self.vs_gt]
        self.reconstructed_skeletons = [self.vs_rec1, self.vs_rec2]
        self.skeletons = {"gt": [self.vs_gt], "rec": [self.vs_rec1, self.vs_rec2]}
        # Large enough to allow matches to both reconstructions.
        self.distance_threshold = 51
        self.voxel_size = [1.,1.,1.]
class MatchingGraphNoInitAllTestCase(ParallelLinesSetUp):
    def runTest(self):
        # Even without full initialization, the vertex count bookkeeping
        # must cover both skeletons.
        mg = MatchingGraph(self.groundtruth_skeletons,
                           self.reconstructed_skeletons,
                           self.distance_threshold,
                           self.voxel_size,
                           verbose=True,
                           initialize_all=False)
        expected_vertices = (self.vs_gt.get_graph().get_number_of_vertices()
                             + self.vs_rec.get_graph().get_number_of_vertices())
        self.assertTrue(mg.total_vertices == expected_vertices)
class MatchingGraphInitializeTestCase(ParallelLinesSetUp):
    def runTest(self):
        mg = MatchingGraph(self.groundtruth_skeletons,
                           self.reconstructed_skeletons,
                           self.distance_threshold,
                           self.voxel_size,
                           verbose=True,
                           initialize_all=False)
        # Test private methods too as internals are complex:
        matching_graph, mappings, mv_to_v, v_to_mv = mg._MatchingGraph__initialize()
        # Every skeleton vertex is mirrored into the matching graph; no edges yet.
        self.assertTrue(matching_graph.get_number_of_vertices() ==\
                        mg._MatchingGraph__get_total_vertices())
        self.assertTrue(matching_graph.get_number_of_edges()==0)
        # Vertex positions must survive the v -> mv mapping.
        for tag in ["gt", "rec"]:
            for graph in mg.graphs[tag]:
                for v in graph.get_vertex_iterator():
                    mv = v_to_mv[(graph, int(v))]
                    pos_v = np.array(graph.get_position(v))
                    pos_mv = np.array(matching_graph.get_position(mv))
                    self.assertTrue(np.all(pos_v == pos_mv))
        mv_ids_rec = mappings["rec"]["mv_ids"]
        mv_ids_gt = mappings["gt"]["mv_ids"]
        # gt and rec id sets are disjoint and together cover all vertex ids.
        self.assertTrue(set(mv_ids_rec) & set(mv_ids_gt) == set())
        # BUG FIX: on Python 3 a list never compares equal to a range object,
        # so `sorted(...) == range(...)` was always False; wrap in list().
        self.assertTrue(sorted(mv_ids_rec + mv_ids_gt) ==
                        list(range(matching_graph.get_number_of_vertices())))
        # The per-tag position tables must agree with the matching graph.
        for i in range(len(mv_ids_gt)):
            mv_id = mv_ids_gt[i]
            graph_pos = np.array(matching_graph.get_position(mv_id))
            mapping_pos = mappings["gt"]["positions"][i]
            self.assertTrue(np.all(graph_pos == mapping_pos))
        for i in range(len(mv_ids_rec)):
            mv_id = mv_ids_rec[i]
            graph_pos = np.array(matching_graph.get_position(mv_id))
            mapping_pos = mappings["rec"]["positions"][i]
            self.assertTrue(np.all(graph_pos == mapping_pos))
class MatchingGraphAddSkeletonEdgesTestCase(ParallelLinesSetUp):
    """__add_skeleton_edges must copy every skeleton edge into the matching
    graph, attached to the correct (position-preserving) vertices."""

    def runTest(self):
        mg = MatchingGraph(self.groundtruth_skeletons,
                           self.reconstructed_skeletons,
                           self.distance_threshold,
                           self.voxel_size,
                           verbose=True,
                           initialize_all=False)
        # Drive the private initialization steps manually.
        matching_graph, mappings, mv_to_v, v_to_mv = mg._MatchingGraph__initialize()
        mg.matching_graph = matching_graph
        mg.mappings = mappings
        mg.mv_to_v = mv_to_v
        mg.v_to_mv = v_to_mv
        self.assertTrue(matching_graph.get_number_of_edges() == 0)
        mg._MatchingGraph__add_skeleton_edges()
        # Edge count equals the sum of both skeletons' edge counts.
        self.assertTrue(matching_graph.get_number_of_edges() ==\
                        self.vs_gt.get_graph().get_number_of_edges() +\
                        self.vs_rec.get_graph().get_number_of_edges())
        # Check that all edges are attached to the correct vertices:
        for e in matching_graph.get_edge_iterator():
            mv0 = e.source()
            mv1 = e.target()
            v0 = mv_to_v[mv0]
            v1 = mv_to_v[mv1]
            # Compare graphs: both endpoints must come from the same skeleton.
            self.assertTrue(v0[0] == v1[0])
            edge = v0[0].get_edge(v0[1], v1[1]) # Raises value error if not there
            pos_v0 = np.array(v0[0].get_position(v0[1]))
            pos_v1 = np.array(v0[0].get_position(v1[1]))
            pos_mv0 = np.array(matching_graph.get_position(mv0))
            pos_mv1 = np.array(matching_graph.get_position(mv1))
            self.assertTrue(np.all(pos_v0 == pos_mv0))
            self.assertTrue(np.all(pos_v1 == pos_mv1))
class MatchingGraphAddMatchingEdgesTestCase(ParallelLinesSetUp):
    """Exercise matching-edge insertion together with the edge-mask
    operations (mask skeleton / mask matching / clear)."""

    def runTest(self):
        builder = MatchingGraph(self.groundtruth_skeletons,
                                self.reconstructed_skeletons,
                                self.distance_threshold,
                                self.voxel_size,
                                verbose=True,
                                initialize_all=False)
        init_graph, init_mappings, init_mv_to_v, init_v_to_mv = \
            builder._MatchingGraph__initialize()
        builder.matching_graph = init_graph
        builder.mappings = init_mappings
        builder.mv_to_v = init_mv_to_v
        builder.v_to_mv = init_v_to_mv
        builder._MatchingGraph__add_skeleton_edges()

        n_skeleton_edges = builder.matching_graph.get_number_of_edges()
        builder._MatchingGraph__add_matching_edges(self.distance_threshold,
                                                   self.voxel_size)
        n_total_edges = builder.matching_graph.get_number_of_edges()
        self.assertTrue(n_total_edges > n_skeleton_edges)

        # After mask_skeleton_edges only skeleton-type edges remain visible.
        builder.mask_skeleton_edges()
        self.assertTrue(
            builder.matching_graph.get_number_of_edges() == n_skeleton_edges)
        for edge in builder.matching_graph.get_edge_iterator():
            self.assertTrue(builder.get_edge_type(edge) == "skeleton")

        builder.clear_edge_masks()
        self.assertTrue(
            n_total_edges == builder.matching_graph.get_number_of_edges())

        # After mask_matching_edges only matching-type edges remain; every
        # matching edge joins a groundtruth vertex to a reconstruction vertex.
        builder.mask_matching_edges()
        self.assertTrue(n_total_edges - n_skeleton_edges ==
                        builder.matching_graph.get_number_of_edges())
        for edge in builder.matching_graph.get_edge_iterator():
            self.assertTrue(builder.get_edge_type(edge) == "matching")
            src_is_gt = builder.is_groundtruth_mv(edge.source())
            tgt_is_gt = builder.is_groundtruth_mv(edge.target())
            self.assertTrue(int(src_is_gt) != int(tgt_is_gt))

        builder.clear_edge_masks()
        builder.to_nml(test_data_dir + "/matching_graph.nml")
class MatchingGraphExportToComatch(ParallelLinesSetUp):
    """Validate the node/edge export consumed by the comatch matcher."""

    def runTest(self):
        builder = MatchingGraph(self.groundtruth_skeletons,
                                self.reconstructed_skeletons,
                                self.distance_threshold,
                                self.voxel_size,
                                verbose=True,
                                initialize_all=True)
        (nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec,
         edge_costs, edge_conflicts, edge_pairs) = builder.export_to_comatch()

        # Exported node partitions must respect groundtruth membership.
        for mv in nodes_gt:
            self.assertTrue(builder.is_groundtruth_mv(mv))
        for mv in nodes_rec:
            self.assertFalse(builder.is_groundtruth_mv(mv))

        # With only matching edges unmasked, the exported edge list covers
        # exactly the visible edges.
        builder.mask_matching_edges()
        self.assertTrue(len(edges_gt_rec) == builder.get_number_of_edges())
        builder.clear_edge_masks()
        self.assertTrue(
            len(nodes_gt) + len(nodes_rec) == builder.get_number_of_vertices())
class MatchingGraphImportMatches(ParallelLinesSetUp):
    """Round-trip test: export to comatch, match components, and import the
    resulting node matches back into the matching graph."""

    def runTest(self):
        # Was a Python 2 print statement (SyntaxError under the file's
        # python3 shebang); converted to a function call.
        print("Import matches")
        mg = MatchingGraph(self.groundtruth_skeletons,
                           self.reconstructed_skeletons,
                           self.distance_threshold,
                           self.voxel_size,
                           verbose=True,
                           initialize_all=True)
        nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_costs, edge_conflicts, edge_pairs = mg.export_to_comatch()
        label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec)
        matches = node_matches
        # Everything is matched: each match pairs one gt vertex with one rec
        # vertex, so the match count is half the vertex count.  Floor
        # division keeps the comparison integer-valued on Python 3
        # (plain '/' would yield a float there).
        self.assertTrue(len(matches) == mg.get_number_of_vertices() // 2)
        mg.import_matches(matches)
        # A perfect matching implies: every vertex is a true positive and no
        # edge is marked as a split or a merge.
        for v in mg.get_vertex_iterator():
            self.assertTrue(mg.is_tp(v))
            self.assertFalse(mg.is_fp(v))
            self.assertFalse(mg.is_fn(v))
        for e in mg.get_edge_iterator():
            self.assertFalse(mg.is_split(e))
            self.assertFalse(mg.is_merge(e))
# NOTE(review): this class was a byte-for-byte duplicate of
# MatchingGraphExportToComatch defined above; the repeated class name
# shadowed the earlier definition, so unittest discovery only ever collected
# one of the two.  Renamed so both definitions are collected; consider
# deleting one of them outright if the duplication was accidental.
class MatchingGraphExportToComatchDuplicate(ParallelLinesSetUp):
    def runTest(self):
        mg = MatchingGraph(self.groundtruth_skeletons,
                           self.reconstructed_skeletons,
                           self.distance_threshold,
                           self.voxel_size,
                           verbose=True,
                           initialize_all=True)
        nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_costs, edge_conflicts, edge_pairs = mg.export_to_comatch()
        # Exported gt/rec node partitions must respect groundtruth membership.
        for v_gt in nodes_gt:
            self.assertTrue(mg.is_groundtruth_mv(v_gt))
        for v_rec in nodes_rec:
            self.assertFalse(mg.is_groundtruth_mv(v_rec))
        # With only matching edges unmasked the export covers all edges.
        mg.mask_matching_edges()
        self.assertTrue(len(edges_gt_rec) == mg.get_number_of_edges())
        mg.clear_edge_masks()
        self.assertTrue(len(nodes_gt) + len(nodes_rec) == mg.get_number_of_vertices())
class TestOneToOne(ErrorTestSetUpSameDistance):
    """One-to-one matching, via quadmatch when available (TypeError on its
    extra keywords falls back to the comatch signature)."""

    def runTest(self):
        # Python 2 print statements converted to function calls so the
        # module parses under the file's python3 shebang.
        print("OneToOne")
        mg = MatchingGraph(self.groundtruth_skeletons,
                           self.reconstructed_skeletons,
                           self.distance_threshold,
                           self.voxel_size,
                           verbose=True,
                           distance_cost=True,
                           initialize_all=True)
        nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_costs, edge_conflicts, edge_pairs = mg.export_to_comatch()
        try:
            # Quadmatch signature: edge conflicts supported, max_edges=1
            # enforces a strict one-to-one matching.
            label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(
                nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec,
                edge_conflicts=edge_conflicts, max_edges=1, edge_costs=edge_costs)
        except TypeError:
            # Comatch signature fallback.
            label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(
                nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec,
                allow_many_to_many=False, edge_costs=edge_costs, no_match_costs=1000.)
        print("label matches:", label_matches)
        print("node_matches:", node_matches)
        # NOTE(review): key "splits" is inconsistent with "num_merges" etc.,
        # but it is written to macro_errors.json, so renaming it could break
        # downstream readers — left unchanged deliberately.
        comatch_errors = {"splits": num_splits, "num_merges": num_merges, "num_fps": num_fps, "num_fns": num_fns}
        print(comatch_errors)
        mg.import_matches(node_matches)
        output_dir = test_data_dir + "/MatchingOnetoOne"
        mg.export_all(output_dir)
        with open(output_dir + "/macro_errors.json", "w+") as f:
            json.dump(comatch_errors, f)
class TestManyToMany(ErrorTestSetUpSameDistance):
    """Many-to-many matching, via quadmatch when available (TypeError on its
    extra keywords falls back to the comatch signature)."""

    def runTest(self):
        # Python 2 print statements converted to function calls so the
        # module parses under the file's python3 shebang.
        print("ManyToMany")
        mg = MatchingGraph(self.groundtruth_skeletons,
                           self.reconstructed_skeletons,
                           self.distance_threshold,
                           self.voxel_size,
                           verbose=True,
                           distance_cost=True,
                           initialize_all=True)
        nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec, edge_costs, edge_conflicts, edge_pairs = mg.export_to_comatch()
        try:
            # Quadmatch signature: max_edges=10 allows many-to-many matches.
            label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(
                nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec,
                edge_conflicts=edge_conflicts, max_edges=10, edge_costs=edge_costs)
        except TypeError:
            # Comatch signature fallback.
            label_matches, node_matches, num_splits, num_merges, num_fps, num_fns = match_components(
                nodes_gt, nodes_rec, edges_gt_rec, labels_gt, labels_rec,
                allow_many_to_many=True, edge_costs=edge_costs, no_match_costs=1000.)
        print("label matches:", label_matches)
        print("node_matches:", node_matches)
        # NOTE(review): key "splits" is inconsistent with "num_merges" etc.,
        # but it is written to macro_errors.json, so renaming it could break
        # downstream readers — left unchanged deliberately.
        comatch_errors = {"splits": num_splits, "num_merges": num_merges, "num_fps": num_fps, "num_fns": num_fns}
        print(comatch_errors)
        mg.import_matches(node_matches)
        output_dir = test_data_dir + "/MatchingManytoMany"
        mg.export_all(output_dir)
        with open(output_dir + "/macro_errors.json", "w+") as f:
            json.dump(comatch_errors, f)
# Script entry point: discover and run every TestCase defined in this module.
if __name__ == "__main__":
    unittest.main()
| 16,358 | 5,473 |
"""
Functions specifically for working with QC/DQRs from
the Atmospheric Radiation Measurement Program (ARM).
"""
import datetime as dt
import numpy as np
import requests
from act.config import DEFAULT_DATASTREAM_NAME
def add_dqr_to_qc(
    obj,
    variable=None,
    assessment='incorrect,suspect',
    exclude=None,
    include=None,
    normalize_assessment=True,
    cleanup_qc=True,
):
    """
    Query the ARM DQR web service for reports and add them as
    new quality control tests to the ancillary quality control
    variable. If no ancillary quality control variable exists a new
    one will be created and linked to the data variable through the
    ancillary_variables attribute.

    See online documentation from the ARM Data
    Quality Office on the use of the DQR web service.

    https://code.arm.gov/docs/dqrws-examples/wikis/home

    Information about the DQR web service is available at
    https://adc.arm.gov/dqrws/

    Parameters
    ----------
    obj : xarray Dataset
        Data object
    variable : string, or list of str, or None
        Variables to check DQR web service. If set to None will
        attempt to update all variables.
    assessment : string
        Assessment type to get DQRs. Current options include
        'missing', 'suspect', 'incorrect' or any combination separated
        by a comma.
    exclude : list of strings
        DQR IDs to exclude from adding into QC
    include : list of strings
        List of DQR IDs to include in flagging of data. Any other DQR IDs
        will be ignored.
    normalize_assessment : boolean
        The DQR assessment term is different than the embedded QC
        term. Embedded QC uses "Bad" and "Indeterminate" while
        DQRs use "Incorrect" and "Suspect". Setting this will ensure
        the same terms are used for both.
    cleanup_qc : boolean
        Call clean.cleanup() method to convert to standardized ancillary
        quality control variables. Has a little bit of overhead so
        if the Dataset has already been cleaned up, no need to run.

    Returns
    -------
    obj : xarray Dataset
        Data object

    Examples
    --------
    .. code-block:: python

        from act.qc.arm import add_dqr_to_qc
        obj = add_dqr_to_qc(obj, variable=['temp_mean', 'atmos_pressure'])

    """
    # DQR web service goes off datastreams; pull the name from the object.
    if 'datastream' in obj.attrs:
        datastream = obj.attrs['datastream']
    elif '_datastream' in obj.attrs:
        datastream = obj.attrs['_datastream']
    else:
        raise ValueError('Object does not have datastream attribute')

    if datastream == DEFAULT_DATASTREAM_NAME:
        raise ValueError("'datastream' name required for DQR service set to default value "
                         f"{datastream}. Unable to perform DQR service query.")

    # Clean up QC to conform to CF conventions
    if cleanup_qc:
        obj.clean.cleanup()

    # In order to properly flag data, get all variables if None. Exclude QC variables.
    if variable is None:
        variable = list(set(obj.data_vars) - set(obj.clean.matched_qc_variables))

    # Check to ensure variable is list
    if not isinstance(variable, (list, tuple)):
        variable = [variable]

    # Loop through each variable and call web service for that variable
    for var_name in variable:
        # Create URL for a single-variable DQR query.
        url = (
            'http://www.archive.arm.gov/dqrws/ARMDQR?datastream='
            + datastream
            + '&varname=' + var_name
            + '&searchmetric=' + assessment
            + '&dqrfields=dqrid,starttime,endtime,metric,subject'
        )

        # Call web service
        req = requests.get(url)

        # Check status values and raise error if not successful
        status = req.status_code
        if status == 400:
            raise ValueError('Check parameters')
        if status == 500:
            raise ValueError('DQR Webservice Temporarily Down')

        # Each response line is one pipe-delimited DQR record:
        # dqrid|starttime|endtime|metric|subject
        dqrs = req.text.splitlines()
        time = obj['time'].values
        dqr_results = {}
        for line in dqrs:
            line = line.split('|')
            dqr_no = line[0]

            # Exclude DQRs if in list
            if exclude is not None and dqr_no in exclude:
                continue

            # Only include if in include list
            if include is not None and dqr_no not in include:
                continue

            # DQR times are Unix epoch seconds; presumably UTC — the service
            # examples use UTC timestamps.
            starttime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[1])))
            endtime = np.datetime64(dt.datetime.utcfromtimestamp(int(line[2])))
            ind = np.where((time >= starttime) & (time <= endtime))
            if ind[0].size == 0:
                continue

            # A DQR can span several disjoint time ranges; accumulate the
            # matching sample indexes per DQR id.
            if dqr_no in dqr_results:
                dqr_results[dqr_no]['index'] = np.append(dqr_results[dqr_no]['index'], ind)
            else:
                dqr_results[dqr_no] = {
                    'index': ind,
                    'test_assessment': line[3],
                    'test_meaning': ': '.join([dqr_no, line[-1]]),
                }

        for key, value in dqr_results.items():
            try:
                obj.qcfilter.add_test(
                    var_name,
                    index=value['index'],
                    test_meaning=value['test_meaning'],
                    test_assessment=value['test_assessment'],
                )
            except IndexError:
                print(f"Skipping '{var_name}' DQR application because of IndexError")

        if normalize_assessment:
            obj.clean.normalize_assessment(variables=var_name)

    return obj
| 5,706 | 1,677 |
import allure
from common.constans import PrintedDress, PrintedSummerDress, Colors
@allure.step('Product Card')
def test_open_product_card(app, login):
    """
    Open the women's category, open the 'Printed Dress' product card and
    verify that name, model, description and price match the expected
    PrintedDress reference data.
    """
    app.page.select_woman_category()
    app.page.open_product('Printed Dress')
    expected = PrintedDress
    assert app.page.product_name() == expected.name
    assert app.page.product_model() == expected.model
    assert app.page.product_description() == expected.description
    assert app.page.product_price() == expected.price
@allure.step('Color Test')
def test_color_dress(app, login):
    """
    Open the women's category, open the 'Printed Summer Dress' product card,
    verify the product info against the PrintedSummerDress reference data and
    check every color swatch.
    """
    app.page.select_woman_category()
    app.page.open_product('Printed Summer Dress')
    expected = PrintedSummerDress
    assert app.page.product_name() == expected.name
    assert app.page.product_model() == expected.model
    assert app.page.product_description() == expected.description
    assert app.page.product_price() == expected.price
    # Check each swatch in the same order as before; the locator is resolved
    # right before its color is read to keep the page interactions identical.
    for swatch_locator, expected_color in (
        (app.page.black_color, Colors.black),
        (app.page.orange_color, Colors.orange),
        (app.page.blue_color, Colors.blue),
        (app.page.yellow_color, Colors.yellow),
    ):
        assert app.page.get_color(swatch_locator()) == expected_color
| 1,458 | 481 |