code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# (C) Copyright 2021 Hewlett Packard Enterprise Development LP.
#!/usr/bin/python3
import schedule
from datetime import date, datetime, time, timedelta
import time
from cleanupclasses import cleanupTrackers as cleanupTrackers
from cleanupclasses import cleanupafcAudit as cleanupafcAudit
from cleanupclasses import cleanupLogging as cleanupLogging
from cleanupclasses import checkSocketserver as checkSocketserver
schedule.every(5).minutes.do(cleanupTrackers)
schedule.every(5).minutes.do(cleanupLogging)
schedule.every(10).seconds.do(checkSocketserver)
schedule.every(5).minutes.do(cleanupafcAudit)
while 1:
schedule.run_pending()
time.sleep(1)
| [
"schedule.run_pending",
"schedule.every",
"time.sleep"
] | [((633, 655), 'schedule.run_pending', 'schedule.run_pending', ([], {}), '()\n', (653, 655), False, 'import schedule\n'), ((661, 674), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (671, 674), False, 'import time\n'), ((427, 444), 'schedule.every', 'schedule.every', (['(5)'], {}), '(5)\n', (441, 444), False, 'import schedule\n'), ((474, 491), 'schedule.every', 'schedule.every', (['(5)'], {}), '(5)\n', (488, 491), False, 'import schedule\n'), ((520, 538), 'schedule.every', 'schedule.every', (['(10)'], {}), '(10)\n', (534, 538), False, 'import schedule\n'), ((570, 587), 'schedule.every', 'schedule.every', (['(5)'], {}), '(5)\n', (584, 587), False, 'import schedule\n')] |
"""
Getting started with The Cannon and APOGEE
"""
import os
import numpy as np
from astropy.table import Table
import AnniesLasso as tc
# Load in the data.
PATH, CATALOG, FILE_FORMAT = ("/Users/arc/research/apogee/", "apogee-rg.fits",
"apogee-rg-custom-normalization-{}.memmap")
labelled_set = Table.read(os.path.join(PATH, CATALOG))
dispersion = np.memmap(os.path.join(PATH, FILE_FORMAT).format("dispersion"),
mode="r", dtype=float)
normalized_flux = np.memmap(
os.path.join(PATH, FILE_FORMAT).format("flux"),
mode="c", dtype=float).reshape((len(labelled_set), -1))
normalized_ivar = np.memmap(
os.path.join(PATH, FILE_FORMAT).format("ivar"),
mode="c", dtype=float).reshape(normalized_flux.shape)
# The labelled set includes ~14000 stars. Let's chose a random ~1,400 for the
# training and validation sets.
np.random.seed(888) # For reproducibility.
q = np.random.randint(0, 10, len(labelled_set)) % 10
validate_set = (q == 0)
train_set = (q == 1)
# Create a Cannon model in parallel using all available threads
model = tc.L1RegularizedCannonModel(labelled_set[train_set],
normalized_flux[train_set], normalized_ivar[train_set],
dispersion=dispersion, threads=-1)
# No regularization.
model.regularization = 0
# Specify the vectorizer.
model.vectorizer = tc.vectorizer.NormalizedPolynomialVectorizer(
labelled_set[train_set],
tc.vectorizer.polynomial.terminator(["TEFF", "LOGG", "FE_H"], 2))
print("Vectorizer terms: {0}".format(
" + ".join(model.vectorizer.get_human_readable_label_vector())))
# Train the model.
model.train()
# Let's set the scatter for each pixel to ensure the mean chi-squared value is
# 1 for the training set, then re-train.
model._set_s2_by_hogg_heuristic()
model.train()
# Use the model to fit the stars in the validation set.
validation_set_labels = model.fit(
normalized_flux[validate_set], normalized_ivar[validate_set])
for i, label_name in enumerate(model.vectorizer.label_names):
fig, ax = plt.subplots()
x = labelled_set[label_name][validate_set]
y = validation_set_labels[:, i]
abs_diff = np.abs(y - x)
ax.scatter(x, y, facecolor="k")
limits = np.array([ax.get_xlim(), ax.get_ylim()])
ax.set_xlim(limits.min(), limits.max())
ax.set_ylim(limits.min(), limits.max())
ax.set_title("{0}: {1:.2f}".format(label_name, np.mean(abs_diff)))
print("{0}: {1:.2f}".format(label_name, np.mean(abs_diff)))
| [
"numpy.abs",
"numpy.mean",
"AnniesLasso.vectorizer.polynomial.terminator",
"os.path.join",
"AnniesLasso.L1RegularizedCannonModel",
"numpy.random.seed"
] | [((840, 859), 'numpy.random.seed', 'np.random.seed', (['(888)'], {}), '(888)\n', (854, 859), True, 'import numpy as np\n'), ((1055, 1203), 'AnniesLasso.L1RegularizedCannonModel', 'tc.L1RegularizedCannonModel', (['labelled_set[train_set]', 'normalized_flux[train_set]', 'normalized_ivar[train_set]'], {'dispersion': 'dispersion', 'threads': '(-1)'}), '(labelled_set[train_set], normalized_flux[\n train_set], normalized_ivar[train_set], dispersion=dispersion, threads=-1)\n', (1082, 1203), True, 'import AnniesLasso as tc\n'), ((315, 342), 'os.path.join', 'os.path.join', (['PATH', 'CATALOG'], {}), '(PATH, CATALOG)\n', (327, 342), False, 'import os\n'), ((1379, 1443), 'AnniesLasso.vectorizer.polynomial.terminator', 'tc.vectorizer.polynomial.terminator', (["['TEFF', 'LOGG', 'FE_H']", '(2)'], {}), "(['TEFF', 'LOGG', 'FE_H'], 2)\n", (1414, 1443), True, 'import AnniesLasso as tc\n'), ((2105, 2118), 'numpy.abs', 'np.abs', (['(y - x)'], {}), '(y - x)\n', (2111, 2118), True, 'import numpy as np\n'), ((367, 398), 'os.path.join', 'os.path.join', (['PATH', 'FILE_FORMAT'], {}), '(PATH, FILE_FORMAT)\n', (379, 398), False, 'import os\n'), ((2350, 2367), 'numpy.mean', 'np.mean', (['abs_diff'], {}), '(abs_diff)\n', (2357, 2367), True, 'import numpy as np\n'), ((2415, 2432), 'numpy.mean', 'np.mean', (['abs_diff'], {}), '(abs_diff)\n', (2422, 2432), True, 'import numpy as np\n'), ((481, 512), 'os.path.join', 'os.path.join', (['PATH', 'FILE_FORMAT'], {}), '(PATH, FILE_FORMAT)\n', (493, 512), False, 'import os\n'), ((622, 653), 'os.path.join', 'os.path.join', (['PATH', 'FILE_FORMAT'], {}), '(PATH, FILE_FORMAT)\n', (634, 653), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`fit`
==================
.. module:: fit
:synopsis:
.. moduleauthor:: hbldh <<EMAIL>>
Created on 2015-09-24, 07:18:22
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from b2ac.compat import *
import b2ac.matrix.matrix_operations as mo
import b2ac.eigenmethods.qr_algorithm as qr
import b2ac.eigenmethods.inverse_iteration as inv_iter
def fit_improved_B2AC_double(points):
"""Ellipse fitting in Python with improved B2AC algorithm as described in
this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.
This version of the fitting uses float storage during calculations and performs the
eigensolver on a float array. It only uses `b2ac` package methods for fitting, to
be as similar to the integer implementation as possible.
:param points: The [Nx2] array of points to fit ellipse to.
:type points: :py:class:`numpy.ndarray`
:return: The conic section array defining the fitted ellipse.
:rtype: :py:class:`numpy.ndarray`
"""
e_conds = []
points = np.array(points, 'float')
M, T = _calculate_M_and_T_double(points)
e_vals = sorted(qr.QR_algorithm_shift_Givens_double(M)[0])
a = None
for ev_ind in [1, 2, 0]:
# Find the eigenvector that matches this eigenvector.
eigenvector = inv_iter.inverse_iteration_for_eigenvector_double(M, e_vals[ev_ind], 5)
# See if that eigenvector yields an elliptical solution.
elliptical_condition = (4 * eigenvector[0] * eigenvector[2]) - (eigenvector[1] ** 2)
e_conds.append(elliptical_condition)
if elliptical_condition > 0:
a = eigenvector
break
if a is None:
print("Eigenvalues = {0}".format(e_vals))
print("Elliptical conditions = {0}".format(e_conds))
raise ArithmeticError("No elliptical solution found.")
conic_coefficients = np.concatenate((a, np.dot(T, a)))
return conic_coefficients
def _calculate_M_and_T_double(points):
"""Part of the B2AC ellipse fitting algorithm, calculating the M and T
matrices needed.
:param points: The [Nx2] array of points to fit ellipse to.
:type points: :py:class:`numpy.ndarray`
:return: Matrices M and T.
:rtype: tuple
"""
S = _calculate_scatter_matrix_double(points[:, 0], points[:, 1])
S1 = S[:3, :3]
S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
S3_inv = mo.inverse_symmetric_3by3_double(S3).reshape((3, 3))
S2 = S[:3, 3:]
T = -np.dot(S3_inv, S2.T)
M_term_2 = np.dot(S2, T)
M = S1 + M_term_2
M[[0, 2], :] = M[[2, 0], :] / 2
M[1, :] = -M[1, :]
return M, T
def _calculate_scatter_matrix_double(x, y):
"""Calculates the complete scatter matrix for the input coordinates.
:param x: The x coordinates.
:type x: :py:class:`numpy.ndarray`
:param y: The y coordinates.
:type y: :py:class:`numpy.ndarray`
:return: The complete scatter matrix.
:rtype: :py:class:`numpy.ndarray`
"""
D = np.ones((len(x), 6), 'int64')
D[:, 0] = x * x
D[:, 1] = x * y
D[:, 2] = y * y
D[:, 3] = x
D[:, 4] = y
return D.T.dot(D)
| [
"b2ac.eigenmethods.inverse_iteration.inverse_iteration_for_eigenvector_double",
"b2ac.matrix.matrix_operations.inverse_symmetric_3by3_double",
"numpy.array",
"numpy.dot",
"b2ac.eigenmethods.qr_algorithm.QR_algorithm_shift_Givens_double"
] | [((1195, 1220), 'numpy.array', 'np.array', (['points', '"""float"""'], {}), "(points, 'float')\n", (1203, 1220), True, 'import numpy as np\n'), ((2502, 2566), 'numpy.array', 'np.array', (['[S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]]'], {}), '([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])\n', (2510, 2566), True, 'import numpy as np\n'), ((2697, 2710), 'numpy.dot', 'np.dot', (['S2', 'T'], {}), '(S2, T)\n', (2703, 2710), True, 'import numpy as np\n'), ((1458, 1529), 'b2ac.eigenmethods.inverse_iteration.inverse_iteration_for_eigenvector_double', 'inv_iter.inverse_iteration_for_eigenvector_double', (['M', 'e_vals[ev_ind]', '(5)'], {}), '(M, e_vals[ev_ind], 5)\n', (1507, 1529), True, 'import b2ac.eigenmethods.inverse_iteration as inv_iter\n'), ((2661, 2681), 'numpy.dot', 'np.dot', (['S3_inv', 'S2.T'], {}), '(S3_inv, S2.T)\n', (2667, 2681), True, 'import numpy as np\n'), ((1288, 1326), 'b2ac.eigenmethods.qr_algorithm.QR_algorithm_shift_Givens_double', 'qr.QR_algorithm_shift_Givens_double', (['M'], {}), '(M)\n', (1323, 1326), True, 'import b2ac.eigenmethods.qr_algorithm as qr\n'), ((2055, 2067), 'numpy.dot', 'np.dot', (['T', 'a'], {}), '(T, a)\n', (2061, 2067), True, 'import numpy as np\n'), ((2580, 2616), 'b2ac.matrix.matrix_operations.inverse_symmetric_3by3_double', 'mo.inverse_symmetric_3by3_double', (['S3'], {}), '(S3)\n', (2612, 2616), True, 'import b2ac.matrix.matrix_operations as mo\n')] |
import logging
from contextlib import contextmanager
import AIPS
import AIPSDir
import ObitTalkUtil
import OErr
import OSystem
import katacomb.configuration as kc
log = logging.getLogger('katacomb')
# Single obit context class
__obit_context = None
class ObitContext(object):
"""
Small wrapper class encapsulating
the Obit error stack and Obit System
"""
def __init__(self):
"""
Constructor
Largely derived from
https://github.com/bill-cotton/Obit/blob/master/ObitSystem/Obit/share/scripts/AIPSSetup.py
"""
# Get the current configuration
cfg = kc.get_config()
self.err = err = OErr.OErr()
self.obitsys = OSystem.OSystem("Pipeline", 1, cfg['userno'],
0, [" "], 0, [" "],
True, False, err)
OErr.printErrMsg(err, "Error starting Obit System")
# Setup AIPS userno
AIPS.userno = cfg['userno']
# Setup Obit Environment
ObitTalkUtil.SetEnviron(AIPS_ROOT=cfg['aipsroot'],
AIPS_VERSION=cfg['aipsversion'],
OBIT_EXEC=cfg['obitexec'],
DA00=cfg['da00'],
ARCH="LINUX",
aipsdirs=cfg['aipsdirs'],
fitsdirs=cfg['fitsdirs'])
def close(self):
"""
Shutdown the Obit System, logging any errors on the error stack
"""
# Remove defined AIPS & FITS dirs from environment to prevent
# overloading the list of defined disks when multiple Obit
# environments are constructed in a single python session
# (eg. during the unit tests).
AIPS.AIPS.disks = [None]
AIPSDir.AIPSdisks = []
if self.err.isErr:
OErr.printErr(self.err)
OSystem.Shutdown(self.obitsys)
@contextmanager
def obit_context():
"""
Creates a global Obit Context, initialising the AIPS system
and creating error stacks.
.. code-block:: python
with obit_context():
err = obit_err()
handle_obit_err("An error occured", err)
...
"""
global __obit_context
try:
if __obit_context is not None:
raise ValueError("Obit Context already exists")
log.info("Creating Obit Context")
__obit_context = ObitContext()
yield
finally:
if __obit_context is not None:
log.info("Shutting Down Obit Context")
__obit_context.close()
__obit_context = None
def handle_obit_err(msg="", err=None):
"""
If the Obit error stack is in an error state,
print it via :code:`OErr.printErrMsg` and raise
an :code:`Exception(msg)`.
Parameters
----------
msg (optional): str
Message describing the context in which the
error occurred. Defaults to "".
err (optional): OErr
Obit error stack to handle. If None, the default
error stack will be obtained from :code:`obit_err()`
Raises
------
Exception
Raised by Obit if error stack is in an error state
"""
if err is None:
err = obit_err()
# OErr.printErrMsg raises an Exception
if err.isErr:
err.printErrMsg(err, msg)
def obit_err():
""" Return the Obit Context error stack """
try:
return __obit_context.err
except AttributeError as e:
if 'NoneType' in str(e):
raise ValueError("Create a valid Obit context with obit_context()")
def obit_sys():
""" Return the Obit Context system """
try:
return __obit_context.obitsys
except AttributeError as e:
if 'NoneType' in str(e):
raise ValueError("Create a valid Obit context with obit_context()")
| [
"logging.getLogger",
"ObitTalkUtil.SetEnviron",
"katacomb.configuration.get_config",
"OErr.printErr",
"OSystem.Shutdown",
"OSystem.OSystem",
"OErr.printErrMsg",
"OErr.OErr"
] | [((174, 203), 'logging.getLogger', 'logging.getLogger', (['"""katacomb"""'], {}), "('katacomb')\n", (191, 203), False, 'import logging\n'), ((632, 647), 'katacomb.configuration.get_config', 'kc.get_config', ([], {}), '()\n', (645, 647), True, 'import katacomb.configuration as kc\n'), ((674, 685), 'OErr.OErr', 'OErr.OErr', ([], {}), '()\n', (683, 685), False, 'import OErr\n'), ((709, 797), 'OSystem.OSystem', 'OSystem.OSystem', (['"""Pipeline"""', '(1)', "cfg['userno']", '(0)', "[' ']", '(0)', "[' ']", '(True)', '(False)', 'err'], {}), "('Pipeline', 1, cfg['userno'], 0, [' '], 0, [' '], True, \n False, err)\n", (724, 797), False, 'import OSystem\n'), ((879, 930), 'OErr.printErrMsg', 'OErr.printErrMsg', (['err', '"""Error starting Obit System"""'], {}), "(err, 'Error starting Obit System')\n", (895, 930), False, 'import OErr\n'), ((1038, 1242), 'ObitTalkUtil.SetEnviron', 'ObitTalkUtil.SetEnviron', ([], {'AIPS_ROOT': "cfg['aipsroot']", 'AIPS_VERSION': "cfg['aipsversion']", 'OBIT_EXEC': "cfg['obitexec']", 'DA00': "cfg['da00']", 'ARCH': '"""LINUX"""', 'aipsdirs': "cfg['aipsdirs']", 'fitsdirs': "cfg['fitsdirs']"}), "(AIPS_ROOT=cfg['aipsroot'], AIPS_VERSION=cfg[\n 'aipsversion'], OBIT_EXEC=cfg['obitexec'], DA00=cfg['da00'], ARCH=\n 'LINUX', aipsdirs=cfg['aipsdirs'], fitsdirs=cfg['fitsdirs'])\n", (1061, 1242), False, 'import ObitTalkUtil\n'), ((1923, 1953), 'OSystem.Shutdown', 'OSystem.Shutdown', (['self.obitsys'], {}), '(self.obitsys)\n', (1939, 1953), False, 'import OSystem\n'), ((1890, 1913), 'OErr.printErr', 'OErr.printErr', (['self.err'], {}), '(self.err)\n', (1903, 1913), False, 'import OErr\n')] |
from app.models import User,Post, Comment,Clap
from app import db
# def setUp(self):
# self.user_Jack = User(username = 'Jack',password = '<PASSWORD>', email = '<EMAIL>')
def tearDown(self):
Post.query.delete()
User.query.delete()
Comment.query.delete()
Clap.query.delete() | [
"app.models.User.query.delete",
"app.models.Comment.query.delete",
"app.models.Clap.query.delete",
"app.models.Post.query.delete"
] | [((209, 228), 'app.models.Post.query.delete', 'Post.query.delete', ([], {}), '()\n', (226, 228), False, 'from app.models import User, Post, Comment, Clap\n'), ((237, 256), 'app.models.User.query.delete', 'User.query.delete', ([], {}), '()\n', (254, 256), False, 'from app.models import User, Post, Comment, Clap\n'), ((265, 287), 'app.models.Comment.query.delete', 'Comment.query.delete', ([], {}), '()\n', (285, 287), False, 'from app.models import User, Post, Comment, Clap\n'), ((296, 315), 'app.models.Clap.query.delete', 'Clap.query.delete', ([], {}), '()\n', (313, 315), False, 'from app.models import User, Post, Comment, Clap\n')] |
"""
Copyright (c) 2020 <NAME> <EMAIL>
zlib License, see LICENSE file.
"""
import json
import pygame
from draw import draw_polygon
from export import generate_shape_groups, write_frames, generate_horizontal_line_sets, generate_shapes, \
generate_horizontal_line_groups, optimize_screen_buffer_colors
def gba_vertex(st_vertex):
x_margin = 0
x = x_margin + float(st_vertex['x']) * ((240 - x_margin) / 256)
x = int(round(x))
y = float(st_vertex['y']) * (160 / 200)
y = int(round(y))
return x, y
def draw_horizontal_line_sets(screen, horizontal_line_sets, colors):
for color_index, horizontal_lines in horizontal_line_sets.items():
color = colors[color_index]
for horizontal_line in horizontal_lines:
y = horizontal_line[0]
pygame.draw.line(screen, color, (horizontal_line[1], y), (horizontal_line[2], y))
def draw_horizontal_line_groups(screen, horizontal_line_groups, colors):
for horizontal_line_group in horizontal_line_groups:
for y, line_pair in enumerate(horizontal_line_group):
if line_pair is not None:
color_index = line_pair[0]
line = line_pair[1]
pygame.draw.line(screen, colors[color_index], (line[1], y), (line[2], y))
def draw_shapes(screen, shapes, colors):
for shape in shapes:
color = colors[shape[0]]
y = shape[1]
for horizontal_line in shape[3]:
pygame.draw.line(screen, color, (horizontal_line[0], y), (horizontal_line[1], y))
y += 1
def draw_shape_groups(screen, shape_groups, colors):
# print(len(shape_groups))
for shape_group in shape_groups:
draw_shapes(screen, shape_group, colors)
with open('niccc.json') as json_file:
niccc = json.load(json_file)
pygame.init()
screen = pygame.display.set_mode([240, 160])
clock = pygame.time.Clock()
output_frames = []
for frame_index, frame in enumerate(niccc['frames']):
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
colors = []
for frame_color in frame['palette']:
colors.append(pygame.Color(frame_color))
try:
frame_vertices = frame['vertices']
indexed_vertices = True
except KeyError:
frame_vertices = []
indexed_vertices = False
vertices = []
for frame_vertex in frame_vertices:
vertices.append(gba_vertex(frame_vertex))
screen_buffer = [0] * 160
for i in range(160):
screen_buffer[i] = [0] * 240
for frame_polygon in frame['polygons']:
polygon_vertices = []
if indexed_vertices:
for vertex_index in frame_polygon['verticesIdx']:
polygon_vertices.append(vertices[vertex_index['idx']])
else:
for vertex in frame_polygon['vertices']:
polygon_vertices.append(gba_vertex(vertex))
color_index = frame_polygon['colidx']
if color_index > 0:
draw_polygon(screen, color_index, polygon_vertices, screen_buffer)
optimize_screen_buffer_colors(screen_buffer, colors)
screen.fill(colors[0])
horizontal_line_sets = generate_horizontal_line_sets(screen_buffer)
# draw_horizontal_line_sets(screen, horizontal_line_sets, colors)
horizontal_line_groups = generate_horizontal_line_groups(horizontal_line_sets)
# draw_horizontal_line_groups(screen, horizontal_line_groups, colors)
shapes = generate_shapes(horizontal_line_groups)
# draw_shapes(screen, shapes, colors)
shape_groups = generate_shape_groups(shapes)
draw_shape_groups(screen, shape_groups, colors)
output_frames.append((colors, shape_groups))
pygame.display.flip()
clock.tick(60)
# if len(output_frames) >= 2 * 60:
# break
write_frames(output_frames)
| [
"export.write_frames",
"pygame.init",
"pygame.draw.line",
"pygame.event.get",
"pygame.Color",
"pygame.display.set_mode",
"pygame.display.flip",
"export.optimize_screen_buffer_colors",
"export.generate_shape_groups",
"export.generate_horizontal_line_groups",
"export.generate_horizontal_line_sets"... | [((1865, 1878), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1876, 1878), False, 'import pygame\n'), ((1889, 1924), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[240, 160]'], {}), '([240, 160])\n', (1912, 1924), False, 'import pygame\n'), ((1934, 1953), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1951, 1953), False, 'import pygame\n'), ((3928, 3955), 'export.write_frames', 'write_frames', (['output_frames'], {}), '(output_frames)\n', (3940, 3955), False, 'from export import generate_shape_groups, write_frames, generate_horizontal_line_sets, generate_shapes, generate_horizontal_line_groups, optimize_screen_buffer_colors\n'), ((1841, 1861), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1850, 1861), False, 'import json\n'), ((2051, 2069), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2067, 2069), False, 'import pygame\n'), ((3173, 3225), 'export.optimize_screen_buffer_colors', 'optimize_screen_buffer_colors', (['screen_buffer', 'colors'], {}), '(screen_buffer, colors)\n', (3202, 3225), False, 'from export import generate_shape_groups, write_frames, generate_horizontal_line_sets, generate_shapes, generate_horizontal_line_groups, optimize_screen_buffer_colors\n'), ((3284, 3328), 'export.generate_horizontal_line_sets', 'generate_horizontal_line_sets', (['screen_buffer'], {}), '(screen_buffer)\n', (3313, 3328), False, 'from export import generate_shape_groups, write_frames, generate_horizontal_line_sets, generate_shapes, generate_horizontal_line_groups, optimize_screen_buffer_colors\n'), ((3432, 3485), 'export.generate_horizontal_line_groups', 'generate_horizontal_line_groups', (['horizontal_line_sets'], {}), '(horizontal_line_sets)\n', (3463, 3485), False, 'from export import generate_shape_groups, write_frames, generate_horizontal_line_sets, generate_shapes, generate_horizontal_line_groups, optimize_screen_buffer_colors\n'), ((3577, 3616), 
'export.generate_shapes', 'generate_shapes', (['horizontal_line_groups'], {}), '(horizontal_line_groups)\n', (3592, 3616), False, 'from export import generate_shape_groups, write_frames, generate_horizontal_line_sets, generate_shapes, generate_horizontal_line_groups, optimize_screen_buffer_colors\n'), ((3682, 3711), 'export.generate_shape_groups', 'generate_shape_groups', (['shapes'], {}), '(shapes)\n', (3703, 3711), False, 'from export import generate_shape_groups, write_frames, generate_horizontal_line_sets, generate_shapes, generate_horizontal_line_groups, optimize_screen_buffer_colors\n'), ((3824, 3845), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (3843, 3845), False, 'import pygame\n'), ((826, 912), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(horizontal_line[1], y)', '(horizontal_line[2], y)'], {}), '(screen, color, (horizontal_line[1], y), (horizontal_line[2\n ], y))\n', (842, 912), False, 'import pygame\n'), ((1503, 1589), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'color', '(horizontal_line[0], y)', '(horizontal_line[1], y)'], {}), '(screen, color, (horizontal_line[0], y), (horizontal_line[1\n ], y))\n', (1519, 1589), False, 'import pygame\n'), ((2216, 2241), 'pygame.Color', 'pygame.Color', (['frame_color'], {}), '(frame_color)\n', (2228, 2241), False, 'import pygame\n'), ((3099, 3165), 'draw.draw_polygon', 'draw_polygon', (['screen', 'color_index', 'polygon_vertices', 'screen_buffer'], {}), '(screen, color_index, polygon_vertices, screen_buffer)\n', (3111, 3165), False, 'from draw import draw_polygon\n'), ((1244, 1317), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'colors[color_index]', '(line[1], y)', '(line[2], y)'], {}), '(screen, colors[color_index], (line[1], y), (line[2], y))\n', (1260, 1317), False, 'import pygame\n')] |
import logging
import multiprocessing
import os
import pickle as pkl
import numpy as np
import tensorflow as tf
from gensim.models import word2vec
from gensim.models.word2vec import PathLineSentences
logger = logging.getLogger('Word2Vec')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
seg_file = 'data/processed/seg_text.txt'
word_vec_file = 'data/processed/word2vec.txt'
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('processed_data_path', 'data/processed', 'processed data dir to load')
tf.app.flags.DEFINE_integer('word_dim', 300, 'dimension of word embedding')
def word_vec():
logger.info('Word to vec')
model = word2vec.Word2Vec(PathLineSentences(seg_file), sg=1, size=300, window=5, min_count=10, sample=1e-4,
workers=multiprocessing.cpu_count())
model.wv.save_word2vec_format(word_vec_file, binary=False)
def dump_pkl(file_path, obj):
with open(file_path, 'wb') as f:
pkl.dump(obj, f)
f.close()
def create_word_vec(flags):
logger.info('Word map and embedding')
word_map = {}
word_map['PAD'] = len(word_map)
word_map['UNK'] = len(word_map)
word_embed = []
for line in open(word_vec_file, 'r'):
content = line.strip().split()
if len(content) != flags.word_dim + 1:
continue
word_map[content[0]] = len(word_map)
word_embed.append(np.asarray(content[1:], dtype=np.float32))
word_embed = np.stack(word_embed)
embed_mean, embed_std = word_embed.mean(), word_embed.std()
pad_embed = np.random.normal(embed_mean, embed_std, (2, flags.word_dim))
word_embed = np.concatenate((pad_embed, word_embed), axis=0)
word_embed = word_embed.astype(np.float32)
print('Word in dict - {}'.format(len(word_map)))
dump_pkl(os.path.join(flags.processed_data_path, 'word_map.pkl'), word_map)
dump_pkl(os.path.join(flags.processed_data_path, 'word_embed.pkl'), word_embed)
def main(_):
word_vec()
create_word_vec(FLAGS)
if __name__ == "__main__":
tf.app.run()
| [
"logging.getLogger",
"numpy.random.normal",
"logging.StreamHandler",
"pickle.dump",
"tensorflow.app.flags.DEFINE_integer",
"logging.Formatter",
"os.path.join",
"numpy.asarray",
"tensorflow.app.flags.DEFINE_string",
"multiprocessing.cpu_count",
"numpy.stack",
"numpy.concatenate",
"gensim.mode... | [((210, 239), 'logging.getLogger', 'logging.getLogger', (['"""Word2Vec"""'], {}), "('Word2Vec')\n", (227, 239), False, 'import logging\n'), ((282, 355), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (299, 355), False, 'import logging\n'), ((374, 397), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (395, 397), False, 'import logging\n'), ((628, 729), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""processed_data_path"""', '"""data/processed"""', '"""processed data dir to load"""'], {}), "('processed_data_path', 'data/processed',\n 'processed data dir to load')\n", (654, 729), True, 'import tensorflow as tf\n'), ((726, 801), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""word_dim"""', '(300)', '"""dimension of word embedding"""'], {}), "('word_dim', 300, 'dimension of word embedding')\n", (753, 801), True, 'import tensorflow as tf\n'), ((1664, 1684), 'numpy.stack', 'np.stack', (['word_embed'], {}), '(word_embed)\n', (1672, 1684), True, 'import numpy as np\n'), ((1766, 1826), 'numpy.random.normal', 'np.random.normal', (['embed_mean', 'embed_std', '(2, flags.word_dim)'], {}), '(embed_mean, embed_std, (2, flags.word_dim))\n', (1782, 1826), True, 'import numpy as np\n'), ((1844, 1891), 'numpy.concatenate', 'np.concatenate', (['(pad_embed, word_embed)'], {'axis': '(0)'}), '((pad_embed, word_embed), axis=0)\n', (1858, 1891), True, 'import numpy as np\n'), ((2247, 2259), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2257, 2259), True, 'import tensorflow as tf\n'), ((881, 908), 'gensim.models.word2vec.PathLineSentences', 'PathLineSentences', (['seg_file'], {}), '(seg_file)\n', (898, 908), False, 'from gensim.models.word2vec import PathLineSentences\n'), ((1170, 1186), 'pickle.dump', 'pkl.dump', (['obj', 'f'], {}), '(obj, f)\n', (1178, 1186), True, 
'import pickle as pkl\n'), ((2006, 2061), 'os.path.join', 'os.path.join', (['flags.processed_data_path', '"""word_map.pkl"""'], {}), "(flags.processed_data_path, 'word_map.pkl')\n", (2018, 2061), False, 'import os\n'), ((2086, 2143), 'os.path.join', 'os.path.join', (['flags.processed_data_path', '"""word_embed.pkl"""'], {}), "(flags.processed_data_path, 'word_embed.pkl')\n", (2098, 2143), False, 'import os\n'), ((1001, 1028), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1026, 1028), False, 'import multiprocessing\n'), ((1603, 1644), 'numpy.asarray', 'np.asarray', (['content[1:]'], {'dtype': 'np.float32'}), '(content[1:], dtype=np.float32)\n', (1613, 1644), True, 'import numpy as np\n')] |
#!/usr/bin/python3.8
import time
import copy
import random
import numpy as np
import clustering as clt
class HyperParams:
def __init__(self, population_size : int, crossover_rate : float, mutation_rate : float):
self._population_size = population_size
self._crossover_rate = crossover_rate
self._mutation_rate = mutation_rate
@property
def population_size(self) -> int:
return self._population_size
@property
def crossover_rate(self) -> float:
return self._crossover_rate
@property
def mutation_rate(self) -> float:
return self._mutation_rate
class Population:
def __init__(self, k : int, size : int, clusters : clt.Clusters):
self._size = size
self._instances : list = []
self._k : int = k
self._point_dim : tuple = clusters.point_dim
for i in range(size):
self._instances.append(clt.Clusters(clusters.k, clusters.points))
self._instances[i].initialize_state()
@property
def instances(self) -> list:
return self._instances
def mutate(self, instance : int):
self._instances[instance].disturb()
self._instances[instance].accept_disturbed()
def crossover(self, a : int, b : int) -> clt.Clusters:
offspring : clt.Clusters = clt.Clusters(self._instances[a].k, self._instances[a].points)
divide : int = self._k // 2 if self._k <= 3 else random.randint(1, self._k - 2)
centroids : list = list([])
for i in range(self._k):
centroids.append(self._instances[a].centroids[i] if i < divide else self._instances[b].centroids[i])
offspring.initialize_state(centroids)
return offspring
def adaptation(self, instance : int) -> float:
sse_sum : float = 0
for i in range(self._size):
sse_sum += self._instances[i].sse
return self._instances[i].sse / sse_sum
def instance_value(self, instance : int) -> float:
return self._instances[instance].sse
def coverging(self) -> bool:
s : float = self._instances[0].sse
for i in range(1, self._size):
if abs(s - self._instances[i].sse) > .000000001:
return False
return True
def set_instance(self, instance : int, clusters : clt.Clusters):
self._instances[instance] = clusters
def set_instances(self, instances : list):
for i in range(self._size):
self._instances[i] = instances[i]
def genetic(hyper_params : HyperParams, clusters : clt.Clusters, population : Population = None) -> (float, clt.Clusters):
population = Population(clusters.k, hyper_params.population_size, clusters) if population == None else population
while not population.coverging():
population_indices : list = range(0, hyper_params.population_size)
probabilities : list = list(map(population.adaptation, population_indices))
values : list = list(map(population.instance_value, population_indices))
a : int = random.choices(population_indices, probabilities, k = 1)[0]
b : int = random.choices(population_indices, probabilities, k = 1)[0]
if random.random() < hyper_params.crossover_rate:
offspring : clt.Clusters = population.crossover(a, b)
max_value : float = max(values)
if offspring.sse < max_value:
population.set_instance(values.index(max_value), offspring)
result : float = min(map(population.instance_value, range(0, hyper_params.population_size)))
result_instace : clt.Clusters = None
for instance in population.instances:
if instance.sse == result:
result_instace = instance
return (result, result_instace) | [
"random.random",
"random.choices",
"random.randint",
"clustering.Clusters"
] | [((1342, 1403), 'clustering.Clusters', 'clt.Clusters', (['self._instances[a].k', 'self._instances[a].points'], {}), '(self._instances[a].k, self._instances[a].points)\n', (1354, 1403), True, 'import clustering as clt\n'), ((1470, 1500), 'random.randint', 'random.randint', (['(1)', '(self._k - 2)'], {}), '(1, self._k - 2)\n', (1484, 1500), False, 'import random\n'), ((3107, 3161), 'random.choices', 'random.choices', (['population_indices', 'probabilities'], {'k': '(1)'}), '(population_indices, probabilities, k=1)\n', (3121, 3161), False, 'import random\n'), ((3185, 3239), 'random.choices', 'random.choices', (['population_indices', 'probabilities'], {'k': '(1)'}), '(population_indices, probabilities, k=1)\n', (3199, 3239), False, 'import random\n'), ((3256, 3271), 'random.random', 'random.random', ([], {}), '()\n', (3269, 3271), False, 'import random\n'), ((931, 972), 'clustering.Clusters', 'clt.Clusters', (['clusters.k', 'clusters.points'], {}), '(clusters.k, clusters.points)\n', (943, 972), True, 'import clustering as clt\n')] |
"""
Utilities related to matplotlib.
"""
import logging
from matplotlib.ticker import Formatter
from pytools.api import AllTracker
from pytools.meta import SingletonMeta
log = logging.getLogger(__name__)
#
# Exported names
#
__all__ = ["PercentageFormatter"]
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# Classes
#
class PercentageFormatter(Formatter, metaclass=SingletonMeta):
"""
Formats floats as a percentages with 3 digits precision, omitting trailing zeros.
For percentages above 100%, formats percentages as the nearest whole number.
Formatting examples:
- ``0.00005`` is formatted as ``0.01%``
- ``0.0005`` is formatted as ``0.05%``
- ``0.0`` is formatted as ``0%``
- ``0.1`` is formatted as ``10%``
- ``1.0`` is formatted as ``100%``
- ``0.01555`` is formatted as ``1.56%``
- ``0.1555`` is formatted as ``15.6%``
- ``1.555`` is formatted as ``156%``
- ``15.55`` is formatted as ``1556%``
- ``1555`` is formatted as ``1.6e+05%``
"""
def __call__(self, x, pos=None) -> str:
if x < 1.0:
return f"{x * 100.0:.3g}%"
else:
return f"{round(x * 100.0):.5g}%"
# check consistency of __all__
__tracker.validate()
| [
"logging.getLogger"
] | [((180, 207), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (197, 207), False, 'import logging\n')] |
from django.db import models
class BadQuery(models.Model):
date = models.DateTimeField(auto_now=True)
query = models.TextField() | [
"django.db.models.DateTimeField",
"django.db.models.TextField"
] | [((72, 107), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (92, 107), False, 'from django.db import models\n'), ((120, 138), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (136, 138), False, 'from django.db import models\n')] |
import random
from .base import *
try:
from .private import *
except ImportError:
raise ImportError("""
Please create private.py file
with contain configuration for
====================================
SECRET_KEY = '{}'
DEBUG = False
ALLOWED_HOSTS = []
SSHUB_API = 'api-key'
====================================
""".format(''.join([random.SystemRandom().
choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])))
| [
"random.SystemRandom"
] | [((379, 400), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (398, 400), False, 'import random\n')] |
import sublime, sublime_plugin
from os.path import join as join_path, isdir
from os import mkdir
IS_WINDOWS = sublime.platform() == 'windows'
PATH_SEPARATOR = '\\' if IS_WINDOWS else '/'
EXTENSION = '.exe' if IS_WINDOWS else ''
class VlangBuilderCommand(sublime_plugin.WindowCommand):
def run(self, **kwargs):
self.flags = kwargs.pop('flags') if 'flags' in kwargs else []
self.project = kwargs.pop('project') if 'project' in kwargs else False
action = kwargs.pop('action') if 'action' in kwargs else 'guess'
kwargs['shell_cmd'] = self.get_shell_cmd(action)
# kwargs['shell_cmd'] = 'echo ' + kwargs.get('shell_cmd')
self.window.run_command('exec', kwargs)
def get_shell_cmd(self, action: str) -> str:
parts = self.window.active_view().file_name().split(PATH_SEPARATOR)
file = '.' if self.project else parts[-1]
root = parts[-2]
is_test = '_test.v' in file
if not action and is_test:
return disabled('file')
settings = sublime.load_settings('V.sublime-settings')
if not action:
bin_name = file.split('.')[0] + EXTENSION
if root in settings.get('magic_dirs') or []:
base = PATH_SEPARATOR.join(parts[:-2])
bin_dir = join_path(base, 'bin')
if not isdir(bin_dir): mkdir(bin_dir)
bin_name = join_path(bin_dir, bin_name)
self.push_flags(False, ['-o', bin_name])
elif action == 'guess':
action = 'test' if is_test else 'run'
extension = get_extension(file)
for preset in settings.get('magic_if') or []:
exts = preset.get('extensions', [])
dirs = preset.get('directories', [])
plat = preset.get('platform', '')
flags = preset.get('flags', [])
done = False
[match_ext, excl_ext] = includes(extension, exts)
[match_dir, excl_dir] = includes(root, dirs)
if match_ext:
if excl_ext:
return disabled('platform')
elif match_dir or match_dir is None:
done = self.push_flags(done, flags)
elif match_dir:
if excl_dir:
return disabled('platform')
if match_ext is None:
self.push_flags(done, flags)
compiler = settings.get('compiler') or 'v'
return ' '.join([compiler, *self.flags, action, file])
def push_flags(self, done: bool, flags: list) -> bool:
if not done:
skip = False
for f in flags:
if skip:
skip = False
elif f in self.flags:
if f == '-o':
skip = True
else:
self.flags.append(f)
return done or len(flags) > 0
def get_extension(file: str) -> str:
"""
:examples:
get_extension('some.win.prod.v') -> 'win.prod' # TODO
get_extension('some.win.v') -> 'win'
get_extension('some.v') -> ''
"""
parts = file.split('.')[1:-1]
return '.'.join(parts)
def includes(base: str, ary: list):
if not ary: return [None, False]
excl = '!' + base in ary
return [base in ary or excl, excl]
def disabled(kind: str) -> str:
return f'echo Disabled for the current {kind}.'
| [
"os.path.join",
"os.path.isdir",
"os.mkdir",
"sublime.platform",
"sublime.load_settings"
] | [((112, 130), 'sublime.platform', 'sublime.platform', ([], {}), '()\n', (128, 130), False, 'import sublime, sublime_plugin\n'), ((1043, 1086), 'sublime.load_settings', 'sublime.load_settings', (['"""V.sublime-settings"""'], {}), "('V.sublime-settings')\n", (1064, 1086), False, 'import sublime, sublime_plugin\n'), ((1304, 1326), 'os.path.join', 'join_path', (['base', '"""bin"""'], {}), "(base, 'bin')\n", (1313, 1326), True, 'from os.path import join as join_path, isdir\n'), ((1408, 1436), 'os.path.join', 'join_path', (['bin_dir', 'bin_name'], {}), '(bin_dir, bin_name)\n', (1417, 1436), True, 'from os.path import join as join_path, isdir\n'), ((1350, 1364), 'os.path.isdir', 'isdir', (['bin_dir'], {}), '(bin_dir)\n', (1355, 1364), False, 'from os.path import join as join_path, isdir\n'), ((1366, 1380), 'os.mkdir', 'mkdir', (['bin_dir'], {}), '(bin_dir)\n', (1371, 1380), False, 'from os import mkdir\n')] |
# Generated by Django 2.1.5 on 2020-07-24 07:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logs_manager', '0005_auto_20200724_0647'),
]
operations = [
migrations.RemoveField(
model_name='analyticlog',
name='logobject_ptr',
),
migrations.AlterModelOptions(
name='userinteraction',
options={'ordering': ('log_id', 'timestamp')},
),
migrations.RenameField(
model_name='userinteraction',
old_name='analytic_log',
new_name='log_id',
),
migrations.DeleteModel(
name='AnalyticLog',
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.migrations.DeleteModel",
"django.db.migrations.RemoveField",
"django.db.migrations.RenameField"
] | [((232, 302), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""analyticlog"""', 'name': '"""logobject_ptr"""'}), "(model_name='analyticlog', name='logobject_ptr')\n", (254, 302), False, 'from django.db import migrations\n'), ((347, 451), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""userinteraction"""', 'options': "{'ordering': ('log_id', 'timestamp')}"}), "(name='userinteraction', options={'ordering': (\n 'log_id', 'timestamp')})\n", (375, 451), False, 'from django.db import migrations\n'), ((491, 592), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""userinteraction"""', 'old_name': '"""analytic_log"""', 'new_name': '"""log_id"""'}), "(model_name='userinteraction', old_name=\n 'analytic_log', new_name='log_id')\n", (513, 592), False, 'from django.db import migrations\n'), ((644, 686), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""AnalyticLog"""'}), "(name='AnalyticLog')\n", (666, 686), False, 'from django.db import migrations\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, zlib, pickle, base64, string, hashlib, random, functools
# for 2/3 compatibility
try:
unicode
except NameError:
# this happens in python3
unicode = ()
def smart_str(obj):
"smart version of str(), works in both python2/3, regardless of whether obj is an unicode or a string or bytes"
if isinstance(obj, unicode): # for python3, it gives False, because unicode==()
s = obj.encode('utf-8') # but for python2, we have to use a string, not unicode string
elif not isinstance(obj, str):
s = str(obj)
else: # obj is already a string
s = obj
return s
class Cache:
"general on-disk cache"
def __init__(self, cachedir='cache', name=''):
if os.path.exists(cachedir) and not os.path.isdir(cachedir):
raise IOError('File {} exists but is not a directory'.format(cachedir))
if not os.path.exists(cachedir):
os.makedirs(cachedir)
self.cachedir = cachedir
# name is used to distinguish between several caches using the same directory
self.name = name
def _to_safe(self, s):
"helper function to extract only filesystem-safe character from a string"
s = smart_str(s)
return ''.join(x for x in s if x in string.ascii_letters+string.digits)
def _fname(self, key):
# asckey is used as a human readable prefix of the on-disk files, to aid in debugging
asckey = self._to_safe(key)
key = repr(key)
fname1 = asckey[:10]
h = hashlib.md5((repr(self.name)+key).encode('utf-8', errors='replace'))
hd = h.digest()
fname2 = base64.urlsafe_b64encode(hd)
fname2 = fname2.decode('ascii')
fname = fname1 + fname2
fname = os.path.join(self.cachedir, fname)
return fname
def __getitem__(self, key):
fname = self._fname(key)
if os.path.exists(fname):
value = pickle.loads(zlib.decompress(open(fname, 'rb').read()))
return value
else:
raise KeyError(key)
def __setitem__(self, key, val):
fname = self._fname(key)
p = pickle.dumps(val)
# trying to get some atomicity
fname_tmp = fname+'.'+str(random.random())+'.tmp'
open(fname_tmp, 'wb').write(zlib.compress(p, 9))
os.rename(fname_tmp, fname)
def __contains__(self, key):
fname = self._fname(key)
return os.path.exists(fname)
def get(self, key, default=None):
return self[key] if key in self else default
def memoize(func=None, cachedir='cache', cachename=None):
"simple memoizing decorator, works on functions, methods, or classes"
"inspired by https://wiki.python.org/moin/PythonDecoratorLibrary"
def memoizer_decorator(obj):
# try to minimize conflicts between different caches by constructing more or less unique name
if cachename is None:
if '__name__' in dir(obj):
localcachename = obj.__name__
else:
localcachename = ''
localcachename += str(type(obj))
else:
localcachename = cachename
if isinstance(cachedir, dict):
cache = cachedir
elif isinstance(cachedir, str):
cache = Cache(cachedir, localcachename)
else:
raise ValueError('Expecting directory path or dictionary')
@functools.wraps(obj)
def memoizer(*args, **kwargs):
# kwargs keys are guaranteed to be strings, so we can sort the items to get deterministic order
kwargs_hash = sorted(kwargs.items()) if kwargs else None
key = (args, kwargs_hash)
if key in cache:
#print('cached', args, kwargs)
val = cache[key]
else:
#print('unchached', args, kwargs)
val = obj(*args, **kwargs)
cache[key] = val
return val
return memoizer
# if this is used as a decorator without arguments
if func is not None and callable(func):
return memoizer_decorator(func)
return memoizer_decorator
if __name__=='__main__':
pass
| [
"os.path.exists",
"os.makedirs",
"base64.urlsafe_b64encode",
"pickle.dumps",
"os.rename",
"os.path.join",
"functools.wraps",
"zlib.compress",
"os.path.isdir",
"random.random"
] | [((1671, 1699), 'base64.urlsafe_b64encode', 'base64.urlsafe_b64encode', (['hd'], {}), '(hd)\n', (1695, 1699), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((1788, 1822), 'os.path.join', 'os.path.join', (['self.cachedir', 'fname'], {}), '(self.cachedir, fname)\n', (1800, 1822), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((1921, 1942), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (1935, 1942), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((2174, 2191), 'pickle.dumps', 'pickle.dumps', (['val'], {}), '(val)\n', (2186, 2191), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((2354, 2381), 'os.rename', 'os.rename', (['fname_tmp', 'fname'], {}), '(fname_tmp, fname)\n', (2363, 2381), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((2464, 2485), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (2478, 2485), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((3440, 3460), 'functools.wraps', 'functools.wraps', (['obj'], {}), '(obj)\n', (3455, 3460), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((762, 786), 'os.path.exists', 'os.path.exists', (['cachedir'], {}), '(cachedir)\n', (776, 786), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((919, 943), 'os.path.exists', 'os.path.exists', (['cachedir'], {}), '(cachedir)\n', (933, 943), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((957, 978), 'os.makedirs', 'os.makedirs', (['cachedir'], {}), '(cachedir)\n', (968, 978), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((2325, 2344), 'zlib.compress', 'zlib.compress', (['p', '(9)'], {}), '(p, 9)\n', (2338, 2344), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((795, 818), 
'os.path.isdir', 'os.path.isdir', (['cachedir'], {}), '(cachedir)\n', (808, 818), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n'), ((2265, 2280), 'random.random', 'random.random', ([], {}), '()\n', (2278, 2280), False, 'import os, zlib, pickle, base64, string, hashlib, random, functools\n')] |
import sublime, sublime_plugin
import os
from ...libs import util
from ...libs import JavascriptEnhancementsExecuteOnTerminalCommand
class JavascriptEnhancementsGenerateJsdocCommand(JavascriptEnhancementsExecuteOnTerminalCommand, sublime_plugin.WindowCommand):
is_node = True
is_bin_path = True
def prepare_command(self):
jsdoc_conf_file = os.path.join(self.settings['project_dir_name'], self.settings['project_settings']['jsdoc']['conf_file'])
if os.path.isfile(jsdoc_conf_file) :
self.command = ["jsdoc", "-c", jsdoc_conf_file]
else :
sublime.error_message("JSDOC ERROR: Can't load "+jsdoc_conf_file+" file!\nConfiguration file REQUIRED!")
return
self._run()
def _run(self):
super(JavascriptEnhancementsGenerateJsdocCommand, self)._run()
def is_enabled(self):
return True if util.is_javascript_project() else False | [
"os.path.isfile",
"os.path.join",
"sublime.error_message"
] | [((354, 463), 'os.path.join', 'os.path.join', (["self.settings['project_dir_name']", "self.settings['project_settings']['jsdoc']['conf_file']"], {}), "(self.settings['project_dir_name'], self.settings[\n 'project_settings']['jsdoc']['conf_file'])\n", (366, 463), False, 'import os\n'), ((466, 497), 'os.path.isfile', 'os.path.isfile', (['jsdoc_conf_file'], {}), '(jsdoc_conf_file)\n', (480, 497), False, 'import os\n'), ((572, 687), 'sublime.error_message', 'sublime.error_message', (['("JSDOC ERROR: Can\'t load " + jsdoc_conf_file +\n """ file!\nConfiguration file REQUIRED!""")'], {}), '("JSDOC ERROR: Can\'t load " + jsdoc_conf_file +\n """ file!\nConfiguration file REQUIRED!""")\n', (593, 687), False, 'import sublime, sublime_plugin\n')] |
"""
crash_geocoding
Normalizes CRASH data into collision _events_ and collision _involved persons_, then matches
collision events to the centreline conflation target that was created by
`centreline_conflation_target`.
Our legacy schema in Oracle stores both event-related and involved-person-related information
in a single table, `TRAFFIC.ACC`. That table has one record per involved person, with event-level
details copied across all persons involved in a collision. To make this easier to work with in
MOVE, we transform `TRAFFIC.ACC` into a normalized representation.
To match collisions to the centreline, we use the following heuristic:
- if there are any intersections within 20m, match to the closest such intersection;
- otherwise, if there are any midblocks within 20m, match to the closest such midblock;
- otherwise, do not match.
This same heuristic was used by the legacy CRASH system to assign collisions to intersections
and midblocks. (However, CRASH did not use the Toronto Centreline, but instead used a legacy
map layer that has been deprecated and is no longer maintained by the City.)
This is intended to run after `replicator_transfer_crash` and `centreline_conflation_target`.
"""
# pylint: disable=pointless-statement
from datetime import datetime
from airflow_utils import create_dag, create_bash_task_nested
START_DATE = datetime(2019, 7, 17)
SCHEDULE_INTERVAL = '20 19 * * 1-5'
DAG = create_dag(__file__, __doc__, START_DATE, SCHEDULE_INTERVAL)
A1_EVENTS_FIELDS_RAW = create_bash_task_nested(DAG, 'A1_events_fields_raw')
A2_EVENTS_FIELDS_NORM = create_bash_task_nested(DAG, 'A2_events_fields_norm')
A2_INVOLVED_FIELDS_RAW = create_bash_task_nested(DAG, 'A2_involved_fields_raw')
A3_INVOLVED_FIELDS_NORM = create_bash_task_nested(DAG, 'A3_involved_fields_norm')
A4_INVOLVED = create_bash_task_nested(DAG, 'A4_involved')
A5_EVENTS = create_bash_task_nested(DAG, 'A5_events')
A6_EVENTS_INTERSECTIONS = create_bash_task_nested(DAG, 'A6_events_intersections')
A6_EVENTS_SEGMENTS = create_bash_task_nested(DAG, 'A6_events_segments')
A7_EVENTS_CENTRELINE = create_bash_task_nested(DAG, 'A7_events_centreline')
A1_EVENTS_FIELDS_RAW >> A2_EVENTS_FIELDS_NORM
A1_EVENTS_FIELDS_RAW >> A2_INVOLVED_FIELDS_RAW
A2_EVENTS_FIELDS_NORM >> A3_INVOLVED_FIELDS_NORM
A2_INVOLVED_FIELDS_RAW >> A3_INVOLVED_FIELDS_NORM
A3_INVOLVED_FIELDS_NORM >> A4_INVOLVED
A4_INVOLVED >> A5_EVENTS
A5_EVENTS >> A6_EVENTS_INTERSECTIONS
A5_EVENTS >> A6_EVENTS_SEGMENTS
A6_EVENTS_INTERSECTIONS >> A7_EVENTS_CENTRELINE
A6_EVENTS_SEGMENTS >> A7_EVENTS_CENTRELINE
| [
"datetime.datetime",
"airflow_utils.create_bash_task_nested",
"airflow_utils.create_dag"
] | [((1359, 1380), 'datetime.datetime', 'datetime', (['(2019)', '(7)', '(17)'], {}), '(2019, 7, 17)\n', (1367, 1380), False, 'from datetime import datetime\n'), ((1423, 1483), 'airflow_utils.create_dag', 'create_dag', (['__file__', '__doc__', 'START_DATE', 'SCHEDULE_INTERVAL'], {}), '(__file__, __doc__, START_DATE, SCHEDULE_INTERVAL)\n', (1433, 1483), False, 'from airflow_utils import create_dag, create_bash_task_nested\n'), ((1508, 1560), 'airflow_utils.create_bash_task_nested', 'create_bash_task_nested', (['DAG', '"""A1_events_fields_raw"""'], {}), "(DAG, 'A1_events_fields_raw')\n", (1531, 1560), False, 'from airflow_utils import create_dag, create_bash_task_nested\n'), ((1585, 1638), 'airflow_utils.create_bash_task_nested', 'create_bash_task_nested', (['DAG', '"""A2_events_fields_norm"""'], {}), "(DAG, 'A2_events_fields_norm')\n", (1608, 1638), False, 'from airflow_utils import create_dag, create_bash_task_nested\n'), ((1664, 1718), 'airflow_utils.create_bash_task_nested', 'create_bash_task_nested', (['DAG', '"""A2_involved_fields_raw"""'], {}), "(DAG, 'A2_involved_fields_raw')\n", (1687, 1718), False, 'from airflow_utils import create_dag, create_bash_task_nested\n'), ((1745, 1800), 'airflow_utils.create_bash_task_nested', 'create_bash_task_nested', (['DAG', '"""A3_involved_fields_norm"""'], {}), "(DAG, 'A3_involved_fields_norm')\n", (1768, 1800), False, 'from airflow_utils import create_dag, create_bash_task_nested\n'), ((1815, 1858), 'airflow_utils.create_bash_task_nested', 'create_bash_task_nested', (['DAG', '"""A4_involved"""'], {}), "(DAG, 'A4_involved')\n", (1838, 1858), False, 'from airflow_utils import create_dag, create_bash_task_nested\n'), ((1871, 1912), 'airflow_utils.create_bash_task_nested', 'create_bash_task_nested', (['DAG', '"""A5_events"""'], {}), "(DAG, 'A5_events')\n", (1894, 1912), False, 'from airflow_utils import create_dag, create_bash_task_nested\n'), ((1939, 1994), 'airflow_utils.create_bash_task_nested', 'create_bash_task_nested', 
(['DAG', '"""A6_events_intersections"""'], {}), "(DAG, 'A6_events_intersections')\n", (1962, 1994), False, 'from airflow_utils import create_dag, create_bash_task_nested\n'), ((2016, 2066), 'airflow_utils.create_bash_task_nested', 'create_bash_task_nested', (['DAG', '"""A6_events_segments"""'], {}), "(DAG, 'A6_events_segments')\n", (2039, 2066), False, 'from airflow_utils import create_dag, create_bash_task_nested\n'), ((2090, 2142), 'airflow_utils.create_bash_task_nested', 'create_bash_task_nested', (['DAG', '"""A7_events_centreline"""'], {}), "(DAG, 'A7_events_centreline')\n", (2113, 2142), False, 'from airflow_utils import create_dag, create_bash_task_nested\n')] |
from collections import Counter
adapters = []
for line in open('input.txt', 'r').readlines():
adapters.append(int(line.strip()))
adapters = sorted(adapters)
device_joltage = adapters[-1] + 3
adapters.append(device_joltage)
dp = Counter()
dp[0] = 1
for adapter in adapters:
dp[adapter] = dp[adapter - 3] + dp[adapter - 2] + dp[adapter - 1]
print(dp[device_joltage])
| [
"collections.Counter"
] | [((232, 241), 'collections.Counter', 'Counter', ([], {}), '()\n', (239, 241), False, 'from collections import Counter\n')] |
import torch.nn as nn
from architectures.position_wise_feed_forward_net import PositionWiseFeedForwardNet
from architectures.multi_head_attention import MultiHeadAttention
from architectures.add_and_norm import AddAndNorm
class TransformerEncoderBlock(nn.Module):
def __init__(self, d_model, n_heads, d_ff, dropout_proba):
super(TransformerEncoderBlock, self).__init__()
self.W_q = nn.Linear(d_model, d_model)
self.W_k = nn.Linear(d_model, d_model)
self.W_v = nn.Linear(d_model, d_model)
self.mha_layer=MultiHeadAttention(d_model, n_heads)
self.dropout_layer_1=nn.Dropout(dropout_proba)
self.add_and_norm_layer_1 = AddAndNorm(d_model)
self.ffn_layer = PositionWiseFeedForwardNet(d_model, d_ff)
self.dropout_layer_2=nn.Dropout(dropout_proba)
self.add_and_norm_layer_2 = AddAndNorm(d_model)
def forward(self, x, mask):
# x dims: (batch_size, src_seq_len, d_model)
# mask dim: (batch_size, 1, 1, src_seq_len)
q = self.W_q(x) # (batch_size, src_seq_len, d_model)
k = self.W_k(x) # (batch_size, src_seq_len, d_model)
v = self.W_v(x) # (batch_size, src_seq_len, d_model)
mha_out = self.mha_layer(q, k, v, mask) # (batch_size, src_seq_len, d_model)
mha_out= self.dropout_layer_1(mha_out) # (batch_size, src_seq_len, d_model)
mha_out = self.add_and_norm_layer_1(x, mha_out) # (batch_size, src_seq_len, d_model)
ffn_out = self.ffn_layer(mha_out) # (batch_size, src_seq_len, d_model)
ffn_out= self.dropout_layer_2(ffn_out) # (batch_size, src_seq_len, d_model)
ffn_out = self.add_and_norm_layer_2(mha_out, ffn_out) # (batch_size, src_seq_len, d_model)
return ffn_out
class TransformerEncoder(nn.Module):
def __init__(self, n_blocks, n_heads, d_model, d_ff, dropout_proba=0.1):
super(TransformerEncoder, self).__init__()
self.encoder_blocks=nn.ModuleList([TransformerEncoderBlock(d_model, n_heads, d_ff, dropout_proba) for _ in range(n_blocks)])
def forward(self, x, mask):
for encoder_block in self.encoder_blocks:
x = encoder_block(x, mask)
return x | [
"torch.nn.Dropout",
"architectures.add_and_norm.AddAndNorm",
"architectures.position_wise_feed_forward_net.PositionWiseFeedForwardNet",
"torch.nn.Linear",
"architectures.multi_head_attention.MultiHeadAttention"
] | [((405, 432), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (414, 432), True, 'import torch.nn as nn\n'), ((453, 480), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (462, 480), True, 'import torch.nn as nn\n'), ((500, 527), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (509, 527), True, 'import torch.nn as nn\n'), ((560, 596), 'architectures.multi_head_attention.MultiHeadAttention', 'MultiHeadAttention', (['d_model', 'n_heads'], {}), '(d_model, n_heads)\n', (578, 596), False, 'from architectures.multi_head_attention import MultiHeadAttention\n'), ((626, 651), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_proba'], {}), '(dropout_proba)\n', (636, 651), True, 'import torch.nn as nn\n'), ((688, 707), 'architectures.add_and_norm.AddAndNorm', 'AddAndNorm', (['d_model'], {}), '(d_model)\n', (698, 707), False, 'from architectures.add_and_norm import AddAndNorm\n'), ((734, 775), 'architectures.position_wise_feed_forward_net.PositionWiseFeedForwardNet', 'PositionWiseFeedForwardNet', (['d_model', 'd_ff'], {}), '(d_model, d_ff)\n', (760, 775), False, 'from architectures.position_wise_feed_forward_net import PositionWiseFeedForwardNet\n'), ((805, 830), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_proba'], {}), '(dropout_proba)\n', (815, 830), True, 'import torch.nn as nn\n'), ((867, 886), 'architectures.add_and_norm.AddAndNorm', 'AddAndNorm', (['d_model'], {}), '(d_model)\n', (877, 886), False, 'from architectures.add_and_norm import AddAndNorm\n')] |
import struct
import numpy
from math import floor
class HeightMap:
def __init__(self, width, length, heightData=None, max_val=0):
self.heightData = (
heightData if heightData != None else [0 for n in range(width * length)]
)
self.width = width
self.length = length
self.highest = max_val
def copy(self):
return HeightMap(self.width, self.length, list(self.heightData))
def getMax(self):
return self.highest
@classmethod
def from_data(cls, data, res, width):
heightData, length, m = cls._parse_data(data, res, width)
return cls(width, length, heightData, m)
def serialize(self, res):
return HeightMap._serialize_data(self.heightData, res, self.getWidth())
# Takes a bz height map byte buffer and converts it to an array of height points
@classmethod
def _parse_data(cls, data, res, width):
size = int(len(data) / 2)
zone_size = 2 ** res
length = int(size / width)
m = 0
obuffer = [0 for n in range(size)]
for n in range(size):
try:
d_idx = n * 2
zone = int(n / (zone_size ** 2))
x = (n % zone_size) + zone * zone_size % width
z = (int(n / zone_size) % zone_size) + int(
zone * zone_size / width
) * zone_size
height = struct.unpack("<H", data[d_idx : d_idx + 2])[0]
m = max(m, height)
b_idx = int(x + ((length - 1) - z) * width)
obuffer[b_idx] = height
except Exception as e:
break
return obuffer, length, m
# Takes an array of height points and converts it to a bz height map
@classmethod
def _serialize_data(cls, data, res, width):
size = len(data)
zone_size = 2 ** res
length = int(size / width)
obuffer = [b"" for n in range(size)]
for n in range(size):
try:
zone = int(n / (zone_size ** 2))
x = (n % zone_size) + zone * zone_size % width
z = (int(n / zone_size) % zone_size) + int(
zone * zone_size / width
) * zone_size
b_idx = int(x + ((length - 1) - z) * width)
obuffer[n] = struct.pack("<H", data[b_idx])
except Exception as e:
print(e)
break
return b"".join(obuffer)
def getWidth(self):
return self.width
def getLength(self):
return self.length
def getHeight(self, x, z):
xx = int(min(max(x, 0), self.getWidth() - 1))
zz = int(min(max(z, 0), self.getLength() - 1))
return self.heightData[xx + zz * self.getWidth()]
def getCroped(self, x, z, w, h):
return HeightMap(
w, h, [self.getHeight(x + n % w, z + int(n / w)) for n in range(w * h)]
)
def getResized(self, newW, newL, method=lambda x, z, map: map.getHeight(x, z)):
newMap = [0 for n in range(int(newW * newL))]
wf, lf = self.getWidth() / newW, self.getLength() / newL
m = 0
print("Resizing:")
lp = 0
for i in range(len(newMap)):
x = i % newW
z = int(i / newW)
newMap[i] = int(method(int(x * wf), int(z * lf), self))
m = max(m, newMap[i])
p = int((i + 1) / len(newMap) * 25)
if p != lp:
print(
"[{}{}] - {:>8}/{:<8}".format(
"=" * p, " " * (25 - p), i + 1, len(newMap)
),
end="\r",
)
lp = p
print("\nDone")
return HeightMap(int(newW), int(newL), newMap, m)
w_cache = {}
def createWeightGrid(size):
if not int(size) in w_cache:
c = size / 2
weights = [
size - ((c - n % size) ** 2 + (c - int(n / size)) ** 2) ** 0.5
for n in range(0, size * size)
]
w_cache[int(size)] = weights
return w_cache[int(size)]
def AvgEdge(x, z, map, grid=5):
cropped = map.getCroped(int(x - grid / 2), int(z - grid / 2), grid, grid)
hdata = cropped.heightData
weights = createWeightGrid(grid)
mean, median = numpy.average(hdata, weights=weights), numpy.median(hdata)
d = [n for n in hdata if (abs(n - mean) >= abs(n - median))]
return numpy.mean([numpy.mean(d)])
def Avg(x, z, map, grid=5):
cropped = map.getCroped(int(x - grid / 2), int(z - grid / 2), grid, grid)
weights = createWeightGrid(grid)
hdata = cropped.heightData
return numpy.average(hdata, weights=weights)
| [
"numpy.mean",
"numpy.median",
"numpy.average",
"struct.pack",
"struct.unpack"
] | [((4669, 4706), 'numpy.average', 'numpy.average', (['hdata'], {'weights': 'weights'}), '(hdata, weights=weights)\n', (4682, 4706), False, 'import numpy\n'), ((4318, 4355), 'numpy.average', 'numpy.average', (['hdata'], {'weights': 'weights'}), '(hdata, weights=weights)\n', (4331, 4355), False, 'import numpy\n'), ((4357, 4376), 'numpy.median', 'numpy.median', (['hdata'], {}), '(hdata)\n', (4369, 4376), False, 'import numpy\n'), ((4465, 4478), 'numpy.mean', 'numpy.mean', (['d'], {}), '(d)\n', (4475, 4478), False, 'import numpy\n'), ((2358, 2388), 'struct.pack', 'struct.pack', (['"""<H"""', 'data[b_idx]'], {}), "('<H', data[b_idx])\n", (2369, 2388), False, 'import struct\n'), ((1428, 1470), 'struct.unpack', 'struct.unpack', (['"""<H"""', 'data[d_idx:d_idx + 2]'], {}), "('<H', data[d_idx:d_idx + 2])\n", (1441, 1470), False, 'import struct\n')] |
#!/usr/bin/env python3
import zipfile
# The file to USE inside the zip, before compression
filein = "index.php"
print("[i] FileIn: %s\n" % filein)
# How deep are we going?
depth = ""
# Loop 11 times (00-10)
for i in range(11):
# The .zip file to use
zipname = "depth-%02d.zip" % i
print("[i] ZipName: %s" % zipname)
# Get the zip file out ready
with zipfile.ZipFile(zipname , 'w') as zip:
# The file INSIDDE the zip
filezip = "%s%s" % (depth, filein)
print("[i] ZipFile: %s" % filezip)
# Write the zip file out
zip.write(filein, filezip)
# Increase depth for next loop
depth += "../"
print("\n[i] Done")
| [
"zipfile.ZipFile"
] | [((365, 394), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipname', '"""w"""'], {}), "(zipname, 'w')\n", (380, 394), False, 'import zipfile\n')] |
from ctypes import *
from typing import *
from pathlib import Path
from numpy import array, cos, ndarray, pi, random, sin, zeros, tan
try:
    # Resolve libkmeans.so next to this module so loading works no matter
    # what the current working directory is.
    lib = cdll.LoadLibrary(str(Path(__file__).with_name("libkmeans.so")))
except Exception as E:
    # Every wrapper below is unusable without the shared library, so fail
    # fast with the original loader error instead of letting execution
    # continue and die later with a confusing NameError on `lib`.
    print("Cannot load DLL")
    print(E)
    raise
class observation_2d(Structure):
    """A single 2-D data point (x, y) plus the index of its assigned cluster."""

    _fields_ = [
        ("x", c_double),
        ("y", c_double),
        ("group", c_size_t),
    ]
class observation_3d(Structure):
    """A single 3-D data point (x, y, z) plus the index of its assigned cluster."""

    _fields_ = [
        ("x", c_double),
        ("y", c_double),
        ("z", c_double),
        ("group", c_size_t),
    ]
class cluster_2d(Structure):
    """Centroid of a 2-D cluster: position (x, y) and number of members."""

    _fields_ = [
        ("x", c_double),
        ("y", c_double),
        ("count", c_size_t),
    ]
class cluster_3d(Structure):
    """Centroid of a 3-D cluster: position (x, y, z) and number of members."""

    _fields_ = [
        ("x", c_double),
        ("y", c_double),
        ("z", c_double),
        ("count", c_size_t),
    ]
lib.kmeans_2d.restype = POINTER(cluster_2d)
def kmeans_2d(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
    """Partition observations into k clusters.

    Parameters
    ----------
    observations : ndarray, `shape (N, 2)`
        An array of observations (x, y) to be clustered.
        Data should be provided as: `[(x, y), (x, y), (x, y), ...]`
    k : int, optional
        Amount of clusters to partition observations into, by default 5

    Returns
    -------
    center : ndarray, `shape (k, 2)`
        Positions of the cluster centers, always float64 — the C library
        computes centroids in double precision.
    count : ndarray, `shape (k, )`
        Number of observations assigned to each cluster.

    Raises
    ------
    TypeError
        If `observations` is not a numpy ndarray.
    ValueError
        If the last axis of `observations` does not have length 2.

    Examples
    --------
    >>> observations = array([[6, 1], [-4, -4], [1, -7], [9, -2], [6, -6]])
    >>> center, count = kmeans_2d(observations, k=2)
    >>> center
    [[-4. -4.]
     [ 5. -3.]]
    >>> count
    [1 4]
    """
    if not isinstance(observations, ndarray):
        raise TypeError("Observations must be a ndarray.")
    # Transpose (N, 2) -> (2, N) so the x-row and y-row can be unpacked
    # into the observation_2d constructor below.
    if observations.shape[-1] == 2:
        observations = observations.T
    else:
        raise ValueError("Provided array should contain ((x, y), ) observations.")
    # Number of observations
    n = observations.shape[-1]
    # Marshal the data into a C array of observation_2d structs.
    py_observations_list = map(observation_2d, *observations)
    c_observations_array = (observation_2d * n)(*py_observations_list)
    # Run the C implementation; it returns a pointer to k cluster_2d structs.
    c_clusters_array = lib.kmeans_2d(c_observations_array, c_size_t(n), c_size_t(k))
    py_clusters_list = [c_clusters_array[index] for index in range(k)]
    # Unpack the clusters.  Centers are float64 on purpose: reusing the
    # input dtype (as the original code did) silently truncated the
    # double-precision centroids whenever integer observations were passed.
    center = zeros([k, 2], dtype=float)
    count = zeros(k, dtype=int)
    for index, cluster_object in enumerate(py_clusters_list):
        center[index][0] = cluster_object.x
        center[index][1] = cluster_object.y
        count[index] = cluster_object.count
    return (center, count)
# The C kmeans_3d entry point returns a pointer to an array of cluster_3d.
lib.kmeans_3d.restype = POINTER(cluster_3d)
def kmeans_3d(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
    """Partition observations into k clusters.

    3-D counterpart of :func:`kmeans_2d`: marshals (x, y, z) observations
    into a C array, runs the native ``kmeans_3d`` routine, and unpacks the
    resulting cluster structs.

    Parameters
    ----------
    observations : ndarray, `shape (N, 3)`
        An array of observations (x, y, z) to be clustered.
        Data should be provided as: `[(x, y, z), (x, y, z), (x, y, z), ...]`
    k : int, optional
        Amount of clusters to partition observations into, by default 5

    Returns
    -------
    center : ndarray, `shape (k, 3)`
        An array of positions to center of each cluster.
    count : ndarray, `shape (k, )`
        Array of counts of datapoints closest to the center of its cluster.

    Raises
    ------
    TypeError
        If *observations* is not a numpy array.
    ValueError
        If the observations are not 3-dimensional points.

    Examples
    --------
    >>> observations = array([[6, 1, 3], [-4, -4, -4], [1, -7, 7], [9, -2, 1], [6, -6, 6]])
    >>> center, count = kmeans_3d(observations, k=2)
    >>> center.shape
    (2, 3)
    >>> count
    [2, 3]
    """
    if not isinstance(observations, ndarray):
        raise TypeError("Observations must be a ndarray.")
    # Fix orientation on data: (N, 3) -> (3, N) so each row is one coordinate
    if observations.shape[-1] == 3:
        observations = observations.T
    else:
        raise ValueError("Provided array should contain ((x, y, z), ) observations.")
    # Number of observations (after the transpose, axis -1 indexes points)
    n = observations.shape[-1]
    # Create a Python list of observations
    py_observations_list = map(observation_3d, *observations)
    # Convert the Python list into a c-array
    c_observations_array = (observation_3d * n)(*py_observations_list)
    # Get c-array of cluster_3d
    c_clusters_array = lib.kmeans_3d(c_observations_array, c_size_t(n), c_size_t(k))
    # Convert c-array of clusters into a python list
    py_clusters_list = [c_clusters_array[index] for index in range(k)]
    # Unpack each cluster struct into the centroid / count output arrays
    center = zeros([k, 3], dtype=observations.dtype)
    count = zeros(k, dtype=int)
    for index, cluster_object in enumerate(py_clusters_list):
        center[index][0] = cluster_object.x
        center[index][1] = cluster_object.y
        center[index][2] = cluster_object.z
        count[index] = cluster_object.count
    # Return the centroids and per-cluster counts
    return (center, count)
def kmeans(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
    """Partition observations into k clusters, dispatching on dimensionality.

    Parameters
    ----------
    observations : ndarray, `shape (N, 2)` or `shape (N, 3)`
        An array of observations to be clustered.
        Data should be provided as:
        `[(x, y), (x, y), (x, y), ...]`
        or
        `[(x, y, z), (x, y, z), (x, y, z), ...]`
    k : int, optional
        Amount of clusters to partition observations into, by default 5

    Returns
    -------
    center : ndarray, `shape (k, 2)` or `shape (k, 3)`
        An array of positions to center of each cluster.
    count : ndarray, `shape (k, )`
        Array of counts of datapoints closest to the center of its cluster.

    Raises
    ------
    TypeError
        If *observations* is not a numpy array.
    ValueError
        If observations are neither 2- nor 3-dimensional points.
    """
    if not isinstance(observations, ndarray):
        raise TypeError("Observations must be a ndarray.")
    if observations.shape[-1] == 3:
        return kmeans_3d(observations, k)
    elif observations.shape[-1] == 2:
        return kmeans_2d(observations, k)
    # Bug fix: this branch used to `pass`, silently returning None, which made
    # `center, count = kmeans(...)` blow up far from the real problem.
    raise ValueError(
        "Provided array should contain (x, y) or (x, y, z) observations."
    )
if __name__ == "__main__":
    # Demo: cluster 100 random 3-D points, then a tiny hand-made 2-D set.
    random.seed(1234)
    rand_list = random.random(100)
    x = 10 * rand_list * cos(2 * pi * rand_list)
    y = 10 * rand_list * sin(2 * pi * rand_list)
    z = 10 * rand_list * tan(2 * pi * rand_list)
    df = array([x, y, z]).T
    print(f"Observations:\n{df[0:5]}\n...\n\nshape {len(df), len(df[0])}\n")
    centers, count = kmeans(df, 3)
    print(f"Centers:\n{centers}\n")
    print(f"Count:\n{count}")
    observations = [[6, 1], [-4, -4], [1, -7], [9, -2], [6, -6]]
    center, count = kmeans_2d(array(observations), k=2)
    # Bug fix: the 2-D results were previously printed with the stale 3-D
    # `centers` variable instead of the freshly computed `center`.
    print(f"Centers:\n{center}\n")
    print(f"Count:\n{count}")
| [
"numpy.tan",
"pathlib.Path",
"numpy.random.random",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.random.seed",
"numpy.sin"
] | [((2541, 2580), 'numpy.zeros', 'zeros', (['[k, 2]'], {'dtype': 'observations.dtype'}), '([k, 2], dtype=observations.dtype)\n', (2546, 2580), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((2593, 2612), 'numpy.zeros', 'zeros', (['k'], {'dtype': 'int'}), '(k, dtype=int)\n', (2598, 2612), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((4813, 4852), 'numpy.zeros', 'zeros', (['[k, 3]'], {'dtype': 'observations.dtype'}), '([k, 3], dtype=observations.dtype)\n', (4818, 4852), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((4865, 4884), 'numpy.zeros', 'zeros', (['k'], {'dtype': 'int'}), '(k, dtype=int)\n', (4870, 4884), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6497, 6514), 'numpy.random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (6508, 6514), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6532, 6550), 'numpy.random.random', 'random.random', (['(100)'], {}), '(100)\n', (6545, 6550), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6577, 6600), 'numpy.cos', 'cos', (['(2 * pi * rand_list)'], {}), '(2 * pi * rand_list)\n', (6580, 6600), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6626, 6649), 'numpy.sin', 'sin', (['(2 * pi * rand_list)'], {}), '(2 * pi * rand_list)\n', (6629, 6649), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6675, 6698), 'numpy.tan', 'tan', (['(2 * pi * rand_list)'], {}), '(2 * pi * rand_list)\n', (6678, 6698), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6709, 6725), 'numpy.array', 'array', (['[x, y, z]'], {}), '([x, y, z])\n', (6714, 6725), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((7005, 7024), 'numpy.array', 'array', (['observations'], {}), '(observations)\n', (7010, 7024), False, 'from 
numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((172, 186), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (176, 186), False, 'from pathlib import Path\n')] |
"""https://projecteuler.net/problem=4"""
from euler.main import largest_palindrome
def test_004() -> None:
    """Project Euler problem 4: largest palindromic product of two
    3-digit numbers. Expected answer: 906609."""
    expected = 906609
    assert largest_palindrome(3) == expected
| [
"euler.main.largest_palindrome"
] | [((148, 169), 'euler.main.largest_palindrome', 'largest_palindrome', (['(3)'], {}), '(3)\n', (166, 169), False, 'from euler.main import largest_palindrome\n')] |
from collections import Counter, defaultdict
import matplotlib as mpl
import networkx as nx
import numba
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import seaborn as sns
from fa2 import ForceAtlas2
from scipy import sparse
def to_adjacency_matrix(net):
    """Coerce *net* into a ``(csr_matrix, node_labels)`` pair.

    Accepts a scipy sparse matrix, a networkx graph, or a dense numpy array.
    Returns the adjacency matrix in CSR form together with the node labels
    (``np.arange(n)`` for matrix inputs, ``net.nodes()`` for graphs).

    :raises TypeError: if *net* is none of the supported types.
    """
    if sparse.issparse(net):
        # Fast path: already CSR -- reuse the object instead of copying.
        # Bug fix: the original compared type(net) to a *string*
        # ("scipy.sparse.csr.csr_matrix"), which is always False, and its
        # dead early-return also dropped the node-label half of the tuple.
        if isinstance(net, sparse.csr_matrix):
            return net, np.arange(net.shape[0])
        return sparse.csr_matrix(net, dtype=np.float64), np.arange(net.shape[0])
    elif "networkx" in "%s" % type(net):
        return (
            sparse.csr_matrix(nx.adjacency_matrix(net), dtype=np.float64),
            net.nodes(),
        )
    elif isinstance(net, np.ndarray):
        # Bug fix: `"numpy.ndarray" == type(net)` compared a string to a
        # type object (always False), so ndarray inputs returned None.
        return sparse.csr_matrix(net, dtype=np.float64), np.arange(net.shape[0])
    raise TypeError("Unsupported network type: %s" % type(net))
def to_nxgraph(net):
    """Coerce *net* into a networkx graph.

    Accepts a scipy sparse matrix, a networkx graph (returned as-is), or a
    dense numpy array.

    :raises TypeError: if *net* is none of the supported types.
    """
    if sparse.issparse(net):
        return nx.from_scipy_sparse_matrix(net)
    elif "networkx" in "%s" % type(net):
        return net
    elif isinstance(net, np.ndarray):
        # Bug fix: `"numpy.ndarray" == type(net)` compared a string to a
        # type object (always False), so ndarray inputs fell through and
        # the function silently returned None.
        return nx.from_numpy_array(net)
    raise TypeError("Unsupported network type: %s" % type(net))
def set_node_colors(c, x, cmap, colored_nodes):
    """Map each node to a fill colour and an edge colour.

    Each group gets a base colour; within a group, a node's coreness
    ``x[node]`` selects a shade from a light palette (fill) and a dark
    palette (outline). Nodes not in *colored_nodes* fall back to grey via
    the defaultdict factories.

    :param c: group membership c[node]
    :type c: dict
    :param x: coreness x[node], assumed in [0, 1] -- TODO confirm
    :type x: dict
    :param cmap: base colours per group; picked automatically when None
    :param colored_nodes: nodes that should receive a group colour
    :type colored_nodes: list
    :return: (node_colors, node_edge_colors), both defaultdicts keyed by node
    """
    # Bug fix: defaultdict factories take *no* arguments; the original
    # `lambda x: "#8d8d8d"` raised TypeError on any missing-key lookup.
    node_colors = defaultdict(lambda: "#8d8d8d")
    node_edge_colors = defaultdict(lambda: "#4d4d4d")
    cnt = Counter([c[d] for d in colored_nodes])
    num_groups = len(cnt)
    # Set up the palette
    if cmap is None:
        if num_groups <= 10:
            cmap = sns.color_palette().as_hex()
        elif num_groups <= 20:
            cmap = sns.color_palette("tab20").as_hex()
        else:
            cmap = sns.color_palette("hls", num_groups).as_hex()
    # Assign one base colour per group, largest group first
    cmap = dict(
        zip(
            [d[0] for d in cnt.most_common(num_groups)],
            [cmap[i] for i in range(num_groups)],
        )
    )
    # Discretize coreness into 10 bins over [0, 1]
    bounds = np.linspace(0, 1, 11)
    norm = mpl.colors.BoundaryNorm(bounds, ncolors=12, extend="both")
    # Light palette for the fill, dark palette for the node outline
    cmap_coreness = {
        k: sns.light_palette(v, n_colors=12).as_hex() for k, v in cmap.items()
    }
    cmap_coreness_dark = {
        k: sns.dark_palette(v, n_colors=12).as_hex() for k, v in cmap.items()
    }
    for d in colored_nodes:
        node_colors[d] = cmap_coreness[c[d]][norm(x[d]) - 1]
        node_edge_colors[d] = cmap_coreness_dark[c[d]][-norm(x[d])]
    return node_colors, node_edge_colors
def classify_nodes(G, c, x, max_num=None):
    """Split the nodes of *G* into colored, muted and residual lists.

    A node is *residual* when it lacks a group (``c``) or a coreness
    (``x``) value. The remaining nodes are *colored* when they belong to
    one of the (at most *max_num*) largest groups, and *muted* otherwise.
    Colored nodes are returned sorted by ascending coreness so that core
    nodes are drawn last, i.e. on top.

    :return: (colored_nodes, muted, residuals)
    :rtype: (list, list, list)
    """
    non_residuals, residuals = [], []
    for node in G.nodes():
        if c[node] is None or x[node] is None:
            residuals.append(node)
        else:
            non_residuals.append(node)
    # Rank groups by size; keep at most `max_num` of the largest ones.
    group_counts = Counter(c[node] for node in non_residuals)
    ranked_groups = np.array([g for g, _ in group_counts.most_common(len(group_counts))])
    if max_num is not None:
        ranked_groups = ranked_groups[:max_num]
    kept_groups = set(ranked_groups)
    colored_nodes = [node for node in non_residuals if c[node] in kept_groups]
    muted = [node for node in non_residuals if c[node] not in kept_groups]
    # Core nodes go to the front of the draw order: sort by coreness.
    order = np.argsort([x[node] for node in colored_nodes])
    colored_nodes = [colored_nodes[i] for i in order]
    return colored_nodes, muted, residuals
def calc_node_pos(G, iterations=300, **params):
    """Compute node coordinates with the ForceAtlas2 layout algorithm.

    Any keyword argument in *params* overrides the ForceAtlas2 defaults
    below; *iterations* controls how long the layout is refined.

    :return: pos[node] -> (x, y)
    :rtype: dict
    """
    fa2_params = {
        # Behavior alternatives
        "outboundAttractionDistribution": False,  # Dissuade hubs
        "linLogMode": False,  # NOT IMPLEMENTED
        "adjustSizes": False,  # Prevent overlap (NOT IMPLEMENTED)
        "edgeWeightInfluence": 1.0,
        # Performance
        "jitterTolerance": 1.0,  # Tolerance
        "barnesHutOptimize": True,
        "barnesHutTheta": 1.2,
        "multiThreaded": False,  # NOT IMPLEMENTED
        # Tuning
        "scalingRatio": 2.0,
        "strongGravityMode": False,
        "gravity": 1.0,
        "verbose": False,
    }
    if params is not None:
        fa2_params.update(params)
    layout = ForceAtlas2(**fa2_params)
    return layout.forceatlas2_networkx_layout(G, pos=None, iterations=iterations)
return forceatlas2.forceatlas2_networkx_layout(G, pos=None, iterations=iterations)
def draw(
    G,
    c,
    x,
    ax,
    draw_edge=True,
    font_size=0,
    pos=None,
    cmap=None,
    max_group_num=None,
    draw_nodes_kwd=None,
    draw_edges_kwd=None,
    draw_labels_kwd=None,
    layout_kwd=None,
):
    """Plot the core-periphery structure in the networks.

    :param G: Graph
    :type G: networkx.Graph
    :param c: group membership c[i] of i
    :type c: dict
    :param x: core (x[i]=1) or periphery (x[i]=0)
    :type x: dict
    :param ax: axis
    :type ax: matplotlib.pyplot.ax
    :param draw_edge: whether to draw edges, defaults to True
    :type draw_edge: bool, optional
    :param font_size: font size for node labels, defaults to 0
    :type font_size: int, optional
    :param pos: pos[i] is the xy coordinate of node i, defaults to None
    :type pos: dict, optional
    :param cmap: colormap, defaults to None
    :type cmap: matplotlib.cmap, optional
    :param max_group_num: Number of groups to color, defaults to None
    :type max_group_num: int, optional
    :param draw_nodes_kwd: Parameter for networkx.draw_networkx_nodes,
        defaults to None (treated as {})
    :type draw_nodes_kwd: dict, optional
    :param draw_edges_kwd: Parameter for networkx.draw_networkx_edges,
        defaults to None (treated as {"edge_color": "#adadad"})
    :type draw_edges_kwd: dict, optional
    :param draw_labels_kwd: Parameter for networkx.draw_networkx_labels,
        defaults to None (treated as {})
    :type draw_labels_kwd: dict, optional
    :param layout_kwd: layout keywords, defaults to None (treated as {})
    :type layout_kwd: dict, optional
    :return: (ax, pos)
    :rtype: matplotlib.pyplot.ax, dict
    """
    # Bug fix: the keyword-dict parameters used mutable default arguments
    # (one dict instance shared across every call); use None sentinels and
    # build a fresh default per call instead.
    if draw_nodes_kwd is None:
        draw_nodes_kwd = {}
    if draw_edges_kwd is None:
        draw_edges_kwd = {"edge_color": "#adadad"}
    if draw_labels_kwd is None:
        draw_labels_kwd = {}
    if layout_kwd is None:
        layout_kwd = {}
    # Split node into residual and non-residual
    colored_nodes, muted_nodes, residuals = classify_nodes(G, c, x, max_group_num)
    node_colors, node_edge_colors = set_node_colors(c, x, cmap, colored_nodes)
    # Set the position of nodes
    if pos is None:
        pos = calc_node_pos(G, **layout_kwd)
    # Colored (grouped) nodes drawn on top (zorder 3)
    nodes = nx.draw_networkx_nodes(
        G,
        pos,
        node_color=[node_colors[d] for d in colored_nodes],
        nodelist=colored_nodes,
        ax=ax,
        **draw_nodes_kwd
    )
    if nodes is not None:
        nodes.set_zorder(3)
        nodes.set_edgecolor([node_edge_colors[r] for r in colored_nodes])
    # Residual nodes: small grey squares drawn behind everything else
    draw_nodes_kwd_residual = draw_nodes_kwd.copy()
    draw_nodes_kwd_residual["node_size"] = 0.1 * draw_nodes_kwd.get("node_size", 100)
    nodes = nx.draw_networkx_nodes(
        G,
        pos,
        node_color="#efefef",
        nodelist=residuals,
        node_shape="s",
        ax=ax,
        **draw_nodes_kwd_residual
    )
    if nodes is not None:
        nodes.set_zorder(1)
        nodes.set_edgecolor("#4d4d4d")
    if draw_edge:
        nx.draw_networkx_edges(
            G.subgraph(colored_nodes + residuals), pos, ax=ax, **draw_edges_kwd
        )
    if font_size > 0:
        nx.draw_networkx_labels(G, pos, ax=ax, font_size=font_size, **draw_labels_kwd)
    ax.axis("off")
    return ax, pos
def draw_interactive(G, c, x, hover_text=None, node_size=10.0, pos=None, cmap=None):
    """Interactive (plotly) version of :func:`draw`.

    :param G: Graph
    :type G: networkx.Graph
    :param c: group membership c[i] of i
    :type c: dict
    :param x: core (x[i]=1) or periphery (x[i]=0)
    :type x: dict
    :param hover_text: optional per-node hover text, defaults to None
    :type hover_text: dict, optional
    :param node_size: marker size reference, defaults to 10.0
    :type node_size: float, optional
    :param pos: pos[i] is the xy coordinate of node i, defaults to None
    :type pos: dict, optional
    :param cmap: colormap, defaults to None
    :type cmap: matplotlib.cmap, optional
    :return: plotly figure
    :rtype: plotly.graph_objects.Figure
    """
    # Bug fix: set_node_colors takes (c, x, cmap, colored_nodes); the old
    # call passed (G, c, x, cmap), i.e. every argument was shifted by one.
    colored_nodes, _, _ = classify_nodes(G, c, x)
    color_map, edge_color_map = set_node_colors(c, x, cmap, colored_nodes)
    if pos is None:
        pos = nx.spring_layout(G)
    nodelist = [d for d in G.nodes()]
    # Bug fix: plotly expects per-node colour *sequences* aligned with the
    # marker order, not dict-like objects; `.get` keeps residual nodes grey.
    node_colors = [color_map.get(d, "#8d8d8d") for d in nodelist]
    node_edge_colors = [edge_color_map.get(d, "#4d4d4d") for d in nodelist]
    group_ids = [c[d] if c[d] is not None else "residual" for d in nodelist]
    coreness = [x[d] if x[d] is not None else "residual" for d in nodelist]
    node_size_list = [(x[d] + 1) if x[d] is not None else 1 / 2 for d in nodelist]
    pos_x = [pos[d][0] for d in nodelist]
    pos_y = [pos[d][1] for d in nodelist]
    df = pd.DataFrame(
        {
            "x": pos_x,
            "y": pos_y,
            "name": nodelist,
            "group_id": group_ids,
            "coreness": coreness,
            "node_size": node_size_list,
        }
    )
    df["marker"] = df["group_id"].apply(
        lambda s: "circle" if s != "residual" else "square"
    )
    df["hovertext"] = df.apply(
        lambda s: "{ht}<br>Group: {group}<br>Coreness: {coreness}".format(
            ht="Node %s" % s["name"]
            if hover_text is None
            else hover_text.get(s["name"], ""),
            group=s["group_id"],
            coreness=s["coreness"],
        ),
        axis=1,
    )
    fig = go.Figure(
        data=go.Scatter(
            x=df["x"],
            y=df["y"],
            marker_size=df["node_size"],
            marker_symbol=df["marker"],
            hovertext=df["hovertext"],
            hoverlabel=dict(namelength=0),
            hovertemplate="%{hovertext}",
            marker={
                "color": node_colors,
                "sizeref": 1.0 / node_size,
                "line": {"color": node_edge_colors, "width": 1},
            },
            mode="markers",
        ),
    )
    fig.update_layout(
        autosize=False,
        width=800,
        height=800,
        template="plotly_white",
    )
    return fig
| [
"numpy.argsort",
"networkx.draw_networkx_nodes",
"networkx.draw_networkx_labels",
"numpy.arange",
"seaborn.color_palette",
"networkx.spring_layout",
"networkx.from_scipy_sparse_matrix",
"numpy.linspace",
"networkx.from_numpy_array",
"pandas.DataFrame",
"scipy.sparse.csr_matrix",
"networkx.adja... | [((293, 313), 'scipy.sparse.issparse', 'sparse.issparse', (['net'], {}), '(net)\n', (308, 313), False, 'from scipy import sparse\n'), ((792, 812), 'scipy.sparse.issparse', 'sparse.issparse', (['net'], {}), '(net)\n', (807, 812), False, 'from scipy import sparse\n'), ((1070, 1102), 'collections.defaultdict', 'defaultdict', (["(lambda x: '#8d8d8d')"], {}), "(lambda x: '#8d8d8d')\n", (1081, 1102), False, 'from collections import Counter, defaultdict\n'), ((1126, 1158), 'collections.defaultdict', 'defaultdict', (["(lambda x: '#4d4d4d')"], {}), "(lambda x: '#4d4d4d')\n", (1137, 1158), False, 'from collections import Counter, defaultdict\n'), ((1170, 1208), 'collections.Counter', 'Counter', (['[c[d] for d in colored_nodes]'], {}), '([c[d] for d in colored_nodes])\n', (1177, 1208), False, 'from collections import Counter, defaultdict\n'), ((1717, 1738), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (1728, 1738), True, 'import numpy as np\n'), ((1750, 1808), 'matplotlib.colors.BoundaryNorm', 'mpl.colors.BoundaryNorm', (['bounds'], {'ncolors': '(12)', 'extend': '"""both"""'}), "(bounds, ncolors=12, extend='both')\n", (1773, 1808), True, 'import matplotlib as mpl\n'), ((2537, 2575), 'collections.Counter', 'Counter', (['[c[d] for d in non_residuals]'], {}), '([c[d] for d in non_residuals])\n', (2544, 2575), False, 'from collections import Counter, defaultdict\n'), ((2919, 2960), 'numpy.argsort', 'np.argsort', (['[x[d] for d in colored_nodes]'], {}), '([x[d] for d in colored_nodes])\n', (2929, 2960), True, 'import numpy as np\n'), ((3786, 3815), 'fa2.ForceAtlas2', 'ForceAtlas2', ([], {}), '(**default_params)\n', (3797, 3815), False, 'from fa2 import ForceAtlas2\n'), ((5833, 5968), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'node_color': '[node_colors[d] for d in colored_nodes]', 'nodelist': 'colored_nodes', 'ax': 'ax'}), '(G, pos, node_color=[node_colors[d] for d in\n colored_nodes], 
nodelist=colored_nodes, ax=ax, **draw_nodes_kwd)\n', (5855, 5968), True, 'import networkx as nx\n'), ((6318, 6444), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'node_color': '"""#efefef"""', 'nodelist': 'residuals', 'node_shape': '"""s"""', 'ax': 'ax'}), "(G, pos, node_color='#efefef', nodelist=residuals,\n node_shape='s', ax=ax, **draw_nodes_kwd_residual)\n", (6340, 6444), True, 'import networkx as nx\n'), ((7466, 7600), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': pos_x, 'y': pos_y, 'name': nodelist, 'group_id': group_ids,\n 'coreness': coreness, 'node_size': node_size_list}"], {}), "({'x': pos_x, 'y': pos_y, 'name': nodelist, 'group_id':\n group_ids, 'coreness': coreness, 'node_size': node_size_list})\n", (7478, 7600), True, 'import pandas as pd\n'), ((829, 861), 'networkx.from_scipy_sparse_matrix', 'nx.from_scipy_sparse_matrix', (['net'], {}), '(net)\n', (856, 861), True, 'import networkx as nx\n'), ((6768, 6846), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos'], {'ax': 'ax', 'font_size': 'font_size'}), '(G, pos, ax=ax, font_size=font_size, **draw_labels_kwd)\n', (6791, 6846), True, 'import networkx as nx\n'), ((7077, 7096), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (7093, 7096), True, 'import networkx as nx\n'), ((408, 448), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['net'], {'dtype': 'np.float64'}), '(net, dtype=np.float64)\n', (425, 448), False, 'from scipy import sparse\n'), ((450, 473), 'numpy.arange', 'np.arange', (['net.shape[0]'], {}), '(net.shape[0])\n', (459, 473), True, 'import numpy as np\n'), ((976, 1000), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['net'], {}), '(net)\n', (995, 1000), True, 'import networkx as nx\n'), ((1901, 1934), 'seaborn.light_palette', 'sns.light_palette', (['v'], {'n_colors': '(12)'}), '(v, n_colors=12)\n', (1918, 1934), True, 'import seaborn as sns\n'), ((2013, 2045), 'seaborn.dark_palette', 'sns.dark_palette', (['v'], 
{'n_colors': '(12)'}), '(v, n_colors=12)\n', (2029, 2045), True, 'import seaborn as sns\n'), ((562, 586), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['net'], {}), '(net)\n', (581, 586), True, 'import networkx as nx\n'), ((696, 736), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['net'], {'dtype': 'np.float64'}), '(net, dtype=np.float64)\n', (713, 736), False, 'from scipy import sparse\n'), ((738, 761), 'numpy.arange', 'np.arange', (['net.shape[0]'], {}), '(net.shape[0])\n', (747, 761), True, 'import numpy as np\n'), ((1330, 1349), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (1347, 1349), True, 'import seaborn as sns\n'), ((1409, 1435), 'seaborn.color_palette', 'sns.color_palette', (['"""tab20"""'], {}), "('tab20')\n", (1426, 1435), True, 'import seaborn as sns\n'), ((1478, 1514), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', 'num_groups'], {}), "('hls', num_groups)\n", (1495, 1514), True, 'import seaborn as sns\n')] |
from time import gmtime, strftime
def app(environ, start_response):
    """Minimal WSGI application: serve a plain-text UTC timestamp."""
    timestamp = strftime("minimal gunicorn demo: %Y-%m-%d %H:%M:%S", gmtime())
    body = timestamp.encode('UTF-8')
    headers = [
        ("Content-Type", "text/plain"),
        ("Content-Length", str(len(body))),
    ]
    start_response("200 OK", headers)
    return iter([body])
| [
"time.gmtime"
] | [((133, 141), 'time.gmtime', 'gmtime', ([], {}), '()\n', (139, 141), False, 'from time import gmtime, strftime\n')] |
"""Transducers for RxPY.
There are several different implementations of transducers in Python.
This implementation is currently targeted for:
- http://code.sixty-north.com/python-transducers
You should also read the excellent article series "Understanding
Transducers through Python" at:
- http://sixty-north.com/blog/series/understanding-transducers-through-python
Other implementations of transducers in Python are:
- https://github.com/cognitect-labs/transducers-python
"""
from rx.core import Observable, AnonymousObservable
from rx.internal import extensionmethod
class Observing(object):
    """Transducer step object that forwards reduced items into an observer.

    The wrapped observer plays the role of the reduction's accumulator:
    each step pushes one item via ``on_next`` and completion maps to
    ``on_completed``.
    """

    def __init__(self, observer):
        self.observer = observer

    def initial(self):
        """Return the seed of the reduction: the wrapped observer."""
        return self.observer

    def step(self, obs, input):
        """Push a single item into the observer."""
        return obs.on_next(input)

    def complete(self, obs):
        """Signal that the sequence has finished."""
        return obs.on_completed()

    def __call__(self, result, item):
        """Make the transducer usable directly as a reducing function."""
        return self.step(result, item)
@extensionmethod(Observable)
def transduce(self, transducer):
    """Execute a transducer to transform the observable sequence.

    Keyword arguments:
    :param Transducer transducer: A transducer to execute.

    :returns: An Observable sequence containing the results from the
        transducer.
    :rtype: Observable
    """
    source = self

    def subscribe(observer):
        # Wrap the downstream observer so the transducer's step function
        # forwards each transformed item straight into it.
        xform = transducer(Observing(observer))

        def on_next(value):
            try:
                xform.step(observer, value)
            except Exception as ex:
                observer.on_error(ex)

        def on_completed():
            xform.complete(observer)

        return source.subscribe(on_next, observer.on_error, on_completed)

    return AnonymousObservable(subscribe)
| [
"rx.internal.extensionmethod",
"rx.core.AnonymousObservable"
] | [((974, 1001), 'rx.internal.extensionmethod', 'extensionmethod', (['Observable'], {}), '(Observable)\n', (989, 1001), False, 'from rx.internal import extensionmethod\n'), ((1707, 1737), 'rx.core.AnonymousObservable', 'AnonymousObservable', (['subscribe'], {}), '(subscribe)\n', (1726, 1737), False, 'from rx.core import Observable, AnonymousObservable\n')] |
from django.contrib.auth import get_user_model
from rest_framework import authentication, permissions, viewsets, filters
from .forms import TaskFilter, SprintFilter
from .models import Sprint, Task
from .serializers import SprintSerializer, TaskSerializer, UserSerializer
#from django.shortcuts import render
# Resolve the active user model once at import time (supports a custom
# AUTH_USER_MODEL setting).
User = get_user_model()
class DefaultsMixin(object):
    """Default settings for view authentication, permissions, filtering and
    pagination"""
    # Accept both HTTP basic auth and DRF token auth.
    authentication_classes = (
        authentication.BasicAuthentication,
        authentication.TokenAuthentication,
    )
    # Every endpoint requires a logged-in user.
    permission_classes = (
        permissions.IsAuthenticated,
    )
    # 25 results per page by default; clients may request up to 100
    # via the `page_size` query parameter.
    paginate_by = 25
    paginate_by_param = 'page_size'
    max_paginate_by = 100
    # Field filtering, text search, and ordering for all list endpoints.
    filter_backends = (
        filters.DjangoFilterBackend,
        filters.SearchFilter,
        filters.OrderingFilter,
    )
class SprintViewSet(DefaultsMixin, viewsets.ModelViewSet):
    """API endpoint for listing and creating sprints."""
    # Sprints are ordered chronologically by their end date.
    queryset = Sprint.objects.order_by('end')
    serializer_class = SprintSerializer
    filter_class = SprintFilter
    search_fields = ('name',)
    ordering_fields = ('end', 'name',)
class TaskViewSet(DefaultsMixin, viewsets.ModelViewSet):
    """API endpoint for listing and creating tasks."""
    queryset = Task.objects.all()
    serializer_class = TaskSerializer
    search_fields = ('name', 'description',)
    ordering_fields = ('name', 'order', 'started', 'due', 'completed',)
class UserViewSet(DefaultsMixin, viewsets.ReadOnlyModelViewSet):
    """Api endpoint for listing users."""
    # Look users up by username (whatever field the user model declares)
    # rather than by numeric primary key.
    lookup_field = User.USERNAME_FIELD
    lookup_url_kwarg = User.USERNAME_FIELD
    queryset = User.objects.order_by(User.USERNAME_FIELD)
    serializer_class = UserSerializer
    search_fields = (User.USERNAME_FIELD,)
| [
"django.contrib.auth.get_user_model"
] | [((318, 334), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (332, 334), False, 'from django.contrib.auth import get_user_model\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_cdigraphlayout
----------------------------------
Tests for `cdigraphlayout` module.
"""
import os
import sys
import unittest
import io
import tempfile
import shutil
import json
import ndex2
from cdigraphlayout import cdigraphlayoutcmd
class TestCdIgraphLayout(unittest.TestCase):
    """Unit tests for the ``cdigraphlayoutcmd`` command-line module.

    Covers argument parsing, the helpers that derive node sizes and
    bounding boxes from CX networks, and end-to-end ``run_layout`` runs
    against the bundled 5-node example network.
    """

    TEST_DIR = os.path.dirname(__file__)
    HUNDRED_NODE_DIR = os.path.join(TEST_DIR, 'data',
                                    '100node_example')

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_parse_args_all_defaults(self):
        myargs = ['inputarg']
        res = cdigraphlayoutcmd._parse_arguments('desc', myargs)
        self.assertEqual('inputarg', res.input)
        self.assertEqual('auto', res.layout)
        self.assertEqual(None, res.scale)
        self.assertEqual(None, res.fit_into)

    def test_parse_args_scale_and_layoutset(self):
        myargs = ['inputarg', '--layout', 'circle',
                  '--scale', '5.0']
        res = cdigraphlayoutcmd._parse_arguments('desc', myargs)
        self.assertEqual('inputarg', res.input)
        self.assertEqual('circle', res.layout)
        self.assertEqual(5.0, res.scale)
        self.assertEqual(None, res.fit_into)

    def test_parse_args_fit_into_set(self):
        myargs = ['inputarg',
                  '--fit_into', '1,2,3,4']
        res = cdigraphlayoutcmd._parse_arguments('desc', myargs)
        self.assertEqual('inputarg', res.input)
        self.assertEqual('auto', res.layout)
        self.assertEqual(None, res.scale)
        self.assertEqual('1,2,3,4', res.fit_into)

    def test_get_node_size_from_cyvisual_properties_with_none(self):
        try:
            cdigraphlayoutcmd._get_node_size_from_cyvisual_properties()
            self.fail('Expected ValueError')
        except ValueError as ve:
            self.assertEqual('Network passed in cannot be None',
                             str(ve))

    def test_get_node_size_from_cyvisual_properties_network_missing_aspect(self):
        net = ndex2.nice_cx_network.NiceCXNetwork()
        res = cdigraphlayoutcmd._get_node_size_from_cyvisual_properties(net_cx=net)
        self.assertIsNone(res)

    def test_get_node_size_from_cyvisual_properties_on_real_network(self):
        five_node = os.path.join(os.path.dirname(__file__), 'data',
                                 '5node.cx')
        net = ndex2.create_nice_cx_from_file(five_node)
        res = cdigraphlayoutcmd._get_node_size_from_cyvisual_properties(net_cx=net)
        self.assertEqual(85.0, res)

    def test_get_bounding_box_based_on_node_size_with_none(self):
        try:
            cdigraphlayoutcmd._get_bounding_box_based_on_node_size()
            self.fail('Expected ValueError')
        except ValueError as ve:
            self.assertEqual('Network passed in cannot be None',
                             str(ve))

    def test_get_bounding_box_based_on_node_size_with_5node(self):
        five_node = os.path.join(os.path.dirname(__file__), 'data',
                                 '5node.cx')
        net = ndex2.create_nice_cx_from_file(five_node)
        res = cdigraphlayoutcmd._get_bounding_box_based_on_node_size(net_cx=net)
        self.assertEqual((0.0, 0.0, 550.0, 550.0), res.coords)

    def test_get_bounding_box_from_user_str(self):
        self.assertIsNone(cdigraphlayoutcmd.
                          _get_bounding_box_from_user_str(None))
        # test empty str
        try:
            cdigraphlayoutcmd._get_bounding_box_from_user_str('')
            self.fail('Expected ValueError')
        except ValueError as ve:
            self.assertEqual('Could not parse bounding box coordinates from '
                             'input string: ', str(ve))
        # test str with only 1 comma
        try:
            cdigraphlayoutcmd._get_bounding_box_from_user_str('1,2')
            self.fail('Expected ValueError')
        except ValueError as ve:
            self.assertEqual('Could not parse bounding box coordinates from '
                             'input string: 1,2', str(ve))
        # test str with non numeric values
        try:
            cdigraphlayoutcmd._get_bounding_box_from_user_str('1,b,c,d')
            self.fail('Expected ValueError')
        except ValueError as ve:
            self.assertTrue('invalid coordinate' in str(ve))
        # test valid
        res = cdigraphlayoutcmd._get_bounding_box_from_user_str('0.0,1.0,2,3')
        self.assertEqual((0.0, 1.0, 2.0, 3.0), res.coords)

    def test_runlayout_input_is_not_a_file(self):
        temp_dir = tempfile.mkdtemp()
        try:
            args = cdigraphlayoutcmd._parse_arguments('desc',
                                                      [os.path.join(temp_dir,
                                                                    'input')])
            o_stream = io.StringIO()
            e_stream = io.StringIO()
            res = cdigraphlayoutcmd.run_layout(args, out_stream=o_stream,
                                               err_stream=e_stream)
            self.assertEqual(3, res)
        finally:
            shutil.rmtree(temp_dir)

    def test_runlayout_input_is_not_an_empty_file(self):
        temp_dir = tempfile.mkdtemp()
        try:
            input_file = os.path.join(temp_dir, 'input')
            open(input_file, 'a').close()
            args = cdigraphlayoutcmd._parse_arguments('desc',
                                                      [input_file])
            o_stream = io.StringIO()
            e_stream = io.StringIO()
            res = cdigraphlayoutcmd.run_layout(args, out_stream=o_stream,
                                               err_stream=e_stream)
            self.assertEqual(4, res)
        finally:
            shutil.rmtree(temp_dir)

    def test_runlayout_on_5node(self):
        temp_dir = tempfile.mkdtemp()
        try:
            five_node = os.path.join(os.path.dirname(__file__), 'data',
                                     '5node.cx')
            args = cdigraphlayoutcmd._parse_arguments('desc',
                                                      [five_node])
            o_stream = io.StringIO()
            e_stream = io.StringIO()
            res = cdigraphlayoutcmd.run_layout(args, out_stream=o_stream,
                                               err_stream=e_stream)
            self.assertEqual('', e_stream.getvalue())
            self.assertEqual(0, res)
            cart_layout = json.loads(o_stream.getvalue())
            self.assertTrue(isinstance(cart_layout, list))
            self.assertEqual(5, len(cart_layout))
            for entry in cart_layout:
                self.assertTrue('node' in entry)
                self.assertTrue('x' in entry)
                self.assertTrue('y' in entry)
                self.assertTrue(entry['node'] in [175, 180, 185, 190, 195])
        finally:
            shutil.rmtree(temp_dir)

    def test_runlayout_on_5node_scale_set(self):
        temp_dir = tempfile.mkdtemp()
        try:
            five_node = os.path.join(os.path.dirname(__file__), 'data',
                                     '5node.cx')
            args = cdigraphlayoutcmd._parse_arguments('desc',
                                                      [five_node,
                                                       '--scale',
                                                       '10.0'])
            o_stream = io.StringIO()
            e_stream = io.StringIO()
            res = cdigraphlayoutcmd.run_layout(args, out_stream=o_stream,
                                               err_stream=e_stream)
            self.assertEqual('', e_stream.getvalue())
            self.assertEqual(0, res)
            cart_layout = json.loads(o_stream.getvalue())
            self.assertTrue(isinstance(cart_layout, list))
            self.assertEqual(5, len(cart_layout))
            for entry in cart_layout:
                self.assertTrue('node' in entry)
                self.assertTrue('x' in entry)
                self.assertTrue('y' in entry)
                self.assertTrue(entry['node'] in [175, 180, 185, 190, 195])
        finally:
            shutil.rmtree(temp_dir)

    def test_runlayout_on_5node_fit_into_set(self):
        temp_dir = tempfile.mkdtemp()
        try:
            five_node = os.path.join(os.path.dirname(__file__), 'data',
                                     '5node.cx')
            args = cdigraphlayoutcmd._parse_arguments('desc',
                                                      [five_node,
                                                       '--fit_into',
                                                       '0.0,0.0,1.0,1.0'])
            o_stream = io.StringIO()
            e_stream = io.StringIO()
            res = cdigraphlayoutcmd.run_layout(args, out_stream=o_stream,
                                               err_stream=e_stream)
            self.assertEqual('', e_stream.getvalue())
            self.assertEqual(0, res)
            cart_layout = json.loads(o_stream.getvalue())
            self.assertTrue(isinstance(cart_layout, list))
            self.assertEqual(5, len(cart_layout))
            # Bug fix: removed a leftover debug print(cart_layout) here.
            # NOTE(review): the upper bound below is 1.1 rather than the
            # requested 1.0 -- presumably slack for layout rounding; confirm.
            for entry in cart_layout:
                self.assertTrue('node' in entry)
                self.assertTrue('x' in entry)
                self.assertTrue('y' in entry)
                self.assertTrue(0.0 <= entry['x'] <= 1.1)
                self.assertTrue(0.0 <= entry['y'] <= 1.1)
                self.assertTrue(entry['node'] in [175, 180, 185, 190, 195])
        finally:
            shutil.rmtree(temp_dir)
if __name__ == '__main__':
    # unittest.main() calls sys.exit itself when the run finishes, so the
    # outer sys.exit is redundant but harmless.
    sys.exit(unittest.main())
| [
"cdigraphlayout.cdigraphlayoutcmd._get_bounding_box_from_user_str",
"ndex2.create_nice_cx_from_file",
"os.path.join",
"cdigraphlayout.cdigraphlayoutcmd.run_layout",
"os.path.dirname",
"cdigraphlayout.cdigraphlayoutcmd._parse_arguments",
"cdigraphlayout.cdigraphlayoutcmd._get_node_size_from_cyvisual_prop... | [((357, 382), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (372, 382), False, 'import os\n'), ((407, 456), 'os.path.join', 'os.path.join', (['TEST_DIR', '"""data"""', '"""100node_example"""'], {}), "(TEST_DIR, 'data', '100node_example')\n", (419, 456), False, 'import os\n'), ((655, 705), 'cdigraphlayout.cdigraphlayoutcmd._parse_arguments', 'cdigraphlayoutcmd._parse_arguments', (['"""desc"""', 'myargs'], {}), "('desc', myargs)\n", (689, 705), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((1040, 1090), 'cdigraphlayout.cdigraphlayoutcmd._parse_arguments', 'cdigraphlayoutcmd._parse_arguments', (['"""desc"""', 'myargs'], {}), "('desc', myargs)\n", (1074, 1090), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((1404, 1454), 'cdigraphlayout.cdigraphlayoutcmd._parse_arguments', 'cdigraphlayoutcmd._parse_arguments', (['"""desc"""', 'myargs'], {}), "('desc', myargs)\n", (1438, 1454), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((2073, 2110), 'ndex2.nice_cx_network.NiceCXNetwork', 'ndex2.nice_cx_network.NiceCXNetwork', ([], {}), '()\n', (2108, 2110), False, 'import ndex2\n'), ((2125, 2194), 'cdigraphlayout.cdigraphlayoutcmd._get_node_size_from_cyvisual_properties', 'cdigraphlayoutcmd._get_node_size_from_cyvisual_properties', ([], {'net_cx': 'net'}), '(net_cx=net)\n', (2182, 2194), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((2429, 2470), 'ndex2.create_nice_cx_from_file', 'ndex2.create_nice_cx_from_file', (['five_node'], {}), '(five_node)\n', (2459, 2470), False, 'import ndex2\n'), ((2485, 2554), 'cdigraphlayout.cdigraphlayoutcmd._get_node_size_from_cyvisual_properties', 'cdigraphlayoutcmd._get_node_size_from_cyvisual_properties', ([], {'net_cx': 'net'}), '(net_cx=net)\n', (2542, 2554), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((3116, 3157), 'ndex2.create_nice_cx_from_file', 'ndex2.create_nice_cx_from_file', 
(['five_node'], {}), '(five_node)\n', (3146, 3157), False, 'import ndex2\n'), ((3172, 3238), 'cdigraphlayout.cdigraphlayoutcmd._get_bounding_box_based_on_node_size', 'cdigraphlayoutcmd._get_bounding_box_based_on_node_size', ([], {'net_cx': 'net'}), '(net_cx=net)\n', (3226, 3238), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((4421, 4485), 'cdigraphlayout.cdigraphlayoutcmd._get_bounding_box_from_user_str', 'cdigraphlayoutcmd._get_bounding_box_from_user_str', (['"""0.0,1.0,2,3"""'], {}), "('0.0,1.0,2,3')\n", (4470, 4485), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((4615, 4633), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4631, 4633), False, 'import tempfile\n'), ((5250, 5268), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5266, 5268), False, 'import tempfile\n'), ((5876, 5894), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5892, 5894), False, 'import tempfile\n'), ((7010, 7028), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (7026, 7028), False, 'import tempfile\n'), ((8276, 8294), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (8292, 8294), False, 'import tempfile\n'), ((9673, 9688), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9686, 9688), False, 'import unittest\n'), ((1735, 1794), 'cdigraphlayout.cdigraphlayoutcmd._get_node_size_from_cyvisual_properties', 'cdigraphlayoutcmd._get_node_size_from_cyvisual_properties', ([], {}), '()\n', (1792, 1794), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((2335, 2360), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2350, 2360), False, 'import os\n'), ((2683, 2739), 'cdigraphlayout.cdigraphlayoutcmd._get_bounding_box_based_on_node_size', 'cdigraphlayoutcmd._get_bounding_box_based_on_node_size', ([], {}), '()\n', (2737, 2739), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((3022, 3047), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3037, 3047), 
False, 'import os\n'), ((3380, 3435), 'cdigraphlayout.cdigraphlayoutcmd._get_bounding_box_from_user_str', 'cdigraphlayoutcmd._get_bounding_box_from_user_str', (['None'], {}), '(None)\n', (3429, 3435), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((3515, 3568), 'cdigraphlayout.cdigraphlayoutcmd._get_bounding_box_from_user_str', 'cdigraphlayoutcmd._get_bounding_box_from_user_str', (['""""""'], {}), "('')\n", (3564, 3568), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((3844, 3900), 'cdigraphlayout.cdigraphlayoutcmd._get_bounding_box_from_user_str', 'cdigraphlayoutcmd._get_bounding_box_from_user_str', (['"""1,2"""'], {}), "('1,2')\n", (3893, 3900), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((4185, 4245), 'cdigraphlayout.cdigraphlayoutcmd._get_bounding_box_from_user_str', 'cdigraphlayoutcmd._get_bounding_box_from_user_str', (['"""1,b,c,d"""'], {}), "('1,b,c,d')\n", (4234, 4245), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((4889, 4902), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (4900, 4902), False, 'import io\n'), ((4926, 4939), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (4937, 4939), False, 'import io\n'), ((4958, 5034), 'cdigraphlayout.cdigraphlayoutcmd.run_layout', 'cdigraphlayoutcmd.run_layout', (['args'], {'out_stream': 'o_stream', 'err_stream': 'e_stream'}), '(args, out_stream=o_stream, err_stream=e_stream)\n', (4986, 5034), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((5149, 5172), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (5162, 5172), False, 'import shutil\n'), ((5307, 5338), 'os.path.join', 'os.path.join', (['temp_dir', '"""input"""'], {}), "(temp_dir, 'input')\n", (5319, 5338), False, 'import os\n'), ((5400, 5456), 'cdigraphlayout.cdigraphlayoutcmd._parse_arguments', 'cdigraphlayoutcmd._parse_arguments', (['"""desc"""', '[input_file]'], {}), "('desc', [input_file])\n", (5434, 5456), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((5534, 
5547), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5545, 5547), False, 'import io\n'), ((5571, 5584), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5582, 5584), False, 'import io\n'), ((5603, 5679), 'cdigraphlayout.cdigraphlayoutcmd.run_layout', 'cdigraphlayoutcmd.run_layout', (['args'], {'out_stream': 'o_stream', 'err_stream': 'e_stream'}), '(args, out_stream=o_stream, err_stream=e_stream)\n', (5631, 5679), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((5793, 5816), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (5806, 5816), False, 'import shutil\n'), ((6049, 6104), 'cdigraphlayout.cdigraphlayoutcmd._parse_arguments', 'cdigraphlayoutcmd._parse_arguments', (['"""desc"""', '[five_node]'], {}), "('desc', [five_node])\n", (6083, 6104), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((6182, 6195), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (6193, 6195), False, 'import io\n'), ((6219, 6232), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (6230, 6232), False, 'import io\n'), ((6251, 6327), 'cdigraphlayout.cdigraphlayoutcmd.run_layout', 'cdigraphlayoutcmd.run_layout', (['args'], {'out_stream': 'o_stream', 'err_stream': 'e_stream'}), '(args, out_stream=o_stream, err_stream=e_stream)\n', (6279, 6327), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((6917, 6940), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (6930, 6940), False, 'import shutil\n'), ((7183, 7257), 'cdigraphlayout.cdigraphlayoutcmd._parse_arguments', 'cdigraphlayoutcmd._parse_arguments', (['"""desc"""', "[five_node, '--scale', '10.0']"], {}), "('desc', [five_node, '--scale', '10.0'])\n", (7217, 7257), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((7445, 7458), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (7456, 7458), False, 'import io\n'), ((7482, 7495), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (7493, 7495), False, 'import io\n'), ((7514, 7590), 'cdigraphlayout.cdigraphlayoutcmd.run_layout', 
'cdigraphlayoutcmd.run_layout', (['args'], {'out_stream': 'o_stream', 'err_stream': 'e_stream'}), '(args, out_stream=o_stream, err_stream=e_stream)\n', (7542, 7590), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((8180, 8203), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (8193, 8203), False, 'import shutil\n'), ((8449, 8541), 'cdigraphlayout.cdigraphlayoutcmd._parse_arguments', 'cdigraphlayoutcmd._parse_arguments', (['"""desc"""', "[five_node, '--fit_into', '0.0,0.0,1.0,1.0']"], {}), "('desc', [five_node, '--fit_into',\n '0.0,0.0,1.0,1.0'])\n", (8483, 8541), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((8725, 8738), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (8736, 8738), False, 'import io\n'), ((8762, 8775), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (8773, 8775), False, 'import io\n'), ((8794, 8870), 'cdigraphlayout.cdigraphlayoutcmd.run_layout', 'cdigraphlayoutcmd.run_layout', (['args'], {'out_stream': 'o_stream', 'err_stream': 'e_stream'}), '(args, out_stream=o_stream, err_stream=e_stream)\n', (8822, 8870), False, 'from cdigraphlayout import cdigraphlayoutcmd\n'), ((9607, 9630), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (9620, 9630), False, 'import shutil\n'), ((5945, 5970), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5960, 5970), False, 'import os\n'), ((7079, 7104), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7094, 7104), False, 'import os\n'), ((8345, 8370), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8360, 8370), False, 'import os\n'), ((4764, 4795), 'os.path.join', 'os.path.join', (['temp_dir', '"""input"""'], {}), "(temp_dir, 'input')\n", (4776, 4795), False, 'import os\n')] |
"""
Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385
"""
from typing import Type
import torch.nn as nn
from uninas.modules.networks.stackedcells import StackedCellsNetworkBody
from uninas.modules.stems.cnn import ConvStem
from uninas.modules.layers.cnn import PoolingLayer
from uninas.modules.layers.resnet import AbstractResNetLayer, ResNetLayer, ResNetBottleneckLayer
from uninas.modules.heads.cnn import ClassificationHead
from uninas.utils.shape import Shape
from uninas.utils.generate.networks.manually.abstract import get_stem_instance, get_head_instance,\
get_passthrough_partials, get_network
def _resnet(block: Type[AbstractResNetLayer], stages=(2, 2, 2, 2), inner_channels=(64, 128, 256, 512), expansion=1,
s_in=Shape([3, 224, 224]), s_out=Shape([1000])) -> nn.Module:
stem = get_stem_instance(ConvStem, features=inner_channels[0], stride=2, k_size=7, act_fun='relu')
head = get_head_instance(ClassificationHead, bias=True, dropout=0.0)
layers = [(inner_channels[0], PoolingLayer,
dict(pool_type='max', k_size=3, padding='same', order='w', dropout_rate=0), dict(stride=2))]
channels = [int(c*expansion) for c in inner_channels]
defaults = dict(k_size=3, stride=1, padding='same', dilation=1, bn_affine=True, act_fun='relu', act_inplace=True,
expansion=1/expansion, has_first_act=False)
for s, (num, cx) in enumerate(zip(stages, channels)):
for i in range(num):
if s > 0 and i == 0:
layers.append((cx, block, defaults, dict(stride=2, shortcut_type='conv1x1')))
elif i == 0 and expansion > 1:
layers.append((cx, block, defaults, dict(stride=1, shortcut_type='conv1x1')))
else:
layers.append((cx, block, defaults, dict(stride=1, shortcut_type='id')))
cell_partials, cell_order = get_passthrough_partials(layers)
return get_network(StackedCellsNetworkBody, stem, head, cell_partials, cell_order, s_in, s_out)
def get_resnet18(s_in=Shape([3, 224, 224]), s_out=Shape([1000])) -> nn.Module:
return _resnet(block=ResNetLayer, stages=(2, 2, 2, 2), expansion=1, s_in=s_in, s_out=s_out)
def get_resnet34(s_in=Shape([3, 224, 224]), s_out=Shape([1000])) -> nn.Module:
return _resnet(block=ResNetLayer, stages=(3, 4, 6, 3), expansion=1, s_in=s_in, s_out=s_out)
def get_resnet50(s_in=Shape([3, 224, 224]), s_out=Shape([1000])) -> nn.Module:
return _resnet(block=ResNetBottleneckLayer, stages=(3, 4, 6, 3), expansion=4, s_in=s_in, s_out=s_out)
def get_resnet101(s_in=Shape([3, 224, 224]), s_out=Shape([1000])) -> nn.Module:
return _resnet(block=ResNetBottleneckLayer, stages=(3, 4, 23, 3), expansion=4, s_in=s_in, s_out=s_out)
def get_resnet152(s_in=Shape([3, 224, 224]), s_out=Shape([1000])) -> nn.Module:
return _resnet(block=ResNetBottleneckLayer, stages=(3, 8, 36, 3), expansion=4, s_in=s_in, s_out=s_out)
if __name__ == '__main__':
from uninas.utils.torch.misc import count_parameters
from uninas.builder import Builder
Builder()
net = get_resnet50().cuda()
net.eval()
print(net)
print('params', count_parameters(net))
print('cell params', count_parameters(net.cells))
for j, cell in enumerate(net.cells):
print(j, count_parameters(cell))
| [
"uninas.utils.generate.networks.manually.abstract.get_stem_instance",
"uninas.builder.Builder",
"uninas.utils.generate.networks.manually.abstract.get_network",
"uninas.utils.generate.networks.manually.abstract.get_passthrough_partials",
"uninas.utils.torch.misc.count_parameters",
"uninas.utils.shape.Shape... | [((770, 790), 'uninas.utils.shape.Shape', 'Shape', (['[3, 224, 224]'], {}), '([3, 224, 224])\n', (775, 790), False, 'from uninas.utils.shape import Shape\n'), ((798, 811), 'uninas.utils.shape.Shape', 'Shape', (['[1000]'], {}), '([1000])\n', (803, 811), False, 'from uninas.utils.shape import Shape\n'), ((838, 933), 'uninas.utils.generate.networks.manually.abstract.get_stem_instance', 'get_stem_instance', (['ConvStem'], {'features': 'inner_channels[0]', 'stride': '(2)', 'k_size': '(7)', 'act_fun': '"""relu"""'}), "(ConvStem, features=inner_channels[0], stride=2, k_size=7,\n act_fun='relu')\n", (855, 933), False, 'from uninas.utils.generate.networks.manually.abstract import get_stem_instance, get_head_instance, get_passthrough_partials, get_network\n'), ((941, 1002), 'uninas.utils.generate.networks.manually.abstract.get_head_instance', 'get_head_instance', (['ClassificationHead'], {'bias': '(True)', 'dropout': '(0.0)'}), '(ClassificationHead, bias=True, dropout=0.0)\n', (958, 1002), False, 'from uninas.utils.generate.networks.manually.abstract import get_stem_instance, get_head_instance, get_passthrough_partials, get_network\n'), ((1891, 1923), 'uninas.utils.generate.networks.manually.abstract.get_passthrough_partials', 'get_passthrough_partials', (['layers'], {}), '(layers)\n', (1915, 1923), False, 'from uninas.utils.generate.networks.manually.abstract import get_stem_instance, get_head_instance, get_passthrough_partials, get_network\n'), ((1935, 2027), 'uninas.utils.generate.networks.manually.abstract.get_network', 'get_network', (['StackedCellsNetworkBody', 'stem', 'head', 'cell_partials', 'cell_order', 's_in', 's_out'], {}), '(StackedCellsNetworkBody, stem, head, cell_partials, cell_order,\n s_in, s_out)\n', (1946, 2027), False, 'from uninas.utils.generate.networks.manually.abstract import get_stem_instance, get_head_instance, get_passthrough_partials, get_network\n'), ((2048, 2068), 'uninas.utils.shape.Shape', 'Shape', (['[3, 224, 
224]'], {}), '([3, 224, 224])\n', (2053, 2068), False, 'from uninas.utils.shape import Shape\n'), ((2076, 2089), 'uninas.utils.shape.Shape', 'Shape', (['[1000]'], {}), '([1000])\n', (2081, 2089), False, 'from uninas.utils.shape import Shape\n'), ((2225, 2245), 'uninas.utils.shape.Shape', 'Shape', (['[3, 224, 224]'], {}), '([3, 224, 224])\n', (2230, 2245), False, 'from uninas.utils.shape import Shape\n'), ((2253, 2266), 'uninas.utils.shape.Shape', 'Shape', (['[1000]'], {}), '([1000])\n', (2258, 2266), False, 'from uninas.utils.shape import Shape\n'), ((2402, 2422), 'uninas.utils.shape.Shape', 'Shape', (['[3, 224, 224]'], {}), '([3, 224, 224])\n', (2407, 2422), False, 'from uninas.utils.shape import Shape\n'), ((2430, 2443), 'uninas.utils.shape.Shape', 'Shape', (['[1000]'], {}), '([1000])\n', (2435, 2443), False, 'from uninas.utils.shape import Shape\n'), ((2590, 2610), 'uninas.utils.shape.Shape', 'Shape', (['[3, 224, 224]'], {}), '([3, 224, 224])\n', (2595, 2610), False, 'from uninas.utils.shape import Shape\n'), ((2618, 2631), 'uninas.utils.shape.Shape', 'Shape', (['[1000]'], {}), '([1000])\n', (2623, 2631), False, 'from uninas.utils.shape import Shape\n'), ((2779, 2799), 'uninas.utils.shape.Shape', 'Shape', (['[3, 224, 224]'], {}), '([3, 224, 224])\n', (2784, 2799), False, 'from uninas.utils.shape import Shape\n'), ((2807, 2820), 'uninas.utils.shape.Shape', 'Shape', (['[1000]'], {}), '([1000])\n', (2812, 2820), False, 'from uninas.utils.shape import Shape\n'), ((3073, 3082), 'uninas.builder.Builder', 'Builder', ([], {}), '()\n', (3080, 3082), False, 'from uninas.builder import Builder\n'), ((3165, 3186), 'uninas.utils.torch.misc.count_parameters', 'count_parameters', (['net'], {}), '(net)\n', (3181, 3186), False, 'from uninas.utils.torch.misc import count_parameters\n'), ((3213, 3240), 'uninas.utils.torch.misc.count_parameters', 'count_parameters', (['net.cells'], {}), '(net.cells)\n', (3229, 3240), False, 'from uninas.utils.torch.misc import count_parameters\n'), 
((3301, 3323), 'uninas.utils.torch.misc.count_parameters', 'count_parameters', (['cell'], {}), '(cell)\n', (3317, 3323), False, 'from uninas.utils.torch.misc import count_parameters\n')] |
#!/usr/bin/env python
import requests
from datetime import datetime
from config_parser.config_parser import get_config_value
from db.models import AccountBalance, Account, Session
from sqlalchemy.orm import eagerload
def update_accounts_balances(tgbot):
    """Poll every tracked Ethereum account over JSON-RPC and record/notify balance changes.

    Fetches the current ETH price (USD/EUR) from coinmarketcap, reads each
    account's latest balance via ``eth_getBalance``, stores a new
    ``AccountBalance`` row whenever the balance changed, and notifies all
    subscribed Telegram chats with the delta and its fiat value.

    :param tgbot: Telegram bot exposing ``send_message(chat_id=..., text=...)``.
    """
    config_json_rpc_api_url = get_config_value('BLOCKCHAINPOLLER', 'json_rpc_api_url')
    config_json_rpc_api_port = get_config_value('BLOCKCHAINPOLLER', 'json_rpc_api_port')
    json_rpc_api_url = 'localhost' if config_json_rpc_api_url is None else config_json_rpc_api_url
    # Convert only after the None-check: int(None) would raise TypeError when
    # the option is absent from the config file, defeating the 8545 fallback.
    json_rpc_api_port = 8545 if config_json_rpc_api_port is None else int(config_json_rpc_api_port)
    try:
        ether_stock_price_request = requests.get('https://api.coinmarketcap.com/v1/ticker/ethereum/?convert=EUR')
        ether_stock_price = ether_stock_price_request.json()[0]
    except (requests.ConnectionError, IndexError):
        return
    else:
        if ether_stock_price_request.status_code != 200:
            # try next time if there is network error
            return
    session = Session()
    try:
        accounts_queryset = session.query(Account).options(eagerload(Account.chats)).all()
        for account in accounts_queryset:
            post_json = {'jsonrpc': '2.0', 'method': 'eth_getBalance', 'params': ['{}'.format(account.id), 'latest'], 'id': 1}
            account_balance = requests.post('http://{}:{}'.format(json_rpc_api_url, json_rpc_api_port), json=post_json).json()
            old_balance = session.query(AccountBalance).filter_by(account_id=account.id).order_by(AccountBalance.id.desc()).first()
            if 'error' not in account_balance:
                # eth_getBalance returns a hex string denominated in wei.
                new_balance = int(account_balance['result'], 16)
                if old_balance is None or new_balance != old_balance.balance:
                    # Convert from wei to ETH (1 ETH = 10**18 wei) for both the
                    # initial balance and subsequent deltas, so the stored fiat
                    # values are always priced in ETH.
                    changed_value = (new_balance if old_balance is None
                                     else new_balance - old_balance.balance) / 10 ** 18
                    changed_in_money = {
                        'EUR': changed_value * float(ether_stock_price['price_eur']),
                        'USD': changed_value * float(ether_stock_price['price_usd'])
                    }
                    new_account_balance = AccountBalance(account_id=account.id,
                                                       balance=new_balance,
                                                       change_in_money=changed_in_money)
                    session.add(new_account_balance)
                    session.commit()
                    if old_balance is not None:
                        # Notify only on changes, never on the very first poll.
                        for chat in account.chats:
                            if chat.subscription_active:
                                tgbot.send_message(chat_id=chat.id, text='{} UTC - 1 ETH = ${} / €{}\n'
                                                                         'Account {} balance changed {} ETH.\n'
                                                                         'Value ${} / €{}'
                                                   .format(
                                                       str(datetime.utcnow()),
                                                       round(float(ether_stock_price['price_usd']), 2),
                                                       round(float(ether_stock_price['price_eur']), 2),
                                                       account.id,
                                                       changed_value,
                                                       round(changed_in_money["USD"], 3),
                                                       round(changed_in_money["EUR"], 3)))
    finally:
        # Always release the DB session, even if an RPC call raises.
        session.close()
| [
"db.models.AccountBalance",
"datetime.datetime.utcnow",
"db.models.AccountBalance.id.desc",
"requests.get",
"config_parser.config_parser.get_config_value",
"sqlalchemy.orm.eagerload",
"db.models.Session"
] | [((285, 341), 'config_parser.config_parser.get_config_value', 'get_config_value', (['"""BLOCKCHAINPOLLER"""', '"""json_rpc_api_url"""'], {}), "('BLOCKCHAINPOLLER', 'json_rpc_api_url')\n", (301, 341), False, 'from config_parser.config_parser import get_config_value\n'), ((1037, 1046), 'db.models.Session', 'Session', ([], {}), '()\n', (1044, 1046), False, 'from db.models import AccountBalance, Account, Session\n'), ((377, 434), 'config_parser.config_parser.get_config_value', 'get_config_value', (['"""BLOCKCHAINPOLLER"""', '"""json_rpc_api_port"""'], {}), "('BLOCKCHAINPOLLER', 'json_rpc_api_port')\n", (393, 434), False, 'from config_parser.config_parser import get_config_value\n'), ((675, 752), 'requests.get', 'requests.get', (['"""https://api.coinmarketcap.com/v1/ticker/ethereum/?convert=EUR"""'], {}), "('https://api.coinmarketcap.com/v1/ticker/ethereum/?convert=EUR')\n", (687, 752), False, 'import requests\n'), ((1102, 1126), 'sqlalchemy.orm.eagerload', 'eagerload', (['Account.chats'], {}), '(Account.chats)\n', (1111, 1126), False, 'from sqlalchemy.orm import eagerload\n'), ((2099, 2196), 'db.models.AccountBalance', 'AccountBalance', ([], {'account_id': 'account.id', 'balance': 'new_balance', 'change_in_money': 'changed_in_money'}), '(account_id=account.id, balance=new_balance, change_in_money=\n changed_in_money)\n', (2113, 2196), False, 'from db.models import AccountBalance, Account, Session\n'), ((1512, 1536), 'db.models.AccountBalance.id.desc', 'AccountBalance.id.desc', ([], {}), '()\n', (1534, 1536), False, 'from db.models import AccountBalance, Account, Session\n'), ((2927, 2944), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2942, 2944), False, 'from datetime import datetime\n')] |
import logging
import os
import pytest
import shakedown # required by sdk_utils version checks
import sdk_cmd
import sdk_plan
import sdk_tasks
import sdk_upgrade
import sdk_utils
from tests import config
log = logging.getLogger(__name__)
FRAMEWORK_NAME = "secrets/hello-world"
NUM_HELLO = 2
NUM_WORLD = 3
# check environment first...
if "FRAMEWORK_NAME" in os.environ:
FRAMEWORK_NAME = os.environ["FRAMEWORK_NAME"]
if "NUM_HELLO" in os.environ:
NUM_HELLO = int(os.environ["NUM_HELLO"])
if "NUM_WORLD" in os.environ:
NUM_WORLD = int(os.environ["NUM_WORLD"])
@pytest.mark.soak_upgrade
def test_soak_upgrade_downgrade():
    """Exercise a full upgrade/downgrade cycle of the deployed service."""
    sdk_upgrade.soak_upgrade_downgrade(
        config.PACKAGE_NAME, config.SERVICE_NAME, config.DEFAULT_TASK_COUNT)
@pytest.mark.soak_secrets_update
@sdk_utils.dcos_1_10_or_higher
def test_soak_secrets_update():
    """Rotate the three service secrets, verify both first pods see the new
    content after a restart, then write different values back again."""
    secret_content_alternative = "hello-world-secret-data-alternative"
    test_soak_secrets_framework_alive()
    sdk_cmd.run_cli("package install --cli dcos-enterprise-cli --yes")
    sdk_cmd.run_cli("package install --cli hello-world --yes")

    # overwrite all three secrets with the alternative content
    for secret_path in ("secrets/secret1", "secrets/secret2", "secrets/secret3"):
        sdk_cmd.run_cli("security secrets update --value={} {}".format(
            secret_content_alternative, secret_path))

    test_soak_secrets_restart_hello0()

    # get new task ids - only first pod
    hello_tasks = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "hello-0")
    world_tasks = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "world-0")

    # make sure content is changed in the world pod ...
    for probe in ("bash -c 'echo $WORLD_SECRET1_ENV'",
                  "cat WORLD_SECRET2_FILE",
                  "cat secrets/secret3"):
        assert secret_content_alternative == task_exec(world_tasks[0], probe)

    # ... and in the hello pod
    for probe in ("bash -c 'echo $HELLO_SECRET1_ENV'",
                  "cat HELLO_SECRET1_FILE",
                  "cat HELLO_SECRET2_FILE"):
        assert secret_content_alternative == task_exec(hello_tasks[0], probe)

    # revert back to some other value
    for index in (1, 2, 3):
        sdk_cmd.run_cli("security secrets update --value=SECRET{0} secrets/secret{0}".format(index))

    test_soak_secrets_restart_hello0()
@pytest.mark.soak_secrets_alive
@sdk_utils.dcos_1_10_or_higher
def test_soak_secrets_framework_alive():
    """Sanity check: the deployment plan has completed and every expected
    hello/world task is running."""
    sdk_plan.wait_for_completed_deployment(FRAMEWORK_NAME)
    sdk_tasks.check_running(FRAMEWORK_NAME, NUM_HELLO + NUM_WORLD)
def test_soak_secrets_restart_hello0():
    """Restart the first hello and world pods and wait for replacements to run."""
    # remember the pre-restart task ids so we can detect the replacements
    previous_tasks = {
        pod: sdk_tasks.get_task_ids(FRAMEWORK_NAME, pod)
        for pod in ("hello-0", "world-0")
    }

    # restart pods to retrieve new secret's content
    for pod in ("hello-0", "world-0"):
        sdk_cmd.svc_cli(config.PACKAGE_NAME, FRAMEWORK_NAME, 'pod restart {}'.format(pod))

    # wait pod restart to complete
    for pod, old_task_ids in previous_tasks.items():
        sdk_tasks.check_tasks_updated(FRAMEWORK_NAME, pod, old_task_ids)

    # wait till it all running
    sdk_tasks.check_running(FRAMEWORK_NAME, NUM_HELLO + NUM_WORLD)
def task_exec(task_name, command):
    """Run ``command`` inside ``task_name`` via ``dcos task exec`` and return
    the first line of output that is neither whitespace-only nor one of the
    CLI's "Overwriting ..." notices; return "" if there is no such line."""
    cmd_str = "task exec {} {}".format(task_name, command)
    output_lines = sdk_cmd.run_cli(cmd_str).split('\n')
    log.info('dcos %s output: %s', cmd_str, output_lines)
    meaningful = (line for line in output_lines
                  if not line.isspace() and not line.startswith("Overwriting"))
    return next(meaningful, "")
| [
"logging.getLogger",
"sdk_cmd.run_cli",
"sdk_tasks.check_tasks_updated",
"sdk_upgrade.soak_upgrade_downgrade",
"sdk_tasks.get_task_ids",
"sdk_cmd.svc_cli",
"sdk_plan.wait_for_completed_deployment",
"sdk_tasks.check_running"
] | [((213, 240), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (230, 240), False, 'import logging\n'), ((641, 748), 'sdk_upgrade.soak_upgrade_downgrade', 'sdk_upgrade.soak_upgrade_downgrade', (['config.PACKAGE_NAME', 'config.SERVICE_NAME', 'config.DEFAULT_TASK_COUNT'], {}), '(config.PACKAGE_NAME, config.SERVICE_NAME,\n config.DEFAULT_TASK_COUNT)\n', (675, 748), False, 'import sdk_upgrade\n'), ((985, 1051), 'sdk_cmd.run_cli', 'sdk_cmd.run_cli', (['"""package install --cli dcos-enterprise-cli --yes"""'], {}), "('package install --cli dcos-enterprise-cli --yes')\n", (1000, 1051), False, 'import sdk_cmd\n'), ((1056, 1114), 'sdk_cmd.run_cli', 'sdk_cmd.run_cli', (['"""package install --cli hello-world --yes"""'], {}), "('package install --cli hello-world --yes')\n", (1071, 1114), False, 'import sdk_cmd\n'), ((1540, 1589), 'sdk_tasks.get_task_ids', 'sdk_tasks.get_task_ids', (['FRAMEWORK_NAME', '"""hello-0"""'], {}), "(FRAMEWORK_NAME, 'hello-0')\n", (1562, 1589), False, 'import sdk_tasks\n'), ((1608, 1657), 'sdk_tasks.get_task_ids', 'sdk_tasks.get_task_ids', (['FRAMEWORK_NAME', '"""world-0"""'], {}), "(FRAMEWORK_NAME, 'world-0')\n", (1630, 1657), False, 'import sdk_tasks\n'), ((2350, 2424), 'sdk_cmd.run_cli', 'sdk_cmd.run_cli', (['"""security secrets update --value=SECRET1 secrets/secret1"""'], {}), "('security secrets update --value=SECRET1 secrets/secret1')\n", (2365, 2424), False, 'import sdk_cmd\n'), ((2429, 2503), 'sdk_cmd.run_cli', 'sdk_cmd.run_cli', (['"""security secrets update --value=SECRET2 secrets/secret2"""'], {}), "('security secrets update --value=SECRET2 secrets/secret2')\n", (2444, 2503), False, 'import sdk_cmd\n'), ((2508, 2582), 'sdk_cmd.run_cli', 'sdk_cmd.run_cli', (['"""security secrets update --value=SECRET3 secrets/secret3"""'], {}), "('security secrets update --value=SECRET3 secrets/secret3')\n", (2523, 2582), False, 'import sdk_cmd\n'), ((2733, 2787), 'sdk_plan.wait_for_completed_deployment', 
'sdk_plan.wait_for_completed_deployment', (['FRAMEWORK_NAME'], {}), '(FRAMEWORK_NAME)\n', (2771, 2787), False, 'import sdk_plan\n'), ((2792, 2854), 'sdk_tasks.check_running', 'sdk_tasks.check_running', (['FRAMEWORK_NAME', '(NUM_HELLO + NUM_WORLD)'], {}), '(FRAMEWORK_NAME, NUM_HELLO + NUM_WORLD)\n', (2815, 2854), False, 'import sdk_tasks\n'), ((2920, 2969), 'sdk_tasks.get_task_ids', 'sdk_tasks.get_task_ids', (['FRAMEWORK_NAME', '"""hello-0"""'], {}), "(FRAMEWORK_NAME, 'hello-0')\n", (2942, 2969), False, 'import sdk_tasks\n'), ((2992, 3041), 'sdk_tasks.get_task_ids', 'sdk_tasks.get_task_ids', (['FRAMEWORK_NAME', '"""world-0"""'], {}), "(FRAMEWORK_NAME, 'world-0')\n", (3014, 3041), False, 'import sdk_tasks\n'), ((3099, 3174), 'sdk_cmd.svc_cli', 'sdk_cmd.svc_cli', (['config.PACKAGE_NAME', 'FRAMEWORK_NAME', '"""pod restart hello-0"""'], {}), "(config.PACKAGE_NAME, FRAMEWORK_NAME, 'pod restart hello-0')\n", (3114, 3174), False, 'import sdk_cmd\n'), ((3179, 3254), 'sdk_cmd.svc_cli', 'sdk_cmd.svc_cli', (['config.PACKAGE_NAME', 'FRAMEWORK_NAME', '"""pod restart world-0"""'], {}), "(config.PACKAGE_NAME, FRAMEWORK_NAME, 'pod restart world-0')\n", (3194, 3254), False, 'import sdk_cmd\n'), ((3295, 3368), 'sdk_tasks.check_tasks_updated', 'sdk_tasks.check_tasks_updated', (['FRAMEWORK_NAME', '"""hello-0"""', 'hello_tasks_old'], {}), "(FRAMEWORK_NAME, 'hello-0', hello_tasks_old)\n", (3324, 3368), False, 'import sdk_tasks\n'), ((3373, 3446), 'sdk_tasks.check_tasks_updated', 'sdk_tasks.check_tasks_updated', (['FRAMEWORK_NAME', '"""world-0"""', 'world_tasks_old'], {}), "(FRAMEWORK_NAME, 'world-0', world_tasks_old)\n", (3402, 3446), False, 'import sdk_tasks\n'), ((3483, 3545), 'sdk_tasks.check_running', 'sdk_tasks.check_running', (['FRAMEWORK_NAME', '(NUM_HELLO + NUM_WORLD)'], {}), '(FRAMEWORK_NAME, NUM_HELLO + NUM_WORLD)\n', (3506, 3545), False, 'import sdk_tasks\n'), ((3655, 3679), 'sdk_cmd.run_cli', 'sdk_cmd.run_cli', (['cmd_str'], {}), '(cmd_str)\n', (3670, 3679), False, 'import 
sdk_cmd\n')] |
"""
A sampler defines a method to sample random data from certain distribution.
"""
from typing import List
import numpy as np
class BaseSampler(object):
    """Abstract base class for samplers; subclasses implement :meth:`sample`."""
    def __init__(self):
        pass
    def sample(self, shape, *args):
        """Return random data of the given ``shape``; must be overridden."""
        raise NotImplementedError
class IntSampler(BaseSampler):
    """Sample uniformly distributed integers from ``[low, high)``.

    With a single argument the range is ``[0, low)``, mirroring the builtin
    ``range`` convention.
    """
    def __init__(self, low, high=None):
        super(IntSampler, self).__init__()
        # Single-argument form: treat `low` as the exclusive upper bound.
        self.low, self.high = (0, low) if high is None else (low, high)

    def sample(self, shape, *args):
        """Return int64 samples with the requested ``shape``."""
        return np.random.randint(low=self.low, high=self.high, size=shape, dtype=np.int64)
class UniformSampler(BaseSampler):
    """Sample float32 values uniformly between elementwise bounds ``low`` and ``high``."""
    def __init__(self, low, high):
        super(UniformSampler, self).__init__()
        self.low, self.high = np.array(low), np.array(high)
        assert self.low.shape == self.high.shape, 'The shape of low and high must be the same. Got low type {} and high type {}'.format(
            self.low.shape, self.high.shape)

    def sample(self, shape, *args):
        """Draw samples of shape ``shape + low.shape`` and cast them to float32."""
        full_shape = shape + self.low.shape
        draws = np.random.uniform(low=self.low, high=self.high, size=full_shape)
        return draws.astype(np.float32)
class GaussianSampler(BaseSampler):
    """Sample from a normal distribution with mean ``mu`` and std ``sigma``."""
    def __init__(self, mu=0.0, sigma=1.0):
        super(GaussianSampler, self).__init__()
        self.mu, self.sigma = mu, sigma

    def sample(self, shape, *args):
        """Return samples of the given ``shape`` drawn from N(mu, sigma^2)."""
        loc, scale = self.mu, self.sigma
        return np.random.normal(loc, scale, shape)
class GaussianMixtureSampler(BaseSampler):
    """ Sample from GMM with prior probability distribution.

    Each call to :meth:`sample` first picks one mixture component (uniformly,
    or according to ``prob`` when given) and then draws from that component's
    Gaussian.
    """

    def __init__(self, mu: List, sigma: List, prob=None):
        assert type(mu) == list and type(sigma) == list, 'mu and sigma must be list'
        assert len(mu) == len(sigma), 'length of mu and sigma must be the same'
        if type(prob) == list:
            # Validate the prior with a tolerance: an exact float comparison
            # would reject valid inputs such as [1/3, 1/3, 1/3], whose sum is
            # not exactly 1.0 in binary floating point.
            assert len(mu) == len(prob) and np.isclose(np.sum(prob), 1.), 'The sum of probability list should be 1.'
        super(GaussianMixtureSampler, self).__init__()
        self.mu = mu
        self.sigma = sigma
        self.prob = prob

    def sample(self, shape, *args):
        """Pick one component (index drawn with probabilities ``prob``) and
        return ``shape`` samples from that component's Gaussian."""
        ind = np.random.choice(len(self.mu), p=self.prob)
        return np.random.randn(*shape) * self.sigma[ind] + self.mu[ind]
class ConditionGaussianSampler(BaseSampler):
    """ Conditional Gaussian sampler """
    def __init__(self, mu: List, sigma: List):
        assert type(mu) == list and type(sigma) == list, 'mu and sigma must be list'
        assert len(mu) == len(sigma), 'length of mu and sigma must be the same'
        super(ConditionGaussianSampler, self).__init__()
        # Store as column vectors (k, 1) so indexing by condition keeps a
        # broadcastable trailing axis.
        self.mu = np.asarray(mu)[:, np.newaxis]
        self.sigma = np.asarray(sigma)[:, np.newaxis]

    def sample(self, shape, *args):
        """Sample ``shape`` values from the Gaussian selected by ``args[0]``."""
        condition = args[0]
        noise = np.random.randn(*shape)
        return noise * self.sigma[condition] + self.mu[condition]
| [
"numpy.random.normal",
"numpy.array",
"numpy.random.randint",
"numpy.sum",
"numpy.random.uniform",
"numpy.random.randn"
] | [((582, 657), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'self.low', 'high': 'self.high', 'size': 'shape', 'dtype': 'np.int64'}), '(low=self.low, high=self.high, size=shape, dtype=np.int64)\n', (599, 657), True, 'import numpy as np\n'), ((796, 809), 'numpy.array', 'np.array', (['low'], {}), '(low)\n', (804, 809), True, 'import numpy as np\n'), ((830, 844), 'numpy.array', 'np.array', (['high'], {}), '(high)\n', (838, 844), True, 'import numpy as np\n'), ((1405, 1449), 'numpy.random.normal', 'np.random.normal', (['self.mu', 'self.sigma', 'shape'], {}), '(self.mu, self.sigma, shape)\n', (1421, 1449), True, 'import numpy as np\n'), ((2607, 2619), 'numpy.array', 'np.array', (['mu'], {}), '(mu)\n', (2615, 2619), True, 'import numpy as np\n'), ((2665, 2680), 'numpy.array', 'np.array', (['sigma'], {}), '(sigma)\n', (2673, 2680), True, 'import numpy as np\n'), ((1080, 1156), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.low', 'high': 'self.high', 'size': '(shape + self.low.shape)'}), '(low=self.low, high=self.high, size=shape + self.low.shape)\n', (1097, 1156), True, 'import numpy as np\n'), ((2159, 2182), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2174, 2182), True, 'import numpy as np\n'), ((2764, 2787), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2779, 2787), True, 'import numpy as np\n'), ((1858, 1870), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (1864, 1870), True, 'import numpy as np\n')] |
import json
import logging
from django.urls import reverse
from seahub.test_utils import BaseTestCase
from tests.common.utils import randstring
from seahub.institutions.models import Institution, InstitutionAdmin
from seahub.profile.models import Profile
logger = logging.getLogger(__name__)
class AdminInstitutionUsersTest(BaseTestCase):
    """API tests for the admin institution-users list endpoint."""

    def setUp(self):
        pass

    def _add_institution(self, name=''):
        """Create and return an Institution with the given name."""
        return Institution.objects.create(name=name)

    def _delete_institution(self, name=''):
        """Best-effort cleanup: delete the named institution, logging failures."""
        try:
            Institution.objects.get(name=name).delete()
        except Exception as e:
            logger.error(e)

    def test_can_get(self):
        self.login_as(self.admin)
        institution = self._add_institution('int1')

        url = reverse('api-v2.1-admin-institution-users', args=[institution.id])
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)

        payload = json.loads(response.content)
        assert type(payload['user_list']) is list

        institution.delete()

    def test_no_permission(self):
        self.logout()
        self.login_as(self.admin_no_other_permission)
        institution = self._add_institution('int1')

        url = reverse('api-v2.1-admin-institution-users', args=[institution.id])
        response = self.client.get(url)
        self.assertEqual(403, response.status_code)

    def test_can_create(self):
        self.login_as(self.admin)
        institution = self._add_institution('int1')

        url = reverse('api-v2.1-admin-institution-users', args=[institution.id])
        response = self.client.post(url, {'email': 'invalid_email_string'})
        self.assertEqual(200, response.status_code)

        payload = json.loads(response.content)
        assert type(payload['success']) is list
        assert type(payload['failed']) is list
class AdminInstitutionUserTest(BaseTestCase):
    """Exercises the admin API for a single institution member."""

    def setUp(self):
        pass

    def _add_institution(self, name=''):
        """Create and return an Institution with the given name."""
        return Institution.objects.create(name=name)

    def _delete_institution(self, name=''):
        """Best-effort removal of the named institution; errors are logged."""
        try:
            Institution.objects.get(name=name).delete()
        except Exception as e:
            logger.error(e)

    def _add_user_in_institution(self, email, inst_name):
        """Attach the user's profile to *inst_name*, creating one if needed."""
        profile = Profile.objects.get_profile_by_user(email)
        if profile:
            profile.institution = inst_name
            profile.save()
        else:
            Profile.objects.add_or_update(username=email, institution=inst_name)

    def test_can_update(self):
        """PUT toggles the member's institution-admin flag."""
        self.login_as(self.admin)
        institution = self._add_institution('int1')
        self._add_user_in_institution(self.user.email, institution.name)

        endpoint = reverse('api-v2.1-admin-institution-user',
                           args=[institution.id, self.user.email])
        response = self.client.put(endpoint, 'is_institution_admin=True',
                                   'application/x-www-form-urlencoded')
        self.assertEqual(200, response.status_code)

        payload = json.loads(response.content)
        assert payload['is_institution_admin'] is True
        institution.delete()

    def test_can_delete(self):
        """DELETE removes the member from the institution."""
        self.login_as(self.admin)
        institution = self._add_institution('int1')
        self._add_user_in_institution(self.user.email, institution.name)

        endpoint = reverse('api-v2.1-admin-institution-user',
                           args=[institution.id, self.user.email])
        response = self.client.delete(endpoint)
        self.assertEqual(200, response.status_code)
        institution.delete()
| [
"logging.getLogger",
"seahub.profile.models.Profile.objects.get_profile_by_user",
"json.loads",
"seahub.institutions.models.Institution.objects.create",
"seahub.institutions.models.Institution.objects.get",
"django.urls.reverse",
"seahub.profile.models.Profile.objects.add_or_update"
] | [((266, 293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'import logging\n'), ((435, 472), 'seahub.institutions.models.Institution.objects.create', 'Institution.objects.create', ([], {'name': 'name'}), '(name=name)\n', (461, 472), False, 'from seahub.institutions.models import Institution, InstitutionAdmin\n'), ((807, 866), 'django.urls.reverse', 'reverse', (['"""api-v2.1-admin-institution-users"""'], {'args': '[inst.id]'}), "('api-v2.1-admin-institution-users', args=[inst.id])\n", (814, 866), False, 'from django.urls import reverse\n'), ((972, 996), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (982, 996), False, 'import json\n'), ((1242, 1301), 'django.urls.reverse', 'reverse', (['"""api-v2.1-admin-institution-users"""'], {'args': '[inst.id]'}), "('api-v2.1-admin-institution-users', args=[inst.id])\n", (1249, 1301), False, 'from django.urls import reverse\n'), ((1512, 1571), 'django.urls.reverse', 'reverse', (['"""api-v2.1-admin-institution-users"""'], {'args': '[inst.id]'}), "('api-v2.1-admin-institution-users', args=[inst.id])\n", (1519, 1571), False, 'from django.urls import reverse\n'), ((1756, 1780), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (1766, 1780), False, 'import json\n'), ((2020, 2057), 'seahub.institutions.models.Institution.objects.create', 'Institution.objects.create', ([], {'name': 'name'}), '(name=name)\n', (2046, 2057), False, 'from seahub.institutions.models import Institution, InstitutionAdmin\n'), ((2346, 2388), 'seahub.profile.models.Profile.objects.get_profile_by_user', 'Profile.objects.get_profile_by_user', (['email'], {}), '(email)\n', (2381, 2388), False, 'from seahub.profile.models import Profile\n'), ((2777, 2852), 'django.urls.reverse', 'reverse', (['"""api-v2.1-admin-institution-user"""'], {'args': '[inst.id, self.user.email]'}), "('api-v2.1-admin-institution-user', args=[inst.id, self.user.email])\n", (2784, 2852), 
False, 'from django.urls import reverse\n'), ((3044, 3068), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (3054, 3068), False, 'import json\n'), ((3341, 3416), 'django.urls.reverse', 'reverse', (['"""api-v2.1-admin-institution-user"""'], {'args': '[inst.id, self.user.email]'}), "('api-v2.1-admin-institution-user', args=[inst.id, self.user.email])\n", (3348, 3416), False, 'from django.urls import reverse\n'), ((557, 591), 'seahub.institutions.models.Institution.objects.get', 'Institution.objects.get', ([], {'name': 'name'}), '(name=name)\n', (580, 591), False, 'from seahub.institutions.models import Institution, InstitutionAdmin\n'), ((2142, 2176), 'seahub.institutions.models.Institution.objects.get', 'Institution.objects.get', ([], {'name': 'name'}), '(name=name)\n', (2165, 2176), False, 'from seahub.institutions.models import Institution, InstitutionAdmin\n'), ((2435, 2503), 'seahub.profile.models.Profile.objects.add_or_update', 'Profile.objects.add_or_update', ([], {'username': 'email', 'institution': 'inst_name'}), '(username=email, institution=inst_name)\n', (2464, 2503), False, 'from seahub.profile.models import Profile\n')] |
'''
A warehouse has one loading dock that workers use to load and unload goods.
Warehouse workers carrying the goods arrive at the loading dock at different times. They form two queues, a "loading" queue and an "unloading" queue. Within each queue, the workers are ordered by the time they arrive at the dock.
The arrival time (in minutes) array stores the minute the worker arrives at the loading dock. The direction array stores whether the worker is "loading" or "unloading",
a value of
0 ---> loading and
1 ---> unloading.
Loading/unloading takes 1 minute.
When a worker arrives at the loading dock, if no other worker is at the dock at the same time, then the worker can use the dock.
If a "loading" worker and an "unloading" worker arrive at the dock at the same time, then we decide who can use the dock with these rules:
if the loading dock was not in use in the previous minute, then the unloading worker can use the dock.
if the loading dock was just used by another unloading worker, then the unloading worker can use the dock.
if the loading dock was just used by another loading worker, then the loading worker can use the dock.
Return an array of the time (in minute) each worker uses the dock.
Examples
Example 1:
Input:
time = [0, 0, 1, 6] direction = [0, 1, 1, 0]
Output:
[2, 0, 1, 6]
Explanation:
At time 0, workers 0 and 1 want to use the dock. Worker 0 wants to load and worker 1 wants to unload. The dock was not used in the previous minute, so worker 1 unloads first.
At time 1, workers 0 and 2 want to use the dock. Worker 2 wants to unload, and at the previous minute the dock was used to unload, so worker 2 uses the dock.
At time 2, worker 0 is the only worker at the dock, so he uses the dock.
At time 6, worker 3 arrives at the empty dock and uses the dock.
We return [2, 0, 1, 6].
'''
from typing import List
from collections import deque
def getTimes(numCustomers: int, arrTime: List[int], direction: List[int]) -> List[int]:
    """Return, for each worker, the minute at which they use the dock.

    Workers split into a loading queue (direction 0) and an unloading queue
    (direction 1), each ordered by arrival time.  Ties at the dock are broken
    by who used it last: an unloader goes first when the dock was idle or was
    just used to unload; a loader goes first when it was just used to load.
    """
    loaders = deque()
    unloaders = deque()
    for idx in range(numCustomers):
        # Each queue entry is (arrival minute, worker index).
        target = loaders if direction[idx] == 0 else unloaders
        target.append((arrTime[idx], idx))

    result = [-1] * numCustomers
    clock = -1
    last_action = 'UNLOAD'

    while loaders and unloaders:
        load_waiting = loaders[0][0] <= clock
        unload_waiting = unloaders[0][0] <= clock
        if load_waiting and unload_waiting:
            # Both at the dock: the unloader wins unless the dock was just
            # used to load.
            if clock == -1 or last_action == 'UNLOAD':
                active = unloaders
            else:
                active = loaders
        elif loaders[0][0] < unloaders[0][0]:
            # Only the loading worker has arrived (or arrives first).
            active = loaders
        else:
            # Unloader arrives first, or both arrive simultaneously at an
            # idle dock (unloader has priority).
            active = unloaders

        arrival, idx = active.popleft()
        last_action = 'LOAD' if active is loaders else 'UNLOAD'
        clock = max(arrival, clock)
        result[idx] = clock
        clock += 1

    # Drain whichever queue still has workers; no more tie-breaking needed.
    leftovers = loaders if loaders else unloaders
    while leftovers:
        arrival, idx = leftovers.popleft()
        clock = max(arrival, clock)
        result[idx] = clock
        clock += 1

    return result
if __name__ == "__main__":
    # Read: worker count, then arrival times, then directions (0/1).
    n = int(input())
    arrivals = [int(tok) for tok in input().split()]
    dirs = [int(tok) for tok in input().split()]
    print(' '.join(str(v) for v in getTimes(n, arrivals, dirs)))
"collections.deque"
] | [((1991, 1998), 'collections.deque', 'deque', ([], {}), '()\n', (1996, 1998), False, 'from collections import deque\n'), ((2049, 2056), 'collections.deque', 'deque', ([], {}), '()\n', (2054, 2056), False, 'from collections import deque\n')] |
import numpy as np
class StandardDeviation():
    """Thin wrapper exposing numpy's standard deviation (ddof=0)."""

    @staticmethod
    def standardDeviation(data):
        """Return the population standard deviation of *data*."""
        result = np.std(data)
        return result
"numpy.std"
] | [((113, 125), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (119, 125), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#refer:http://www.wooyun.org/bugs/wooyun-2014-081469
'''
Created on 2015-12-19
@author: 真个程序员不太冷
'''
import re
import urlparse
def assign(service, arg):
    """For the 'zte' service, normalise *arg* to its scheme://host/ root.

    Returns (True, root_url) for the zte service, None otherwise.
    """
    if service != "zte":
        return
    parsed = urlparse.urlparse(arg)
    return True, '%s://%s/' % (parsed.scheme, parsed.netloc)
def audit(arg):
    # Probe a path-traversal flaw in the JSP download handler: filePath is
    # not sanitised, so a relative payload walks out of the web root.
    payload='conf_control/download.jsp?filename=dd.txt&filePath=/../../../../etc/shadow'
    target=arg+payload
    # curl and security_hole come from the framework's `from dummy import *`.
    code, head, res, errcode, _ = curl.curl2(target)
    # A leaked shadow file contains "root:"; the second marker cuts false positives.
    if 'root:' in res and 'ppc:' in res:
        security_hole('中兴ZXV10 MS90 远程视频会议系统任意文件下载'+target)
if __name__ == '__main__':
    # `dummy` supplies the scanner stubs (curl, security_hole, ...) used by audit.
    from dummy import *
    audit(assign('zte', 'http://117.40.138.30:9000/')[1])
"urlparse.urlparse"
] | [((253, 275), 'urlparse.urlparse', 'urlparse.urlparse', (['arg'], {}), '(arg)\n', (270, 275), False, 'import urlparse\n')] |
from multiprocessing.pool import Pool
from script.util.Logger import Logger
from script.util.misc_util import dump_pickle, load_pickle, dump_json, load_json
class LoggerMixIn:
    """Provides a per-class logger whose level follows ``self.verbose``."""

    def __init__(self, verbose=0):
        self.verbose = verbose

    @property
    def log(self):
        """Return a logger named after the concrete class."""
        log_level = Logger.verbose_to_level(self.verbose)
        return Logger(self.__class__.__name__, level=log_level)
class PickleSelfMixIn:
    """Mixin giving a class pickle-based persistence of its instances.

    ``dump``/``load`` and ``to_pickle``/``from_pickle`` are equivalent pairs;
    the original implementation duplicated the load logic in both methods,
    so ``load`` now delegates to ``from_pickle``.
    """

    def dump(self, path):
        """Serialize this object to *path* via pickle."""
        dump_pickle(self, path)

    def load(self, path):
        """Load a pickled object of this class from *path*.

        Returns a fresh instance carrying the loaded attributes; raises
        TypeError on a class mismatch.
        """
        return self.from_pickle(path)

    def to_pickle(self, path, **kwargs):
        """Alias of :meth:`dump`, kept for a pandas-like API."""
        dump_pickle(self, path)

    def from_pickle(self, path, overwrite_self=False, **kwargs):
        """Load a pickled object from *path*.

        Raises TypeError when the pickled object's class differs from this
        class.  When *overwrite_self* is True the loaded attributes are also
        copied onto ``self``.  Returns the freshly constructed instance.
        """
        loaded = load_pickle(path)
        # for auto detect pickle type
        if loaded.__class__ is not self.__class__:
            raise TypeError(
                f"loaded object class {loaded.__class__} does not match "
                f"expected class {self.__class__}")
        # NOTE(review): assumes the concrete class has a no-arg constructor.
        new_obj = self.__class__()
        for key, value in loaded.__dict__.items():
            setattr(new_obj, key, value)
        if overwrite_self:
            for key, value in new_obj.__dict__.items():
                self.__dict__[key] = value
        return new_obj
class PickleMixIn:
    """Namespaced static helpers for pickling arbitrary objects to disk."""

    @staticmethod
    def to_pickle(obj, path):
        """Serialize *obj* to *path*."""
        dump_pickle(obj, path)

    @staticmethod
    def from_pickle(path):
        """Deserialize and return the object stored at *path*."""
        return load_pickle(path)
class JsonMixIn:
    """JSON (de)serialization helpers mirroring the pickle mixin API."""

    @staticmethod
    def _dump_json(obj, path):
        dump_json(obj, path)

    @staticmethod
    def _load_json(path):
        return load_json(path)

    def from_json(self, path):
        """Read and return the JSON document stored at *path*."""
        return self._load_json(path)

    def to_json(self, obj, path):
        """Write *obj* as JSON to *path*."""
        self._dump_json(obj, path)
class singletonPoolMixIn:
    """Shares one multiprocessing Pool across every instance of the class."""

    _pool_singleton = None  # lazily created worker pool, shared class-wide
    _n_job = None           # worker count used when the pool is first built

    def __init__(self, n_job=1):
        # Note: updates the class-level setting, affecting all instances.
        self.__class__._n_job = n_job

    @property
    def pool(self):
        """Create the class-wide worker pool on first access, then reuse it."""
        cls = self.__class__
        if cls._pool_singleton is None:
            cls._pool_singleton = Pool(processes=cls._n_job)
        return cls._pool_singleton
| [
"multiprocessing.pool.Pool",
"script.util.Logger.Logger.verbose_to_level",
"script.util.misc_util.load_pickle",
"script.util.misc_util.dump_pickle",
"script.util.Logger.Logger",
"script.util.misc_util.dump_json",
"script.util.misc_util.load_json"
] | [((305, 342), 'script.util.Logger.Logger.verbose_to_level', 'Logger.verbose_to_level', (['self.verbose'], {}), '(self.verbose)\n', (328, 342), False, 'from script.util.Logger import Logger\n'), ((359, 403), 'script.util.Logger.Logger', 'Logger', (['self.__class__.__name__'], {'level': 'level'}), '(self.__class__.__name__, level=level)\n', (365, 403), False, 'from script.util.Logger import Logger\n'), ((468, 491), 'script.util.misc_util.dump_pickle', 'dump_pickle', (['self', 'path'], {}), '(self, path)\n', (479, 491), False, 'from script.util.misc_util import dump_pickle, load_pickle, dump_json, load_json\n'), ((541, 558), 'script.util.misc_util.load_pickle', 'load_pickle', (['path'], {}), '(path)\n', (552, 558), False, 'from script.util.misc_util import dump_pickle, load_pickle, dump_json, load_json\n'), ((942, 965), 'script.util.misc_util.dump_pickle', 'dump_pickle', (['self', 'path'], {}), '(self, path)\n', (953, 965), False, 'from script.util.misc_util import dump_pickle, load_pickle, dump_json, load_json\n'), ((1054, 1071), 'script.util.misc_util.load_pickle', 'load_pickle', (['path'], {}), '(path)\n', (1065, 1071), False, 'from script.util.misc_util import dump_pickle, load_pickle, dump_json, load_json\n'), ((1655, 1677), 'script.util.misc_util.dump_pickle', 'dump_pickle', (['obj', 'path'], {}), '(obj, path)\n', (1666, 1677), False, 'from script.util.misc_util import dump_pickle, load_pickle, dump_json, load_json\n'), ((1743, 1760), 'script.util.misc_util.load_pickle', 'load_pickle', (['path'], {}), '(path)\n', (1754, 1760), False, 'from script.util.misc_util import dump_pickle, load_pickle, dump_json, load_json\n'), ((1843, 1863), 'script.util.misc_util.dump_json', 'dump_json', (['obj', 'path'], {}), '(obj, path)\n', (1852, 1863), False, 'from script.util.misc_util import dump_pickle, load_pickle, dump_json, load_json\n'), ((1928, 1943), 'script.util.misc_util.load_json', 'load_json', (['path'], {}), '(path)\n', (1937, 1943), False, 'from 
script.util.misc_util import dump_pickle, load_pickle, dump_json, load_json\n'), ((2378, 2415), 'multiprocessing.pool.Pool', 'Pool', ([], {'processes': 'self.__class__._n_job'}), '(processes=self.__class__._n_job)\n', (2382, 2415), False, 'from multiprocessing.pool import Pool\n')] |
import numpy as np
def bowl(vs, v_ref=1.0, scale=.1):
    """Evaluate a bowl-shaped penalty at each value in *vs*.

    Within 0.05 of *v_ref* the curve is an inverted Gaussian dip; outside
    that band it grows linearly with distance from *v_ref*.
    """
    def gaussian(v, loc, width):
        coeff = 1 / np.sqrt(2 * np.pi * width**2)
        return coeff * np.exp(-0.5 * np.square(v - loc) / width**2)

    def pointwise(v):
        dist = np.abs(v - v_ref)
        if dist > 0.05:
            return 2 * dist - 0.095
        return 0.04 - 0.01 * gaussian(v, v_ref, scale)

    return np.array([pointwise(v) for v in vs])
"numpy.abs",
"numpy.sqrt",
"numpy.square"
] | [((216, 233), 'numpy.abs', 'np.abs', (['(v - v_ref)'], {}), '(v - v_ref)\n', (222, 233), True, 'import numpy as np\n'), ((107, 138), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * scale ** 2)'], {}), '(2 * np.pi * scale ** 2)\n', (114, 138), True, 'import numpy as np\n'), ((263, 280), 'numpy.abs', 'np.abs', (['(v - v_ref)'], {}), '(v - v_ref)\n', (269, 280), True, 'import numpy as np\n'), ((155, 173), 'numpy.square', 'np.square', (['(v - loc)'], {}), '(v - loc)\n', (164, 173), True, 'import numpy as np\n')] |
#!/usr/bin/env python3

# Configure a CMA-ES optimisation of the `model` objective and dry-run it.
import sys
sys.path.append('_model')
from model import *

import korali

# Engine drives the run; Experiment holds the problem definition.
k = korali.Engine()
e = korali.Experiment()

# Minimise/maximise the objective imported from _model/model.py.
e["Problem"]["Type"] = "Optimization"
e["Problem"]["Objective Function"] = model

# CMA-ES with a 5-sample population.
e["Solver"]["Type"] = "Optimizer/CMAES"
e["Solver"]["Population Size"] = 5
e["Solver"]["Termination Criteria"]["Max Generations"] = 50

# Single decision variable X in [-10, 10].
e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Lower Bound"] = -10.0
e["Variables"][0]["Upper Bound"] = +10.0

e["Console Output"]["Frequency"] = 25
# NOTE(review): this overrides the Max Generations value (50) set above.
e["Solver"]["Termination Criteria"]["Max Generations"] = 100

# Specifying dry run to perform configuration checks but not actually running Korali
k["Dry Run"] = True
k.run(e)
| [
"korali.Experiment",
"sys.path.append",
"korali.Engine"
] | [((35, 60), 'sys.path.append', 'sys.path.append', (['"""_model"""'], {}), "('_model')\n", (50, 60), False, 'import sys\n'), ((100, 115), 'korali.Engine', 'korali.Engine', ([], {}), '()\n', (113, 115), False, 'import korali\n'), ((120, 139), 'korali.Experiment', 'korali.Experiment', ([], {}), '()\n', (137, 139), False, 'import korali\n')] |
import os
from datetime import timedelta
class Config(object):
    """Flask-style configuration: secrets come from the environment, with
    obvious placeholder fallbacks for local development."""

    SECRET_KEY = os.environ.get('SECRET_KEY') or 'YOUR_SECRET_KEY'
    SESSION_TYPE = 'filesystem'
    # Sessions expire after 30 idle minutes.
    PERMANENT_SESSION_LIFETIME = timedelta(minutes=30)

    # Blurt upvote account credentials.
    UPVOTE_ACCOUNT = os.environ.get('UPVOTE_ACCOUNT') or 'YOUR_USERNAME'
    UPVOTE_KEY = os.environ.get('UPVOTE_KEY') or 'YOUR_PRIVATE_POSTING_KEY'

    # Firebase project settings.
    FB_APIKEY = os.environ.get('FB_APIKEY') or 'YOUR_FB_APIKEY'
    FB_AUTHDOMAIN = 'blurtdb.firebaseapp.com'
    FB_DATABASEURL = 'https://blurtdb.firebaseio.com'
    FB_STORAGEBUCKET = 'blurtdb.appspot.com'
    FB_SERVICEACCOUNT = os.environ.get('FB_SERVICEACCOUNT') or 'FB_SERVICEACCOUNT'
| [
"datetime.timedelta",
"os.environ.get"
] | [((197, 218), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (206, 218), False, 'from datetime import timedelta\n'), ((82, 110), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (96, 110), False, 'import os\n'), ((240, 272), 'os.environ.get', 'os.environ.get', (['"""UPVOTE_ACCOUNT"""'], {}), "('UPVOTE_ACCOUNT')\n", (254, 272), False, 'import os\n'), ((309, 337), 'os.environ.get', 'os.environ.get', (['"""UPVOTE_KEY"""'], {}), "('UPVOTE_KEY')\n", (323, 337), False, 'import os\n'), ((407, 434), 'os.environ.get', 'os.environ.get', (['"""FB_APIKEY"""'], {}), "('FB_APIKEY')\n", (421, 434), False, 'import os\n'), ((624, 659), 'os.environ.get', 'os.environ.get', (['"""FB_SERVICEACCOUNT"""'], {}), "('FB_SERVICEACCOUNT')\n", (638, 659), False, 'import os\n')] |
import datetime
from django.core.cache import cache
from django.test import TestCase, override_settings
from django.utils import timezone
from wagtail.core.models import Page, Site
from wagtail.tests.utils import WagtailTestUtils
from tests.app.models import NewsIndex, NewsItem
def dt(*args):
    """Build a timezone-aware datetime in the current Django timezone."""
    tz = timezone.get_current_timezone()
    return datetime.datetime(*args, tzinfo=tz)
def noop(x):
    """Identity transform, used as ``assertQuerysetEqual``'s transform."""
    return x
class TestNewsList(TestCase, WagtailTestUtils):
    """Integration tests for the news index page and its date archives."""

    def setUp(self):
        super(TestNewsList, self).setUp()
        # Hang a NewsIndex off the default site's root page.
        site = Site.objects.get(is_default_site=True)
        root_page = site.root_page
        self.index = NewsIndex(
            title='News', slug='news')
        root_page.add_child(instance=self.index)

    def test_index(self):
        """The index page lists every news item."""
        item1 = NewsItem.objects.create(
            newsindex=self.index,
            title='One post',
            date=dt(2015, 8, 24, 0, 0, 0))
        item2 = NewsItem.objects.create(
            newsindex=self.index,
            title='Two post',
            date=dt(2015, 8, 24, 0, 0, 0))

        response = self.client.get(self.index.url)
        self.assertIn('newsitem_list', response.context)
        self.assertQuerysetEqual(
            response.context['newsitem_list'],
            [item1, item2], transform=noop)

    def test_archive_year(self):
        """The year archive shows only items dated within that year."""
        NewsItem.objects.create(
            newsindex=self.index,
            title='2015',
            date=dt(2015, 8, 24, 0, 0, 0))
        item2014 = NewsItem.objects.create(
            newsindex=self.index,
            title='2014',
            date=dt(2014, 8, 24, 0, 0, 0))
        NewsItem.objects.create(
            newsindex=self.index,
            title='2013',
            date=dt(2013, 8, 24, 0, 0, 0))

        response = self.client.get(self.index.url + self.index.reverse_subpage(
            'year', kwargs={'year': '2014'}))
        self.assertIn('newsitem_list', response.context)
        self.assertQuerysetEqual(
            response.context['newsitem_list'],
            [item2014], transform=noop)

    def test_archive_month(self):
        """The month archive shows only items from that month."""
        NewsItem.objects.create(
            newsindex=self.index,
            title='2015-08-24',
            date=dt(2015, 8, 24, 0, 0, 0))
        item = NewsItem.objects.create(
            newsindex=self.index,
            title='2015-07-24',
            date=dt(2015, 7, 24, 0, 0, 0))
        NewsItem.objects.create(
            newsindex=self.index,
            title='2015-06-24',
            date=dt(2015, 6, 24, 0, 0, 0))
        # Same month in a different year must be excluded too.
        NewsItem.objects.create(
            newsindex=self.index,
            title='2014-07-24',
            date=dt(2014, 7, 24, 0, 0, 0))

        response = self.client.get(self.index.url + self.index.reverse_subpage(
            'month', kwargs={'year': '2015', 'month': '7'}))
        self.assertIn('newsitem_list', response.context)
        self.assertQuerysetEqual(
            response.context['newsitem_list'],
            [item], transform=noop)

    def test_archive_day(self):
        """The day archive shows only items from that calendar day."""
        NewsItem.objects.create(
            newsindex=self.index,
            title='2015-08-24',
            date=dt(2015, 8, 24, 12, 0, 0))
        item = NewsItem.objects.create(
            newsindex=self.index,
            title='2015-08-23',
            date=dt(2015, 8, 23, 12, 0, 0))
        NewsItem.objects.create(
            newsindex=self.index,
            title='2015-08-22',
            date=dt(2015, 8, 22, 12, 0, 0))
        # Same day-of-month in a different month must be excluded too.
        NewsItem.objects.create(
            newsindex=self.index,
            title='2015-07-23',
            date=dt(2015, 7, 23, 12, 0, 0))

        response = self.client.get(self.index.url + self.index.reverse_subpage(
            'day', kwargs={'year': '2015', 'month': '8', 'day': '23'}))
        self.assertIn('newsitem_list', response.context)
        self.assertQuerysetEqual(
            response.context['newsitem_list'],
            [item], transform=noop)
@override_settings(ALLOWED_HOSTS=['localhost', 'site-a.com', 'site-b.org'])
class TestMultipleSites(TestCase, WagtailTestUtils):
    """News indexes on two Wagtail sites must stay isolated from each other."""

    def setUp(self):
        super(TestMultipleSites, self).setUp()
        # Build two independent site trees, each with its own NewsIndex.
        root = Page.objects.get(pk=1)
        root_a = Page(
            title='Home A', slug='home-a')
        root.add_child(instance=root_a)
        root_b = Page(
            title='Home B', slug='home-b')
        root.add_child(instance=root_b)

        self.index_a = NewsIndex(title='News A', slug='news-a')
        root_a.add_child(instance=self.index_a)
        self.index_b = NewsIndex(title='News B', slug='news-b')
        root_b.add_child(instance=self.index_b)

        self.site_a = Site.objects.create(
            hostname='site-a.com',
            root_page=root_a)
        self.site_b = Site.objects.create(
            hostname='site-b.org',
            root_page=root_b)

        # One item per site, with distinct dates so the URLs differ.
        self.item_a = NewsItem.objects.create(
            newsindex=self.index_a, title='Post A', date=dt(2015, 8, 1))
        self.item_b = NewsItem.objects.create(
            newsindex=self.index_b, title='Post B', date=dt(2015, 8, 2))

    @classmethod
    def tearDownClass(cls):
        super(TestMultipleSites, cls).tearDownClass()
        # Clear site cache when the tests finish to prevent other tests being
        # polluted by a stale cache.
        cache.delete('wagtail_site_root_paths')

    def test_index(self):
        """Each site's index lists only that site's items."""
        response = self.client.get(self.index_a.url,
                                   HTTP_HOST=self.site_a.hostname)
        self.assertIn('newsitem_list', response.context)
        self.assertQuerysetEqual(
            response.context['newsitem_list'],
            [self.item_a], transform=noop)

        response = self.client.get(self.index_b.url,
                                   HTTP_HOST=self.site_b.hostname)
        self.assertIn('newsitem_list', response.context)
        self.assertQuerysetEqual(
            response.context['newsitem_list'],
            [self.item_b], transform=noop)

    def test_item_url(self):
        """Item URLs carry the owning site's hostname and date path."""
        self.assertEqual(
            self.item_a.url(), 'http://{}/{}/2015/8/1/{}-{}/'.format(
                self.site_a.hostname, self.index_a.slug,
                self.item_a.pk, self.item_a.get_nice_url()))
        self.assertEqual(
            self.item_b.url(), 'http://{}/{}/2015/8/2/{}-{}/'.format(
                self.site_b.hostname, self.index_b.slug,
                self.item_b.pk, self.item_b.get_nice_url()))

    def test_item(self):
        """Item detail pages resolve on their own site only."""
        response = self.client.get(self.item_a.url(),
                                   HTTP_HOST=self.site_a.hostname)
        self.assertEqual(response.status_code, 200)
        self.assertIn('newsitem', response.context)
        self.assertEqual(response.context['newsitem'], self.item_a)

        response = self.client.get(self.item_b.url(),
                                   HTTP_HOST=self.site_b.hostname)
        self.assertEqual(response.status_code, 200)
        self.assertIn('newsitem', response.context)
        self.assertEqual(response.context['newsitem'], self.item_b)
| [
"tests.app.models.NewsIndex",
"django.core.cache.cache.delete",
"wagtail.core.models.Page",
"django.utils.timezone.get_current_timezone",
"django.test.override_settings",
"wagtail.core.models.Page.objects.get",
"wagtail.core.models.Site.objects.get",
"wagtail.core.models.Site.objects.create"
] | [((3911, 3985), 'django.test.override_settings', 'override_settings', ([], {'ALLOWED_HOSTS': "['localhost', 'site-a.com', 'site-b.org']"}), "(ALLOWED_HOSTS=['localhost', 'site-a.com', 'site-b.org'])\n", (3928, 3985), False, 'from django.test import TestCase, override_settings\n'), ((531, 569), 'wagtail.core.models.Site.objects.get', 'Site.objects.get', ([], {'is_default_site': '(True)'}), '(is_default_site=True)\n', (547, 569), False, 'from wagtail.core.models import Page, Site\n'), ((626, 662), 'tests.app.models.NewsIndex', 'NewsIndex', ([], {'title': '"""News"""', 'slug': '"""news"""'}), "(title='News', slug='news')\n", (635, 662), False, 'from tests.app.models import NewsIndex, NewsItem\n'), ((4123, 4145), 'wagtail.core.models.Page.objects.get', 'Page.objects.get', ([], {'pk': '(1)'}), '(pk=1)\n', (4139, 4145), False, 'from wagtail.core.models import Page, Site\n'), ((4163, 4198), 'wagtail.core.models.Page', 'Page', ([], {'title': '"""Home A"""', 'slug': '"""home-a"""'}), "(title='Home A', slug='home-a')\n", (4167, 4198), False, 'from wagtail.core.models import Page, Site\n'), ((4270, 4305), 'wagtail.core.models.Page', 'Page', ([], {'title': '"""Home B"""', 'slug': '"""home-b"""'}), "(title='Home B', slug='home-b')\n", (4274, 4305), False, 'from wagtail.core.models import Page, Site\n'), ((4383, 4423), 'tests.app.models.NewsIndex', 'NewsIndex', ([], {'title': '"""News A"""', 'slug': '"""news-a"""'}), "(title='News A', slug='news-a')\n", (4392, 4423), False, 'from tests.app.models import NewsIndex, NewsItem\n'), ((4496, 4536), 'tests.app.models.NewsIndex', 'NewsIndex', ([], {'title': '"""News B"""', 'slug': '"""news-b"""'}), "(title='News B', slug='news-b')\n", (4505, 4536), False, 'from tests.app.models import NewsIndex, NewsItem\n'), ((4608, 4668), 'wagtail.core.models.Site.objects.create', 'Site.objects.create', ([], {'hostname': '"""site-a.com"""', 'root_page': 'root_a'}), "(hostname='site-a.com', root_page=root_a)\n", (4627, 4668), False, 'from 
wagtail.core.models import Page, Site\n'), ((4717, 4777), 'wagtail.core.models.Site.objects.create', 'Site.objects.create', ([], {'hostname': '"""site-b.org"""', 'root_page': 'root_b'}), "(hostname='site-b.org', root_page=root_b)\n", (4736, 4777), False, 'from wagtail.core.models import Page, Site\n'), ((5267, 5306), 'django.core.cache.cache.delete', 'cache.delete', (['"""wagtail_site_root_paths"""'], {}), "('wagtail_site_root_paths')\n", (5279, 5306), False, 'from django.core.cache import cache\n'), ((341, 372), 'django.utils.timezone.get_current_timezone', 'timezone.get_current_timezone', ([], {}), '()\n', (370, 372), False, 'from django.utils import timezone\n')] |
#!/usr/bin/env python
# encoding: utf-8
'''
#-------------------------------------------------------------------
# CONFIDENTIAL --- CUSTOM STUDIOS
#-------------------------------------------------------------------
#
# @Project Name : 线程增加阅读量点击量
#
# @File Name : blog-click-read-num.py
#
# @Programmer : autofelix
#
# @Start Date : 2022/01/05 13:14
#
# @Last Update : 2022/01/05 13:14
#
#-------------------------------------------------------------------
'''
import time, random, threading
from selenium import webdriver
class csdn:
    '''
    Drives Chrome sessions that open the three most recent posts on a CSDN
    blogger's home page, inflating their read counts.  ``run`` loops forever,
    spawning three browser threads per iteration.
    '''
    def __init__(self):
        # Pool of desktop user-agent strings; one is picked per browser session.
        self.userAgent = [
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
            # Chrome
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:86.0) Gecko/20100101 Firefox/86.0',  # Firefox
            'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',  # IE
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
            # 360 browser
        ]

    def hello(self):
        '''
        Print the welcome banner.

        :return: self, so calls chain as ``csdn().hello().run()``
        '''
        print('*' * 50)
        print(' ' * 15 + 'csdn线程阅读量点击量阅读量')
        print(' ' * 5 + '作者: autofelix   Date: 2022-01-05 13:14')
        print(' ' * 5 + '主页: https://autofelix.blog.csdn.net')
        print('*' * 50)
        return self

    def init_driver(self):
        '''
        Build a Chrome WebDriver configured for unattended browsing.
        '''
        chrome_options = webdriver.ChromeOptions()
        # Block site notification prompts.
        chrome_options.add_experimental_option('prefs', {
            'profile.default_content_setting_values': {'notifications': 2}
        })
        # Hide the "Chrome is being controlled by automated software" bar.
        chrome_options.add_argument('disable-infobars')
        # Spoof a random desktop user agent.
        # Fix: random.choices returns a *list*, which formatted into an invalid
        # header like "user-agent=['Mozilla/...']"; random.choice yields one string.
        chrome_options.add_argument("user-agent={}".format(random.choice(self.userAgent)))
        # Uncomment to run headless:
        # chrome_options.add_argument('headless')
        return webdriver.Chrome(options=chrome_options)

    def run(self):
        '''
        Entry point: ask for a blogger home page, then loop forever spawning
        three concurrent browser threads that each visit the page.
        '''
        blogerUrl = input('请输入博主主页地址:') or 'https://autofelix.blog.csdn.net'
        while True:
            workers = [
                threading.Thread(target=self.start, args=(self.init_driver(), blogerUrl,))
                for _ in range(3)
            ]
            for worker in workers:
                worker.start()
            for worker in workers:
                worker.join()

    def start(self, driver, url):
        '''
        Visit *url* and click through the three newest articles.

        The driver is always quit, even when a click fails.
        '''
        driver.get(url)
        time.sleep(3)
        # Selector matches the redesigned CSDN profile page layout.
        articles = driver.find_elements_by_class_name('blog-list-box')[0:3]
        try:
            for article in articles:
                article.find_element_by_tag_name('h4').click()
                time.sleep(5)
        except Exception as e:
            print(e)
        finally:
            driver.quit()
if __name__ == "__main__":
    # Greet the user, then loop forever spawning reader threads.
    csdn().hello().run()
| [
"selenium.webdriver.Chrome",
"selenium.webdriver.ChromeOptions",
"random.choices",
"time.sleep"
] | [((2420, 2445), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (2443, 2445), False, 'from selenium import webdriver\n'), ((2921, 2961), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'chrome_options'}), '(options=chrome_options)\n', (2937, 2961), False, 'from selenium import webdriver\n'), ((3678, 3691), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3688, 3691), False, 'import time, random, threading\n'), ((2801, 2831), 'random.choices', 'random.choices', (['self.userAgent'], {}), '(self.userAgent)\n', (2815, 2831), False, 'import time, random, threading\n'), ((3925, 3938), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3935, 3938), False, 'import time, random, threading\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 13:43:01 2016
@author: fergal
A series of metrics to quantify the noise in a lightcurve:
Includes:
x sgCdpp
x Marshall's noise estimate
o An FT based estimate of 6 hour artifact strength.
o A per thruster firing estimate of 6 hour artifact strength.
$Id$
$URL$
"""
__version__ = "$Id$"
__URL__ = "$URL$"
from scipy.signal import savgol_filter
import matplotlib.pyplot as mp
import numpy as np
import fft
keplerLongCadence_s = 1765.4679
keplerLongCadence_days = keplerLongCadence_s / float(86400)
def computeRollTweakAmplitude(y, nHarmonics = 3, tweakPeriod_days = .25, \
    expTime_days=None, plot=False):
    """Compute strength of roll tweak artifact in K2 data with an FT approach.

    Takes the FT of the lightcurve and compares the amplitude at each
    harmonic of the thruster-firing frequency to the local median amplitude.

    Inputs:
    -----------
    y
        (1d numpy array) flux time series to analyse.

    Optional Inputs:
    -----------------
    nHarmonics
        (int) number of harmonics of the tweak frequency to average over.
    tweakPeriod_days
        (float) period of the roll tweak; default 0.25 days (6 hours).
    expTime_days
        (float) cadence of the data; defaults to the Kepler long cadence.
    plot
        Show a diagnostic plot

    Returns:
    --------
    float indicating strength of correction. A value of 1 means the
    amplitude of the tweak is approx equal to the strength of all other
    signals in the FT.
    """
    if expTime_days is None:
        expTime_days = keplerLongCadence_days

    #computes FT with frequencies in cycles per days
    # NOTE(review): `fft` here is the local module, not numpy.fft.
    ft = fft.computeFft(y, expTime_days)

    #Thruster firings every 6 hours
    artifactFreq_cd = 1/tweakPeriod_days  #cycles per day

    if plot:
        mp.clf()
        mp.plot(ft[:,0], 1e6*ft[:,1], 'b-')

    metric = 0
    nPtsForMed = 50
    for i in range(1, nHarmonics+1):
        # Nearest FT bin to the i-th harmonic of the tweak frequency.
        wh = np.argmin( np.fabs(ft[:,0] - i*artifactFreq_cd))
        # Local median amplitude around that bin normalises the peak.
        med = np.median(ft[wh-nPtsForMed:wh+nPtsForMed, 1])
        metric += ft[wh, 1] / med

        if plot:
            mp.axvline(i*artifactFreq_cd, color='m')

    return metric / float(nHarmonics)
def computeSgCdpp_ppm(y, transitDuration_cadences=13, plot=False):
    """Estimate 6hr CDPP using Jeff Van Cleve's Savitzky-Golay technique.

    Detrend the flux with a Savitzky-Golay filter, smooth the residual
    with a boxcar the length of a transit, then convert the robust
    scatter of the smoothed residual into a noise estimate.

    Inputs:
    ----------
    y
        (1d numpy array) normalised flux to calculate noise from. Flux
        should have a mean of zero and be in units of fractional amplitude.
        Note: Bad data in input will skew result. Some filtering of
        outliers is performed, but Nan's or Infs will not be caught.

    Optional Inputs:
    -----------------
    transitDuration_cadences
        (int) Adjust the assumed transit width, in cadences. Default is
        13, which corresponds to a 6.5 hour transit in K2
    plot
        Show a diagnostic plot

    Returns:
    ------------
    Estimated noise in parts per million.

    Notes:
    -------------
    Ported from
    svn+ssh://murzim/repo/so/trunk/Develop/jvc/common/compute_SG_noise.m
    by Jeff Van Cleve
    """
    # Fixed parameters chosen for the original algorithm; do not tune.
    window = 101
    polyorder = 2
    noiseNorm = 1.40

    # Name change for consistency with original algorithm
    cadencesPerTransit = transitDuration_cadences
    if cadencesPerTransit < 4:
        raise ValueError("Cadences per transit must be >= 4")

    if len(y) < window:
        raise ValueError("Can't compute CDPP for timeseries with fewer points than defined window (%i points)" %(window))

    trend = savgol_filter(y, window_length=window, polyorder=polyorder)
    residual = y - trend

    # Boxcar smoothing over one transit duration
    boxcar = np.ones(cadencesPerTransit)/float(cadencesPerTransit)
    smoothed = np.convolve(residual, boxcar, mode='same')

    if plot:
        mp.clf()
        mp.plot(y, 'ko')
        mp.plot(trend, 'r-')
        mp.plot(smoothed, 'g.')

    return noiseNorm * robustStd(smoothed, 1) * 1e6
def estimateScatterWithMarshallMethod(flux, plot=False):
    """Estimate the typical scatter in a lightcurve.

    Uses the same method as Marshall (Mullally et al 2016 submitted):
    first-difference the flux, sigma clip, and convert the median
    absolute deviation of the survivors to an equivalent std.

    Inputs:
    ----------
    flux
        (np 1d array). Flux to measure scatter of. Need not have
        zero mean.

    Optional Inputs:
    -----------------
    plot
        Show a diagnostic plot

    Returns:
    ------------
    (float) scatter of data in the same units as in the input ``flux``

    Notes:
    ----------
    Algorithm is reasonably sensitive to outliers. For best results
    use outlier rejection on your lightcurve before computing scatter.
    Nan's and infs in lightcurve will propagate to the return value.
    """
    deltas = np.diff(flux)

    # Remove egregious outliers. Shouldn't make much difference
    bad = sigmaClip(deltas, 5)
    deltas = deltas[~bad]

    centre = np.mean(deltas)
    mad = np.median(np.fabs(deltas - centre))
    std = 1.4826 * mad  # MAD -> std for a Gaussian

    if plot:
        mp.clf()
        mp.plot(flux, 'ko')
        mp.plot(deltas, 'r.')
        mp.figure(2)
        mp.clf()
        bins = np.linspace(-3000, 3000, 61)
        mp.hist(1e6*deltas, bins=bins, ec="none")
        mp.xlim(-3000, 3000)
        mp.axvline(-1e6*float(std/np.sqrt(2)), color='r')
        mp.axvline(1e6*float(std/np.sqrt(2)), color='r')

    # std is the rms of the differences; the scatter of a single point
    # is 1/sqrt(2) of that value.
    return float(std/np.sqrt(2))
def singlePointDifferenceSigmaClip(a, nSigma=4, maxIter=1e4, initialClip=None):
    """Iteratively find and remove outliers in first derivative

    If a dataset can be modeled as a constant offset + noise + outliers,
    those outliers can be found and rejected with a sigma-clipping approach.

    If the data contains some time-varying signal, this signal must be removed
    before applying a sigma clip. This function removes the signal by applying
    a single point difference.

    The function computes a[i+1] - a[i], and sigma clips the result. Slowly
    varying trends will have single point differences that are dominated by noise,
    but outliers have strong first derivatives and will show up strongly in this
    metric.

    Inputs:
    ----------
    a
        (1d numpy array) Array to be cleaned
    nSigma
        (float) Threshold to cut at. 5 is typically a good value for
        most arrays found in practice.

    Optional Inputs:
    -------------------
    maxIter
        (int) Maximum number of iterations
    initialClip
        (1d boolean array) If an element of initialClip is set to True,
        that value is treated as a bad value in the first iteration, and
        not included in the computation of the mean and std.

    Returns:
    ------------
    1d numpy array. Where set to True, the corresponding element of a
    is an outlier.
    """
    # NOTE: the scatter of the single point difference is sqrt(2) larger
    # than in the initial lightcurve.  The original code computed
    # nSigma/sqrt(2) here but never used it (dead variable, now removed);
    # the clip below is at nSigma of the difference series' own scatter.
    diff1 = np.roll(a, -1) - a
    diff1[-1] = 0  # wraps around; a[-1] not necessarily equal to a[0]
    idx1 = sigmaClip(diff1, nSigma, maxIter, initialClip)

    diff2 = np.roll(a, 1) - a
    diff2[0] = 0  # same wrap-around caveat at the front
    idx2 = sigmaClip(diff2, nSigma, maxIter, initialClip)

    flags = idx1 & idx2

    # This bit of magic ensures only single point outliers are marked,
    # not strong trends in the data. It insists that the previous point
    # in the difference time series is an outlier in the opposite direction,
    # otherwise the point is considered unflagged. This prevents marking
    # transits as bad data.
    outlierIdx = flags
    outlierIdx &= np.roll(idx1, 1)
    outlierIdx &= (np.roll(diff1, 1) * diff1 < 0)
    return outlierIdx
def sigmaClip(y, nSigma, maxIter=1e4, initialClip=None):
    """Iteratively find and remove outliers

    Find outliers by identifying all points more than **nSigma** from
    the mean value. Then recalculate the mean and std and repeat until
    no more outliers are found.

    Inputs:
    ----------
    y
        (1d numpy array) Array to be cleaned
    nSigma
        (float) Threshold to cut at. 5 is typically a good value for
        most arrays found in practice.

    Optional Inputs:
    -------------------
    maxIter
        (int) Maximum number of iterations
    initialClip
        (1d boolean array) If an element of initialClip is set to True,
        that value is treated as a bad value in the first iteration, and
        not included in the computation of the mean and std.

    Returns:
    ------------
    1d boolean numpy array. Where set to True, the corresponding element
    of y is an outlier.
    """
    idx = initialClip
    if initialClip is None:
        idx = np.zeros( len(y), dtype=bool)

    assert(len(idx) == len(y))

    oldNumClipped = np.sum(idx)
    for i in range(int(maxIter)):
        # Hoist the masked view: the original computed y[~idx] twice
        # per iteration.
        good = y[~idx]
        mean = np.nanmean(good)
        std = np.nanstd(good)

        newIdx = np.fabs(y-mean) > nSigma*std
        newIdx = np.logical_or(idx, newIdx)
        newNumClipped = np.sum(newIdx)

        # Converged: no additional points were clipped this pass
        if newNumClipped == oldNumClipped:
            return newIdx

        oldNumClipped = newNumClipped
        idx = newIdx
    return idx
def robustMean(y, percent):
    """Compute the mean of the percent..100-percent percentile points.

    A fast, and typically good enough, estimate of the mean in the
    presence of outliers. Non-finite values are discarded first.
    """
    finite = y[np.isfinite(y)]
    ordered = np.sort(finite)
    num = len(ordered)
    lwr = int( percent/100. * num)
    upr = int( (100-percent)/100. * num)
    return np.mean(ordered[lwr:upr])
def robustStd(y, percent):
    """Compute a robust standard deviation with JVC's technique.

    Discards the lowest and highest *percent* of the (finite) values and
    computes the std of what remains. Used by computeSgCdpp() to match
    the behaviour of Jeff Van Cleve's original algorithm.

    Taken from
    svn+ssh://murzim/repo/so/trunk/Develop/jvc/common/robust_std.m
    """
    finite = y[np.isfinite(y)]
    ordered = np.sort(finite)
    num = len(ordered)
    lwr = int( percent/100. * num)
    upr = int( (100-percent)/100. * num)
    return np.std(ordered[lwr:upr])
| [
"numpy.convolve",
"matplotlib.pyplot.hist",
"numpy.sqrt",
"scipy.signal.savgol_filter",
"numpy.nanmean",
"numpy.isfinite",
"fft.computeFft",
"numpy.mean",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.linspace",
"numpy.nanstd",
"numpy.ones",
"numpy.std",
"matplotlib.pyplot.xlim",
"num... | [((1197, 1228), 'fft.computeFft', 'fft.computeFft', (['y', 'expTime_days'], {}), '(y, expTime_days)\n', (1211, 1228), False, 'import fft\n'), ((3608, 3667), 'scipy.signal.savgol_filter', 'savgol_filter', (['y'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(y, window_length=window, polyorder=polyorder)\n', (3621, 3667), False, 'from scipy.signal import savgol_filter\n'), ((3775, 3818), 'numpy.convolve', 'np.convolve', (['detrend', 'filtered'], {'mode': '"""same"""'}), "(detrend, filtered, mode='same')\n", (3786, 3818), True, 'import numpy as np\n'), ((4759, 4772), 'numpy.diff', 'np.diff', (['flux'], {}), '(flux)\n', (4766, 4772), True, 'import numpy as np\n'), ((4900, 4913), 'numpy.mean', 'np.mean', (['diff'], {}), '(diff)\n', (4907, 4913), True, 'import numpy as np\n'), ((7666, 7682), 'numpy.roll', 'np.roll', (['idx1', '(1)'], {}), '(idx1, 1)\n', (7673, 7682), True, 'import numpy as np\n'), ((8912, 8923), 'numpy.sum', 'np.sum', (['idx'], {}), '(idx)\n', (8918, 8923), True, 'import numpy as np\n'), ((9772, 9797), 'numpy.mean', 'np.mean', (['ySorted[lwr:upr]'], {}), '(ySorted[lwr:upr])\n', (9779, 9797), True, 'import numpy as np\n'), ((10392, 10416), 'numpy.std', 'np.std', (['ySorted[lwr:upr]'], {}), '(ySorted[lwr:upr])\n', (10398, 10416), True, 'import numpy as np\n'), ((1348, 1356), 'matplotlib.pyplot.clf', 'mp.clf', ([], {}), '()\n', (1354, 1356), True, 'import matplotlib.pyplot as mp\n'), ((1365, 1410), 'matplotlib.pyplot.plot', 'mp.plot', (['ft[:, 0]', '(1000000.0 * ft[:, 1])', '"""b-"""'], {}), "(ft[:, 0], 1000000.0 * ft[:, 1], 'b-')\n", (1372, 1410), True, 'import matplotlib.pyplot as mp\n'), ((1551, 1600), 'numpy.median', 'np.median', (['ft[wh - nPtsForMed:wh + nPtsForMed, 1]'], {}), '(ft[wh - nPtsForMed:wh + nPtsForMed, 1])\n', (1560, 1600), True, 'import numpy as np\n'), ((3706, 3733), 'numpy.ones', 'np.ones', (['cadencesPerTransit'], {}), '(cadencesPerTransit)\n', (3713, 3733), True, 'import numpy as np\n'), ((3841, 3849), 
'matplotlib.pyplot.clf', 'mp.clf', ([], {}), '()\n', (3847, 3849), True, 'import matplotlib.pyplot as mp\n'), ((3858, 3874), 'matplotlib.pyplot.plot', 'mp.plot', (['y', '"""ko"""'], {}), "(y, 'ko')\n", (3865, 3874), True, 'import matplotlib.pyplot as mp\n'), ((3883, 3903), 'matplotlib.pyplot.plot', 'mp.plot', (['trend', '"""r-"""'], {}), "(trend, 'r-')\n", (3890, 3903), True, 'import matplotlib.pyplot as mp\n'), ((3912, 3935), 'matplotlib.pyplot.plot', 'mp.plot', (['smoothed', '"""g."""'], {}), "(smoothed, 'g.')\n", (3919, 3935), True, 'import matplotlib.pyplot as mp\n'), ((4934, 4954), 'numpy.fabs', 'np.fabs', (['(diff - mean)'], {}), '(diff - mean)\n', (4941, 4954), True, 'import numpy as np\n'), ((4997, 5005), 'matplotlib.pyplot.clf', 'mp.clf', ([], {}), '()\n', (5003, 5005), True, 'import matplotlib.pyplot as mp\n'), ((5014, 5033), 'matplotlib.pyplot.plot', 'mp.plot', (['flux', '"""ko"""'], {}), "(flux, 'ko')\n", (5021, 5033), True, 'import matplotlib.pyplot as mp\n'), ((5042, 5061), 'matplotlib.pyplot.plot', 'mp.plot', (['diff', '"""r."""'], {}), "(diff, 'r.')\n", (5049, 5061), True, 'import matplotlib.pyplot as mp\n'), ((5070, 5082), 'matplotlib.pyplot.figure', 'mp.figure', (['(2)'], {}), '(2)\n', (5079, 5082), True, 'import matplotlib.pyplot as mp\n'), ((5091, 5099), 'matplotlib.pyplot.clf', 'mp.clf', ([], {}), '()\n', (5097, 5099), True, 'import matplotlib.pyplot as mp\n'), ((5115, 5143), 'numpy.linspace', 'np.linspace', (['(-3000)', '(3000)', '(61)'], {}), '(-3000, 3000, 61)\n', (5126, 5143), True, 'import numpy as np\n'), ((5152, 5199), 'matplotlib.pyplot.hist', 'mp.hist', (['(1000000.0 * diff)'], {'bins': 'bins', 'ec': '"""none"""'}), "(1000000.0 * diff, bins=bins, ec='none')\n", (5159, 5199), True, 'import matplotlib.pyplot as mp\n'), ((5201, 5221), 'matplotlib.pyplot.xlim', 'mp.xlim', (['(-3000)', '(3000)'], {}), '(-3000, 3000)\n', (5208, 5221), True, 'import matplotlib.pyplot as mp\n'), ((6981, 6991), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', 
(6988, 6991), True, 'import numpy as np\n'), ((7009, 7023), 'numpy.roll', 'np.roll', (['a', '(-1)'], {}), '(a, -1)\n', (7016, 7023), True, 'import numpy as np\n'), ((7191, 7204), 'numpy.roll', 'np.roll', (['a', '(1)'], {}), '(a, 1)\n', (7198, 7204), True, 'import numpy as np\n'), ((8973, 8992), 'numpy.nanmean', 'np.nanmean', (['y[~idx]'], {}), '(y[~idx])\n', (8983, 8992), True, 'import numpy as np\n'), ((9007, 9025), 'numpy.nanstd', 'np.nanstd', (['y[~idx]'], {}), '(y[~idx])\n', (9016, 9025), True, 'import numpy as np\n'), ((9090, 9116), 'numpy.logical_or', 'np.logical_or', (['idx', 'newIdx'], {}), '(idx, newIdx)\n', (9103, 9116), True, 'import numpy as np\n'), ((9141, 9155), 'numpy.sum', 'np.sum', (['newIdx'], {}), '(newIdx)\n', (9147, 9155), True, 'import numpy as np\n'), ((1498, 1537), 'numpy.fabs', 'np.fabs', (['(ft[:, 0] - i * artifactFreq_cd)'], {}), '(ft[:, 0] - i * artifactFreq_cd)\n', (1505, 1537), True, 'import numpy as np\n'), ((1661, 1703), 'matplotlib.pyplot.axvline', 'mp.axvline', (['(i * artifactFreq_cd)'], {'color': '"""m"""'}), "(i * artifactFreq_cd, color='m')\n", (1671, 1703), True, 'import matplotlib.pyplot as mp\n'), ((5445, 5455), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5452, 5455), True, 'import numpy as np\n'), ((7702, 7719), 'numpy.roll', 'np.roll', (['diff1', '(1)'], {}), '(diff1, 1)\n', (7709, 7719), True, 'import numpy as np\n'), ((9044, 9061), 'numpy.fabs', 'np.fabs', (['(y - mean)'], {}), '(y - mean)\n', (9051, 9061), True, 'import numpy as np\n'), ((9643, 9657), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (9654, 9657), True, 'import numpy as np\n'), ((10263, 10277), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (10274, 10277), True, 'import numpy as np\n'), ((5256, 5266), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5263, 5266), True, 'import numpy as np\n'), ((5313, 5323), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5320, 5323), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# test_parse_data.py -
#
# Author: <NAME> <<EMAIL>>
#
import argparse
from fsl.utils import parse_data, tempdir, path
import os.path as op
from fsl.data.vtk import VTKMesh
from fsl.data.gifti import GiftiMesh
from fsl.data.image import Image
from fsl.data.atlases import Atlas
from pytest import raises
from .test_image import make_image
import os
import pytest
datadir = op.join(op.dirname(__file__), 'testdata')
def test_mesh():
    """parse_data.Mesh must load VTK and GIFTI surfaces, with or without
    an explicit file extension, and reject files that are not meshes."""
    parser = argparse.ArgumentParser("Reads a VTK file")
    parser.add_argument("mesh", type=parse_data.Mesh)

    # (filename, expected mesh class) pairs that must parse successfully
    good_cases = (('test_mesh.vtk',    VTKMesh),
                  ('test_mesh',        VTKMesh),
                  ('example.surf.gii', GiftiMesh),
                  ('example',          GiftiMesh))
    for fname, expected in good_cases:
        parsed = parser.parse_args([op.join(datadir, fname)])
        assert isinstance(parsed.mesh, expected)

    # Non-mesh / missing files must make argparse bail out
    for fname in ('test_mesh_fake.vtk', 'example.shape.gii'):
        with raises(SystemExit):
            parser.parse_args([op.join(datadir, fname)])
def test_image():
    """parse_data.Image must accept each NIFTI variant it was written with,
    resolve paired .hdr/.img names, and reject extensions belonging to a
    different variant or ambiguous prefixes."""
    with tempdir.tempdir() as testdir:
        image_parser = argparse.ArgumentParser("Reads an image")
        image_parser.add_argument("image", type=parse_data.Image)
        # filetype 0 = NIFTI_PAIR (.hdr/.img), 1/2 = single-file variants
        # (see make_image in test_image)
        for filetype in range(3):
            filename = op.join(testdir, 'image%r' % filetype)
            make_image(filename, filetype)
            # Bare prefix must resolve to the image that was just written
            args = image_parser.parse_args([filename])
            assert isinstance(args.image, Image)
            if filetype == 0:
                # Pair files: both halves are acceptable arguments ...
                args = image_parser.parse_args([filename + '.hdr'])
                assert isinstance(args.image, Image)
                args = image_parser.parse_args([filename + '.img'])
                assert isinstance(args.image, Image)
                # ... but single-file extensions must be rejected
                with raises(SystemExit):
                    image_parser.parse_args([filename + '.nii'])
                with raises(SystemExit):
                    image_parser.parse_args([filename + '.nii.gz'])
            else:
                args = image_parser.parse_args([filename + '.nii'])
                assert isinstance(args.image, Image)
                # Pair-file extensions must be rejected for single files
                with raises(SystemExit):
                    image_parser.parse_args([filename + '.img'])
                with raises(SystemExit):
                    image_parser.parse_args([filename + '.hdr'])
                with raises(SystemExit):
                    image_parser.parse_args([filename + '.nii.gz'])
            args = None
        # An ambiguous prefix (two images sharing the name) must error
        double_filename = op.join(testdir, 'image1')
        make_image(double_filename, 0)
        with raises(SystemExit):
            image_parser.parse_args([double_filename])
def test_image_out():
    """parse_data.ImageOut must append the FSLOUTPUTTYPE extension to bare
    names, leave recognised image extensions alone, and append to names
    whose suffix is not an image extension (e.g. .surf.gii)."""
    parser = argparse.ArgumentParser("Reads an image")
    parser.add_argument("image_out", type=parse_data.ImageOut)

    def check(cli_arg, expected_name, expected_ext):
        # Parse one output-file argument and verify both extension and name
        parsed = parser.parse_args([cli_arg])
        assert path.hasExt(parsed.image_out, expected_ext)
        assert parsed.image_out == expected_name

    for fsl_output_type, ext in (('NIFTI',      '.nii'),
                                 ('NIFTI_PAIR', '.img'),
                                 ('NIFTI_GZ',   '.nii.gz')):
        os.environ['FSLOUTPUTTYPE'] = fsl_output_type
        check('test',          'test' + ext,          ext)
        check('test.nii',      'test.nii',            '.nii')
        check('test.nii.gz',   'test.nii.gz',         '.nii.gz')
        check('test.img',      'test.img',            '.img')
        check('test.surf.gii', 'test.surf.gii' + ext, ext)
@pytest.mark.fsltest
def test_atlas():
    """parse_data.Atlas must resolve a known atlas id and reject unknowns."""
    parser = argparse.ArgumentParser('reads an atlas')
    parser.add_argument('atlas', type=parse_data.Atlas)

    parsed = parser.parse_args(['cerebellum_mniflirt'])
    assert isinstance(parsed.atlas, Atlas)

    with raises(SystemExit):
        parser.parse_args(['fake'])
| [
"fsl.utils.tempdir.tempdir",
"fsl.utils.path.hasExt",
"argparse.ArgumentParser",
"os.path.join",
"os.path.dirname",
"pytest.raises"
] | [((407, 427), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (417, 427), True, 'import os.path as op\n'), ((478, 521), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Reads a VTK file"""'], {}), "('Reads a VTK file')\n", (501, 521), False, 'import argparse\n'), ((602, 635), 'os.path.join', 'op.join', (['datadir', '"""test_mesh.vtk"""'], {}), "(datadir, 'test_mesh.vtk')\n", (609, 635), True, 'import os.path as op\n'), ((750, 779), 'os.path.join', 'op.join', (['datadir', '"""test_mesh"""'], {}), "(datadir, 'test_mesh')\n", (757, 779), True, 'import os.path as op\n'), ((894, 930), 'os.path.join', 'op.join', (['datadir', '"""example.surf.gii"""'], {}), "(datadir, 'example.surf.gii')\n", (901, 930), True, 'import os.path as op\n'), ((1047, 1074), 'os.path.join', 'op.join', (['datadir', '"""example"""'], {}), "(datadir, 'example')\n", (1054, 1074), True, 'import os.path as op\n'), ((1191, 1229), 'os.path.join', 'op.join', (['datadir', '"""test_mesh_fake.vtk"""'], {}), "(datadir, 'test_mesh_fake.vtk')\n", (1198, 1229), True, 'import os.path as op\n'), ((1328, 1365), 'os.path.join', 'op.join', (['datadir', '"""example.shape.gii"""'], {}), "(datadir, 'example.shape.gii')\n", (1335, 1365), True, 'import os.path as op\n'), ((3073, 3114), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Reads an image"""'], {}), "('Reads an image')\n", (3096, 3114), False, 'import argparse\n'), ((4233, 4274), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""reads an atlas"""'], {}), "('reads an atlas')\n", (4256, 4274), False, 'import argparse\n'), ((1239, 1257), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (1245, 1257), False, 'from pytest import raises\n'), ((1375, 1393), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (1381, 1393), False, 'from pytest import raises\n'), ((1472, 1489), 'fsl.utils.tempdir.tempdir', 'tempdir.tempdir', ([], {}), '()\n', (1487, 1489), False, 'from fsl.utils 
import parse_data, tempdir, path\n'), ((1525, 1566), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Reads an image"""'], {}), "('Reads an image')\n", (1548, 1566), False, 'import argparse\n'), ((2876, 2902), 'os.path.join', 'op.join', (['testdir', '"""image1"""'], {}), "(testdir, 'image1')\n", (2883, 2902), True, 'import os.path as op\n'), ((3452, 3490), 'fsl.utils.path.hasExt', 'path.hasExt', (['args.image_out', 'extension'], {}), '(args.image_out, extension)\n', (3463, 3490), False, 'from fsl.utils import parse_data, tempdir, path\n'), ((3612, 3647), 'fsl.utils.path.hasExt', 'path.hasExt', (['args.image_out', '""".nii"""'], {}), "(args.image_out, '.nii')\n", (3623, 3647), False, 'from fsl.utils import parse_data, tempdir, path\n'), ((3764, 3802), 'fsl.utils.path.hasExt', 'path.hasExt', (['args.image_out', '""".nii.gz"""'], {}), "(args.image_out, '.nii.gz')\n", (3775, 3802), False, 'from fsl.utils import parse_data, tempdir, path\n'), ((3919, 3954), 'fsl.utils.path.hasExt', 'path.hasExt', (['args.image_out', '""".img"""'], {}), "(args.image_out, '.img')\n", (3930, 3954), False, 'from fsl.utils import parse_data, tempdir, path\n'), ((4073, 4111), 'fsl.utils.path.hasExt', 'path.hasExt', (['args.image_out', 'extension'], {}), '(args.image_out, extension)\n', (4084, 4111), False, 'from fsl.utils import parse_data, tempdir, path\n'), ((4449, 4467), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (4455, 4467), False, 'from pytest import raises\n'), ((1691, 1729), 'os.path.join', 'op.join', (['testdir', "('image%r' % filetype)"], {}), "(testdir, 'image%r' % filetype)\n", (1698, 1729), True, 'import os.path as op\n'), ((2955, 2973), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (2961, 2973), False, 'from pytest import raises\n'), ((2170, 2188), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (2176, 2188), False, 'from pytest import raises\n'), ((2276, 2294), 'pytest.raises', 'raises', (['SystemExit'], 
{}), '(SystemExit)\n', (2282, 2294), False, 'from pytest import raises\n'), ((2525, 2543), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (2531, 2543), False, 'from pytest import raises\n'), ((2631, 2649), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (2637, 2649), False, 'from pytest import raises\n'), ((2737, 2755), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (2743, 2755), False, 'from pytest import raises\n')] |
# Copyright 2020 ASL19 Organization
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
class Region(models.Model):
    """
    Model to store the different Regions shown on the
    landing page.
    """
    # Human-readable region name; also used as the string
    # representation below.
    name = models.CharField(
        max_length=128)

    def __str__(self):
        return self.name
| [
"django.db.models.CharField"
] | [((729, 761), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (745, 761), False, 'from django.db import models\n')] |
#!/usr/bin/env python
import base64, json, pika
from xml.etree.ElementTree import Element, tostring, fromstring
# RabbitMQ Connection Information
# NOTE(review): RABBIT_HOST is assigned twice; the second assignment wins,
# making the vcd-cell1 value dead. Confirm which host is intended.
RABBIT_HOST = 'vcd-cell1.lab.orange.sk'
RABBIT_HOST = 'oblak.orange.sk'
RABBIT_PORT = '5672'
RABBIT_USER = 'vcdext'
RABBIT_PASSWORD = '<PASSWORD>.'

# Exchange and Queue we will subscribe to
RABBIT_EXCHANGE = 'vcdext'
RABBIT_ROUTINGKEY = 'gcp-ticketing'

#Stores the tickets in use by the system.
# In-memory ticket store keyed by vCD organisation id. Each entry holds the
# org's API href, its display name, and the list of ticket dicts.
tickets = {'9aee51e8-654e-49a8-8dab-3fdbf00a21ae' :\
            {'href':'/api/org/9aee51e8-654e-49a8-8dab-3fdbf00a21ae',
             'name':'Coke',
             'tickets' : [{'ticket_id':1000,
                           'user_id': '44fbd6f9-7a76-4bca-b273-3536b181ad09',
                           'href':'/api/org/9aee51e8-654e-49a8-8dab-3fdbf00a21ae/ticketing/1000',
                           'ticket_msg': "I am opening a ticket!",
                           'status' : "open"},
                          {'ticket_id':1001,
                           'user_id': '44fbd6f9-7a76-4bca-b273-3536b181ad09',
                           'href':'/api/org/9aee51e8-654e-49a8-8dab-3fdbf00a21ae/ticketing/1001',
                           'ticket_msg': "My server is slow!",
                           'status' : "open"}]},\
           '2ce0365d-4d7d-4c15-a603-9257ea338c99' :\
            {'href':'/api/org/2ce0365d-4d7d-4c15-a603-9257ea338c99',
             'name':'Pepsi',
             'tickets' : [{'ticket_id':1002,
                           'user_id': '44fbd6f9-7a76-4bca-b273-3536b181ad09',
                           'href':'/api/org/9aee51e8-654e-49a8-8dab-3fdbf00a21ae/ticketing/1002',
                           'ticket_msg': "Can I get some VSAN?",
                           'status' : "open"}]}}

# Last ticket id handed out; _create_ticket() increments before use.
ticket_id = 2000
# RabbitMQ channel used to publish responses; assigned by main().
pub_channel = None
def _dict_to_xml(tag, in_dict):
"""
Turn a simple dict of key/value pairs into XML
"""
elem = Element(tag)
for key, val in in_dict.items():
child = Element(key)
child.text = str(val)
elem.append(child)
return tostring(elem)
def _xml_to_dict(xml_str):
"""
Turn a very set structure of xml to a dictionary.
"""
root = fromstring(xml_str)
ret_dict = {}
for child in root:
ret_dict[child.tag] = child.text
return ret_dict
def _create_ticket(user_id, msg, uri):
    """
    Allocate the next ticket id and build a fresh, open ticket record.
    """
    global ticket_id
    ticket_id += 1
    return {
        'ticket_id': ticket_id,
        'href': "%s/%s" % (uri, str(ticket_id)),
        'user_id': user_id,
        'ticket_msg': msg,
        'status': "open",
    }
def get_org_tickets(org_id):
    """
    Render every ticket belonging to the organisation as one
    <tickets> XML document, or a not-found message.
    """
    summaries = []
    for tick in tickets[org_id]['tickets']:
        # Expose only the public fields of each ticket
        summaries.append({'ticket_id': tick['ticket_id'],
                          'href': tick['href'],
                          'status': tick['status'],
                          'ticket_msg': tick['ticket_msg']})
    if not summaries:
        return "No tickets found."
    body = ''.join('\n\t' + _dict_to_xml('ticket', summary)
                   for summary in summaries)
    return '<tickets>%s\n</tickets>' % body
def get_ticket(org_id, sel_ticket_id):
    """
    Render a single ticket as XML, or a not-found message.
    """
    matches = [tick for tick in tickets[org_id]['tickets']
               if tick['ticket_id'] == sel_ticket_id]
    if len(matches) != 1:
        return "No ticket found."
    return _dict_to_xml('ticket', matches[0])
def post_new_ticket(org_id, user_id, msg, uri):
    """
    Create a new ticket, register it under the organisation and
    return its XML representation. Called on POST.
    """
    new_ticket = _create_ticket(user_id, msg, uri)
    tickets[org_id]['tickets'].append(new_ticket)
    return _dict_to_xml('ticket', new_ticket)
def delete_ticket(org_id, sel_ticket_id):
    """
    Delete an individual ticket and return the organisation's
    remaining ticket listing.

    The original implementation only returned from inside the loop, so an
    unknown ticket id made it fall through and return None -- which then
    crashed the caller on len(None). We now always return the listing.
    """
    org_tickets = tickets[org_id]['tickets']
    for idx, tick in enumerate(org_tickets):
        if tick['ticket_id'] == sel_ticket_id:
            org_tickets.pop(idx)
            break
    return get_org_tickets(org_id)
def update_ticket(org_id, sel_ticket_id, update_dict):
    """
    Apply update_dict to the matching ticket. Called on PUT.

    Returns the updated ticket's XML, or '' when no ticket matched.
    """
    result = ''
    for tick in tickets[org_id]['tickets']:
        if tick['ticket_id'] != sel_ticket_id:
            continue
        tick.update(update_dict)
        result = _dict_to_xml('ticket', tick)
    return result
def callback(ch, method, properties, body):
    """
    Handle every ticketing message received on the RabbitMQ exchange.

    The message body is a JSON list of [request, vcd-context]. The HTTP
    method and URI embedded in the request select the CRUD operation on
    the in-memory ticket store, and the response is published back on the
    exchange/queue named in the message properties.
    """
    print (' [!] Received a message!')
    temp = json.loads(body)
    body = temp[0]   # the proxied HTTP request
    vcd = temp[1]    # vCD context (user, org, ...)

    # URI looks like /api/org/<org_id>[/ticketing[/<ticket_id>]]
    req_uri = body['requestUri'].split('/api/org/')[1].split('/')
    org_id = req_uri[0]
    user_id = vcd['user'].split('user:')[1]
    # The trailing path element is a ticket id when it is numeric
    incoming_tick_id = int(req_uri[-1]) if req_uri[-1].isdigit() else None
    method = body['method']

    # The response body that we will send back to the client.
    rsp_body = ''
    status_code = 200
    if method == 'GET':
        if incoming_tick_id:
            rsp_body = get_ticket(org_id, incoming_tick_id)
        else:
            rsp_body = get_org_tickets(org_id)
    elif method == 'POST' and not incoming_tick_id:
        # Default response doubles as usage help for bad input
        rsp_body = 'Make sure you provide a message: \
<ticket>\n\t<ticket_msg>Your mess</ticket_msg>\n</ticket>'
        new_tick = _xml_to_dict(base64.b64decode(body['body']))
        #Only thing we care about is a msg, make sure it is there
        if new_tick.get('ticket_msg') != None:
            rsp_body = post_new_ticket(org_id, user_id, new_tick['ticket_msg'], body['requestUri'])
            status_code = 201
        else:
            # Bad input
            status_code = 400
    elif method == 'PUT' and incoming_tick_id:
        rsp_body = 'To update a ticket provide: \
<ticket>\n\t<ticket_msg>Your mess</ticket_msg>\n\t<status>open|closed</status>\n</ticket>'
        #Must have incoming_tick_id, and ticket_msg or status
        up_tick = _xml_to_dict(base64.b64decode(body['body']))
        if up_tick.get('ticket_msg') != None or up_tick.get('status') != None:
            #update the ticket
            rsp_body = update_ticket(org_id, incoming_tick_id, up_tick)
        else:
            # Bad input
            status_code = 400
    elif method == 'DELETE' and incoming_tick_id:
        rsp_body = delete_ticket(org_id, incoming_tick_id)
    else:
        #Method not supported.
        status_code = 405
        rsp_body = "ERROR: This method is not supported."

    # Build the response message to return
    # NOTE(review): base64.b64encode(rsp_body) takes a str here, which only
    # works on Python 2; Python 3 requires bytes -- confirm target runtime.
    rsp_msg = {'id':body['id'],
               'headers':{'Content-Type':body['headers']['Accept'],
                          'Content-Length':len(rsp_body)},
               'statusCode':status_code,
               'body':base64.b64encode(rsp_body),
               'request':False}
    # vCD sets unique correlation_id in every message sent to extension and the extension must set
    # the same value in the corresponding response.
    rsp_properties = pika.BasicProperties(correlation_id=properties.correlation_id)

    print ("\t Sending response...")
    # We send our response to the Exchange and queue that were specified in the received properties.
    pub_channel.basic_publish(properties.headers['replyToExchange'],
                              properties.reply_to,
                              json.dumps(rsp_msg),
                              rsp_properties)
    print (' [X] message handled')
def main():
    """
    Main function executed when script is run.

    Connects to RabbitMQ, declares the exchange/queue, wires up the
    callback() handler and blocks consuming ticketing requests.
    """
    print ("Starting ticketing...")
    # Connect to RabbitMQ
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=RABBIT_HOST, port=RABBIT_PORT,\
        credentials=pika.PlainCredentials(RABBIT_USER, RABBIT_PASSWORD)))
    # Create a channel to subscribe to the incoming messages.
    sub_channel = connection.channel()
    sub_channel.exchange_declare(exchange=RABBIT_EXCHANGE, exchange_type='direct', durable=True)
    sub_channel.queue_declare(queue=RABBIT_ROUTINGKEY, durable=True)
    sub_channel.queue_bind(exchange=RABBIT_EXCHANGE,
                           queue=RABBIT_ROUTINGKEY)
    # Create a channel for publishing messages back to the client.
    global pub_channel
    pub_channel = connection.channel()
    # Bind to the queue we will be listening on with a callback function.
    sub_channel.basic_consume(on_message_callback=callback,
                              queue=RABBIT_ROUTINGKEY,
                              auto_ack=True)
    # Start to continuously monitor the queue for messages.
    sub_channel.start_consuming()
    # NOTE(review): start_consuming() appears to block, so this banner is
    # only printed once consumption stops; it likely belongs before the
    # call above -- confirm intent before moving it.
    print (' [*] Waiting for messages on exchange %s. To exit press CTRL+C' % RABBIT_EXCHANGE)
if __name__ == '__main__':
main() | [
"json.loads",
"xml.etree.ElementTree.tostring",
"base64.b64encode",
"json.dumps",
"pika.PlainCredentials",
"base64.b64decode",
"xml.etree.ElementTree.Element",
"pika.BasicProperties",
"xml.etree.ElementTree.fromstring"
] | [((1661, 1673), 'xml.etree.ElementTree.Element', 'Element', (['tag'], {}), '(tag)\n', (1668, 1673), False, 'from xml.etree.ElementTree import Element, tostring, fromstring\n'), ((1808, 1822), 'xml.etree.ElementTree.tostring', 'tostring', (['elem'], {}), '(elem)\n', (1816, 1822), False, 'from xml.etree.ElementTree import Element, tostring, fromstring\n'), ((1932, 1951), 'xml.etree.ElementTree.fromstring', 'fromstring', (['xml_str'], {}), '(xml_str)\n', (1942, 1951), False, 'from xml.etree.ElementTree import Element, tostring, fromstring\n'), ((4242, 4258), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (4252, 4258), False, 'import base64, json, pika\n'), ((6670, 6732), 'pika.BasicProperties', 'pika.BasicProperties', ([], {'correlation_id': 'properties.correlation_id'}), '(correlation_id=properties.correlation_id)\n', (6690, 6732), False, 'import base64, json, pika\n'), ((1727, 1739), 'xml.etree.ElementTree.Element', 'Element', (['key'], {}), '(key)\n', (1734, 1739), False, 'from xml.etree.ElementTree import Element, tostring, fromstring\n'), ((6437, 6463), 'base64.b64encode', 'base64.b64encode', (['rsp_body'], {}), '(rsp_body)\n', (6453, 6463), False, 'import base64, json, pika\n'), ((7022, 7041), 'json.dumps', 'json.dumps', (['rsp_msg'], {}), '(rsp_msg)\n', (7032, 7041), False, 'import base64, json, pika\n'), ((5020, 5050), 'base64.b64decode', 'base64.b64decode', (["body['body']"], {}), "(body['body'])\n", (5036, 5050), False, 'import base64, json, pika\n'), ((7387, 7438), 'pika.PlainCredentials', 'pika.PlainCredentials', (['RABBIT_USER', 'RABBIT_PASSWORD'], {}), '(RABBIT_USER, RABBIT_PASSWORD)\n', (7408, 7438), False, 'import base64, json, pika\n'), ((5654, 5684), 'base64.b64decode', 'base64.b64decode', (["body['body']"], {}), "(body['body'])\n", (5670, 5684), False, 'import base64, json, pika\n')] |
from datetime import datetime
from uuid import uuid4
from typing import (
ClassVar,
TypeVar,
Union
)
from sqlalchemy import update as sa_update
from sqlalchemy.future import select as sa_select
from sqlalchemy.sql.elements import BinaryExpression
from .base import BaseRepository
from .types_ import ModelType
from ..errors import (
EmailInUpdateIsAlreadyTakenError,
EntityDoesNotExistError
)
from ..models import User
from ...schemas.entities.user import UserInUpdate
from ...services.security import UserPasswordService
__all__ = ['UsersRepository']
# actually, where statement emits <BinaryExpression>, but linter supposes comparing result as bool
WhereStatement = TypeVar('WhereStatement', bound=Union[BinaryExpression, bool])
class UsersRepository(BaseRepository):
    """Persistence operations for ``User`` rows: updates, confirmation, lookups."""
    model: ClassVar[ModelType] = User
    async def update_by_email(self, email: str, user_in_update: UserInUpdate) -> User:
        """Write the explicitly-set fields of *user_in_update* to the user with *email*.

        Raises EmailInUpdateIsAlreadyTakenError when a requested new email
        already belongs to another user.
        """
        values = self._exclude_unset_from_schema(user_in_update)
        if 'email' in values:
            taken = await self.check_email_is_taken(values['email'])
            if taken:
                raise EmailInUpdateIsAlreadyTakenError
            # A changed address must be re-confirmed.
            values.update(self._get_update_data_on_email_update())
        if 'password' in values:
            values.update(self._get_update_data_on_password_update(values['password']))
        statement = (
            sa_update(User)
            .where(User.email == email)
            .values(**values)
        )
        return await self._return_from_statement(statement)
    @staticmethod
    def _get_update_data_on_email_update() -> dict:
        """Columns that mark a freshly-changed email as unconfirmed."""
        return dict(
            is_email_confirmed=False,
            email_confirmed_at=None,
            email_confirmation_link=uuid4(),
        )
    @staticmethod
    def _get_update_data_on_password_update(password: str) -> dict:
        """Hash *password* and return the credential columns to persist."""
        hashed = UserPasswordService(User()).change_password(password)
        return dict(
            hashed_password=hashed.hashed_password,
            password_salt=hashed.password_salt,
        )
    async def confirm_by_email(self, email: str) -> User:
        """Mark the user identified by *email* as email-confirmed."""
        return await self._confirm_by_where_statement(User.email == email)
    async def confirm_by_link(self, link: str) -> User:
        """Mark the user owning confirmation *link* as email-confirmed."""
        return await self._confirm_by_where_statement(User.email_confirmation_link == link)
    async def _confirm_by_where_statement(self, where_statement: WhereStatement) -> User:
        """Execute the confirmation UPDATE restricted by *where_statement*."""
        statement = (
            sa_update(User)
            .where(where_statement)
            .values(**self._get_update_data_on_email_confirmation())
        )
        return await self._return_from_statement(statement)
    @staticmethod
    def _get_update_data_on_email_confirmation() -> dict:
        """Columns recording a successful email confirmation (UTC timestamp)."""
        return dict(
            is_email_confirmed=True,
            email_confirmed_at=datetime.utcnow(),
        )
    async def fetch_by_email(self, email: str) -> User:
        """Return the user with *email*; ``_fetch_entity`` raises when absent
        (EntityDoesNotExistError — see check_email_is_taken below)."""
        return await self._fetch_entity(sa_select(User).where(User.email == email))
    async def fetch_by_id(self, id_: int) -> ModelType:
        """Return the user with primary key *id_*."""
        return await self._fetch_entity(
            sa_select(self.model).where(self.model.id == id_)
        )
    async def check_email_is_taken(self, email: str) -> bool:
        """Return True when some user already owns *email*."""
        try:
            await self.fetch_by_email(email)
        except EntityDoesNotExistError:
            return False
        return True
| [
"datetime.datetime.utcnow",
"sqlalchemy.future.select",
"uuid.uuid4",
"sqlalchemy.update",
"typing.TypeVar"
] | [((694, 756), 'typing.TypeVar', 'TypeVar', (['"""WhereStatement"""'], {'bound': 'Union[BinaryExpression, bool]'}), "('WhereStatement', bound=Union[BinaryExpression, bool])\n", (701, 756), False, 'from typing import ClassVar, TypeVar, Union\n'), ((1705, 1712), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1710, 1712), False, 'from uuid import uuid4\n'), ((2748, 2765), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2763, 2765), False, 'from datetime import datetime\n'), ((2848, 2863), 'sqlalchemy.future.select', 'sa_select', (['User'], {}), '(User)\n', (2857, 2863), True, 'from sqlalchemy.future import select as sa_select\n'), ((3023, 3044), 'sqlalchemy.future.select', 'sa_select', (['self.model'], {}), '(self.model)\n', (3032, 3044), True, 'from sqlalchemy.future import select as sa_select\n'), ((1377, 1392), 'sqlalchemy.update', 'sa_update', (['User'], {}), '(User)\n', (1386, 1392), True, 'from sqlalchemy import update as sa_update\n'), ((2464, 2479), 'sqlalchemy.update', 'sa_update', (['User'], {}), '(User)\n', (2473, 2479), True, 'from sqlalchemy import update as sa_update\n')] |
from scipy import optimize
# Demo: recover NCSTR model parameters from a noisy impulse experiment.
# NOTE(review): assumes `rtdpy`, `np` (numpy) and `plt` (matplotlib.pyplot)
# are imported earlier in the full file — confirm before running standalone.
# Generate noisy data from NCSTR system with tau=10 and n=2
a = rtdpy.Ncstr(tau=10, n=2, dt=1, time_end=50)
xdata = a.time
noisefactor = 0.01
# Uniform noise in [-noisefactor/2, +noisefactor/2) added to the exit-age curve.
ydata = a.exitage \
    + (noisefactor * (np.random.rand(a.time.size) - 0.5))
def f(xdata, tau, n):
    """Model function for curve_fit: exit-age curve of an N-CSTR system."""
    a = rtdpy.Ncstr(tau=tau, n=n, dt=1, time_end=50)
    return a.exitage
# Give initial guess of tau=5 and n=4
popt, pcov = optimize.curve_fit(f, xdata, ydata, p0=[5, 4],
                              bounds=(0, np.inf))
plt.plot(xdata, ydata, label='Impulse Experiment')
# Re-simulate with the fitted parameters to overlay the fit on the data.
b = rtdpy.Ncstr(tau=popt[0], n=popt[1], dt=1, time_end=50)
plt.plot(xdata, b.exitage, label='RTD Fit')
plt.title(f'tau={popt[0]: .2f}, n={popt[1]: .2f}')
plt.legend()
"scipy.optimize.curve_fit"
] | [((397, 463), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['f', 'xdata', 'ydata'], {'p0': '[5, 4]', 'bounds': '(0, np.inf)'}), '(f, xdata, ydata, p0=[5, 4], bounds=(0, np.inf))\n', (415, 463), False, 'from scipy import optimize\n')] |
import os
from flask import Flask, flash, request, redirect, url_for, session, render_template
from werkzeug.utils import secure_filename
from common import utility, config
from repositories import db, cos
def admin():
    """Admin-only view: GET renders the admin page, POST imports an uploaded CSV.

    Access is restricted to the hard-coded admin mobile number stored in the
    session.  On POST the uploaded CSV is saved to the configured upload
    folder, imported into the table named in the form, and archived via
    ``cos.saveCSVFile``.  Always returns a Flask response object.
    """
    try:
        player_id = session.get("mobileno")
        # Only the hard-coded admin account may use this view.
        if player_id is None:
            flash("Please Login as admin.")
            return redirect(request.referrer)
        if player_id != "1000000000":
            flash("Please Login as admin.")
            return redirect(request.referrer)
        if request.method == 'GET':
            return render_template("admin.html")
        elif request.method == 'POST':
            # check if the post request has the file part
            if 'file' not in request.files:
                flash('No file part')
                return redirect(request.url)
            file = request.files['file']
            # if user does not select file, browser also
            # submit an empty part without filename
            if file.filename == '':
                flash('No CSV file selected')
                return redirect(request.url)
            if file and utility.allowed_file(file.filename):
                folder_path = config.get("DB_IMPORT", "UPLOAD_FOLDER")
                # secure_filename() strips path components / unsafe characters.
                filename = secure_filename(file.filename)
                # Build the saved path once so save and import use the same
                # string (previously the import path was built with "/").
                saved_path = os.path.join(folder_path, filename)
                file.save(saved_path)
                table_name = request.form.get('tablename')
                isHeader = request.form.get("chkHeader") is not None
                isCSVImportOK = db.import_csv_data(saved_path, table_name, isHeader)
                if isCSVImportOK:
                    flash("Data imported successfully")
                    upldmsg = cos.saveCSVFile(filename, folder_path)
                    flash(upldmsg)
                else:
                    flash("Unable to import data..")
                return redirect(request.url)
            else:
                flash("Please select a CSV file")
                return redirect(request.url)
    except Exception as ex:
        # BUG FIX: the previous handler printed and returned None, which makes
        # Flask raise "view function did not return a response".  Report the
        # failure to the user and return a real redirect instead.
        print("Unable to process request..", ex)
        flash("Unable to process request..")
        return redirect(request.url)
"flask.render_template",
"common.config.get",
"flask.session.get",
"flask.flash",
"repositories.cos.saveCSVFile",
"os.path.join",
"repositories.db.import_csv_data",
"flask.request.form.get",
"flask.redirect",
"common.utility.allowed_file",
"werkzeug.utils.secure_filename"
] | [((249, 272), 'flask.session.get', 'session.get', (['"""mobileno"""'], {}), "('mobileno')\n", (260, 272), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((315, 346), 'flask.flash', 'flash', (['"""Please Login as admin."""'], {}), "('Please Login as admin.')\n", (320, 346), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((366, 392), 'flask.redirect', 'redirect', (['request.referrer'], {}), '(request.referrer)\n', (374, 392), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((444, 475), 'flask.flash', 'flash', (['"""Please Login as admin."""'], {}), "('Please Login as admin.')\n", (449, 475), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((495, 521), 'flask.redirect', 'redirect', (['request.referrer'], {}), '(request.referrer)\n', (503, 521), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((578, 607), 'flask.render_template', 'render_template', (['"""admin.html"""'], {}), "('admin.html')\n", (593, 607), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((765, 786), 'flask.flash', 'flash', (['"""No file part"""'], {}), "('No file part')\n", (770, 786), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((810, 831), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (818, 831), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((1034, 1063), 'flask.flash', 'flash', (['"""No CSV file selected"""'], {}), "('No CSV file selected')\n", (1039, 1063), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((1087, 1108), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (1095, 1108), False, 'from 
flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((1146, 1181), 'common.utility.allowed_file', 'utility.allowed_file', (['file.filename'], {}), '(file.filename)\n', (1166, 1181), False, 'from common import utility, config\n'), ((1213, 1253), 'common.config.get', 'config.get', (['"""DB_IMPORT"""', '"""UPLOAD_FOLDER"""'], {}), "('DB_IMPORT', 'UPLOAD_FOLDER')\n", (1223, 1253), False, 'from common import utility, config\n'), ((1281, 1311), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (1296, 1311), False, 'from werkzeug.utils import secure_filename\n'), ((1405, 1434), 'flask.request.form.get', 'request.form.get', (['"""tablename"""'], {}), "('tablename')\n", (1421, 1434), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((1533, 1603), 'repositories.db.import_csv_data', 'db.import_csv_data', (["(folder_path + '/' + filename)", 'table_name', 'isHeader'], {}), "(folder_path + '/' + filename, table_name, isHeader)\n", (1551, 1603), False, 'from repositories import db, cos\n'), ((1915, 1936), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (1923, 1936), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((1971, 2004), 'flask.flash', 'flash', (['"""Please select a CSV file"""'], {}), "('Please select a CSV file')\n", (1976, 2004), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((2028, 2049), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (2036, 2049), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((1338, 1373), 'os.path.join', 'os.path.join', (['folder_path', 'filename'], {}), '(folder_path, filename)\n', (1350, 1373), False, 'import os\n'), ((1462, 1491), 'flask.request.form.get', 'request.form.get', (['"""chkHeader"""'], {}), 
"('chkHeader')\n", (1478, 1491), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((1675, 1710), 'flask.flash', 'flash', (['"""Data imported successfully"""'], {}), "('Data imported successfully')\n", (1680, 1710), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((1741, 1779), 'repositories.cos.saveCSVFile', 'cos.saveCSVFile', (['filename', 'folder_path'], {}), '(filename, folder_path)\n', (1756, 1779), False, 'from repositories import db, cos\n'), ((1800, 1814), 'flask.flash', 'flash', (['upldmsg'], {}), '(upldmsg)\n', (1805, 1814), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n'), ((1858, 1890), 'flask.flash', 'flash', (['"""Unable to import data.."""'], {}), "('Unable to import data..')\n", (1863, 1890), False, 'from flask import Flask, flash, request, redirect, url_for, session, render_template\n')] |
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
from typing import List, Optional, Union
import pandas as pd
from feature_engine.dataframe_checks import _is_dataframe
from feature_engine.imputation.base_imputer import BaseImputer
from feature_engine.variable_manipulation import _check_input_parameter_variables
class DropMissingData(BaseImputer):
    """
    DropMissingData() will delete rows containing missing values. It provides
    similar functionality to pandas.drop_na().
    It works for numerical and categorical variables. You can enter the list of
    variables for which missing values should be evaluated. Alternatively, the imputer
    will evaluate missing data in all variables in the dataframe.
    More details in the :ref:`User Guide <drop_missing_data>`.
    Parameters
    ----------
    missing_only: bool, default=True
        If `True`, rows will be dropped when they show missing data in variables with
        missing data in the train set, that is, in the data set used in `fit()`. If
        `False`, rows will be dropped if there is missing data in any of the variables.
        This parameter only works when `threshold=None`, otherwise it is ignored.
    variables: list, default=None
        The list of variables to consider for the imputation. If None, the imputer will
        evaluate missing data in all variables in the dataframe. Alternatively, the
        imputer will evaluate missing data only in the variables in the list.
        Note that if `missing_only=True` only variables with missing data in the train
        set will be considered to drop a row, which might be a subset of the indicated
        list.
    threshold: int or float, default=None
        Require that percentage of non-NA values in a row to keep it. If
        `threshold=1`, all variables need to have data to keep the row. If
        `threshold=0.5`, 50% of the variables need to have data to keep the row.
        If `threshold=0.01`, 1% of the variables need to have data to keep the row.
        If `thresh=None`, rows with NA in any of the variables will be dropped.
    Attributes
    ----------
    variables_:
        The variables for which missing data will be examined to decide if a row is
        dropped. The attribute `variables_` is different from the parameter `variables`
        when the latter is `None`, or when only a subset of the indicated variables
        show NA in the train set if `missing_only=True`.
    n_features_in_:
        The number of features in the train set used in fit.
    Methods
    -------
    fit:
        Find the variables for which missing data should be evaluated.
    transform:
        Remove rows with missing data.
    fit_transform:
        Fit to the data, then transform it.
    return_na_data:
        Returns a dataframe with the rows that contain missing data.
    """
    def __init__(
        self,
        missing_only: bool = True,
        threshold: Union[None, int, float] = None,
        variables: Union[None, int, str, List[Union[str, int]]] = None,
    ) -> None:
        if not isinstance(missing_only, bool):
            raise ValueError(
                "missing_only takes values True or False. "
                f"Got {missing_only} instead."
            )
        if threshold is not None:
            # threshold must be a fraction in (0, 1]; 0 would keep every row.
            if not isinstance(threshold, (int, float)) or not (0 < threshold <= 1):
                raise ValueError(
                    "threshold must be a value between 0 < x <= 1. "
                    f"Got {threshold} instead."
                )
        self.variables = _check_input_parameter_variables(variables)
        self.missing_only = missing_only
        self.threshold = threshold
    def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
        """
        Find the variables for which missing data should be evaluated to decide if a
        row should be dropped.
        Parameters
        ----------
        X: pandas dataframe of shape = [n_samples, n_features]
            The training data set.
        y: pandas Series, default=None
            y is not needed in this imputation. You can pass None or y.
        """
        # check input dataframe
        X = _is_dataframe(X)
        # find variables for which indicator should be added
        # if threshold, then missing_only is ignored:
        if self.threshold is not None:
            if not self.variables:
                self.variables_ = list(X.columns)
            else:
                self.variables_ = self.variables
        # if threshold is None, we have the option to identify
        # variables with NA only.
        else:
            if self.missing_only:
                if not self.variables:
                    self.variables_ = [
                        var for var in X.columns if X[var].isnull().sum() > 0
                    ]
                else:
                    self.variables_ = [
                        var for var in self.variables if X[var].isnull().sum() > 0
                    ]
            else:
                if not self.variables:
                    self.variables_ = list(X.columns)
                else:
                    self.variables_ = self.variables
        self.n_features_in_ = X.shape[1]
        return self
    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Remove rows with missing data.
        Parameters
        ----------
        X: pandas dataframe of shape = [n_samples, n_features]
            The dataframe to be transformed.
        Returns
        -------
        X_new: pandas dataframe
            The complete case dataframe for the selected variables, of shape
            [n_samples - n_samples_with_na, n_features]
        """
        X = self._check_transform_input_and_state(X)
        if self.threshold:
            # pandas `thresh` is an absolute count of required non-NA values.
            X.dropna(
                thresh=len(self.variables_) * self.threshold,
                subset=self.variables_,
                axis=0,
                inplace=True,
            )
        else:
            X.dropna(axis=0, how="any", subset=self.variables_, inplace=True)
        return X
    def return_na_data(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Returns the subset of the dataframe with the rows with missing values. That is,
        the subset of the dataframe that would be removed with the `transform()` method.
        This method may be useful in production, for example if we want to store or log
        the removed observations, that is, rows that will not be fed into the model.
        Parameters
        ----------
        X_na: pandas dataframe of shape = [n_samples_with_na, features]
            The subset of the dataframe with the rows with missing data.
        """
        X = self._check_transform_input_and_state(X)
        if self.threshold:
            # NOTE(review): this selects rows whose NA *fraction* is
            # >= threshold, while transform() keeps rows whose non-NA count is
            # >= len(variables_) * threshold — confirm the two agree for the
            # intended threshold semantics.
            idx = pd.isnull(X[self.variables_]).mean(axis=1) >= self.threshold
            idx = idx[idx]
        else:
            # BUG FIX: positional axis argument to DataFrame.any() is
            # deprecated in recent pandas; use the explicit keyword.
            idx = pd.isnull(X[self.variables_]).any(axis=1)
            idx = idx[idx]
        return X.loc[idx.index, :]
| [
"feature_engine.variable_manipulation._check_input_parameter_variables",
"feature_engine.dataframe_checks._is_dataframe",
"pandas.isnull"
] | [((3587, 3630), 'feature_engine.variable_manipulation._check_input_parameter_variables', '_check_input_parameter_variables', (['variables'], {}), '(variables)\n', (3619, 3630), False, 'from feature_engine.variable_manipulation import _check_input_parameter_variables\n'), ((4209, 4225), 'feature_engine.dataframe_checks._is_dataframe', '_is_dataframe', (['X'], {}), '(X)\n', (4222, 4225), False, 'from feature_engine.dataframe_checks import _is_dataframe\n'), ((6991, 7020), 'pandas.isnull', 'pd.isnull', (['X[self.variables_]'], {}), '(X[self.variables_])\n', (7000, 7020), True, 'import pandas as pd\n'), ((6871, 6900), 'pandas.isnull', 'pd.isnull', (['X[self.variables_]'], {}), '(X[self.variables_])\n', (6880, 6900), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple webexteamssdk demonstration script.
Very simple script to create a demo room, post a message, and post a file.
If one or more rooms with the name of the demo room already exist, it will
delete the previously existing rooms.
The package natively retrieves your Webex Teams access token from the
WEBEX_TEAMS_ACCESS_TOKEN environment variable. You must have this environment
variable set to run this script.
Copyright (c) 2016-2019 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
from webexteamssdk import WebexTeamsAPI
# Demo configuration: the room to (re)create and who/what to post into it.
DEMO_ROOM_NAME = "webexteamssdk Demo Room"
DEMO_PEOPLE = ["<EMAIL>", "<EMAIL>"]
DEMO_MESSAGE = u"Webex Teams rocks! \ud83d\ude0e"
DEMO_FILE_URL = \
    "https://www.webex.com/content/dam/wbx/us/images/dg-integ/teams_icon.png"
# Create a WebexTeamsAPI connection object; uses your WEBEX_TEAMS_ACCESS_TOKEN
# environment variable for authentication.
api = WebexTeamsAPI()
# Clean up previous demo rooms
print("Searching for existing demo rooms...")
# Create a generator container (iterable) that lists the rooms where you are
# a member
rooms = api.rooms.list()
# Build a list of rooms with the name DEMO_ROOM_NAME
existing_demo_rooms = [room for room in rooms if room.title == DEMO_ROOM_NAME]
if existing_demo_rooms:
    print("Found {} existing room(s); deleting them."
          "".format(len(existing_demo_rooms)))
    for room in existing_demo_rooms:
        # Delete the room
        api.rooms.delete(room.id)
        print("Room '{}' deleted.".format(room.id))
# Create a new demo room
demo_room = api.rooms.create(DEMO_ROOM_NAME)
# Print the room details (formatted JSON)
print(demo_room)
for person_email in DEMO_PEOPLE:
    # Add people to the room
    api.memberships.create(demo_room.id, personEmail=person_email)
# Create a message in the new room
message = api.messages.create(demo_room.id, text=DEMO_MESSAGE)
# Print the message details (formatted JSON)
print(message)
# Post a file in the new room from test_url
message = api.messages.create(demo_room.id, files=[DEMO_FILE_URL])
# Print the message details (formatted JSON)
print(message)
| [
"webexteamssdk.WebexTeamsAPI"
] | [((1944, 1959), 'webexteamssdk.WebexTeamsAPI', 'WebexTeamsAPI', ([], {}), '()\n', (1957, 1959), False, 'from webexteamssdk import WebexTeamsAPI\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import warnings
from natsort.natsort import (
as_ascii,
as_utf8,
decoder,
humansorted,
index_humansorted,
index_natsorted,
index_realsorted,
index_versorted,
natsort_key,
natsort_keygen,
natsorted,
ns,
order_by_index,
realsorted,
versorted,
)
from natsort.utils import chain_functions
# BUG FIX: ``float(sys.version[:3])`` parses a *display* string and is
# fragile (e.g. "3.10.x" is truncated to "3.1"); ``sys.version_info`` is the
# documented, structured way to compare interpreter versions.
# ``natcmp`` (a cmp-style comparator) only exists on Python 2.
if sys.version_info[0] < 3:
    from natsort.natsort import natcmp
__version__ = "5.5.0"
__all__ = [
"natsort_key",
"natsort_keygen",
"natsorted",
"versorted",
"humansorted",
"realsorted",
"index_natsorted",
"index_versorted",
"index_humansorted",
"index_realsorted",
"order_by_index",
"decoder",
"natcmp",
"as_ascii",
"as_utf8",
"ns",
"chain_functions",
]
# Add the ns keys to this namespace for convenience.
# A dict comprehension is not used for Python 2.6 compatibility.
# We catch warnings from the deprecated ns enum values when adding
# them to natsort's main namespace: accessing a deprecated ns member
# emits a warning, which would otherwise fire at import time.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    # Re-export every ns option (e.g. ns.LOCALE, ns.IGNORECASE) at module level.
    globals().update(ns._asdict())
| [
"warnings.simplefilter",
"natsort.natsort.ns._asdict",
"warnings.catch_warnings"
] | [((1123, 1148), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1146, 1148), False, 'import warnings\n'), ((1154, 1185), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1175, 1185), False, 'import warnings\n'), ((1207, 1219), 'natsort.natsort.ns._asdict', 'ns._asdict', ([], {}), '()\n', (1217, 1219), False, 'from natsort.natsort import as_ascii, as_utf8, decoder, humansorted, index_humansorted, index_natsorted, index_realsorted, index_versorted, natsort_key, natsort_keygen, natsorted, ns, order_by_index, realsorted, versorted\n')] |
# Generated by Django 2.2.6 on 2020-01-24 00:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds a non-nullable ForeignKey from
    # DoubleEntry to Account.  ``default=1`` back-fills existing rows with the
    # Account of pk=1; ``preserve_default=False`` removes the default from the
    # field definition afterwards.  Do not edit generated migrations by hand.
    dependencies = [
        ('books', '0006_auto_20200124_0048'),
    ]
    operations = [
        migrations.AddField(
            model_name='doubleentry',
            name='account',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='books.Account'),
            preserve_default=False,
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((371, 468), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(1)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""books.Account"""'}), "(default=1, on_delete=django.db.models.deletion.CASCADE,\n to='books.Account')\n", (388, 468), False, 'from django.db import migrations, models\n')] |
"""
This file will stored all dynamic class and methods.
These methods will be used throughout the whole program.
"""
import sys
import concurrent.futures as cf
import threading
from functools import wraps
from ..utils.request import Session
from ..errors import BuildError
def Threader(f):
    """Decorator that runs ``f`` in a daemon thread.

    Calling the decorated function starts the thread immediately and returns
    the ``threading.Thread`` object so callers may ``join()`` it.  The thread
    is a daemon, so it never blocks interpreter shutdown.
    """
    @wraps(f)
    def inner(*args, **kwargs):
        # NOTE: the original wrote ``args=(a)``, which is just ``a`` (the
        # parentheses do not create a tuple) and copied kwargs needlessly;
        # pass both straight through and set daemon via the constructor.
        t = threading.Thread(target=f, args=args, kwargs=kwargs, daemon=True)
        t.start()
        return t
    return inner
class Queued:
    """Run ``main_job`` over every element of ``input_work`` concurrently.

    ``run()`` is the supported entry point (ThreadPoolExecutor based).  The
    remaining queue/worker methods are an older hand-rolled implementation
    kept for reference; they reference attributes never initialised here
    (``self.q``, ``self.mp``, ``self.search``) — see NOTE(review) comments.
    """
    THREAD_COUNT = 75  # worker count used only by the legacy start_thread()
    def __init__(self, main_job, input_work, *args, **kwargs):
        self.input = input_work # work items to process
        self.main_job = main_job # callable applied to each work item
        # extra positional/keyword arguments captured for later use
        self.results = []
        self.args = args
        self.kwargs = kwargs
        self.thread_pool = []
    def run(self, *args, **kwargs):
        """Map ``main_job`` over the work items and return the result list.

        When extra ``args``/``kwargs`` are supplied, each item is invoked as
        ``main_job(item, args_i, kwargs_i)`` — one positional slice and one
        kwargs dict per item, replicated so executor.map can zip them.
        """
        with cf.ThreadPoolExecutor() as ex:
            if args or kwargs:
                args = tuple(args) * len(self.input)
                kwargs = list(dict(kwargs) for _ in range(len(self.input)))
                data = ex.map(self.main_job, self.input, args, kwargs)
            else:
                data = ex.map(self.main_job, self.input)
            return [x for x in data]
    @classmethod
    def _setup(cls):
        """Stops the requests session from timing out before the queue is finished."""
        cls._session_timeout = Session.TIMEOUT
        Session.TIMEOUT = 60
    @classmethod
    def _teardown(cls):
        """Sets the requests Session back to its original timeout."""
        Session.TIMEOUT = cls._session_timeout
    def capture_threads(self, threads):
        # Remember a thread so kill_all_threads() can reach it later.
        self.thread_pool.append(threads)
    def kill_all_threads(self):
        # NOTE(review): threading.Thread exposes is_alive(), not is_Alive(),
        # and has no terminate(); this legacy method would raise
        # AttributeError if it were ever reached.
        for t in self.thread_pool:
            if t.is_Alive():
                t.terminate()
    def set_queue(self):
        # NOTE(review): self.q is never created in __init__ — legacy path.
        if Datatype.isList(self.input):
            # push every work item onto the queue
            for obj in self.input:
                self.q.put(obj)
    def put_worker_to_work(self):
        # NOTE(review): self.search is never set on this class; presumably a
        # caller assigned it externally — confirm before reusing this path.
        worker = self.q.get()
        if self.search:
            self.results.append(self.main_job(worker, self.search))
        else:
            self.results.append(self.main_job(worker))
        self.q.task_done()
    def start_thread(self):
        # NOTE(review): self.mp is undefined, and the bare ``except`` recurses
        # unconditionally — a persistent error would loop forever.
        try:
            for _ in range(self.THREAD_COUNT):
                t = self.mp.map(self.put_worker_to_work)
                self.capture_threads(t)
                t.daemon = True
                t.start()
        except:
            self.kill_all_threads()
            self.start_thread()
    def run2(self):
        """Old threading method; superseded by run()."""
        self._setup()
        self.set_queue()
        while True:
            self.start_thread()
            if self.q.empty():
                break
        # results are not filtered for None here;
        # callers must drop None entries themselves.
        self._teardown()
        return self.results
class Datatype:
    """Small collection of datatype checks and normalisation helpers."""
    @staticmethod
    def isDict(_type):
        """Return True if the argument is a dict."""
        return isinstance(_type, dict)
    @staticmethod
    def isStr(_type):
        """Return True if the argument is a str."""
        return isinstance(_type, str)
    @staticmethod
    def isList(_type):
        """Return True if the argument is a list."""
        return isinstance(_type, list)
    @classmethod
    def removeNone(cls, _list):
        """Return a copy of *_list* with every ``None`` entry removed.

        Raises NotImplementedError (historic API) when *_list* is not a list.
        """
        if not cls.isList(_list):
            msg = "Can not remove None value. ..datatype must be a list "
            raise NotImplementedError(msg)
        # BUG FIX: ``filter(None, ...)`` dropped every falsy value
        # (0, "", False, []), not just None; test identity with None instead.
        return [item for item in _list if item is not None]
    @classmethod
    def enumerate_it(cls, data, start=0):
        """Return ``list(enumerate(...))`` over a list, or over a dict's items."""
        if cls.isDict(data) or cls.isList(data):
            if cls.isDict(data):
                data = data.items()
            return list(enumerate(data, start=start))
        raise NotImplementedError("datatype is not a dictionary or list ")
    @staticmethod
    def strip_lowered(string):
        """Return ``str(string)`` lower-cased, then stripped of surrounding whitespace."""
        return str(string).lower().strip()
    @classmethod
    def lowered_dict(cls, data):
        """Return a copy of *data* whose keys are stripped and lower-cased."""
        if not cls.isDict(data):
            raise NotImplementedError("datatype is not a dictionary")
        return {cls.strip_lowered(key): val for key, val in data.items()}
    @classmethod
    def lowered_list(cls, data):
        """Return a copy of *data* with every element stripped and lower-cased."""
        if not cls.isList(data):
            raise NotImplementedError("datatype is not a List")
        return [cls.strip_lowered(x) for x in data]
class User:
    """Helpers that map a user's menu choice (index or text) onto data."""
    @staticmethod
    def choice_is_str(choice, data):
        """
        Resolve a textual user choice against *data* (case-insensitive).
        :params:: choice,data
        choice - user choice. datatype: str
        data - list of strings, or dict whose keys are matched
        Returns the smallest matching value, or None when nothing matches.
        """
        choice = Datatype.strip_lowered(choice)
        if Datatype.isDict(data):
            data = Datatype.lowered_dict(data)
            val = [val for key, val in data.items() if choice in key]
        else:
            val = [val for val in data if choice in Datatype.strip_lowered(val)]
        if val:
            # min() gives a deterministic result when several entries match.
            return min(val)
    @staticmethod
    def choice_is_int(choice, data):
        """
        Resolve a numeric user choice to the element at that index.
        :params:: choice,data
        choice - user choice. coerced with int()
        data - list or dict object
        Returns None when the choice is not an int or is out of range
        (any failure is swallowed by the broad except below).
        """
        try:
            choice = int(choice)
            results = Datatype.enumerate_it(data)
            length = len(data)  # NOTE(review): unused — candidate for removal
            if Datatype.isDict(data):
                # dict enumeration yields (index, (key, value)); return value
                return results[choice][1][1]
            return results[choice][1]
        except Exception as e:
            return
    @classmethod
    def selection(cls, select, byint=None, bystr=None):
        """ Resolve *select* (1-based index, or search text for artist /
        album name ('title')) to a 0-based index into the parallel lists.
        Returns None when a text search finds nothing.
        """
        try:
            # checking from index
            if isinstance(select, int):
                select -= 1  # user input is 1-based
                length = len(byint) + 1  # NOTE(review): unused
                # catch index errors if user choose a mixtape out of range
                # NOTE(review): the upper bound allows select == len(byint),
                # which looks like an off-by-one — confirm against callers.
                select = 0 if (0 >= select or select > len(byint)) else select
                return select
            else:
                # checking from artists
                choosen = cls.choice_is_str
                choice = choosen(select, byint)
                if choice:
                    return byint.index(choice)
                # check by mixtapes
                choice = choosen(select, bystr)
                if choice:
                    return bystr.index(choice)
                if not choice: # will catch None value in Media
                    return
        except BuildError as e:
            raise BuildError(1)
| [
"concurrent.futures.ThreadPoolExecutor",
"functools.wraps"
] | [((312, 320), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (317, 320), False, 'from functools import wraps\n'), ((930, 953), 'concurrent.futures.ThreadPoolExecutor', 'cf.ThreadPoolExecutor', ([], {}), '()\n', (951, 953), True, 'import concurrent.futures as cf\n')] |
import argparse # parse options
import ast
import json
import os
import sys # provide option

import requests
PATH = os.path.abspath(os.path.dirname(sys.argv[0]))
token_url = "https://api.weixin.qq.com/cgi-bin/token" # Change to control server
create_interface = "https://api.weixin.qq.com/cgi-bin/menu/create"
get_Allinterface = "https://api.weixin.qq.com/cgi-bin/menu/get"
get_Currentinterface = "https://api.weixin.qq.com/cgi-bin/get_current_selfmenu_info"
del_interface = "https://api.weixin.qq.com/cgi-bin/menu/delete"
# del, getall, getcurrent could be merge into one method by changing params
class userInterface:
    """Thin wrapper around the WeChat custom-menu HTTP API."""
    # Cached request parameters; init() stores the access_token here.
    credential = dict()
    @staticmethod
    def init():
        """Acquire an access_token from the control server and cache it.

        Exits the process on any failure (missing/invalid credentials or an
        API error response).
        """
        # Initialization: acquire access_token from control server.
        # SECURITY FIX: the credential file was parsed with eval(), which
        # executes arbitrary code; ast.literal_eval only accepts literals.
        with open(PATH + r"\..\Static\accesskey_token.json", "r") as credential_file:
            credential = ast.literal_eval(
                "".join(line.strip() for line in credential_file)
            )
        key = {}  # BUG FIX: was unbound (NameError) if requests.get() raised
        try:
            key = requests.get(url=token_url, params=credential).json()
            token = key["access_token"]
            userInterface.credential.update([("access_token", token)])
        except Exception as err:
            if "errcode" in key:
                # BUG FIX: the original passed a tuple as a second print
                # argument instead of %-formatting the message.
                print("ERROR: errcode:%s\t%s" % (key["errcode"], key["errmsg"]))
            else:
                print("ERROR: " + str(err))
            exit()
    @staticmethod
    def createManual(file_addr):
        """Upload a menu ('user manual') configuration file to the API."""
        with open(file_addr, "rb") as config_file:
            try:
                # SECURITY FIX: parse the configuration with ast.literal_eval
                # instead of eval (the file contains a Python/JSON literal).
                config = ast.literal_eval(
                    "".join(line.strip().decode() for line in config_file)
                )
            except Exception as err:
                print(str(err))
                exit()
        response = requests.post(
            create_interface,
            params=userInterface.credential,
            data=json.dumps(config),  # Must use json
        ).json()
        print("Result\nerrcode:", response["errcode"], response["errmsg"])
    @staticmethod
    def _request_and_report(url, always_has_errcode=False):
        """GET *url* with the cached credential and print the outcome.

        Shared helper for the del/getall/getcurrent endpoints, which only
        differ in their URL (and whether the response always carries errcode).
        """
        response = requests.get(url, params=userInterface.credential).json()
        if always_has_errcode or "errcode" in response:
            print("Result\nerrcode:", response["errcode"], response["errmsg"])
        else:
            print(response)
    @staticmethod
    def delInterface():
        """Delete ALL configured interfaces (menus)."""
        userInterface._request_and_report(del_interface, always_has_errcode=True)
    @staticmethod
    def viewAllInterface():
        """List every configured interface, or the error if the call failed."""
        userInterface._request_and_report(get_Allinterface)
    @staticmethod
    def viewCurrentInterface():
        """Show the currently active interface, or the error if the call failed."""
        userInterface._request_and_report(get_Currentinterface)
if __name__ == "__main__":
    # CLI entry point: exactly one of the four actions is performed per run.
    parser = argparse.ArgumentParser(description="Provide access to modify/view wechat customized usermanual")
    parser.add_argument('-d','--delete',action='store_true',help='Delete ALL interfaces')
    parser.add_argument('-l','--list',action="store_true",help="List all userinterfaces")
    parser.add_argument('-i','--inspect',action="store_true",help="List current userinterface")
    parser.add_argument('-c','--config',help="upload the userinterface configuration")
    option=parser.parse_args()
    # Always fetch a fresh access_token before dispatching the chosen action.
    userInterface.init()
    if option.delete:
        userInterface.delInterface()
    elif option.list:
        userInterface.viewAllInterface()
    elif option.inspect:
        userInterface.viewCurrentInterface()
    elif option.config:
        userInterface.createManual(option.config)
exit() | [
"os.path.dirname",
"json.dumps",
"argparse.ArgumentParser",
"requests.get"
] | [((122, 150), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (137, 150), False, 'import os\n'), ((2923, 3025), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Provide access to modify/view wechat customized usermanual"""'}), "(description=\n 'Provide access to modify/view wechat customized usermanual')\n", (2946, 3025), False, 'import argparse\n'), ((2158, 2218), 'requests.get', 'requests.get', (['del_interface'], {'params': 'userInterface.credential'}), '(del_interface, params=userInterface.credential)\n', (2170, 2218), False, 'import requests\n'), ((2363, 2426), 'requests.get', 'requests.get', (['get_Allinterface'], {'params': 'userInterface.credential'}), '(get_Allinterface, params=userInterface.credential)\n', (2375, 2426), False, 'import requests\n'), ((2655, 2722), 'requests.get', 'requests.get', (['get_Currentinterface'], {'params': 'userInterface.credential'}), '(get_Currentinterface, params=userInterface.credential)\n', (2667, 2722), False, 'import requests\n'), ((1007, 1053), 'requests.get', 'requests.get', ([], {'url': 'token_url', 'params': 'credential'}), '(url=token_url, params=credential)\n', (1019, 1053), False, 'import requests\n'), ((1916, 1934), 'json.dumps', 'json.dumps', (['config'], {}), '(config)\n', (1926, 1934), False, 'import json\n')] |
# TODO: update or remove this file
from pytest import raises
from pypkg import cli
def test_main(capsys):
    """Running with no arguments should emit the placeholder message on stderr."""
    cli.main([])
    out_err = capsys.readouterr()
    assert "write me" in out_err.err
def test_usage(capsys):
    """-h must print usage text and terminate via SystemExit."""
    with raises(SystemExit):
        cli.main(["-h"])
    assert "Usage:" in capsys.readouterr().out
def test_error():
    """A missing input file should abort with a descriptive SystemExit."""
    with raises(SystemExit, match="input file not found"):
        cli.main(["nosuchfile"])
def test_exception(mocker):
    """Unexpected errors become SystemExit unless --trace re-raises them."""
    mocker.patch("pypkg.cli.run", side_effect=NotImplementedError)
    with raises(SystemExit):
        cli.main([])
    # With --trace the raw exception must propagate instead.
    with raises(NotImplementedError):
        cli.main(["--trace"])
| [
"pypkg.cli.main",
"pytest.raises"
] | [((113, 125), 'pypkg.cli.main', 'cli.main', (['[]'], {}), '([])\n', (121, 125), False, 'from pypkg import cli\n'), ((234, 252), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (240, 252), False, 'from pytest import raises\n'), ((262, 278), 'pypkg.cli.main', 'cli.main', (["['-h']"], {}), "(['-h'])\n", (270, 278), False, 'from pypkg import cli\n'), ((387, 435), 'pytest.raises', 'raises', (['SystemExit'], {'match': '"""input file not found"""'}), "(SystemExit, match='input file not found')\n", (393, 435), False, 'from pytest import raises\n'), ((445, 469), 'pypkg.cli.main', 'cli.main', (["['nosuchfile']"], {}), "(['nosuchfile'])\n", (453, 469), False, 'from pypkg import cli\n'), ((577, 595), 'pytest.raises', 'raises', (['SystemExit'], {}), '(SystemExit)\n', (583, 595), False, 'from pytest import raises\n'), ((605, 617), 'pypkg.cli.main', 'cli.main', (['[]'], {}), '([])\n', (613, 617), False, 'from pypkg import cli\n'), ((628, 655), 'pytest.raises', 'raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (634, 655), False, 'from pytest import raises\n'), ((665, 686), 'pypkg.cli.main', 'cli.main', (["['--trace']"], {}), "(['--trace'])\n", (673, 686), False, 'from pypkg import cli\n')] |
# coding=utf-8
# Copyright 2014, <NAME> http://github.com/rafi
# vim: set ts=8 sw=4 tw=80 et :
import logging
import requests
from beets.plugins import BeetsPlugin
from beets import ui
from beets import dbcore
from beets import config
log = logging.getLogger('beets')
# last.fm library.gettracks endpoint; %-formatted with (user, api_key, page, limit).
api_url = 'http://ws.audioscrobbler.com/2.0/?method=library.gettracks&user=%s&api_key=%s&format=json&page=%s&limit=%s'
class LastImportPlugin(BeetsPlugin):
    """Beets plugin exposing the ``lastimport`` command."""

    def __init__(self):
        super(LastImportPlugin, self).__init__()
        # Credentials live under the shared `lastfm` config section;
        # paging/retry knobs are plugin-local.
        config['lastfm'].add({'user': '', 'api_key': ''})
        self.config.add({'per_page': 500, 'retry_limit': 3})

    def commands(self):
        subcommand = ui.Subcommand('lastimport',
                                 help='import last.fm play-count')

        def run_import(lib, opts, args):
            import_lastfm(lib)

        subcommand.func = run_import
        return [subcommand]
def import_lastfm(lib):
    """Fetch play-counts for the configured last.fm user and store them in ``lib``.

    Pages through the user's last.fm library, retrying each page up to
    ``retry_limit`` times before giving up on it and moving on.

    Raises:
        ui.UserError: if user/api_key are not configured, or last.fm returns
            an empty result set.
    """
    user = config['lastfm']['user']
    api_key = config['lastfm']['api_key']
    per_page = config['lastimport']['per_page']

    if not user:
        raise ui.UserError('You must specify a user name for lastimport')
    if not api_key:
        raise ui.UserError('You must specify an api_key for lastimport')

    log.info('Fetching last.fm library for @{0}'.format(user))

    page_total = 1      # real total is discovered from the first response
    page_current = 0
    found_total = 0
    unknown_total = 0
    retry_limit = config['lastimport']['retry_limit'].get(int)
    # Iterate through a yet to be known page total count
    while page_current < page_total:
        log.info(
            'lastimport: Querying page #{0}{1}...'
            .format(
                page_current+1,
                '/'+str(page_total) if page_total > 1 else ''
            )
        )

        for retry in range(0, retry_limit):
            page = fetch_tracks(user, api_key, page_current+1, per_page)
            if 'tracks' in page:
                # Let us the reveal the holy total pages!
                page_total = int(page['tracks']['@attr']['totalPages'])
                if page_total < 1:
                    # It means nothing to us!
                    raise ui.UserError('No data to process, empty query from last.fm')

                found, unknown = process_tracks(lib, page['tracks']['track'])
                found_total += found
                unknown_total += unknown
                break
            else:
                log.error('lastimport: ERROR: unable to read page #{0}'
                          .format(page_current+1))
                # BUGFIX: `retry` runs from 0 to retry_limit-1, so the old
                # test `retry < retry_limit` was always true and the FAIL
                # message below was unreachable.
                if retry < retry_limit - 1:
                    log.info('lastimport: Retrying page #{0}... ({1}/{2} retry)'
                             .format(page_current+1, retry+1, retry_limit))
                else:
                    log.error('lastimport: FAIL: unable to fetch page #{0}, tried {1} times'
                              .format(page_current, retry+1))
        page_current += 1

    log.info('lastimport: ... done!')
    log.info('lastimport: finished processing {0} song pages'.format(page_total))
    log.info('lastimport: {0} unknown play-counts'.format(unknown_total))
    log.info('lastimport: {0} play-counts imported'.format(found_total))
def fetch_tracks(user, api_key, page, limit):
    """Return one decoded JSON page of the user's last.fm library."""
    url = api_url % (user, api_key, page, limit)
    return requests.get(url).json()
def process_tracks(lib, tracks):
    """Match last.fm ``tracks`` against items in ``lib`` and update play_count.

    Matching is attempted, in order, by MusicBrainz track id, then
    artist/title/album, then artist/title, and finally artist/title with a
    typographic apostrophe substituted into the title.

    Returns:
        (found, fails): number of items updated and number left unmatched.
    """
    total = len(tracks)
    total_found = 0
    total_fails = 0
    log.info('lastimport: Received {0} tracks in this page, processing...'
             .format(total))

    # BUGFIX: iterate the list directly instead of `xrange`, which does not
    # exist on Python 3 (behavior on Python 2 is unchanged).
    for track in tracks:
        song = ''
        trackid = track['mbid'].strip()
        artist = track['artist'].get('name', '').strip()
        title = track['name'].strip()
        album = ''
        if 'album' in track:
            album = track['album'].get('name', '').strip()

        # First try to query by musicbrainz's trackid
        if trackid:
            song = lib.items('mb_trackid:' + trackid).get()

        # Otherwise try artist/title/album
        if not song:
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', title),
                dbcore.query.SubstringQuery('album', album)
            ])
            song = lib.items(query).get()

        # If not, try just artist/title
        if not song:
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', title)
            ])
            song = lib.items(query).get()

        # Last resort, try just replacing to utf-8 quote
        if not song:
            title = title.replace('\'', u'’')
            query = dbcore.AndQuery([
                dbcore.query.SubstringQuery('artist', artist),
                dbcore.query.SubstringQuery('title', title)
            ])
            song = lib.items(query).get()

        if song:
            count = int(song.get('play_count', 0))
            new_count = int(track['playcount'])
            log.debug(u'lastimport: match: {0} - {1} ({2}) updating: play_count {3} => {4}'
                      .format(song.artist, song.title, song.album, count, new_count))
            song['play_count'] = new_count
            song.store()
            total_found += 1
        else:
            total_fails += 1
            log.info(u'lastimport: - No match: {0} - {1} ({2})'
                     .format(artist, title, album))

    if total_fails > 0:
        log.info('lastimport: Acquired {0}/{1} play-counts ({2} unknown)'
                 .format(total_found, total, total_fails))

    return total_found, total_fails
| [
"logging.getLogger",
"beets.ui.UserError",
"beets.dbcore.query.SubstringQuery",
"requests.get",
"beets.ui.Subcommand"
] | [((244, 270), 'logging.getLogger', 'logging.getLogger', (['"""beets"""'], {}), "('beets')\n", (261, 270), False, 'import logging\n'), ((734, 795), 'beets.ui.Subcommand', 'ui.Subcommand', (['"""lastimport"""'], {'help': '"""import last.fm play-count"""'}), "('lastimport', help='import last.fm play-count')\n", (747, 795), False, 'from beets import ui\n'), ((1108, 1167), 'beets.ui.UserError', 'ui.UserError', (['"""You must specify a user name for lastimport"""'], {}), "('You must specify a user name for lastimport')\n", (1120, 1167), False, 'from beets import ui\n'), ((1202, 1260), 'beets.ui.UserError', 'ui.UserError', (['"""You must specify an api_key for lastimport"""'], {}), "('You must specify an api_key for lastimport')\n", (1214, 1260), False, 'from beets import ui\n'), ((3283, 3335), 'requests.get', 'requests.get', (['(api_url % (user, api_key, page, limit))'], {}), '(api_url % (user, api_key, page, limit))\n', (3295, 3335), False, 'import requests\n'), ((2181, 2241), 'beets.ui.UserError', 'ui.UserError', (['"""No data to process, empty query from last.fm"""'], {}), "('No data to process, empty query from last.fm')\n", (2193, 2241), False, 'from beets import ui\n'), ((4373, 4418), 'beets.dbcore.query.SubstringQuery', 'dbcore.query.SubstringQuery', (['"""artist"""', 'artist'], {}), "('artist', artist)\n", (4400, 4418), False, 'from beets import dbcore\n'), ((4436, 4479), 'beets.dbcore.query.SubstringQuery', 'dbcore.query.SubstringQuery', (['"""title"""', 'title'], {}), "('title', title)\n", (4463, 4479), False, 'from beets import dbcore\n'), ((4497, 4540), 'beets.dbcore.query.SubstringQuery', 'dbcore.query.SubstringQuery', (['"""album"""', 'album'], {}), "('album', album)\n", (4524, 4540), False, 'from beets import dbcore\n'), ((4794, 4839), 'beets.dbcore.query.SubstringQuery', 'dbcore.query.SubstringQuery', (['"""artist"""', 'artist'], {}), "('artist', artist)\n", (4821, 4839), False, 'from beets import dbcore\n'), ((4857, 4900), 
'beets.dbcore.query.SubstringQuery', 'dbcore.query.SubstringQuery', (['"""title"""', 'title'], {}), "('title', title)\n", (4884, 4900), False, 'from beets import dbcore\n'), ((5220, 5265), 'beets.dbcore.query.SubstringQuery', 'dbcore.query.SubstringQuery', (['"""artist"""', 'artist'], {}), "('artist', artist)\n", (5247, 5265), False, 'from beets import dbcore\n'), ((5283, 5326), 'beets.dbcore.query.SubstringQuery', 'dbcore.query.SubstringQuery', (['"""title"""', 'title'], {}), "('title', title)\n", (5310, 5326), False, 'from beets import dbcore\n')] |
import test_util.linkage.run_full_linkage_test as flt
def run_test():
    """Exercise the full linkage pipeline on the small single-schema test env."""
    config_path = "test-data/envs/small-no-households-with-matches-single-schema/config.json"
    print("Starting test...")
    flt.run_full_linkage_test(config_path)
    print("Done with test")
if __name__ == "__main__":
    # Allow running this module directly as a smoke test.
    run_test()
| [
"test_util.linkage.run_full_linkage_test.run_full_linkage_test"
] | [((106, 218), 'test_util.linkage.run_full_linkage_test.run_full_linkage_test', 'flt.run_full_linkage_test', (['"""test-data/envs/small-no-households-with-matches-single-schema/config.json"""'], {}), "(\n 'test-data/envs/small-no-households-with-matches-single-schema/config.json'\n )\n", (131, 218), True, 'import test_util.linkage.run_full_linkage_test as flt\n')] |
# Notes from this experiment:
# 1. adapt() is way slower than np.unique -- takes forever for 1M, hangs for 10M
# 2. TF returns error if adapt is inside tf.function. adapt uses graph inside anyway
# 3. OOM in batch mode during sparse_to_dense despite of seting sparse in keras
# 4. Mini-batch works but 15x(g)/20x slower than sklearn
# 5. Always replace NaNs in string cols as np.nan is float
# 6. Full graph mode lazily triggers all models together -- produce OOM
# 7. Partial graph mode sequentially execute graph-models
# TODO1: all sparse intermediates, including the outputs
# TODO2: Tune mini-batch size for best performance
import sys
import time
import numpy as np
import scipy as sp
from scipy.sparse import csr_matrix
import pandas as pd
import math
import warnings
import os
# Force to CPU (default is GPU).  Must be set before TensorFlow is imported.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras import layers
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
warnings.filterwarnings('ignore') #cleaner, but not recommended
def readNprep(nRows):
    """Load a Criteo day-21 sample and prepare it for feature transformation.

    Args:
        nRows: 1 selects the 1M-row sample; any other value the 10M sample.

    NaNs are replaced (0 for numeric, '' for categorical) and the first 14
    columns are cast to str so all compared tools see the same input schema.
    """
    # Pick the dataset; everything else is identical for both sizes
    # (the two read branches were previously duplicated).
    name = "criteo_day21_1M" if nRows == 1 else "criteo_day21_10M"
    print("Reading file: " + name)
    criteo = pd.read_csv("~/datasets/" + name, delimiter=",", header=None)
    print(criteo.head())
    # Replace NaNs with 0 for numeric and empty string for categorical
    criteo = criteo.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna(''))
    # Pandas infer the type of first 14 columns as float and int.
    # SystemDS reads those as STRINGS and apply passthrough FT on those.
    # For a fair comparision, convert those here to str and later back to float
    pt = [*range(0, 14)]
    criteo[pt] = criteo[pt].astype(str)
    return criteo
def getCategoricalLayer(X, name, useNumpy):
    """Build a StringLookup layer that multi-hot encodes column ``name`` of X."""
    # NaN handling: np.nan is a float, which raises ValueError for str columns.
    X[name].fillna('', inplace=True)
    if useNumpy:
        # Vocabulary computed eagerly with numpy; no adapt() pass is needed.
        vocabulary = np.unique(X[name].astype(np.string_))
        encoder = layers.StringLookup(vocabulary=vocabulary, output_mode="multi_hot",
                                    num_oov_indices=0, sparse=True)
    else:
        # Let Keras learn the vocabulary from the data via adapt()
        # (much slower than the numpy path -- see file header notes).
        encoder = layers.StringLookup(output_mode="multi_hot", num_oov_indices=0)
        column_tensor = tf.convert_to_tensor(np.array(X[name], dtype=np.string_))
        encoder.adapt(column_tensor)
    return encoder
def getLayers(X):
    """Build a Keras preprocessing model for the Criteo frame X.

    Numeric columns (the first 14) are passed through; string columns are
    multi-hot encoded.  All outputs are concatenated into one wide layer and
    wrapped in a tf.keras.Model so it can be applied batch-wise later.
    """
    # Pass-through transformation -- convert to float
    pt = [*range(0,14)]
    X[pt] = X[pt].astype(np.float64)
    # Build a dictionary with symbolic input tensors w/ proper dtype
    inputs = {}
    for name, column in X.items():
        dtype = column.dtype
        if dtype == object:
            dtype = tf.string
        else:
            dtype = tf.float64
        inputs[name] = tf.keras.Input(shape=(1,), dtype=dtype, sparse=True)
    # Separate out the numeric inputs
    numeric = {name:input for name,input in inputs.items()
            if input.dtype==tf.float64}
    # Concatenate the numeric inputs together and
    # add to the list of layers as is
    prepro = [layers.Concatenate()(list(numeric.values()))]
    # Recode and dummycode the string inputs
    for name, input in inputs.items():
        if input.dtype == tf.float64:
            continue
        onehot = getCategoricalLayer(X, name, True) #use np.unique
        encoded = onehot(input)
        # Append to the same list
        prepro.append(encoded)
    # Concatenate all the preprocessed inputs together,
    # and build a model to apply batch wise later
    cat_layers = layers.Concatenate()(prepro)
    print(cat_layers)
    model_prep = tf.keras.Model(inputs, cat_layers)
    return model_prep
def lazyGraphTransform(X, model, n, isSmall):
    """Fully-lazy variant: the batching loop lives inside a tf.function.

    Kept for reference only -- lazily triggering every mini-batch model at
    once produces OOM (see file header notes).
    """
    tensors = {col: tf.convert_to_tensor(np.array(vals)) for col, vals in X.items()}
    res = batchTransform(tensors, model, X.shape[0], isSmall)
@tf.function
def batchTransform(X, model_prep, n, isSmall):
    """Graph-mode batched transform: slices X into mini-batches, applies
    model_prep to each, and stacks the results (the stack is what OOMs)."""
    # Batch-wise transform to avoid OOM
    # 10k/1.5k: best performance within memory budget
    batch_size = 10000 if isSmall==1 else 1500
    beg = 0
    allRes = []
    while beg < n:
        end = beg + batch_size
        if end > n:
            end = n
        batch_dict = {name: X[name][beg:end] for name, value in X.items()}
        X_batch = model_prep(batch_dict)
        print(X_batch[:1, :]) #print the placeholder
        allRes.append(X_batch)
        if end == n:
            break
        else:
            beg = end
    out = tf.stack(allRes, axis=0) #fix rank
    print(out.shape)
    return out
def batchGraphTransform(X, model, n, isSmall):
    """Eagerly transform X in mini-batches, calling the graph-compiled
    `transform` once per batch so peak memory stays bounded."""
    # 10k/1.5k: best performance within memory budget
    batch_size = 10000 if isSmall == 1 else 1500
    start = 0
    while start < n:
        stop = min(start + batch_size, n)
        batch = {col: np.array(vals)[start:stop] for col, vals in X.items()}
        result = transform(batch, model)
        # Results are deliberately not accumulated -- stacking them would OOM.
        print(result[:1, :])  # print first 1 row
        if stop == n:
            break
        start = stop
@tf.function
def transform(X_dict, model_prep):
    """Graph-compiled application of the preprocessing model to one batch."""
    return model_prep(X_dict)
# Script entry: read data, build the preprocessing model, run batched transform.
isSmall = int(sys.argv[1]) #1M vs 10M subset of Criteo
X = readNprep(isSmall)
t1 = time.time()
model = getLayers(X)
# Lazy transform triggers all models togther -- produce OOM
#res = lazyGraphTransform(X, model, X.shape[0], isSmall)
# Partially lazy mode keeps the slice-look outside of tf.function
batchGraphTransform(X, model, X.shape[0], isSmall)
print("Elapsed time for transformations using tf-keras = %s sec" % (time.time() - t1))
#np.savetxt("X_prep_sk.csv", X_prep, fmt='%1.2f', delimiter=',') #dense
#sp.sparse.save_npz("X_prep_sk.npz", X_prep) #sparse
| [
"tensorflow.keras.layers.Concatenate",
"pandas.read_csv",
"tensorflow.keras.layers.StringLookup",
"numpy.array",
"tensorflow.keras.Input",
"tensorflow.keras.Model",
"time.time",
"warnings.filterwarnings",
"tensorflow.stack",
"numpy.set_printoptions"
] | [((1020, 1067), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (1039, 1067), True, 'import numpy as np\n'), ((1068, 1101), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1091, 1101), False, 'import warnings\n'), ((5797, 5808), 'time.time', 'time.time', ([], {}), '()\n', (5806, 5808), False, 'import time\n'), ((3889, 3923), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'cat_layers'], {}), '(inputs, cat_layers)\n', (3903, 3923), True, 'import tensorflow as tf\n'), ((4912, 4936), 'tensorflow.stack', 'tf.stack', (['allRes'], {'axis': '(0)'}), '(allRes, axis=0)\n', (4920, 4936), True, 'import tensorflow as tf\n'), ((1271, 1340), 'pandas.read_csv', 'pd.read_csv', (['"""~/datasets/criteo_day21_1M"""'], {'delimiter': '""","""', 'header': 'None'}), "('~/datasets/criteo_day21_1M', delimiter=',', header=None)\n", (1282, 1340), True, 'import pandas as pd\n'), ((1412, 1482), 'pandas.read_csv', 'pd.read_csv', (['"""~/datasets/criteo_day21_10M"""'], {'delimiter': '""","""', 'header': 'None'}), "('~/datasets/criteo_day21_10M', delimiter=',', header=None)\n", (1423, 1482), True, 'import pandas as pd\n'), ((2246, 2344), 'tensorflow.keras.layers.StringLookup', 'layers.StringLookup', ([], {'vocabulary': 'vocab', 'output_mode': '"""multi_hot"""', 'num_oov_indices': '(0)', 'sparse': '(True)'}), "(vocabulary=vocab, output_mode='multi_hot',\n num_oov_indices=0, sparse=True)\n", (2265, 2344), False, 'from tensorflow.keras import layers\n'), ((2420, 2483), 'tensorflow.keras.layers.StringLookup', 'layers.StringLookup', ([], {'output_mode': '"""multi_hot"""', 'num_oov_indices': '(0)'}), "(output_mode='multi_hot', num_oov_indices=0)\n", (2439, 2483), False, 'from tensorflow.keras import layers\n'), ((3067, 3119), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(1,)', 'dtype': 'dtype', 'sparse': '(True)'}), '(shape=(1,), dtype=dtype, 
sparse=True)\n', (3081, 3119), True, 'import tensorflow as tf\n'), ((3821, 3841), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {}), '()\n', (3839, 3841), False, 'from tensorflow.keras import layers\n'), ((2519, 2554), 'numpy.array', 'np.array', (['X[name]'], {'dtype': 'np.string_'}), '(X[name], dtype=np.string_)\n', (2527, 2554), True, 'import numpy as np\n'), ((3356, 3376), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {}), '()\n', (3374, 3376), False, 'from tensorflow.keras import layers\n'), ((4227, 4242), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (4235, 4242), True, 'import numpy as np\n'), ((6135, 6146), 'time.time', 'time.time', ([], {}), '()\n', (6144, 6146), False, 'import time\n'), ((5299, 5314), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (5307, 5314), True, 'import numpy as np\n')] |
# Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Glance documentation build configuration file
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the glance package and its scripts importable for autodoc.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../bin'))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'stevedore.sphinxext',
    'sphinx.ext.viewcode',
    'oslo_config.sphinxext',
    'oslo_config.sphinxconfiggen',
    'oslo_policy.sphinxpolicygen',
    'openstackdocstheme',
    'sphinxcontrib.apidoc',
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/glance'
openstackdocs_bug_project = 'glance'
openstackdocs_bug_tag = 'documentation'
# sphinxcontrib.apidoc options
apidoc_module_dir = '../../glance'
apidoc_output_dir = 'contributor/api'
apidoc_excluded_paths = [
    'hacking/*',
    'hacking',
    'tests/*',
    'tests',
    'db/sqlalchemy/*',
    'db/sqlalchemy']
apidoc_separate_modules = True
# Sample-config and policy files generated at doc-build time by oslo.
config_generator_config_file = [
    ('../../etc/oslo-config-generator/glance-api.conf',
     '_static/glance-api'),
    ('../../etc/oslo-config-generator/glance-cache.conf',
     '_static/glance-cache'),
    ('../../etc/oslo-config-generator/glance-manage.conf',
     '_static/glance-manage'),
    ('../../etc/oslo-config-generator/glance-scrubber.conf',
     '_static/glance-scrubber'),
]
policy_generator_config_file = [
    ('../../etc/glance-policy-generator.conf', '_static/glance'),
]
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2010-present, OpenStack Foundation.'
exclude_patterns = [
    # The man directory includes some snippet files that are included
    # in other documents during the build but that should not be
    # included in the toctree themselves, so tell Sphinx to ignore
    # them when scanning for input files.
    'cli/footer.txt',
    'cli/general_options.txt',
    'cli/openstack_options.txt',
]
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['glance.']
# -- Options for man page output --------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
    ('cli/glanceapi', 'glance-api', u'Glance API Server',
     [u'OpenStack'], 1),
    ('cli/glancecachecleaner', 'glance-cache-cleaner', u'Glance Cache Cleaner',
     [u'OpenStack'], 1),
    ('cli/glancecachemanage', 'glance-cache-manage', u'Glance Cache Manager',
     [u'OpenStack'], 1),
    ('cli/glancecacheprefetcher', 'glance-cache-prefetcher',
     u'Glance Cache Pre-fetcher', [u'OpenStack'], 1),
    ('cli/glancecachepruner', 'glance-cache-pruner', u'Glance Cache Pruner',
     [u'OpenStack'], 1),
    ('cli/glancecontrol', 'glance-control', u'Glance Daemon Control Helper ',
     [u'OpenStack'], 1),
    ('cli/glancemanage', 'glance-manage', u'Glance Management Utility',
     [u'OpenStack'], 1),
    ('cli/glancereplicator', 'glance-replicator', u'Glance Replicator',
     [u'OpenStack'], 1),
    ('cli/glancescrubber', 'glance-scrubber', u'Glance Scrubber Service',
     [u'OpenStack'], 1)
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
html_theme = 'openstackdocs'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any paths that contain "extra" files, such as .htaccess or
# robots.txt.
html_extra_path = ['_extra']
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
html_use_index = True
# -- Options for LaTeX output ------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
    ('index', 'Glance.tex', u'Glance Documentation',
     u'Glance Team', 'manual'),
]
| [
"os.path.abspath"
] | [((919, 943), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (934, 943), False, 'import os\n'), ((964, 992), 'os.path.abspath', 'os.path.abspath', (['"""../../bin"""'], {}), "('../../bin')\n", (979, 992), False, 'import os\n')] |
import warnings
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, delayed
from scipy.special import expit
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array, check_random_state
from sklearn.linear_model import LogisticRegression
from tqdm import tqdm
from .multidynet import initialize_node_effects_single
from .omega_lsm import update_omega
from .deltas_lsm import update_deltas
from .lds_lsm import update_latent_positions
from .variances import update_tau_sq, update_sigma_sq
# Public API of this module.
__all__ = ['DynamicNetworkLSM']
class ModelParameters(object):
    """Plain container for the variational parameters of one optimization run."""

    def __init__(self, omega, X, X_sigma, X_cross_cov,
                 delta, delta_sigma,
                 a_tau_sq, b_tau_sq, c_sigma_sq, d_sigma_sq):
        # Optimization bookkeeping starts fresh for every run.
        self.converged_ = False
        self.logp_ = []
        # Auxiliary variables (updated by update_omega).
        self.omega_ = omega
        # Latent positions with their marginal and lag-one cross covariances.
        self.X_ = X
        self.X_sigma_ = X_sigma
        self.X_cross_cov_ = X_cross_cov
        # Node random effects and their variances.
        self.delta_ = delta
        self.delta_sigma_ = delta_sigma
        # Variance hyperparameters (shape/scale pairs).
        self.a_tau_sq_ = a_tau_sq
        self.b_tau_sq_ = b_tau_sq
        self.c_sigma_sq_ = c_sigma_sq
        self.d_sigma_sq_ = d_sigma_sq
def initialize_parameters(Y, n_features, delta_var_prior,
                          a, b, c, d, random_state):
    """Construct the initial ModelParameters for the VI optimization."""
    rng = check_random_state(random_state)
    n_time_steps, n_nodes, _ = Y.shape

    # omega starts at zero (it is refreshed before first use).
    omega = np.zeros((n_time_steps, n_nodes, n_nodes))

    # Random initial latent trajectories.
    X = rng.randn(n_time_steps, n_nodes, n_features)

    # Marginal covariances start at the identity for every node/time.
    X_sigma = np.tile(np.eye(n_features)[None, None],
                      reps=(n_time_steps, n_nodes, 1, 1))
    # Lag-one cross-covariances also start at the identity.
    X_cross_cov = np.tile(np.eye(n_features)[None, None],
                          reps=(n_time_steps - 1, n_nodes, 1, 1))

    # Node effects come from a logistic regression with no higher-order
    # structure; their variance is the prior variance.
    delta = initialize_node_effects_single(Y)
    delta_sigma = delta_var_prior * np.ones(n_nodes)

    # Variance hyperparameters start at their prior values.
    return ModelParameters(
        omega=omega, X=X, X_sigma=X_sigma, X_cross_cov=X_cross_cov,
        delta=delta, delta_sigma=delta_sigma,
        a_tau_sq=a, b_tau_sq=b, c_sigma_sq=c,
        d_sigma_sq=d)
def optimize_elbo(Y, n_features, delta_var_prior, tau_sq, sigma_sq, a, b, c, d,
                  max_iter, tol, random_state, verbose=True):
    """Run coordinate-ascent variational inference for one random start.

    The update order below (omega -> latent positions -> node effects ->
    variances) is part of the algorithm; do not reorder.  Stops when the
    change in the objective falls below ``tol`` or after ``max_iter`` sweeps.

    Returns the fitted ModelParameters (``converged_`` marks early stop).
    """
    # convergence criteria (Eq{L(Y | theta)})
    loglik = -np.infty

    # initialize parameters of the model
    model = initialize_parameters(
        Y, n_features, delta_var_prior, a, b, c, d, random_state)

    for n_iter in tqdm(range(max_iter), disable=not verbose):
        prev_loglik = loglik

        # coordinate ascent

        # omega updates
        loglik = update_omega(
            Y, model.omega_, model.X_, model.X_sigma_,
            model.delta_, model.delta_sigma_)

        # latent trajectory updates
        # 'auto' uses the current posterior mean of the precision; otherwise
        # the fixed user-supplied variance is inverted.
        tau_sq_prec = (
            model.a_tau_sq_ / model.b_tau_sq_ if tau_sq == 'auto' else
            1. / tau_sq)
        sigma_sq_prec = (
            model.c_sigma_sq_ / model.d_sigma_sq_ if sigma_sq == 'auto' else
            1. / sigma_sq)

        update_latent_positions(
            Y, model.X_, model.X_sigma_, model.X_cross_cov_,
            model.delta_, model.omega_, tau_sq_prec, sigma_sq_prec)

        # update node random effects
        update_deltas(
            Y, model.X_, model.delta_, model.delta_sigma_,
            model.omega_, delta_var_prior)

        # update intial variance of the latent space
        if tau_sq == 'auto':
            model.a_tau_sq_, model.b_tau_sq_ = update_tau_sq(
                Y, model.X_, model.X_sigma_, a, b)

        # update step sizes
        if sigma_sq == 'auto':
            model.c_sigma_sq_, model.d_sigma_sq_ = update_sigma_sq(
                Y, model.X_, model.X_sigma_, model.X_cross_cov_, c, d)

        model.logp_.append(loglik)

        # check convergence
        change = loglik - prev_loglik
        if abs(change) < tol:
            model.converged_ = True
            model.logp_ = np.asarray(model.logp_)
            break

    return model
def calculate_probabilities(X, delta):
    """Return the (time, node, node) array of dyad probabilities.

    For each time step t, P[t] = sigmoid(delta_i + delta_j + x_i^T x_j).
    """
    n_time_steps, n_nodes = X.shape[0], X.shape[1]
    delta_col = delta.reshape(-1, 1)
    # Additive node-effect term is constant over time; compute it once.
    effects = delta_col + delta_col.T
    probas = np.zeros((n_time_steps, n_nodes, n_nodes), dtype=np.float64)
    for t in range(n_time_steps):
        probas[t] = expit(effects + X[t] @ X[t].T)
    return probas
class DynamicNetworkLSM(object):
    """Latent space model for a single dynamic network, fit by variational
    inference with multiple random restarts (run in parallel via joblib).

    Parameters mirror optimize_elbo: 'auto' for tau_sq/sigma_sq learns the
    variances; a/b and c/d are their prior shape/scale hyperparameters.
    """
    def __init__(self, n_features=2, delta_var_prior=4,
                 tau_sq='auto', sigma_sq='auto',
                 a=4.0, b=20.0, c=10, d=0.1,
                 n_init=1, max_iter=500, tol=1e-2,
                 n_jobs=-1, random_state=42):
        self.n_features = n_features
        self.delta_var_prior = delta_var_prior
        self.tau_sq = tau_sq
        self.sigma_sq = sigma_sq
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.n_init = n_init
        self.max_iter = max_iter
        self.tol = tol
        self.n_jobs = n_jobs
        self.random_state = random_state

    def fit(self, Y):
        """Fit the model to a dynamic adjacency array.

        Parameters
        ----------
        Y : array-like, shape (n_time_steps, n_nodes, n_nodes)
        """
        Y = check_array(Y, order='C', dtype=np.float64,
                        ensure_2d=False, allow_nd=True, copy=False)

        random_state = check_random_state(self.random_state)

        # run the elbo optimization over different initializations
        seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init)
        # progress bars are only shown for a single restart
        verbose = True if self.n_init == 1 else False
        models = Parallel(n_jobs=self.n_jobs)(delayed(optimize_elbo)(
                Y, self.n_features, self.delta_var_prior,
                self.tau_sq, self.sigma_sq, self.a, self.b, self.c, self.d,
                self.max_iter, self.tol, seed, verbose=verbose)
            for seed in seeds)

        # choose model with the largest convergence criteria
        best_model = models[0]
        best_criteria = models[0].logp_[-1]
        for i in range(1, len(models)):
            if models[i].logp_[-1] > best_criteria:
                best_model = models[i]

        if not best_model.converged_:
            warnings.warn('Best model did not converge. '
                          'Try a different random initialization, '
                          'or increase max_iter, tol '
                          'or check for degenerate data.', ConvergenceWarning)

        self._set_parameters(best_model)

        # calculate dyad-probabilities
        self.probas_ = calculate_probabilities(
            self.X_, self.delta_)

        # calculate in-sample AUC
        #self.auc_ = calculate_auc_layer(Y, self.probas_)

        return self

    def _set_parameters(self, model):
        # Copy fitted variational parameters onto the estimator and derive
        # the posterior-mean variances from the shape/scale pairs.
        self.omega_ = model.omega_
        self.X_ = model.X_
        self.X_sigma_ = model.X_sigma_
        self.X_cross_cov_ = model.X_cross_cov_
        self.delta_ = model.delta_
        self.delta_sigma_ = model.delta_sigma_
        self.a_tau_sq_ = model.a_tau_sq_
        self.b_tau_sq_ = model.b_tau_sq_
        self.tau_sq_ = self.b_tau_sq_ / (self.a_tau_sq_ - 1)
        self.c_sigma_sq_ = model.c_sigma_sq_
        self.d_sigma_sq_ = model.d_sigma_sq_
        self.sigma_sq_ = self.d_sigma_sq_ / (self.c_sigma_sq_ - 1)
        self.logp_ = model.logp_
        return self
| [
"numpy.tile",
"numpy.eye",
"sklearn.utils.check_random_state",
"numpy.ones",
"numpy.add",
"numpy.asarray",
"numpy.iinfo",
"joblib.Parallel",
"numpy.zeros",
"numpy.dot",
"sklearn.utils.check_array",
"warnings.warn",
"joblib.delayed"
] | [((1275, 1307), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (1293, 1307), False, 'from sklearn.utils import check_array, check_random_state\n'), ((1415, 1457), 'numpy.zeros', 'np.zeros', (['(n_time_steps, n_nodes, n_nodes)'], {}), '((n_time_steps, n_nodes, n_nodes))\n', (1423, 1457), True, 'import numpy as np\n'), ((1608, 1626), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (1614, 1626), True, 'import numpy as np\n'), ((1641, 1708), 'numpy.tile', 'np.tile', (['sigma_init[None, None]'], {'reps': '(n_time_steps, n_nodes, 1, 1)'}), '(sigma_init[None, None], reps=(n_time_steps, n_nodes, 1, 1))\n', (1648, 1708), True, 'import numpy as np\n'), ((1771, 1789), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (1777, 1789), True, 'import numpy as np\n'), ((1808, 1879), 'numpy.tile', 'np.tile', (['cross_init[None, None]'], {'reps': '(n_time_steps - 1, n_nodes, 1, 1)'}), '(cross_init[None, None], reps=(n_time_steps - 1, n_nodes, 1, 1))\n', (1815, 1879), True, 'import numpy as np\n'), ((4463, 4523), 'numpy.zeros', 'np.zeros', (['(n_time_steps, n_nodes, n_nodes)'], {'dtype': 'np.float64'}), '((n_time_steps, n_nodes, n_nodes), dtype=np.float64)\n', (4471, 4523), True, 'import numpy as np\n'), ((2070, 2086), 'numpy.ones', 'np.ones', (['n_nodes'], {}), '(n_nodes)\n', (2077, 2086), True, 'import numpy as np\n'), ((5515, 5606), 'sklearn.utils.check_array', 'check_array', (['Y'], {'order': '"""C"""', 'dtype': 'np.float64', 'ensure_2d': '(False)', 'allow_nd': '(True)', 'copy': '(False)'}), "(Y, order='C', dtype=np.float64, ensure_2d=False, allow_nd=True,\n copy=False)\n", (5526, 5606), False, 'from sklearn.utils import check_array, check_random_state\n'), ((5651, 5688), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (5669, 5688), False, 'from sklearn.utils import check_array, check_random_state\n'), ((4293, 4316), 'numpy.asarray', 
'np.asarray', (['model.logp_'], {}), '(model.logp_)\n', (4303, 4316), True, 'import numpy as np\n'), ((5907, 5935), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (5915, 5935), False, 'from joblib import Parallel, delayed\n'), ((6508, 6678), 'warnings.warn', 'warnings.warn', (['"""Best model did not converge. Try a different random initialization, or increase max_iter, tol or check for degenerate data."""', 'ConvergenceWarning'], {}), "(\n 'Best model did not converge. Try a different random initialization, or increase max_iter, tol or check for degenerate data.'\n , ConvergenceWarning)\n", (6521, 6678), False, 'import warnings\n'), ((4628, 4652), 'numpy.add', 'np.add', (['deltas', 'deltas.T'], {}), '(deltas, deltas.T)\n', (4634, 4652), True, 'import numpy as np\n'), ((4655, 4675), 'numpy.dot', 'np.dot', (['X[t]', 'X[t].T'], {}), '(X[t], X[t].T)\n', (4661, 4675), True, 'import numpy as np\n'), ((5794, 5812), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (5802, 5812), True, 'import numpy as np\n'), ((5936, 5958), 'joblib.delayed', 'delayed', (['optimize_elbo'], {}), '(optimize_elbo)\n', (5943, 5958), False, 'from joblib import Parallel, delayed\n')] |
#!/usr/bin/env python
__author__ = "<NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
__credits__ = "<NAME> -- An amazing Linear Algebra Professor"
import cvxopt
import numpy as np
class SupportVectorClassification:
    """
    Hard-margin Support Vector Machine classifier trained by solving the
    dual quadratic program with cvxopt.
    """
    def __init__(self):
        """
        Instantiate class: SupportVectorClassification
        """
        self._bias = np.array([])
        self._weights = np.array([])
    def predict(self, predictors):
        """
        Predict output given a set of predictors
        :param predictors: ndarray -> used to calculate prediction
        :return: ndarray -> sign of the decision function (+1 / -1)
        """
        # BUG FIX: the original method had no body and always returned None.
        return np.sign(np.dot(predictors, self._weights) + self._bias)
    def train(self, predictors, expected_values):
        """
        Train model based on list of predictors and expected value
        :param predictors: list(ndarray) -> list of predictors to train model on
        :param expected_values: list(float) -> list of expected values for given predictors
        :raises ValueError: if the two inputs differ in length
        """
        if len(predictors) != len(expected_values):
            # ValueError is more precise than a bare Exception and stays
            # backward compatible for callers catching Exception.
            raise ValueError('Length of predictors != length of expected values')
        self._generate_optimal_hyperplanes(predictors, expected_values)
    def _generate_optimal_hyperplanes(self, predictors, expected_values):
        """
        Find and generate optimal hyperplanes given set of predictors and expected values
        :param predictors: list(ndarray) -> list of predictors to train model on
        :param expected_values: list(float) -> list of expected values for given predictors
        """
        m = predictors.shape[0]
        # Gram matrix of the training points.
        k = np.array([np.dot(predictors[i], predictors[j]) for j in range(m) for i in range(m)]).reshape((m, m))
        p = cvxopt.matrix(np.outer(expected_values, expected_values)*k)
        q = cvxopt.matrix(-1*np.ones(m))
        equality_constraint1 = cvxopt.matrix(expected_values, (1, m))
        equality_constraint2 = cvxopt.matrix(0.0)
        inequality_constraint1 = cvxopt.matrix(np.diag(-1*np.ones(m)))
        inequality_constraint2 = cvxopt.matrix(np.zeros(m))
        solution = cvxopt.solvers.qp(p, q, inequality_constraint1, inequality_constraint2,
                                      equality_constraint1, equality_constraint2)
        multipliers = np.ravel(solution['x'])
        has_positive_multiplier = multipliers > 1e-7
        sv_multipliers = multipliers[has_positive_multiplier]
        support_vectors = predictors[has_positive_multiplier]
        support_vectors_y = expected_values[has_positive_multiplier]
        # BUG FIX: the original `if support_vectors and ...` truth-tested
        # multi-element numpy arrays, which raises ValueError at runtime;
        # check the number of support vectors instead.
        if sv_multipliers.size > 0:
            # BUG FIX: np.sum over a generator is unreliable; use builtin sum,
            # which adds the ndarray terms element-wise.
            self._weights = sum(multipliers[i]*expected_values[i]*predictors[i] for i in range(len(expected_values)))
            self._bias = np.sum([expected_values[i] - np.dot(self._weights, predictors[i])
                                for i in range(len(predictors))])/len(predictors)
# Tiny smoke test: five 2-D points with +/-1 labels, trained via the dual QP.
svm = SupportVectorClassification()
y = np.array([np.array([1]), np.array([-1]), np.array([-1]), np.array([1]), np.array([-1])])
t_data = np.array([np.array([1, 1]), np.array([2, 2]), np.array([2, 3]), np.array([0, 0]), np.array([2, 4])])
svm.train(t_data, y)
| [
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"numpy.dot",
"cvxopt.matrix",
"numpy.ravel",
"cvxopt.solvers.qp"
] | [((407, 419), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (415, 419), True, 'import numpy as np\n'), ((444, 456), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (452, 456), True, 'import numpy as np\n'), ((1843, 1881), 'cvxopt.matrix', 'cvxopt.matrix', (['expected_values', '(1, m)'], {}), '(expected_values, (1, m))\n', (1856, 1881), False, 'import cvxopt\n'), ((1914, 1932), 'cvxopt.matrix', 'cvxopt.matrix', (['(0.0)'], {}), '(0.0)\n', (1927, 1932), False, 'import cvxopt\n'), ((2083, 2202), 'cvxopt.solvers.qp', 'cvxopt.solvers.qp', (['p', 'q', 'inequality_constraint1', 'inequality_constraint2', 'equality_constraint1', 'equality_constraint2'], {}), '(p, q, inequality_constraint1, inequality_constraint2,\n equality_constraint1, equality_constraint2)\n', (2100, 2202), False, 'import cvxopt\n'), ((2258, 2281), 'numpy.ravel', 'np.ravel', (["solution['x']"], {}), "(solution['x'])\n", (2266, 2281), True, 'import numpy as np\n'), ((2975, 2988), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2983, 2988), True, 'import numpy as np\n'), ((2990, 3004), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (2998, 3004), True, 'import numpy as np\n'), ((3006, 3020), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (3014, 3020), True, 'import numpy as np\n'), ((3022, 3035), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3030, 3035), True, 'import numpy as np\n'), ((3037, 3051), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (3045, 3051), True, 'import numpy as np\n'), ((3073, 3089), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (3081, 3089), True, 'import numpy as np\n'), ((3091, 3107), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (3099, 3107), True, 'import numpy as np\n'), ((3109, 3125), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (3117, 3125), True, 'import numpy as np\n'), ((3127, 3143), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (3135, 3143), True, 'import numpy as np\n'), 
((3145, 3161), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (3153, 3161), True, 'import numpy as np\n'), ((2051, 2062), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (2059, 2062), True, 'import numpy as np\n'), ((1725, 1767), 'numpy.outer', 'np.outer', (['expected_values', 'expected_values'], {}), '(expected_values, expected_values)\n', (1733, 1767), True, 'import numpy as np\n'), ((1800, 1810), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (1807, 1810), True, 'import numpy as np\n'), ((1991, 2001), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (1998, 2001), True, 'import numpy as np\n'), ((1608, 1644), 'numpy.dot', 'np.dot', (['predictors[i]', 'predictors[j]'], {}), '(predictors[i], predictors[j])\n', (1614, 1644), True, 'import numpy as np\n'), ((2772, 2808), 'numpy.dot', 'np.dot', (['self._weights', 'predictors[i]'], {}), '(self._weights, predictors[i])\n', (2778, 2808), True, 'import numpy as np\n')] |
import os
import requests
import json
import time
import random
import math
# Edeka recipe-search API endpoint.
baseUrl = "https://www.edeka.de/rezepte/rezept/suche"
# Base used to turn relative recipe URIs into absolute links.
external_baseUrl = "https://www.edeka.de"
# Page size requested from the API.
resultsPerPage = 50
# Query-string prefix; the page number is appended by the caller.
pageQuery = f"?size={resultsPerPage}&page="
def getJson(url):
    """Fetch *url* and return its decoded JSON payload."""
    return requests.get(url).json()
def getMeta():
    """Return the total recipe count and the number of result pages."""
    payload = getJson(baseUrl)
    total = int(payload["totalCount"])
    return {
        "totalCount": total,
        "pages": math.ceil(total / resultsPerPage),
    }
def getRecipes(pageNumber):
    """Fetch one search-result page and return its list of recipes."""
    page_url = baseUrl + pageQuery + str(pageNumber)
    return getJson(page_url)["recipes"]
def getAllIngredients(recipe):
    """Aggregate all ingredient entries of *recipe* into display strings.

    Entries with the same ingredient name and the same unit have their
    quantities summed; an entry with a different unit (or a falsy new
    quantity) replaces the stored one, matching the original behaviour.

    :param recipe: dict with an "ingredientGroups" list, each group holding
                   "ingredientGroupIngredients" dicts with "ingredient",
                   "quantity" and "unit" keys
    :return: list of "<quantity> <unit> <ingredient>" strings
    """
    ingredients_map = {}
    for ingredientGroup in recipe["ingredientGroups"]:
        for ingredientGroupIngredient in ingredientGroup["ingredientGroupIngredients"]:
            ingredient = ingredientGroupIngredient["ingredient"]
            quantity = ingredientGroupIngredient["quantity"]
            unit = ingredientGroupIngredient["unit"]
            if ingredient in ingredients_map and ingredients_map[ingredient]["unit"] == unit and quantity:
                # BUG FIX: the original tested `... is float`, which compares
                # the value against the float *type* and is never true, so a
                # repeated ingredient overwrote the stored quantity instead
                # of accumulating it.
                previous = ingredients_map[ingredient]["quantity"]
                if previous is not None:
                    ingredients_map[ingredient]["quantity"] = float(previous) + float(quantity)
                else:
                    ingredients_map[ingredient]["quantity"] = float(quantity)
            else:
                ingredients_map[ingredient] = {
                    "quantity": quantity,
                    "unit": unit
                }
    ingredients = []
    for name, info in ingredients_map.items():
        quantity = float(info["quantity"]) if info["quantity"] is not None else None
        unit = info["unit"]
        ingredients.append(f"{quantity} {unit} {name}")
    return ingredients
def addRecipe(recipe):
    """Convert one raw Edeka recipe dict into our scraped-recipe record."""
    title = recipe["title"]
    print('Adding "' + title + '"')
    quantity = str(recipe["servings"]) + " Portionen"
    ingredients = getAllIngredients(recipe)
    img_url = recipe["media"]["images"]["ratio_1:1"]["url"]["mediumLarge"]
    print(quantity)
    print(ingredients)
    return {
        "title": title,
        "external_id": recipe["recipeReference"],
        "external_source": "Edeka Rezepte",
        "external_url": external_baseUrl + recipe["uri"],
        "external_img_url": img_url,
        "quantity": quantity,
        "ingredients": ingredients
    }
def addPageRecipes(pageRecipes):
    """Scrape every recipe of one result page, dropping falsy results."""
    return [scraped
            for scraped in (addRecipe(recipe) for recipe in pageRecipes)
            if scraped]
def saveResults(pageNumber, recipes, prefix):
    """Persist one page of scraped recipes plus summary files.

    Writes recipes/<prefix>_recipes_<page>.json, a de-duplicated list of
    all ingredients and a de-duplicated list of all quantities, creating
    each output directory on demand.
    """
    def ensure_dir(name):
        # Create the output directory the first time it is needed.
        if not os.path.exists(name):
            os.makedirs(name)
    ensure_dir('recipes')
    with open(f"recipes/{prefix}_recipes_{pageNumber}.json", "w", encoding='utf8') as f:
        f.write(json.dumps(recipes))
    ensure_dir('ingredients')
    unique_ingredients = list({ingredient
                               for recipe in recipes
                               for ingredient in recipe["ingredients"]})
    with open(f"ingredients/{prefix}_ingredients_{pageNumber}.txt", "w", encoding='utf8') as f:
        f.write("\n".join(unique_ingredients))
    ensure_dir('quantities')
    unique_quantities = list({recipe["quantity"] for recipe in recipes})
    with open(f"quantities/{prefix}_quantities_{pageNumber}.txt", "w", encoding='utf8') as f:
        f.write("\n".join(unique_quantities))
# Driver: fetch the result-page count, then scrape and persist every page.
meta = getMeta()
print("Meta: ", meta)
# All page indices minus any already-completed ones (the second set is
# empty here, so every page is scraped).
pageNumbers = list(set(range(0, int(meta["pages"]))).difference(set([])))
for pageNumber in pageNumbers:
    print("Page", pageNumber)
    pageRecipes = getRecipes(pageNumber)
    scrapedRecipes = addPageRecipes(pageRecipes)
    saveResults(pageNumber, scrapedRecipes, "edeka")
| [
"os.path.exists",
"json.dumps",
"os.makedirs",
"requests.get"
] | [((2782, 2807), 'os.path.exists', 'os.path.exists', (['"""recipes"""'], {}), "('recipes')\n", (2796, 2807), False, 'import os\n'), ((2818, 2840), 'os.makedirs', 'os.makedirs', (['"""recipes"""'], {}), "('recipes')\n", (2829, 2840), False, 'import os\n'), ((2985, 3014), 'os.path.exists', 'os.path.exists', (['"""ingredients"""'], {}), "('ingredients')\n", (2999, 3014), False, 'import os\n'), ((3025, 3051), 'os.makedirs', 'os.makedirs', (['"""ingredients"""'], {}), "('ingredients')\n", (3036, 3051), False, 'import os\n'), ((3331, 3359), 'os.path.exists', 'os.path.exists', (['"""quantities"""'], {}), "('quantities')\n", (3345, 3359), False, 'import os\n'), ((3370, 3395), 'os.makedirs', 'os.makedirs', (['"""quantities"""'], {}), "('quantities')\n", (3381, 3395), False, 'import os\n'), ((285, 302), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (297, 302), False, 'import requests\n'), ((2949, 2968), 'json.dumps', 'json.dumps', (['recipes'], {}), '(recipes)\n', (2959, 2968), False, 'import json\n')] |
# Generated by Django 3.1.8 on 2021-10-31 02:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Owner and Event tables.
    # Depends on the swappable user model (both tables FK to it via
    # "creator") and on the previous migration of the "dogs" app.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('dogs', '0003_auto_20210928_2058'),
    ]
    operations = [
        migrations.CreateModel(
            name='Owner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('address', models.CharField(max_length=100)),
                ('email', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # Deleting a user cascades to their owners.
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owners', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest records first by default.
                'ordering': ['-id'],
            },
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('activity', models.CharField(max_length=100)),
                ('location', models.CharField(max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # Deleting a user cascades to their events.
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest records first by default.
                'ordering': ['-id'],
            },
        ),
    ]
| [
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((459, 552), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (475, 552), False, 'from django.db import migrations, models\n'), ((582, 614), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (598, 614), False, 'from django.db import migrations, models\n'), ((647, 679), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (663, 679), False, 'from django.db import migrations, models\n'), ((710, 742), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (726, 742), False, 'from django.db import migrations, models\n'), ((771, 803), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (787, 803), False, 'from django.db import migrations, models\n'), ((832, 864), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (848, 864), False, 'from django.db import migrations, models\n'), ((898, 937), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (918, 937), False, 'from django.db import migrations, models\n'), ((971, 1006), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (991, 1006), False, 'from django.db import migrations, models\n'), ((1037, 1156), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': 
'"""owners"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='owners', to=settings.AUTH_USER_MODEL)\n", (1054, 1156), False, 'from django.db import migrations, models\n'), ((1356, 1449), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1372, 1449), False, 'from django.db import migrations, models\n'), ((1474, 1506), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1490, 1506), False, 'from django.db import migrations, models\n'), ((1538, 1570), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1554, 1570), False, 'from django.db import migrations, models\n'), ((1602, 1634), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1618, 1634), False, 'from django.db import migrations, models\n'), ((1668, 1707), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1688, 1707), False, 'from django.db import migrations, models\n'), ((1741, 1776), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1761, 1776), False, 'from django.db import migrations, models\n'), ((1807, 1926), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""events"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='events', to=settings.AUTH_USER_MODEL)\n", (1824, 1926), False, 'from django.db import migrations, models\n')] |
"""
Copyright 2016 adpoliak
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tkinter as tk
import tkinter.ttk
import typing
from distutils.version import LooseVersion
class VersionChoiceDialog(tk.Toplevel):
    """Modal dialog that lets the user pick a version to install.

    After the dialog closes, the caller can read:
      * return_code          -- 'accept' or 'cancel' (None if never closed)
      * chosen_version       -- the last combobox selection (string)
      * keep_while_available, persist, do_not_ask_again -- flattened to
        plain bools by accept_callback (tk variables while the dialog
        is open).
    """
    # Symbolic values for the keep_while_available checkbutton state.
    KEEP = True
    UPGRADE = False
    def accept_callback(self, event=None):
        """Close the dialog with 'accept', flattening tk vars to bools."""
        _ = event
        self.return_code = 'accept'
        self.destroy()
        # Rebind the tk variables to plain Python values so that the
        # dialog's state can still be read after the widget is destroyed.
        self.keep_while_available = self.keep_while_available.get()
        self.persist = self.persist.get()
        self.do_not_ask_again = self.do_not_ask_again.get()
    def cancel_callback(self, event=None):
        """Close the dialog with 'cancel' (also bound to Escape / WM close)."""
        _ = event
        self.return_code = 'cancel'
        self.destroy()
    def update_persist_state(self):
        """Enable the 'Persist Choices' box only when it is meaningful."""
        if self.keep_while_available.get() == self.KEEP or self.do_not_ask_again.get():
            self._persist_changes.config(state=tk.DISABLED)
        else:
            self._persist_changes.config(state=tk.NORMAL)
        self.update_idletasks()
    def keep_callback(self, event=None):
        """React to the 'Keep While Available' checkbutton toggling."""
        _ = event
        self.update_persist_state()
    def noprompt_callback(self, event=None):
        """React to 'Don't Prompt Again': lock/unlock the version chooser."""
        _ = event
        if self.do_not_ask_again.get():
            self._version_chooser.config(state=tk.DISABLED)
            self._version_chooser.set('Always Install Greatest Version Available')
        else:
            self._version_chooser.config(state=tk.NORMAL)
            self._version_chooser.set(self.chosen_version)
        self.update_persist_state()
    def persist_callback(self, event=None):
        """React to 'Persist Choices': gate the two dependent checkbuttons."""
        _ = event
        if self.persist.get():
            self._keep_while_available.config(state=tk.NORMAL)
            self._dont_ask_again.config(state=tk.NORMAL)
        else:
            # Persisting is off: reset and disable the dependent options.
            self.keep_while_available.set(self.UPGRADE)
            self.do_not_ask_again.set(False)
            self._keep_while_available.config(state=tk.DISABLED)
            self._dont_ask_again.config(state=tk.DISABLED)
        self.update_idletasks()
    def combo_callback(self, event=None):
        """React to a combobox selection, tracking the chosen version."""
        _ = event
        self.chosen_version = self._version_chooser.get()
        # Entries tagged ':KEEP' are placeholders that cannot be accepted.
        if self.chosen_version.endswith(':KEEP'):
            self._accept_button.config(state=tk.DISABLED)
            self.persist.set(False)
            self.keep_while_available.set(self.UPGRADE)
            self.do_not_ask_again.set(False)
            self._keep_while_available.config(state=tk.DISABLED)
            self._persist_changes.config(state=tk.DISABLED)
            self._dont_ask_again.config(state=tk.DISABLED)
        else:
            self._accept_button.config(state=tk.NORMAL)
            self._persist_changes.config(state=tk.NORMAL)
        self.update_idletasks()
    def __init__(self, master, versions: typing.List[LooseVersion], persist: typing.Optional[bool] = False,
                 keep: typing.Optional[bool] = False, last_used: typing.Optional[str] = None, *args, **kwargs):
        """Build the modal dialog and block until it is closed.

        :param master: parent Tk widget
        :param versions: selectable version strings for the combobox
        :param persist: initial state of 'Persist Choices'
        :param keep: initial state of 'Keep While Available'
        :param last_used: pre-selected version, or None for a placeholder
        """
        tk.Toplevel.__init__(self, master, *args, **kwargs)
        self.grid()
        for col in range(2):
            self.columnconfigure(col, weight=1)
        for row in range(3):
            self.rowconfigure(row, weight=1)
        self.rowconfigure(3, weight=0)
        self.transient(master)
        self.master = master
        # Escape / window-close cancel; Return accepts.
        self.protocol('WM_DELETE_WINDOW', self.cancel_callback)
        self.bind("<Escape>", self.cancel_callback)
        self.bind("<Return>", self.accept_callback)
        self.title('Choose Version')
        self.return_code = None
        self.chosen_version = None
        self.persist = tk.BooleanVar()
        self.persist.set(persist)
        self.do_not_ask_again = tk.BooleanVar()
        self.do_not_ask_again.set(False)
        self.keep_while_available = tk.BooleanVar()  # self.KEEP or self.UPGRADE or None
        self.keep_while_available.set(keep)
        self._version_chooser = tk.ttk.Combobox(self, justify=tk.LEFT, state='readonly', values=versions,
                                        takefocus=True)
        self._version_chooser.bind('<<ComboboxSelected>>', self.combo_callback)
        self._version_chooser.grid(column=0, row=0, columnspan=2, padx=1, pady=1, ipadx=1, ipady=1,
                                   sticky=tk.N + tk.E + tk.W + tk.S)
        self._initial_focus = self._version_chooser
        self._persist_changes = tk.ttk.Checkbutton(self, state=tk.DISABLED, takefocus=True, text='Persist Choices',
                                              underline=1, variable=self.persist, onvalue=True, offvalue=False,
                                              command=self.persist_callback)
        self._persist_changes.grid(column=0, row=1, padx=1, pady=1, ipadx=1, ipady=1, sticky=tk.N + tk.E + tk.W + tk.S)
        self._keep_while_available = tk.ttk.Checkbutton(self, state=tk.DISABLED, takefocus=True,
                                                  text='Keep While Available', underline=1,
                                                  variable=self.keep_while_available, onvalue=self.KEEP,
                                                  offvalue=self.UPGRADE, command=self.keep_callback)
        self._keep_while_available.grid(column=1, row=1, padx=1, pady=1, ipadx=1, ipady=1,
                                        sticky=tk.N + tk.E + tk.W + tk.S)
        self._dont_ask_again = tk.ttk.Checkbutton(self, state=tk.DISABLED, takefocus=True, text='Don\'t Prompt Again',
                                              underline=0, variable=self.do_not_ask_again, onvalue=True,
                                              offvalue=False, command=self.noprompt_callback)
        self._dont_ask_again.grid(column=0, row=2, columnspan=2, padx=1, pady=1, ipadx=1, ipady=1,
                                  sticky=tk.N + tk.E + tk.W + tk.S)
        self._cancel_button = tk.ttk.Button(self, command=self.cancel_callback, takefocus=True,
                                            default=tk.ACTIVE, text='Cancel',
                                            underline=1)
        self._cancel_button.grid(column=0, row=3, padx=1, pady=1, ipadx=1, ipady=1,
                                 sticky=tk.N + tk.E + tk.W + tk.S)
        self._accept_button = tk.ttk.Button(self, command=self.accept_callback, takefocus=True, default=tk.ACTIVE,
                                            text='Continue', underline=1, state=tk.DISABLED)
        self._accept_button.grid(column=1, row=3, padx=1, pady=1, ipadx=1, ipady=1,
                                 sticky=tk.N + tk.E + tk.W + tk.S)
        # Apply the initial states passed in by the caller.
        if persist:
            self.persist_callback()
        if keep:
            self.keep_callback()
        if last_used is not None:
            self._version_chooser.set(last_used)
            self.combo_callback()
        else:
            self._version_chooser.set('Choose Version...')
        # Make the dialog modal and block until it is closed.
        self.grab_set()
        if not self._initial_focus:
            self._initial_focus = self
        self._initial_focus.focus_set()
        self.wait_window(self)
| [
"tkinter.ttk.Checkbutton",
"tkinter.ttk.Button",
"tkinter.BooleanVar",
"tkinter.Toplevel.__init__",
"tkinter.ttk.Combobox"
] | [((3392, 3443), 'tkinter.Toplevel.__init__', 'tk.Toplevel.__init__', (['self', 'master', '*args'], {}), '(self, master, *args, **kwargs)\n', (3412, 3443), True, 'import tkinter as tk\n'), ((4009, 4024), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {}), '()\n', (4022, 4024), True, 'import tkinter as tk\n'), ((4091, 4106), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {}), '()\n', (4104, 4106), True, 'import tkinter as tk\n'), ((4184, 4199), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {}), '()\n', (4197, 4199), True, 'import tkinter as tk\n'), ((4313, 4406), 'tkinter.ttk.Combobox', 'tk.ttk.Combobox', (['self'], {'justify': 'tk.LEFT', 'state': '"""readonly"""', 'values': 'versions', 'takefocus': '(True)'}), "(self, justify=tk.LEFT, state='readonly', values=versions,\n takefocus=True)\n", (4328, 4406), True, 'import tkinter as tk\n'), ((4784, 4973), 'tkinter.ttk.Checkbutton', 'tk.ttk.Checkbutton', (['self'], {'state': 'tk.DISABLED', 'takefocus': '(True)', 'text': '"""Persist Choices"""', 'underline': '(1)', 'variable': 'self.persist', 'onvalue': '(True)', 'offvalue': '(False)', 'command': 'self.persist_callback'}), "(self, state=tk.DISABLED, takefocus=True, text=\n 'Persist Choices', underline=1, variable=self.persist, onvalue=True,\n offvalue=False, command=self.persist_callback)\n", (4802, 4973), True, 'import tkinter as tk\n'), ((5224, 5440), 'tkinter.ttk.Checkbutton', 'tk.ttk.Checkbutton', (['self'], {'state': 'tk.DISABLED', 'takefocus': '(True)', 'text': '"""Keep While Available"""', 'underline': '(1)', 'variable': 'self.keep_while_available', 'onvalue': 'self.KEEP', 'offvalue': 'self.UPGRADE', 'command': 'self.keep_callback'}), "(self, state=tk.DISABLED, takefocus=True, text=\n 'Keep While Available', underline=1, variable=self.keep_while_available,\n onvalue=self.KEEP, offvalue=self.UPGRADE, command=self.keep_callback)\n", (5242, 5440), True, 'import tkinter as tk\n'), ((5796, 5998), 'tkinter.ttk.Checkbutton', 'tk.ttk.Checkbutton', (['self'], {'state': 
'tk.DISABLED', 'takefocus': '(True)', 'text': '"""Don\'t Prompt Again"""', 'underline': '(0)', 'variable': 'self.do_not_ask_again', 'onvalue': '(True)', 'offvalue': '(False)', 'command': 'self.noprompt_callback'}), '(self, state=tk.DISABLED, takefocus=True, text=\n "Don\'t Prompt Again", underline=0, variable=self.do_not_ask_again,\n onvalue=True, offvalue=False, command=self.noprompt_callback)\n', (5814, 5998), True, 'import tkinter as tk\n'), ((6288, 6405), 'tkinter.ttk.Button', 'tk.ttk.Button', (['self'], {'command': 'self.cancel_callback', 'takefocus': '(True)', 'default': 'tk.ACTIVE', 'text': '"""Cancel"""', 'underline': '(1)'}), "(self, command=self.cancel_callback, takefocus=True, default=\n tk.ACTIVE, text='Cancel', underline=1)\n", (6301, 6405), True, 'import tkinter as tk\n'), ((6670, 6808), 'tkinter.ttk.Button', 'tk.ttk.Button', (['self'], {'command': 'self.accept_callback', 'takefocus': '(True)', 'default': 'tk.ACTIVE', 'text': '"""Continue"""', 'underline': '(1)', 'state': 'tk.DISABLED'}), "(self, command=self.accept_callback, takefocus=True, default=\n tk.ACTIVE, text='Continue', underline=1, state=tk.DISABLED)\n", (6683, 6808), True, 'import tkinter as tk\n')] |
# stdlib imports
import subprocess
import sys
# local imports
from errors import AppliedAlterError
# TODO: Move connection management to schema.py. Instantiate a connection
# before each run() method and close it at the end, using the DB.conn() method.
class Db(object):
    """
    Do not instantiate directly.
    Contains all the methods related to initialization of the environment that the
    script will be running in.
    """
    @classmethod
    def new(cls, config):
        """Bind *config* to the class and reset connection state."""
        cls.config = config
        cls.conn_initialized = False
        return cls
    @classmethod
    def init(cls, force=False):
        """
        Make sure that the table to track revisions is there.

        :param force: drop any existing revision history first
        """
        if force:
            sys.stdout.write('Removing existing history')
            cls.drop_revision()
        sys.stdout.write('Creating revision database\n')
        cls.create_revision()
        sys.stdout.write('Creating history table\n')
        cls.create_history()
        sys.stdout.write('DB Initialized\n')
    @classmethod
    def run_up(cls, alter, force=False, verbose=False):
        """
        Run the up-alter against the DB and record it in the history.
        """
        sys.stdout.write('Running alter: %s\n' % alter.filename)
        filename = alter.abs_filename()
        cls._run_file(filename=filename, exit_on_error=not force, verbose=verbose)
        cls.append_commit(ref=alter.id)
    @classmethod
    def run_down(cls, alter, force=False, verbose=False):
        """
        Run the down-alter against the DB and remove it from the history.
        """
        sys.stdout.write('Running alter: %s\n' % alter.down_filename())
        filename = alter.abs_filename(direction='down')
        cls._run_file(filename=filename, exit_on_error=not force, verbose=verbose)
        cls.remove_commit(ref=alter.id)
    @classmethod
    def _run_file(cls, filename, exit_on_error=True, verbose=False):
        """Execute one alter file via a subprocess, reporting any failure.

        :param filename: absolute path of the alter file to run
        :param exit_on_error: raise AppliedAlterError on a non-zero exit
        :param verbose: echo the subprocess output on failure
        """
        # Used for testing to simulate an error in the running of an alter file
        if getattr(cls, 'auto_throw_error', False) and 'error' in filename:
            command, my_env, stdin_stream = cls.run_file_cmd_with_error(filename)
        else:
            command, my_env, stdin_stream = cls.run_file_cmd(filename)
        if stdin_stream:
            proc = subprocess.Popen(command,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    env=my_env)
            # BUG FIX: the script file was previously opened and never
            # closed; the context manager guarantees it is released.
            with open(filename) as script:
                out, err = proc.communicate(script.read())
        else:
            proc = subprocess.Popen(command,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    env=my_env)
            out, err = proc.communicate()
        if err:
            sys.stderr.write("\n----------------------\n")
            sys.stderr.write(out.rstrip())
            sys.stderr.write(err.rstrip())
            sys.stderr.write("\n----------------------\n")
        if proc.returncode != 0:
            sys.stderr.write('Error')
            if verbose:
                sys.stderr.write("\n----------------------\n")
                sys.stderr.write(out.rstrip())
                sys.stderr.write(err.rstrip())
                sys.stderr.write("\n----------------------\n")
            sys.stderr.write("\n")
            if exit_on_error:
                raise AppliedAlterError('%s execution unsuccessful' % filename)
    @classmethod
    def get_applied_alters(cls):
        """Return the list of alter hashes already applied to the DB."""
        results = cls.execute('SELECT alter_hash FROM %s' % cls.full_table_name)
        alters_hashes = [result[0] for result in results]
        return alters_hashes
| [
"subprocess.Popen",
"errors.AppliedAlterError",
"sys.stderr.write",
"sys.stdout.write"
] | [((820, 868), 'sys.stdout.write', 'sys.stdout.write', (['"""Creating revision database\n"""'], {}), "('Creating revision database\\n')\n", (836, 868), False, 'import sys\n'), ((907, 951), 'sys.stdout.write', 'sys.stdout.write', (['"""Creating history table\n"""'], {}), "('Creating history table\\n')\n", (923, 951), False, 'import sys\n'), ((989, 1025), 'sys.stdout.write', 'sys.stdout.write', (['"""DB Initialized\n"""'], {}), "('DB Initialized\\n')\n", (1005, 1025), False, 'import sys\n'), ((1172, 1228), 'sys.stdout.write', 'sys.stdout.write', (["('Running alter: %s\\n' % alter.filename)"], {}), "('Running alter: %s\\n' % alter.filename)\n", (1188, 1228), False, 'import sys\n'), ((733, 778), 'sys.stdout.write', 'sys.stdout.write', (['"""Removing existing history"""'], {}), "('Removing existing history')\n", (749, 778), False, 'import sys\n'), ((2242, 2354), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'env': 'my_env'}), '(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env=my_env)\n', (2258, 2354), False, 'import subprocess\n'), ((2620, 2709), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'env': 'my_env'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=my_env)\n', (2636, 2709), False, 'import subprocess\n'), ((2885, 2933), 'sys.stderr.write', 'sys.stderr.write', (['"""\n----------------------\n"""'], {}), '("""\n----------------------\n""")\n', (2901, 2933), False, 'import sys\n'), ((3030, 3078), 'sys.stderr.write', 'sys.stderr.write', (['"""\n----------------------\n"""'], {}), '("""\n----------------------\n""")\n', (3046, 3078), False, 'import sys\n'), ((3126, 3151), 'sys.stderr.write', 'sys.stderr.write', (['"""Error"""'], {}), "('Error')\n", (3142, 3151), False, 'import sys\n'), ((3408, 3430), 'sys.stderr.write', 'sys.stderr.write', 
(['"""\n"""'], {}), "('\\n')\n", (3424, 3430), False, 'import sys\n'), ((3192, 3240), 'sys.stderr.write', 'sys.stderr.write', (['"""\n----------------------\n"""'], {}), '("""\n----------------------\n""")\n', (3208, 3240), False, 'import sys\n'), ((3349, 3397), 'sys.stderr.write', 'sys.stderr.write', (['"""\n----------------------\n"""'], {}), '("""\n----------------------\n""")\n', (3365, 3397), False, 'import sys\n'), ((3483, 3540), 'errors.AppliedAlterError', 'AppliedAlterError', (["('%s execution unsuccessful' % filename)"], {}), "('%s execution unsuccessful' % filename)\n", (3500, 3540), False, 'from errors import AppliedAlterError\n')] |
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login
from django.views.generic import View
from django.contrib.auth.models import User
from .forms import UserForm
from .models import UserProfile, Project, Tag
from .search import get_query
def Leaderboard(request):
    """Render the leaderboard page showing the first five users."""
    top_users = User.objects.all()[:5]
    return render(request, 'volunteerapp/leaderboard.html', {"users": top_users})
def ProjectDetail(request, project_id):
    """Render the detail page for one project; 404 when no such project exists."""
    proj = get_object_or_404(Project, pk=project_id)
    return render(request, 'volunteerapp/project_detail.html', {"project": proj})
def Index(request):
    """Render the project listing, optionally narrowed by a tag and/or a
    free-text search over title and description."""
    query_string = ''
    selected_tag = ''
    tags = Tag.objects.all()
    project_set = Project.objects

    # Tag filter: a non-numeric tag id is silently ignored, but a numeric id
    # that matches no Tag still raises (same as the original behaviour).
    raw_tag = request.GET.get('tag')
    if raw_tag:
        try:
            selected_tag = int(raw_tag)
        except ValueError:
            pass
        else:
            project_set = Tag.objects.get(id=selected_tag).project_set

    # Free-text search: only applied when the query is non-blank.
    raw_query = request.GET.get('q')
    if raw_query and raw_query.strip():
        query_string = raw_query
        search_query = get_query(query_string, ['title', 'description'])
        project_set = project_set.filter(search_query).order_by('-created_at')

    context = {
        "projects": project_set.all(),
        "query_string": query_string,
        "tags": tags,
        "selected_tag": selected_tag,
    }
    return render(request, 'volunteerapp/index.html', context)
class UserFormView(View):
    """Registration view: GET renders an empty UserForm, POST creates and
    logs in the new user."""
    form_class = UserForm
    template_name = 'volunteerapp/registration_form.html'

    # display empty form
    def get(self, request):
        form = self.form_class(None)
        return render(request, self.template_name, {'form': form})

    # process form post
    def post(self, request):
        """Validate the submitted form; on success save the user,
        authenticate, log in, and redirect to the index page.
        Falls through to re-rendering the form on any failure."""
        form = self.form_class(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            # any custom validation or cleaning
            username = form.cleaned_data['username']
            password = form.cleaned_data['<PASSWORD>']
            if len(password) < 8: # if password is shorter than 8 chars
                # NOTE(review): redirect(self) passes the view instance rather
                # than a URL name — presumably meant to re-show the form; verify.
                return redirect(self)
            user.save()
            if user.profile is None:
                # NOTE(review): profile assigned after save() is never
                # persisted to the database — confirm intent.
                user.profile = UserProfile()
            # Re-authenticate with the cleaned credentials before login.
            user = authenticate(username=username, password=password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return redirect('volunteerapp:index')
        return render(request, self.template_name, {'form': form})
| [
"django.shortcuts.render",
"django.contrib.auth.authenticate",
"django.shortcuts.get_object_or_404",
"django.contrib.auth.login",
"django.shortcuts.redirect",
"django.contrib.auth.models.User.objects.all"
] | [((407, 464), 'django.shortcuts.render', 'render', (['request', '"""volunteerapp/leaderboard.html"""', 'context'], {}), "(request, 'volunteerapp/leaderboard.html', context)\n", (413, 464), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((520, 561), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Project'], {'pk': 'project_id'}), '(Project, pk=project_id)\n', (537, 561), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((607, 667), 'django.shortcuts.render', 'render', (['request', '"""volunteerapp/project_detail.html"""', 'context'], {}), "(request, 'volunteerapp/project_detail.html', context)\n", (613, 667), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((1458, 1509), 'django.shortcuts.render', 'render', (['request', '"""volunteerapp/index.html"""', 'context'], {}), "(request, 'volunteerapp/index.html', context)\n", (1464, 1509), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((343, 361), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (359, 361), False, 'from django.contrib.auth.models import User\n'), ((1726, 1777), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'form': form}"], {}), "(request, self.template_name, {'form': form})\n", (1732, 1777), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((2577, 2628), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'form': form}"], {}), "(request, self.template_name, {'form': form})\n", (2583, 2628), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((2342, 2392), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (2354, 2392), False, 'from django.contrib.auth import authenticate, login\n'), ((2201, 2215), 'django.shortcuts.redirect', 
'redirect', (['self'], {}), '(self)\n', (2209, 2215), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((2482, 2502), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (2487, 2502), False, 'from django.contrib.auth import authenticate, login\n'), ((2530, 2560), 'django.shortcuts.redirect', 'redirect', (['"""volunteerapp:index"""'], {}), "('volunteerapp:index')\n", (2538, 2560), False, 'from django.shortcuts import render, redirect, get_object_or_404\n')] |
# -*- coding: utf-8 -*-
## \package dbr.log
# MIT licensing
# See: docs/LICENSE.txt
import os, sys
from fileio.fileio import AppendFile
from globals.dateinfo import GetDate
from globals.dateinfo import GetTime
from globals.dateinfo import dtfmt
from globals.paths import PATH_logs
from globals.strings import GetModuleString
from globals.strings import IsString
## Verbosity levels at which the logger will output text
class LogLevel:
    """Verbosity levels understood by the logger; lower value = less verbose."""
    # Explicit values keep the ordering comparisons (<=) used by the logger obvious
    INFO = 0
    WARN = 1
    ERROR = 2
    DEBUG = 3
    TEST = 4
## A log class for outputting messages
#
# TODO: Add 'quiet' (0) log level.
#
# A log that will output messages to the terminal &
# a log text file.
class DebreateLogger:
    ## Mapping of numeric log levels to their lowercase string names
    LogLevelList = {
        LogLevel.INFO: u'info',
        LogLevel.WARN: u'warn',
        LogLevel.ERROR: u'error',
        LogLevel.DEBUG: u'debug',
        LogLevel.TEST: u'test',
    }

    ## Text types accepted for string level names
    #
    #  FIX: 'unicode' only exists on Python 2; referencing it unguarded made
    #  CheckLogLevel & SetLogLevel raise NameError under Python 3.
    try:
        _STRING_TYPES = (unicode, str)
    except NameError:
        _STRING_TYPES = (str,)

    ## Constructor
    #
    #  \param level
    #    \b \e int|str : The level at which messages will be output (default is 2 (ERROR))
    #  \param logsPath
    #    \b \e str : The directory in which log files are created
    def __init__(self, level=LogLevel.ERROR, logsPath=PATH_logs):
        ## The level at which to output messages
        self.LogLevel = level

        ## Directory where logs are located
        self.LogsDir = logsPath

        ## Log file path (one file per date)
        self.LogFile = u'{}/{}.log'.format(self.LogsDir, GetDate(dtfmt.LOG))

        ## Forces space between header & first log entry (changed to None after first entry)
        self.NoStrip = u'\n'

        self.OnInit()

    ## Opens a log file or creates a new one & adds log header with timestamp
    def OnInit(self):
        if not os.path.isdir(self.LogsDir):
            os.makedirs(self.LogsDir)

        # Initialize the log with date & time
        date_time = u'{} {}'.format(GetDate(dtfmt.LOG), GetTime(dtfmt.LOG))
        log_header = u'--------------- Log Start: {} ---------------\n'.format(date_time)

        # Write header to log file
        AppendFile(self.LogFile, log_header, noStrip=u'\n')

    ## Adds footer with timestamp to log file
    def OnClose(self):
        # Don't write to log if user deleted it
        if os.path.isfile(self.LogFile):
            # Close the log with date & time
            date_time = u'{} {}'.format(GetDate(dtfmt.LOG), GetTime(dtfmt.LOG))
            log_footer = u'\n--------------- Log End: {} ---------------\n\n'.format(date_time)

            AppendFile(self.LogFile, log_footer, noStrip=u'\n')

    ## Checks if log can be written at supplied level
    #
    #  \param level
    #    \b \e int|str : The desired message level to output
    #  \return
    #    \b \e int level value, or None for an unrecognized level
    def CheckLogLevel(self, level):
        # Check if level is of type INFO, WARN, ERROR, DEBUG, TEST
        if level in self.LogLevelList:
            return level

        # Check if level is a string value of 'info', 'warn', 'error', 'debug', 'test'
        if isinstance(level, self._STRING_TYPES):
            for L in self.LogLevelList:
                if level.lower() == self.LogLevelList[L].lower():
                    return L

        return None

    ## Prints a message to stdout & logs to file
    #
    #  \param level
    #    Level at which to display the message
    #  \param module
    #    Name of the script/module or the globals.moduleaccess.ModuleAccessCtrl
    #    instance where the message originates
    #  \param message
    #    Message to display
    #  \param details
    #    Optional string or iterable of strings appended as bullet points
    #    (FIX: default was a shared mutable list; None behaves identically)
    #  \param newline
    #    If <b><i>True</i></b>, prepends an empty line to beginning of message
    #  \param pout
    #    Stream to which message should be output (stdout/stderr)
    def LogMessage(self, level, module, message, details=None, newline=False, pout=sys.stdout):
        level = self.CheckLogLevel(level)

        # Use the object to retrieve module string
        if not IsString(module):
            module = GetModuleString(module)

        if (level in self.LogLevelList) and (level <= self.LogLevel):
            l_string = self.LogLevelList[level].upper()
            message = u'{}: [{}] {}'.format(l_string, module, message)

            if details:
                if IsString(details):
                    message += u'\n  • {}'.format(details)
                else:
                    for ITEM in details:
                        message += u'\n  • {}'.format(ITEM)

            if newline:
                message = u'\n{}'.format(message)

            # Message is shown in terminal
            if pout not in (sys.stdout, sys.stderr,):
                print(message)
            else:
                # Need to manually add newline when using sys.stdout/sys.stderr
                pout.write(u'{}\n'.format(message))

            # Open log for writing
            AppendFile(self.LogFile, u'{}\n'.format(message), self.NoStrip)

            # Allow stripping leading & trailing newlines from opened log file
            if self.NoStrip:
                self.NoStrip = None

    ## Show a log message at 'info' level
    #
    #  \param module
    #    Name of the script/module where the message originates
    #  \param message
    #    Message to display
    #  \param newline
    #    If <b><i>True</i></b>, prepends an empty line to beginning of message
    def Info(self, module, message, details=None, newline=False):
        self.LogMessage(u'info', module, message, details=details, newline=newline)

    ## Show a log message at 'warn' level
    #
    #  \param module
    #    Name of the script/module where the message originates
    #  \param message
    #    Message to display
    #  \param newline
    #    If <b><i>True</i></b>, prepends an empty line to beginning of message
    def Warn(self, module, message, details=None, newline=False):
        self.LogMessage(u'warn', module, message, details=details, newline=newline)

    ## Show a log message at 'error' level
    #
    #  Messages at 'error' level are written to stderr
    #
    #  \param module
    #    Name of the script/module where the message originates
    #  \param message
    #    Message to display
    #  \param newline
    #    If <b><i>True</i></b>, prepends an empty line to beginning of message
    def Error(self, module, message, details=None, newline=False):
        self.LogMessage(u'error', module, message, details=details, newline=newline, pout=sys.stderr)

    ## Show a log message at 'debug' level
    #
    #  \param module
    #    Name of the script/module where the message originates
    #  \param message
    #    Message to display
    #  \param newline
    #    If <b><i>True</i></b>, prepends an empty line to beginning of message
    def Debug(self, module, message, details=None, newline=False):
        self.LogMessage(u'debug', module, message, details=details, newline=newline)

    ## Show a log message at 'test' level
    #
    #  \param module
    #    Name of the script/module where the message originates
    #  \param message
    #    Message to display
    #  \param newline
    #    If <b><i>True</i></b>, prepends an empty line to beginning of message
    def Test(self, module, message, details=None, newline=False):
        self.LogMessage(u'test', module, message, details=details, newline=newline)

    ## Sets the level at which messages will be output to terminal & log file
    #
    #  \param level
    #    Level at which to print & output messages (int, digit string, or level name)
    #  \return
    #    <b><i>True</i></b> if log level successfully set
    def SetLogLevel(self, level):
        log_set = False

        # FIX: guard .isdigit() behind a string check so that passing an int
        # no longer raises AttributeError
        if isinstance(level, self._STRING_TYPES) and level.isdigit():
            level = int(level)

        if level in self.LogLevelList:
            self.LogLevel = level
            log_set = True

        elif isinstance(level, self._STRING_TYPES):
            for L in self.LogLevelList:
                if level.lower() == self.LogLevelList[L].lower():
                    self.LogLevel = L
                    log_set = True

        return log_set

    ## Retrieves the current logging level
    #
    #  \return
    #    <b><i>Integer</i></b> logging level
    def GetLogLevel(self):
        return self.LogLevel

    ## Retrieves the current file be written to
    #
    #  \return
    #    <b><i>String</i></b> path of log file
    def GetLogFile(self):
        return self.LogFile
## Instantiated logger with default level & output path
#  Module-level singleton: every importer of dbr.log shares this instance.
Logger = DebreateLogger()
## Checks if logging level is set to 'debug'
#
#  \return
#    <b><i>True</i></b> when the shared Logger is at DEBUG verbosity or above
def DebugEnabled():
    return LogLevel.DEBUG <= Logger.GetLogLevel()
| [
"os.makedirs",
"globals.dateinfo.GetDate",
"os.path.isfile",
"globals.strings.IsString",
"fileio.fileio.AppendFile",
"os.path.isdir",
"globals.strings.GetModuleString",
"globals.dateinfo.GetTime"
] | [((1968, 2019), 'fileio.fileio.AppendFile', 'AppendFile', (['self.LogFile', 'log_header'], {'noStrip': 'u"""\n"""'}), "(self.LogFile, log_header, noStrip=u'\\n')\n", (1978, 2019), False, 'from fileio.fileio import AppendFile\n'), ((2132, 2160), 'os.path.isfile', 'os.path.isfile', (['self.LogFile'], {}), '(self.LogFile)\n', (2146, 2160), False, 'import os, sys\n'), ((1301, 1319), 'globals.dateinfo.GetDate', 'GetDate', (['dtfmt.LOG'], {}), '(dtfmt.LOG)\n', (1308, 1319), False, 'from globals.dateinfo import GetDate\n'), ((1554, 1581), 'os.path.isdir', 'os.path.isdir', (['self.LogsDir'], {}), '(self.LogsDir)\n', (1567, 1581), False, 'import os, sys\n'), ((1586, 1611), 'os.makedirs', 'os.makedirs', (['self.LogsDir'], {}), '(self.LogsDir)\n', (1597, 1611), False, 'import os, sys\n'), ((1683, 1701), 'globals.dateinfo.GetDate', 'GetDate', (['dtfmt.LOG'], {}), '(dtfmt.LOG)\n', (1690, 1701), False, 'from globals.dateinfo import GetDate\n'), ((1703, 1721), 'globals.dateinfo.GetTime', 'GetTime', (['dtfmt.LOG'], {}), '(dtfmt.LOG)\n', (1710, 1721), False, 'from globals.dateinfo import GetTime\n'), ((2363, 2414), 'fileio.fileio.AppendFile', 'AppendFile', (['self.LogFile', 'log_footer'], {'noStrip': 'u"""\n"""'}), "(self.LogFile, log_footer, noStrip=u'\\n')\n", (2373, 2414), False, 'from fileio.fileio import AppendFile\n'), ((3673, 3689), 'globals.strings.IsString', 'IsString', (['module'], {}), '(module)\n', (3681, 3689), False, 'from globals.strings import IsString\n'), ((3703, 3726), 'globals.strings.GetModuleString', 'GetModuleString', (['module'], {}), '(module)\n', (3718, 3726), False, 'from globals.strings import GetModuleString\n'), ((2229, 2247), 'globals.dateinfo.GetDate', 'GetDate', (['dtfmt.LOG'], {}), '(dtfmt.LOG)\n', (2236, 2247), False, 'from globals.dateinfo import GetDate\n'), ((2249, 2267), 'globals.dateinfo.GetTime', 'GetTime', (['dtfmt.LOG'], {}), '(dtfmt.LOG)\n', (2256, 2267), False, 'from globals.dateinfo import GetTime\n'), ((3924, 3941), 
'globals.strings.IsString', 'IsString', (['details'], {}), '(details)\n', (3932, 3941), False, 'from globals.strings import IsString\n')] |
import torch
import torch.utils.data as data
import random
import math
import os
import logging
from utils import config
import pickle
from tqdm import tqdm
import numpy as np
import pprint
pp = pprint.PrettyPrinter(indent=1)
import re
import time
import nltk
class Lang:
    """Bidirectional token <-> index vocabulary.

    Seeded from *init_index2word* (index -> token); new tokens are appended
    with the next free index and their occurrences are counted.
    """

    def __init__(self, init_index2word):
        self.word2index = dict((str(w), int(i)) for i, w in init_index2word.items())
        self.word2count = dict((str(w), 1) for w in init_index2word.values())
        self.index2word = init_index2word
        # Count default tokens
        self.n_words = len(init_index2word)

    def index_words(self, sentence):
        """Register every (whitespace-stripped) token of *sentence*."""
        for token in sentence:
            self.index_word(token.strip())

    def index_word(self, word):
        """Register one token: assign the next index on first sight,
        otherwise bump its occurrence count."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            idx = self.n_words
            self.word2index[word] = idx
            self.word2count[word] = 1
            self.index2word[idx] = word
            self.n_words += 1
def read_langs(vocab):
    """Read the CNN/DailyMail splits from data/cnn_dm_data/ and extend *vocab*.

    Returns (data_train, data_dev, data_test, vocab), where each data dict has
    'context' and 'target' lists of token lists (one entry per line).
    """
    def _read_split(path):
        # One example per line; each line is whitespace-tokenized.
        with open(path, encoding='utf-8') as f:
            return [line.strip().split() for line in f]

    # Vocabulary file: one token per line.
    with open("data/cnn_dm_data/vocab.txt", encoding='utf-8') as f:
        for word in f:
            vocab.index_word(word.strip())

    data_train = {
        'context': _read_split("data/cnn_dm_data/train.source.pre"),
        'target': _read_split("data/cnn_dm_data/train.target.pre"),
    }
    assert len(data_train['context']) == len(data_train['target'])

    data_dev = {
        'context': _read_split("data/cnn_dm_data/val.source.pre"),
        'target': _read_split("data/cnn_dm_data/val.target"),
    }
    assert len(data_dev['context']) == len(data_dev['target'])

    data_test = {
        'context': _read_split("data/cnn_dm_data/test.source.pre"),
        'target': _read_split("data/cnn_dm_data/test.target"),
    }
    assert len(data_test['context']) == len(data_test['target'])

    return data_train, data_dev, data_test, vocab
def load_dataset():
    """Load the preprocessed CNN/DM dataset from the pickle cache, building
    and caching it on a miss; prints a few training examples before returning."""
    cache_path = 'data/cnn_dm/dataset_preproc.p'
    if os.path.exists(cache_path):
        print("LOADING cnn_dm")
        with open(cache_path, "rb") as f:
            data_tra, data_val, data_tst, vocab = pickle.load(f)
    else:
        print("Building dataset...")
        seed_vocab = Lang({config.UNK_idx: "UNK", config.PAD_idx: "PAD",
                          config.EOS_idx: "EOS", config.SOS_idx: "SOS",
                          config.USR_idx: "USR", config.SYS_idx: "SYS",
                          config.CLS_idx: "CLS", config.CLS1_idx: "CLS1",
                          config.Y_idx: "Y"})
        data_tra, data_val, data_tst, vocab = read_langs(vocab=seed_vocab)
        with open(cache_path, "wb") as f:
            pickle.dump([data_tra, data_val, data_tst, vocab], f)
        print("Saved PICKLE")

    for idx in range(3):
        print("Examples:")
        print('[context]:', " ".join(data_tra['context'][idx]))
        print('[target]:', ' '.join(data_tra['target'][idx]))
        print(" ")

    return data_tra, data_val, data_tst, vocab
| [
"pickle.dump",
"os.path.exists",
"pickle.load",
"pprint.PrettyPrinter"
] | [((196, 226), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(1)'}), '(indent=1)\n', (216, 226), False, 'import pprint\n'), ((2640, 2687), 'os.path.exists', 'os.path.exists', (['"""data/cnn_dm/dataset_preproc.p"""'], {}), "('data/cnn_dm/dataset_preproc.p')\n", (2654, 2687), False, 'import os\n'), ((2837, 2851), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2848, 2851), False, 'import pickle\n'), ((3248, 3301), 'pickle.dump', 'pickle.dump', (['[data_tra, data_val, data_tst, vocab]', 'f'], {}), '([data_tra, data_val, data_tst, vocab], f)\n', (3259, 3301), False, 'import pickle\n')] |
from tkinter import *
from tkinter import messagebox
import sys
import os
import signal
import time
from subprocess import *
from tkinter.scrolledtext import ScrolledText
import sqlite3
def file_previous_close():
    """Kill the previously-recorded window's process, if any.

    Reads 'home_id.txt' (last line = PID, second-to-last = page name) and
    SIGKILLs the recorded process unless that page was 'registration'.
    """
    try:
        with open('home_id.txt', 'r') as f:
            lines = f.read().splitlines()
        last_line = lines[-1]
        page = lines[-2]
        if page != 'registration':
            os.kill(int(last_line), signal.SIGKILL)
    # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # Exception still covers a missing/short file, a bad PID and kill errors.
    except Exception:
        print('first instance no need to close previous file')
#file_previous_close()
def writing_id():
    """Record this process in 'home_id.txt': the page name ('registration')
    on the first line and this process's PID on the second."""
    home_id = os.getpid()
    # FIX: context manager guarantees the handle is closed even if a write fails
    # (the original only closed on the happy path).
    with open("home_id.txt", "w+") as file_home_id:
        file_home_id.write('registration\n')
        file_home_id.write(str(home_id))
    print('writing id')
    print(home_id)
def write_message_no(number):
    """Persist the registrant's mobile number to 'message.txt' for the SMS step."""
    # FIX: `with` replaces manual close (leaked on error) and the local no
    # longer shadows the `file` builtin.
    with open("message.txt", "w+") as fh:
        fh.write(number)
def login_details(username, password):
    """Persist login credentials to 'login_details.txt': username on the
    first line, password on the second."""
    # FIX: `with` replaces the manual close, which leaked the handle on error.
    with open("login_details.txt", "w+") as fh:
        fh.write(username + '\n')
        fh.write(password)
#writing_id()
class Home():
    """Registration window for the Omega employee-management app.

    Builds the menu bar and the registration form; on submit inserts the new
    employee into the local SQLite database, hands the mobile number to the
    SMS step and emails a confirmation.
    """

    def __init__(self,master):
        """Build the menu bar and all registration-form widgets on *master*."""
        # --- Menu bar ---------------------------------------------------
        menu = Menu(master)
        master.config(menu=menu)
        home=Menu(menu)
        menu.add_cascade(label='Home',menu=home)
        home.add_command(label='How to use?',command=self.take_a_tour)
        home.add_command(label='Terms of Use',command=self.terms_of_use)
        home.add_separator()
        login_option=Menu(menu)
        menu.add_cascade(label='Register and Login',menu=login_option)
        login_option.add_command(label='Login',command=self.login)
        login_option.add_command(label='Register',command=self.register)
        login_option.add_separator()
        submenu = Menu(menu)
        menu.add_cascade(label='Help', menu=submenu)
        submenu.add_command(label='Contact Us',command=self.contact_us)
        submenu.add_command(label='FAQs', command=self.faq)
        submenu.add_separator()
        about_us=Menu(menu)
        menu.add_cascade(label='About Us',menu=about_us)
        about_us.add_command(label='About us',command=self.about_us)
        about_us.add_separator()
        exit_button=Menu(menu)
        menu.add_cascade(label='Exit',menu=exit_button)
        exit_button.add_command(label='Exit',command=menu.quit)
        #can do a prompt to do yes or no
        exit_button.add_command(label='Minimize',command=self.minimize)
        ##login frame starts here
        # --- Registration form ------------------------------------------
        frame = Frame(master)
        self.username = StringVar(master)
        self.password = StringVar()
        self.name=StringVar()
        # NOTE(review): self.dob is never bound to any widget below — confirm
        self.dob=StringVar()
        self.email=StringVar()
        self.mobile_number=StringVar()
        self.aadhar_number=StringVar()
        # Holds the currently selected employee post
        self.variable = StringVar(master)
        self.address_var=StringVar()
        self.variable.set("Select Post:") # default value
        # NOTE(review): this label is created but never packed — confirm
        post_employee=Label(master,text="Post:")
        #for options menu of post of employee
        w = OptionMenu(master, self.variable, "Maintainance", "Developer", "Engineer","HR")
        w.pack(padx=15,pady=3)
        employee_name=Label(master,text="Name:")
        employee_name.pack(padx=15,pady=4)
        employee_name_entry=Entry(master,bd=5,textvariable=self.name)
        employee_name_entry.pack(padx=24,pady=4)
        Label1 = Label(master, text='Username:')
        Label1.pack(padx=15, pady=5)
        entry1 = Entry(master, bd=5,textvariable=self.username)
        entry1.pack(padx=15, pady=5)
        email_label=Label(master,text='Email:')
        email_label.pack(padx=15,pady=6)
        email_entry=Entry(master,bd=5,textvariable=self.email)
        email_entry.pack(padx=15,pady=6)
        mobile_label=Label(master,text="Mobile:")
        mobile_label.pack(padx=15,pady=7)
        mobile_entry=Entry(master,bd=5,textvariable=self.mobile_number)
        mobile_entry.pack(padx=15,pady=7)
        aadhar_label=Label(master,text="Aadhar:")
        aadhar_label.pack(padx=15,pady=8)
        aadhar_entry=Entry(master,bd=5,textvariable=self.aadhar_number)
        aadhar_entry.pack(padx=15,pady=8)
        Label2 = Label(master, text='Password: ')
        Label2.pack(padx=15, pady=9)
        entry2 = Entry(master,show="*" ,bd=5,textvariable=self.password)
        entry2.pack(padx=15, pady=9)
        address_label=Label(master,text='Address:')
        address_label.pack(padx=15,pady=10)
        large_font = ('Verdana', 30)
        address_entry=Entry(master,textvariable=self.address_var,bd=10,font=large_font)
        address_entry.pack(padx=15,pady=10)
        btn = Button(frame, text='Register', command=self.register_submit)
        btn.pack(side=RIGHT, padx=5)
        frame.pack(padx=100, pady=19)

    def register_submit(self):
        """Insert the new employee into SQLite (salary chosen by post),
        record the mobile number, send a confirmation email, and on success
        launch the login page."""
        # Snapshot all form fields
        self.select_employee_type=self.variable.get()
        self.username_call=self.username.get()
        self.name_call=self.name.get()
        self.email_call=self.email.get()
        self.aadhar_number_call=self.aadhar_number.get()
        self.mobile_number_call=self.mobile_number.get()
        self.password_call=self.password.get()
        self.address_call=self.address_var.get()
        try:
            conn = sqlite3.connect('omega.db')
            register_db_object = conn.cursor()
            login_db_object=conn.cursor()
        # NOTE(review): bare except hides the real connection error — consider
        # narrowing to sqlite3.Error
        except:
            messagebox.showinfo("Failed", "Can't connect to the server")
            print("cant connect")
        # Salary is fixed by post: Maintainance 30000, Developer 130000, else 70000.
        # counter records whether the INSERT succeeded.
        if(self.select_employee_type=='Maintainance'):
            print('Maintainance')
            self.salary='30000'
            try:
                register_db_object.execute("INSERT INTO register (username,password,salary,aadhar_number,email,name,post,phone,address) values (?,?,?,?,?,?,?,?,?)",(self.username_call,self.password_call,self.salary,self.aadhar_number_call,self.email_call,self.name_call,self.select_employee_type,self.mobile_number_call,self.address_call))
                conn.commit()
                login_db_object.execute("INSERT INTO main.login_details (username,password) values (?,?)",(self.username_call,self.password_call))
                conn.commit()
                conn.close()
                counter=1
            except:
                counter=0
                messagebox.showinfo("Failed", "Can't Register :( Username Occupied ")
        elif(self.select_employee_type=='Developer'):
            print('Dev')
            self.salary = '130000'
            try:
                register_db_object.execute(
                    "INSERT INTO register (username,password,salary,aadhar_number,email,name,post,phone,address) values (?,?,?,?,?,?,?,?,?)",
                    (self.username_call, self.password_call, self.salary, self.aadhar_number_call, self.email_call,
                     self.name_call, self.select_employee_type, self.mobile_number_call, self.address_call))
                conn.commit()
                login_db_object.execute("INSERT INTO main.login_details (username,password) values (?,?)",
                                        (self.username_call, self.password_call))
                conn.commit()
                conn.close()
                counter=1
            except:
                counter=0
                messagebox.showinfo("Failed", "Can't Register :( Username Occupied ")
        else:
            self.salary = '70000'
            try:
                register_db_object.execute(
                    "INSERT INTO register (username,password,salary,aadhar_number,email,name,post,phone,address) values (?,?,?,?,?,?,?,?,?)",
                    (self.username_call, self.password_call, self.salary, self.aadhar_number_call, self.email_call,
                     self.name_call, self.select_employee_type, self.mobile_number_call, self.address_call))
                conn.commit()
                login_db_object.execute("INSERT INTO main.login_details (username,password) values (?,?)",
                                        (self.username_call, self.password_call))
                conn.commit()
                conn.close()
                counter=1
            except:
                print("Unable to register")
                messagebox.showinfo("Failed", "Can't Register :( Username Occupied ")
                counter=0
        # Hand the mobile number to the SMS step (best-effort)
        try:
            print("Checking for mobile number")
            write_message_no(self.mobile_number_call)
        except:
            print("Can't Write Mobile Number")
        # Send a confirmation email (best-effort; failures only show a dialog)
        try:
            def send_email(user, pwd, recipient, subject, body):
                """Send *body* to *recipient* via Gmail SMTP with STARTTLS."""
                import smtplib

                FROM = user
                TO = recipient if isinstance(recipient, list) else [recipient]
                SUBJECT = subject
                TEXT = body

                # Prepare actual message
                message = """From: %s\nTo: %s\nSubject: %s\n\n%s
                """ % (FROM, ", ".join(TO), SUBJECT, TEXT)
                server = smtplib.SMTP("smtp.gmail.com", 587)
                server.ehlo()
                server.starttls()
                server.login(user, pwd)
                server.sendmail(FROM, TO, message)
                server.close()
            # SECURITY NOTE(review): sender credentials are empty placeholders
            # here; never commit real credentials — load them from config/env.
            email = ''
            pwd = ''
            recipient = self.email_call
            subject = 'Registered with Omega'
            body = 'Hi '+self.name_call+',\nYou have successfully registered with Omega Employee Management'+'\nRegards,\nTeam Omega'
            send_email(email, pwd, recipient, subject, body)
            print("mailsent")
        except:
            messagebox.showinfo("Error while sending Mail","Mail can't be sent")
        # Only leave the page when the DB insert succeeded
        if(counter!=0):
            messagebox.showinfo("Successfully Registered","Taking you to the Login Page")
            call('python login.py', shell=True)

    def contact_us(self):
        ##declare a message box
        messagebox.showerror("Contact Us","You can contact us at <EMAIL> in case software dosen't respond or for any suggestions for improvements.")
        print("contact")

    def faq(self):
        ##message box indicating faqs
        # NOTE(review): placeholder — no FAQ dialog is implemented yet
        print('he')

    def login(self):
        """Launch the login page as a separate script."""
        print('login ')
        call('python login.py', shell=True)

    def register(self):
        ##create a register Frame
        # NOTE(review): placeholder — this window already IS the register form
        print('register')
        #sys.exit()

    def take_a_tour(self):
        """Open a small window explaining how to register and log in."""
        tour_take=Toplevel()
        tour_take.geometry("220x180")
        tour_take.title("Take a Tour")
        message=Message(tour_take,text="Go to Register and Login section to register as an employee. You need to provide the required details. After registration is completed you can login using your login credentials to access your employee dashboard.\n\n")
        message.grid(row=0,column=1)
        button = Button(tour_take, text="Close", command=tour_take.destroy)
        button.grid(row=6, column=1)
        print('take a tour')

    def terms_of_use(self):
        """Open a window showing the MIT-style terms of use."""
        string_terms='''Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: \n \n The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.'''
        dialog=Toplevel()
        dialog.geometry("310x260")
        dialog.title("Terms of Use")
        message=Message(dialog,text=string_terms)
        message.grid(row=0,column=0)
        button = Button(dialog, text="Close", command=dialog.destroy)
        button.grid(row=4, column=0)
        print('message box having terms of use')

    def about_us(self):
        """Open a window describing the project."""
        top = Toplevel()
        top.geometry("230x200")
        top.title("About Us")
        msg = Message(top, text='This is Omega Employee Management System. This was primarily made as a basic Employee Management System as a project for University Curriculum but later after constantly improving this project and adding features it was made public.\n\n')
        msg.grid(row=0, column=15)
        button = Button(top, text="Okay", command=top.destroy)
        button.grid(row=4, column=15)
        print('Display your info')

    def minimize(self):
        # NOTE(review): placeholder — minimizing is not implemented
        print('minimize the window')
# Application entry point: build the root window, attach the Home view
# (menus + registration form), and start the Tk event loop.
root=Tk()
login_home=Home(root)
root.wm_geometry("1360x1200")
root.title("Register Here")
root.mainloop()
| [
"tkinter.messagebox.showerror",
"smtplib.SMTP",
"sqlite3.connect",
"os.getpid",
"tkinter.messagebox.showinfo"
] | [((640, 651), 'os.getpid', 'os.getpid', ([], {}), '()\n', (649, 651), False, 'import os\n'), ((9831, 9981), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Contact Us"""', '"""You can contact us at <EMAIL> in case software dosen\'t respond or for any suggestions for improvements."""'], {}), '(\'Contact Us\',\n "You can contact us at <EMAIL> in case software dosen\'t respond or for any suggestions for improvements."\n )\n', (9851, 9981), False, 'from tkinter import messagebox\n'), ((5207, 5234), 'sqlite3.connect', 'sqlite3.connect', (['"""omega.db"""'], {}), "('omega.db')\n", (5222, 5234), False, 'import sqlite3\n'), ((9635, 9713), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Successfully Registered"""', '"""Taking you to the Login Page"""'], {}), "('Successfully Registered', 'Taking you to the Login Page')\n", (9654, 9713), False, 'from tkinter import messagebox\n'), ((5352, 5412), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Failed"""', '"""Can\'t connect to the server"""'], {}), '(\'Failed\', "Can\'t connect to the server")\n', (5371, 5412), False, 'from tkinter import messagebox\n'), ((8921, 8956), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (8933, 8956), False, 'import smtplib\n'), ((9529, 9598), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error while sending Mail"""', '"""Mail can\'t be sent"""'], {}), '(\'Error while sending Mail\', "Mail can\'t be sent")\n', (9548, 9598), False, 'from tkinter import messagebox\n'), ((6252, 6321), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Failed"""', '"""Can\'t Register :( Username Occupied """'], {}), '(\'Failed\', "Can\'t Register :( Username Occupied ")\n', (6271, 6321), False, 'from tkinter import messagebox\n'), ((7237, 7306), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Failed"""', '"""Can\'t Register :( Username Occupied """'], {}), '(\'Failed\', "Can\'t Register :( 
Username Occupied ")\n', (7256, 7306), False, 'from tkinter import messagebox\n'), ((8174, 8243), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Failed"""', '"""Can\'t Register :( Username Occupied """'], {}), '(\'Failed\', "Can\'t Register :( Username Occupied ")\n', (8193, 8243), False, 'from tkinter import messagebox\n')] |
import argparse
from yaml import load
from utils import print_dic
from io import open
from toolz.dicttoolz import merge
from runner import Runner
from evaluator import Evaluator
parser = argparse.ArgumentParser(description='Bezos')
parser.add_argument('--config', default='test.yaml',
help='Configuration file')
parser.add_argument('command', choices=['train', 'evaluate'], default='train')
parser.add_argument('--load-dir', action='store', dest='load_dir',
help='Trained model dir', default="./trained_models/")
parser.add_argument('--det', action='store_true',
dest='det', help='Deterministic evaluation')
args = parser.parse_args()
header = """
▀█████████▄ ▄████████ ▄███████▄ ▄██████▄ ▄████████
███ ███ ███ ███ ██▀ ▄██ ███ ███ ███ ███
███ ███ ███ █▀ ▄███▀ ███ ███ ███ █▀
▄███▄▄▄██▀ ▄███▄▄▄ ▀█▀▄███▀▄▄ ███ ███ ███
▀▀███▀▀▀██▄ ▀▀███▀▀▀ ▄███▀ ▀ ███ ███ ▀███████████
███ ██▄ ███ █▄ ▄███▀ ███ ███ ███
███ ███ ███ ███ ███▄ ▄█ ███ ███ ▄█ ███
▄█████████▀ ██████████ ▀████████▀ ▀██████▀ ▄████████▀
"""
def main():
print(header)
stream = open(args.config, 'r')
default = open('./configs/default.yaml', 'r')
parameters = load(stream)
default_parameters = load(default)
if(args.command == 'train'):
parameters = merge(default_parameters, parameters)
print("Training parameters\n-------")
print_dic(parameters)
runner = Runner(**parameters)
runner.run()
else:
parameters = merge(merge(default_parameters, parameters), {
'deterministic_evaluation': args.det,
'load_dir': args.load_dir
})
evaluator = Evaluator(**parameters)
evaluator.evaluate()
if __name__ == "__main__":
main()
| [
"toolz.dicttoolz.merge",
"argparse.ArgumentParser",
"utils.print_dic",
"evaluator.Evaluator",
"yaml.load",
"io.open",
"runner.Runner"
] | [((188, 232), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Bezos"""'}), "(description='Bezos')\n", (211, 232), False, 'import argparse\n'), ((1263, 1285), 'io.open', 'open', (['args.config', '"""r"""'], {}), "(args.config, 'r')\n", (1267, 1285), False, 'from io import open\n'), ((1300, 1335), 'io.open', 'open', (['"""./configs/default.yaml"""', '"""r"""'], {}), "('./configs/default.yaml', 'r')\n", (1304, 1335), False, 'from io import open\n'), ((1353, 1365), 'yaml.load', 'load', (['stream'], {}), '(stream)\n', (1357, 1365), False, 'from yaml import load\n'), ((1391, 1404), 'yaml.load', 'load', (['default'], {}), '(default)\n', (1395, 1404), False, 'from yaml import load\n'), ((1459, 1496), 'toolz.dicttoolz.merge', 'merge', (['default_parameters', 'parameters'], {}), '(default_parameters, parameters)\n', (1464, 1496), False, 'from toolz.dicttoolz import merge\n'), ((1551, 1572), 'utils.print_dic', 'print_dic', (['parameters'], {}), '(parameters)\n', (1560, 1572), False, 'from utils import print_dic\n'), ((1590, 1610), 'runner.Runner', 'Runner', ([], {}), '(**parameters)\n', (1596, 1610), False, 'from runner import Runner\n'), ((1829, 1852), 'evaluator.Evaluator', 'Evaluator', ([], {}), '(**parameters)\n', (1838, 1852), False, 'from evaluator import Evaluator\n'), ((1669, 1706), 'toolz.dicttoolz.merge', 'merge', (['default_parameters', 'parameters'], {}), '(default_parameters, parameters)\n', (1674, 1706), False, 'from toolz.dicttoolz import merge\n')] |
from _revkit import netlist, gate
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
def _to_qiskit(self, circuit=None, with_classical_register=False):
"""
Convert RevKit quantum circuit into Qiskit quantum circuit
:param qiskit.QuantumCircuit circuit: If not `None`, add gates to this circuit
and also use its quantum registers. If the circuit does not have enough
qubit, the method fails. If `None` (default), a new circuit is
constructed.
:param bool with_classical_register: Add a classical register, if new circuit
is constructed (i.e., `circuit` is `None`)
:rtype: qiskit.QuatumCircuit
"""
if circuit is None:
qr = QuantumRegister(self.num_qubits, "qr")
if with_classical_register:
cr = ClassicalRegister(self.num_qubits, "cr")
circuit = QuantumCircuit(qr, cr)
else:
circuit = QuantumCircuit(qr)
# collect all qubits from all quantum registers
qr = [q for reg in circuit.qregs for q in reg]
for g in self.gates:
if g.kind == gate.gate_type.pauli_x:
for t in g.targets:
circuit.x(qr[t])
elif g.kind == gate.gate_type.hadamard:
for t in g.targets:
circuit.h(qr[t])
elif g.kind == gate.gate_type.rotation_z:
for t in g.targets:
circuit.rz(g.angle, qr[t])
elif g.kind == gate.gate_type.cx:
ctrl = g.controls[0]
for t in g.targets:
circuit.cx(qr[int(ctrl)], qr[t])
if not bool(ctrl):
circuit.x(qr[t])
elif g.kind == gate.gate_type.mcx:
ctls = g.controls
# only at most 2 controls and no negative controls
if len(ctls) > 2: raise Exception("X gates cannot have more than 2 controls")
negs = [qr[int(q)] for q in ctls if not bool(q)]
ctls = [qr[int(q)] for q in ctls]
tgts = [qr[q] for q in g.targets]
for t in tgts[1:]:
circuit.cx(tgts[0], t)
for n in negs:
circuit.x(n)
if len(ctls) == 0:
circuit.x(tgts[0])
elif len(ctls) == 1:
circuit.cx(ctls[0], tgts[0])
else:
circuit.ccx(ctls[0], ctls[1], tgts[0])
for n in negs:
circuit.x(n)
for t in tgts[1:]:
circuit.cx(tgts[0], t)
else:
raise Exception(f"Unsupported gate type {g.kind}")
return circuit
netlist.to_qiskit = _to_qiskit
| [
"qiskit.QuantumCircuit",
"qiskit.QuantumRegister",
"qiskit.ClassicalRegister"
] | [((686, 724), 'qiskit.QuantumRegister', 'QuantumRegister', (['self.num_qubits', '"""qr"""'], {}), "(self.num_qubits, 'qr')\n", (701, 724), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((769, 809), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['self.num_qubits', '"""cr"""'], {}), "(self.num_qubits, 'cr')\n", (786, 809), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((826, 848), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr', 'cr'], {}), '(qr, cr)\n', (840, 848), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((875, 893), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr'], {}), '(qr)\n', (889, 893), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n')] |
"""
MIT License
Copyright 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import signal
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from datetime import datetime
from pkg_resources import get_distribution
import numpy as np
import shutil
from hans.tools import abort
from hans.material import Material
from hans.plottools import adaptiveLimits
from hans.integrate import ConservedField
class Problem:
def __init__(self, options, disc, bc, geometry, numerics, material, surface, ic):
"""
Collects all information about a single problem
and contains the methods to run a simulation, based on the problem defintiion."
Parameters
----------
options : dict
Contains IO options.
disc : dict
Contains discretization parameters.
bc : dict
Contains boundary condition parameters.
geometry : dict
Contains geometry parameters.
numerics : dict
Contains numerics parameters.
material : dict
Contains material parameters.
surface : dict
Contains surface parameters.
restart_file : str
Filename of the netCDF file, from which simulation is restarted.
"""
self.options = options
self.disc = disc
self.bc = bc
self.geometry = geometry
self.numerics = numerics
self.material = material
self.surface = surface
self.ic = ic
self.sanity_checks()
def sanity_checks(self):
self.check_options()
self.check_disc()
self.check_geo()
self.check_num()
self.check_mat()
self.check_bc()
if self.ic is not None:
self.check_ic()
if self.surface is not None:
self.check_surface()
print("Sanity checks completed. Start simulation!")
print(60 * "-")
def run(self, out_dir="data", out_name=None, plot=False):
"""
Starts the simulation.
Parameters
----------
out_dir : str
Output directory (default: data).
out_name : str
NetCDF output filename (default: None)
plot : bool
On-the-fly plotting flag (default: False).
"""
# global write options
writeInterval = self.options['writeInterval']
if "maxT" in self.numerics.keys():
maxT = self.numerics["maxT"]
else:
maxT = np.inf
if "maxIt" in self.numerics.keys():
maxIt = self.numerics["maxIt"]
else:
maxIt = np.inf
if "tol" in self.numerics.keys():
tol = self.numerics["tol"]
else:
tol = 0.
# Initial conditions
q_init, t_init = self.get_initial_conditions()
# intialize solution field
self.q = ConservedField(self.disc,
self.bc,
self.geometry,
self.material,
self.numerics,
self.surface,
q_init=q_init,
t_init=t_init)
rank = self.q.comm.Get_rank()
# time stamp of simulation start time
self.tStart = datetime.now()
# Header line for screen output
if rank == 0:
print(f"{'Step':10s}\t{'Timestep':12s}\t{'Time':12s}\t{'Epsilon':12s}", flush=True)
if plot:
# on-the-fly plotting
self.plot(writeInterval)
else:
nc = self.init_netcdf(out_dir, out_name, rank)
i = 0
self._write_mode = 0
while self._write_mode == 0:
# Perform time update
self.q.update(i)
# increase time step
i += 1
# catch signals and execute signal handler
signal.signal(signal.SIGINT, self.receive_signal)
signal.signal(signal.SIGTERM, self.receive_signal)
signal.signal(signal.SIGHUP, self.receive_signal)
signal.signal(signal.SIGUSR1, self.receive_signal)
signal.signal(signal.SIGUSR2, self.receive_signal)
# convergence
if i > 1 and self.q.eps < tol:
self._write_mode = 1
break
# maximum time reached
if round(self.q.time, 15) >= maxT:
self._write_mode = 2
break
# maximum number of iterations reached
if i >= maxIt:
self._write_mode = 3
break
if i % writeInterval == 0:
self.write_to_netcdf(i, nc, mode=self._write_mode)
if rank == 0:
self.write_to_stdout(i, mode=self._write_mode)
self.write_to_netcdf(i, nc, mode=self._write_mode)
if rank == 0:
self.write_to_stdout(i, mode=self._write_mode)
def get_initial_conditions(self):
"""
Return the initial field given by last frame of restart file
or as defined through inputs.
Returns
-------
np.array
Inital field of conserved variables.
tuple
Inital time and timestep
"""
if self.ic is None:
q_init = np.zeros((3, self.disc["Nx"], self.disc["Ny"]))
q_init[0] += self.material["rho0"]
t_init = (0., self.numerics["dt"])
else:
# read last frame of restart file
if self.ic["type"] == "restart":
q_init, t_init = self.read_last_frame()
elif self.ic["type"] == "perturbation":
q_init = np.zeros((3, self.disc["Nx"], self.disc["Ny"]))
q_init[0] += self.material["rho0"]
t_init = (0., self.numerics["dt"])
q_init[0, self.disc["Nx"] // 2, self.disc["Ny"] // 2] *= self.ic["factor"]
elif self.ic["type"] == "longitudinal_wave":
x = np.linspace(0 + self.disc["dx"]/2, self.disc["Lx"] - self.disc["dx"]/2, self.disc["Nx"])
y = np.linspace(0 + self.disc["dy"]/2, self.disc["Ly"] - self.disc["dy"]/2, self.disc["Ny"])
xx, yy = np.meshgrid(x, y, indexing="ij")
q_init = np.zeros((3, self.disc["Nx"], self.disc["Ny"]))
q_init[0] += self.material["rho0"]
k = 2. * np.pi / self.disc["Lx"] * self.ic["nwave"]
q_init[1] += self.ic["amp"] * np.sin(k * xx)
t_init = (0., self.numerics["dt"])
elif self.ic["type"] == "shear_wave":
x = np.linspace(0 + self.disc["dx"]/2, self.disc["Lx"] - self.disc["dx"]/2, self.disc["Nx"])
y = np.linspace(0 + self.disc["dy"]/2, self.disc["Ly"] - self.disc["dy"]/2, self.disc["Ny"])
xx, yy = np.meshgrid(x, y, indexing="ij")
q_init = np.zeros((3, self.disc["Nx"], self.disc["Ny"]))
q_init[0] += self.material["rho0"]
k = 2. * np.pi / self.disc["Lx"] * self.ic["nwave"]
q_init[2] += self.ic["amp"] * np.sin(k * xx)
t_init = (0., self.numerics["dt"])
return q_init, t_init
def read_last_frame(self):
"""
Read last frame from restart file and use as initial values for new run.
Returns
-------
np.array
Solution field at last frame, used as inital field.
tuple
Total time and timestep of last frame.
"""
file = Dataset(self.ic["file"], "r")
rho = np.array(file.variables['rho'])[-1]
jx = np.array(file.variables['jx'])[-1]
jy = np.array(file.variables['jy'])[-1]
dt = float(file.variables["dt"][-1])
t = float(file.variables["time"][-1])
q0 = np.zeros([3] + list(rho.shape))
q0[0] = rho
q0[1] = jx
q0[2] = jy
return q0, (t, dt)
def init_netcdf(self, out_dir, out_name, rank):
"""
Initialize netCDF4 file, create dimensions, variables and metadata.
Parameters
----------
out_dir : str
Output directoy.
out_name : str
Filename prefix.
rank : int
Rank of the MPI communicator
Returns
-------
netCDF4.Dataset
Initialized dataset.
"""
if rank == 0:
if not(os.path.exists(out_dir)):
os.makedirs(out_dir)
if self.ic is None or self.ic["type"] != "restart":
if rank == 0:
if out_name is None:
# default unique filename with timestamp
timestamp = datetime.now().replace(microsecond=0).strftime("%Y-%m-%d_%H%M%S")
name = self.options["name"]
outfile = f"{timestamp}_{name}.nc"
else:
# custom filename with zero padded number
tag = str(len([1 for f in os.listdir(out_dir) if f.startswith(out_name)]) + 1).zfill(4)
outfile = f"{out_name}-{tag}.nc"
self.outpath = os.path.join(out_dir, outfile)
else:
self.outpath = None
self.outpath = self.q.comm.bcast(self.outpath, root=0)
# initialize NetCDF file
parallel = False
if self.q.comm.Get_size() > 1:
parallel = True
nc = Dataset(self.outpath, 'w', parallel=parallel, format='NETCDF3_64BIT_OFFSET')
nc.restarts = 0
nc.createDimension('x', self.disc["Nx"])
nc.createDimension('y', self.disc["Ny"])
nc.createDimension('step', None)
# create unknown variable buffer as timeseries of 2D fields
var0 = nc.createVariable('rho', 'f8', ('step', 'x', 'y'))
var1 = nc.createVariable('jx', 'f8', ('step', 'x', 'y'))
var2 = nc.createVariable('jy', 'f8', ('step', 'x', 'y'))
var0.set_collective(True)
var1.set_collective(True)
var2.set_collective(True)
# create scalar variables
nc.createVariable('time', 'f8', ('step'))
nc.createVariable('mass', 'f8', ('step'))
nc.createVariable('vmax', 'f8', ('step'))
nc.createVariable('vSound', 'f8', ('step'))
nc.createVariable('dt', 'f8', ('step'))
nc.createVariable('eps', 'f8', ('step'))
nc.createVariable('ekin', 'f8', ('step'))
# write metadata
nc.setncattr(f"tStart-{nc.restarts}", self.tStart.strftime("%d/%m/%Y %H:%M:%S"))
nc.setncattr("version", get_distribution('hans').version)
disc = self.disc.copy()
bc = self.bc.copy()
categories = {"options": self.options,
"disc": disc,
"bc": bc,
"geometry": self.geometry,
"numerics": self.numerics,
"material": self.material}
if self.surface is not None:
categories["surface"] = self.surface
if self.ic is not None:
categories["ic"] = self.ic
# reset modified input dictionaries
bc["x0"] = "".join(bc["x0"])
bc["x1"] = "".join(bc["x1"])
bc["y0"] = "".join(bc["y0"])
bc["y1"] = "".join(bc["y1"])
del disc["nghost"]
del disc["pX"]
del disc["pY"]
for name, cat in categories.items():
for key, value in cat.items():
nc.setncattr(f"{name}_{key}", value)
else:
# append to existing netCDF file
parallel = False
if self.q.comm.Get_size() > 1:
parallel = True
nc = Dataset(self.ic["file"], 'a', parallel=parallel, format='NETCDF3_64BIT_OFFSET')
self.outpath = os.path.relpath(self.ic["file"])
backup_file = f"{os.path.splitext(self.ic['file'])[0]}-{nc.restarts}.nc"
# create backup
if rank == 0:
shutil.copy(self.ic["file"], backup_file)
# increase restart counter
nc.restarts += 1
# append modified attributes
nc.setncattr(f"tStart-{nc.restarts}", self.tStart.strftime("%d/%m/%Y %H:%M:%S"))
for key, value in self.numerics.items():
name = f"numerics_{key}-{nc.restarts}"
nc.setncattr(name, value)
nc.setncattr(f"ic_type-{nc.restarts}", "restart")
nc.setncattr(f"ic_file-{nc.restarts}", backup_file)
return nc
def write_to_stdout(self, i, mode):
"""
Write information about the current time step to stdout.
Parameters
----------
i : int
Current time step.
mode : int
Writing mode (0: normal, 1: converged, 2: max time, 3: execution stopped).
"""
print(f"{i:10d}\t{self.q.dt:.6e}\t{self.q.time:.6e}\t{self.q.eps:.6e}", flush=True)
if mode == 1:
print(f"\nSolution has converged after {i:d} steps. Output written to: {self.outpath}", flush=True)
elif mode == 2:
print(f"\nNo convergence within {i: d} steps.", end=" ", flush=True)
print(f"Stopping criterion: maximum time {self.numerics['maxT']: .1e} s reached.", flush=True)
print(f"Output written to: {self.outpath}", flush=True)
elif mode == 3:
print(f"\nNo convergence within {i: d} steps.", end=" ", flush=True)
print(f"Stopping criterion: maximum number of iterations reached.", flush=True)
print(f"Output written to: {self.outpath}", flush=True)
elif mode == 4:
print(f"Execution stopped. Output written to: {self.outpath}", flush=True)
if mode > 0:
walltime = datetime.now() - self.tStart
print(f"Total wall clock time: {str(walltime).split('.')[0]}", end=" ", flush=True)
print(f"(Performance: {i/walltime.total_seconds(): .2f} steps/s", end=" ", flush=True)
print(f"on {self.q.comm.dims[0]} x {self.q.comm.dims[1]} MPI grid)", flush=True)
def write_to_netcdf(self, i, nc, mode):
"""
Append current solution field to netCDF file.
Parameters
----------
i : int
Current time step.
nc : netCDF4.Dataset
NetCDF Dataset object.
mode : int
Writing mode (0: normal, 1: converged, 2: max time, 3: execution stopped).
"""
step = nc.variables["rho"].shape[0]
xrange, yrange = self.q.without_ghost
nc.variables['rho'][step, xrange, yrange] = self.q.inner[0]
nc.variables['jx'][step, xrange, yrange] = self.q.inner[1]
nc.variables['jy'][step, xrange, yrange] = self.q.inner[2]
nc.variables["time"][step] = self.q.time
nc.variables["mass"][step] = self.q.mass
nc.variables["vmax"][step] = self.q.vmax
nc.variables["vSound"][step] = self.q.vSound
nc.variables["dt"][step] = self.q.dt
nc.variables["eps"][step] = self.q.eps
nc.variables["ekin"][step] = self.q.ekin
if mode > 0:
nc.setncattr(f"tEnd-{nc.restarts}", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
nc.close()
def receive_signal(self, signum, frame):
"""
Signal handler. Catches signals send to the process and sets write mode to 3 (abort).
Parameters
----------
signum :
signal code
frame :
Description of parameter `frame`.
"""
if signum in [signal.SIGINT, signal.SIGTERM, signal.SIGHUP, signal.SIGUSR1]:
self._write_mode = 4
def plot(self, writeInterval):
"""
Initialize on-the-fly plotting.
Parameters
----------
writeInterval : int
Write interval for stdout in plotting mode.
"""
fig, ax = plt.subplots(2, 2, figsize=(14, 9), sharex=True)
Nx = self.disc["Nx"]
dx = self.disc["dx"]
x = np.arange(Nx) * dx + dx / 2
ax[0, 0].plot(x, self.q.centerline_x[1])
ax[0, 1].plot(x, self.q.centerline_x[2])
ax[1, 0].plot(x, self.q.centerline_x[0])
ax[1, 1].plot(x, Material(self.material).eos_pressure(self.q.centerline_x[0]))
ax[0, 0].set_title(r'$j_x$')
ax[0, 1].set_title(r'$j_y$')
ax[1, 0].set_title(r'$\rho$')
ax[1, 1].set_title(r'$p$')
ax[1, 0].set_xlabel('distance x (m)')
ax[1, 1].set_xlabel('distance x (m)')
def init():
pass
_ = animation.FuncAnimation(fig,
self.animate1D,
100000,
fargs=(fig, ax, writeInterval),
interval=1,
init_func=init,
repeat=False)
plt.show()
def animate1D(self, i, fig, ax, writeInterval):
"""
Animator function. Update solution and plots.
Parameters
----------
i : type
Current time step.
fig : matplotlib.figure
Figure object.
ax : np.array
Array containing the axes of the figure.
writeInterval : int
Write interval for stdout in plotting mode.
"""
self.q.update(i)
fig.suptitle('time = {:.2f} ns'.format(self.q.time * 1e9))
ax[0, 0].lines[0].set_ydata(self.q.centerline_x[1])
ax[0, 1].lines[0].set_ydata(self.q.centerline_x[2])
ax[1, 0].lines[0].set_ydata(self.q.centerline_x[0])
ax[1, 1].lines[0].set_ydata(Material(self.material).eos_pressure(self.q.centerline_x[0]))
ax = adaptiveLimits(ax)
if i % writeInterval == 0:
print(f"{i:10d}\t{self.q.dt:.6e}\t{self.q.time:.6e}\t{self.q.eps:.6e}", flush=True)
def check_options(self):
"""
Sanity check for I/O options input.
"""
print("Checking I/O options... ")
try:
writeInterval = int(self.options["writeInterval"])
assert writeInterval > 0
except KeyError:
print("***Output interval not given, fallback to 1000")
self.options["writeInterval"] = 1000
except AssertionError:
try:
assert writeInterval != 0
except AssertionError:
print("***Output interval is zero. fallback to 1000")
self.options["writeInterval"] = 1000
else:
print("***Output interval is negative. Converting to positive value.")
writeInterval *= -1
self.options["writeInterval"] = writeInterval
def check_disc(self):
"""
Sanity check for discretization input.
[Nx, Ny] are required, then look for [Lx, Ly] or [dx, dy] (in that order).
"""
print("Checking discretization... ")
try:
self.disc["Nx"] = int(self.disc['Nx'])
assert self.disc["Nx"] > 0
except KeyError:
print("***Number of grid cells Nx not specified. Abort.")
abort()
except AssertionError:
print("***Number of grid cells Nx must be larger than zero. Abort")
abort()
try:
self.disc["Ny"] = int(self.disc['Ny'])
assert self.disc["Ny"] > 0
except KeyError:
print("***Number of grid cells 'Ny' not specified. Abort.")
abort()
except AssertionError:
print("***Number of grid cells 'Ny' must be larger than zero. Abort")
abort()
try:
self.disc["Lx"] = float(self.disc["Lx"])
except KeyError:
try:
self.disc["dx"] = float(self.disc["dx"])
except KeyError:
print("At least two of 'Nx' 'Lx', 'dx' must be given. Abort.")
abort()
else:
self.disc["Lx"] = self.disc["dx"] * self.disc["Nx"]
else:
self.disc["dx"] = self.disc["Lx"] / self.disc["Nx"]
try:
self.disc["Ly"] = float(self.disc["Ly"])
except KeyError:
try:
self.disc["dy"] = float(self.disc["dy"])
except KeyError:
print("At least two of 'Ny' 'Ly', 'dy' must be given. Abort.")
abort()
else:
self.disc["Ly"] = self.disc["dy"] * self.disc["Ny"]
else:
self.disc["dy"] = self.disc["Ly"] / self.disc["Ny"]
def check_geo(self):
"""
Sanity check for geometry input.
"""
print("Checking geometry... ")
if self.geometry["type"] in ["journal", "journal_x", "journal_y"]:
self.geometry["CR"] = float(self.geometry["CR"])
self.geometry["eps"] = float(self.geometry["eps"])
elif self.geometry["type"] == "parabolic":
self.geometry["hmin"] = float(self.geometry['hmin'])
self.geometry["hmax"] = float(self.geometry['hmax'])
elif self.geometry["type"] == "twin_parabolic":
self.geometry["hmin"] = float(self.geometry['hmin'])
self.geometry["hmax"] = float(self.geometry['hmax'])
elif self.geometry["type"] in ["inclined", "inclined_x", "inclined_y"]:
self.geometry["h1"] = float(self.geometry['h1'])
self.geometry["h2"] = float(self.geometry['h2'])
elif self.geometry["type"] == "inclined_pocket":
self.geometry["h1"] = float(self.geometry['h1'])
self.geometry["h2"] = float(self.geometry['h2'])
self.geometry["hp"] = float(self.geometry['hp'])
self.geometry["c"] = float(self.geometry['c'])
self.geometry["l"] = float(self.geometry['l'])
self.geometry["w"] = float(self.geometry['w'])
elif self.geometry["type"] in ["half_sine", "half_sine_squared"]:
self.geometry["h0"] = float(self.geometry['h0'])
self.geometry["amp"] = float(self.geometry['amp'])
self.geometry["num"] = float(self.geometry['num'])
def check_num(self):
"""
Sanity check for numerics options.
"""
print("Checking numerics options... ")
try:
self.numerics["integrator"] = self.numerics["integrator"]
assert self.numerics["integrator"] in ["MC", "MC_bf", "MC_fb", "MC_alt", "LW", "RK3"]
except KeyError:
print("***Integrator not specified. Use default (MacCormack).")
self.numerics["integrator"] = "MC"
except AssertionError:
print(f'***Unknown integrator \'{self.numerics["integrator"]}\'. Abort.')
abort()
if self.numerics["integrator"].startswith("MC"):
try:
self.numerics["fluxLim"] = float(self.numerics["fluxLim"])
except KeyError:
pass
try:
self.numerics["stokes"] = int(self.numerics["stokes"])
except KeyError:
print("***Boolean parameter 'stokes' not given. Use default (True).")
self.numerics["stokes"] = 1
try:
self.numerics["adaptive"] = int(self.numerics["adaptive"])
except KeyError:
print("***Boolean parameter 'adaptive' not given. Use default (False).")
self.numerics["adaptive"] = 0
if self.numerics["adaptive"] == 1:
try:
self.numerics["C"] = float(self.numerics["C"])
except KeyError:
print("***CFL number not given. Use default (0.5).")
self.numerics["C"] = 0.5
try:
self.numerics["dt"] = float(self.numerics["dt"])
except KeyError:
print("***Timestep not given. Use default (1e-10).")
self.numerics["dt"] = 1e-10
stopping_criteria = 0
try:
self.numerics["tol"] = float(self.numerics["tol"])
stopping_criteria += 1
except KeyError:
pass
try:
self.numerics["maxT"] = float(self.numerics["maxT"])
stopping_criteria += 1
except KeyError:
pass
try:
self.numerics["maxIt"] = int(self.numerics["maxIt"])
stopping_criteria += 1
except KeyError:
pass
if stopping_criteria == 0:
print("***No stopping criterion given. Abort.")
abort()
if self.numerics["integrator"] == "RK3":
self.disc["nghost"] = 2
else:
self.disc["nghost"] = 1
def check_mat(self):
"""
Sanity check on material settings.
"""
print("Checking material options... ")
if self.material["EOS"] == "DH":
self.material["rho0"] = float(self.material["rho0"])
self.material["P0"] = float(self.material["P0"])
self.material["C1"] = float(self.material["C1"])
self.material["C2"] = float(self.material["C2"])
elif self.material["EOS"] == "PL":
self.material["rho0"] = float(self.material["rho0"])
self.material["P0"] = float(self.material["P0"])
self.material["alpha"] = float(self.material['alpha'])
elif self.material["EOS"] == "vdW":
self.material["M"] = float(self.material['M'])
self.material["T"] = float(self.material['T0'])
self.material["a"] = float(self.material['a'])
self.material["b"] = float(self.material['b'])
elif self.material["EOS"] == "Tait":
self.material["rho0"] = float(self.material["rho0"])
self.material["P0"] = float(self.material["P0"])
self.material["K"] = float(self.material['K'])
self.material["n"] = float(self.material['n'])
elif self.material["EOS"] == "cubic":
self.material["a"] = float(self.material['a'])
self.material["b"] = float(self.material['b'])
self.material["c"] = float(self.material['c'])
self.material["d"] = float(self.material['d'])
elif self.material["EOS"].startswith("Bayada"):
self.material["cl"] = float(self.material["cl"])
self.material["cv"] = float(self.material["cv"])
self.material["rhol"] = float(self.material["rhol"])
self.material["rhov"] = float(self.material["rhov"])
self.material["shear"] = float(self.material["shear"])
self.material["shearv"] = float(self.material["shearv"])
self.material["rhov"] = float(self.material["rhov"])
self.material["shear"] = float(self.material["shear"])
self.material["bulk"] = float(self.material["bulk"])
if "Pcav" in self.material.keys():
self.material["Pcav"] = float(self.material["Pcav"])
if "piezo" in self.material.keys():
if self.material["piezo"] == "Barus":
self.material["aB"] = float(self.material["aB"])
elif self.material["piezo"] == "Vogel":
self.material["rho0"] = float(self.material['rho0'])
self.material["g"] = float(self.material["g"])
self.material["mu_inf"] = float(self.material["mu_inf"])
self.material["phi_inf"] = float(self.material["phi_inf"])
self.material["BF"] = float(self.material["BF"])
if "thinning" in self.material.keys():
if self.material["thinning"] == "Eyring":
self.material["tau0"] = float(self.material["tau0"])
elif self.material["thinning"] == "Carreau":
self.material["relax"] = float(self.material["relax"])
self.material["a"] = float(self.material["a"])
self.material["N"] = float(self.material["N"])
elif self.material["thinning"] == "PL":
self.material["shear"] = float(self.material["shear"])
self.material["n"] = float(self.material["n"])
if "PLindex" in self.material.keys():
self.material["PLindex"] = float(self.material["PLindex"])
def check_surface(self):
"""
Sanity check for surface input.
"""
print("Checking surface parameters... ")
if "lslip" in self.surface.keys():
self.surface["lslip"] = float(self.surface["lslip"])
else:
self.surface["lslip"] = 0.
if self.surface["type"] in ["stripes", "stripes_x", "stripes_y"]:
try:
self.surface["num"] = int(self.surface["num"])
except KeyError:
self.surface["num"] = 1
try:
self.surface["sign"] = int(self.surface["sign"])
except KeyError:
self.surface["sign"] = -1
def check_ic(self):
"""
Sanity check for initial conditions input.
"""
print("Checking initial conditions... ")
if self.ic["type"] != "restart":
if self.ic["type"] == "perturbation":
self.ic["factor"] = float(self.ic["factor"])
elif self.ic["type"] in ["longitudinal_wave", "shear_wave"]:
self.ic["amp"] = float(self.ic["amp"])
if "nwave" in self.ic.keys():
self.ic["nwave"] = int(self.ic["nwave"])
else:
self.ic["nwave"] = 1
def check_bc(self):
"""
Sanity check for boundary condition input.
Parameters
----------
bc : dict
Boundary condition parameters read from yaml input file.
disc : dict
Discretization parameters.
material : dict
Material parameters.
Returns
-------
dict
Boundary condition parameters.
"""
print("Checking boundary conditions... ")
self.bc["x0"] = np.array(list(self.bc["x0"]))
self.bc["x1"] = np.array(list(self.bc["x1"]))
self.bc["y0"] = np.array(list(self.bc["y0"]))
self.bc["y1"] = np.array(list(self.bc["y1"]))
assert len(self.bc["x0"]) == 3
assert len(self.bc["x1"]) == 3
assert len(self.bc["y0"]) == 3
assert len(self.bc["y1"]) == 3
if "P" in self.bc["x0"] and "P" in self.bc["x1"]:
self.disc["pX"] = 1
else:
self.disc["pX"] = 0
if "P" in self.bc["y0"] and "P" in self.bc["y1"]:
self.disc["pY"] = 1
else:
self.disc["pY"] = 0
if "D" in self.bc["x0"]:
if "px0" in self.bc.keys():
px0 = float(self.bc["px0"])
self.bc["rhox0"] = Material(self.material).eos_density(px0)
else:
self.bc["rhox0"] = self.material["rho0"]
if "D" in self.bc["x1"]:
if "px1" in self.bc.keys():
px1 = float(self.bc["px1"])
self.bc["rhox1"] = Material(self.material).eos_density(px1)
else:
self.bc["rhox1"] = self.material["rho0"]
if "D" in self.bc["y0"]:
if "py0" in self.bc.keys():
py0 = float(self.bc["py0"])
self.bc["rhoy0"] = Material(self.material).eos_density(py0)
else:
self.bc["rhoy0"] = self.material["rho0"]
if "D" in self.bc["y1"]:
if "py1" in self.bc.keys():
py1 = float(self.bc["py1"])
self.bc["rhoy1"] = Material(self.material).eos_density(py1)
else:
self.bc["rhoy1"] = self.material["rho0"]
assert np.all((self.bc["x0"] == "P") == (self.bc["x1"] == "P")), "Inconsistent boundary conditions (x)"
assert np.all((self.bc["y0"] == "P") == (self.bc["y1"] == "P")), "Inconsistent boundary conditions (y)"
| [
"hans.tools.abort",
"numpy.array",
"numpy.sin",
"numpy.arange",
"os.path.exists",
"os.listdir",
"netCDF4.Dataset",
"numpy.linspace",
"numpy.meshgrid",
"pkg_resources.get_distribution",
"os.path.relpath",
"os.path.splitext",
"shutil.copy",
"hans.plottools.adaptiveLimits",
"hans.material.M... | [((3950, 4078), 'hans.integrate.ConservedField', 'ConservedField', (['self.disc', 'self.bc', 'self.geometry', 'self.material', 'self.numerics', 'self.surface'], {'q_init': 'q_init', 't_init': 't_init'}), '(self.disc, self.bc, self.geometry, self.material, self.\n numerics, self.surface, q_init=q_init, t_init=t_init)\n', (3964, 4078), False, 'from hans.integrate import ConservedField\n'), ((4406, 4420), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4418, 4420), False, 'from datetime import datetime\n'), ((8815, 8844), 'netCDF4.Dataset', 'Dataset', (["self.ic['file']", '"""r"""'], {}), "(self.ic['file'], 'r')\n", (8822, 8844), False, 'from netCDF4 import Dataset\n'), ((17398, 17446), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(14, 9)', 'sharex': '(True)'}), '(2, 2, figsize=(14, 9), sharex=True)\n', (17410, 17446), True, 'import matplotlib.pyplot as plt\n'), ((18073, 18203), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'self.animate1D', '(100000)'], {'fargs': '(fig, ax, writeInterval)', 'interval': '(1)', 'init_func': 'init', 'repeat': '(False)'}), '(fig, self.animate1D, 100000, fargs=(fig, ax,\n writeInterval), interval=1, init_func=init, repeat=False)\n', (18096, 18203), True, 'import matplotlib.animation as animation\n'), ((18425, 18435), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18433, 18435), True, 'import matplotlib.pyplot as plt\n'), ((19260, 19278), 'hans.plottools.adaptiveLimits', 'adaptiveLimits', (['ax'], {}), '(ax)\n', (19274, 19278), False, 'from hans.plottools import adaptiveLimits\n'), ((33229, 33285), 'numpy.all', 'np.all', (["((self.bc['x0'] == 'P') == (self.bc['x1'] == 'P'))"], {}), "((self.bc['x0'] == 'P') == (self.bc['x1'] == 'P'))\n", (33235, 33285), True, 'import numpy as np\n'), ((33341, 33397), 'numpy.all', 'np.all', (["((self.bc['y0'] == 'P') == (self.bc['y1'] == 'P'))"], {}), "((self.bc['y0'] == 'P') == (self.bc['y1'] == 'P'))\n", 
(33347, 33397), True, 'import numpy as np\n'), ((6562, 6609), 'numpy.zeros', 'np.zeros', (["(3, self.disc['Nx'], self.disc['Ny'])"], {}), "((3, self.disc['Nx'], self.disc['Ny']))\n", (6570, 6609), True, 'import numpy as np\n'), ((8860, 8891), 'numpy.array', 'np.array', (["file.variables['rho']"], {}), "(file.variables['rho'])\n", (8868, 8891), True, 'import numpy as np\n'), ((8909, 8939), 'numpy.array', 'np.array', (["file.variables['jx']"], {}), "(file.variables['jx'])\n", (8917, 8939), True, 'import numpy as np\n'), ((8957, 8987), 'numpy.array', 'np.array', (["file.variables['jy']"], {}), "(file.variables['jy'])\n", (8965, 8987), True, 'import numpy as np\n'), ((10742, 10818), 'netCDF4.Dataset', 'Dataset', (['self.outpath', '"""w"""'], {'parallel': 'parallel', 'format': '"""NETCDF3_64BIT_OFFSET"""'}), "(self.outpath, 'w', parallel=parallel, format='NETCDF3_64BIT_OFFSET')\n", (10749, 10818), False, 'from netCDF4 import Dataset\n'), ((13167, 13246), 'netCDF4.Dataset', 'Dataset', (["self.ic['file']", '"""a"""'], {'parallel': 'parallel', 'format': '"""NETCDF3_64BIT_OFFSET"""'}), "(self.ic['file'], 'a', parallel=parallel, format='NETCDF3_64BIT_OFFSET')\n", (13174, 13246), False, 'from netCDF4 import Dataset\n'), ((13274, 13306), 'os.path.relpath', 'os.path.relpath', (["self.ic['file']"], {}), "(self.ic['file'])\n", (13289, 13306), False, 'import os\n'), ((26034, 26041), 'hans.tools.abort', 'abort', ([], {}), '()\n', (26039, 26041), False, 'from hans.tools import abort\n'), ((5045, 5094), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.receive_signal'], {}), '(signal.SIGINT, self.receive_signal)\n', (5058, 5094), False, 'import signal\n'), ((5111, 5161), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self.receive_signal'], {}), '(signal.SIGTERM, self.receive_signal)\n', (5124, 5161), False, 'import signal\n'), ((5178, 5227), 'signal.signal', 'signal.signal', (['signal.SIGHUP', 'self.receive_signal'], {}), '(signal.SIGHUP, self.receive_signal)\n', 
(5191, 5227), False, 'import signal\n'), ((5244, 5294), 'signal.signal', 'signal.signal', (['signal.SIGUSR1', 'self.receive_signal'], {}), '(signal.SIGUSR1, self.receive_signal)\n', (5257, 5294), False, 'import signal\n'), ((5311, 5361), 'signal.signal', 'signal.signal', (['signal.SIGUSR2', 'self.receive_signal'], {}), '(signal.SIGUSR2, self.receive_signal)\n', (5324, 5361), False, 'import signal\n'), ((9703, 9726), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (9717, 9726), False, 'import os\n'), ((9745, 9765), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (9756, 9765), False, 'import os\n'), ((10429, 10459), 'os.path.join', 'os.path.join', (['out_dir', 'outfile'], {}), '(out_dir, outfile)\n', (10441, 10459), False, 'import os\n'), ((13464, 13505), 'shutil.copy', 'shutil.copy', (["self.ic['file']", 'backup_file'], {}), "(self.ic['file'], backup_file)\n", (13475, 13505), False, 'import shutil\n'), ((15257, 15271), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15269, 15271), False, 'from datetime import datetime\n'), ((17518, 17531), 'numpy.arange', 'np.arange', (['Nx'], {}), '(Nx)\n', (17527, 17531), True, 'import numpy as np\n'), ((20695, 20702), 'hans.tools.abort', 'abort', ([], {}), '()\n', (20700, 20702), False, 'from hans.tools import abort\n'), ((20826, 20833), 'hans.tools.abort', 'abort', ([], {}), '()\n', (20831, 20833), False, 'from hans.tools import abort\n'), ((21047, 21054), 'hans.tools.abort', 'abort', ([], {}), '()\n', (21052, 21054), False, 'from hans.tools import abort\n'), ((21180, 21187), 'hans.tools.abort', 'abort', ([], {}), '()\n', (21185, 21187), False, 'from hans.tools import abort\n'), ((24288, 24295), 'hans.tools.abort', 'abort', ([], {}), '()\n', (24293, 24295), False, 'from hans.tools import abort\n'), ((6942, 6989), 'numpy.zeros', 'np.zeros', (["(3, self.disc['Nx'], self.disc['Ny'])"], {}), "((3, self.disc['Nx'], self.disc['Ny']))\n", (6950, 6989), True, 'import numpy as np\n'), 
((11968, 11992), 'pkg_resources.get_distribution', 'get_distribution', (['"""hans"""'], {}), "('hans')\n", (11984, 11992), False, 'from pkg_resources import get_distribution\n'), ((17719, 17742), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (17727, 17742), False, 'from hans.material import Material\n'), ((19184, 19207), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (19192, 19207), False, 'from hans.material import Material\n'), ((7260, 7356), 'numpy.linspace', 'np.linspace', (["(0 + self.disc['dx'] / 2)", "(self.disc['Lx'] - self.disc['dx'] / 2)", "self.disc['Nx']"], {}), "(0 + self.disc['dx'] / 2, self.disc['Lx'] - self.disc['dx'] / 2,\n self.disc['Nx'])\n", (7271, 7356), True, 'import numpy as np\n'), ((7369, 7465), 'numpy.linspace', 'np.linspace', (["(0 + self.disc['dy'] / 2)", "(self.disc['Ly'] - self.disc['dy'] / 2)", "self.disc['Ny']"], {}), "(0 + self.disc['dy'] / 2, self.disc['Ly'] - self.disc['dy'] / 2,\n self.disc['Ny'])\n", (7380, 7465), True, 'import numpy as np\n'), ((7483, 7515), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (7494, 7515), True, 'import numpy as np\n'), ((7542, 7589), 'numpy.zeros', 'np.zeros', (["(3, self.disc['Nx'], self.disc['Ny'])"], {}), "((3, self.disc['Nx'], self.disc['Ny']))\n", (7550, 7589), True, 'import numpy as np\n'), ((13337, 13370), 'os.path.splitext', 'os.path.splitext', (["self.ic['file']"], {}), "(self.ic['file'])\n", (13353, 13370), False, 'import os\n'), ((16660, 16674), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16672, 16674), False, 'from datetime import datetime\n'), ((21478, 21485), 'hans.tools.abort', 'abort', ([], {}), '()\n', (21483, 21485), False, 'from hans.tools import abort\n'), ((21940, 21947), 'hans.tools.abort', 'abort', ([], {}), '()\n', (21945, 21947), False, 'from hans.tools import abort\n'), ((32290, 32313), 'hans.material.Material', 'Material', 
(['self.material'], {}), '(self.material)\n', (32298, 32313), False, 'from hans.material import Material\n'), ((32559, 32582), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (32567, 32582), False, 'from hans.material import Material\n'), ((32828, 32851), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (32836, 32851), False, 'from hans.material import Material\n'), ((33097, 33120), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (33105, 33120), False, 'from hans.material import Material\n'), ((7755, 7769), 'numpy.sin', 'np.sin', (['(k * xx)'], {}), '(k * xx)\n', (7761, 7769), True, 'import numpy as np\n'), ((7891, 7987), 'numpy.linspace', 'np.linspace', (["(0 + self.disc['dx'] / 2)", "(self.disc['Lx'] - self.disc['dx'] / 2)", "self.disc['Nx']"], {}), "(0 + self.disc['dx'] / 2, self.disc['Lx'] - self.disc['dx'] / 2,\n self.disc['Nx'])\n", (7902, 7987), True, 'import numpy as np\n'), ((8000, 8096), 'numpy.linspace', 'np.linspace', (["(0 + self.disc['dy'] / 2)", "(self.disc['Ly'] - self.disc['dy'] / 2)", "self.disc['Ny']"], {}), "(0 + self.disc['dy'] / 2, self.disc['Ly'] - self.disc['dy'] / 2,\n self.disc['Ny'])\n", (8011, 8096), True, 'import numpy as np\n'), ((8114, 8146), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (8125, 8146), True, 'import numpy as np\n'), ((8173, 8220), 'numpy.zeros', 'np.zeros', (["(3, self.disc['Nx'], self.disc['Ny'])"], {}), "((3, self.disc['Nx'], self.disc['Ny']))\n", (8181, 8220), True, 'import numpy as np\n'), ((8386, 8400), 'numpy.sin', 'np.sin', (['(k * xx)'], {}), '(k * xx)\n', (8392, 8400), True, 'import numpy as np\n'), ((9983, 9997), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9995, 9997), False, 'from datetime import datetime\n'), ((10282, 10301), 'os.listdir', 'os.listdir', (['out_dir'], {}), '(out_dir)\n', (10292, 10301), False, 'import os\n')] |
"""
Script to make nucleosome occupancy track!
@author: <NAME>
"""
##### IMPORT MODULES #####
# import necessary python modules
#import matplotlib as mpl
#mpl.use('PS')
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import traceback
import itertools
import pysam
from pyatac.utils import shell_command,read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta
from pyatac.chunk import ChunkList
from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk
from pyatac.fragmentsizes import FragmentSizes
from pyatac.bias import PWM
def _occHelper(arg):
    """Compute occupancy for one set of bed regions.

    Takes ``arg`` as a single ``(chunk, params)`` tuple so the function can
    be mapped over by ``multiprocessing.Pool``.  Returns a tuple of
    (nucleosome-size distribution, occupancy track, peaks sorted by position).
    """
    chunk, params = arg
    try:
        occ_chunk = OccChunk(chunk)
        occ_chunk.process(params)
        ordered_peaks = [occ_chunk.peaks[key] for key in sorted(occ_chunk.peaks)]
        result = (occ_chunk.getNucDist(), occ_chunk.occ, ordered_peaks)
        occ_chunk.removeData()
    except Exception as e:
        print(('Caught exception when processing:\n'+ chunk.asBed()+"\n"))
        traceback.print_exc()
        print()
        raise e
    return result
def _writeOcc(track_queue, out):
out_handle1 = open(out + '.occ.bedgraph','a')
out_handle2 = open(out + '.occ.lower_bound.bedgraph','a')
out_handle3 = open(out + '.occ.upper_bound.bedgraph','a')
try:
for track in iter(track_queue.get, 'STOP'):
track.write_track(out_handle1, vals = track.smoothed_vals)
track.write_track(out_handle2, vals = track.smoothed_lower)
track.write_track(out_handle3, vals = track.smoothed_upper)
track_queue.task_done()
except Exception as e:
print('Caught exception when writing occupancy track\n')
traceback.print_exc()
print()
raise e
out_handle1.close()
out_handle2.close()
out_handle3.close()
return True
def _writePeaks(pos_queue, out):
out_handle = open(out + '.occpeaks.bed','a')
try:
for poslist in iter(pos_queue.get, 'STOP'):
for pos in poslist:
pos.write(out_handle)
pos_queue.task_done()
except Exception as e:
print('Caught exception when writing occupancy track\n')
traceback.print_exc()
print()
raise e
out_handle.close()
return True
def run_occ(args):
    """Run nucleosome occupancy calling over the regions in args.bed.

    Orchestrates the full pipeline: builds the fragment-size mixture model,
    farms occupancy computation out to a worker pool, streams results to
    writer processes, then tabix-compresses/indexes the outputs and plots
    the inferred nucleosomal fragment-size distribution.
    """
    # Chromosome sizes come from the FASTA if given, else from the BAM header.
    if args.fasta:
        chrs = read_chrom_sizes_from_fasta(args.fasta)
    else:
        chrs = read_chrom_sizes_from_bam(args.bam)
    pwm = PWM.open(args.pwm)
    # min_offset keeps chunks far enough from chromosome edges for the
    # flank, fragment-size, PWM, and nucleosome-separation windows.
    chunks = ChunkList.read(args.bed, chromDict = chrs, min_offset = args.flank + args.upper//2 + max(pwm.up,pwm.down) + args.nuc_sep//2)
    chunks.slop(chrs, up = args.nuc_sep//2, down = args.nuc_sep//2)
    chunks.merge()
    maxQueueSize = args.cores*10
    # Fragment-size mixture (nucleosomal vs NFR); either loaded from a
    # precomputed sizes file or estimated from the BAM over the chunks.
    fragment_dist = FragmentMixDistribution(0, upper = args.upper)
    if args.sizes is not None:
        tmp = FragmentSizes.open(args.sizes)
        fragment_dist.fragmentsizes = FragmentSizes(0, args.upper, vals = tmp.get(0,args.upper))
    else:
        fragment_dist.getFragmentSizes(args.bam, chunks)
    fragment_dist.modelNFR()
    fragment_dist.plotFits(args.out + '.occ_fit.pdf')
    fragment_dist.fragmentsizes.save(args.out + '.fragmentsizes.txt')
    params = OccupancyParameters(fragment_dist, args.upper, args.fasta, args.pwm, sep = args.nuc_sep, min_occ = args.min_occ,
            flank = args.flank, bam = args.bam, ci = args.confidence_interval, step = args.step)
    sets = chunks.split(items = args.cores * 5)
    # One core is reserved for the writer processes below.
    pool1 = mp.Pool(processes = max(1,args.cores-1))
    # Truncate the output files; the writer processes re-open them in
    # append mode so results stream in as workers finish.
    out_handle1 = open(args.out + '.occ.bedgraph','w')
    out_handle1.close()
    out_handle2 = open(args.out + '.occ.lower_bound.bedgraph','w')
    out_handle2.close()
    out_handle3 = open(args.out + '.occ.upper_bound.bedgraph','w')
    out_handle3.close()
    write_queue = mp.JoinableQueue(maxsize = maxQueueSize)
    write_process = mp.Process(target = _writeOcc, args=(write_queue, args.out))
    write_process.start()
    peaks_handle = open(args.out + '.occpeaks.bed','w')
    peaks_handle.close()
    peaks_queue = mp.JoinableQueue()
    peaks_process = mp.Process(target = _writePeaks, args=(peaks_queue, args.out))
    peaks_process.start()
    nuc_dist = np.zeros(args.upper)
    # Fan each batch of chunks out to the pool; accumulate the nucleosome
    # size distribution locally and forward tracks/peaks to the writers.
    for j in sets:
        tmp = pool1.map(_occHelper, list(zip(j,itertools.repeat(params))))
        for result in tmp:
            nuc_dist += result[0]
            write_queue.put(result[1])
            peaks_queue.put(result[2])
    pool1.close()
    pool1.join()
    # 'STOP' sentinels terminate the writer loops (see _writeOcc/_writePeaks).
    write_queue.put('STOP')
    peaks_queue.put('STOP')
    write_process.join()
    peaks_process.join()
    # bgzip + tabix-index the outputs, removing the uncompressed originals.
    pysam.tabix_compress(args.out + '.occpeaks.bed', args.out + '.occpeaks.bed.gz',force = True)
    shell_command('rm ' + args.out + '.occpeaks.bed')
    pysam.tabix_index(args.out + '.occpeaks.bed.gz', preset = "bed", force = True)
    for i in ('occ','occ.lower_bound','occ.upper_bound'):
        pysam.tabix_compress(args.out + '.' + i + '.bedgraph', args.out + '.'+i+'.bedgraph.gz',force = True)
        shell_command('rm ' + args.out + '.' + i + '.bedgraph')
        pysam.tabix_index(args.out + '.' + i + '.bedgraph.gz', preset = "bed", force = True)
    dist_out = FragmentSizes(0, args.upper, vals = nuc_dist)
    dist_out.save(args.out + '.nuc_dist.txt')
    print("Making figure")
    #make figure
    fig = plt.figure()
    plt.plot(list(range(0,args.upper)),dist_out.get(0,args.upper),label = "Nucleosome Distribution")
    plt.xlabel("Fragment Size")
    plt.ylabel("Frequency")
    fig.savefig(args.out+'.nuc_dist.pdf')
    plt.close(fig)
| [
"pysam.tabix_compress",
"multiprocessing.JoinableQueue",
"nucleoatac.Occupancy.OccChunk",
"matplotlib.pyplot.ylabel",
"multiprocessing.Process",
"itertools.repeat",
"pyatac.utils.read_chrom_sizes_from_fasta",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"pysam.tabix_index",
"traceback.... | [((2492, 2510), 'pyatac.bias.PWM.open', 'PWM.open', (['args.pwm'], {}), '(args.pwm)\n', (2500, 2510), False, 'from pyatac.bias import PWM\n'), ((2789, 2833), 'nucleoatac.Occupancy.FragmentMixDistribution', 'FragmentMixDistribution', (['(0)'], {'upper': 'args.upper'}), '(0, upper=args.upper)\n', (2812, 2833), False, 'from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk\n'), ((3242, 3437), 'nucleoatac.Occupancy.OccupancyParameters', 'OccupancyParameters', (['fragment_dist', 'args.upper', 'args.fasta', 'args.pwm'], {'sep': 'args.nuc_sep', 'min_occ': 'args.min_occ', 'flank': 'args.flank', 'bam': 'args.bam', 'ci': 'args.confidence_interval', 'step': 'args.step'}), '(fragment_dist, args.upper, args.fasta, args.pwm, sep=\n args.nuc_sep, min_occ=args.min_occ, flank=args.flank, bam=args.bam, ci=\n args.confidence_interval, step=args.step)\n', (3261, 3437), False, 'from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk\n'), ((3832, 3870), 'multiprocessing.JoinableQueue', 'mp.JoinableQueue', ([], {'maxsize': 'maxQueueSize'}), '(maxsize=maxQueueSize)\n', (3848, 3870), True, 'import multiprocessing as mp\n'), ((3893, 3951), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_writeOcc', 'args': '(write_queue, args.out)'}), '(target=_writeOcc, args=(write_queue, args.out))\n', (3903, 3951), True, 'import multiprocessing as mp\n'), ((4079, 4097), 'multiprocessing.JoinableQueue', 'mp.JoinableQueue', ([], {}), '()\n', (4095, 4097), True, 'import multiprocessing as mp\n'), ((4118, 4178), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_writePeaks', 'args': '(peaks_queue, args.out)'}), '(target=_writePeaks, args=(peaks_queue, args.out))\n', (4128, 4178), True, 'import multiprocessing as mp\n'), ((4222, 4242), 'numpy.zeros', 'np.zeros', (['args.upper'], {}), '(args.upper)\n', (4230, 4242), True, 'import numpy as np\n'), ((4626, 4721), 'pysam.tabix_compress', 'pysam.tabix_compress', 
(["(args.out + '.occpeaks.bed')", "(args.out + '.occpeaks.bed.gz')"], {'force': '(True)'}), "(args.out + '.occpeaks.bed', args.out +\n '.occpeaks.bed.gz', force=True)\n", (4646, 4721), False, 'import pysam\n'), ((4723, 4772), 'pyatac.utils.shell_command', 'shell_command', (["('rm ' + args.out + '.occpeaks.bed')"], {}), "('rm ' + args.out + '.occpeaks.bed')\n", (4736, 4772), False, 'from pyatac.utils import shell_command, read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta\n'), ((4777, 4851), 'pysam.tabix_index', 'pysam.tabix_index', (["(args.out + '.occpeaks.bed.gz')"], {'preset': '"""bed"""', 'force': '(True)'}), "(args.out + '.occpeaks.bed.gz', preset='bed', force=True)\n", (4794, 4851), False, 'import pysam\n'), ((5201, 5244), 'pyatac.fragmentsizes.FragmentSizes', 'FragmentSizes', (['(0)', 'args.upper'], {'vals': 'nuc_dist'}), '(0, args.upper, vals=nuc_dist)\n', (5214, 5244), False, 'from pyatac.fragmentsizes import FragmentSizes\n'), ((5348, 5360), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5358, 5360), True, 'import matplotlib.pyplot as plt\n'), ((5466, 5493), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fragment Size"""'], {}), "('Fragment Size')\n", (5476, 5493), True, 'import matplotlib.pyplot as plt\n'), ((5498, 5521), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (5508, 5521), True, 'import matplotlib.pyplot as plt\n'), ((5568, 5582), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5577, 5582), True, 'import matplotlib.pyplot as plt\n'), ((728, 743), 'nucleoatac.Occupancy.OccChunk', 'OccChunk', (['chunk'], {}), '(chunk)\n', (736, 743), False, 'from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk\n'), ((2381, 2420), 'pyatac.utils.read_chrom_sizes_from_fasta', 'read_chrom_sizes_from_fasta', (['args.fasta'], {}), '(args.fasta)\n', (2408, 2420), False, 'from pyatac.utils import shell_command, read_chrom_sizes_from_bam, 
read_chrom_sizes_from_fasta\n'), ((2446, 2481), 'pyatac.utils.read_chrom_sizes_from_bam', 'read_chrom_sizes_from_bam', (['args.bam'], {}), '(args.bam)\n', (2471, 2481), False, 'from pyatac.utils import shell_command, read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta\n'), ((2881, 2911), 'pyatac.fragmentsizes.FragmentSizes.open', 'FragmentSizes.open', (['args.sizes'], {}), '(args.sizes)\n', (2899, 2911), False, 'from pyatac.fragmentsizes import FragmentSizes\n'), ((4927, 5034), 'pysam.tabix_compress', 'pysam.tabix_compress', (["(args.out + '.' + i + '.bedgraph')", "(args.out + '.' + i + '.bedgraph.gz')"], {'force': '(True)'}), "(args.out + '.' + i + '.bedgraph', args.out + '.' + i +\n '.bedgraph.gz', force=True)\n", (4947, 5034), False, 'import pysam\n'), ((5036, 5091), 'pyatac.utils.shell_command', 'shell_command', (["('rm ' + args.out + '.' + i + '.bedgraph')"], {}), "('rm ' + args.out + '.' + i + '.bedgraph')\n", (5049, 5091), False, 'from pyatac.utils import shell_command, read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta\n'), ((5100, 5185), 'pysam.tabix_index', 'pysam.tabix_index', (["(args.out + '.' + i + '.bedgraph.gz')"], {'preset': '"""bed"""', 'force': '(True)'}), "(args.out + '.' + i + '.bedgraph.gz', preset='bed', force=True\n )\n", (5117, 5185), False, 'import pysam\n'), ((1016, 1037), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1035, 1037), False, 'import traceback\n'), ((1705, 1726), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1724, 1726), False, 'import traceback\n'), ((2196, 2217), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2215, 2217), False, 'import traceback\n'), ((4314, 4338), 'itertools.repeat', 'itertools.repeat', (['params'], {}), '(params)\n', (4330, 4338), False, 'import itertools\n')] |
#!/usr/bin/env python
# coding: utf-8
import os
import unittest
from yalign.wordpairscore import WordPairScore
class TestWordPairScore(unittest.TestCase):
    """Tests for WordPairScore's loading of translation-score files."""

    def setUp(self):
        # Default fixture: plain CSV word-score file.
        self.word_pair_score = self._create_word_pair_score('test_word_scores.csv')

    def _create_word_pair_score(self, filename):
        # Build a WordPairScore from a fixture in the local data folder.
        fixtures_dir = os.path.dirname(os.path.abspath(__file__))
        score_file = os.path.join(fixtures_dir, "data", filename)
        return WordPairScore(score_file)

    def test_load_translations_in_gz_format(self):
        # A gzipped fixture must load to the same translation table.
        gz_score = self._create_word_pair_score('test_word_scores.csv.gz')
        self.check_translations(gz_score.translations)

    def test_translations(self):
        self.check_translations(self.word_pair_score.translations)

    def check_translations(self, translations):
        # Shared assertions for both the plain and gzipped fixtures.
        self.assertEqual(len(translations), 3)
        expected_you = {u'ustedes': 0.625, u'vosotros': 0.375, u'vos': 0.75}
        self.assertEqual(translations[u'house'], {u'casa': 1.0})
        self.assertEqual(translations[u'you'], expected_you)
        self.assertEqual(translations[u'yourselves'], {u'vosotros': 0.75})
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    unittest.main()
| [
"unittest.main",
"yalign.wordpairscore.WordPairScore",
"os.path.join",
"os.path.abspath"
] | [((1320, 1335), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1333, 1335), False, 'import unittest\n'), ((400, 441), 'os.path.join', 'os.path.join', (['base_path', '"""data"""', 'filename'], {}), "(base_path, 'data', filename)\n", (412, 441), False, 'import os\n'), ((457, 484), 'yalign.wordpairscore.WordPairScore', 'WordPairScore', (['translations'], {}), '(translations)\n', (470, 484), False, 'from yalign.wordpairscore import WordPairScore\n'), ((350, 375), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (365, 375), False, 'import os\n')] |
#!/usr/bin/env python3
"""Extended tools for datetime"""
import datetime
def how_many_seconds_to_time(now, hour, minute):
    """Return whole seconds from *now* until the next hour:minute.

    Args:
        now: datetime to measure from (naive or aware).
        hour: target hour (0-23).
        minute: target minute (0-59).

    Returns:
        int: seconds until the next occurrence of hour:minute:00.  If
        that time today has already passed (or equals now), the
        occurrence on the following day is used.
    """
    # Reset second *and* microsecond so the target is exactly hh:mm:00;
    # the original kept now's microsecond, skewing the target by <1s.
    target_time = now.replace(hour=hour, minute=minute, second=0, microsecond=0)
    if now >= target_time:
        target_time += datetime.timedelta(days=1)
    return int((target_time - now).total_seconds())
def local_tz_now():
    """Return the current time as an aware datetime in the local timezone."""
    utc_now = datetime.datetime.now(datetime.timezone.utc)
    return utc_now.astimezone()
| [
"datetime.datetime.now",
"datetime.timedelta"
] | [((306, 332), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (324, 332), False, 'import datetime\n'), ((467, 511), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (488, 511), False, 'import datetime\n')] |
from datetime import date
from unittest import TestCase
from workalendar.registry import IsoRegistry
from workalendar.core import Calendar
class RegionCalendar(Calendar):
    'Region'

    def holidays(self, year=None):
        # Fixed two-holiday fixture used by the registry tests.
        christmas = (date(year, 12, 25), 'Christmas')
        new_year = (date(year, 1, 1), 'New year')
        return (christmas, new_year)

    def get_weekend_days(self):
        # This fixture calendar defines no weekend at all.
        return []  # no week-end, yes, it's sad
class SubRegionCalendar(Calendar):
    'Sub Region'

    def holidays(self, year=None):
        # Same two-holiday fixture as RegionCalendar, for subregion tests.
        christmas = (date(year, 12, 25), 'Christmas')
        new_year = (date(year, 1, 1), 'New year')
        return (christmas, new_year)

    def get_weekend_days(self):
        # This fixture calendar defines no weekend at all.
        return []  # no week-end, yes, it's sad
class MockCalendarTest(TestCase):
    """Exercises IsoRegistry registration and lookup using mock calendars."""

    def setUp(self):
        self.region = RegionCalendar
        self.subregion = SubRegionCalendar

    def _make_registry(self):
        # Always start from an empty registry (no standard calendar modules).
        return IsoRegistry(load_standard_modules=False)

    def test_register(self):
        reg = self._make_registry()
        self.assertEqual(len(reg.region_registry.items()), 0)
        reg.register('RE', self.region)
        self.assertEqual(len(reg.region_registry.items()), 1)
        self.assertEqual(reg.region_registry['RE'], RegionCalendar)

    def test_get_calendar_class(self):
        reg = self._make_registry()
        reg.register('RE', self.region)
        self.assertEqual(reg.get_calendar_class('RE'), RegionCalendar)

    def test_get_subregions(self):
        reg = self._make_registry()
        reg.register('RE', self.region)
        reg.register('RE-SR', self.subregion)
        reg.register('OR-SR', self.subregion)
        # Only subregions of 'RE' should be returned, not 'OR-SR'.
        found = reg.get_subregions('RE')
        self.assertEqual(len(found), 1)
        self.assertIn('RE-SR', found)

    def test_get_items(self):
        reg = self._make_registry()
        reg.register('RE', self.region)
        reg.register('RE-SR', self.subregion)
        reg.register('OR-SR', self.subregion)
        # With subregions, the region and its subregion are both included.
        with_sub = reg.items(['RE'], include_subregions=True)
        self.assertEqual(len(with_sub), 2)
        self.assertIn('RE', with_sub)
        self.assertIn('RE-SR', with_sub)
        # Without subregions, only the region itself is included.
        without_sub = reg.items(['RE'], include_subregions=False)
        self.assertEqual(len(without_sub), 1)
        self.assertIn('RE', without_sub)
| [
"workalendar.registry.IsoRegistry",
"datetime.date"
] | [((912, 952), 'workalendar.registry.IsoRegistry', 'IsoRegistry', ([], {'load_standard_modules': '(False)'}), '(load_standard_modules=False)\n', (923, 952), False, 'from workalendar.registry import IsoRegistry\n'), ((1264, 1304), 'workalendar.registry.IsoRegistry', 'IsoRegistry', ([], {'load_standard_modules': '(False)'}), '(load_standard_modules=False)\n', (1275, 1304), False, 'from workalendar.registry import IsoRegistry\n'), ((1521, 1561), 'workalendar.registry.IsoRegistry', 'IsoRegistry', ([], {'load_standard_modules': '(False)'}), '(load_standard_modules=False)\n', (1532, 1561), False, 'from workalendar.registry import IsoRegistry\n'), ((1898, 1938), 'workalendar.registry.IsoRegistry', 'IsoRegistry', ([], {'load_standard_modules': '(False)'}), '(load_standard_modules=False)\n', (1909, 1938), False, 'from workalendar.registry import IsoRegistry\n'), ((259, 277), 'datetime.date', 'date', (['year', '(12)', '(25)'], {}), '(year, 12, 25)\n', (263, 277), False, 'from datetime import date\n'), ((306, 322), 'datetime.date', 'date', (['year', '(1)', '(1)'], {}), '(year, 1, 1)\n', (310, 322), False, 'from datetime import date\n'), ((555, 573), 'datetime.date', 'date', (['year', '(12)', '(25)'], {}), '(year, 12, 25)\n', (559, 573), False, 'from datetime import date\n'), ((602, 618), 'datetime.date', 'date', (['year', '(1)', '(1)'], {}), '(year, 1, 1)\n', (606, 618), False, 'from datetime import date\n')] |
# coding:utf-8
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import models
from django.contrib.auth.models import User
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django import forms
from django.template import RequestContext
from django.http import HttpResponseRedirect,HttpResponse,Http404
from django.db import models
from django.shortcuts import render_to_response,HttpResponse,render
from django.core.mail import send_mail
from staff.models import Staff
def show_staff(req):
    """Render the staff listing page with every Staff record.

    Args:
        req: the incoming HttpRequest.

    Returns:
        HttpResponse rendering the 'staff/show_staff.html' template with
        all Staff objects in the 'staffs' context variable.
    """
    staffs = Staff.objects.all()
    # render_to_response is deprecated (removed in Django 3.0); render is
    # already imported in this module and additionally passes the request.
    return render(req, 'staff/show_staff.html', {'staffs': staffs})
"staff.models.Staff.objects.all",
"django.shortcuts.render_to_response"
] | [((627, 646), 'staff.models.Staff.objects.all', 'Staff.objects.all', ([], {}), '()\n', (644, 646), False, 'from staff.models import Staff\n'), ((658, 721), 'django.shortcuts.render_to_response', 'render_to_response', (['"""staff/show_staff.html"""', "{'staffs': staffs}"], {}), "('staff/show_staff.html', {'staffs': staffs})\n", (676, 721), False, 'from django.shortcuts import render_to_response, HttpResponse, render\n')] |
from connections import i2cAdafruitConnection
from observer import observe
import parameter
import datetime
class MassFlow(i2cAdafruitConnection.I2cAdafruitConnection, observe.Observer):
    """Gas mass-flow sensor read over an ADS1115 ADC on the I2C bus.

    Registers itself as an observer and exposes the latest reading as a
    pre-formatted string for CSV-style logging.
    """

    # Latest formatted reading; 'NaN' until the first successful request().
    dataStr = "'NaN'"
    # CSV column header for this sensor.
    headerStr = "'Gas Mass Flow [kg/h]'"

    def __init__(self, observable):
        i2cAdafruitConnection.I2cAdafruitConnection.__init__(self)
        observe.Observer.__init__(self, observable)

    def notifyData(self):
        """Observer hook: return the latest formatted reading."""
        return self.dataStr

    def notifyHeader(self):
        """Observer hook: return the CSV column header."""
        return self.headerStr

    def request(self):
        """Read ADC channel 0 and update dataStr with the gas flow in kg/h.

        On a read failure the previous value of dataStr is kept.
        """
        fuelflow = None
        try:
            # Read the specified ADC channel using the previously set gain.
            raw = self.adc.read_adc(0, gain=self.GAIN)
            # Convert counts -> voltage -> calibrated mass flow [kg/h].
            # 3276.8 counts/V at this gain; the remaining factors are the
            # sensor transfer function and calibration constants —
            # TODO confirm against the sensor datasheet.
            fuelflow = raw / (3276.8) * 4.2 * 0.046166667 * 0.82615
        except Exception:
            # Narrowed from a bare except: keep running when the sensor is
            # unplugged or the I2C bus errors out.
            print ("Gas Mass Flow Sensor is switched off!")
        if fuelflow is not None:
            self.dataStr = "{:8.6f}".format(fuelflow)

    def getHeader(self):
        return self.headerStr

    def getData(self):
        return self.dataStr
| [
"connections.i2cAdafruitConnection.I2cAdafruitConnection.__init__",
"datetime.datetime.now",
"observer.observe.Observer.__init__"
] | [((397, 455), 'connections.i2cAdafruitConnection.I2cAdafruitConnection.__init__', 'i2cAdafruitConnection.I2cAdafruitConnection.__init__', (['self'], {}), '(self)\n', (449, 455), False, 'from connections import i2cAdafruitConnection\n'), ((464, 507), 'observer.observe.Observer.__init__', 'observe.Observer.__init__', (['self', 'observable'], {}), '(self, observable)\n', (489, 507), False, 'from observer import observe\n'), ((943, 966), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (964, 966), False, 'import datetime\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import sys, os
from soma import aims, aimsalgo
import numpy
import optparse
# Command-line interface: options may also be given positionally, in the
# order grey/white mask, output volume, folds graph.
parser = optparse.OptionParser( description='Voronoi diagram of the sulci ' \
  'nodes regions, in the grey matter, and extending to the whole 3D space' )
parser.add_option( '-g', '--greywhite', dest='lgw',
  help='left grey/white mask' )
parser.add_option( '-o', '--output', dest='voronoi',
  help='output voronoi diagram volume' )
parser.add_option( '-f', '--folds', dest='graph',
  help='sulci graph file' )
options, args = parser.parse_args()

lgw_vol_file = options.lgw
fold_graph_file = options.graph
voronoi_vol_file = options.voronoi
# Fall back to positional arguments for any option not given explicitly.
if lgw_vol_file is None and len( args ) > 0:
  lgw_vol_file = args[0]
  del args[0]
if voronoi_vol_file is None and len( args ) > 0:
  voronoi_vol_file = args[0]
  del args[0]
if fold_graph_file is None and len( args ) > 0:
  fold_graph_file = args[0]
  del args[0]
if lgw_vol_file is None or voronoi_vol_file is None \
  or fold_graph_file is None or len( args ) != 0:
  # Print usage and exit.  The original called parser.parse(['-h']),
  # but optparse.OptionParser has no 'parse' method (AttributeError);
  # parse_args(['-h']) prints the help text and exits.
  parser.parse_args( [ '-h' ] )

lgw_vol = aims.read( lgw_vol_file )
fold_graph = aims.read( fold_graph_file )

# Voxel labels used in the grey/white mask.
LCR_label = 255
GM_label = 100

# Seed volume: negated mask, so labels below are distinguishable.
seed = - lgw_vol
voxel_size = lgw_vol.header()["voxel_size"]
def printbucket(bck, vol, value):
  """Rasterize bucket *bck* into volume *vol*, writing *value* at each voxel."""
  converter = aims.RawConverter_BucketMap_VOID_rc_ptr_Volume_S16(False, True, value)
  converter.printToVolume(bck._get(), vol)
# Stamp each sulcus node's buckets into the seed volume with its
# skeleton label; collect the labels for the fast-marching step below.
seed_label_list = []
for v in fold_graph.vertices():
  try:
    b = v[ 'aims_ss' ]
    index = v[ 'skeleton_label' ]
    seed_label_list.append(int(index))
    printbucket( b, seed, index )
    printbucket( b, lgw_vol, LCR_label ) # so that CSF reaches all the way down to the bottom of the sulci
    # 'aims_bottom' and 'aims_other' buckets are optional per vertex;
    # silently skip vertices that lack them (best-effort fill).
    try:
      b = v[ 'aims_bottom' ]
      printbucket( b, seed, index )
    except:
      pass
    try:
      b = v[ 'aims_other' ]
      printbucket( b, seed, index )
    except:
      pass
  except:
    pass
# First pass: propagate the sulcus labels through grey matter and CSF.
f1 = aims.FastMarching()
print("Voronoi in Grey matter")
f1.doit(seed, [-LCR_label, -GM_label], seed_label_list)
voronoi_vol = f1.voronoiVol()
# Second pass: extend the diagram through the remaining (white matter)
# space, relabelled to -100 so it becomes the propagation domain.
print("Voronoi in White matter")
f1 = aims.FastMarching()
n = numpy.array( voronoi_vol, copy=False )
n[ n == -1 ] = -100
f1.doit( voronoi_vol, [-100], seed_label_list )
# f1.doit( voronoi_vol, [-100], [ 940, 760] )
voronoi_vol = f1.voronoiVol()
aims.write( voronoi_vol, voronoi_vol_file )
| [
"soma.aims.write",
"soma.aims.FastMarching",
"optparse.OptionParser",
"numpy.array",
"soma.aims.RawConverter_BucketMap_VOID_rc_ptr_Volume_S16",
"soma.aims.read"
] | [((211, 357), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'description': '"""Voronoi diagram of the sulci nodes regions, in the grey matter, and extending to the whole 3D space"""'}), "(description=\n 'Voronoi diagram of the sulci nodes regions, in the grey matter, and extending to the whole 3D space'\n )\n", (232, 357), False, 'import optparse\n'), ((1154, 1177), 'soma.aims.read', 'aims.read', (['lgw_vol_file'], {}), '(lgw_vol_file)\n', (1163, 1177), False, 'from soma import aims, aimsalgo\n'), ((1193, 1219), 'soma.aims.read', 'aims.read', (['fold_graph_file'], {}), '(fold_graph_file)\n', (1202, 1219), False, 'from soma import aims, aimsalgo\n'), ((1967, 1986), 'soma.aims.FastMarching', 'aims.FastMarching', ([], {}), '()\n', (1984, 1986), False, 'from soma import aims, aimsalgo\n'), ((2143, 2162), 'soma.aims.FastMarching', 'aims.FastMarching', ([], {}), '()\n', (2160, 2162), False, 'from soma import aims, aimsalgo\n'), ((2167, 2203), 'numpy.array', 'numpy.array', (['voronoi_vol'], {'copy': '(False)'}), '(voronoi_vol, copy=False)\n', (2178, 2203), False, 'import numpy\n'), ((2351, 2392), 'soma.aims.write', 'aims.write', (['voronoi_vol', 'voronoi_vol_file'], {}), '(voronoi_vol, voronoi_vol_file)\n', (2361, 2392), False, 'from soma import aims, aimsalgo\n'), ((1358, 1428), 'soma.aims.RawConverter_BucketMap_VOID_rc_ptr_Volume_S16', 'aims.RawConverter_BucketMap_VOID_rc_ptr_Volume_S16', (['(False)', '(True)', 'value'], {}), '(False, True, value)\n', (1408, 1428), False, 'from soma import aims, aimsalgo\n')] |
"""
这个是用来测试,以redis为中间件,随意关闭代码会不会造成任务丢失的。
"""
import time
from funboost import boost,BrokerEnum
@boost('test_cost_long_time_fun_queue2', broker_kind=BrokerEnum.REDIS_ACK_ABLE, concurrent_num=5)
def cost_long_time_fun(x):
print(f'正在消费 {x} 中 。。。。')
time.sleep(3)
print(f'消费完成 {x} ')
if __name__ == '__main__':
cost_long_time_fun.consume() | [
"funboost.boost",
"time.sleep"
] | [((98, 199), 'funboost.boost', 'boost', (['"""test_cost_long_time_fun_queue2"""'], {'broker_kind': 'BrokerEnum.REDIS_ACK_ABLE', 'concurrent_num': '(5)'}), "('test_cost_long_time_fun_queue2', broker_kind=BrokerEnum.\n REDIS_ACK_ABLE, concurrent_num=5)\n", (103, 199), False, 'from funboost import boost, BrokerEnum\n'), ((256, 269), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (266, 269), False, 'import time\n')] |
# coding: utf-8
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import unittest
from fireworks.utilities.fw_serializers import load_object
from atomate.vasp.firetasks.write_inputs import WriteVaspFromIOSet, WriteVaspFromPMGObjects, \
ModifyPotcar, ModifyIncar
from atomate.utils.testing import AtomateTest
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints
from pymatgen.io.vasp.sets import MPRelaxSet
__author__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>, <EMAIL>'
# Absolute directory of this test module; used below to locate test_files.
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
class TestWriteVasp(AtomateTest):
    """Tests for the VASP input-writing Firetasks.

    Reference INCAR/POSCAR/POTCAR/KPOINTS files live under
    test_files/setup_test relative to this module.

    NOTE(review): the tasks write "INCAR" etc. relative to the cwd, while
    tearDown removes them from module_dir — these only coincide if
    AtomateTest.setUp leaves the cwd at module_dir; confirm.
    """

    @classmethod
    def setUpClass(cls):
        # Shared fixtures: a Si structure and the reference input files
        # that every test compares against.
        cls.struct_si = PymatgenTest.get_structure("Si")
        cls.ref_incar = Incar.from_file(
            os.path.join(module_dir, "..", "..", "test_files", "setup_test", "INCAR"))
        cls.ref_poscar = Poscar.from_file(
            os.path.join(module_dir, "..", "..", "test_files", "setup_test",
                         "POSCAR"))
        cls.ref_potcar = Potcar.from_file(
            os.path.join(module_dir, "..", "..", "test_files", "setup_test",
                         "POTCAR"))
        cls.ref_kpoints = Kpoints.from_file(
            os.path.join(module_dir, "..", "..", "test_files", "setup_test",
                         "KPOINTS"))
        cls.ref_incar_preserve = Incar.from_file(os.path.join(module_dir,
                                                     "..", "..", "test_files",
                                                     "preserve_incar", "INCAR"))

    def setUp(self):
        # lpad=False: no LaunchPad/database needed for these tasks.
        super(TestWriteVasp, self).setUp(lpad=False)

    def tearDown(self):
        # Clean up any VASP input files produced by a test.
        for x in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
            if os.path.exists(os.path.join(module_dir, x)):
                os.remove(os.path.join(module_dir, x))

    def _verify_files(self, skip_kpoints=False, preserve_incar=False):
        """Compare the files written into the cwd against the references.

        skip_kpoints: don't check KPOINTS (it varies between input sets).
        preserve_incar: compare INCAR against the preserve_incar reference
        instead, and skip the other files.
        """
        if not preserve_incar:
            self.assertEqual(Incar.from_file("INCAR"), self.ref_incar)
            # Poscar/Kpoints are compared as strings (no __eq__ guaranteed).
            self.assertEqual(str(Poscar.from_file("POSCAR")), str(self.ref_poscar))
            self.assertEqual(Potcar.from_file("POTCAR").symbols,
                             self.ref_potcar.symbols)
            if not skip_kpoints:
                self.assertEqual(str(Kpoints.from_file("KPOINTS")),
                                 str(self.ref_kpoints))
        else:
            self.assertEqual(Incar.from_file("INCAR"),
                             self.ref_incar_preserve)

    def test_ioset_explicit(self):
        """WriteVaspFromIOSet with an explicit VaspInputSet object."""
        ft = WriteVaspFromIOSet(dict(structure=self.struct_si,
                                     vasp_input_set=MPRelaxSet(self.struct_si, force_gamma=True)))
        ft = load_object(ft.to_dict())  # simulate database insertion
        ft.run_task({})
        self._verify_files()

    def test_ioset_implicit(self):
        """WriteVaspFromIOSet with the input set given by name only."""
        ft = WriteVaspFromIOSet(
            dict(structure=self.struct_si, vasp_input_set="MPRelaxSet"))
        ft = load_object(ft.to_dict())  # simulate database insertion
        ft.run_task({})
        self._verify_files(skip_kpoints=True)

    def test_ioset_params(self):
        """User INCAR overrides passed through vasp_input_params take effect."""
        ft = WriteVaspFromIOSet(
            dict(structure=self.struct_si, vasp_input_set="MPRelaxSet",
                 vasp_input_params={"user_incar_settings": {"ISMEAR": 1000}}))
        ft = load_object(ft.to_dict())  # simulate database insertion
        ft.run_task({})
        incar = Incar.from_file("INCAR")
        self.assertEqual(incar["ISMEAR"], 1000)  # make sure override works
        incar['ISMEAR'] = -5  # switch back to default
        incar.write_file("INCAR")
        self._verify_files(skip_kpoints=True)

    def test_pmgobjects(self):
        """WriteVaspFromPMGObjects with pre-built pymatgen input objects."""
        mpvis = MPRelaxSet(self.struct_si, force_gamma=True)
        ft = WriteVaspFromPMGObjects({"incar": mpvis.incar,
                                      "poscar": mpvis.poscar,
                                      "kpoints": mpvis.kpoints,
                                      "potcar": mpvis.potcar})
        ft = load_object(ft.to_dict())  # simulate database insertion
        ft.run_task({})
        self._verify_files()

    def _get_processed_incar_dict(self, incar, poscar):
        """Normalise list-style LDAU parameters into per-element dicts.

        The dict is keyed by the most electronegative element, matching
        the convention used by pymatgen input sets.
        """
        incar_dict = incar.as_dict()
        comp = poscar.structure.composition
        elements = sorted([el for el in comp.elements if comp[el] > 0],
                          key=lambda e: e.X)
        most_electroneg = elements[-1].symbol
        for lda_param in ("LDAUL", "LDAUU", "LDAUJ"):
            if incar_dict.get(lda_param):
                vals = incar_dict[lda_param]
                if isinstance(vals, list):
                    # One value per POSCAR site symbol, in order.
                    incar_dict[lda_param] = {most_electroneg: {}}
                    for i, sym in enumerate(poscar.site_symbols):
                        incar_dict[lda_param][most_electroneg][sym] = vals[i]
        return incar_dict

    def test_modify_incar(self):
        """ModifyIncar applies update, multiply and dictmod operations."""
        # create an INCAR
        incar = self.ref_incar
        incar.write_file("INCAR")
        # modify and test
        ft = ModifyIncar(
            {"incar_update": {"ISMEAR": 1000}, "incar_multiply": {"ENCUT": 1.5},
             "incar_dictmod": {"_inc": {"ISPIN": -1}}})
        ft = load_object(ft.to_dict())  # simulate database insertion
        ft.run_task({})
        incar_mod = Incar.from_file("INCAR")
        self.assertEqual(incar_mod['ISMEAR'], 1000)
        # 780 = 520 * 1.5 — presumably the reference INCAR has ENCUT=520;
        # confirm against test_files/setup_test/INCAR.
        self.assertEqual(incar_mod['ENCUT'], 780)
        self.assertEqual(incar_mod['ISPIN'], 1)

    def test_modify_potcar(self):
        """ModifyPotcar swaps POTCAR symbols (here Si -> O)."""
        Potcar(["Si"]).write_file("POTCAR")
        potcar = Potcar.from_file("POTCAR")
        self.assertFalse("alt" in potcar[0].header)
        # modify/test
        ft = ModifyPotcar(potcar_symbols={"Si": "O"})
        ft = load_object(ft.to_dict())  # simulate database insertion
        ft.run_task({})
        new_potcar = Potcar.from_file("POTCAR")
        self.assertEqual(len(new_potcar), 1)
        self.assertEqual(new_potcar[0].symbol, 'O')
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| [
"pymatgen.util.testing.PymatgenTest.get_structure",
"pymatgen.io.vasp.Potcar",
"atomate.vasp.firetasks.write_inputs.ModifyIncar",
"os.path.join",
"pymatgen.io.vasp.Poscar.from_file",
"atomate.vasp.firetasks.write_inputs.ModifyPotcar",
"pymatgen.io.vasp.sets.MPRelaxSet",
"atomate.vasp.firetasks.write_i... | [((6026, 6041), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6039, 6041), False, 'import unittest\n'), ((621, 646), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (636, 646), False, 'import os\n'), ((751, 783), 'pymatgen.util.testing.PymatgenTest.get_structure', 'PymatgenTest.get_structure', (['"""Si"""'], {}), "('Si')\n", (777, 783), False, 'from pymatgen.util.testing import PymatgenTest\n'), ((3474, 3498), 'pymatgen.io.vasp.Incar.from_file', 'Incar.from_file', (['"""INCAR"""'], {}), "('INCAR')\n", (3489, 3498), False, 'from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints\n'), ((3758, 3802), 'pymatgen.io.vasp.sets.MPRelaxSet', 'MPRelaxSet', (['self.struct_si'], {'force_gamma': '(True)'}), '(self.struct_si, force_gamma=True)\n', (3768, 3802), False, 'from pymatgen.io.vasp.sets import MPRelaxSet\n'), ((3816, 3941), 'atomate.vasp.firetasks.write_inputs.WriteVaspFromPMGObjects', 'WriteVaspFromPMGObjects', (["{'incar': mpvis.incar, 'poscar': mpvis.poscar, 'kpoints': mpvis.kpoints,\n 'potcar': mpvis.potcar}"], {}), "({'incar': mpvis.incar, 'poscar': mpvis.poscar,\n 'kpoints': mpvis.kpoints, 'potcar': mpvis.potcar})\n", (3839, 3941), False, 'from atomate.vasp.firetasks.write_inputs import WriteVaspFromIOSet, WriteVaspFromPMGObjects, ModifyPotcar, ModifyIncar\n'), ((5061, 5189), 'atomate.vasp.firetasks.write_inputs.ModifyIncar', 'ModifyIncar', (["{'incar_update': {'ISMEAR': 1000}, 'incar_multiply': {'ENCUT': 1.5},\n 'incar_dictmod': {'_inc': {'ISPIN': -1}}}"], {}), "({'incar_update': {'ISMEAR': 1000}, 'incar_multiply': {'ENCUT': \n 1.5}, 'incar_dictmod': {'_inc': {'ISPIN': -1}}})\n", (5072, 5189), False, 'from atomate.vasp.firetasks.write_inputs import WriteVaspFromIOSet, WriteVaspFromPMGObjects, ModifyPotcar, ModifyIncar\n'), ((5326, 5350), 'pymatgen.io.vasp.Incar.from_file', 'Incar.from_file', (['"""INCAR"""'], {}), "('INCAR')\n", (5341, 5350), False, 'from pymatgen.io.vasp import Incar, 
Poscar, Potcar, Kpoints\n'), ((5597, 5623), 'pymatgen.io.vasp.Potcar.from_file', 'Potcar.from_file', (['"""POTCAR"""'], {}), "('POTCAR')\n", (5613, 5623), False, 'from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints\n'), ((5712, 5752), 'atomate.vasp.firetasks.write_inputs.ModifyPotcar', 'ModifyPotcar', ([], {'potcar_symbols': "{'Si': 'O'}"}), "(potcar_symbols={'Si': 'O'})\n", (5724, 5752), False, 'from atomate.vasp.firetasks.write_inputs import WriteVaspFromIOSet, WriteVaspFromPMGObjects, ModifyPotcar, ModifyIncar\n'), ((5869, 5895), 'pymatgen.io.vasp.Potcar.from_file', 'Potcar.from_file', (['"""POTCAR"""'], {}), "('POTCAR')\n", (5885, 5895), False, 'from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints\n'), ((838, 911), 'os.path.join', 'os.path.join', (['module_dir', '""".."""', '""".."""', '"""test_files"""', '"""setup_test"""', '"""INCAR"""'], {}), "(module_dir, '..', '..', 'test_files', 'setup_test', 'INCAR')\n", (850, 911), False, 'import os\n'), ((968, 1042), 'os.path.join', 'os.path.join', (['module_dir', '""".."""', '""".."""', '"""test_files"""', '"""setup_test"""', '"""POSCAR"""'], {}), "(module_dir, '..', '..', 'test_files', 'setup_test', 'POSCAR')\n", (980, 1042), False, 'import os\n'), ((1124, 1198), 'os.path.join', 'os.path.join', (['module_dir', '""".."""', '""".."""', '"""test_files"""', '"""setup_test"""', '"""POTCAR"""'], {}), "(module_dir, '..', '..', 'test_files', 'setup_test', 'POTCAR')\n", (1136, 1198), False, 'import os\n'), ((1282, 1357), 'os.path.join', 'os.path.join', (['module_dir', '""".."""', '""".."""', '"""test_files"""', '"""setup_test"""', '"""KPOINTS"""'], {}), "(module_dir, '..', '..', 'test_files', 'setup_test', 'KPOINTS')\n", (1294, 1357), False, 'import os\n'), ((1433, 1510), 'os.path.join', 'os.path.join', (['module_dir', '""".."""', '""".."""', '"""test_files"""', '"""preserve_incar"""', '"""INCAR"""'], {}), "(module_dir, '..', '..', 'test_files', 'preserve_incar', 'INCAR')\n", (1445, 1510), False, 'import 
os\n'), ((1825, 1852), 'os.path.join', 'os.path.join', (['module_dir', 'x'], {}), '(module_dir, x)\n', (1837, 1852), False, 'import os\n'), ((2042, 2066), 'pymatgen.io.vasp.Incar.from_file', 'Incar.from_file', (['"""INCAR"""'], {}), "('INCAR')\n", (2057, 2066), False, 'from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints\n'), ((2487, 2511), 'pymatgen.io.vasp.Incar.from_file', 'Incar.from_file', (['"""INCAR"""'], {}), "('INCAR')\n", (2502, 2511), False, 'from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints\n'), ((5544, 5558), 'pymatgen.io.vasp.Potcar', 'Potcar', (["['Si']"], {}), "(['Si'])\n", (5550, 5558), False, 'from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints\n'), ((1881, 1908), 'os.path.join', 'os.path.join', (['module_dir', 'x'], {}), '(module_dir, x)\n', (1893, 1908), False, 'import os\n'), ((2117, 2143), 'pymatgen.io.vasp.Poscar.from_file', 'Poscar.from_file', (['"""POSCAR"""'], {}), "('POSCAR')\n", (2133, 2143), False, 'from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints\n'), ((2197, 2223), 'pymatgen.io.vasp.Potcar.from_file', 'Potcar.from_file', (['"""POTCAR"""'], {}), "('POTCAR')\n", (2213, 2223), False, 'from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints\n'), ((2694, 2738), 'pymatgen.io.vasp.sets.MPRelaxSet', 'MPRelaxSet', (['self.struct_si'], {'force_gamma': '(True)'}), '(self.struct_si, force_gamma=True)\n', (2704, 2738), False, 'from pymatgen.io.vasp.sets import MPRelaxSet\n'), ((2357, 2385), 'pymatgen.io.vasp.Kpoints.from_file', 'Kpoints.from_file', (['"""KPOINTS"""'], {}), "('KPOINTS')\n", (2374, 2385), False, 'from pymatgen.io.vasp import Incar, Poscar, Potcar, Kpoints\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def edc_t(path):
  """EPICA Dome C Ice Core 800KYr Temperature Estimates.

  Temperature record (Deuterium proxy) from the EPICA Dome C ice core,
  covering 0 to 800 kyr BP. The CSV holds 5788 observations of 5 variables:

  `Bag`        Bag number
  `ztop`       Top depth (m)
  `Age`        Years before 1950
  `Deuterium`  Deuterium dD data
  `dT`         Temperature difference from the average of the last
               1000 years ~ -54.5degC

  http://www.ncdc.noaa.gov/paleo/icecore/antarctica/domec/domec_epica_data.html

  Args:
    path: str.
      Directory that stores the file; if `edc_t.csv` is missing it is
      downloaded and extracted there first.

  Returns:
    Tuple of np.ndarray `x_train` with 5788 rows and 5 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'edc_t.csv'
  csv_path = os.path.join(path, filename)
  # Fetch the dataset only when it is not already cached on disk.
  if not os.path.exists(csv_path):
    url = 'http://dustintran.com/data/r/DAAG/edcT.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='edc_t.csv',
                               resume=False)
  # First CSV column is the row index; dates are parsed where possible.
  data = pd.read_csv(csv_path, index_col=0, parse_dates=True)
  return data.values, {'columns': data.columns}
| [
"observations.util.maybe_download_and_extract",
"os.path.join",
"os.path.expanduser"
] | [((1196, 1220), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (1214, 1220), False, 'import os\n'), ((1360, 1439), 'observations.util.maybe_download_and_extract', 'maybe_download_and_extract', (['path', 'url'], {'save_file_name': '"""edc_t.csv"""', 'resume': '(False)'}), "(path, url, save_file_name='edc_t.csv', resume=False)\n", (1386, 1439), False, 'from observations.util import maybe_download_and_extract\n'), ((1524, 1552), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1536, 1552), False, 'import os\n'), ((1270, 1298), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1282, 1298), False, 'import os\n')] |
# Plant-watering logger: read the current sensor values, compare soil
# moisture against the most recent DB row, and insert a new history row.
import datetime,pymysql
db=pymysql.connect("localhost", "flowerbot", "mycroft", "sensordata")
curs=db.cursor()
from arduinodata import getCurrentSensorData
# sensordata: presumably (moisture, lux, pressure, humidity, temperature)
# in that order, matching the INSERT below — confirm against arduinodata.
sensordata = getCurrentSensorData()
select_stmt = "SELECT moisture,last_watered FROM history ORDER BY date DESC"
with db:
    curs.execute(select_stmt)
    # Most recent (moisture, last_watered) row.
    # NOTE(review): fetchone() returns None on an empty table, which would
    # crash below — confirm the table is seeded before first run.
    moistureInfo = curs.fetchone()
moistDifference = sensordata[0] - float(moistureInfo[0])
datalist = list(sensordata)
# A moisture jump > 100 is treated as "plant was just watered": stamp now;
# otherwise carry the previous last_watered timestamp forward.
if(moistDifference > 100):
    currentTime= datetime.datetime.now()
    datalist.append(currentTime)
else:
    datalist.append(moistureInfo[1])
print(datalist)
insert_stmt = (
    "INSERT INTO history (moisture, lux, pressure, humidity, temperature, last_watered) " "VALUES (%s, %s, %s, %s, %s, %s)"
)
# `with db` commits on success / rolls back on error.
with db:
    curs.execute (insert_stmt, datalist)
| [
"datetime.datetime.now",
"pymysql.connect",
"arduinodata.getCurrentSensorData"
] | [((28, 94), 'pymysql.connect', 'pymysql.connect', (['"""localhost"""', '"""flowerbot"""', '"""mycroft"""', '"""sensordata"""'], {}), "('localhost', 'flowerbot', 'mycroft', 'sensordata')\n", (43, 94), False, 'import datetime, pymysql\n'), ((172, 194), 'arduinodata.getCurrentSensorData', 'getCurrentSensorData', ([], {}), '()\n', (192, 194), False, 'from arduinodata import getCurrentSensorData\n'), ((477, 500), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (498, 500), False, 'import datetime, pymysql\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from config import Config
def cuda_(var):
    """Move *var* to the GPU when Config.use_gpu is set; otherwise return it unchanged."""
    if Config.use_gpu:
        return var.cuda()
    return var
class Net(nn.Module):
    """Two-layer MLP producing per-action log-probabilities.

    Architecture: state_dim -> 20 (tanh) -> num_actions (log-softmax).
    """

    def __init__(self, state_dim, num_actions):
        """
        Args:
            state_dim: size of the input state vector.
            num_actions: number of discrete actions (output size).
        """
        super(Net, self).__init__()
        self.linear1 = nn.Linear(state_dim, 20)   # hidden layer, width 20
        self.linear2 = nn.Linear(20, num_actions)

    def forward(self, states, bit_vecs=None):
        """Return log-probabilities over actions, shape (batch, num_actions).

        Args:
            states: float tensor of shape (batch, state_dim).
            bit_vecs: unused; the action-masking logic that consumed it was
                disabled. Kept so existing callers keep working.
        """
        h1 = torch.tanh(self.linear1(states))
        logits = self.linear2(h1)
        # log-softmax (not softmax): downstream losses such as NLLLoss
        # expect log-probabilities.
        return F.log_softmax(logits, dim=1)
| [
"torch.nn.functional.log_softmax",
"torch.nn.Linear"
] | [((318, 342), 'torch.nn.Linear', 'nn.Linear', (['state_dim', '(20)'], {}), '(state_dim, 20)\n', (327, 342), True, 'import torch.nn as nn\n'), ((366, 392), 'torch.nn.Linear', 'nn.Linear', (['(20)', 'num_actions'], {}), '(20, num_actions)\n', (375, 392), True, 'import torch.nn as nn\n'), ((995, 1018), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['p'], {'dim': '(1)'}), '(p, dim=1)\n', (1008, 1018), True, 'import torch.nn.functional as F\n')] |
from flask import Flask, request
from tgmsg import TelegramClient, messages
# Flask app serving the Telegram webhook, and the bot client itself.
app = Flask(__name__)
client = TelegramClient('<YOUR_TOKEN>')  # TODO: replace placeholder token before deploying
@client.register_message_processor()
def text_handler(incoming):
    """Echo the incoming message's text back to the chat it came from."""
    chat_id = incoming.message.chat.id
    echo = messages.TextMessage(incoming.message.text)
    client.send_message(echo, chat_id)
@app.route('/incoming')
def incoming():
    """Webhook endpoint: forward any JSON payload to the Telegram client.

    Returns an empty 200 response regardless, so Telegram does not retry.
    """
    # get_json(silent=True) yields None for non-JSON bodies instead of
    # aborting with 400 (which `request.json` does in modern Flask),
    # preserving the original truthiness-check behavior robustly.
    payload = request.get_json(silent=True)
    if payload:
        client.process_json(payload)
    return ''
| [
"tgmsg.messages.TextMessage",
"tgmsg.TelegramClient",
"flask.Flask"
] | [((83, 98), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (88, 98), False, 'from flask import Flask, request\n'), ((108, 138), 'tgmsg.TelegramClient', 'TelegramClient', (['"""<YOUR_TOKEN>"""'], {}), "('<YOUR_TOKEN>')\n", (122, 138), False, 'from tgmsg import TelegramClient, messages\n'), ((216, 259), 'tgmsg.messages.TextMessage', 'messages.TextMessage', (['incoming.message.text'], {}), '(incoming.message.text)\n', (236, 259), False, 'from tgmsg import TelegramClient, messages\n')] |
#!/usr/bin/env python3
import numpy as np
import copy
import itertools
import sys
import ete3
import numpy as np
from Bio import AlignIO
# import CIAlign.cropSeq as cropSeq
# from AlignmentStats import find_removed_cialign
def writeOutfile(outfile, arr, nams, rmfile=None):
    '''
    Writes an alignment stored in a numpy array into a FASTA file.

    Parameters
    ----------
    outfile: str
        Path to FASTA file where the output should be stored
    arr: np.array
        Numpy array containing the cleaned alignment, one row per sequence,
        one character per column
    nams: list
        List of names of sequences in the input alignment, in the same
        row order as arr
    rmfile: str
        Unused; kept for backward compatibility with existing callers

    Returns
    -------
    None
    '''
    # Context manager guarantees the handle is closed even if a write fails.
    with open(outfile, "w") as out:
        # zip pairs each name with its alignment row directly, avoiding
        # manual index bookkeeping.
        for nam, row in zip(nams, arr):
            out.write(">%s\n%s\n" % (nam, "".join(list(row))))
# helper function to read MSA from file into np array
def readMSA(infile, log=None, outfile_stem=None):
    '''
    Convert an alignment into a numpy array.

    Parameters
    ----------
    infile: string
        path to input alignment file in FASTA format
    log: logging.Logger
        An open log file object (used to report a format error)
    outfile_stem: string
        Unused; kept for backward compatibility with existing callers

    Returns
    -------
    arr: np.array
        2D numpy array, one row per sequence in file order, one column per
        alignment position; residues and names are upper-cased.
    nams: list
        List of sequence names in the same order as in the input file
    '''
    formatErrorMessage = "The MSA file needs to be in FASTA format."
    nams = []
    seqs = []
    nam = None   # None until the first ">" header is seen
    seq = []
    # `with` ensures the handle is closed; avoid shadowing builtin input().
    with open(infile) as fasta:
        for line in fasta:
            line = line.strip()
            if not line:
                continue
            if line[0] == ">":
                # Flush the previous record, if any, before starting a new one.
                if nam is not None:
                    nams.append(nam.upper())
                    seqs.append(np.array([s.upper() for s in seq]))
                nam = line.replace(">", "")
                seq = []
            else:
                # Sequence data before any header: not FASTA.
                if nam is None:
                    if log:
                        log.error(formatErrorMessage)
                    print(formatErrorMessage)
                    exit()
                seq += list(line)
    # Flush the final record.
    if nam is not None:
        nams.append(nam.upper())
        seqs.append(np.array([s.upper() for s in seq]))
    arr = np.array(seqs)
    return (arr, nams)
def find_removed_cialign(removed_file, arr, nams, keeprows=False):
    '''
    Reads the "_removed.txt" file generated by CIAlign to determine
    what CIAlign has removed from the original alignment.
    Replaces nucleotides removed by CIAlign with "!" in the array representing
    the alignment so that it is still possible to compare these alignments
    with uncleaned alignments in terms of knowing which columns and pairs
    of residues are aligned.
    ! characters are always counted as mismatches in comparisons between
    alignments.
    Also counts how many total characters were removed by CIAlign and
    how many non-gap characters.
    Parameters
    ----------
    removed_file: str
        Path to a CIAlign _removed.txt log file
    arr: np.array
        Numpy array containing the alignment represented as a 2D matrix, where
        dimension 1 is sequences and dimension 2 is columns
    nams: list
        List of names in the original alignment, in the same order as in the
        input and the sequence array (these should always be the same).
    keeprows: bool
        Currently unused.
    Returns
    -------
    cleanarr:
        2D numpy array containing the alignment represented as a 2D matrix,
        where dimension 1 is sequences and dimension 2 is columns, with
        residues removed by CIAlign represented as !
        Fully removed sequences are removed from this array.
    cleannams:
        List of names in the output alignment, with any sequences fully
        removed by CIAlign removed.
    removed_count_total: int
        Total number of characters removed by CIAlign (gaps included).
    removed_count_nongap: int
        Number of non-gap characters removed by CIAlign.
    '''
    # Read the CIAlign _removed.txt log file: tab-separated lines of
    # "<function> [<sequence>] <comma-separated ids>".
    lines = [line.strip().split("\t")
             for line in open(removed_file).readlines()]
    removed_count_total = 0
    removed_count_nongap = 0
    # Make an empty dictionary: per sequence, either a set of removed column
    # indices or the sentinel string "removed" for fully removed sequences.
    D = {x: set() for x in nams}
    for line in lines:
        func = line[0]
        if len(line) != 1:
            ids = line[-1].split(",")
        else:
            ids = []
        ids = [id.upper() for id in ids]
        # for crop_ends and remove_insertions columns are removed so keep
        # track of column numbers as integers
        if func in ['crop_ends', 'remove_insertions', 'other']:
            ids = [int(x) for x in ids]
        # crop_ends is only applied to some sequences so also
        # keep track of sequence names
        if func == "crop_ends":
            nam = line[1].upper()
            D[nam] = D[nam] | set(ids)
        # no need to remove insertions from sequences which were removed
        # completely later
        elif func == "remove_insertions":
            for nam in nams:
                # nam = nam.upper()
                if D[nam] != "removed":
                    D[nam] = D[nam] | set(ids)
        # remove divergent and remove short remove the whole sequence
        elif func in ["remove_divergent", "remove_short", "otherc"]:
            for nam in ids:
                D[nam] = "removed"
        elif func == "other":
            # "other" removes the listed columns from every surviving sequence.
            for nam in nams:
                if D[nam] != "removed":
                    D[nam] = D[nam] | set(ids)
    # make copies of the arrays (because I'm never quite sure when
    # python makes links rather than copies)
    cleannams = copy.copy(nams)
    cleannams = np.array([x.upper() for x in cleannams])
    cleanarr = copy.copy(arr)
    # iterate through everything that has been changed
    for nam, val in D.items():
        # Row index of this sequence in the (shrinking) arrays.
        which_nam = np.where(cleannams == nam)[0][0]
        # remove the removed sequences from the array
        if val == "removed":
            # keep track of the number of removed positions
            removed_count_total += len(cleanarr[which_nam])
            # keep track of the number of removed residues
            removed_count_nongap += sum(cleanarr[which_nam] != "-")
            # only keep names of sequences which are not removed
            cleannams = np.append(cleannams[:which_nam],
                                  cleannams[which_nam + 1:])
            # only keep the sequences which are not removed
            cleanarr = np.vstack([cleanarr[:which_nam],
                                  cleanarr[which_nam+1:]])
            # remove them from the input temporarily just to keep the shapes
            # the same
            arr = np.vstack([arr[:which_nam], arr[which_nam+1:]])
        else:
            # replace column substitutions with !
            which_pos = np.array(sorted(list(val)))
            if len(which_pos) != 0:
                cleanarr[which_nam, which_pos] = "!"
    removed_count_total += np.sum(cleanarr == "!")
    # sometimes gaps are removed - make these gaps in the output rather than
    # !s
    cleanarr[arr == "-"] = "-"
    removed_count_nongap += np.sum(cleanarr == "!")
    return (cleanarr, cleannams, removed_count_total, removed_count_nongap)
# Command-line entry: <msa_fasta> <cialign_removed_log> <output_fasta>
msa_file = sys.argv[1]
removed_file = sys.argv[2]
fake_outfile = sys.argv[3]
arr, nams = readMSA(msa_file)
# Normalise residues to upper case before mapping CIAlign removals.
arr = np.char.upper(arr)
(cleanarr, cleannams, removed_count_total, removed_count_nongap) = find_removed_cialign(removed_file, arr, nams)
# Write the alignment with CIAlign-removed residues marked as "!".
writeOutfile(fake_outfile, cleanarr, cleannams)
| [
"numpy.char.upper",
"numpy.where",
"numpy.append",
"numpy.array",
"numpy.sum",
"numpy.vstack",
"copy.copy"
] | [((7324, 7342), 'numpy.char.upper', 'np.char.upper', (['arr'], {}), '(arr)\n', (7337, 7342), True, 'import numpy as np\n'), ((2403, 2421), 'numpy.array', 'np.array', (['seqs[1:]'], {}), '(seqs[1:])\n', (2411, 2421), True, 'import numpy as np\n'), ((5606, 5621), 'copy.copy', 'copy.copy', (['nams'], {}), '(nams)\n', (5615, 5621), False, 'import copy\n'), ((5694, 5708), 'copy.copy', 'copy.copy', (['arr'], {}), '(arr)\n', (5703, 5708), False, 'import copy\n'), ((6938, 6961), 'numpy.sum', 'np.sum', (["(cleanarr == '!')"], {}), "(cleanarr == '!')\n", (6944, 6961), True, 'import numpy as np\n'), ((7108, 7131), 'numpy.sum', 'np.sum', (["(cleanarr == '!')"], {}), "(cleanarr == '!')\n", (7114, 7131), True, 'import numpy as np\n'), ((6269, 6328), 'numpy.append', 'np.append', (['cleannams[:which_nam]', 'cleannams[which_nam + 1:]'], {}), '(cleannams[:which_nam], cleannams[which_nam + 1:])\n', (6278, 6328), True, 'import numpy as np\n'), ((6447, 6506), 'numpy.vstack', 'np.vstack', (['[cleanarr[:which_nam], cleanarr[which_nam + 1:]]'], {}), '([cleanarr[:which_nam], cleanarr[which_nam + 1:]])\n', (6456, 6506), True, 'import numpy as np\n'), ((6657, 6706), 'numpy.vstack', 'np.vstack', (['[arr[:which_nam], arr[which_nam + 1:]]'], {}), '([arr[:which_nam], arr[which_nam + 1:]])\n', (6666, 6706), True, 'import numpy as np\n'), ((5816, 5842), 'numpy.where', 'np.where', (['(cleannams == nam)'], {}), '(cleannams == nam)\n', (5824, 5842), True, 'import numpy as np\n')] |
import click
from vlc_helper import vlcstart_precise
# CLI: all three arguments are optional positionals defaulting to "".
# The docstring below doubles as the click --help text, so it is left as-is.
@click.command()
@click.argument('filename', nargs=1, default='')
@click.argument('starttime', nargs=1, default='')
@click.argument('stoptime', nargs=1, default='')
def main(filename, starttime, stoptime):
    """Start filename at specific start time (and/or end at specific end time)
    """
    # Delegate entirely to the helper, which launches VLC.
    vlcstart_precise(filename, starttime, stoptime)
if __name__ == '__main__':
    main()
| [
"click.argument",
"click.command",
"vlc_helper.vlcstart_precise"
] | [((56, 71), 'click.command', 'click.command', ([], {}), '()\n', (69, 71), False, 'import click\n'), ((73, 120), 'click.argument', 'click.argument', (['"""filename"""'], {'nargs': '(1)', 'default': '""""""'}), "('filename', nargs=1, default='')\n", (87, 120), False, 'import click\n'), ((122, 170), 'click.argument', 'click.argument', (['"""starttime"""'], {'nargs': '(1)', 'default': '""""""'}), "('starttime', nargs=1, default='')\n", (136, 170), False, 'import click\n'), ((172, 219), 'click.argument', 'click.argument', (['"""stoptime"""'], {'nargs': '(1)', 'default': '""""""'}), "('stoptime', nargs=1, default='')\n", (186, 219), False, 'import click\n'), ((352, 399), 'vlc_helper.vlcstart_precise', 'vlcstart_precise', (['filename', 'starttime', 'stoptime'], {}), '(filename, starttime, stoptime)\n', (368, 399), False, 'from vlc_helper import vlcstart_precise\n')] |
# -*- coding: utf-8 -*-
#
# Author: <NAME>
# Project: crawler 1.0
# Date: 2021-07-10
#
import unittest
from pkg.util.parser_cve_json import is_cve_json_filename
from pkg.util.parser_cve_json import extract_cveid
from pkg.util.parser_cve_json import splitcveid
class IsCveJsonFilenameTestCase(unittest.TestCase):
    """Tests for is_cve_json_filename: bare CVE ids pass, anything else fails."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_is_cve_json_filename_10(self):
        result = is_cve_json_filename('CVE-2021-28809')
        self.assertTrue(result)

    def test_is_cve_json_filename_20(self):
        result = is_cve_json_filename('CVE-2021-3660')
        self.assertTrue(result)

    def test_is_cve_json_filename_30(self):
        # A .json suffix must be rejected — only the bare id matches.
        result = is_cve_json_filename('CVE-2021-3660.json')
        self.assertFalse(result)

    def test_is_cve_json_filename_40(self):
        result = is_cve_json_filename('openpgp-encrypted-message')
        self.assertFalse(result)
class ExtractCveidTestCase(unittest.TestCase):
    """Tests for extract_cveid: pull a CVE id out of free text, or None.

    NOTE: a later class in this module reuses the name ExtractCveidTestCase,
    which shadows this one under unittest discovery.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_extract_cveid_10(self):
        # assertEqual (not assertTrue(a == b)) so failures show both values.
        self.assertEqual('CVE-2021-28491',
                         extract_cveid('CVE-2021-28491. - SQLite heap overflow'))

    def test_extract_cveid_20(self):
        # Text with no CVE id yields None.
        self.assertIsNone(extract_cveid('TYPO3 Form Designer backend module of the Form Framework is vulnerable to cross-site scripting'))

    def test_extract_cveid_30(self):
        # The id may appear mid-sentence.
        self.assertEqual('CVE-2020-11575',
                         extract_cveid('Display and loop C codes, CVE-2020-11575, are vulnerable to heap based buffer overflow'))
class SplitcveidTestCase(unittest.TestCase):
    """Tests for splitcveid: split a CVE id into (year, number), or (None, None).

    Renamed from ExtractCveidTestCase: the duplicate class name silently
    shadowed the earlier extract_cveid test class, so those tests never ran.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_splitcveid_10(self):
        # The original assertTrue('2021', '28491'==...) always passed because
        # the second positional argument of assertTrue is the failure message.
        self.assertEqual(('2021', '28491'), splitcveid('CVE-2021-28491'))

    def test_splitcveid_20(self):
        # Text with no CVE id yields (None, None).
        self.assertEqual((None, None), splitcveid('TYPO3 Form Designer backend module of the Form Framework is vulnerable to cross-site scripting'))

    def test_splitcveid_30(self):
        self.assertEqual(('2020', '11575'), splitcveid('Display and loop C codes, CVE-2020-11575, are vulnerable to heap based buffer overflow'))
| [
"pkg.util.parser_cve_json.splitcveid",
"pkg.util.parser_cve_json.extract_cveid",
"pkg.util.parser_cve_json.is_cve_json_filename"
] | [((484, 522), 'pkg.util.parser_cve_json.is_cve_json_filename', 'is_cve_json_filename', (['"""CVE-2021-28809"""'], {}), "('CVE-2021-28809')\n", (504, 522), False, 'from pkg.util.parser_cve_json import is_cve_json_filename\n'), ((600, 637), 'pkg.util.parser_cve_json.is_cve_json_filename', 'is_cve_json_filename', (['"""CVE-2021-3660"""'], {}), "('CVE-2021-3660')\n", (620, 637), False, 'from pkg.util.parser_cve_json import is_cve_json_filename\n'), ((716, 758), 'pkg.util.parser_cve_json.is_cve_json_filename', 'is_cve_json_filename', (['"""CVE-2021-3660.json"""'], {}), "('CVE-2021-3660.json')\n", (736, 758), False, 'from pkg.util.parser_cve_json import is_cve_json_filename\n'), ((837, 886), 'pkg.util.parser_cve_json.is_cve_json_filename', 'is_cve_json_filename', (['"""openpgp-encrypted-message"""'], {}), "('openpgp-encrypted-message')\n", (857, 886), False, 'from pkg.util.parser_cve_json import is_cve_json_filename\n'), ((1112, 1167), 'pkg.util.parser_cve_json.extract_cveid', 'extract_cveid', (['"""CVE-2021-28491. - SQLite heap overflow"""'], {}), "('CVE-2021-28491. 
- SQLite heap overflow')\n", (1125, 1167), False, 'from pkg.util.parser_cve_json import extract_cveid\n'), ((1244, 1365), 'pkg.util.parser_cve_json.extract_cveid', 'extract_cveid', (['"""TYPO3 Form Designer backend module of the Form Framework is vulnerable to cross-site scripting"""'], {}), "(\n 'TYPO3 Form Designer backend module of the Form Framework is vulnerable to cross-site scripting'\n )\n", (1257, 1365), False, 'from pkg.util.parser_cve_json import extract_cveid\n'), ((1444, 1557), 'pkg.util.parser_cve_json.extract_cveid', 'extract_cveid', (['"""Display and loop C codes, CVE-2020-11575, are vulnerable to heap based buffer overflow"""'], {}), "(\n 'Display and loop C codes, CVE-2020-11575, are vulnerable to heap based buffer overflow'\n )\n", (1457, 1557), False, 'from pkg.util.parser_cve_json import extract_cveid\n'), ((1769, 1797), 'pkg.util.parser_cve_json.splitcveid', 'splitcveid', (['"""CVE-2021-28491"""'], {}), "('CVE-2021-28491')\n", (1779, 1797), False, 'from pkg.util.parser_cve_json import splitcveid\n'), ((1879, 1997), 'pkg.util.parser_cve_json.splitcveid', 'splitcveid', (['"""TYPO3 Form Designer backend module of the Form Framework is vulnerable to cross-site scripting"""'], {}), "(\n 'TYPO3 Form Designer backend module of the Form Framework is vulnerable to cross-site scripting'\n )\n", (1889, 1997), False, 'from pkg.util.parser_cve_json import splitcveid\n'), ((2072, 2182), 'pkg.util.parser_cve_json.splitcveid', 'splitcveid', (['"""Display and loop C codes, CVE-2020-11575, are vulnerable to heap based buffer overflow"""'], {}), "(\n 'Display and loop C codes, CVE-2020-11575, are vulnerable to heap based buffer overflow'\n )\n", (2082, 2182), False, 'from pkg.util.parser_cve_json import splitcveid\n')] |
import matplotlib
import numpy as np
import pandas as pd
from funcoes.dados import mse
from PyQt5.QtCore import QObject, QAbstractTableModel, Qt
from PyQt5.QtWidgets import QTableView
class ctrl_func(mse):
    """Controller tying the CPT/NSPT pile-capacity views to the mse data layer.

    _views is indexed positionally — presumably a fixed list of Qt windows
    built by the caller (index 0 holds num_son, index 5 the CPT form);
    confirm against the view-construction code.
    """
    # Default value; units/meaning not evident from this excerpt.
    fs: float = 2

    def __init__(self, views):
        super().__init__()
        self._views = views

    def av_ctrl(self):
        """Build the empty capacity table sized by the number of soundings."""
        # Number of soundings taken from the first view's spin box.
        self.sondagens = self._views[0].num_son.value()
        pd.set_option('display.max_rows', self.sondagens)
        self.tab = pd.DataFrame(index=range(0, self.sondagens),
                                columns=['Prof_m', 'Nspt', 'F1', 'K_kN/cm²',
                                         'Ap_cm²', 'Pp_kN', 'F2', 'K_kN/cm²',
                                         'α', 'Perim_cm', 'Pl, unit._kN',
                                         'Pl, acum._kN', 'Pr_kN'])
        self.tab['Prof_m'] = range(0, self.sondagens)
        # NOTE(review): hard-coded 4-tuple only fits sondagens == 4 — confirm.
        self.tab['Nspt'] = (1, 2, 3, 4)
        self.tab['Nspt'] = self.tab['Nspt'].apply(lambda x: x + 1)

    def av_lst_cpt(self):
        """Collect the CPT spin-box values from the CPT form layout."""
        self.cpt_lst = []
        self.cpt_lst_int = []
        # itemAt(i, 1) presumably fetches the field widget of form row i.
        for i in range(0, self.sondagens):
            self.cpt_lst.append(self._views[5].cpt_layout.itemAt(i, 1))
        for ele in self.cpt_lst:
            self.cpt_lst_int.append(ele.widget().value())
        print(self.cpt_lst_int)

    def av_conv_cpt(self, txt):
        """Convert CPT readings to NSPT using the Kc factor for soil type *txt*."""
        print('teste')  # NOTE(review): leftover debug print
        # kc_cpt_nspt_av: soil-type/Kc lookup table provided by the mse base.
        self.solo = self.kc_cpt_nspt_av
        self.solo_escolha = txt
        # Find the Kc coefficient for the chosen soil type.
        for index, tipo in enumerate(self.solo['Tipo de solo']):
            if tipo == self.solo_escolha:
                self.kc = self.solo.at[int(index), 'Kc_kPa']
        self.nspt_lst = [ele * float(self.kc) for ele in self.cpt_lst_int]
        print(self.nspt_lst)

    def ref_tab_est(self):
        """Show the pile reference table (estaca_mil) in a Qt table view."""
        self.reftab = tabelas(self.estaca_mil)
        self.view = QTableView()
        self.header = self.view.horizontalHeader()
        self.header.setDefaultAlignment(Qt.AlignHCenter)
        self.view.setModel(self.reftab)
        self.view.resize(800, 600)
        self.view.show()
class tabelas(QAbstractTableModel):
def __init__(self, data):
QAbstractTableModel.__init__(self)
self._data = data
def rowCount(self, parent=None):
return self._data.shape[0]
def columnCount(self, parent=None):
return self._data.shape[1]
def data(self, index, role=Qt.DisplayRole):
if index.isValid():
if role == Qt.DisplayRole:
return str(self._data.iloc[index.row(), index.column()])
return None
def headerData(self, col, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self._data.columns[col]
return None
| [
"PyQt5.QtCore.QAbstractTableModel.__init__",
"PyQt5.QtWidgets.QTableView",
"pandas.set_option"
] | [((403, 452), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'self.sondagens'], {}), "('display.max_rows', self.sondagens)\n", (416, 452), True, 'import pandas as pd\n'), ((1798, 1810), 'PyQt5.QtWidgets.QTableView', 'QTableView', ([], {}), '()\n', (1808, 1810), False, 'from PyQt5.QtWidgets import QTableView\n'), ((2095, 2129), 'PyQt5.QtCore.QAbstractTableModel.__init__', 'QAbstractTableModel.__init__', (['self'], {}), '(self)\n', (2123, 2129), False, 'from PyQt5.QtCore import QObject, QAbstractTableModel, Qt\n')] |
import unittest
import pathlib
import shutil
import pytest
import torch
from transformers import PretrainedConfig
from sgnlp.models.nea import (
NEAConfig,
NEARegPoolingModel,
NEARegModel,
NEABiRegModel,
NEABiRegPoolingModel,
NEATokenizer,
)
PARENT_DIR = pathlib.Path(__file__).parent
class NEATest(unittest.TestCase):
def setUp(self):
self.config = NEAConfig
self.reg_model = NEARegModel
self.reg_pooling_model = NEARegPoolingModel
self.bi_reg_model = NEABiRegModel
self.bi_reg_pooling_model = NEABiRegPoolingModel
self.model_input = torch.ones((2, 20)).int()
self.model_input_with_label = {
"input_ids": self.model_input,
"labels": torch.tensor([1, 1]),
}
def test_config_can_be_init(self):
config = self.config()
self.assertIsNotNone(config)
self.assertIsInstance(config, PretrainedConfig)
self.assertEqual(config.vocab_size, 4000)
self.assertEqual(config.embedding_dim, 50)
self.assertEqual(config.dropout, 0.5)
self.assertEqual(config.cnn_input_dim, 0)
self.assertEqual(config.cnn_output_dim, 0)
self.assertEqual(config.cnn_kernel_size, 0)
self.assertEqual(config.cnn_padding, 0)
self.assertEqual(config.rec_layer_type, "lstm")
self.assertEqual(config.rec_input_dim, 50)
self.assertEqual(config.rec_output_dim, 300)
self.assertEqual(config.aggregation, "mot")
self.assertEqual(config.linear_input_dim, 300)
self.assertEqual(config.linear_output_dim, 1)
self.assertEqual(config.skip_init_bias, False)
self.assertEqual(config.loss_function, "mse")
def test_reg_model_can_be_init(self):
config = self.config()
model = self.reg_model(config=config)
self.assertIsNotNone(model)
def test_reg_pooling_model_can_be_init(self):
config = self.config()
model = self.reg_pooling_model(config=config)
self.assertIsNotNone(model)
def test_bi_reg_model_can_be_init(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_model(config=config)
self.assertIsNotNone(model)
def test_bi_reg_pooling_model_can_be_init(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_pooling_model(config=config)
self.assertIsNotNone(model)
def test_reg_model_forward_pass(self):
config = self.config()
model = self.reg_model(config=config)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_reg_pooling_model_forward_pass(self):
config = self.config()
model = self.reg_pooling_model(config=config)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_bi_reg_model_forward_pass(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_model(config=config)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_bi_reg_pooling_model_forward_pass(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_pooling_model(config=config)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
@pytest.mark.slow
def test_from_pretrained(self):
config = self.config.from_pretrained(
"https://sgnlp.blob.core.windows.net/models/nea/config.json"
)
model = self.reg_pooling_model.from_pretrained(
"https://sgnlp.blob.core.windows.net/models/nea/pytorch_model.bin",
config=config,
)
output = model(self.model_input)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([2, 1]))
output_with_label = model(**self.model_input_with_label)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([2, 1]))
self.assertIsNotNone(output_with_label["loss"])
class NEAIntegrationTest(unittest.TestCase):
def setUp(self):
self.config = NEAConfig
self.tokenizer = NEATokenizer
self.vocab_path = PARENT_DIR / "test_data/vocab"
self.reg_model = NEARegModel
self.reg_pooling_model = NEARegPoolingModel
self.bi_reg_model = NEABiRegModel
self.bi_reg_pooling_model = NEABiRegPoolingModel
# for initialising linear bias
self.y_train = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5])
# for loading embedding
self.emb_matrix = torch.ones((4000, 50))
# train tokenizer to get the vocab artifacts
train_path = str(PARENT_DIR / "test_data/train.tsv")
vocab_dir = str(self.vocab_path)
nea_tokenizer = NEATokenizer(train_file=train_path, train_vocab=True)
nea_tokenizer.save_pretrained(vocab_dir)
def test_reg_model_integration(self):
config = self.config()
model = self.reg_model(config=config)
model.initialise_linear_bias(self.y_train)
model.load_pretrained_embedding(self.emb_matrix)
tokenizer = self.tokenizer.from_pretrained(self.vocab_path)
inputs = tokenizer("this is a test", return_tensors="pt")["input_ids"]
output = model(inputs)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([1, 1]))
inputs_with_labels = {"input_ids": inputs, "labels": torch.Tensor([0.9])}
output_with_label = model(**inputs_with_labels)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([1, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_reg_pooling_model_integration(self):
config = self.config()
model = self.reg_pooling_model(config=config)
model.initialise_linear_bias(self.y_train)
model.load_pretrained_embedding(self.emb_matrix)
tokenizer = self.tokenizer.from_pretrained(self.vocab_path)
inputs = tokenizer("this is a test", return_tensors="pt")["input_ids"]
output = model(inputs)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([1, 1]))
inputs_with_labels = {"input_ids": inputs, "labels": torch.Tensor([0.9])}
output_with_label = model(**inputs_with_labels)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([1, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_bi_reg_model_integration(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_model(config=config)
model.initialise_linear_bias(self.y_train)
model.load_pretrained_embedding(self.emb_matrix)
tokenizer = self.tokenizer.from_pretrained(self.vocab_path)
inputs = tokenizer("this is a test", return_tensors="pt")["input_ids"]
output = model(inputs)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([1, 1]))
inputs_with_labels = {"input_ids": inputs, "labels": torch.Tensor([0.9])}
output_with_label = model(**inputs_with_labels)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([1, 1]))
self.assertIsNotNone(output_with_label["loss"])
def test_bi_reg_pooling_model_integration(self):
config = self.config(linear_input_dim=600)
model = self.bi_reg_pooling_model(config=config)
model.initialise_linear_bias(self.y_train)
model.load_pretrained_embedding(self.emb_matrix)
tokenizer = self.tokenizer.from_pretrained(self.vocab_path)
inputs = tokenizer("this is a test", return_tensors="pt")["input_ids"]
output = model(inputs)
self.assertIsInstance(output["logits"], torch.Tensor)
self.assertEqual(output["logits"].shape, torch.Size([1, 1]))
inputs_with_labels = {"input_ids": inputs, "labels": torch.Tensor([0.9])}
output_with_label = model(**inputs_with_labels)
self.assertIsInstance(output_with_label["logits"], torch.Tensor)
self.assertEqual(output_with_label["logits"].shape, torch.Size([1, 1]))
self.assertIsNotNone(output_with_label["loss"])
def tearDown(self) -> None:
shutil.rmtree(self.vocab_path)
| [
"pathlib.Path",
"torch.Tensor",
"torch.tensor",
"sgnlp.models.nea.NEATokenizer",
"shutil.rmtree",
"torch.Size",
"torch.ones"
] | [((283, 305), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (295, 305), False, 'import pathlib\n'), ((6045, 6084), 'torch.Tensor', 'torch.Tensor', (['[0.1, 0.2, 0.3, 0.4, 0.5]'], {}), '([0.1, 0.2, 0.3, 0.4, 0.5])\n', (6057, 6084), False, 'import torch\n'), ((6144, 6166), 'torch.ones', 'torch.ones', (['(4000, 50)'], {}), '((4000, 50))\n', (6154, 6166), False, 'import torch\n'), ((6347, 6400), 'sgnlp.models.nea.NEATokenizer', 'NEATokenizer', ([], {'train_file': 'train_path', 'train_vocab': '(True)'}), '(train_file=train_path, train_vocab=True)\n', (6359, 6400), False, 'from sgnlp.models.nea import NEAConfig, NEARegPoolingModel, NEARegModel, NEABiRegModel, NEABiRegPoolingModel, NEATokenizer\n'), ((10119, 10149), 'shutil.rmtree', 'shutil.rmtree', (['self.vocab_path'], {}), '(self.vocab_path)\n', (10132, 10149), False, 'import shutil\n'), ((748, 768), 'torch.tensor', 'torch.tensor', (['[1, 1]'], {}), '([1, 1])\n', (760, 768), False, 'import torch\n'), ((2704, 2722), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (2714, 2722), False, 'import torch\n'), ((2923, 2941), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (2933, 2941), False, 'import torch\n'), ((3289, 3307), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (3299, 3307), False, 'import torch\n'), ((3508, 3526), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (3518, 3526), False, 'import torch\n'), ((3884, 3902), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (3894, 3902), False, 'import torch\n'), ((4103, 4121), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (4113, 4121), False, 'import torch\n'), ((4495, 4513), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (4505, 4513), False, 'import torch\n'), ((4714, 4732), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (4724, 4732), False, 'import torch\n'), ((5304, 5322), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (5314, 5322), 
False, 'import torch\n'), ((5523, 5541), 'torch.Size', 'torch.Size', (['[2, 1]'], {}), '([2, 1])\n', (5533, 5541), False, 'import torch\n'), ((6968, 6986), 'torch.Size', 'torch.Size', (['[1, 1]'], {}), '([1, 1])\n', (6978, 6986), False, 'import torch\n'), ((7050, 7069), 'torch.Tensor', 'torch.Tensor', (['[0.9]'], {}), '([0.9])\n', (7062, 7069), False, 'import torch\n'), ((7260, 7278), 'torch.Size', 'torch.Size', (['[1, 1]'], {}), '([1, 1])\n', (7270, 7278), False, 'import torch\n'), ((7870, 7888), 'torch.Size', 'torch.Size', (['[1, 1]'], {}), '([1, 1])\n', (7880, 7888), False, 'import torch\n'), ((7952, 7971), 'torch.Tensor', 'torch.Tensor', (['[0.9]'], {}), '([0.9])\n', (7964, 7971), False, 'import torch\n'), ((8162, 8180), 'torch.Size', 'torch.Size', (['[1, 1]'], {}), '([1, 1])\n', (8172, 8180), False, 'import torch\n'), ((8782, 8800), 'torch.Size', 'torch.Size', (['[1, 1]'], {}), '([1, 1])\n', (8792, 8800), False, 'import torch\n'), ((8864, 8883), 'torch.Tensor', 'torch.Tensor', (['[0.9]'], {}), '([0.9])\n', (8876, 8883), False, 'import torch\n'), ((9074, 9092), 'torch.Size', 'torch.Size', (['[1, 1]'], {}), '([1, 1])\n', (9084, 9092), False, 'import torch\n'), ((9710, 9728), 'torch.Size', 'torch.Size', (['[1, 1]'], {}), '([1, 1])\n', (9720, 9728), False, 'import torch\n'), ((9792, 9811), 'torch.Tensor', 'torch.Tensor', (['[0.9]'], {}), '([0.9])\n', (9804, 9811), False, 'import torch\n'), ((10002, 10020), 'torch.Size', 'torch.Size', (['[1, 1]'], {}), '([1, 1])\n', (10012, 10020), False, 'import torch\n'), ((617, 636), 'torch.ones', 'torch.ones', (['(2, 20)'], {}), '((2, 20))\n', (627, 636), False, 'import torch\n')] |
'''
Created on Jan 26, 2011
This module is a local copy of python locale in order to allow
passing in localconv as an argument to functions without affecting
system-wide settings. (The system settings can remain in 'C' locale.)
@author: Mark V Systems Limited (incorporating python locale module code)
(original python authors: <NAME>, improved by <NAME>)
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
import sys, subprocess
try:
import regex as re
except ImportError:
import re
import collections
import unicodedata
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
C_LOCALE = None # culture-invariant locale
def getUserLocale(localeCode=''):
# get system localeconv and reset system back to default
import locale
global C_LOCALE
conv = None
if sys.platform == "darwin" and not localeCode:
# possibly this MacOS bug: http://bugs.python.org/issue18378
# macOS won't provide right default code for english-european culture combinations
localeQueryResult = subprocess.getstatusoutput("defaults read -g AppleLocale") # MacOS only
if localeQueryResult[0] == 0 and '_' in localeQueryResult[1]: # successful
localeCode = localeQueryResult[1]
try:
locale.setlocale(locale.LC_ALL, _STR_8BIT(localeCode)) # str needed for 3to2 2.7 python to work
conv = locale.localeconv()
except locale.Error:
if sys.platform == "darwin":
# possibly this MacOS bug: http://bugs.python.org/issue18378
# the fix to this bug will loose the monetary/number configuration with en_BE, en_FR, etc
# so use this alternative which gets the right culture code for numbers even if english default language
localeCulture = '-' + localeCode[3:]
# find culture (for any language) in available locales
for availableLocale in availableLocales():
if len(availableLocale) >= 5 and localeCulture in availableLocale:
try:
locale.setlocale(locale.LC_ALL, availableLocale.replace('-','_'))
conv = locale.localeconv() # should get monetary and numeric codes
break
except locale.Error:
pass # this one didn't work
locale.setlocale(locale.LC_ALL, _STR_8BIT('C')) # str needed for 3to2 2.7 python to work
if conv is None: # some other issue prevents getting culture code, use 'C' defaults (no thousands sep, no currency, etc)
conv = locale.localeconv() # use 'C' environment, e.g., en_US
if C_LOCALE is None: # load culture-invariant C locale
C_LOCALE = locale.localeconv()
return conv
def getLanguageCode():
if sys.platform == "darwin":
# possibly this MacOS bug: http://bugs.python.org/issue18378
# even when fixed, macOS won't provide right default code for some language-culture combinations
localeQueryResult = subprocess.getstatusoutput("defaults read -g AppleLocale") # MacOS only
if localeQueryResult[0] == 0 and localeQueryResult[1]: # successful
return localeQueryResult[1][:5].replace("_","-")
import locale
try:
return locale.getdefaultlocale()[0].replace("_","-")
except (AttributeError, ValueError): #language code and encoding may be None if their values cannot be determined.
return "en"
def getLanguageCodes(lang=None):
if lang is None:
lang = getLanguageCode()
# allow searching on the lang with country part, either python or standard form, or just language
return [lang, lang.replace("-","_"), lang.partition("-")[0]]
iso3region = {
"AU": "aus",
"AT": "aut",
"BE": "bel",
"BR": "bra",
"CA": "can",
"CN": "chn",
"CZ": "cze",
"DA": "dnk",
"FN": "fin",
"FR": "fra",
"DE": "deu",
"GR": "grc",
"HK": "hkg",
"HU": "hun",
"IS": "isl",
"IE": "irl",
"IT": "ita",
"JA": "jpn",
"KO": "kor",
"MX": "mex",
"NL": "nld",
"NZ": "nzl",
"NO": "nor",
"PL": "pol",
"PT": "prt",
"RU": "rus",
"SG": "sgp",
"SL": "svk",
"ES": "esp",
"SV": "swe",
"CH": "che",
"TW": "twn",
"TR": "tur",
"UK": "gbr",
"US": "usa"}
_availableLocales = None
def availableLocales():
global _availableLocales
if _availableLocales is not None:
return _availableLocales
else:
localeQueryResult = subprocess.getstatusoutput("locale -a") # Mac and Unix only
if localeQueryResult[0] == 0: # successful
_availableLocales = set(locale.partition(".")[0].replace("_", "-")
for locale in localeQueryResult[1].split("\n"))
else:
_availableLocales = set()
return _availableLocales
_languageCodes = None
def languageCodes(): # dynamically initialize after gettext is loaded
global _languageCodes
if _languageCodes is not None:
return _languageCodes
else:
_languageCodes = { # language name (in English), code, and setlocale string which works in windows
_("Afrikaans (South Africa)"): "af-ZA afrikaans",
_("Albanian (Albania)"): "sq-AL albanian",
_("Arabic (Algeria)"): "ar-DZ arb_algeria",
_("Arabic (Bahrain)"): "ar-BH arabic_bahrain",
_("Arabic (Egypt)"): "ar-EG arb_egy",
_("Arabic (Iraq)"): "ar-IQ arb_irq",
_("Arabic (Jordan)"): "ar-JO arb_jor",
_("Arabic (Kuwait)"): "ar-KW arb_kwt",
_("Arabic (Lebanon)"): "ar-LB arb_lbn",
_("Arabic (Libya)"): "ar-LY arb_lby",
_("Arabic (Morocco)"): "ar-MA arb_morocco",
_("Arabic (Oman)"): "ar-OM arb_omn",
_("Arabic (Qatar)"): "ar-QA arabic_qatar",
_("Arabic (Saudi Arabia)"): "ar-SA arb_sau",
_("Arabic (Syria)"): "ar-SY arb_syr",
_("Arabic (Tunisia)"): "ar-TN arb_tunisia",
_("Arabic (U.A.E.)"): "ar-AE arb_are",
_("Arabic (Yemen)"): "ar-YE arb_yem",
_("Basque (Spain)"): "eu-ES basque",
_("Bulgarian (Bulgaria)"): "bg-BG bulgarian",
_("Belarusian (Belarus)"): "be-BY belarusian",
_("Catalan (Spain)"): "ca-ES catalan",
_("Chinese (PRC)"): "zh-CN chs",
_("Chinese (Taiwan)"): "zh-TW cht",
_("Chinese (Singapore)"): "zh-SG chs",
_("Croatian (Croatia)"): "hr-HR croatian",
_("Czech (Czech Republic)"): "cs-CZ czech",
_("Danish (Denmark)"): "da-DK danish",
_("Dutch (Belgium)"): "nl-BE nlb",
_("Dutch (Netherlands)"): "nl-NL nld",
_("English (Australia)"): "en-AU ena",
_("English (Belize)"): "en-BZ eng_belize",
_("English (Canada)"): "en-CA enc",
_("English (Caribbean)"): "en-029 eng_caribbean",
_("English (Ireland)"): "en-IE eni",
_("English (Jamaica)"): "en-JM enj",
_("English (New Zealand)"): "en-NZ enz",
_("English (South Africa)"): "en-ZA ens",
_("English (Trinidad)"): "en-TT eng",
_("English (United States)"): "en-US enu",
_("English (United Kingdom)"): "en-GB eng",
_("Estonian (Estonia)"): "et-EE estonian",
_("Faeroese (Faroe Islands)"): "fo-FO faroese",
_("Farsi (Iran)"): "fa-IR persian",
_("Finnish (Finland)"): "fi-FI fin",
_("French (Belgium)"): "fr-BE frb",
_("French (Canada)"): "fr-CA frc",
_("French (France)"): "fr-FR fra",
_("French (Luxembourg)"): "fr-LU frl",
_("French (Switzerland)"): "fr-CH frs",
_("German (Austria)"): "de-AT dea",
_("German (Germany)"): "de-DE deu",
_("German (Luxembourg)"): "de-LU del",
_("German (Switzerland)"): "de-CH des",
_("Greek (Greece)"): "el-GR ell",
_("Hebrew (Israel)"): "he-IL hebrew",
_("Hindi (India)"): "hi-IN hindi",
_("Hungarian (Hungary)"): "hu-HU hun",
_("Icelandic (Iceland)"): "is-IS icelandic",
_("Indonesian (Indonesia)"): "id-ID indonesian",
_("Italian (Italy)"): "it-IT ita",
_("Italian (Switzerland)"): "it-CH its",
_("Japanese (Japan)"): "ja-JP jpn",
_("Korean (Korea)"): "ko-KR kor",
_("Latvian (Latvia)"): "lv-LV latvian",
_("Lithuanian (Lituania)"): "lt-LT lithuanian",
_("Malaysian (Malaysia)"): "ms-MY malay",
_("Maltese (Malta)"): "mt-MT maltese",
_("Norwegian (Bokmal)"): "no-NO nor",
_("Norwegian (Nynorsk)"): "no-NO non",
_("Persian (Iran)"): "fa-IR persian",
_("Polish (Poland)"): "pl-PL plk",
_("Portuguese (Brazil)"): "pt-BR ptb",
_("Portuguese (Portugal)"): "pt-PT ptg",
_("Romanian (Romania)"): "ro-RO rom",
_("Russian (Russia)"): "ru-RU rus",
_("Serbian (Cyrillic)"): "sr-RS srb",
_("Serbian (Latin)"): "sr-RS srl",
_("Slovak (Slovakia)"): "sk-SK sky",
_("Slovenian (Slovania)"): "sl-SI slovenian",
_("Spanish (Argentina)"): "es-AR esr",
_("Spanish (Bolivia)"): "es-BO esb",
_("Spanish (Colombia)"): "es-CO eso",
_("Spanish (Chile)"): "es-CL esl",
_("Spanish (Costa Rica)"): "es-CR esc",
_("Spanish (Dominican Republic)"): "es-DO esd",
_("Spanish (Ecuador)"): "es-EC esf",
_("Spanish (El Salvador)"): "es-SV ese",
_("Spanish (Guatemala)"): "es-GT esg",
_("Spanish (Honduras)"): "es-HN esh",
_("Spanish (Mexico)"): "es-MX esm",
_("Spanish (Nicaragua)"): "es-NI esi",
_("Spanish (Panama)"): "es-PA esa",
_("Spanish (Paraguay)"): "es-PY esz",
_("Spanish (Peru)"): "es-PE esr",
_("Spanish (Puerto Rico)"): "es-PR esu",
_("Spanish (Spain)"): "es-ES esn",
_("Spanish (United States)"): "es-US est",
_("Spanish (Uruguay)"): "es-UY esy",
_("Spanish (Venezuela)"): "es-VE esv",
_("Swedish (Sweden)"): "sv-SE sve",
_("Swedish (Finland)"): "sv-FI svf",
_("Thai (Thailand)"): "th-TH thai",
_("Turkish (Turkey)"): "tr-TR trk",
_("Ukrainian (Ukraine)"): "uk-UA ukr",
_("Urdu (Pakistan)"): "ur-PK urdu",
_("Vietnamese (Vietnam)"): "vi-VN vietnamese",
}
return _languageCodes
def rtlString(source, lang):
if lang and source and lang[0:2] in {"ar","he"}:
line = []
lineInsertion = 0
words = []
rtl = True
for c in source:
bidi = unicodedata.bidirectional(c)
if rtl:
if bidi == 'L':
if words:
line.insert(lineInsertion, ''.join(words))
words = []
rtl = False
elif bidi in ('R', 'NSM', 'AN'):
pass
else:
if words:
line.insert(lineInsertion, ''.join(words))
words = []
line.insert(lineInsertion, c)
continue
else:
if bidi == 'R' or bidi == 'AN':
if words:
line.append(''.join(words))
words = []
rtl = True
words.append(c)
if words:
if rtl:
line.insert(0, ''.join(words))
return ''.join(line)
else:
return source
# Iterate over grouping intervals
def _grouping_intervals(grouping):
last_interval = 3 # added by Mark V to prevent compile error but not necessary semantically
for interval in grouping:
# if grouping is -1, we are done
if interval == CHAR_MAX:
return
# 0: re-use last group ad infinitum
if interval == 0:
while True:
yield last_interval
yield interval
last_interval = interval
#perform the grouping from right to left
def _group(conv, s, monetary=False):
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
result = ""
seps = 0
if s[-1] == ' ':
stripped = s.rstrip()
right_spaces = s[len(stripped):]
s = stripped
else:
right_spaces = ''
left_spaces = ''
groups = []
for interval in _grouping_intervals(grouping):
if not s or s[-1] not in "0123456789":
# only non-digit characters remain (sign, spaces)
left_spaces = s
s = ''
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
return (
left_spaces + thousands_sep.join(groups) + right_spaces,
len(thousands_sep) * (len(groups) - 1)
)
# Strip a given amount of excess padding from the given string
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos+1]
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
def format(conv, percent, value, grouping=False, monetary=False, *additional):
"""Returns the locale-aware substitution of a %? specifier
(percent).
additional is for format strings which contain one or more
'*' modifiers."""
# this is only for one-percent-specifier strings and this should be checked
if not percent.startswith("{"):
match = _percent_re.match(percent)
if not match or len(match.group())!= len(percent):
raise ValueError(("format() must be given exactly one %%char "
"format specifier, %s not valid") % repr(percent))
return _format(conv, percent, value, grouping, monetary, *additional)
def _format(conv, percent, value, grouping=False, monetary=False, *additional):
if percent.startswith("{"): # new formatting {:.{}f}
formattype = percent[-2]
if additional:
formatted = percent.format(*((value,) + additional))
else:
formatted = percent.format(*value if isinstance(value,tuple) else value)
else: # percent formatting %.*f
formattype = percent[-1]
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
# floats and decimal ints need special action!
if formattype in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(conv, parts[0], monetary=monetary)
decimal_point = conv[monetary and 'mon_decimal_point' or 'decimal_point']
formatted = decimal_point.join(parts)
if seps:
formatted = _strip_padding(formatted, seps)
elif formattype in 'diu':
seps = 0
if grouping:
formatted, seps = _group(conv, formatted, monetary=monetary)
if seps:
formatted = _strip_padding(formatted, seps)
return formatted
def format_string(conv, f, val, grouping=False):
"""Formats a string in the same way that the % formatting would use,
but takes the current locale into account.
Grouping is applied if the third parameter is true."""
percents = list(_percent_re.finditer(f))
new_f = _percent_re.sub('%s', f)
if isinstance(val, collections.Mapping):
new_val = []
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
new_val.append(format(conv, perc.group(), val, grouping))
else:
if not isinstance(val, tuple):
val = (val,)
new_val = []
i = 0
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
starcount = perc.group('modifiers').count('*')
new_val.append(_format(conv,
perc.group(),
val[i],
grouping,
False,
*val[i+1:i+1+starcount]))
i += (1 + starcount)
val = tuple(new_val)
return new_f % val
def currency(conv, val, symbol=True, grouping=False, international=False):
"""Formats val according to the currency settings
in the current locale."""
# check for illegal values
digits = conv[international and 'int_frac_digits' or 'frac_digits']
if digits == 127:
raise ValueError("Currency formatting is not possible using "
"the 'C' locale.")
s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = '<' + s + '>'
if symbol:
smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = smb + (separated and ' ' or '') + s
else:
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val<0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
return s.replace('<', '').replace('>', '')
def ftostr(conv, val):
"""Convert float to integer, taking the locale into account."""
return format(conv, "%.12g", val)
def atof(conv, string, func=float):
"Parses a string as a float according to the locale settings."
#First, get rid of the grouping
ts = conv['thousands_sep']
if ts:
string = string.replace(ts, '')
#next, replace the decimal point with a dot
dd = conv['decimal_point']
if dd:
string = string.replace(dd, '.')
#finally, parse the string
return func(string)
def atoi(conv, str):
"Converts a string to an integer according to the locale settings."
return atof(conv, str, _INT)
# decimal formatting
from decimal import getcontext, Decimal
def format_picture(conv, value, picture):
monetary = False
decimal_point = conv['decimal_point']
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
percent = '%'
per_mille = '\u2030'
minus_sign = '-'
#grouping = conv[monetary and 'mon_grouping' or 'grouping']
if isinstance(value, float):
value = Decimal.from_float(value)
elif isinstance(value, _STR_NUM_TYPES):
value = Decimal(value)
elif not isinstance(value, Decimal):
raise ValueError(_('Picture requires a number convertable to decimal or float').format(picture))
if value.is_nan():
return 'NaN'
isNegative = value.is_signed()
pic, sep, negPic = picture.partition(';')
if negPic and ';' in negPic:
raise ValueError(_('Picture contains multiple picture sepearators {0}').format(picture))
if isNegative and negPic:
pic = negPic
if len([c for c in pic if c in (percent, per_mille) ]) > 1:
raise ValueError(_('Picture contains multiple percent or per_mille charcters {0}').format(picture))
if percent in pic:
value *= 100
elif per_mille in pic:
value *= 1000
intPart, sep, fractPart = pic.partition(decimal_point)
prefix = ''
numPlaces = 0
intPlaces = 0
grouping = 0
fractPlaces = 0
suffix = ''
if fractPart:
if decimal_point in fractPart:
raise ValueError(_('Sub-picture contains decimal point sepearators {0}').format(pic))
for c in fractPart:
if c.isdecimal():
numPlaces += 1
fractPlaces += 1
if suffix:
raise ValueError(_('Sub-picture passive character {0} between active characters {1}').format(c, fractPart))
else:
suffix += c
intPosition = 0
for c in reversed(intPart):
if c.isdecimal() or c == '#' or c == thousands_sep:
if prefix:
raise ValueError(_('Sub-picture passive character {0} between active characters {1}').format(c, intPart))
if c.isdecimal():
numPlaces += 1
intPlaces += 1
intPosition += 1
prefix = ''
elif c == '#':
numPlaces += 1
intPosition += 1
elif c == thousands_sep:
if not grouping:
grouping = intPosition
else:
prefix = c + prefix
if not numPlaces and prefix != minus_sign:
raise ValueError(_('Sub-picture must contain at least one digit position or sign character {0}').format(pic))
if intPlaces == 0 and fractPlaces == 0:
intPlaces = 1
return format_decimal(None, value, intPlaces=intPlaces, fractPlaces=fractPlaces,
sep=thousands_sep, dp=decimal_point, grouping=grouping,
pos=prefix,
neg=prefix if negPic else prefix + minus_sign,
trailpos=suffix,
trailneg=suffix)
def format_decimal(conv, value, intPlaces=1, fractPlaces=2, curr='', sep=None, grouping=None, dp=None, pos=None, neg=None, trailpos=None, trailneg=None):
"""Convert Decimal to a formatted string including currency if any.
intPlaces: required number of digits before the decimal point
fractPlaces: required number of places after the decimal point
curr: optional currency symbol before the sign (may be blank)
sep: optional grouping separator (comma, period, space, or blank)
dp: decimal point indicator (comma or period)
only specify as blank when places is zero
pos: optional sign for positive numbers: '+', space or blank
neg: optional sign for negative numbers: '-', '(', space or blank
trailneg:optional trailing minus indicator: '-', ')', space or blank
>>> d = Decimal('-1234567.8901')
>>> format(d, curr='$')
'-$1,234,567.89'
>>> format(d, places=0, sep='.', dp='', neg='', trailneg='-')
'1.234.568-'
>>> format(d, curr='$', neg='(', trailneg=')')
'($1,234,567.89)'
>>> format(Decimal(123456789), sep=' ')
'123 456 789.00'
>>> format(Decimal('-0.02'), neg='<', trailneg='>')
'<0.02>'
"""
if conv is not None:
if dp is None:
dp = conv['decimal_point'] or '.'
if sep is None:
sep = conv['thousands_sep'] or ','
if pos is None and trailpos is None:
possign = conv['positive_sign']
pospos = conv['p_sign_posn']
if pospos in('0', 0):
pos = '('; trailpos = ')'
elif pospos in ('1', 1, '3', 3):
pos = possign; trailpos = ''
elif pospos in ('2', 2, '4', 4):
pos = ''; trailpos = possign
else:
pos = ''; trailpos = ''
if neg is None and trailneg is None:
negsign = conv['negative_sign']
negpos = conv['n_sign_posn']
if negpos in ('0', 0):
neg = '('; trailneg = ')'
elif negpos in ('1', 1, '3', 3):
neg = negsign; trailneg = ''
elif negpos in ('2', 2, '4', 4):
neg = ''; trailneg = negsign
elif negpos == 127:
neg = '-'; trailneg = ''
else:
neg = ''; trailneg = ''
if grouping is None:
groups = conv['grouping']
grouping = groups[0] if groups else 3
else:
if dp is None:
dp = '.'
if sep is None:
sep = ','
if neg is None and trailneg is None:
neg = '-'; trailneg = ''
if grouping is None:
grouping = 3
q = Decimal(10) ** -fractPlaces # 2 places --> '0.01'
sign, digits, exp = value.quantize(q).as_tuple()
result = []
digits = list(map(str, digits))
build, next = result.append, digits.pop
build(trailneg if sign else trailpos)
if value.is_finite():
for i in range(fractPlaces):
build(next() if digits else '0')
if fractPlaces:
build(dp)
i = 0
while digits or intPlaces > 0:
build(next() if digits else '0')
intPlaces -= 1
i += 1
if grouping and i == grouping and digits:
i = 0
build(sep)
elif value.is_nan():
result.append("NaN")
elif value.is_infinite():
result.append("ytinifnI")
build(curr)
build(neg if sign else pos)
return ''.join(reversed(result))
| [
"re.compile",
"locale.partition",
"unicodedata.bidirectional",
"decimal.Decimal.from_float",
"locale.getdefaultlocale",
"locale.localeconv",
"decimal.Decimal",
"subprocess.getstatusoutput"
] | [((13519, 13616), 're.compile', 're.compile', (['"""%(?:\\\\((?P<key>.*?)\\\\))?(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]"""'], {}), "(\n '%(?:\\\\((?P<key>.*?)\\\\))?(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]'\n )\n", (13529, 13616), False, 'import re\n'), ((1099, 1157), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['"""defaults read -g AppleLocale"""'], {}), "('defaults read -g AppleLocale')\n", (1125, 1157), False, 'import sys, subprocess\n'), ((1430, 1449), 'locale.localeconv', 'locale.localeconv', ([], {}), '()\n', (1447, 1449), False, 'import locale\n'), ((2621, 2640), 'locale.localeconv', 'locale.localeconv', ([], {}), '()\n', (2638, 2640), False, 'import locale\n'), ((2754, 2773), 'locale.localeconv', 'locale.localeconv', ([], {}), '()\n', (2771, 2773), False, 'import locale\n'), ((3049, 3107), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['"""defaults read -g AppleLocale"""'], {}), "('defaults read -g AppleLocale')\n", (3075, 3107), False, 'import sys, subprocess\n'), ((4405, 4444), 'subprocess.getstatusoutput', 'subprocess.getstatusoutput', (['"""locale -a"""'], {}), "('locale -a')\n", (4431, 4444), False, 'import sys, subprocess\n'), ((19409, 19434), 'decimal.Decimal.from_float', 'Decimal.from_float', (['value'], {}), '(value)\n', (19427, 19434), False, 'from decimal import getcontext, Decimal\n'), ((24853, 24864), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (24860, 24864), False, 'from decimal import getcontext, Decimal\n'), ((10830, 10858), 'unicodedata.bidirectional', 'unicodedata.bidirectional', (['c'], {}), '(c)\n', (10855, 10858), False, 'import unicodedata\n'), ((19495, 19509), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (19502, 19509), False, 'from decimal import getcontext, Decimal\n'), ((3301, 3326), 'locale.getdefaultlocale', 'locale.getdefaultlocale', ([], {}), '()\n', (3324, 3326), False, 'import locale\n'), ((2204, 2223), 'locale.localeconv', 'locale.localeconv', 
([], {}), '()\n', (2221, 2223), False, 'import locale\n'), ((4553, 4574), 'locale.partition', 'locale.partition', (['"""."""'], {}), "('.')\n", (4569, 4574), False, 'import locale\n')] |
from wtforms import StringField
from flask_babel import lazy_gettext
from wtforms.validators import DataRequired
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_appbuilder.forms import DynamicForm
class TestForm(DynamicForm):
TestFieldOne = StringField(lazy_gettext('Test Field One'), validators=[DataRequired()], widget=BS3TextFieldWidget())
TestFieldTwo = StringField(lazy_gettext('Test Field One'), validators=[DataRequired()], widget=BS3TextFieldWidget())
| [
"flask_babel.lazy_gettext",
"flask_appbuilder.fieldwidgets.BS3TextFieldWidget",
"wtforms.validators.DataRequired"
] | [((283, 313), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Test Field One"""'], {}), "('Test Field One')\n", (295, 313), False, 'from flask_babel import lazy_gettext\n'), ((404, 434), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Test Field One"""'], {}), "('Test Field One')\n", (416, 434), False, 'from flask_babel import lazy_gettext\n'), ((351, 371), 'flask_appbuilder.fieldwidgets.BS3TextFieldWidget', 'BS3TextFieldWidget', ([], {}), '()\n', (369, 371), False, 'from flask_appbuilder.fieldwidgets import BS3TextFieldWidget\n'), ((472, 492), 'flask_appbuilder.fieldwidgets.BS3TextFieldWidget', 'BS3TextFieldWidget', ([], {}), '()\n', (490, 492), False, 'from flask_appbuilder.fieldwidgets import BS3TextFieldWidget\n'), ((327, 341), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (339, 341), False, 'from wtforms.validators import DataRequired\n'), ((448, 462), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (460, 462), False, 'from wtforms.validators import DataRequired\n')] |
"""
Contacts between nucleotides in a tetracycline aptamer
======================================================
This example reproduces a figure from the publication
*"StreAM-Tg: algorithms for analyzing coarse grained RNA dynamics based
on Markov models of connectivity-graphs"* [1]_.
The figure displays a coarse grained model of a tetracycline aptamer
and highlights interacting nucleotides based on a cutoff distance.
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>,
"StreAM-Tg: algorithms for analyzing coarse grained RNA dynamics based
on Markov models of connectivity-graphs."
Algorithms Mol Biol 12 (2017).
"""
# Code source: <NAME>
# License: CC0
import numpy as np
import biotite.structure as struc
import biotite.structure.io.mmtf as mmtf
import biotite.database.rcsb as rcsb
import ammolite
PNG_SIZE = (800, 800)
########################################################################
mmtf_file = mmtf.MMTFFile.read(rcsb.fetch("3EGZ", "mmtf"))
structure = mmtf.get_structure(mmtf_file, model=1)
aptamer = structure[struc.filter_nucleotides(structure)]
# Coarse graining: Represent each nucleotide using its C3' atom
aptamer = aptamer[aptamer.atom_name == "C3'"]
# Connect consecutive nucleotides
indices = np.arange(aptamer.array_length())
aptamer.bonds = struc.BondList(
aptamer.array_length(),
np.stack((indices[:-1], indices[1:]), axis=-1)
)
pymol_obj = ammolite.PyMOLObject.from_structure(aptamer)
pymol_obj.show("sticks")
pymol_obj.show("spheres")
pymol_obj.color("black")
ammolite.cmd.set("stick_color", "red")
ammolite.cmd.set("stick_radius", 0.5)
ammolite.cmd.set("sphere_scale", 1.0)
ammolite.cmd.set("sphere_quality", 4)
# Adjust camera
pymol_obj.orient()
pymol_obj.zoom(buffer=10)
ammolite.cmd.rotate("z", 90)
ammolite.show(PNG_SIZE)
########################################################################
CUTOFF = 13
# Find contacts within cutoff distance
adjacency_matrix = struc.CellList(aptamer, CUTOFF) \
.create_adjacency_matrix(CUTOFF)
for i, j in zip(*np.where(adjacency_matrix)):
pymol_obj.distance("", i, j, show_label=False, gap=0)
ammolite.cmd.set("dash_color", "firebrick")
# Add black outlines
ammolite.cmd.bg_color("white")
ammolite.cmd.set("ray_trace_mode", 1)
ammolite.cmd.set("ray_trace_disco_factor", 0.5)
ammolite.show(PNG_SIZE)
# sphinx_gallery_thumbnail_number = 2 | [
"ammolite.PyMOLObject.from_structure",
"biotite.database.rcsb.fetch",
"numpy.where",
"biotite.structure.io.mmtf.get_structure",
"numpy.stack",
"biotite.structure.CellList",
"ammolite.show",
"biotite.structure.filter_nucleotides",
"ammolite.cmd.set",
"ammolite.cmd.rotate",
"ammolite.cmd.bg_color"... | [((998, 1036), 'biotite.structure.io.mmtf.get_structure', 'mmtf.get_structure', (['mmtf_file'], {'model': '(1)'}), '(mmtf_file, model=1)\n', (1016, 1036), True, 'import biotite.structure.io.mmtf as mmtf\n'), ((1409, 1453), 'ammolite.PyMOLObject.from_structure', 'ammolite.PyMOLObject.from_structure', (['aptamer'], {}), '(aptamer)\n', (1444, 1453), False, 'import ammolite\n'), ((1530, 1568), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""stick_color"""', '"""red"""'], {}), "('stick_color', 'red')\n", (1546, 1568), False, 'import ammolite\n'), ((1569, 1606), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""stick_radius"""', '(0.5)'], {}), "('stick_radius', 0.5)\n", (1585, 1606), False, 'import ammolite\n'), ((1607, 1644), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""sphere_scale"""', '(1.0)'], {}), "('sphere_scale', 1.0)\n", (1623, 1644), False, 'import ammolite\n'), ((1645, 1682), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""sphere_quality"""', '(4)'], {}), "('sphere_quality', 4)\n", (1661, 1682), False, 'import ammolite\n'), ((1745, 1773), 'ammolite.cmd.rotate', 'ammolite.cmd.rotate', (['"""z"""', '(90)'], {}), "('z', 90)\n", (1764, 1773), False, 'import ammolite\n'), ((1774, 1797), 'ammolite.show', 'ammolite.show', (['PNG_SIZE'], {}), '(PNG_SIZE)\n', (1787, 1797), False, 'import ammolite\n'), ((2135, 2178), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""dash_color"""', '"""firebrick"""'], {}), "('dash_color', 'firebrick')\n", (2151, 2178), False, 'import ammolite\n'), ((2201, 2231), 'ammolite.cmd.bg_color', 'ammolite.cmd.bg_color', (['"""white"""'], {}), "('white')\n", (2222, 2231), False, 'import ammolite\n'), ((2232, 2269), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""ray_trace_mode"""', '(1)'], {}), "('ray_trace_mode', 1)\n", (2248, 2269), False, 'import ammolite\n'), ((2270, 2317), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""ray_trace_disco_factor"""', '(0.5)'], {}), "('ray_trace_disco_factor', 0.5)\n", (2286, 2317), False, 'import 
ammolite\n'), ((2319, 2342), 'ammolite.show', 'ammolite.show', (['PNG_SIZE'], {}), '(PNG_SIZE)\n', (2332, 2342), False, 'import ammolite\n'), ((958, 984), 'biotite.database.rcsb.fetch', 'rcsb.fetch', (['"""3EGZ"""', '"""mmtf"""'], {}), "('3EGZ', 'mmtf')\n", (968, 984), True, 'import biotite.database.rcsb as rcsb\n'), ((1057, 1092), 'biotite.structure.filter_nucleotides', 'struc.filter_nucleotides', (['structure'], {}), '(structure)\n', (1081, 1092), True, 'import biotite.structure as struc\n'), ((1347, 1393), 'numpy.stack', 'np.stack', (['(indices[:-1], indices[1:])'], {'axis': '(-1)'}), '((indices[:-1], indices[1:]), axis=-1)\n', (1355, 1393), True, 'import numpy as np\n'), ((1944, 1975), 'biotite.structure.CellList', 'struc.CellList', (['aptamer', 'CUTOFF'], {}), '(aptamer, CUTOFF)\n', (1958, 1975), True, 'import biotite.structure as struc\n'), ((2047, 2073), 'numpy.where', 'np.where', (['adjacency_matrix'], {}), '(adjacency_matrix)\n', (2055, 2073), True, 'import numpy as np\n')] |
"""Convert XFL edges to SVG paths.
If you just want to convert, use `xfl_edge_to_svg_path()`. If you're interested
in how everything works, read on.
"""
# Read these links first, as there is no official documentation for the XFL
# edge format:
#
# * https://github.com/SasQ/SavageFlask/blob/master/doc/FLA.txt
# * https://stackoverflow.com/a/4077709
#
# Overview:
#
# In Animate, graphic symbols are made of filled shapes and stroked paths.
# Both are defined by their outline, which Animate breaks into pieces. We'll
# call such a piece a "segment", rather than an "edge", to avoid confusion
# with the edge format.
#
# A segment may be part of up to two shapes: one on its left and one on its
# right. This is determined by the presence of the "fillStyle0" (left) and
# "fillStyle1" (right) attributes, which specify the style for the shape on
# that side.
#
# A segment may be part of up to one stroked path. This is determined by the
# presence of the "strokeStyle" attribute.
#
# So, to extract graphic symbols from XFL, we first convert the edge format
# into segments (represented as point lists, see below). Each <Edge> element
# produces one or more segments, each of which inherits the <Edge>'s
# "fillStyle0", "fillStyle1", and "strokeStyle" attributes.
#
# Then, for filled shapes, we join segments of the same fill style by
# matching their start/end points. The fill styles must be for the same
# side. For stroked paths, we just collect all segments of the same style.
#
# Finally, we convert segments to the SVG path format, put them in an SVG
# <path> element, and assign fill/stroke style attributes to the <path>.
from collections import defaultdict
import re
from typing import Iterator
import xml.etree.ElementTree as ET
# The XFL edge format can be described as follows:
#
# start : moveto (moveto | lineto | quadto)*
# moveto : "!" NUMBER ~ 2 select? // Move to this point
# lineto : ("|" | "/") NUMBER ~ 2 // Line from current point to here
# quadto : ("[" | "]") NUMBER ~ 4 // Quad Bézier (control point, dest)
# select : /S[1-7]/ // Only used by Animate
# NUMBER : /-?\d+(\.\d+)?/ // Decimal number
# | /#[A-Z0-9]{1,6}\.[A-Z0-9]{1,2}/ // Signed, 32-bit number in hex
# %import common.WS // Ignore whitespace
# %ignore WS
#
# Notes:
# * This grammar is written for use with Lark, a Python parsing toolkit. See:
# * Project page: https://github.com/lark-parser/lark
# * Try it online: https://www.lark-parser.org/ide/
# * The cubic commands are omitted:
# * They only appear in the "cubics" attribute and not in "edges"
# * They're just hints for Animate and aren't needed for conversion to SVG
# * "select" is also just a hint for Animate, but it appears in "edges", so we
# include it for completeness.
#
# Anyhow, this language can actually be tokenized with a single regex, which is
# faster than using Lark:
EDGE_TOKENIZER = re.compile(
r"""
[!|/[\]] | # Move to, line to, quad to
(?<!S)-?\d+(?:\.\d+)? | # Decimal number
\#[A-Z0-9]+\.[A-Z0-9]+ # Hex number
""",
re.VERBOSE,
)
# Notes:
# * Whitespace is automatically ignored, as we only match what we want.
# * The negative lookbehind assertion (?<!S) is needed to avoid matching the
# digit in select commands as a number.
# After tokenizing, we need to parse numbers:
def parse_number(num: str) -> float:
"""Parse an XFL edge format number."""
if num[0] == "#":
# Signed, 32-bit fixed-point number in hex
parts = num[1:].split(".")
# Pad to 8 digits
hex_num = "{:>06}{:<02}".format(*parts)
num = int.from_bytes(bytes.fromhex(hex_num), "big", signed=True)
# Account for hex scaling and Animate's 20x scaling (twips)
return (num / 256) / 20
else:
# Decimal number. Account for Animate's 20x scaling (twips)
return float(num) / 20
# Notes:
# * The <path>s produced by Animate's SVG export sometimes have slightly
# different numbers (e.g. flooring or subtracting 1 from decimals before
# dividing by 20). It's not clear how this works or if it's even intended,
# so I gave up trying to replicate it.
# * Animate prints round numbers as integers (e.g. "1" instead of "1.0"), but
# it makes no difference for SVG.
# Now, we can parse the edge format. To join segments into shapes, though, we
# will need a way to reverse segments (for normalizing them so that the filled
# shape is always on the left). That is, if we have a segment like:
#
# C
# / \
# | |
# A ----- B D ----- E
#
# which is represented by:
#
# moveto A, lineto B, quadto C D, lineto E
#
# We should be able to reverse it and get:
#
# moveto E, lineto D, quadto C B, lineto A
#
# The "point list" format (couldn't think of a better name) meets this
# requirement. The segment above would be represented as:
#
# [A, B, (C,), D, E]
#
# The first point is always the destination of a "move to" command. Subsequent
# points are the destinations of "line to" commands. If a point is in a tuple
# like `(C,)`, then it's the control point of a quadratic Bézier curve, and the
# following point is the destination of the curve. (Tuples are just an easy way
# to mark points--there's nothing particular about the choice.)
#
# With this format, we can see that reversing the list gives us the same
# segment, but in reverse:
#
# [E, D, (C,), B, A]
#
# In practice, each point is represented as a coordinate string, so the actual
# point list might look like:
#
# ["0 0", "10 0", ("20 10",), "30 0", "40 0"]
#
# This next function converts the XFL edge format into point lists. Since each
# "edges" attribute can contain multiple segments, but each point list only
# represents one segment, this function can yield multiple point lists.
def edge_format_to_point_lists(edges: str) -> Iterator[list]:
"""Convert the XFL edge format to point lists.
Args:
edges: The "edges" attribute of an XFL <Edge> element
Yields:
One point list for each segment parsed out of `edges`
"""
tokens = iter(EDGE_TOKENIZER.findall(edges))
def next_point():
return f"{parse_number(next(tokens))} {parse_number(next(tokens))}"
assert next(tokens) == "!", "Edge format must start with moveto (!) command"
prev_point = next_point()
point_list = [prev_point]
try:
while True:
command = next(tokens)
curr_point = next_point()
if command == "!":
# Move to
if curr_point != prev_point:
# If a move command doesn't change the current point, we
# ignore it. Otherwise, a new segment is starting, so we
# must yield the current point list and begin a new one.
yield point_list
point_list = [curr_point]
prev_point = curr_point
elif command in "|/":
# Line to
point_list.append(curr_point)
prev_point = curr_point
else:
# Quad to. The control point (curr_point) is marked by putting
# it in a tuple.
point_list.append((curr_point,))
prev_point = next_point()
point_list.append(prev_point)
except StopIteration:
yield point_list
# The next function converts point lists into the SVG path format.
def point_list_to_path_format(point_list: list) -> str:
"""Convert a point list into the SVG path format."""
point_iter = iter(point_list)
path = ["M", next(point_iter)]
last_command = "M"
try:
while True:
point = next(point_iter)
command = "Q" if isinstance(point, tuple) else "L"
# SVG lets us omit the command letter if we use the same command
# multiple times in a row.
if command != last_command:
path.append(command)
last_command = command
if command == "Q":
# Append control point and destination point
path.append(point[0])
path.append(next(point_iter))
else:
path.append(point)
except StopIteration:
if point_list[0] == point_list[-1]:
# Animate adds a "closepath" (Z) command to every filled shape and
# closed stroke. For shapes, it makes no difference, but for closed
# strokes, it turns two overlapping line caps into a bevel, miter,
# or round join, which does make a difference.
# TODO: It is likely that closed strokes can be broken into
# segments and spread across multiple Edge elements, which would
# require a function like point_lists_to_shapes(), but for strokes.
# For now, though, adding "Z" to any stroke that is already closed
# seems good enough.
path.append("Z")
return " ".join(path)
# Finally, we can convert XFL <Edge> elements into SVG <path> elements. The
# algorithm works as follows:
# First, convert the "edges" attributes into segments. Then:
#
# For filled shapes:
# * For a given <Edge>, process each of its segments:
# * If the <Edge> has "fillStyle0", associate the fill style ID
# ("index" in XFL) with the segment.
# * If the <Edge> has "fillStyle1", associate the ID with the segment,
# reversed. This way, the fill of the shape is always to the left of
# the segment (arbitrary choice--the opposite works too).
# * For each fill style ID, consider its segments:
# * Pick an unused segment. If it's already closed (start point equals
# end point), convert it to the SVG path format.
# * Otherwise, if it's open, randomly append segments (making sure to
# match start and end points) until:
# 1. The segment is closed. Convert and start over with a new,
# unused segment.
# 2. The segment intersects with itself (i.e. the current end point
# equals the end point of a previous segment). Backtrack.
# 3. There are no more valid segments. Backtrack.
# * When all segments have been joined into shapes and converted,
# concatenate the path strings and put them in *one* SVG <path>
# element. (This ensures that holes work correctly.) Finally, look up
# the fill attributes from the ID and assign them to the <path>.
#
# For stroked paths:
# * Pair up segments with their stroke style IDs. There is only one
# "strokeStyle" attribute, so we don't need to reverse any segments.
# * For each stroke style ID, convert its segments into the SVG path
# format. Concatenate all path strings and put them in an SVG <path>
# element. Look up the stroke attributes and assign them to the <path>.
#
#
# This algorithm is split across the next two functions:
# * `point_lists_to_shapes()` joins point lists into filled shapes.
# * `xfl_edge_to_svg_path()` does everything else.
#
#
# Assumptions:
# * Segments never cross. So, we only need to join them at their ends.
# * For filled shapes, there's only one way to join segments such that no
# segment is left out. So, we don't need to worry about making the wrong
# decision when there are multiple segments to pick from.
#
# Notes:
# * For stroked paths, Animate joins together segments by their start/end
# points. But, this isn't necessary: when converting to the SVG path
# format, each segment starts with a "move to" command, so they can be
# concatenated in any order.
# * For filled shapes, there is usually only one choice for the next point
# list. The only time there are multiple choices is when multiple shapes
# share a point:
#
# +<-----+
# Shape 1 | ^
# v |
# +----->o<-----+
# | ^ Shape 2
# v |
# +----->+
def point_lists_to_shapes(point_lists: list[tuple[list, str]]) -> dict[str, list[list]]:
"""Join point lists and fill style IDs into shapes.
Args:
point_lists: [(point_list, fill style ID), ...]
Returns:
{fill style ID: [shape point list, ...], ...}
"""
# {fill style ID: {origin point: [point list, ...], ...}, ...}
graph = defaultdict(lambda: defaultdict(list))
# {fill style ID: [shape point list, ...], ...}
shapes = defaultdict(list)
# Add open point lists into `graph`
for point_list, fill_id in point_lists:
if point_list[0] == point_list[-1]:
# This point list is already a closed shape
shapes[fill_id].append(point_list)
else:
graph[fill_id][point_list[0]].append(point_list)
def walk(curr_point, used_points, origin, fill_graph):
"""Recursively join point lists into shapes."""
for i in range(len(fill_graph[curr_point])):
next_point_list = fill_graph[curr_point][i]
next_point = next_point_list[-1]
if next_point == origin:
# Found a cycle. This shape is now closed
del fill_graph[curr_point][i]
return next_point_list
elif next_point not in used_points:
# Try this point list
used_points.add(next_point)
shape = walk(next_point, used_points, origin, fill_graph)
if shape is None:
# Backtrack
used_points.remove(next_point)
else:
del fill_graph[curr_point][i]
# Concat this point list, removing the redundant start move
return next_point_list + shape[1:]
# For each fill style ID, pick a random origin and join point lists into
# shapes with walk() until we're done.
for fill_id, fill_graph in graph.items():
for origin in fill_graph.keys():
while fill_graph[origin]:
point_list = fill_graph[origin].pop()
curr_point = point_list[-1]
shape = walk(curr_point, {origin, curr_point}, origin, fill_graph)
assert shape is not None, "Failed to build shape"
shapes[fill_id].append(point_list + shape[1:])
return shapes
def xfl_edge_to_svg_path(
edges_element: ET.Element,
fill_styles: dict[str, dict],
stroke_styles: dict[str, dict],
) -> tuple[list[ET.Element], list[ET.Element]]:
"""Convert the XFL <edges> element into SVG <path> elements.
Args:
edges_element: The <edges> element of a <DOMShape>
fill_styles: {fill style ID: style attribute dict, ...}
stroke_styles: {stroke style ID: style attribute dict, ...}
Returns a tuple of lists, each containing <path> elements:
([filled path, ...], [stroked path, ...])
"""
fill_edges = []
stroke_paths = defaultdict(list)
# Ignore the "cubics" attribute, as it's only used by Animate
for edge in edges_element.iterfind(".//{*}Edge[@edges]"):
edge_format = edge.get("edges")
fill_id_left = edge.get("fillStyle0")
fill_id_right = edge.get("fillStyle1")
stroke_id = edge.get("strokeStyle")
for point_list in edge_format_to_point_lists(edge_format):
# Reverse point lists so that the fill is always to the left
if fill_id_left is not None:
fill_edges.append((point_list, fill_id_left))
if fill_id_right is not None:
fill_edges.append((list(reversed(point_list)), fill_id_right))
# Convert right away since we don't need to join anything into shapes
if stroke_id is not None:
stroke_paths[stroke_id].append(point_list_to_path_format(point_list))
filled_paths = []
shapes = point_lists_to_shapes(fill_edges)
for fill_id, point_lists in shapes.items():
path = ET.Element("path", fill_styles[fill_id])
path.set("d", " ".join(point_list_to_path_format(pl) for pl in point_lists))
filled_paths.append(path)
stroked_paths = []
for stroke_id, path_data in stroke_paths.items():
stroke = ET.Element("path", stroke_styles[stroke_id])
stroke.set("d", " ".join(path_data))
stroked_paths.append(stroke)
return filled_paths, stroked_paths
| [
"xml.etree.ElementTree.Element",
"collections.defaultdict",
"re.compile"
] | [((3085, 3274), 're.compile', 're.compile', (['"""\n[!|/[\\\\]] | # Move to, line to, quad to\n(?<!S)-?\\\\d+(?:\\\\.\\\\d+)? | # Decimal number\n\\\\#[A-Z0-9]+\\\\.[A-Z0-9]+ # Hex number\n"""', 're.VERBOSE'], {}), '(\n """\n[!|/[\\\\]] | # Move to, line to, quad to\n(?<!S)-?\\\\d+(?:\\\\.\\\\d+)? | # Decimal number\n\\\\#[A-Z0-9]+\\\\.[A-Z0-9]+ # Hex number\n"""\n , re.VERBOSE)\n', (3095, 3274), False, 'import re\n'), ((12814, 12831), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12825, 12831), False, 'from collections import defaultdict\n'), ((15300, 15317), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15311, 15317), False, 'from collections import defaultdict\n'), ((16329, 16369), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""path"""', 'fill_styles[fill_id]'], {}), "('path', fill_styles[fill_id])\n", (16339, 16369), True, 'import xml.etree.ElementTree as ET\n'), ((16584, 16628), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""path"""', 'stroke_styles[stroke_id]'], {}), "('path', stroke_styles[stroke_id])\n", (16594, 16628), True, 'import xml.etree.ElementTree as ET\n'), ((12729, 12746), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12740, 12746), False, 'from collections import defaultdict\n')] |
import torch
import torch.nn.utils
import torch.cuda.amp as amp
import torchvision.ops as cv_ops
import utils.bbox_ops as bbox_ops
def train_one_epoch(model, optimizer, criterion, lr_scheduler, data_loader, dist_logger, epoch_idx):
losses, cls_losses, bbox_losses, centerness_losses = [], [], [], []
model.train()
scaler = amp.GradScaler()
processor = dist_logger.init_processor(data_loader)
for img, data in processor:
img = img.cuda(non_blocking=True)
class_targets = data['class_targets'].cuda(non_blocking=True)
bbox_targets = data['distance_targets'].cuda(non_blocking=True)
centerness_targets = data['centerness_targets'].cuda(non_blocking=True)
points = data['points'].cuda(non_blocking=True)
with amp.autocast():
class_pred, distance_pred, centerness_pred = model(img)
loss_cls, loss_bbox, loss_centerness = criterion(
{'class': class_pred, 'distance': distance_pred, 'centerness': centerness_pred},
{'class': class_targets, 'distance': bbox_targets, 'centerness': centerness_targets},
points
)
loss = loss_cls + loss_bbox + loss_centerness
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 3)
scaler.step(optimizer)
scaler.update()
losses.append(loss.clone().detach())
cls_losses.append(loss_cls.clone().detach())
bbox_losses.append(loss_bbox.clone().detach())
centerness_losses.append(loss_centerness.clone().detach())
cur_loss = dist_logger.reduce_tensor(loss)
avg_loss = dist_logger.reduce_epoch_loss(losses)
dist_logger.update_processor(processor, f'Epoch: {epoch_idx + 1}, avg_loss: {avg_loss:.2f}, cur_loss: {cur_loss:.2f}')
lr_scheduler.step()
dist_logger.update_tensorboard(super_tag='loss', tag_scaler_dict={
'loss': dist_logger.reduce_epoch_loss(losses),
'cls': dist_logger.reduce_epoch_loss(cls_losses),
'bbox': dist_logger.reduce_epoch_loss(bbox_losses),
'centerness': dist_logger.reduce_epoch_loss(centerness_losses)
}, idx=epoch_idx)
dist_logger.save_model(model)
@torch.no_grad()
def val_one_epoch(model, data_loader, coco_gt, dist_logger, epoch_idx, nms_cfg):
pred_instances = []
nms_pre, cls_score_thr, iou_thr = nms_cfg['nms_pre'], nms_cfg['cls_score_thr'], nms_cfg['iou_thr']
model.eval()
processor = dist_logger.init_processor(data_loader)
for img, data in processor:
img = img.cuda(non_blocking=True)
points = data['points'].cuda(non_blocking=True)
img_info_list = coco_gt.loadImgs(data['img_id'].numpy())
class_pred, distance_pred, centerness_pred = model(img)
class_pred = class_pred.sigmoid() # [B, num_points, num_classes]
cls_pred_scores, cls_pred_indexes = class_pred.max(dim=-1) # [B, num_points]
bbox_pred = bbox_ops.convert_distance_to_bbox(points, distance_pred) # [B, num_points, 4]
centerness_pred = centerness_pred.sigmoid() # [B, num_points]
batch_size, _, num_classes = class_pred.shape
_, _, ih, iw = img.shape
for batch_idx in range(batch_size):
b_cls_pred_scores, b_cls_pred_indexes, b_centerness_pred = cls_pred_scores[batch_idx], cls_pred_indexes[batch_idx], centerness_pred[batch_idx] # [num_points]
b_bbox_pred = bbox_pred[batch_idx, :] # [num_points, 4]
_, top_idx = (b_cls_pred_scores * b_centerness_pred).topk(nms_pre) # [topk]
top_class_pred_scores, top_class_pred_indexes, top_centerness_pred = b_cls_pred_scores[top_idx], b_cls_pred_indexes[top_idx], b_centerness_pred[top_idx] # [topk]
nms_scores = top_class_pred_scores * top_centerness_pred # [topk]
top_bbox_pred = b_bbox_pred[top_idx, :] # [topk, 4]
top_bbox_pred = cv_ops.clip_boxes_to_image(top_bbox_pred, size=(ih, iw))
valid_mask = top_class_pred_scores > cls_score_thr
valid_class_pred_scores, valid_class_pred_indexes, valid_nms_scores = top_class_pred_scores[valid_mask], top_class_pred_indexes[valid_mask], nms_scores[valid_mask]
valid_bbox_pred = top_bbox_pred[valid_mask, :]
keep_idx = cv_ops.batched_nms(valid_bbox_pred, valid_nms_scores, valid_class_pred_indexes, iou_thr)
keep_class_pred_scores, keep_class_pred_indexes = valid_class_pred_scores[keep_idx], valid_class_pred_indexes[keep_idx]
keep_bbox_pred = valid_bbox_pred[keep_idx, :]
oh, ow = img_info_list[batch_idx]['height'], img_info_list[batch_idx]['width']
keep_bbox_pred = bbox_ops.recover_bboxes(keep_bbox_pred, oh, ow, ih, iw)
keep_bbox_pred = cv_ops.box_convert(keep_bbox_pred, in_fmt='xyxy', out_fmt='xywh')
for cls_score, cls_idx, bbox in zip(keep_class_pred_scores, keep_class_pred_indexes, keep_bbox_pred):
pred_instances.append({
'image_id': int(data['img_id'][batch_idx]),
'category_id': int(cls_idx) + 1,
'bbox': [float(str('%.1f' % coord)) for coord in bbox.tolist()],
'score': float(str('%.1f' % cls_score))
})
dist_logger.save_pred_instances_local_rank(pred_instances)
dist_logger.save_val_file()
dist_logger.update_tensorboard_val_results(coco_gt, epoch_idx)
| [
"torch.cuda.amp.GradScaler",
"torchvision.ops.clip_boxes_to_image",
"torchvision.ops.box_convert",
"torch.cuda.amp.autocast",
"utils.bbox_ops.convert_distance_to_bbox",
"torch.no_grad",
"utils.bbox_ops.recover_bboxes",
"torchvision.ops.batched_nms"
] | [((2300, 2315), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2313, 2315), False, 'import torch\n'), ((339, 355), 'torch.cuda.amp.GradScaler', 'amp.GradScaler', ([], {}), '()\n', (353, 355), True, 'import torch.cuda.amp as amp\n'), ((3039, 3095), 'utils.bbox_ops.convert_distance_to_bbox', 'bbox_ops.convert_distance_to_bbox', (['points', 'distance_pred'], {}), '(points, distance_pred)\n', (3072, 3095), True, 'import utils.bbox_ops as bbox_ops\n'), ((778, 792), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (790, 792), True, 'import torch.cuda.amp as amp\n'), ((4001, 4057), 'torchvision.ops.clip_boxes_to_image', 'cv_ops.clip_boxes_to_image', (['top_bbox_pred'], {'size': '(ih, iw)'}), '(top_bbox_pred, size=(ih, iw))\n', (4027, 4057), True, 'import torchvision.ops as cv_ops\n'), ((4381, 4473), 'torchvision.ops.batched_nms', 'cv_ops.batched_nms', (['valid_bbox_pred', 'valid_nms_scores', 'valid_class_pred_indexes', 'iou_thr'], {}), '(valid_bbox_pred, valid_nms_scores,\n valid_class_pred_indexes, iou_thr)\n', (4399, 4473), True, 'import torchvision.ops as cv_ops\n'), ((4781, 4836), 'utils.bbox_ops.recover_bboxes', 'bbox_ops.recover_bboxes', (['keep_bbox_pred', 'oh', 'ow', 'ih', 'iw'], {}), '(keep_bbox_pred, oh, ow, ih, iw)\n', (4804, 4836), True, 'import utils.bbox_ops as bbox_ops\n'), ((4866, 4931), 'torchvision.ops.box_convert', 'cv_ops.box_convert', (['keep_bbox_pred'], {'in_fmt': '"""xyxy"""', 'out_fmt': '"""xywh"""'}), "(keep_bbox_pred, in_fmt='xyxy', out_fmt='xywh')\n", (4884, 4931), True, 'import torchvision.ops as cv_ops\n')] |
from unobase.support import models
__author__ = 'michael'
from django.contrib import admin
admin.site.register(models.Case)
admin.site.register(models.FrequentlyAskedQuestion) | [
"django.contrib.admin.site.register"
] | [((98, 130), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Case'], {}), '(models.Case)\n', (117, 130), False, 'from django.contrib import admin\n'), ((131, 182), 'django.contrib.admin.site.register', 'admin.site.register', (['models.FrequentlyAskedQuestion'], {}), '(models.FrequentlyAskedQuestion)\n', (150, 182), False, 'from django.contrib import admin\n')] |
import re
from svgParsing.formatted_text import merge_pages, svg_to_text, FormattedText
from svgParsing.table_names import tables_to_remove
# A function to parse the rules from SVG files (one per page).
def generate_srd_articles():
''' Parse the svg files created from the SRD, into a dictionary of
articles. Each article has a title and its corresponding text.
return: Dict[str, str]
'''
# Read the svg file for each page, and generate a list of FormattedText objects
# representing all text in the SRD. This loop handles 'smoothing out' the page
# breaks.
merged_content = []
for page_number in range(1, 404):
current_page = svg_to_text('{}.svg'.format(page_number))
merged_content = merge_pages(merged_content, current_page)
# Identify tables using their font size. Treat the title of the table as the
# previous FormattedText object in the list. Remove tables if the title is in
# the tables_to_remove list.
indices_to_remove = []
for i in range(1, len(merged_content)):
if merged_content[i].size == 37:
if merged_content[i-1].text in tables_to_remove:
indices_to_remove += [i-1, i]
for index in sorted(indices_to_remove, reverse=True):
del merged_content[index]
# Split the list of FormattedText objects up, using text at size 108 as the
# start of a 'new article.' Extract the text from the FormattedText objects.
# [ [<list of conditions for higher-font titles>, <list of lower-font headers to split on> ]
rules = [
[["^Races$", 108], [[".*", 75]]],
[["^Beyond 1st Level$", 108], [[".*", 75]]],
[["^Using Ability Scores$", 108], [[".*", 75]]],
[["^Spellcasting$", 108], [[".*", 75]]],
[["^Spell Descriptions$", 75], [[".*", 50]]],
[["^Magic Items A-Z$", 75], [[".*", 50]]],
[["^Monsters$", 108], [["^Monsters \(.*", 75]]],
[["^Monsters \(.*", 75], [[".*", 58], [".*", 50]]],
]
# one absolute of this system - no text will be added to an article if the text font size is larger than the article title font size.
def check_text_matches_rule(formText, rule):
'''Check a FormattedText object against a rule for starting a new article.
formText: FormattedText The text to match against the rule.
rule: List[str, int] A pattern and a font size to match text against.
'''
#import pdb;pdb.set_trace()
if formText.size != rule[1]:
return False
if re.match(rule[0], formText.text):
return True
else:
return False
articles = {}
cur_larger_font_titles = [['Introduction', 108]]
current_title = 'Introduction'
current_article = ''
for formatted_text in merged_content:
#if formatted_text.text == "Spell Descriptions":
# import pdb;pdb.set_trace()
if formatted_text.size == 108:
cur_larger_font_titles = [[formatted_text.text, 108]]
# make new article
articles[current_title] = current_article
current_title = formatted_text.text
current_article = ''
continue
# if text size any 'larger font title', then remove larger title(s), and make the current piece of text a new article title.
# I believe with this code, the list of cur_larger_font_titles will be sorted by font size in descending order.
create_new_article_by_font_size = False
while len(cur_larger_font_titles) and formatted_text.size >= cur_larger_font_titles[-1][1]:
create_new_article_by_font_size = True
cur_larger_font_titles = cur_larger_font_titles[:-1]
if create_new_article_by_font_size:
articles[current_title] = current_article
current_title = formatted_text.text
current_article = ''
cur_larger_font_titles.append([current_title, formatted_text.size])
continue
# If text matches a rule for the smallest 'larger font title', then make a new article.
create_new_article_by_formatting_rules = False
if formatted_text.size >= 50: # Don't do these looping checks for pieces of text that are too small for any of the rules to turn into a header.
for title in cur_larger_font_titles:
for condition_rule_pair in rules:
#print(condition_rule_pair, title)
if check_text_matches_rule(FormattedText(title[0], '#000000', title[1]), condition_rule_pair[0]):
for rule in condition_rule_pair[1]:
if check_text_matches_rule(formatted_text, rule):
create_new_article_by_formatting_rules = True
if create_new_article_by_formatting_rules:
cur_larger_font_titles.append([formatted_text.text, formatted_text.size])
articles[current_title] = current_article
current_title = formatted_text.text
current_article = ''
continue
current_article += formatted_text.text + '\n'
articles[current_title] = current_article
return articles
| [
"svgParsing.formatted_text.FormattedText",
"re.match",
"svgParsing.formatted_text.merge_pages"
] | [((743, 784), 'svgParsing.formatted_text.merge_pages', 'merge_pages', (['merged_content', 'current_page'], {}), '(merged_content, current_page)\n', (754, 784), False, 'from svgParsing.formatted_text import merge_pages, svg_to_text, FormattedText\n'), ((2539, 2571), 're.match', 're.match', (['rule[0]', 'formText.text'], {}), '(rule[0], formText.text)\n', (2547, 2571), False, 'import re\n'), ((4506, 4550), 'svgParsing.formatted_text.FormattedText', 'FormattedText', (['title[0]', '"""#000000"""', 'title[1]'], {}), "(title[0], '#000000', title[1])\n", (4519, 4550), False, 'from svgParsing.formatted_text import merge_pages, svg_to_text, FormattedText\n')] |
#!/usr/bin/python3
from pysimplesoap.client import SoapClient
# Install the above dependency with "pip install pysimplesoap"
location = 'http://fritz.box:49000/igdupnp/control/WANCommonIFC1'
namespace = 'urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1'
action = 'urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1#'
debug = False # display http/soap requests and responses
client = SoapClient(location, action, namespace, trace=debug)
response2 = client.GetAddonInfos()
newbytesendrate = int(response2.GetAddonInfosResponse.NewByteSendRate)
newbytereceiverate = int(response2.GetAddonInfosResponse.NewByteReceiveRate)
print(newbytesendrate, newbytereceiverate) | [
"pysimplesoap.client.SoapClient"
] | [((400, 452), 'pysimplesoap.client.SoapClient', 'SoapClient', (['location', 'action', 'namespace'], {'trace': 'debug'}), '(location, action, namespace, trace=debug)\n', (410, 452), False, 'from pysimplesoap.client import SoapClient\n')] |
#!/usr/bin/env python
"""
Hackerrank Solution
-
<NAME>
<@natebwangsut | <EMAIL>>
"""
__author__ = "<NAME>"
__credits__ = ["<NAME>sutthitham"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import itertools
line = input()
line = input().split()
repeat = int(input())
com = list(itertools.combinations(line, repeat))
print(sum(("a" in x) for x in com) / len(com))
| [
"itertools.combinations"
] | [((325, 361), 'itertools.combinations', 'itertools.combinations', (['line', 'repeat'], {}), '(line, repeat)\n', (347, 361), False, 'import itertools\n')] |
import json
import aiohttp
from AsyncWebsocketStreamInterface import AsyncWebsocketStreamInterface
import websockets
from loguru import logger
class BinanceFapiAsyncWs(AsyncWebsocketStreamInterface):
ws_baseurl = 'wss://fstream.binance.com'
restful_baseurl = 'https://fapi.binance.com'
def __init__(self, apikey):
super(BinanceFapiAsyncWs, self).__init__()
self._apikey = apikey
self._session: aiohttp.ClientSession = None
self._delay_listenKey_invalid_running = False
@property
def session(self):
if not self._session:
self._session = aiohttp.ClientSession()
return self._session
async def _generate_listenkey(self, debug=False):
if not self._delay_listenKey_invalid_running: # 确保只运行一个心跳
asyncio.create_task(self._delay_listenKey_invalid())
self._delay_listenKey_invalid_running = True
async with self.session.post(
self.restful_baseurl + '/fapi/v1/listenKey',
headers={'X-MBX-APIKEY': self._apikey},
# data={
# 'recvWindow': 5000,
# 'timestamp': ts,
# 'signature': self._generate_signature(recvWindow=5000, timestamp=ts)}
) as r:
if not debug:
listenKey = (await r.json())['listenKey']
return listenKey
else:
return await r.json()
async def _delay_listenKey_invalid(self):
while True:
await asyncio.create_task(asyncio.sleep(30 * 60))
logger.debug('Time to delay listenKey invalid.')
await self._generate_listenkey()
async def _create_ws(self):
ws = await websockets.connect(self.ws_baseurl + '/ws/' + await self._generate_listenkey())
return ws
async def _when2create_new_ws(self):
listenKeyExpired_stream = self.stream_filter([{'e': 'listenKeyExpired'}])
async def read_listenKeyExpired_stream(listenKeyExpired_stream):
async for news in listenKeyExpired_stream:
try:
return
finally:
asyncio.create_task(listenKeyExpired_stream.close())
read_listenKeyExpired_stream_task = asyncio.create_task(read_listenKeyExpired_stream(listenKeyExpired_stream))
# 20小时更新连接一次,或者服务端推送消息listenKey过期
await asyncio.create_task(
asyncio.wait(
[read_listenKeyExpired_stream_task, asyncio.sleep(20 * 3600)],
return_when='FIRST_COMPLETED'))
logger.debug('Time to update ws connection.')
async def _parse_raw_data(self, raw_data):
msg = json.loads(raw_data)
return msg
async def exit(self):
super_exit_task = asyncio.create_task(super(BinanceFapiAsyncWs, self).exit())
if self._session:
await asyncio.create_task(self._session.close())
await super_exit_task
if __name__ == '__main__':
import signal
import asyncio
"""
信号值 符号 行为
2 SIGINT 进程终端,CTRL+C
9 SIGKILL 强制终端
15 SIGTEM 请求中断
20 SIGTOP 停止(挂起)进程 CRTL+D
"""
def safely_exit():
asyncio.create_task(safely_exit_management())
loop = asyncio.get_event_loop()
loop.add_signal_handler(signal.SIGTERM, safely_exit)
loop.add_signal_handler(signal.SIGINT, safely_exit)
async def safely_exit_management():
bfws_task = asyncio.create_task(bfws.exit())
await bfws_task
print('Safely exit.')
loop.stop()
async def loop_task():
global bfws
bfws = BinanceFapiAsyncWs(input('apikey:'))
stream = bfws.stream_filter()
async for msg in stream:
logger.info(msg)
loop.create_task(loop_task())
try:
loop.run_forever()
finally:
loop.close()
exit()
| [
"aiohttp.ClientSession",
"json.loads",
"loguru.logger.debug",
"loguru.logger.info",
"asyncio.sleep",
"asyncio.get_event_loop"
] | [((3311, 3335), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3333, 3335), False, 'import asyncio\n'), ((2596, 2641), 'loguru.logger.debug', 'logger.debug', (['"""Time to update ws connection."""'], {}), "('Time to update ws connection.')\n", (2608, 2641), False, 'from loguru import logger\n'), ((2704, 2724), 'json.loads', 'json.loads', (['raw_data'], {}), '(raw_data)\n', (2714, 2724), False, 'import json\n'), ((613, 636), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (634, 636), False, 'import aiohttp\n'), ((1595, 1643), 'loguru.logger.debug', 'logger.debug', (['"""Time to delay listenKey invalid."""'], {}), "('Time to delay listenKey invalid.')\n", (1607, 1643), False, 'from loguru import logger\n'), ((3803, 3819), 'loguru.logger.info', 'logger.info', (['msg'], {}), '(msg)\n', (3814, 3819), False, 'from loguru import logger\n'), ((1559, 1581), 'asyncio.sleep', 'asyncio.sleep', (['(30 * 60)'], {}), '(30 * 60)\n', (1572, 1581), False, 'import asyncio\n'), ((2513, 2537), 'asyncio.sleep', 'asyncio.sleep', (['(20 * 3600)'], {}), '(20 * 3600)\n', (2526, 2537), False, 'import asyncio\n')] |