content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import yaml
from pathlib import Path
from typing import Optional, Union, List, Dict
from pydantic import BaseModel
# Type alias: servers grouped by hostname, then by port number.
# "Server" is quoted as a forward reference so evaluating this alias does
# not raise NameError when the Server model is defined later in the module
# (no Server definition precedes this line).
ServerList = Dict[str, Dict[int, List["Server"]]]
| [
11748,
331,
43695,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
32233,
11,
4479,
11,
7343,
11,
360,
713,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
628,
198,
198,
10697,
8053,
796,
360,
713,
58,
2536,
11,
360,
713,
... | 3.27451 | 51 |
#!/usr/bin/python3
import os
import sys
import json
import argparse
import subprocess
from PIL import Image, ImageOps
from glob import glob
# Size of one animation slot in the bundled image: each animation binary
# is padded up to this many bytes (64 KiB) by bundle().
animSize = 65536
# Directory containing this script; all sources are located relative to it.
srcdir = os.path.dirname(os.path.abspath(__file__))
# Shared linker script used when linking the animations.
ldsfile = os.path.join(srcdir,'animation.lds')
# The list of animations to build.
animations = ['marquee-image',
              'lineface',
              'djmode',
              'mic-test',
              'matrix',
              'missingno',
              'northern-lights',
              'lightning-voice',
              'rainbow-grin']
# The list of JSON animations to build.
jsdir = os.path.join(srcdir, 'json')
jsanimations = glob(os.path.join(jsdir, '*.json'))
# Compiler flags: size-optimized RV32I/ILP32 code with newlib-nano, and
# per-function/per-data sections so the linker's --gc-sections can drop
# anything unused.
CFLAGS = ['-Os', '-march=rv32i', '-mabi=ilp32', '-I', srcdir]
CFLAGS += ['-ffunction-sections', '-fdata-sections', '--specs=nano.specs']
CFLAGS += ['-D', '_POSIX_TIMERS', '-D', '_POSIX_MONOTONIC_CLOCK=200112L']
#######################################
## Locate Toolchain Paths
#######################################
# Locate the RISC-V cross toolchain. On Windows the PlatformIO package
# path is always used; elsewhere, fall back to tools on the PATH when
# PlatformIO is not installed.
# NOTE(review): objdump is only assigned on the Windows branch; nothing
# visible in this script uses it, but confirm before relying on it.
if os.name=='nt':
    platformio_rel = '.platformio\\packages'
    #pio_rel = '.platformio\\packages\\toolchain-icestorm\\bin'
    home_path = os.getenv('HOMEPATH')
    # Build the full path to risc-v tools
    platformio = os.path.join(home_path, platformio_rel)
    # Tools used in the flow
    gcc = os.path.join(platformio, 'toolchain-riscv\\bin\\riscv64-unknown-elf-gcc.exe')
    objcopy = os.path.join(platformio, 'toolchain-riscv\\bin\\riscv64-unknown-elf-objcopy.exe')
    objdump = os.path.join(platformio, 'toolchain-riscv\\bin\\riscv64-unknown-elf-objdump.exe')
else:
    pio_rel = '.platformio/packages/'
    pio = os.path.join(os.environ['HOME'], pio_rel)
    # Use PlatformIO, if it exists.
    if os.path.exists(pio):
        gcc = os.path.join(pio, 'toolchain-riscv/bin/riscv64-unknown-elf-gcc')
        objcopy = os.path.join(pio, 'toolchain-riscv/bin/riscv64-unknown-elf-objcopy')
    # Otherwise, assume the tools are in the PATH.
    else:
        gcc = 'riscv64-unknown-elf-gcc'
        objcopy = 'riscv64-unknown-elf-objcopy'
#######################################
## Check if Recompilation is Needed
#######################################
def check_rebuild(*args, target):
    """Determine whether a build target is stale.

    A target is stale when it does not exist yet, or when this script
    itself or any listed dependency was modified after the target was
    last written.

    Args:
        target (string): Name of the target to be built.
        *args (string): All of the dependencies of the target.

    Returns:
        True if the target needs to be rebuilt, and False otherwise.
    """
    if not os.path.exists(target):
        return True
    built_at = os.path.getmtime(target)
    # The build script itself counts as an implicit dependency.
    deps = (__file__,) + args
    return any(os.path.getmtime(dep) > built_at for dep in deps)
#######################################
## Compile a Single Source File
#######################################
def compile(target, source):
    """Compile a single source file into an object file.

    The build action is selected from the file extension:
      * .c / .s: compiled directly with gcc.
      * .json: frames are rendered to C (via js_to_frame, defined
        elsewhere in this script) and piped into gcc.
      * .png / .jpg / .jpeg: pixels are packed into 16-bit RGB565 and
        emitted as a C array piped into gcc.

    Args:
        target (string): Name of the output file to be generated.
        source (string): Name of the source file to be compiled.

    Raises:
        Exception: If the source file extension is not recognized.
    """
    ext = os.path.splitext(source)[1]
    if (ext == '.c' or ext == '.s'):
        print("   Compiling [" + os.path.basename(source) + "]")
        subprocess.check_call([gcc] + CFLAGS + ['-c', '-o', target, source], stderr=subprocess.STDOUT)
        return
    if ext == '.json':
        # Parse the JSON file.
        with open(source) as jsfile:
            frames = json.load(jsfile)
        # Output the JSON animation data and compile it.
        print("   Rendering [" + os.path.basename(source) + "]")
        with subprocess.Popen([gcc] + CFLAGS + ['-c', '-o', target, '-xc', '-'], stderr=subprocess.STDOUT, stdin=subprocess.PIPE) as p:
            boilerplate = "/* Generated by make.py from %s */\n" % os.path.basename(source)
            boilerplate += "#include <stdint.h>\n"
            boilerplate += "#include <stdlib.h>\n"
            boilerplate += "#include <badge.h>\n\n"
            boilerplate += "const char *json_name = \"%s\";\n\n" % os.path.splitext(os.path.basename(source))[0]
            p.stdin.write(boilerplate.encode('utf-8'))
            # Render the frames into a framebuf structure.
            frameno = 0
            for f in frames:
                js_to_frame(f, "frame%u" % frameno, p.stdin)
                frameno += 1
            # And output the animation schedule (NULL-terminated).
            p.stdin.write("const struct frame_schedule schedule[] = {\n".encode('utf-8'))
            frameno = 0
            for f in frames:
                interval = int(f['interval']) * 1000
                schedule = "   { .interval = %u, .frame = &frame%u},\n" % (interval, frameno)
                p.stdin.write(schedule.encode('utf-8'))
                frameno += 1
            p.stdin.write("   { 0, NULL }\n".encode('utf-8'))
            p.stdin.write("}; /* schedule */\n\n".encode('utf-8'))
        return
    if ext in ('.png', '.jpg', '.jpeg'):
        frame = Image.open(source)
        if not "_fullframe." in source:
            # BUG FIX: Image.resize() returns a resized copy rather than
            # resizing in place; the original call discarded its result.
            frame = frame.resize((20,14))
            frame = ImageOps.pad(frame, (32,14), centering=(0,0))
        print("   Rendering [" + os.path.basename(source) + "]")
        with subprocess.Popen([gcc] + CFLAGS + ['-c', '-o', target, '-xc', '-'], stderr=subprocess.STDOUT, stdin=subprocess.PIPE) as p:
            if not "_fullframe." in source:
                boilerplate = """
/* Generated by make.py */
#include <stdint.h>
#include <badge.h>
const struct framebuf __attribute__((section(".frames"))) %s = { .data = {
""" % (filename_to_cname(os.path.basename(source)))
                tail = "}};"
            else:
                image_width = frame.size[0]
                boilerplate = """
/* Generated by make.py */
#include <stdint.h>
#include <badge.h>
const int %s_width = %d;
const uint16_t __attribute__((section(".frames"))) %s[] = {
""" % (filename_to_cname(os.path.basename(source)), image_width, filename_to_cname(os.path.basename(source)))
                tail = "};"
            p.stdin.write(boilerplate.encode('utf-8'))
            # Pack each pixel into RGB565: 5 bits red, 6 green, 5 blue.
            for pixel in list(frame.getdata()):
                r = (pixel[0] >> 3) & 0x1F
                g = (pixel[1] >> 2) & 0x3F
                b = (pixel[2] >> 3) & 0x1F
                line = "   0x%04x," % ((r << 11) | (g << 5) | b)
                p.stdin.write(line.encode('utf-8'))
            p.stdin.write(tail.encode('utf-8'))
            p.stdin.close()
        return
    # Otherwise, we don't understand this file type.
    raise Exception("Unknown file type for " + os.path.basename(source))
#######################################
## Recompile the BIOS
#######################################
def buildrom(name):
    """Rebuild the badge BIOS.

    BIOS sources will be found at srcdir/name, where
    srcdir is the location of the make.py script. Compiled
    files will be generated in $(pwd)/name, and the output
    BIOS image will be at $(pwd)/name/name.bin

    Args:
        name (string): Name of the BIOS to be built.
    """
    # Assemble the list of sources for this animation.
    biosdir = os.path.join(srcdir, name)
    objdir = name
    sources = glob(os.path.join(biosdir, '*.c'))
    sources += glob(os.path.join(biosdir, '*.s'))
    objects = []
    # Firmware target image(s) should match the dirname.
    elf_target = os.path.join(objdir, name + '.elf')
    bin_target = os.path.join(objdir, name + '.bin')
    print("BIOS [" + os.path.basename(bin_target) + "] ", end='')
    if not check_rebuild(*sources, ldsfile, target=bin_target):
        print("is up to date")
        return
    else:
        print("building...")
    # Create the output directory, if it doesn't already exist.
    if not os.path.exists(objdir):
        os.mkdir(objdir)
    # Rebuild each source into an object file.
    for srcfile in sources:
        (root, ext) = os.path.splitext(srcfile)
        objfile = root + '.o'
        compile(objfile, srcfile)
        objects.append(objfile)
    # Link the BIOS together, using the first linker script found in the
    # BIOS directory and discarding unused sections.
    LDFLAGS = ['-Wl,-Bstatic,-T,%s,--gc-sections' % glob(os.path.join(biosdir, '*.lds'))[0]]
    print("   Linking [" + os.path.basename(elf_target) + "]")
    if subprocess.call([gcc] + CFLAGS + LDFLAGS + ['-o', elf_target] + objects, stderr=subprocess.STDOUT) != 0:
        return
    # Convert to a binary file.
    print("   Packing [" + os.path.basename(bin_target) + "]")
    if subprocess.call([objcopy, '-O', 'binary', elf_target, bin_target], stderr=subprocess.STDOUT) != 0:
        return
#######################################
## Recompile Animations
#######################################
def build(name):
    """Rebuild a single animation.

    Animation sources will be found at srcdir/name, where
    srcdir is the location of the make.py script. Compiled
    files will be generated in $(pwd)/name, and the output
    animation file will be at $(pwd)/name/name.bin

    Args:
        name (string): Name of the animation to be built.
    """
    # Assemble the list of sources for this animation: shared runtime
    # support files first, then everything in the animation's directory.
    animdir = os.path.join(srcdir, name)
    objdir = name
    sources = [os.path.join(srcdir, 'syscalls.c')]
    sources += [os.path.join(srcdir, 'framebuf.c')]
    #sources += [os.path.join(srcdir, 'muldiv.c')]
    sources += glob(os.path.join(animdir, '*.c'))
    sources += glob(os.path.join(animdir, '*.s'))
    sources += glob(os.path.join(animdir, '*.png'))
    sources += glob(os.path.join(animdir, '*.jpg'))
    sources += glob(os.path.join(animdir, '*.jpeg'))
    objects = []
    # Firmware target image(s) should match the dirname.
    elf_target = os.path.join(objdir, name + '.elf')
    bin_target = os.path.join(objdir, name + '.bin')
    print("Animation [" + os.path.basename(bin_target) + "] ", end='')
    if not check_rebuild(*sources, ldsfile, target=bin_target):
        print("is up to date")
        return
    else:
        print("building...")
    # Create the output directory, if it doesn't already exist.
    if not os.path.exists(objdir):
        os.mkdir(objdir)
    # Rebuild each source into an object file.
    for srcfile in sources:
        (root, ext) = os.path.splitext(srcfile)
        objfile = root + '.o'
        compile(objfile, srcfile)
        objects.append(objfile)
    # Link the animation together.
    LDFLAGS = ['-Wl,-Bstatic,-T,%s,--gc-sections' % ldsfile]
    print("   Linking [" + os.path.basename(elf_target) + "]")
    if subprocess.call([gcc] + CFLAGS + LDFLAGS + ['-o', elf_target] + objects, stderr=subprocess.STDOUT) != 0:
        return
    # Convert to a binary file.
    print("   Packing [" + os.path.basename(bin_target) + "]")
    if subprocess.call([objcopy, '-O', 'binary', elf_target, bin_target], stderr=subprocess.STDOUT) != 0:
        return
#######################################
## Recompile JSON Animations
#######################################
def buildjson(jsfile):
    """Rebuild a single animation from JSON syntax.

    An animation may be provided in JSON format, which contains
    an array of frames, and the interval between each frame in
    milliseconds. The name of the animation is the base filename
    of the jsfile with its extension removed.

    Compiled files will be generated in $(pwd)/json, and the
    output animation binary will be at $(pwd)/json/name.bin

    Args:
        jsfile (string): Filename of the JSON source.
    """
    name = os.path.splitext(os.path.basename(jsfile))[0]
    objdir = 'json'
    objects = []
    # The JSON file plus the shared runtime support sources.
    sources = [jsfile]
    sources += [os.path.join(srcdir, 'jsmain.c')]
    sources += [os.path.join(srcdir, 'syscalls.c')]
    sources += [os.path.join(srcdir, 'framebuf.c')]
    sources += [os.path.join(srcdir, 'muldiv.c')]
    # Firmware target image(s) should match the dirname.
    elf_target = os.path.join(objdir, name + '.elf')
    bin_target = os.path.join(objdir, name + '.bin')
    print("Animation [" + os.path.basename(bin_target) + "] ", end='')
    if not check_rebuild(*sources, ldsfile, target=bin_target):
        print("is up to date")
        return
    else:
        print("building...")
    # Create the output directory, if it doesn't already exist.
    if not os.path.exists(objdir):
        os.mkdir(objdir)
    # Rebuild each source into an object file.
    for srcfile in sources:
        (root, ext) = os.path.splitext(srcfile)
        objfile = root + '.o'
        compile(objfile, srcfile)
        objects.append(objfile)
    # Link the animation together.
    LDFLAGS = ['-Wl,-Bstatic,-T,%s,--gc-sections' % ldsfile]
    print("   Linking [" + os.path.basename(elf_target) + "]")
    if subprocess.call([gcc] + CFLAGS + LDFLAGS + ['-o', elf_target] + objects, stderr=subprocess.STDOUT) != 0:
        return
    # Convert to a binary file.
    print("   Packing [" + os.path.basename(bin_target) + "]")
    if subprocess.call([objcopy, '-O', 'binary', elf_target, bin_target], stderr=subprocess.STDOUT) != 0:
        return
#######################################
## Bundle Animations Together
#######################################
def bundle(*args, target):
    """Bundles the animations together into a data image.

    Each animation occupies a fixed-size slot of animSize bytes; shorter
    animations are zero-padded, and the image ends with a 1 KiB 0xFF
    end-of-image marker.

    Args:
        target (string): Name of the target bundle to be built.
        *args (string): Names of the animation files to include in the image.
    """
    if not check_rebuild(target=target, *args):
        print("Image [" + os.path.basename(target) + "] is up to date")
        return
    else:
        print("Bundling [" + os.path.basename(target) + "]")
    with open(target, 'wb') as outfile:
        for filename in args:
            length = 0
            # Copy the animation data, one 32-bit word at a time; a short
            # final read is discarded.
            with open(filename, 'rb') as infile:
                while(1):
                    chunk = infile.read(4)
                    if (len(chunk) < 4):
                        break
                    outfile.write(chunk)
                    length += 4
            # Pad the animation with zeros up to the fixed slot size.
            while (length < animSize):
                outfile.write(b"\x00\x00\x00\x00")
                length += 4
        # Append an extra marker to the end of the image.
        for i in range(256):
            outfile.write(b"\xFF\xFF\xFF\xFF")
#######################################
## Cleanup After Ourselves
#######################################
def clean(*args, target='animations.bin'):
    """Clean any compiled or built files.

    Args:
        target (string): Filename of the animation bundle.
        *args (string): Animations to be cleaned.
    """
    # Remove the intermediate build products from each animation directory.
    for name in args:
        print("Cleaning [" + name + "]")
        for pattern in ('*.bin', '*.elf', '*.o'):
            for stale in glob(os.path.join(name, pattern)):
                os.remove(stale)
    # Remove the bundled animation image, if present.
    print("Cleaning [" + target + "]")
    if os.path.exists(target):
        os.remove(target)
#######################################
## Make Script Entry Point
#######################################
if __name__=='__main__':
    # NOTE(review): main() is not defined anywhere in this script as shown
    # here -- confirm it exists (or is imported) elsewhere, otherwise this
    # raises NameError at run time.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
41472,
198,
6738,
15095,
1330,
15095,
198,
1... | 2.289878 | 6,827 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
from os.path import abspath, basename, dirname, exists, expandvars, isdir, isfile, join
import json
import os
import fnmatch
import requests
import subprocess
import sys
import yaml
def json_parse(string):
    """Parse a JSON string into a dictionary.

    Args:
        string: JSON document to decode.

    Returns:
        The decoded object.

    Raises:
        TypeError: if the string is not valid JSON; the parse error is
            also printed to stderr.
    """
    try:
        decoded = json.loads(string)
    except Exception as e:
        msg = 'Error parsing JSON config: {}'.format(str(e))
        print(msg, file=sys.stderr)
        raise TypeError(msg)
    return decoded
def kv_parse(string):
    """Parse a string of the form foo=bar into a dictionary.

    Only the first '=' splits the string, so values may themselves
    contain '=' characters.

    Raises:
        TypeError: if the key part is empty.
        ValueError: if the string contains no '=' at all.
    """
    try:
        name, value = string.split('=', 1)
        if not name:
            raise TypeError('Empty key')
    except Exception as e:
        err = "Invalid key found parsing KV string '{}': {}".format(string, str(e))
        print(err, file=sys.stderr)
        raise
    return {name: value}
def path(fspath, type='file'):
    """
    Checks if a filesystem path exists with the correct type.

    The path has environment variables expanded and is made absolute
    before checking. Any failure is printed to stderr and raised as an
    argparse.ArgumentTypeError so this can be used as an argparse type.
    """
    resolved = abspath(expandvars(str(fspath)))
    prefix = "path '{0}'".format(resolved)
    error = None
    if not exists(resolved):
        error = "{0} does not exist".format(prefix)
    elif type == 'file' and isdir(resolved):
        error = "{0} is not a file".format(prefix)
    if error is not None:
        print(error, file=sys.stderr)
        raise argparse.ArgumentTypeError(error)
    return resolved
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
1822,
29572,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
1615,
12453,
11,
26672,
3672,
11,
... | 2.728016 | 489 |
from telegram.ext import *
from datetime import time
import os, logging
import dialog
import secret
from database import *
from user import *
from service import Service, ServiceAdd
from game import Game
# Emit bare log messages (no timestamps/levels) at INFO level and above.
logging.basicConfig(format="%(message)s", level=logging.INFO)
# SOCKS5 proxy settings for the bot's request layer.
# NOTE(review): request_kwargs is not used in this portion of the script --
# presumably passed to an Updater/Bot elsewhere; confirm.
request_kwargs = {
    "proxy_url": "socks5://t.geekclass.ru:7777",
    "urllib3_proxy_kwargs": { "username": "geek", "password": "socks" }
}
if __name__ == "__main__":
    # NOTE(review): Bot comes from the star import of telegram.ext; the
    # instance is created but neither stored nor started -- confirm intent.
    Bot(secret.token)
| [
6738,
573,
30536,
13,
2302,
1330,
1635,
198,
198,
6738,
4818,
8079,
1330,
640,
198,
198,
11748,
28686,
11,
18931,
198,
198,
11748,
17310,
198,
11748,
3200,
198,
198,
6738,
6831,
1330,
1635,
198,
6738,
2836,
220,
220,
220,
220,
1330,
1... | 2.760234 | 171 |
#csvfile='aa.csv'
#outfile='SKA1AA2km.in'
# Python 2 script: reads antenna longitude/latitude pairs from a CSV file,
# filters them by distance from a reference position, and writes a
# telescope configuration file with geocentric (ITRF) coordinates.
csvfile='dish.csv'
outfile='SKA1Dish.in'
# Maximum distance from the reference point for an antenna to be kept
# -- presumably metres; TODO confirm units used by distance().
dmax=100000.0
# Reference position (degrees).
lonref=40.019
latref=-30.7131
import math
import csv
# WGS-84 ellipsoid constants: semi-major axis (m) and inverse flattening.
sm_a = 6378137.0
invf = 298.257223563
f = 1.0 / invf
# Convert WGS-84 to ITRF
# lat and lon are the latitude and longitude in radians, h is the height in metres.
# NOTE(review): the WGS84ToITRF and distance helpers used below are not
# defined in this portion of the file -- confirm they exist elsewhere.
ants=[]
lons=[]
lats=[]
fileid=file(csvfile, 'U')
ant=0
while(True):
    line=fileid.readline()
    if(line==''):
        break;
    line=line.split(',')
#    ant=int(line[0])
    ant=ant+1
    lon=float(line[0])
    lat=float(line[1])
    # Keep only antennas within dmax of the reference position.
    if(distance(lat, lon, latref, lonref)<dmax):
        ants.append(ant)
        lons.append(lon)
        lats.append(lat)
fileid.close()
print ants
# Build the comma-separated list of antenna names.
s='antennas.SKA1.names = ['
for i in range(len(ants)):
    if(i<len(ants)-1):
        s=s+'Dish%d,'%ants[i]
    else:
        s=s+'Dish%d'%ants[i]
s=s+']\n'
print "Read %d lines" % len(ants)
outfileid=file(outfile, 'w')
outfileid.write('antennas.telescope = SKA1\n')
outfileid.write('antennas.SKA1.coordinates = global\n')
outfileid.write(s)
outfileid.write('antennas.SKA1.diameter = 80m\n')
outfileid.write('antennas.SKA1.scale = 1.0\n')
outfileid.write('antennas.SKA1.mount = alt-az\n')
# Convert each kept antenna's geodetic position (fixed elevation 300 m)
# to geocentric coordinates and write it out.
for ant in range(len(ants)):
    lon=lons[ant]
    lat=lats[ant]
    el=300.0
    outfileid.write('# lat: %s; long: %s; el: %s\n' % (lat, lon, el))
    (xx, yy, zz) = WGS84ToITRF(lat*math.pi/180.0, lon*math.pi/180.0, el)
    outfileid.write('antennas.SKA1.Dish%d=[%s, %s, %s]\n' % (ants[ant], xx, yy, zz))
# NOTE(review): fileid was already closed above; this second close is
# redundant (harmless).
fileid.close()
outfileid.close()
| [
2,
40664,
7753,
11639,
7252,
13,
40664,
6,
198,
2,
448,
7753,
11639,
18831,
32,
16,
3838,
17,
13276,
13,
259,
6,
198,
40664,
7753,
11639,
67,
680,
13,
40664,
6,
198,
448,
7753,
11639,
18831,
32,
16,
35,
680,
13,
259,
6,
198,
67,... | 1.96051 | 785 |
# intentionally empty - nothing to initialise
2,
16464,
6565,
532,
2147,
284,
493,
1287,
786
] | 5 | 9 |
#!/usr/bin/python
import sys,os
# Python 2 script: rewrites each line of a PTB file token-by-token.
if len(sys.argv) != 2:
    print "Usage: %s <ptb file>"%os.path.basename(sys.argv[0])
    exit(1)
file = sys.argv[1]
# NOTE(review): separator is unused in this portion of the script.
separator = '_'
with open(file) as f:
    for line in f:
        tokens = line.split()
        # NOTE(review): clean_token is not defined or imported in this
        # portion of the file -- confirm it exists elsewhere.
        tokens = map(clean_token, tokens)
        # For some reason it seems that the POS tagger wants the line to
        # end with a space...
        # This doesn't seem to be strictly true, but adding the space seems to
        # fix some bug...
        line="%s "%' '.join(tokens)
        print line
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
11,
418,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
14512,
362,
25,
198,
220,
220,
220,
3601,
366,
28350,
25,
4064,
82,
1279,
457,
65,
2393,
24618,
4,
418,
13,
6... | 2.232 | 250 |
import os
from typing import Dict
from ape.api import ReceiptAPI, TransactionAPI, UpstreamProvider, Web3Provider
from ape.exceptions import ContractLogicError, ProviderError, TransactionError, VirtualMachineError
from ape.utils import gas_estimation_error_message
from web3 import HTTPProvider, Web3 # type: ignore
from web3.exceptions import ContractLogicError as Web3ContractLogicError
from web3.gas_strategies.rpc import rpc_gas_price_strategy
from web3.middleware import geth_poa_middleware
# Environment variable names that may hold the Infura project credential
# -- presumably read by the provider setup elsewhere in this module; confirm.
_ENVIRONMENT_VARIABLE_NAMES = ("WEB3_INFURA_PROJECT_ID", "WEB3_INFURA_API_KEY")
class InfuraProviderError(ProviderError):
    """
    An error raised by the Infura provider plugin.

    Subclasses ape's ``ProviderError`` so code that catches generic
    provider errors also catches Infura-specific failures.
    """
| [
11748,
28686,
198,
6738,
19720,
1330,
360,
713,
198,
198,
6738,
43835,
13,
15042,
1330,
19520,
10257,
17614,
11,
45389,
17614,
11,
3205,
5532,
29495,
11,
5313,
18,
29495,
198,
6738,
43835,
13,
1069,
11755,
1330,
17453,
11187,
291,
12331,
... | 3.174312 | 218 |
import logging
import os
from functools import wraps
from flask import current_app, Flask
from flask_sqlalchemy import SQLAlchemy
from wallet_api.config import FlaskAppConfig as flask_conf
from wallet_api.config import PSQLClientConfig as db_conf
# Module-level SQLAlchemy handle; bound to the application via
# db.init_app() inside create_app().
db = SQLAlchemy()
def create_app(test: bool = False) -> Flask:
    """
    Application factory pattern - creates and initializes the application.

    When ``test`` is true, the secret key, TESTING flag and database URI
    are overridden with unit-test values.

    :param test: `True` to return an application for unit-testing.
    :return: Flask's application object.
    """
    app = Flask(__name__)
    # Flask config flags
    app.config.from_mapping(  # type: ignore
        {
            "SECRET_KEY": flask_conf.secret_key,
            # SQLAlchemy specific settings
            "SQLALCHEMY_DATABASE_URI": db_conf.connection_url(),
            "SQLALCHEMY_TRACK_MODIFICATIONS": False,
        }
    )
    if test:
        # Override production settings with test-only values
        # (random secret, test database).
        app.config.from_mapping(
            {
                "SECRET_KEY": os.urandom(16),
                "TESTING": True,
                "SQLALCHEMY_DATABASE_URI": db_conf.connection_url(test=True),
            }
        )
    # Logging config: verbose only when Flask debug mode is on.
    app.logger.setLevel(logging.DEBUG if app.config["DEBUG"] else logging.INFO)
    # Initializes database
    db.init_app(app)
    # Registering blueprints (imported here, presumably to avoid a
    # circular import -- confirm).
    from wallet_api.routes import app_bp
    app.register_blueprint(app_bp)
    return app
# ---- Flask views decorator ---- #
def db_isolation_level(level: str):
    """
    Flask view decorator to set SQLAlchemy transaction isolation level.

    https://docs.sqlalchemy.org/en/13/dialects/postgresql.html#postgresql-isolation-level

    :param level: SQLAlchemy driver-specific isolation level.
    :return: decorator that wraps a view function.
    """
    # BUG FIX: the original returned the name `decorator` without ever
    # defining it, which raised NameError on first use.
    def decorator(view):
        # Preserve the wrapped view's metadata so Flask routing and
        # debugging keep working.
        @wraps(view)
        def wrapper(*args, **kwargs):
            # Apply the isolation level to the connection used by the
            # current session before the view body runs.
            db.session.connection(execution_options={"isolation_level": level})
            return view(*args, **kwargs)
        return wrapper
    return decorator
| [
11748,
18931,
198,
11748,
28686,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
198,
6738,
42903,
1330,
1459,
62,
1324,
11,
46947,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
198,
6738,
13008,
62,
15042,
13,
... | 2.513748 | 691 |
"""
The MIT License (MIT)
Copyright (c) 2017-2018 Nariman Safiulin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import enum
class EventType(enum.Enum):
    """List of event types which can be received.

    Each member's value is the corresponding event name string;
    ``UNKNOWN`` maps to ``None`` and covers unrecognized events.
    """
    UNKNOWN = None
    # Connection lifecycle events.
    CONNECT = "connect"
    READY = "ready"
    SHARD_READY = "shard_ready"
    RESUMED = "resumed"
    ERROR = "error"
    # Raw socket traffic.
    SOCKET_RAW_RECEIVE = "socket_raw_receive"
    SOCKET_RAW_SEND = "socket_raw_send"
    # Typing and message events ("RAW_" variants carry uncached payloads).
    TYPING = "typing"
    MESSAGE = "message"
    MESSAGE_DELETE = "message_delete"
    RAW_MESSAGE_DELETE = "raw_message_delete"
    RAW_BULK_MESSAGE_DELETE = "raw_bulk_message_delete"
    MESSAGE_EDIT = "message_edit"
    RAW_MESSAGE_EDIT = "raw_message_edit"
    # Reaction events.
    REACTION_ADD = "reaction_add"
    RAW_REACTION_ADD = "raw_reaction_add"
    REACTION_REMOVE = "reaction_remove"
    RAW_REACTION_REMOVE = "raw_reaction_remove"
    REACTION_CLEAR = "reaction_clear"
    RAW_REACTION_CLEAR = "raw_reaction_clear"
    # Private (direct) channel events.
    PRIVATE_CHANNEL_CREATE = "private_channel_create"
    PRIVATE_CHANNEL_DELETE = "private_channel_delete"
    PRIVATE_CHANNEL_UPDATE = "private_channel_update"
    PRIVATE_CHANNEL_PINS_UPDATE = "private_channel_pins_update"
    # Guild channel events.
    GUILD_CHANNEL_CREATE = "guild_channel_create"
    GUILD_CHANNEL_DELETE = "guild_channel_delete"
    GUILD_CHANNEL_UPDATE = "guild_channel_update"
    GUILD_CHANNEL_PINS_UPDATE = "guild_channel_pins_update"
    # Member events.
    MEMBER_JOIN = "member_join"
    MEMBER_REMOVE = "member_remove"
    MEMBER_UPDATE = "member_update"
    # Guild lifecycle, role, emoji and availability events.
    GUILD_JOIN = "guild_join"
    GUILD_REMOVE = "guild_remove"
    GUILD_UPDATE = "guild_update"
    GUILD_ROLE_CREATE = "guild_role_create"
    GUILD_ROLE_DELETE = "guild_role_delete"
    GUILD_ROLE_UPDATE = "guild_role_update"
    GUILD_EMOJIS_UPDATE = "guild_emojis_update"
    GUILD_AVAILABLE = "guild_available"
    GUILD_UNAVAILABLE = "guild_unavailable"
    # Voice, moderation, group and relationship events.
    VOICE_STATE_UPDATE = "voice_state_update"
    MEMBER_BAN = "member_ban"
    MEMBER_UNBAN = "member_unban"
    GROUP_JOIN = "group_join"
    GROUP_REMOVE = "group_remove"
    RELATIONSHIP_ADD = "relationship_add"
    RELATIONSHIP_REMOVE = "relationship_remove"
    RELATIONSHIP_UPDATE = "relationship_update"
| [
37811,
201,
198,
464,
17168,
13789,
357,
36393,
8,
201,
198,
201,
198,
15269,
357,
66,
8,
2177,
12,
7908,
13596,
24086,
6895,
72,
11599,
201,
198,
201,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
1672... | 2.543015 | 1,267 |
import math

# Read an angle in radians from stdin, convert it to degrees, and print
# the result rounded to the nearest whole degree.
angle_rad = float(input())
angle_deg = angle_rad * 180 / math.pi
print(round(angle_deg))
11748,
10688,
198,
198,
6335,
796,
12178,
7,
15414,
28955,
198,
16863,
796,
2511,
1635,
11546,
1220,
10688,
13,
14415,
198,
4798,
7,
744,
7,
16863,
4008
] | 3.074074 | 27 |
import sys
import click
from neoload_cli_lib import user_data
@click.command()
@click.option('--url', default="https://neoload-api.saas.neotys.com/", help="The URL of api ", metavar='URL')
@click.option('--no-write', is_flag=True, help="don't save login on application data")
@click.argument('token', required=False)
def cli(token, url, no_write):
    """Store your token and uri of NeoLoad Web. The token is read from stdin if none is set.
    The default url is "https://neoload-api.saas.neotys.com/" """
    # No token argument: prompt interactively on a TTY (input hidden),
    # otherwise read a single line from the piped stdin.
    if not token:
        if sys.stdin.isatty():
            token = click.prompt("Enter your token", None, True)
        else:
            token = input()
    __user_data = user_data.do_login(token, url, no_write)
    # Echo the stored login details only in interactive sessions.
    if sys.stdin.isatty():
        print(__user_data)
| [
11748,
25064,
198,
11748,
3904,
198,
198,
6738,
497,
349,
1170,
62,
44506,
62,
8019,
1330,
2836,
62,
7890,
628,
198,
31,
12976,
13,
21812,
3419,
198,
31,
12976,
13,
18076,
10786,
438,
6371,
3256,
4277,
2625,
5450,
1378,
710,
349,
1170... | 2.524272 | 309 |
"""
Auxiliar descriptor models
--------------------------
This is a module which purpose is contain all the functions in parts that could
be useful for creating descriptor models.
"""
## Invocable characterizer functions
from characterizers import characterizer_1sh_counter,\
characterizer_summer, characterizer_summer_array,\
characterizer_summer_listdict, characterizer_summer_listarray,\
characterizer_summer_arrayarray,\
characterizer_average, characterizer_average_array,\
characterizer_average_listdict, characterizer_average_listarray,\
characterizer_average_arrayarray
from characterizers import characterizer_from_unitcharacterizer
## Invocable reducer functions
from reducers import sum_reducer, avg_reducer
## Invocable add2result functions
from add2result_functions import sum_addresult_function,\
append_addresult_function, replacelist_addresult_function
## Invocable completers
from completers import null_completer, weighted_completer,\
sparse_dict_completer, sparse_dict_completer_unknown,\
null_completer_concatenator
## Invocable aggregation functions
from aggregation_functions import aggregator_1sh_counter, aggregator_summer,\
aggregator_average
## Invocable featurenames functions
from featurenames_functions import counter_featurenames, array_featurenames,\
list_featurenames, general_featurenames
## Invocable out_formatter functions
from out_formatters import count_out_formatter_general, null_out_formatter,\
count_out_formatter_dict2array
| [
198,
37811,
198,
32,
2821,
4797,
43087,
4981,
198,
22369,
438,
198,
1212,
318,
257,
8265,
543,
4007,
318,
3994,
477,
262,
5499,
287,
3354,
326,
714,
198,
1350,
4465,
329,
4441,
43087,
4981,
13,
198,
37811,
198,
198,
2235,
10001,
420,
... | 3.451247 | 441 |
__author__ = "Tonakai"
from classes import *
from models import *
from services import *
from views import *
| [
834,
9800,
834,
796,
366,
35416,
461,
1872,
1,
201,
198,
6738,
6097,
1330,
1635,
201,
198,
6738,
4981,
1330,
1635,
201,
198,
6738,
2594,
1330,
1635,
201,
198,
6738,
5009,
1330,
1635,
201,
198
] | 3.257143 | 35 |
import os
import numpy as np
from ffn.inference import segmentation
def split_segmentation_by_intersection(a, b, min_size):
  """Computes the intersection of two segmentations.

  Intersects two spatially overlapping segmentations and assigns a new ID to
  every unique (id1, id2) pair of overlapping voxels. If 'id2' is the largest
  object overlapping 'id1', their intersection retains the 'id1' label. If the
  fragment created by intersection is smaller than 'min_size', it gets removed
  from the segmentation (assigned an id of 0 in the output).

  `a` is modified in place, `b` is not changed.

  Note that (id1, 0) is considered a valid pair and will be mapped to a non-zero
  ID as long as the size of the overlapping region is >= min_size, but (0, id2)
  will always be mapped to 0 in the output.

  Args:
    a: First segmentation (uint64 ndarray); relabeled in place.
    b: Second segmentation (uint64 ndarray); same shape as `a`.
    min_size: Minimum size intersection segment to keep (not map to 0).

  Returns:
    The flat array holding the relabeled result (`a` itself is updated
    when ravel() returns a view of it).

  Raises:
    TypeError: if a or b don't have a dtype of uint64
    ValueError: if a.shape != b.shape, or if `a` or `b` contain more than
        2**32-1 unique labels.
  """
  if a.shape != b.shape:
    raise ValueError
  a = a.ravel()
  output_array = a
  b = b.ravel()

  def remap_input(x):
    """Remaps `x` if needed to fit within a 32-bit ID space.

    Args:
      x: uint64 numpy array.

    Returns:
      `remapped, max_id, orig_values_map`, where:
        `remapped` contains the remapped version of `x` containing only
            values < 2**32.
        `max_id = x.max()`.
        `orig_values_map` is None if `remapped == x`, or otherwise an array such
            that `x = orig_values_map[remapped]`.

    Raises:
      TypeError: if `x` does not have uint64 dtype
      ValueError: if `x.max() > 2**32-1`.
    """
    if x.dtype != np.uint64:
      raise TypeError
    max_uint32 = 2**32 - 1
    max_id = x.max()
    orig_values_map = None
    if max_id > max_uint32:
      # Compress the label space so it fits in 32 bits.
      orig_values_map, x = np.unique(x, return_inverse=True)
      if len(orig_values_map) > max_uint32:
        raise ValueError('More than 2**32-1 unique labels not supported')
      x = np.cast[np.uint64](x)
      if orig_values_map[0] != 0:
        # Keep 0 as a valid background label in the remapped space by
        # shifting every compressed id up by one.
        orig_values_map = np.concatenate(
            [np.array([0], dtype=np.uint64), orig_values_map])
        x[...] += 1
    return x, max_id, orig_values_map
  remapped_a, max_id, a_reverse_map = remap_input(a)
  remapped_b, _, _ = remap_input(b)
  # Pack each (id_a, id_b) pair into one uint64: low 32 bits hold the
  # `a` label, high 32 bits the `b` label.
  intersection_segment_ids = np.bitwise_or(remapped_a, remapped_b << 32)
  unique_joint_labels, remapped_joint_labels, joint_counts = np.unique(
      intersection_segment_ids, return_inverse=True, return_counts=True)
  unique_joint_labels_a = np.bitwise_and(unique_joint_labels, 0xFFFFFFFF)
  unique_joint_labels_b = unique_joint_labels >> 32
  # Maps each segment id `id_a` in `remapped_a` to `(id_b, joint_count)` where
  # `id_b` is the segment id in `remapped_b` with maximum overlap, and
  # `joint_count` is the number of voxels of overlap.
  max_overlap_ids = dict()
  for label_a, label_b, count in zip(unique_joint_labels_a,
                                     unique_joint_labels_b, joint_counts):
    new_pair = (label_b, count)
    existing = max_overlap_ids.setdefault(label_a, new_pair)
    if existing[1] < count:
      max_overlap_ids[label_a] = new_pair
  # Relabel map to apply to remapped_joint_labels to obtain the output ids.
  new_labels = np.zeros(len(unique_joint_labels), np.uint64)
  for i, (label_a, label_b, count) in enumerate(zip(unique_joint_labels_a,
                                                    unique_joint_labels_b,
                                                    joint_counts)):
    if count < min_size or label_a == 0:
      # Background voxels and undersized fragments map to 0.
      new_label = 0
    elif label_b == max_overlap_ids[label_a][0]:
      # The dominant overlap keeps the original `a` label (undo the remap).
      if a_reverse_map is not None:
        new_label = a_reverse_map[label_a]
      else:
        new_label = label_a
    else:
      # Every other overlapping fragment becomes a brand-new segment id.
      max_id += 1
      new_label = max_id
    new_labels[i] = new_label
  output_array[...] = new_labels[remapped_joint_labels]
  return output_array
# pre_path = '/users/dlinsley/ffn_v2/ding_segmentations/x0099/y0099/z0099/'
# Load two versions of the same segmentation volume and split v1 where it
# disagrees with v2.
pre_path = '/users/dlinsley/ffn_v2/ding_segmentations/x0015/y0015/z0017'
file_path = '0/0/seg-0_0_0.npz'
# Minimum segment size for the optional clean-up pass (disabled when 0).
min_size = 0 # 1024
# Minimum size for fragments produced by the intersection split.
split_min_size = 7
v1_path = os.path.join(pre_path, 'v1', file_path)
v2_path = os.path.join(pre_path, 'v2', file_path)
# Both volumes must be uint64 for split_segmentation_by_intersection.
v1 = np.load(v1_path)['segmentation'].astype(np.uint64)
v2 = np.load(v2_path)['segmentation'].astype(np.uint64)
if min_size:
  v1 = segmentation.clean_up(seg=v1, min_size=min_size)
  v2 = segmentation.clean_up(seg=v2, min_size=min_size)
# Note: v1 is modified in place by the split.
new_v1 = split_segmentation_by_intersection(v1, v2,split_min_size)
# new_v1 = segmentation.reduce_id_bits(new_v1)
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
277,
22184,
13,
259,
4288,
1330,
10618,
341,
628,
198,
4299,
6626,
62,
325,
5154,
341,
62,
1525,
62,
3849,
5458,
7,
64,
11,
275,
11,
949,
62,
7857,
2599,
198,
220,
37227,
... | 2.327425 | 2,031 |
import os
while 1:
ifn = open("version")
major, minor, revision = ifn.read().split(".")
major = int(major)
minor = int(minor)
revision = int(revision)
ifn.close()
print "version", major, minor, revision
print "Increment Major version ?"
if raw_input().upper() == "Y":
major += 1
minor = 0
revision = 0
ofn = open("version", "w")
ofn.write("%u.%u.%u" % (major, minor, revision))
ofn.close()
print "Increment Minor version ?"
if raw_input().upper() == "Y":
minor += 1
revision = 0
ofn = open("version", "w")
ofn.write("%u.%u.%u" % (major, minor, revision))
ofn.close()
print "Increment Revision ?"
if raw_input().upper() == "Y":
revision += 1
ofn = open("version", "w")
ofn.write("%u.%u.%u" % (major, minor, revision))
ofn.close()
ifn = open("version")
major, minor, revision = ifn.read().split(".")
major = int(major)
minor = int(minor)
revision = int(revision)
ifn.close()
print "version", major, minor, revision
print "Version OK ?"
if raw_input().upper() == "Y":
break
while 1:
print "Lint Code ?"
if raw_input().upper() == "Y":
os.system("flake8 chips/*/*.py")
print "Auto Tidy Code ?"
if raw_input().upper() == "Y":
os.system("autopep8 -v --in-place -a -a chips/*/*.py")
print "Code Correct ?"
if raw_input().upper() == "Y":
break
while 1:
print "Run Automatic Tests?"
if raw_input().upper() == "Y":
os.chdir("test_suite")
os.system("./test_all")
os.chdir("..")
print "Run Manual Tests?"
if raw_input().upper() == "Y":
os.chdir("examples")
os.system("./example_1.py")
os.system("./example_2.py")
os.system("./example_3.py")
os.system("./example_4.py")
os.system("./example_5.py")
os.system("./example_6.py")
os.system("./example_7.py")
os.system("./example_8.py")
os.system("./example_9.py")
os.chdir("..")
os.system("python -m chips.utils.debugger")
os.system("python -m chips.utils.block_diagram")
print "Tests OK ?"
if raw_input().upper() == "Y":
break
while 1:
print "Build the documentation ?"
if raw_input().upper() == "Y":
os.chdir("docs")
os.system("make html")
os.system("make pdf")
os.system("make linkcheck")
os.system("make doctest")
os.chdir("..")
print "Review the documentation ?"
if raw_input().upper() == "Y":
os.system("google-chrome docs/build/html/index.html")
print "Publish documentation ?"
if raw_input().upper() == "Y":
os.system("scripts/publish_docs")
print "Documentation OK ?"
if raw_input().upper() == "Y":
break
while 1:
os.system("git status")
print "commit all?"
if raw_input().upper() == "Y":
os.system("git add .")
os.system("git commit")
print "tag version ?"
if raw_input().upper() == "Y":
os.system("git tag %u.%u.%u"%(major, minor, revision))
print "push ?"
if raw_input().upper() == "Y":
os.system("git push --tags origin master")
print "Version Control OK ?"
if raw_input().upper() == "Y":
break
print "Ready to Release?"
if raw_input().upper() == "Y":
os.system("python setup.py register")
| [
11748,
28686,
198,
198,
4514,
352,
25,
198,
220,
220,
220,
611,
77,
796,
1280,
7203,
9641,
4943,
198,
220,
220,
220,
1688,
11,
4159,
11,
18440,
796,
611,
77,
13,
961,
22446,
35312,
7203,
19570,
198,
220,
220,
220,
1688,
796,
493,
... | 2.205976 | 1,573 |
import os
import docker
import pytest
from jina.docker.hubio import HubIO
from jina.enums import BuildTestLevel
from jina.logging import JinaLogger
from jina.parsers.hub import set_hub_build_parser
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='function')
@pytest.fixture(scope='function')
| [
11748,
28686,
198,
198,
11748,
36253,
198,
11748,
12972,
9288,
198,
6738,
474,
1437,
13,
45986,
13,
40140,
952,
1330,
14699,
9399,
198,
6738,
474,
1437,
13,
268,
5700,
1330,
10934,
14402,
4971,
198,
6738,
474,
1437,
13,
6404,
2667,
1330... | 2.818966 | 116 |
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
from nltk.stem.wordnet import WordNetLemmatizer | [
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
2245,
10879,
198,
6738,
299,
2528,
74,
13,
30001,
1096,
1330,
18752,
30642,
7509,
198,
6738,
299,
2528,
74,
13,
927,
13,
4775,
3262,
1330,
9678,
7934,
43,
368,
6759,
7509
] | 3.05 | 40 |
b = BinarySearchTree()
b.add(1)
b.add(2)
b.add(20)
b.add(-1)
b.add(0)
b.add(-2)
b.add(-2)
# b.inorderTrans()
b.printLeafs() | [
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
65,
796,
45755,
18243,
27660,
3419,
201,
198,
201,
198,
65,
13,
2860,
7,
16,
8,
201,
198,
65,
13,
2860,
7,
17,
8,
201,
198,
... | 1.509804 | 102 |
from fastapi.encoders import jsonable_encoder
from services.authentication import authenticate
from fastapi import APIRouter, Request, HTTPException, status
from fastapi.responses import JSONResponse
from schema.categories import Category
from models.categories import Category as CategoryModel
from config.config_loader import settings
from services.python_operations import convert_mongo_result_to_dict, loop_through_queryset
from dbConnectionManager.db_session import accounts_db_connection_instance
router = APIRouter()
@router.post("/", response_model=Category)
| [
6738,
3049,
15042,
13,
12685,
375,
364,
1330,
33918,
540,
62,
12685,
12342,
198,
6738,
2594,
13,
41299,
3299,
1330,
8323,
5344,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
19390,
11,
14626,
16922,
11,
3722,
198,
6738,
3049,
150... | 3.931034 | 145 |
# -*- coding: utf-8 -*-
'''
File name: code\hilberts_new_hotel\sol_359.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #359 :: Hilbert's New Hotel
#
# For more information see:
# https://projecteuler.net/problem=359
# Problem Statement
'''
An infinite number of people (numbered 1, 2, 3, etc.) are lined up to get a room at Hilbert's newest infinite hotel. The hotel contains an infinite number of floors (numbered 1, 2, 3, etc.), and each floor contains an infinite number of rooms (numbered 1, 2, 3, etc.).
Initially the hotel is empty. Hilbert declares a rule on how the nth person is assigned a room: person n gets the first vacant room in the lowest numbered floor satisfying either of the following:
the floor is empty
the floor is not empty, and if the latest person taking a room in that floor is person m, then m + n is a perfect square
Person 1 gets room 1 in floor 1 since floor 1 is empty.
Person 2 does not get room 2 in floor 1 since 1 + 2 = 3 is not a perfect square.
Person 2 instead gets room 1 in floor 2 since floor 2 is empty.
Person 3 gets room 2 in floor 1 since 1 + 3 = 4 is a perfect square.
Eventually, every person in the line gets a room in the hotel.
Define P(f, r) to be n if person n occupies room r in floor f, and 0 if no person occupies the room. Here are a few examples:
P(1, 1) = 1
P(1, 2) = 3
P(2, 1) = 2
P(10, 20) = 440
P(25, 75) = 4863
P(99, 100) = 19454
Find the sum of all P(f, r) for all positive f and r such that f × r = 71328803586048 and give the last 8 digits as your answer.
'''
# Solution
# Solution Approach
'''
'''
| [
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
220,
220,
220,
9220,
1438,
25,
2438,
59,
71,
346,
527,
912,
62,
3605,
62,
8940,
417,
59,
34453,
62,
30743,
13,
9078,
198,
220,
220,
220,
6434,
2... | 3.199616 | 521 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to shut down Hadoop Cluster on Google Compute Engine."""
import logging
from src import auth
from src.hadoop import datastore
from src.hadoop import hadoop_cluster
from src.pipelines import pipeline
SCOPE = ['https://www.googleapis.com/auth/compute']
class HadoopShutdownError(Exception):
  """Error on Hadoop cluster shut down.

  Raised by ShutdownHadoopCluster() when a required configuration
  parameter is missing or no matching cluster is found in the Datastore.
  """
def ShutdownHadoopCluster(config):
  """Function that actually shuts down Hadoop cluster.

  Split as a function for unit test purpose.

  Args:
    config: Hadoop shutdown parameter created by Datapipeline
        in Python dictionary.  Must contain 'project' and 'prefix' keys.

  Raises:
    HadoopShutdownError: Error on shutting down Hadoop cluster, such as
        incorrect parameters or no matching cluster registered.
  """
  logging.debug('Shutdown Hadoop cluster: %s', str(config))

  # Required parameters.
  try:
    project = config['project']
    prefix = config['prefix']
  except KeyError as e:
    raise HadoopShutdownError(
        'Hadoop Shutdown: Missing required parameter: %s' % str(e))

  # Query by project name and prefix. Since the query filters with 2 fields,
  # the query requires Datastore index.
  clusters = datastore.ClusterInfo.query(
      datastore.ClusterInfo.project == project,
      datastore.ClusterInfo.prefix == prefix).fetch()
  if not clusters:
    # BUG FIX: the original passed the format arguments as extra exception
    # arguments (logging-call style), so the '%s' placeholders were never
    # substituted into the message.  Format the string explicitly instead.
    raise HadoopShutdownError(
        'Hadoop Shutdown: No cluster found in project "%s" with prefix '
        '"%s"' % (project, prefix))

  for cluster_info in clusters:
    logging.info('Shutdown Hadoop cluster: %s', str(cluster_info.key.id()))
    cluster = hadoop_cluster.HadoopCluster(
        auth.Service.HttpFromServiceAccount(SCOPE),
        cluster_id=cluster_info.key.id())
    cluster.TeardownCluster()
class HadoopShutdown(pipeline.Pipeline):
  """Pipeline class to shut down Hadoop cluster."""

  def run(self, config):
    """Deletes Google Compute Engine instances of the Hadoop cluster.

    Args:
      config: Hadoop shutdown parameters as a Python dictionary (see
          ShutdownHadoopCluster() for the required keys).
    """
    # BUG FIX: run() was decorated with @staticmethod while still declaring
    # `self`.  Pipeline.run() is an instance method; with the decorator in
    # place, an instance call `p.run(config)` would have bound `config`
    # to the `self` slot and raised a TypeError for the missing argument.
    ShutdownHadoopCluster(config)
| [
2,
15069,
2211,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
220,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13... | 3.153942 | 799 |
#!/usr/bin/python
# Package a static-asset tree into a versioned zip archive, pre-compress
# CSS/JS with gzip (so nginx can serve the .gz files directly), then upload
# the archive to the CDN host via scp and unpack it there.
# NOTE(review): Python 2 script -- uses the `file()` builtin and the
# pre-Python-3 `ConfigParser` module name.

import optparse
import os

import zipfile
import fnmatch
import gzip
from ConfigParser import SafeConfigParser

import logging
logging.basicConfig(
    level = logging.INFO,
    format = "%(asctime)s %(levelname)-5.5s %(message)s",
    datefmt = "%H:%M:%S"
)
log = logging.getLogger(__name__)

if __name__ == '__main__':
    option_parser = optparse.OptionParser()
    option_parser.add_option('--ini',
            help='INI file to use for pylons settings',
            type='str', default='development.ini')
    option_parser.add_option('--version',
            help='version tag to use as a prefix',
            type='str', default='test')
    option_parser.add_option('--exclude-file',
            help='list of files to exclude', dest="exclude",
            type='str', default=None)
    options, args = option_parser.parse_args()

    # The upload target host is read from the pylons INI file.
    c = SafeConfigParser()
    c.read(options.ini)
    host = c.get("app:main", "cdn.ssh.host")

    # Optional newline-separated list of fnmatch patterns to skip.
    excludes = []
    if options.exclude:
        excludes = [n.strip() for n in file(options.exclude).readlines()]

    arch = zipfile.ZipFile("static-%s.zip" % options.version, "w")

    # walk through all dirs and all files in the specified folder
    for root, dirnames, filenames in os.walk(args[0]):
        for filename in filenames:
            fullpath = os.path.join(root, filename)
            # Archive entries are stored relative to the walked root.
            relpath = fullpath[len(args[0]):]
            if relpath[0] == "/":
                relpath = relpath[1:]

            # if the file matches an exclusion pattern, skip it
            included = True
            for file_to_exclude in excludes:
                if fnmatch.fnmatch(relpath, file_to_exclude):
                    included = False
                    break

            # if the file is to be included, add it
            if included:
                logging.info("Adding %s" % relpath)
                arch.write(fullpath, relpath)

                # if the file is CSS or JS, also add a precompressed version
                # with the same modification timestamp (for nginx to serve)
                if relpath.endswith(".css") or relpath.endswith(".js"):
                    logging.info("Adding %s.gz" % relpath)
                    f = gzip.open(fullpath+".gz", 'wb', 9)
                    f.write(file(fullpath).read())
                    f.close()
                    # Copy atime/mtime onto the .gz so its timestamp matches
                    # the original file's (nginx gzip_static expects this).
                    s = os.stat(fullpath)
                    os.utime(fullpath+".gz", (s.st_atime, s.st_mtime))
                    arch.write(fullpath+".gz", relpath+".gz")
                    os.unlink(fullpath+".gz")
    arch.close()

    # Ship the archive, unpack it into the versioned directory, clean up.
    os.system("scp static-%s.zip %s:~/" % (options.version, host))
    os.system("ssh %s unzip -o static-%s.zip -d /opt/cb/var/www/static/%s" % (host, options.version, options.version))
    os.system("ssh %s rm -f static-%s.zip" % (host, options.version))
    os.system("rm -f static-%s.zip" % (options.version, ))
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
2172,
29572,
198,
11748,
28686,
198,
198,
11748,
19974,
7753,
198,
11748,
24714,
15699,
198,
11748,
308,
13344,
198,
6738,
17056,
46677,
1330,
19978,
16934,
46677,
198,
198,
11748,
18... | 2.159125 | 1,326 |
"""Build a source distribution and upload it to PyPI via twine.

Credentials come from the (untracked) pypi_config module.
"""
import glob
import os
import subprocess

from pypi_config import PASSWORD, USERNAME

# Build the sdist first and stop if it fails: the original os.system()
# calls ignored exit codes, so a broken build could still trigger an upload.
subprocess.run(["python", "setup.py", "sdist"], check=True)

# Pass arguments as a list instead of interpolating credentials into a
# shell command line -- this avoids shell-quoting breakage (and injection)
# when the password contains metacharacters.  The shell glob `dist/*` is
# reproduced with glob.glob().  NOTE: the password still appears in the
# process list; consider the TWINE_PASSWORD environment variable instead.
subprocess.run(
    ["twine", "upload"] + glob.glob("dist/*")
    + ["-u", USERNAME, "-p", PASSWORD, "--skip-existing"],
    check=True,
)
| [
11748,
28686,
198,
198,
6738,
279,
4464,
72,
62,
11250,
1330,
41752,
54,
12532,
11,
1294,
1137,
20608,
198,
198,
418,
13,
10057,
7203,
29412,
9058,
13,
9078,
264,
17080,
4943,
198,
418,
13,
10057,
7,
69,
1,
4246,
500,
9516,
1233,
15... | 2.709677 | 62 |
"""URL configuration: optional dev-time static/media serving plus puput."""
import re

from django.conf import settings
from django.conf.urls import include, url
from django.urls import path

urlpatterns = []

if settings.SERVE_MEDIA:
    # Development-only: let Django itself serve static and media files.
    from django.views.static import serve

    static_pattern = r"^%s(?P<path>.*)$" % re.escape(settings.STATIC_URL.lstrip("/"))
    media_pattern = r"^%s(?P<path>.*)$" % re.escape(settings.MEDIA_URL.lstrip("/"))
    urlpatterns.append(
        url(static_pattern, serve, kwargs={"document_root": settings.STATIC_ROOT})
    )
    urlpatterns.append(
        url(media_pattern, serve, kwargs={"document_root": settings.MEDIA_ROOT})
    )

# Everything else is delegated to the puput blog application.
urlpatterns.append(path(r"", include("puput.urls")))
| [
11748,
302,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6371,
33279,
82,
796,
17635,
198,
1... | 2.024316 | 329 |
"""
Plotting: Graphical representations of data extracted from datasets.
Graphical representations of cw-EPR data are an indispensable aspect of data
analysis. To facilitate this, a series of different plotters are available.
Plotting relies on `matplotlib <https://matplotlib.org/>`_, and mainly its
object-oriented interface should be used for the actual plotting.
Generally, two types of plotters can be distinguished:
* Plotters for handling single datasets
Shall be derived from :class:`aspecd.plotting.SinglePlotter`.
* Plotters for handling multiple datasets
Shall be derived from :class:`aspecd.plotting.MultiPlotter`.
In the first case, the plot is usually handled using the :meth:`plot` method
of the respective :obj:`cwepr.dataset.Dataset` object. Additionally,
those plotters always only operate on the data of a single dataset, and the
plot can easily be attached as a representation to the respective dataset.
Plotters handling single datasets should always inherit from the
:class:`aspecd.plotting.SinglePlotter` class.
In the second case, the plot is handled using the :meth:`plot` method of the
:obj:`aspecd.plotting.Plotter` object, and the datasets are stored as a list
within the plotter. As these plots span several datasets, there is no easy
connection between a single dataset and such a plot in sense of
representations stored in datasets. Plotters handling multiple datasets should
always inherit from the :class:`aspecd.plotting.MultiPlotter` class.
In a certain sense, there is a third type of plotters:
* Plotters consisting of more than one axes
Shall be derived from :class:`aspecd.plotting.CompositePlotter`.
However, practically mostly these composite plotters will behave like
plotters handling either single or multiple datasets. Generally,
these composite plotters will use other types of plotters to perform the
actual plot tasks. This modular approach allows for great flexibility.
A note on array dimensions and axes
===================================
Something often quite confusing is the apparent inconsistency between the
order of array dimensions and the order of axes. While we are used to assign
axes in the order *x*, *y*, *z*, and assuming *x* to be horizontal,
*y* vertical (and *z* sticking out of the paper plane), arrays are usually
indexed row-first, column-second. That means, however, that if you simply
plot a 2D array in axes, your *first* dimension is along the *y* axis,
the *second* dimension along the *x* axis.
Therefore, as the axes of your datasets will always correspond to the array
dimensions of your data, in case of 2D plots you will need to *either* use
the information contained in the second axis object for your *x* axis label,
and the information from the first axis object for your *y* axis label,
*or* to transpose the data array.
Another aspect to have in mind is the position of the origin. Usually,
in a Cartesian coordinate system, convention is to have the origin (0,
0) in the *lower left* of the axes (for the positive quadrant). However,
for images, convention is to have the corresponding (0, 0) pixel located in
the *upper left* edge of your image. Therefore, those plotting methods
dealing with images will usually *revert* the direction of your *y* axis.
Most probably, eventually you will have to check with real data and ensure
the plotters to plot data and axes in a consistent fashion.
Types of concrete plotters
==========================
The cwepr package comes with a series of concrete plotters included ready
to be used, thanks to inheriting from the underlying ASpecD framework. As
stated above, plotters can generally be divided into two types: plotters
operating on single datasets and plotters combining the data of multiple
datasets into a single figure.
Additionally, plotters can be categorised with regard to creating figures
consisting of a single or multiple axes. The latter are plotters inheriting
from the :class:`aspecd.plotting.CompositePlotter` class. The latter can be
thought of as templates for the other plotters to operate on, *i.e.* they
provide the axes for other plotters to display their results.
Concrete plotters for single datasets
-------------------------------------
* :class:`cwepr.plotting.SinglePlotter1D`
Basic line plots for single datasets, allowing to plot a series of
line-type plots, including (semi)log plots
* :class:`cwepr.plotting.SinglePlotter2D`
Basic 2D plots for single datasets, allowing to plot a series of 2D plots,
including contour plots and image-type display
* :class:`aspecd.plotting.SinglePlotter2DStacked`
Stacked plots of 2D data, converting a 2D display into a series of 1D line
plots stacked on top of each other.
* :class:`cwepr.plotting.SingleCompositePlotter`
Composite plotter for single datasets, allowing to plot different views of
one and the same datasets by using existing plotters for single datasets.
* :class:`cwepr.plotting.GoniometerSweepPlotter`
Composite plotter for single datasets representing goniometer sweeps,
*i.e.* angular-dependent cw-EPR measurements.
Concrete plotters for multiple datasets
---------------------------------------
* :class:`cwepr.plotting.MultiPlotter1D`
Basic line plots for multiple datasets, allowing to plot a series of
line-type plots, including (semi)log plots
* :class:`cwepr.plotting.MultiPlotter1DStacked`
Stacked line plots for multiple datasets, allowing to plot a series of
line-type plots, including (semi)log plots
* :class:`cwepr.plotting.PowerSweepAnalysisPlotter`
Line plot for multiple datasets particularly for power sweep analysis
(power saturation analysis) with a second *x* axis on top showing the
microwave power.
A note for developers
=====================
As each kind of spectroscopy comes with own needs for extensions, there is a
class :class:`PlotterExtensions` that can be used as a mixin class for other
plotters to provide additional functionality for all plotters.
Make sure when implementing functionality here that it really works with all
types of plotters, *i.e.* both SinglePlotters and MultiPlotters. This is
particularly relevant if you need to get information from dataset(s),
as a SinglePlotter will have an attribute ``dataset``, while a MultiPlotter
will have an attribute ``datasets``.
Module documentation
====================
"""
import copy
import numpy as np
import aspecd.plotting
import aspecd.processing
from cwepr import utils
class GoniometerSweepPlotter(aspecd.plotting.SingleCompositePlotter):
    """Overview of the results of a goniometer sweep.

    A goniometer sweep, *i.e.* a series of cw-EPR spectra as a function of
    the angle of the sample with respect to the external magnetic field,
    is usually performed over at least 180°, regardless of the step size.
    The reason is simply that the spectra for 0° and 180° should be
    identical due to the underlying physics of magnetic resonance.

    The plotter will create three subpanels:

    * A 2D plot (scaled image plot) as a general overview.

    * A 1D multiplot comparing the signals for 0° and 180° to check for
      consistency during the measurement.

    * A stacked plot showing all angular positions, providing an
      alternative view of the angular-dependent signal changes compared
      to the 2D plot.

    Examples
    --------
    For convenience, a series of examples in recipe style (for details of
    the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given
    below for how to make use of this class.

    To get an overview of your goniometer sweep, just invoke the plotter
    with default values:

    .. code-block:: yaml

       - kind: singleplot
         type: GoniometerSweepPlotter
         properties:
           filename: output.pdf

    """
class PowerSweepAnalysisPlotter(aspecd.plotting.MultiPlotter1D):
    r"""
    Plot results of a power saturation analysis with second axis for mw power.

    To determine the microwave power level not saturating the cw-EPR signal,
    usually a "power sweep" (power saturation study) is carried out with
    systematically varying the incident microwave power. The signal
    amplitude of the resulting data is plotted vs. the square root of the
    microwave power, resulting in a power saturation curve. As long as the
    signal is not saturated, the graph shows a linear relationship.

    As the class inherits from :class:`aspecd.plotting.MultiPlotter1D`
    see there for additional details of the parameters that can be set.

    Attributes
    ----------
    parameters : :class:`dict`
        All parameters necessary for the plot, implicit and explicit

        Most parameters are documented in the base class. Here, only the
        additional parameters or parameters with specific settings are
        documented.

        mw-axis : class:`bool`
            Whether to show an additional microwave axis in units of power.

            The main *x* axis gives the square root of the microwave power,
            but as the microwave power needs to be set in power units
            (typically mW), it is convenient to have this available as well.

            Default: True

        tight_layout: :class:`bool`
            Whether to adjust the plot to fit into the figure area

            For details see :meth:`matplotlib.figure.Figure.tight_layout`.

            Default: True

    Examples
    --------
    The class basically works like a usual MultiPlotter1D. A full power
    saturation analysis may look like this:

    .. code-block:: yaml

       datasets:
         - PowerSweep

       tasks:
         - kind: singleanalysis
           type: AmplitudeVsPower
           apply_to:
             - PowerSweep
           result: power_sweep_analysis

         - kind: singleanalysis
           type: PolynomialFitOnData
           properties:
             parameters:
               order: 1
               points: 5
             return_type: dataset
           apply_to:
             - power_sweep_analysis
           result: fit

         - kind: multiplot
           type: PowerSweepAnalysisPlotter
           properties:
             properties:
               drawings:
                 - marker: '*'
                 - color: red
               grid:
                 show: true
                 axis: both
               axes:
                 ylabel: '$EPR\\ amplitude$'
             filename: powersweepanalysis.pdf
           apply_to:
             - power_sweep_analysis
             - fit

    This would result in a power saturation curve (EPR signal amplitude as a
    function of the square root of the microwave power, the latter usually
    in mW), and a linear fit covering in this case the first five data points.

    .. versionadded:: 0.2

    """

    def _create_power_axis(self):
        """
        Add a mw power axis as second axis opposite the sqrt(mw power) axis.

        Note that :func:`numpy.sqrt` returns NaN for negative values.
        Therefore, the lower axis limit is set to be >= 0 in this plot.
        """
        # NOTE(review): `backward` and `forward` are looked up at module
        # level (not visible in this excerpt); presumably they convert
        # between sqrt(power) and power for the secondary axis -- confirm
        # they are defined in this module.
        power_axis = self.ax.secondary_xaxis('top',
                                             functions=(backward, forward))
        power_axis.set_xlabel('$mw\\ power$')
        power_axis.tick_params(labelrotation=90)
class PlotterExtensions:
    """Extensions for plots of cw-EPR data.

    This class is meant as a mixin class for plotters of the cwepr package
    and provides functionality specific for cw-EPR-spectroscopic data.

    Hence it can only be used as mixin in addition to a plotter class.

    Attributes
    ----------
    parameters : :class:`dict`
        All parameters necessary for the plot, implicit and explicit

        The following keys exist, in addition to those defined by the
        actual plotter:

        g-axis: :class:`bool`
            Whether to show an additional *g* axis opposite of the magnetic
            field axis

            This assumes the magnetic field axis to be the *x* axis and the
            magnetic field values to be in millitesla (mT), as it calls
            :func:`cwepr.utils.convert_mT2g`.

    .. versionadded:: 0.2

    """

    def _create_g_axis(self, mw_freq=None):
        """
        Add a *g* axis as second axis opposite the magnetic field axis.

        Currently, this function assumes the magnetic field axis to be the
        *x* axis. Additionally, the magnetic field values are assumed to be
        in millitesla (mT), and the microwave frequency to be in gigahertz (
        GHz).

        Parameters
        ----------
        mw_freq : :class:`float`
            microwave frequency (**in GHz**) used to convert from mT to g

        """
        # NOTE(review): `backward` and `forward` are module-level helpers
        # not visible in this excerpt, and `mw_freq` is not passed to them
        # here -- confirm they obtain the frequency elsewhere.
        gaxis = self.ax.secondary_xaxis('top', functions=(backward, forward))
        gaxis.set_xlabel(r'$g\ value$')
class SinglePlotter1D(aspecd.plotting.SinglePlotter1D, PlotterExtensions):
    """1D plots of single datasets.

    Convenience class taking care of 1D plots of single datasets.

    As the class is fully inherited from ASpecD for simple usage, see the
    ASpecD documentation of the :class:`aspecd.plotting.SinglePlotter1D`
    class for details.

    Furthermore, the class inherits all functionality from
    :class:`PlotterExtensions`. See there for additional details.

    Examples
    --------
    For convenience, a series of examples in recipe style (for details of
    the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given
    below for how to make use of this class. Of course, all parameters
    settable for the superclasses can be set as well. The examples focus
    each on a single aspect.

    In the simplest case, just invoke the plotter with default values:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter1D
         properties:
           filename: output.pdf

    In case you would like to have a *g* axis plotted as a second *x* axis
    on top:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter1D
         properties:
           parameters:
             g-axis: true
           filename: output.pdf

    """
class SinglePlotter2D(aspecd.plotting.SinglePlotter2D, PlotterExtensions):
    """2D plots of single datasets.

    Convenience class taking care of 2D plots of single datasets.

    As the class is fully inherited from ASpecD for simple usage, see the
    ASpecD documentation of the :class:`aspecd.plotting.SinglePlotter2D`
    class for details.

    Furthermore, the class inherits all functionality from
    :class:`PlotterExtensions`. See there for additional details.

    Examples
    --------
    For convenience, a series of examples in recipe style (for details of
    the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given
    below for how to make use of this class. Of course, all parameters
    settable for the superclasses can be set as well. The examples focus
    each on a single aspect.

    In the simplest case, just invoke the plotter with default values:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2D
         properties:
           filename: output.pdf

    To change the axes (flip *x* and *y* axis):

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2D
         properties:
           filename: output.pdf
           parameters:
             switch_axes: True

    To use another type (here: contour):

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2D
         properties:
           filename: output.pdf
           type: contour

    To set the number of levels of a contour plot to 10:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2D
         properties:
           filename: output.pdf
           type: contour
           parameters:
             levels: 10

    To change the colormap (cmap) used:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2D
         properties:
           filename: output.pdf
           properties:
             drawing:
               cmap: RdGy

    Make sure to check the documentation of the ASpecD
    :mod:`aspecd.plotting` module for further parameters that can be set.

    In case you would like to have a *g* axis plotted as a second *x* axis
    on top:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2D
         properties:
           parameters:
             g-axis: true
           filename: output.pdf

    """
class SinglePlotter2DStacked(aspecd.plotting.SinglePlotter2DStacked,
                             PlotterExtensions):
    """Stacked plots of 2D data.

    A stackplot creates a series of lines stacked on top of each other from
    a 2D dataset.

    As the class is fully inherited from ASpecD for simple usage, see the
    ASpecD documentation of the
    :class:`aspecd.plotting.SinglePlotter2DStacked` class for details.

    Furthermore, the class inherits all functionality from
    :class:`PlotterExtensions`. See there for additional details.

    Examples
    --------
    For convenience, a series of examples in recipe style (for details of
    the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given
    below for how to make use of this class. Of course, all parameters
    settable for the superclasses can be set as well. The examples focus
    each on a single aspect.

    In the simplest case, just invoke the plotter with default values:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2DStacked
         properties:
           filename: output.pdf

    If you need to more precisely control the formatting of the y tick
    labels, particularly the number of decimals shown, you can set the
    formatting accordingly:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2DStacked
         properties:
           filename: output.pdf
           parameters:
             yticklabelformat: '%.2f'

    In this particular case, the y tick labels will appear with only two
    decimals. Note that currently, the "old style" formatting
    specifications are used due to their widespread use in other
    programming languages and hence the familiarity of many users with
    this particular notation.

    Sometimes you want to have horizontal "zero lines" appear for each
    individual trace of the stacked plot. This can be achieved explicitly
    setting the "show_zero_lines" parameter to "True" that is set to
    "False" by default:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2DStacked
         properties:
           filename: output.pdf
           parameters:
             show_zero_lines: True

    In case you would like to have a *g* axis plotted as a second *x* axis
    on top:

    .. code-block:: yaml

       - kind: singleplot
         type: SinglePlotter2DStacked
         properties:
           parameters:
             g-axis: true
           filename: output.pdf

    """
class MultiPlotter1D(aspecd.plotting.MultiPlotter1D, PlotterExtensions):
    """1D plots of multiple datasets.

    Convenience class taking care of 1D plots of multiple datasets.

    As the class is fully inherited from ASpecD for simple usage, see the
    ASpecD documentation of the :class:`aspecd.plotting.MultiPlotter1D`
    class for details.

    Furthermore, the class inherits all functionality from
    :class:`PlotterExtensions`. See there for additional details.

    Examples
    --------
    For convenience, a series of examples in recipe style (for details of
    the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given
    below for how to make use of this class. Of course, all parameters
    settable for the superclasses can be set as well. The examples focus
    each on a single aspect.

    In the simplest case, just invoke the plotter with default values:

    .. code-block:: yaml

       - kind: multiplot
         type: MultiPlotter1D
         properties:
           filename: output.pdf

    To change the settings of each individual line (here the colour and
    label), supposing you have three lines, you need to specify the
    properties in a list for each of the drawings:

    .. code-block:: yaml

       - kind: multiplot
         type: MultiPlotter1D
         properties:
           filename: output.pdf
           properties:
             drawings:
               - color: '#FF0000'
                 label: foo
               - color: '#00FF00'
                 label: bar
               - color: '#0000FF'
                 label: foobar

    .. important::
        If you set colours using the hexadecimal RGB triple prefixed by
        ``#``, you need to explicitly tell YAML that these are strings,
        surrounding the values by quotation marks.

    In case you would like to have a *g* axis plotted as a second *x* axis
    on top:

    .. code-block:: yaml

       - kind: multiplot
         type: MultiPlotter1D
         properties:
           parameters:
             g-axis: true
           filename: output.pdf

    """
class MultiPlotter1DStacked(aspecd.plotting.MultiPlotter1DStacked,
PlotterExtensions):
"""Stacked 1D plots of multiple datasets.
Convenience class taking care of 1D plots of multiple datasets.
As the class is fully inherited from ASpecD for simple usage, see the
ASpecD documentation of the :class:`aspecd.plotting.MultiPlotter1DStacked`
class for details.
Furthermore, the class inhertis all functionality from
:class:`PlotterExtensions`. See there for additional details.
Examples
--------
For convenience, a series of examples in recipe style (for details of
the recipe-driven data analysis, see :mod:`aspecd.tasks`) is given below
for how to make use of this class. Of course, all parameters settable
for the superclasses can be set as well. The examples focus each on a
single aspect.
In the simplest case, just invoke the plotter with default values:
.. code-block:: yaml
- kind: multiplot
type: MultiPlotter1DStacked
properties:
filename: output.pdf
To change the settings of each individual line (here the colour and label),
supposing you have three lines, you need to specify the properties in a
list for each of the drawings:
.. code-block:: yaml
- kind: multiplot
type: MultiPlotter1DStacked
properties:
filename: output.pdf
properties:
drawings:
- color: '#FF0000'
label: foo
- color: '#00FF00'
label: bar
- color: '#0000FF'
label: foobar
.. important::
If you set colours using the hexadecimal RGB triple prefixed by
``#``, you need to explicitly tell YAML that these are strings,
surrounding the values by quotation marks.
Sometimes you want to have horizontal "zero lines" appear for each
individual trace of the stacked plot. This can be achieved explicitly
setting the "show_zero_lines" parameter to "True" that is set to "False"
by default:
.. code-block:: yaml
- kind: multiplot
type: MultiPlotter1DStacked
properties:
filename: output.pdf
parameters:
show_zero_lines: True
In case you would like to have a *g* axis plotted as a second *x* axis on
top:
.. code-block:: yaml
- kind: multiplot
type: MultiPlotter1DStacked
properties:
parameters:
g-axis: true
filename: output.pdf
"""
| [
37811,
198,
43328,
889,
25,
29681,
605,
24612,
286,
1366,
21242,
422,
40522,
13,
198,
198,
37065,
605,
24612,
286,
269,
86,
12,
36,
4805,
1366,
389,
281,
35669,
4843,
286,
1366,
198,
20930,
13,
1675,
15570,
428,
11,
257,
2168,
286,
... | 2.869875 | 8,292 |
#!/usr/bin/env python3
from codekit.codetools import debug, error, info, warn
from codekit import codetools, pygithub
import argparse
import codekit.progressbar as pbar
import github
import re
import sys
import textwrap
def parse_args():
"""Parse command-line arguments"""
prog = 'github-tag-teams'
parser = argparse.ArgumentParser(
prog=prog,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Tag the head of the default branch of all repositories in a GitHub
org which belong to the specified team(s).
Examples:
# mininum required arguments
{prog} --org lsst --team 'DM Auxilliaries' --tag w.2015.33
# *DELETE* an existing git tag
# --debug is always recommended
{prog} \\
--debug \\
--org lsst \\
--team 'DM Auxilliaries' \\
--tag w.2015.33 \\
--delete
# "dry run" with multiple git tags
{prog} \\
--debug \\
--dry-run \\
--org 'lsst' \\
--allow-team 'DM Auxilliaries' \\
--deny-team 'DM Externals' \\
--token "$GITHUB_TOKEN" \\
--user 'sqreadmin' \\
--email 'sqre-admin@lists.lsst.org' \\
--tag 'foo' \\
--tag 'bar'
# *do not* fail if git tag already exists in any repo
{prog} \\
--debug \\
--dry-run \\
--delete \\
--org 'lsst' \\
--allow-team 'DM Auxilliaries' \\
--deny-team 'DM Externals' \\
--token "$GITHUB_TOKEN" \\
--user 'sqreadmin' \\
--email 'sqre-admin@lists.lsst.org' \\
--ignore-existing-tag \\
--tag 'v999.0.0.rc1'
Note that the access token must have access to these oauth scopes:
* read:org
* repo
The token generated by `github-auth --user` should have sufficient
permissions.
""").format(prog=prog),
epilog='Part of codekit: https://github.com/lsst-sqre/sqre-codekit'
)
parser.add_argument(
'--tag',
action='append',
required=True,
help="tag to apply to HEAD of repo (can specify several times")
parser.add_argument(
'--org',
required=True,
help="Github organization")
parser.add_argument(
'--allow-team',
action='append',
required=True,
help='git repos to be tagged MUST be a member of ONE or more of'
' these teams (can specify several times)')
parser.add_argument(
'--deny-team',
action='append',
help='git repos to be tagged MUST NOT be a member of ANY of'
' these teams (can specify several times)')
parser.add_argument('--dry-run', action='store_true')
parser.add_argument(
'--user',
help='Name of person making the tag - defaults to gitconfig value')
parser.add_argument(
'--email',
help='Email address of tagger - defaults to gitconfig value')
parser.add_argument(
'--token-path',
default='~/.sq_github_token_delete',
help='Use a token (made with github-auth) in a non-standard location')
parser.add_argument(
'--token',
default=None,
help='Literal github personal access token string')
parser.add_argument(
'-d', '--debug',
action='count',
default=codetools.debug_lvl_from_env(),
help='Debug mode (can specify several times)')
parser.add_argument('-v', '--version', action=codetools.ScmVersionAction)
delete_group = parser.add_mutually_exclusive_group()
delete_group.add_argument(
'--delete',
action='store_true',
help='*Delete* instead of create tag(s)'
' (mutually exclusive with --ignore-existing-tag)')
delete_group.add_argument(
'--ignore-existing-tag',
action='store_true',
help='Ignore git tag(s) which already exist in a repo'
' -- normally this would be an error.'
' (mutually exclusive with --delete)')
return parser.parse_args()
# XXX this should be refactored to operate similar to
# github_tag_release.check_product_tags() in that it would create a
# codekit.pygithub.TargetTag object and then compare it to an existing tag (if
# present) -- it should also return list of tags to be applied instead of only
# errors.
def check_tags(repos, tags, ignore_existing=False, fail_fast=False):
""" check if tags already exist in repos"""
debug("looking for {n} tag(s):".format(n=len(tags)))
[debug(" {t}".format(t=t)) for t in tags]
debug("in {n} repo(s):".format(n=len(repos)))
[debug(" {r}".format(r=r.full_name)) for r in repos]
# present/missing tags by repo name
present_tags = {}
absent_tags = {}
problems = []
for r in repos:
has_tags = find_tags_in_repo(r, tags)
if has_tags:
if not ignore_existing:
yikes = GitTagExistsError(
"tag(s) {tag} already exists in repos {r}".format(
tag=list(has_tags.keys()),
r=r.full_name
))
if fail_fast:
raise yikes
problems.append(yikes)
error(yikes)
present_tags[r.full_name] = {
'repo': r,
'tags': list(has_tags.values()),
}
missing_tags = [x for x in tags if x not in has_tags]
if missing_tags:
absent_tags[r.full_name] = {
'repo': r,
'need_tags': missing_tags,
}
debug(textwrap.dedent("""\
found:
{n_with:>4} repos with tag(s)
{n_none:>4} repos with no tag(s)
{errors:>4} repos with error(s)\
""").format(
n_with=len(present_tags),
n_none=len(absent_tags),
errors=len(problems),
))
return present_tags, absent_tags, problems
cached_teams = {}
def delete_refs(repo, refs, dry_run=False):
"""Note that only the ref to a tag can be explicitly removed. The tag
object will leave on until it's gargabe collected."""
assert isinstance(repo, github.Repository.Repository), type(repo)
debug("removing {n} refs from {repo}".format(
n=len(refs),
repo=repo.full_name)
)
for r in refs:
debug(" deleting {ref}".format(ref=r.ref))
if dry_run:
debug(' (noop)')
continue
r.delete()
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
14873,
988,
270,
13,
19815,
316,
10141,
1330,
14257,
11,
4049,
11,
7508,
11,
9828,
198,
6738,
14873,
988,
270,
1330,
14873,
316,
10141,
11,
12972,
12567,
198,
11748,
182... | 2.051925 | 3,428 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: github.com/metaprov/modelaapi/services/system/v1/system.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from github.com.metaprov.modelaapi.pkg.apis.infra.v1alpha1 import generated_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_infra_dot_v1alpha1_dot_generated__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='github.com/metaprov/modelaapi/services/system/v1/system.proto',
package='github.com.metaprov.modelaapi.services.servingsite.v1',
syntax='proto3',
serialized_options=b'Z5github.com/metaprov/modelaapi/services/servingsite/v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n=github.com/metaprov/modelaapi/services/system/v1/system.proto\x12\x35github.com.metaprov.modelaapi.services.servingsite.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x45github.com/metaprov/modelaapi/pkg/apis/infra/v1alpha1/generated.proto\"6\n\x16\x44ownloadLogFileRequest\x12\x0e\n\x06\x62ucket\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\")\n\x17\x44ownloadLogFileResponse\x12\x0e\n\x06result\x18\x01 \x01(\x0c\"5\n\x15\x42\x61\x63kupDatabaseRequest\x12\x0e\n\x06\x62ucket\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\"&\n\x16\x42\x61\x63kupDatabaseResponse\x12\x0c\n\x04path\x18\x01 \x01(\t\"1\n\x11\x42\x61\x63kupEtcdRequest\x12\x0e\n\x06\x62ucket\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\"\"\n\x12\x42\x61\x63kupEtcdResponse\x12\x0c\n\x04path\x18\x01 \x01(\t2\xda\x04\n\rSystemService\x12\xc6\x01\n\x0f\x44ownloadLogfile\x12M.github.com.metaprov.modelaapi.services.servingsite.v1.DownloadLogFileRequest\x1aN.github.com.metaprov.modelaapi.services.servingsite.v1.DownloadLogFileResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\x0c/v1/download\x12\xc3\x01\n\x0e\x42\x61\x63kupDatabase\x12L.github.com.metaprov.modelaapi.services.servingsite.v1.BackupDatabaseRequest\x1aM.github.com.metaprov.modelaapi.services.servingsite.v1.BackupDatabaseResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\"\x0c/v1/backupdb\x12\xb9\x01\n\nBackupEtcd\x12H.github.com.metaprov.modelaapi.services.servingsite.v1.BackupEtcdRequest\x1aI.github.com.metaprov.modelaapi.services.servingsite.v1.BackupEtcdResponse\"\x16\x82\xd3\xe4\x93\x02\x10\"\x0e/v1/backupetcdB7Z5github.com/metaprov/modelaapi/services/servingsite/v1b\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_infra_dot_v1alpha1_dot_generated__pb2.DESCRIPTOR,])
_DOWNLOADLOGFILEREQUEST = _descriptor.Descriptor(
name='DownloadLogFileRequest',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.DownloadLogFileRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='bucket', full_name='github.com.metaprov.modelaapi.services.servingsite.v1.DownloadLogFileRequest.bucket', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='path', full_name='github.com.metaprov.modelaapi.services.servingsite.v1.DownloadLogFileRequest.path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=221,
serialized_end=275,
)
_DOWNLOADLOGFILERESPONSE = _descriptor.Descriptor(
name='DownloadLogFileResponse',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.DownloadLogFileResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='github.com.metaprov.modelaapi.services.servingsite.v1.DownloadLogFileResponse.result', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=277,
serialized_end=318,
)
_BACKUPDATABASEREQUEST = _descriptor.Descriptor(
name='BackupDatabaseRequest',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupDatabaseRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='bucket', full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupDatabaseRequest.bucket', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='path', full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupDatabaseRequest.path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=320,
serialized_end=373,
)
_BACKUPDATABASERESPONSE = _descriptor.Descriptor(
name='BackupDatabaseResponse',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupDatabaseResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupDatabaseResponse.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=375,
serialized_end=413,
)
_BACKUPETCDREQUEST = _descriptor.Descriptor(
name='BackupEtcdRequest',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupEtcdRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='bucket', full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupEtcdRequest.bucket', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='path', full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupEtcdRequest.path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=415,
serialized_end=464,
)
_BACKUPETCDRESPONSE = _descriptor.Descriptor(
name='BackupEtcdResponse',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupEtcdResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='github.com.metaprov.modelaapi.services.servingsite.v1.BackupEtcdResponse.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=466,
serialized_end=500,
)
DESCRIPTOR.message_types_by_name['DownloadLogFileRequest'] = _DOWNLOADLOGFILEREQUEST
DESCRIPTOR.message_types_by_name['DownloadLogFileResponse'] = _DOWNLOADLOGFILERESPONSE
DESCRIPTOR.message_types_by_name['BackupDatabaseRequest'] = _BACKUPDATABASEREQUEST
DESCRIPTOR.message_types_by_name['BackupDatabaseResponse'] = _BACKUPDATABASERESPONSE
DESCRIPTOR.message_types_by_name['BackupEtcdRequest'] = _BACKUPETCDREQUEST
DESCRIPTOR.message_types_by_name['BackupEtcdResponse'] = _BACKUPETCDRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DownloadLogFileRequest = _reflection.GeneratedProtocolMessageType('DownloadLogFileRequest', (_message.Message,), {
'DESCRIPTOR' : _DOWNLOADLOGFILEREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.system.v1.system_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.servingsite.v1.DownloadLogFileRequest)
})
_sym_db.RegisterMessage(DownloadLogFileRequest)
DownloadLogFileResponse = _reflection.GeneratedProtocolMessageType('DownloadLogFileResponse', (_message.Message,), {
'DESCRIPTOR' : _DOWNLOADLOGFILERESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.system.v1.system_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.servingsite.v1.DownloadLogFileResponse)
})
_sym_db.RegisterMessage(DownloadLogFileResponse)
BackupDatabaseRequest = _reflection.GeneratedProtocolMessageType('BackupDatabaseRequest', (_message.Message,), {
'DESCRIPTOR' : _BACKUPDATABASEREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.system.v1.system_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.servingsite.v1.BackupDatabaseRequest)
})
_sym_db.RegisterMessage(BackupDatabaseRequest)
BackupDatabaseResponse = _reflection.GeneratedProtocolMessageType('BackupDatabaseResponse', (_message.Message,), {
'DESCRIPTOR' : _BACKUPDATABASERESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.system.v1.system_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.servingsite.v1.BackupDatabaseResponse)
})
_sym_db.RegisterMessage(BackupDatabaseResponse)
BackupEtcdRequest = _reflection.GeneratedProtocolMessageType('BackupEtcdRequest', (_message.Message,), {
'DESCRIPTOR' : _BACKUPETCDREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.system.v1.system_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.servingsite.v1.BackupEtcdRequest)
})
_sym_db.RegisterMessage(BackupEtcdRequest)
BackupEtcdResponse = _reflection.GeneratedProtocolMessageType('BackupEtcdResponse', (_message.Message,), {
'DESCRIPTOR' : _BACKUPETCDRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.system.v1.system_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.servingsite.v1.BackupEtcdResponse)
})
_sym_db.RegisterMessage(BackupEtcdResponse)
DESCRIPTOR._options = None
_SYSTEMSERVICE = _descriptor.ServiceDescriptor(
name='SystemService',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.SystemService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=503,
serialized_end=1105,
methods=[
_descriptor.MethodDescriptor(
name='DownloadLogfile',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.SystemService.DownloadLogfile',
index=0,
containing_service=None,
input_type=_DOWNLOADLOGFILEREQUEST,
output_type=_DOWNLOADLOGFILERESPONSE,
serialized_options=b'\202\323\344\223\002\016\022\014/v1/download',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='BackupDatabase',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.SystemService.BackupDatabase',
index=1,
containing_service=None,
input_type=_BACKUPDATABASEREQUEST,
output_type=_BACKUPDATABASERESPONSE,
serialized_options=b'\202\323\344\223\002\016\"\014/v1/backupdb',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='BackupEtcd',
full_name='github.com.metaprov.modelaapi.services.servingsite.v1.SystemService.BackupEtcd',
index=2,
containing_service=None,
input_type=_BACKUPETCDREQUEST,
output_type=_BACKUPETCDRESPONSE,
serialized_options=b'\202\323\344\223\002\020\"\016/v1/backupetcd',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_SYSTEMSERVICE)
DESCRIPTOR.services_by_name['SystemService'] = _SYSTEMSERVICE
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
33084,
13,
785,
14,
4164,
499,
18657,
14,
4666,
10304,
15042,
14,
30416,
... | 2.494795 | 5,956 |
import asyncio
from typing import Any, List, Optional
from util.iterables import reduce
| [
11748,
30351,
952,
198,
6738,
19720,
1330,
4377,
11,
7343,
11,
32233,
198,
198,
6738,
7736,
13,
2676,
2977,
1330,
4646,
628
] | 4.090909 | 22 |
"""
Determine the spatial relation between two nD-boxes
Based on http://sfclib.github.io
"""
from itertools import product
from pysfc.ndgeom import ndbox
def relate(rect, qrt):
"""
Spatial relationship between two nd-boxes.
Outcome can be:
0: equal
1: contains
2: intersects
-1: no overlap
"""
# how many dimensions to check?
dims = rect.dims
# equal, all coordinates are equal
ncmp = 1
for d in range(dims):
ncmp &= rect.lo[d] == qrt.lo[d] and rect.hi[d] == qrt.hi[d]
if ncmp:
return 0
# fully contains, rect fully contains qrt
ncmp = 1
for d in range(dims):
ncmp &= rect.lo[d] <= qrt.lo[d] and rect.hi[d] >= qrt.hi[d]
if ncmp:
return 1
# intersects, the two nd-boxes interact
# (either on the boundary or internally)
ncmp = 1
for d in range(dims):
ncmp &= rect.lo[d] < qrt.hi[d] and rect.hi[d] > qrt.lo[d]
if ncmp:
return 2
# no overlap
return -1
if __name__ == "__main__":
_test()
| [
37811,
198,
35,
2357,
3810,
262,
21739,
8695,
1022,
734,
299,
35,
12,
29305,
198,
198,
15001,
319,
2638,
1378,
28202,
565,
571,
13,
12567,
13,
952,
198,
37811,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
6738,
279,
893,
16072,
13,
... | 2.186858 | 487 |
# -*- coding: UTF-8 -*-
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from models.BaseModel import SequentialModel
from utils import layers
""" Encoder Layer """
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
299,
32152... | 3.267606 | 71 |
from gi.repository import Gtk
from gi.repository import Pango
from html.parser import HTMLParser
| [
6738,
308,
72,
13,
260,
1930,
37765,
1330,
402,
30488,
198,
6738,
308,
72,
13,
260,
1930,
37765,
1330,
350,
14208,
198,
198,
6738,
27711,
13,
48610,
1330,
11532,
46677,
198
] | 3.16129 | 31 |
import io
import re
from setuptools import setup, find_packages
from mylib import __version__
readme = read('README.rst')
# вычищаем локальные версии из файла requirements (согласно PEP440)
requirements = '\n'.join(
re.findall(r'^([^\s^+]+).*$',
read('requirements.txt'),
flags=re.MULTILINE))
setup(
# metadata
name='mylib',
version=__version__,
license='MIT',
author='Andrey Grabovoy',
author_email="grabovoy.av@phystech.edu",
description='mylib, python package',
long_description=readme,
url='https://github.com/Intelligent-Systems-Phystech/ProjectTemplate',
# options
packages=find_packages(),
install_requires=requirements,
)
| [
11748,
33245,
198,
11748,
302,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
6738,
616,
8019,
1330,
11593,
9641,
834,
628,
198,
961,
1326,
796,
1100,
10786,
15675,
11682,
13,
81,
301,
11537,
198,
2,
12466,
1... | 2.23676 | 321 |
# minimal pyre typing stubs for lark
from typing import Any
| [
2,
10926,
12972,
260,
19720,
17071,
82,
329,
300,
668,
198,
198,
6738,
19720,
1330,
4377,
198
] | 3.588235 | 17 |
"""Kwaliteitsaanpak PPTX-presentation builder."""
import pathlib
import shutil
from lxml import etree
from pptx import Presentation
from pptx.util import Inches, Pt
import xmltags
from custom_types import TreeBuilderAttributes
from .builder import Builder
class PptxBuilder(Builder):
"""Kwaliteitsaanpak presentation builder."""
# Slide layouts. These are specific for the reference file
TITLE_SLIDE = 0
BULLET_SLIDE = 1
CONTENT_SLIDE = 4
CHAPTER_SLIDE = 16
def add_slide(self, slide_layout_index: int, title: str) -> None:
"""Add a new slide with the given title to the presentation."""
slide_layout = self.presentation.slide_layouts[slide_layout_index]
self.current_slide = self.presentation.slides.add_slide(slide_layout)
self.current_slide.shapes.title.text = title
def add_text_box(self):
"""Add a text box to the current slide."""
text_box = self.current_slide.shapes.add_textbox(Inches(0.7), Inches(1.6), Inches(12), Inches(6))
text_box.text_frame.word_wrap = True
def remove_bullet(self, paragraph_index: int):
"""Remove bullets from the paragraph."""
no_bullet = etree.Element("{http://schemas.openxmlformats.org/drawingml/2006/main}buNone")
self.current_slide.shapes[1].text_frame.paragraphs[paragraph_index]._pPr.insert(0, no_bullet)
def in_appendix(self) -> bool:
"""Return whether the current section is an appendix."""
return self.in_element(xmltags.SECTION, {xmltags.SECTION_IS_APPENDIX: "y"})
def end_document(self) -> None:
"""Override to save the presentation."""
self.presentation.save(self.filename)
| [
37811,
42,
16783,
578,
896,
28340,
41091,
350,
11571,
55,
12,
25579,
341,
27098,
526,
15931,
198,
198,
11748,
3108,
8019,
198,
11748,
4423,
346,
198,
198,
6738,
300,
19875,
1330,
2123,
631,
198,
6738,
279,
457,
87,
1330,
21662,
341,
1... | 2.611455 | 646 |
# -*- coding: utf-8 -*-
"""
Example of script.
This script sweeps values of gamma at a fixed value of b.
Author: Jean-Gabriel Young <info@jgyoung.ca>
"""
if __name__ == '__main__':
# Relative paths
import sys
from os import path, remove
sys.path.insert(0, path.join(path.dirname(path.realpath(__file__)),
"tools/"))
# Import
import consistency_check as cc
import sys
import time
# Global settings
method_params = dict()
model_params = dict()
outfile = "test.txt"
method = "snowball_sampling" # other options : OD, snowball_sampling
b = 1 # any float in [0, 1]
T = 50 # 50 edges
if method == "snowball_sampling":
method_params["num_samples"] = 10000
method_params["bias_exponent"] = 1
seed = 42
# Parameter grid
method_params["b"] = b
model_params["b"] = b
gamma_range = [-10, -7.5, -5, -3,
-2, -1, -0.5, -0.25, -0.05,
0, 0.05, 0.25, 0.50, 0.75, 0.95,
1, 1.05, 1.25, 1.50, 1.75, 1.95,
2]
# Output header
with open(outfile, 'a') as f:
print("#model_params\tT\tmethod\tmethod_params\tscore", file=f)
# Sweep gammas.
for gamma in gamma_range:
print("gamma=", gamma)
model_params["gamma"] = gamma
method_params["gamma"] = gamma
tmp_path = "/tmp/varying_kernell_" +\
str(int(time.time())) + ".txt"
try:
scores = cc.run("generalized_gn",
model_params,
T,
method,
method_params,
1, # num_iter,
tmp_path=tmp_path,
seed=seed,
verbose=False,
simplified_interface=True)
for s in scores:
with open(outfile, 'a') as f:
print(model_params,
T,
method,
method_params,
s,
sep='\t',
file=f)
except Exception as e:
print("#", str(e)) # log exceptions
remove(tmp_path)
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
16281,
286,
4226,
13,
198,
198,
1212,
4226,
46778,
3815,
286,
34236,
379,
257,
5969,
1988,
286,
275,
13,
198,
198,
13838,
25,
11320,
12,
46079,
11719,
6960,
... | 1.742984 | 1,354 |
/home/runner/.cache/pip/pool/f2/54/dc/f36ec238d67dc3f69bdfc25685140fdb6eb8c2bdc13acf2e5a171e4ab6 | [
14,
11195,
14,
16737,
11757,
23870,
14,
79,
541,
14,
7742,
14,
69,
17,
14,
4051,
14,
17896,
14,
69,
2623,
721,
23721,
67,
3134,
17896,
18,
69,
3388,
65,
7568,
66,
1495,
35978,
15187,
69,
9945,
21,
1765,
23,
66,
17,
17457,
66,
14... | 1.714286 | 56 |
import pandas as pd
import numpy as np
from .dataMaker import *
from pandas.io.parsers import read_csv
from .BOAmaster.BOAmodel import *
from collections import defaultdict
import subprocess
import sys
import os
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
7890,
48890,
1330,
1635,
198,
6738,
19798,
292,
13,
952,
13,
79,
945,
364,
1330,
1100,
62,
40664,
198,
6738,
764,
8202,
5840,
1603,
13,
8202,
32,
1... | 3.359375 | 64 |
from AmcCarrierCore.AppHardware.AmcCryo._amcCryoCore import *
from AmcCarrierCore.AppHardware.AmcCryo._amcCryoCtrl import *
| [
6738,
1703,
66,
9914,
5277,
14055,
13,
4677,
49865,
13,
5840,
66,
26677,
78,
13557,
321,
66,
26677,
78,
14055,
1330,
1635,
198,
6738,
1703,
66,
9914,
5277,
14055,
13,
4677,
49865,
13,
5840,
66,
26677,
78,
13557,
321,
66,
26677,
78,
... | 2.695652 | 46 |
"""
======
C test
======
:filename=2:title=3:lines=1:filesize=1:
"""
print('foo')
| [
37811,
198,
50155,
198,
34,
1332,
198,
50155,
198,
198,
25,
34345,
28,
17,
25,
7839,
28,
18,
25,
6615,
28,
16,
25,
16624,
1096,
28,
16,
25,
198,
37811,
198,
198,
4798,
10786,
21943,
11537,
198
] | 2.27027 | 37 |
import tensorflow as tf
import numpy as np
import os
import math
import glob
from scipy import ndimage, misc
from data.prepare_cifar import read_h5
# you need to change this to your data directory
train_dir = 'data/train/'
def shuffle_data(data, labels):
""" Shuffle data and labels.
Input:
data: B,... numpy array
label: B, numpy array
Return:
shuffled data, label and shuffle indices
"""
idx = np.arange(len(labels))
np.random.shuffle(idx)
return data[idx, ...], labels[idx], idx
if __name__ == "__main__":
f = os.path.join("data/quality_0", "train.h5")
data, label = read_h5(f)
print (data.value.shape, label.value.shape)
| [
11748,
11192,
273,
11125,
355,
48700,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
28686,
201,
198,
11748,
10688,
201,
198,
11748,
15095,
201,
198,
6738,
629,
541,
88,
1330,
299,
67,
9060,
11,
12747,
201,
198,
6738,
1366,
... | 2.287037 | 324 |
frase = 'Curso em Vídeo Python'
print('Curso' in frase)
#Verifica se a palavra 'Curso' esta dentro da string. | [
8310,
589,
796,
705,
26628,
568,
795,
569,
8836,
2934,
78,
11361,
6,
198,
4798,
10786,
26628,
568,
6,
287,
1216,
589,
8,
198,
2,
13414,
811,
64,
384,
257,
6340,
615,
430,
705,
26628,
568,
6,
1556,
64,
18794,
305,
12379,
4731,
13
] | 2.477273 | 44 |
import unittest
import numpy
import chainer
from chainer import configuration
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer.graph_optimizations.static_graph import static_graph
import chainer.links as L
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float32],
'use_static_graph': [True, False],
}))
class MNISTStaticMLP(chainer.Chain):
"""This is the network from the MNIST example.
Static version.
"""
@static_graph(verbosity_level=2)
class MNISTDynamicMLP(chainer.Chain):
"""This is the network from the MNIST example.
Dynamic version.
"""
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
}))
testing.run_module(__name__, __file__)
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
198,
198,
11748,
6333,
263,
198,
6738,
6333,
263,
1330,
8398,
198,
6738,
6333,
263,
1330,
269,
15339,
198,
11748,
6333,
263,
13,
12543,
2733,
355,
376,
198,
6738,
6333,
263,
1330,
313... | 2.871233 | 365 |
from ralph.settings import * # noqa
def only_true(request):
    """Callback for django-debug-toolbar: always show the toolbar.

    The incoming request is deliberately ignored; in dev settings the
    toolbar should be visible for every request.
    """
    return True
# Development-only settings: debug mode, debug toolbar, and optional profiling.
DEBUG = True
# Extend the base INSTALLED_APPS (star-imported from ralph.settings) with dev tools.
INSTALLED_APPS = INSTALLED_APPS + (
    'debug_toolbar',
    'django_extensions',
)
MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
# Always show the toolbar: the callback `only_true` above ignores the request.
DEBUG_TOOLBAR_CONFIG = {
    'SHOW_TOOLBAR_CALLBACK': "%s.only_true" % __name__,
}
ROOT_URLCONF = 'ralph.urls.dev'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
                'ralph.lib.template.loaders.AppTemplateLoader',
            ],
        },
    },
]
# Force DEBUG-level logging to the console for every configured logger.
LOGGING['handlers']['console']['level'] = 'DEBUG'
for logger in LOGGING['loggers']:
    LOGGING['loggers'][logger]['level'] = 'DEBUG'
    LOGGING['loggers'][logger]['handlers'].append('console')
# Opt-in request profiling via django-silk when RALPH_PROFILING is set.
if bool_from_env('RALPH_PROFILING'):
    SILKY_PYTHON_PROFILER = True
    MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES + (
        'silk.middleware.SilkyMiddleware',
    )
    INSTALLED_APPS = INSTALLED_APPS + (
        'silk',
    )
    # Profile a specific admin view without decorating its source.
    SILKY_DYNAMIC_PROFILING = [
        {
            'module': 'ralph.data_center.admin',
            'function': 'DataCenterAssetAdmin.changelist_view'
        },
    ]
| [
6738,
374,
17307,
13,
33692,
1330,
1635,
220,
1303,
645,
20402,
628,
198,
4299,
691,
62,
7942,
7,
25927,
2599,
198,
220,
220,
220,
705,
7061,
1890,
42625,
14208,
14257,
50149,
2637,
7061,
198,
220,
220,
220,
1441,
6407,
198,
198,
3053... | 2.044888 | 802 |
#!/usr/bin/env python3
import errno
import struct
import json
import socket
import os
import re
import subprocess
from enum import Enum
from collections import deque
from threading import Timer, Lock
import time
class CommandReply(_ReplyType):
"""
Info about a command that was executed with :func:`Connection.command`.
"""
@property
def error(self):
"""
A human-readable error message
:type: str
"""
return self.__getattr__('error')
@property
def success(self):
"""
Whether the command executed successfully
:type: bool
"""
return self.__getattr__('success')
class VersionReply(_ReplyType):
"""
Info about the version of the running i3 instance.
"""
@property
def major(self):
"""
The major version of i3.
:type: int
"""
return self.__getattr__('major')
@property
def minor(self):
"""
The minor version of i3.
:type: int
"""
return self.__getattr__('minor')
@property
def patch(self):
"""
The patch version of i3.
:type: int
"""
return self.__getattr__('patch')
@property
def human_readable(self):
"""
A human-readable version of i3 containing the precise git version,
build date, and branch name.
:type: str
"""
return self.__getattr__('human_readable')
@property
def loaded_config_file_name(self):
"""
The current config path.
:type: str
"""
return self.__getattr__('loaded_config_file_name')
class BarConfigReply(_ReplyType):
"""
This can be used by third-party workspace bars (especially i3bar, but
others are free to implement compatible alternatives) to get the bar block
configuration from i3.
Not all properties are documented here. A complete list of properties of
this reply type can be found `here
<http://i3wm.org/docs/ipc.html#_bar_config_reply>`_.
"""
@property
def colors(self):
"""
Contains key/value pairs of colors. Each value is a color code in hex,
formatted #rrggbb (like in HTML).
:type: dict
"""
return self.__getattr__('colors')
@property
def id(self):
"""
The ID for this bar.
:type: str
"""
return self.__getattr__('id')
@property
def mode(self):
"""
Either ``dock`` (the bar sets the dock window type) or ``hide`` (the
bar does not show unless a specific key is pressed).
:type: str
"""
return self.__getattr__('mode')
@property
def position(self):
"""
Either ``bottom`` or ``top``.
:type: str
"""
return self.__getattr__('position')
@property
def status_command(self):
"""
Command which will be run to generate a statusline. Each line on
stdout of this command will be displayed in the bar. At the moment, no
formatting is supported.
:type: str
"""
return self.__getattr__('status_command')
@property
def font(self):
"""
The font to use for text on the bar.
:type: str
"""
return self.__getattr__('font')
# this is for compatability with i3ipc-glib
class Connection(object):
"""
This class controls a connection to the i3 ipc socket. It is capable of
executing commands, subscribing to window manager events, and querying the
window manager for information about the current state of windows,
workspaces, outputs, and the i3bar. For more information, see the `ipc
documentation <http://i3wm.org/docs/ipc.html>`_
:param str socket_path: The path for the socket to the current i3 session.
In most situations, you will not have to supply this yourself. Guessing
first happens by the environment variable :envvar:`I3SOCK`, and, if this is
empty, by executing :command:`i3 --get-socketpath`.
:raises Exception: If the connection to ``i3`` cannot be established, or when
the connection terminates.
"""
MAGIC = 'i3-ipc' # safety string for i3-ipc
_chunk_size = 1024 # in bytes
_timeout = 0.5 # in seconds
_struct_header = '=%dsII' % len(MAGIC.encode('utf-8'))
_struct_header_size = struct.calcsize(_struct_header)
def _pack(self, msg_type, payload):
"""
Packs the given message type and payload. Turns the resulting
message into a byte string.
"""
pb = payload.encode('utf-8')
s = struct.pack('=II', len(pb), msg_type.value)
return self.MAGIC.encode('utf-8') + s + pb
def _unpack(self, data):
"""
Unpacks the given byte string and parses the result from JSON.
Returns None on failure and saves data into "self.buffer".
"""
msg_magic, msg_length, msg_type = self._unpack_header(data)
msg_size = self._struct_header_size + msg_length
# XXX: Message shouldn't be any longer than the data
payload = data[self._struct_header_size:msg_size]
return payload.decode('utf-8', 'replace')
def _unpack_header(self, data):
"""
Unpacks the header of given byte string.
"""
return struct.unpack(self._struct_header,
data[:self._struct_header_size])
def _recv_robust(self, sock, size):
"""
Receive size from sock, and retry if the recv() call was interrupted.
(this is only required for python2 compatability)
"""
while True:
try:
return sock.recv(size)
except socket.error as e:
if e.errno != errno.EINTR:
raise
    def _ipc_send(self, sock, message_type, payload):
        '''
        Send one packed message over ``sock`` and block for the reply,
        returning the reply payload.
        NOTE: this is not thread safe
        '''
        sock.sendall(self._pack(message_type, payload))
        # _ipc_recv (defined elsewhere in this class, not in this excerpt)
        # presumably returns (payload, msg_type); the type is discarded here.
        data, msg_type = self._ipc_recv(sock)
        return data
    def command(self, payload):
        """
        Send a command to i3. See the `list of commands
        <http://i3wm.org/docs/userguide.html#_list_of_commands>`_ in the user
        guide for available commands. Pass the text of the command to execute
        as the first arguments. This is essentially the same as using
        ``i3-msg`` or an ``exec`` block in your i3 config to control the
        window manager.
        :rtype: List of :class:`CommandReply` or None if the command causes i3
        to restart or exit and does not give a reply.
        """
        # self.message is defined elsewhere in this class (not in this excerpt).
        data = self.message(MessageType.COMMAND, payload)
        # An empty/falsy reply means i3 restarted or exited before answering.
        if data:
            return json.loads(data, object_hook=CommandReply)
        else:
            return None
def get_version(self):
"""
Get json encoded information about the running i3 instance. The
equivalent of :command:`i3-msg -t get_version`. The return
object exposes the following attributes :attr:`~VersionReply.major`,
:attr:`~VersionReply.minor`, :attr:`~VersionReply.patch`,
:attr:`~VersionReply.human_readable`, and
:attr:`~VersionReply.loaded_config_file_name`.
Example output:
.. code:: json
{'patch': 0,
'human_readable': '4.12 (2016-03-06, branch "4.12")',
'major': 4,
'minor': 12,
'loaded_config_file_name': '/home/joep/.config/i3/config'}
:rtype: VersionReply
"""
data = self.message(MessageType.GET_VERSION, '')
return json.loads(data, object_hook=VersionReply)
def get_bar_config(self, bar_id=None):
"""
Get the configuration of a single bar. Defaults to the first if none is
specified. Use :meth:`get_bar_config_list` to obtain a list of valid
IDs.
:rtype: BarConfigReply
"""
if not bar_id:
bar_config_list = self.get_bar_config_list()
if not bar_config_list:
return None
bar_id = bar_config_list[0]
data = self.message(MessageType.GET_BAR_CONFIG, bar_id)
return json.loads(data, object_hook=BarConfigReply)
def get_bar_config_list(self):
"""
Get list of bar IDs as active in the connected i3 session.
:rtype: List of strings that can be fed as ``bar_id`` into
:meth:`get_bar_config`.
"""
data = self.message(MessageType.GET_BAR_CONFIG, '')
return json.loads(data)
def get_outputs(self):
"""
Get a list of outputs. The equivalent of :command:`i3-msg -t get_outputs`.
:rtype: List of :class:`OutputReply`.
Example output:
.. code:: python
>>> i3ipc.Connection().get_outputs()
[{'name': 'eDP1',
'primary': True,
'active': True,
'rect': {'width': 1920, 'height': 1080, 'y': 0, 'x': 0},
'current_workspace': '2'},
{'name': 'xroot-0',
'primary': False,
'active': False,
'rect': {'width': 1920, 'height': 1080, 'y': 0, 'x': 0},
'current_workspace': None}]
"""
data = self.message(MessageType.GET_OUTPUTS, '')
return json.loads(data, object_hook=OutputReply)
def get_workspaces(self):
"""
Get a list of workspaces. Returns JSON-like data, not a Con instance.
You might want to try the :meth:`Con.workspaces` instead if the info
contained here is too little.
:rtype: List of :class:`WorkspaceReply`.
"""
data = self.message(MessageType.GET_WORKSPACES, '')
return json.loads(data, object_hook=WorkspaceReply)
def get_tree(self):
"""
Returns a :class:`Con` instance with all kinds of methods and selectors.
Start here with exploration. Read up on the :class:`Con` stuffs.
:rtype: Con
"""
data = self.message(MessageType.GET_TREE, '')
return Con(json.loads(data), None, self)
def get_marks(self):
"""
Get a list of the names of all currently set marks.
:rtype: list
"""
data = self.message(MessageType.GET_MARKS, '')
return json.loads(data)
def get_binding_modes(self):
"""
Returns all currently configured binding modes.
:rtype: list
"""
data = self.message(MessageType.GET_BINDING_MODES, '')
return json.loads(data)
def get_config(self):
"""
Currently only contains the "config" member, which is a string
containing the config file as loaded by i3 most recently.
:rtype: ConfigReply
"""
data = self.message(MessageType.GET_CONFIG, '')
return json.loads(data, object_hook=ConfigReply)
def send_tick(self, payload=""):
"""
Sends a tick event with the specified payload. After the reply was
received, the tick event has been written to all IPC connections which
subscribe to tick events.
:rtype: TickReply
"""
data = self.message(MessageType.SEND_TICK, payload)
return json.loads(data, object_hook=TickReply)
class Con(object):
"""
The container class. Has all internal information about the windows,
outputs, workspaces and containers that :command:`i3` manages.
.. attribute:: id
The internal ID (actually a C pointer value within i3) of the container.
You can use it to (re-)identify and address containers when talking to
i3.
.. attribute:: name
The internal name of the container. ``None`` for containers which
are not leaves. The string `_NET_WM_NAME <://specifications.freedesktop.org/wm-spec/1.3/ar01s05.html#idm140238712347280>`_
for windows. Read-only value.
.. attribute:: type
The type of the container. Can be one of ``root``, ``output``, ``con``,
``floating_con``, ``workspace`` or ``dockarea``.
.. attribute:: window_title
The window title.
.. attribute:: window_class
The window class.
.. attribute:: instance
The instance name of the window class.
.. attribute:: gaps
The inner and outer gaps devation from default values.
.. attribute:: border
The type of border style for the selected container. Can be either
``normal``, ``none`` or ``1pixel``.
.. attribute:: current_border_width
Returns amount of pixels for the border. Readonly value. See `i3's user
manual <https://i3wm.org/docs/userguide.html#_border_style_for_new_windows>_
for more info.
.. attribute:: layout
Can be either ``splith``, ``splitv``, ``stacked``, ``tabbed``, ``dockarea`` or
``output``.
:rtype: string
.. attribute:: percent
The percentage which this container takes in its parent. A value of
null means that the percent property does not make sense for this
container, for example for the root container.
:rtype: float
.. attribute:: rect
The absolute display coordinates for this container. Display
coordinates means that when you have two 1600x1200 monitors on a single
X11 Display (the standard way), the coordinates of the first window on
the second monitor are ``{ "x": 1600, "y": 0, "width": 1600, "height":
1200 }``.
.. attribute:: window_rect
The coordinates of the *actual client window* inside the container,
without the window decorations that may also occupy space.
.. attribute:: deco_rect
The coordinates of the window decorations within a container. The
coordinates are relative to the container and do not include the client
window.
.. attribute:: geometry
The original geometry the window specified when i3 mapped it. Used when
switching a window to floating mode, for example.
.. attribute:: window
The X11 window ID of the client window.
.. attribute:: focus
A list of container ids describing the focus situation within the current
container. The first element refers to the container with (in)active focus.
.. attribute:: focused
Whether or not the current container is focused. There is only
one focused container.
.. attribute:: visible
Whether or not the current container is visible.
.. attribute:: num
Optional attribute that only makes sense for workspaces. This allows
for arbitrary and changeable names, even though the keyboard
shortcuts remain the same. See `the i3wm docs <https://i3wm.org/docs/userguide.html#_named_workspaces>`_
for more information
.. attribute:: urgent
Whether the window or workspace has the `urgent` state.
:returns: :bool:`True` or :bool:`False`.
.. attribute:: floating
Whether the container is floating or not. Possible values are
"auto_on", "auto_off", "user_on" and "user_off"
..
command <-- method
command_children <-- method
deco_rect IPC
descendents
find_by_id
find_by_role
find_by_window
find_classed
find_focused
find_fullscreen
find_marked
find_named
find_titled
floating
floating_nodes
fullscreen_mode
gaps
leaves
marks
nodes
orientation
parent
props
root
scratchpad
scratchpad_state
window_class
window_instance
window_rect
window_role
workspace
workspaces
"""
def __iter__(self):
"""
Iterate through the descendents of this node (breadth-first tree traversal)
"""
queue = deque(self.nodes)
queue.extend(self.floating_nodes)
while queue:
con = queue.popleft()
yield con
queue.extend(con.nodes)
queue.extend(con.floating_nodes)
def root(self):
"""
Retrieves the root container.
:rtype: :class:`Con`.
"""
if not self.parent:
return self
con = self.parent
while con.parent:
con = con.parent
return con
def descendents(self):
"""
Retrieve a list of all containers that delineate from the currently
selected container. Includes any kind of container.
:rtype: List of :class:`Con`.
"""
return [c for c in self]
def leaves(self):
"""
Retrieve a list of windows that delineate from the currently
selected container. Only lists client windows, no intermediate
containers.
:rtype: List of :class:`Con`.
"""
leaves = []
for c in self:
if not c.nodes and c.type == "con" and c.parent.type != "dockarea":
leaves.append(c)
return leaves
    def command(self, command):
        """
        Run a command on the currently active container.
        :rtype: CommandReply
        """
        # Scope the command to this container with the [con_id=...] criterion.
        # self._conn is set elsewhere (not in this excerpt) — presumably the
        # owning Connection instance.
        return self._conn.command('[con_id="{}"] {}'.format(self.id, command))
    def command_children(self, command):
        """
        Run a command on each direct child of the currently selected
        container.

        All child commands are batched into a single i3 message; the reply
        from i3 is discarded, so this returns ``None``.
        """
        if not len(self.nodes):
            return
        commands = []
        for c in self.nodes:
            commands.append('[con_id="{}"] {};'.format(c.id, command))
        self._conn.command(' '.join(commands))
    def workspaces(self):
        """
        Retrieve a list of currently active workspaces.
        :rtype: List of :class:`Con`.
        """
        workspaces = []
        # NOTE(review): `collect_workspaces` is not defined in this excerpt —
        # presumably a nested helper (elided here) that walks the tree from the
        # root and appends workspace containers into `workspaces`; confirm
        # against the full source.
        collect_workspaces(self.root())
        return workspaces
def find_focused(self):
"""
Finds the focused container.
:rtype class Con:
"""
try:
return next(c for c in self if c.focused)
except StopIteration:
return None
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
11454,
3919,
198,
11748,
2878,
198,
11748,
33918,
198,
11748,
17802,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
850,
14681,
198,
6738,
33829,
1330,
2039,
388,
198,
6... | 2.399322 | 7,668 |
#!/usr/bin/env python3
import logging
import os
import os.path as path
import re
# process templates
import chocolatey_packages
# Absolute directory containing this script; templates are resolved against it.
SCRIPT_PATH = path.dirname(path.realpath(__file__))
# NOTE(review): `preprocessor` is not defined in this excerpt — presumably an
# elided helper that renders the .template.adoc into the .adoc output.
preprocessor(SCRIPT_PATH, 'chocolatey.template.adoc', 'chocolatey.adoc')
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
3108,
198,
11748,
302,
628,
198,
198,
2,
1429,
24019,
198,
11748,
11311,
88,
62,
43789,
198,
6173,
46023,
62,
342... | 3.035714 | 84 |
from rest_framework import routers
from django.conf.urls import include, url
# DRF router with no registered viewsets yet; registrations would go here.
router = routers.DefaultRouter()
urlpatterns = [
    # Mount the router's generated URLs at the site root.
    url(r'^/', include(router.urls)),
]
| [
6738,
1334,
62,
30604,
1330,
41144,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
198,
472,
353,
796,
41144,
13,
19463,
49,
39605,
3419,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
... | 2.847458 | 59 |
import sys
sys.path.append('..')
import numpy as np
from Auraliser import Signal
if __name__ == '__main__':
main() | [
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
492,
11537,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
317,
1523,
5847,
1330,
26484,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
... | 2.695652 | 46 |
from django.shortcuts import render
from django.views import generic
from .models import Zoo, Exhibit, Animal
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
33571,
1330,
14276,
198,
198,
6738,
764,
27530,
1330,
21980,
11,
35880,
11,
13792,
198,
198,
2,
13610,
534,
5009,
994,
13,
198
] | 3.833333 | 36 |
import cv2
import numpy as np
def overlay_rect(im, polygons):
    """[This function overlays rectangles defined in polygons [list of x y coords] on an image]
    Args:
        im ([np.ndarray]): [image]
        polygons ([list]): [x y coords of rectangles]
    Returns:
        [np.ndarray]: [image with rectangles drawn]
    """
    # create polygon around the detected text
    # True closes each polygon; (0,255,255) is yellow in OpenCV's BGR order.
    # Draws in place on `im` and also returns it.
    cv2.polylines(im, polygons, True, (0,255,255))
    return im
def overlay_text(im, boxes, texts):
    """Render each OCR-detected string at its rectangle's first (x, y) corner.

    Args:
        im (np.ndarray): image to draw on (modified in place).
        boxes (list): per-detection coordinate lists; only the first x, y
            pair of each box is used as the text anchor.
        texts (list): OCR strings, parallel to ``boxes``.

    Returns:
        np.ndarray: the same image with the text rendered.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    # zip keeps boxes and texts in lockstep instead of indexing via
    # range(len(texts)); it also stops safely at the shorter of the two lists
    # (the original raised IndexError if boxes was shorter than texts).
    for box, text in zip(boxes, texts):
        anchor = (int(box[0]), int(box[1]))
        cv2.putText(im, text, anchor, font, 4, (255, 255, 255), 2, cv2.LINE_AA)
    return im
def encode_bboxes(boxes):
    """Convert flat Azure OCR rectangles into cv2.polylines vertex arrays.

    Args:
        boxes (list): flat [x0, y0, x1, y1, ...] coordinate lists, one per box.

    Returns:
        np.ndarray: int32 array of [x, y] vertex pairs per box, formatted for
        use with a cv2.polylines call.
    """
    grouped = [
        [[box[i], box[i + 1]] for i in range(0, len(box), 2)]
        for box in boxes
    ]
    return np.array(grouped, dtype=np.int32)
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4299,
33345,
62,
2554,
7,
320,
11,
25052,
684,
2599,
198,
220,
220,
220,
13538,
17912,
1212,
2163,
12893,
592,
13621,
27787,
5447,
287,
25052,
684,
685,
4868,
286,
2124... | 2.436391 | 676 |
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser(description="Given a set of MCMC samples of T, log L, use scipy.kde to approximate the density field.")
parser.add_argument("--config", default="config.yaml", help="The config file specifying everything we need.")
args = parser.parse_args()
import yaml
f = open(args.config)
config = yaml.load(f)
f.close()
# Take the model grids, which provide discrete sets of (temp, R, M_star, and Age) and resample all of these to uniform grids.
import numpy as np
from scipy.stats import gaussian_kde
# import multiprocessing as mp
from emcee import EnsembleSampler
ndim = 2
nwalkers = config["walkers_per_dim"] * ndim
age_low, age_high = config["age_guess"]
mass_low, mass_high = config["mass_guess"]
p0 = np.array([np.random.uniform(age_low, age_high, nwalkers),
np.random.uniform(mass_low, mass_high, nwalkers)]).T
# Load the samples from the CWD
samples = np.load(config["TlL_samples"]) # TlL.npy
cutoff = int(config["cutoff"]) #8000
# Otherwise we probably have too many
if len(samples) > cutoff:
samples = samples[:cutoff].T
else:
samples = samples.T
# temps, lls = samples
kernel = gaussian_kde(samples)
from ScottiePippen.grids import model_dict
for grid_name in config["grids"]:
print(grid_name)
grid = model_dict[grid_name](**config[grid_name])
sampler = EnsembleSampler(nwalkers, ndim, lnprob, args=[grid])
pos, prob, state = sampler.run_mcmc(p0, config["samples"])
# Save the actual chain of samples
np.save(config["outfile"].format(grid_name), sampler.chain)
# Profile code here
# import cProfile
# import pstats
#
# def profile_code():
# lnprob(np.array([3.0, 0.6]))
#
# cProfile.run("profile_code()", "prof")
#
# def display_stats(pfile):
# p = pstats.Stats(pfile)
# p.sort_stats('cumulative').print_stats(.2)
# p.sort_stats('time').print_stats(.2)
#
# display_stats('prof')
#
#
# import sys
# sys.exit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
2625,
15056,
257,
900,
286,
13122,
9655,
8405,
286,
309,
11,
2604,
406,
11,
779,
629,
541,... | 2.648248 | 742 |
#!/usr/bin/env python
import argparse
cli = argparse.ArgumentParser()
subparsers = cli.add_subparsers(dest="subcommand")
def argument(*name_or_flags, **kwargs):
    """Bundle positional flag names and keyword options into the
    (flags, options) pair consumed by the subcommand decorator.
    """
    flags = list(name_or_flags)
    return (flags, kwargs)
def subcommand(args=[], extend_args_func=None, parent=subparsers):
    """Decorator to define a new subcommand in a sanity-preserving way.
    The function will be stored in the ``func`` variable when the parser
    parses arguments so that it can be called directly like so::
        args = cli.parse_args()
        args.func(args)
    Usage example::
        @subcommand([argument("-d", help="Debug mode", action="store_true")])
        def subcommand(args):
            print(args)
    Then on the command line::
        $ python cli.py subcommand -d
    """
    # NOTE(review): `decorator` must be a nested function elided from this
    # excerpt; it presumably registers the wrapped function on `parent` using
    # `args` / `extend_args_func` — confirm against the full source.
    # NOTE(review): the mutable default `args=[]` is safe only if it is never
    # mutated by the elided decorator body.
    return decorator
@subcommand([argument("name", help="hello, name!")])
@subcommand(extend_args_func=conflict_group)
def test(foo, bar):
"""
This is test command for extend args function.
"""
print(foo, bar)
# multi-level subcommand
@subcommand()
test2_subparser = test2.add_subparsers(dest='subcommand')
@subcommand([argument('--foo')], parent=test2_subparser)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
628,
198,
44506,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
7266,
79,
945,
364,
796,
537,
72,
13,
2860,
62,
7266,
79,
945,
364,
7,
16520,
2625,
726... | 2.721414 | 481 |
from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.urls import reverse
from ..utils import load_package_json, origin, uri
from .. import breadcrumbs
from entries import kinds
register = template.Library()
@register.simple_tag
@register.simple_tag
@register.simple_tag
@register.simple_tag
@register.inclusion_tag('lemoncurry/tags/nav.html')
@register.inclusion_tag('lemoncurry/tags/nav.html')
@register.inclusion_tag('lemoncurry/tags/breadcrumbs.html', takes_context=True)
@register.simple_tag
| [
198,
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
49315,
13,
27530,
1330,
14413,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
11485,
26791,
... | 3.140541 | 185 |
import os
from lmod.spider import Spider
| [
11748,
28686,
198,
6738,
300,
4666,
13,
2777,
1304,
1330,
12648,
628
] | 3.5 | 12 |
"""
MIT License
Copyright (c) 2019-2021 naoTimesdev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
import asyncio
import logging
from math import ceil
from typing import TYPE_CHECKING, Dict, List, Optional, Union
import arrow
import wavelink
from discord.channel import StageChannel, VoiceChannel
from discord.colour import Colour
from discord.embeds import Embed
from wavelink import Player
from wavelink.errors import NodeOccupied, NoMatchingNode
from wavelink.ext import spotify
from wavelink.tracks import YouTubeTrack
from wavelink.utils import MISSING
from naotimes.timeparse import TimeString
from .errors import UnsupportedURLFormat
from .queue import (
GuildMusicInstance,
TrackEntry,
TrackQueueAll,
TrackQueueImpl,
TrackQueueSingle,
TrackRepeat,
)
from .track import (
BandcampDirectLink,
SoundcloudDirectLink,
SpotifyDirectTrack,
SpotifyTrack,
TwitchDirectLink,
YoutubeDirectLinkTrack,
)
if TYPE_CHECKING:
from discord.guild import Guild
from discord.member import Member
from naotimes.bot import naoTimesBot
from naotimes.config import naoTimesLavanodes
__all__ = (
"naoTimesPlayer",
"format_duration",
)
RealTrack = Union[YouTubeTrack, YoutubeDirectLinkTrack, SpotifyTrack]
VocalChannel = Union[VoiceChannel, StageChannel]
| [
37811,
198,
36393,
13789,
198,
198,
15269,
357,
66,
8,
13130,
12,
1238,
2481,
299,
5488,
28595,
7959,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
290,
3917,
... | 3.518072 | 664 |
from datetime import datetime
import logging
import requests
from shared_code import configurations
from shared_code.models.oat import OATDetectionResult
from shared_code.trace_utils.trace import trace_manager
XDR_HOST_URL = configurations.get_xdr_host_url()
# Get List of Events
# Get raw logs from search api
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
18931,
198,
11748,
7007,
198,
6738,
4888,
62,
8189,
1330,
25412,
198,
6738,
4888,
62,
8189,
13,
27530,
13,
15073,
1330,
440,
1404,
11242,
3213,
23004,
198,
6738,
4888,
62,
8189,
13,
40546,... | 3.613636 | 88 |
import traceback
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
| [
11748,
12854,
1891,
201,
198,
11748,
19798,
292,
355,
279,
67,
220,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
220,
201,
198,
6738,
1573,
17721,
1330,
9678,
18839,
11,
44934,
45359,
5258,
220,
201
] | 3.025641 | 39 |
import os
import requests
import requests.auth
import urllib
| [
11748,
28686,
198,
11748,
7007,
198,
11748,
7007,
13,
18439,
198,
11748,
2956,
297,
571,
628,
198
] | 3.705882 | 17 |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2021-2022 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import (
Any, cast, Dict, List, Optional, Sequence, Tuple, TYPE_CHECKING)
from chb.app.AbstractSyntaxTree import AbstractSyntaxTree
from chb.app.ASTNode import ASTInstruction, ASTExpr, ASTLval
from chb.app.InstrXData import InstrXData
from chb.arm.ARMDictionaryRecord import armregistry
from chb.arm.ARMOpcode import ARMOpcode, simplify_result
from chb.arm.ARMOperand import ARMOperand
from chb.arm.ARMOperandKind import ARMOperandKind, ARMAbsoluteOp
from chb.bctypes.BCTyp import BCTyp
from chb.invariants.XXpr import XXpr
import chb.invariants.XXprUtil as XU
from chb.models.ModelsAccess import ModelsAccess
from chb.models.ModelsType import MNamedType
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
from chb.api.CallTarget import CallTarget, AppTarget
from chb.arm.ARMDictionary import ARMDictionary
@armregistry.register_tag("B", ARMOpcode)
class ARMBranch(ARMOpcode):
"""branch instruction.
B<c> label
B<c>.W label
tags[1]: <c>
args[0]: index of target operand in armdictionary
args[1]: is-wide (thumb)
"""
@property
def annotation(self, xdata: InstrXData) -> str:
"""xdata format: a:x .
xprs[0]: true condition
xprs[1]: false condition
xprs[2]: true condition (simplified)
xprs[3]: false condition (simplified)
xprs[4]: target address (absolute)
or, if no conditions
xprs[0]: target address (absolute)
"""
if self.is_call_instruction(xdata):
tgt = xdata.call_target(self.ixd)
args = ", ".join(str(x) for x in self.arguments(xdata))
return "call " + str(tgt) + "(" + args + ")"
elif xdata.has_branch_conditions():
return "if " + str(xdata.xprs[0]) + " then goto " + str(xdata.xprs[4])
elif self.tags[1] in ["a", "unc"]:
return "goto " + str(xdata.xprs[0])
else:
return "if ? goto " + str(xdata.xprs[0])
| [
2,
16529,
26171,
198,
2,
6127,
39,
19301,
45755,
16213,
9107,
198,
2,
6434,
25,
367,
11870,
311,
541,
2611,
198,
2,
16529,
26171,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
12,
1238,
1828,
... | 2.89789 | 1,185 |
from __future__ import print_function
import cv2
import numpy as np
import glob, os
#import matplotlib.pyplot as plt
import sys
#import time
import h5py
import random
#from scipy import ndimage
import ntpath
DATA_PATH = './training_patches_varied_256/input/'
LABEL_PATH = './training_patches_varied_256/haze/'
PATCH_PATH = './'
SIZE_INPUT = 256
SIZE_TARGET = 256
STRIDE = 128
count = 0
i = 1
total = 39240
h5fw = h5py.File(str(PATCH_PATH + str(SIZE_INPUT) + str(total) + '_' + 'training' + '.h5'), 'w')
INPUT = np.empty(shape=(total, SIZE_INPUT, SIZE_INPUT, 3))
TARGET = np.empty(shape=(total, SIZE_TARGET, SIZE_TARGET, 3))
k = 0
p = np.random.permutation(total)
print(p)
for data_image in glob.glob(DATA_PATH + '*.png'):
string_data = path_leaf(data_image)
string_label = 'haze' + string_data[5:]
# print(string_data)
print(string_label)
label_image_name = LABEL_PATH + string_label
#BI_img_name = BI_PATH + HR_img_name[12:19] + '.png'
# print(label_image_name)
imgData = cv2.imread(data_image)
imgLabel = cv2.imread(label_image_name)
# normalizing the input and target images
imgData_normalized = imgData/255.0
imgLabel_normalized = imgLabel/255.0
#cv2.imshow('image',imgLabel_normalized)
#cv2.imshow('data',imgData_normalized)
#cv2.waitKey(0)
#input_Data = np.array([imgData_normalized])
#input_Label = np.array([imgLabel_normalized])
# structuring them for tensor flow
#input_elem = np.rollaxis(input_Data, 0, 4)
#target_elem = np.rollaxis(input_Label, 0, 4)
#(hei, wid) = input_elem.shape[0:2]
#subim_input = input_elem[:, :, :,0]
#subim_target = target_elem[:, :, :,0]
INPUT[p[k], :, :, :] = imgLabel_normalized
TARGET[p[k], :, :, :] = imgData_normalized
#INPUT[k+total, :, :, :] = imgData_normalized
#TARGET[k+total, :, :, :] = imgLabel_normalized
#cv2.imshow('image1',INPUT[p[k]])
#cv2.imshow('data1',TARGET[p[k]])
#cv2.waitKey(0)
k = k + 1
#if k==total:
# break
#INPUT = np.append(INPUT, imgData_normalized[np.newaxis, ...], axis=0)
#TARGET = np.append(TARGET, imgLabel_normalized[np.newaxis, ...], axis=0)
#count = count + 1
print(str(k) + '-INPUT' + str(INPUT.shape) + '-TARGET' + str(TARGET.shape))
sys.stdout.flush() #?
#time.sleep(.1) #?
#i += 1
#creation of patches for individual images complete
# start creating a single h5 file by combining all files and shuffling them
#print('>>>Start shuffling Images:')
# function for shuffling images
#INPUT, TARGET = unison_shuffled_copies(INPUT, TARGET)
dset_input = h5fw.create_dataset(name='INPUT', shape=INPUT.shape, data=INPUT, dtype=np.float32)
INPUT = None
print('>>>>INPUT file generated')
dset_target = h5fw.create_dataset(name='TARGET', shape=TARGET.shape, data=TARGET, dtype=np.float32)
print('>>>>TARGET file generated')
print('>>>>save file' + 'training' + 'INPUT_' + str(SIZE_INPUT) + 'TARGET_' + str(SIZE_TARGET))
h5fw.close()
#h5fw_b = h5py.File(str('low_75_BW_' + str(SIZE_INPUT) + '.h5'), 'w')
#dset_input = h5fw_b.create_dataset(name='INPUT', shape=INPUT.shape, data=INPUT, dtype=np.float32)
#dset_target = h5fw_b.create_dataset(name='TARGET', shape=TARGET.shape, data=TARGET, dtype=np.float32)
#h5fw_b.close()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
15095,
11,
28686,
201,
198,
2,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
... | 2.246831 | 1,499 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 28 18:12:44 2018
@author: Utkarsh
"""
import numpy as np
from skimage.morphology import convex_hull_image, erosion
from skimage.morphology import square
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
2365,
2579,
1248,
25,
1065,
25,
2598,
2864,
201,
198,
201,
198,
31,
9800,
25,
7273,
74,
5406,
201,
198,
37811,
201,
198,
201,
... | 2.6375 | 80 |
# ----------------------------------------------------------------------------
# Gimel Studio Copyright 2019-2022 by the Gimel Studio project contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import sys
import shutil
import subprocess
# Set to True during development and testing
DEBUG = False
# -- Mac Installation -----------------------------------------------
# -------------------------------------------------------------------
# -- Linux Installation ---------------------------------------------
# -------------------------------------------------------------------
# -- Build Process Initiation ---------------------------------------
# -------------------------------------------------------------------
# Check if this is 64-bit
if not sys.maxsize > 2**32:
raise NotImplementedError("Only 64-bit systems are supported!")
currentPlatform = sys.platform
# Install the required packages
if "darwin" in currentPlatform:
MAC()
elif currentPlatform in ["linux", "linux2"]:
LINUX()
elif "win32" in currentPlatform:
subprocess.call(["pip", "install", "-r", "requirements.txt"])
else:
raise NotImplementedError("Only Windows, Linux and MacOs are supported!")
# Prompt to install openimageio on windows
if "win32" in currentPlatform:
try:
import OpenImageIO
except ImportError:
print("\n\nPlease pip install the openimageio wheel that matches your python version from https://www.lfd.uci.edu/~gohlke/pythonlibs/#openimageio\n\n")
sys.exit()
# Setup the correct arguments and options based on the platform
args = [
"pyinstaller",
"src/main.py",
"-n", "GimelStudio",
"--noconfirm",
"--hidden-import",
"pkg_resources.py2_warn",
"--hidden-import",
"glcontext",
"--add-data"
]
args.append("src/gimelstudio/datafiles/default_config.json" + os.pathsep + "gimelstudio/datafiles")
# if DEBUG is False:
# args.append("--noconsole")
if "win32" in currentPlatform:
args.append("-i")
args.append("assets/GIMELSTUDIO_ICO.ico")
else:
raise NotImplementedError("Only Windows, Linux and MacOs are supported!")
subprocess.call(args)
if sys.platform == "win32":
shutil.copytree("src/nodes", "dist/GimelStudio/nodes")
else:
raise NotImplementedError("Only Windows, Linux and MacOs are supported!")
| [
2,
16529,
10541,
198,
2,
41123,
417,
11733,
15069,
13130,
12,
1238,
1828,
416,
262,
41123,
417,
11733,
1628,
20420,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
74... | 3.549572 | 817 |
from guillotina.db.interfaces import IStorageCreatedEvent
from zope.interface import implementer
@implementer(IStorageCreatedEvent)
| [
6738,
915,
359,
313,
1437,
13,
9945,
13,
3849,
32186,
1330,
314,
31425,
41972,
9237,
198,
6738,
1976,
3008,
13,
39994,
1330,
3494,
263,
628,
198,
31,
320,
26908,
263,
7,
40,
31425,
41972,
9237,
8,
198
] | 3.621622 | 37 |
#=========================================================================
# ListMemPortAdapter
#=========================================================================
# These classes provides a list interface, but the implementation
# essentially turns reads/writes into memory requests sent over a
# port-based memory interface. We use greenlets to enable us to wait
# until the response has come back before returning to the function
# accessing the list.
from greenlet import greenlet
#-------------------------------------------------------------------------
# ListMemPortAdapter
#-------------------------------------------------------------------------
#-----------------------------------------------------------------------
# Constructor
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# __getitem__
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# __setitem__
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# line_trace
#-----------------------------------------------------------------------
| [
2,
23926,
2559,
28,
198,
2,
7343,
13579,
13924,
47307,
198,
2,
23926,
2559,
28,
198,
2,
2312,
6097,
3769,
257,
1351,
7071,
11,
475,
262,
7822,
198,
2,
6986,
4962,
9743,
14,
8933,
274,
656,
4088,
7007,
1908,
625,
257,
198,
2,
2493,... | 8.03012 | 166 |
import sys
MAX_ITERABLES_SIZE = min(100, sys.maxsize)
| [
11748,
25064,
198,
198,
22921,
62,
2043,
1137,
6242,
28378,
62,
33489,
796,
949,
7,
3064,
11,
25064,
13,
9806,
7857,
8,
198
] | 2.391304 | 23 |
#coding:utf-8
import cv2
import numpy as np
from matplotlib import pyplot as plt
# reference: https://blog.csdn.net/u010128736/article/details/52801310
image = cv2.imread("./china-1-jiao-1997.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.subplot(131), plt.imshow(image, "gray")
plt.title("source image"), plt.xticks([]), plt.yticks([])
plt.subplot(132), plt.hist(image.ravel(), 256)
plt.title("Histogram"), plt.xticks([]), plt.yticks([])
ret1, th1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU) #方法选择为THRESH_OTSU
plt.subplot(133), plt.imshow(th1, "gray")
plt.title("OTSU,threshold is " + str(ret1)), plt.xticks([]), plt.yticks([])
plt.show()
| [
2,
66,
7656,
25,
40477,
12,
23,
201,
198,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
201,
198,
201,
198,
2,
4941,
25,
3740,
1378,
140... | 2.083333 | 324 |
"""
Copyright (c) 2015-2018 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import bs4, requests, time, datetime, pprint
pp = pprint.PrettyPrinter(indent=2)
timus_id = "19306"
acm_link = "http://acm.timus.ru/"
submissions = []
from_id = None
count = 1000
for i in xrange(1000):
initial_url = acm_link + "status.aspx?author=" + timus_id + "&count=" + str(count)
if from_id is None:
url = initial_url
else:
url = initial_url + "&from=" + str(from_id)
response = requests.get(url)
soup = bs4.BeautifulSoup(response.text, "lxml")
table = soup.find("table", class_="status")
all_trs = table.find_all("tr")
trs = all_trs[2:-2]
for tr in trs:
tds = tr.find_all("td")
from_id = int(tds[0].text)
curr, _, date = tds[1].contents
curr = time.strptime(curr.text + " " + date.text, "%H:%M:%S %d %b %Y")
curr = time.strptime(str(datetime.datetime(curr.tm_year,
curr.tm_mon,
curr.tm_mday,
curr.tm_hour,
curr.tm_min,
curr.tm_sec) + \
datetime.timedelta(minutes=30)),
"%Y-%m-%d %H:%M:%S")
problem_link = acm_link + tds[3].contents[0]["href"] + "&locale=en"
problem_name = tds[3].text
language = tds[4].text
status = tds[5].text
submission_status = None
if status == "Accepted":
submission_status = "AC"
elif status == "Wrong answer":
submission_status = "WA"
elif status.__contains__("Runtime error"):
submission_status = "RE"
elif status == "Memory limit exceeded":
submission_status = "MLE"
elif status == "Time limit exceeded":
submission_status = "TLE"
elif status == "Compilation error":
submission_status = "CE"
else:
submission_status = "OTH"
if submission_status == "AC":
points = "100"
else:
points = "0"
submissions.append((str(time.strftime("%Y-%m-%d %H:%M:%S", curr)),
problem_link,
problem_name,
submission_status,
points,
language,
""))
from_id -= 1
if len(trs) < count:
break
#print submissions
pp.pprint(submissions) | [
37811,
198,
220,
220,
220,
15069,
357,
66,
8,
1853,
12,
7908,
13308,
33110,
7,
430,
73,
34229,
430,
73,
31,
14816,
13,
785,
828,
13707,
1273,
971,
628,
220,
220,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597... | 2.062637 | 1,820 |
#===================== begin_copyright_notice ==================================
#Copyright (c) 2017 Intel Corporation
#Permission is hereby granted, free of charge, to any person obtaining a
#copy of this software and associated documentation files (the
#"Software"), to deal in the Software without restriction, including
#without limitation the rights to use, copy, modify, merge, publish,
#distribute, sublicense, and/or sell copies of the Software, and to
#permit persons to whom the Software is furnished to do so, subject to
#the following conditions:
#The above copyright notice and this permission notice shall be included
#in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
#OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
#TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#======================= end_copyright_notice ==================================
############# Currently Supported Types ######################
#PointerTypes = ["ptr_private","ptr_global","ptr_constant","ptr_local","ptr_generic"]
#FloatingPointTypes = ["half","float","double"]
#IntegerTypes = ["bool","char","short","int","long"]
#IntrinsicsProperties = ["None","NoMem","ReadArgMem","ReadMem","ReadWriteArgMem","NoReturn","NoDuplicate", "Convergent"]
#IntrinsicsProperties may be specified as a comma separated list (e.g., "Convergent,NoMem")
# EX. "GenISA_blah": [{return_type},[arg1_type,arg2_type.....],Property]
# The "any" type can be followed by a default type if a type is not explicitly specified: Ex. "any:int"
# 0 - LLVMMatchType<0>
# 1 - LLVMMatchType<1>
# {int} - LLVMMatchType<{int}>
# See Intrinsics.json file for entries
Imported_Intrinsics = \
{
"GenISA_ROUNDNE": ["float",["float"],"NoMem"],
"GenISA_imulH": ["anyint",[0,0],"NoMem"],
"GenISA_umulH": ["anyint",[0,0],"NoMem"],
"GenISA_f32tof16_rtz": ["float",["float"],"NoMem"],
"GenISA_fsat": ["anyfloat",[0],"NoMem"],
"GenISA_rsq": ["anyfloat",[0],"NoMem"],
"GenISA_uaddc": ["anyvector",["anyint",1],"NoMem"],
"GenISA_usubb": ["anyvector",["anyint",1],"NoMem"],
"GenISA_bfi": ["int",["int","int","int","int"],"NoMem"],
"GenISA_ibfe": ["int",["int","int","int"],"NoMem"],
"GenISA_ubfe": ["int",["int","int","int"],"NoMem"],
"GenISA_bfrev": ["int",["int"],"NoMem"],
"GenISA_firstbitLo": ["int",["int"],"NoMem"],
"GenISA_firstbitHi": ["int",["int"],"NoMem"],
"GenISA_firstbitShi": ["int",["int"],"NoMem"],
"GenISA_IEEE_Sqrt": ["float",["float"],"NoMem"],
"GenISA_IEEE_Divide": ["anyfloat",[0,0],"NoMem"],
"GenISA_ftoi_rte": ["anyint",["anyfloat"],"NoMem"],
"GenISA_ftoi_rtp": ["anyint",["anyfloat"],"NoMem"],
"GenISA_ftoi_rtn": ["anyint",["anyfloat"],"NoMem"],
"GenISA_ftoui_rte": ["anyint",["anyfloat"],"NoMem"],
"GenISA_ftoui_rtp": ["anyint",["anyfloat"],"NoMem"],
"GenISA_ftoui_rtn": ["anyint",["anyfloat"],"NoMem"],
"GenISA_ftof_rtn": ["anyfloat",["anyfloat"],"NoMem"],
"GenISA_ftof_rtp": ["anyfloat",["anyfloat"],"NoMem"],
"GenISA_ftof_rtz": ["anyfloat",["anyfloat"],"NoMem"],
"GenISA_itof_rtn": ["anyfloat",["anyint"],"NoMem"],
"GenISA_itof_rtp": ["anyfloat",["anyint"],"NoMem"],
"GenISA_itof_rtz": ["anyfloat",["anyint"],"NoMem"],
"GenISA_uitof_rtn": ["anyfloat",["anyint"],"NoMem"],
"GenISA_uitof_rtp": ["anyfloat",["anyint"],"NoMem"],
"GenISA_uitof_rtz": ["anyfloat",["anyint"],"NoMem"],
"GenISA_mul_rtz" : ["anyfloat", [0, 0], "NoMem"],
"GenISA_fma_rtz" : ["anyfloat", [0, 0, 0], "NoMem"],
"GenISA_add_rtz" : ["anyfloat", [0, 0], "NoMem"],
"GenISA_ldstructured": ["float4",["anyptr","int","int"],"ReadArgMem"],
"GenISA_storestructured1": ["void",["anyptr","int","int","float"],"None"],
"GenISA_storestructured2": ["void",["anyptr","int","int","float","float"],"None"],
"GenISA_storestructured3": ["void",["anyptr","int","int","float","float","float"],"None"],
"GenISA_storestructured4": ["void",["anyptr","int","int","float","float","float","float"],"None"],
"GenISA_typedread": ["float4",["anyptr","int","int","int", "int"],"ReadArgMem"],
"GenISA_typedwrite": ["void",["anyptr","int","int","int","int","float","float","float","float"],"None"],
"GenISA_ldraw_indexed": ["any:float",["anyptr","int", "int"],"ReadArgMem"],
"GenISA_ldrawvector_indexed": ["anyvector",["anyptr","int", "int"],"ReadArgMem"],
"GenISA_storeraw_indexed": ["void",["anyptr","int","any:float", "int"],"None"],
"GenISA_storerawvector_indexed": ["void",["anyptr","int","anyvector", "int"],"None"],
"GenISA_intatomicraw": ["anyint",["anyptr","int",0,"int"],"ReadWriteArgMem"],
"GenISA_floatatomicraw": ["anyfloat",["anyptr","int",0,"int"],"ReadWriteArgMem"],
"GenISA_intatomicrawA64": ["anyint",["anyptr","anyptr",0,"int"],"ReadWriteArgMem"],
"GenISA_floatatomicrawA64": ["anyfloat",["anyptr","anyptr",0,"int"],"ReadWriteArgMem"],
"GenISA_dwordatomicstructured": ["int",["anyptr","int","int","int","int"],"ReadWriteArgMem"],
"GenISA_floatatomicstructured": ["float",["anyptr","int","int","float","int"],"ReadWriteArgMem"],
"GenISA_intatomictyped": ["anyint",["anyptr","int","int","int",0,"int"],"ReadWriteArgMem"],
"GenISA_icmpxchgatomicraw": ["anyint",["anyptr","int",0,0],"None"],
"GenISA_fcmpxchgatomicraw": ["anyfloat",["anyptr","int",0,0],"None"],
"GenISA_icmpxchgatomicrawA64": ["anyint",["anyptr","anyptr",0,0],"ReadWriteArgMem"],
"GenISA_fcmpxchgatomicrawA64": ["anyfloat",["anyptr","anyptr",0,0],"ReadWriteArgMem"],
"GenISA_cmpxchgatomicstructured": ["int",["anyptr","int","int","int","int"],"ReadWriteArgMem"],
"GenISA_fcmpxchgatomicstructured": ["float",["anyptr","int","int","float","float"],"ReadWriteArgMem"],
"GenISA_icmpxchgatomictyped": ["anyint",["anyptr","int","int","int",0,0],"ReadWriteArgMem"],
"GenISA_atomiccounterinc": ["int",["anyptr"],"ReadWriteArgMem"],
"GenISA_atomiccounterpredec": ["int",["anyptr"],"ReadWriteArgMem"],
"GenISA_threadgroupbarrier": ["void",[],"Convergent"],
"GenISA_threadgroupbarrier_signal": ["void",[],"Convergent"],
"GenISA_threadgroupbarrier_wait": ["void",[],"Convergent"],
"GenISA_wavebarrier": ["void",[],"Convergent"],
"GenISA_memoryfence": ["void",["bool","bool","bool","bool","bool","bool","bool"],"Convergent"],
"GenISA_typedmemoryfence": ["void",["bool"],"Convergent"],
"GenISA_flushsampler": ["void",[],"None"],
"GenISA_globalSync": ["void",[],"Convergent"],
"GenISA_uavSerializeOnResID": ["void",["int"],"None"],
"GenISA_uavSerializeAll": ["void",[],"None"],
"GenISA_WorkGroupAny": ["int",["int"],"None"],
"GenISA_sampleKillPix": ["anyvector",["anyfloat",1,1,"anyptr","anyptr","int","int","int"],"NoMem"],
"GenISA_evaluateSampler": ["void",["anyvector"],"None"],
"GenISA_ldmsptr16bit": ["anyvector",["short","short","short","short","short","short","short","short","short","anyptr","int","int","int"],"NoMem"],
"GenISA_ldmsptr": ["anyvector",["int","int","int","int","int","int","int","anyptr","int","int","int"],"NoMem"],
"GenISA_ldmcsptr": ["anyvector",["anyint",1,1,1,"anyptr","int","int","int"],"NoMem"],
"GenISA_lodptr": ["anyvector",["anyfloat",1,1,1,"anyptr","anyptr"],"NoMem"],
"GenISA_sampleptr": ["anyvector",["anyfloat",1,1,1,1,"anyptr","anyptr","int","int","int"],"NoMem"],
"GenISA_sampleBptr": ["anyvector",["anyfloat",1,1,1,1,1,"anyptr","anyptr","int","int","int"],"NoMem"],
"GenISA_sampleCptr": ["anyvector",["anyfloat",1,1,1,1,1,"anyptr","anyptr","int","int","int"],"NoMem"],
"GenISA_sampleDptr": ["anyvector",["anyfloat",1,1,1,1,1,1,1,1,1,1,"anyptr","anyptr","int","int","int"],"NoMem"],
"GenISA_sampleDCptr": ["anyvector",["anyfloat",1,1,1,1,1,1,1,1,1,1,"anyptr","anyptr","int","int","int"],"NoMem"],
"GenISA_sampleLptr": ["anyvector",["anyfloat",1,1,1,1,"anyptr","anyptr","int","int","int"],"NoMem"],
"GenISA_sampleLCptr": ["anyvector",["anyfloat",1,1,1,1,1,"anyptr","anyptr","int","int","int"],"NoMem"],
"GenISA_sampleBCptr": ["anyvector",["anyfloat",1,1,1,1,1,"anyptr","anyptr","int","int","int"],"NoMem"],
"GenISA_ldptr": ["anyvector",["int","int","int","int","anyptr","int","int","int"],"ReadArgMem"],
"GenISA_readsurfaceinfoptr": ["int16",["anyptr","int"],"NoMem"],
"GenISA_resinfoptr": ["int4",["anyptr","int"],"NoMem"],
"GenISA_sampleinfoptr": ["int4",["anyptr"],"NoMem"],
"GenISA_gather4ptr": ["anyvector",["anyfloat",1,1,1,"anyptr","anyptr","int","int","int","int"],"NoMem"],
"GenISA_gather4Cptr": ["anyvector",["anyfloat",1,1,1,1,"anyptr","anyptr","int","int","int","int"],"NoMem"],
"GenISA_gather4POptr": ["anyvector",["anyfloat",1,"int","int",1,"anyptr","anyptr","int","int","int","int"],"NoMem"],
"GenISA_gather4POCptr": ["anyvector",["anyfloat",1,1,"int","int",1,"anyptr","anyptr","int","int","int","int"],"NoMem"],
"GenISA_RuntimeValue": ["any:float",["int"],"NoMem"],
"GenISA_GetBufferPtr": ["anyptr",["int","int"],"NoMem"],
"GenISA_DCL_inputVec": ["anyfloat",["int","int"],"NoMem"],
# Signature: (dwordAttributeOrSetupIndex, e_interpolation_PSOnly)->anyvector
"GenISA_DCL_ShaderInputVec": ["anyvector",["int","int"],"NoMem"],
"GenISA_DCL_GSinputVec": ["float4",["int","int"],"NoMem"],
"GenISA_DCL_SystemValue": ["any:float",["int"],"NoMem"],
"GenISA_SampleOffsetX": ["float",["int"],"NoMem"],
"GenISA_SampleOffsetY": ["float",["int"],"NoMem"],
"GenISA_PixelPositionX": ["short",[],"NoMem"],
"GenISA_PixelPositionY": ["short",[],"NoMem"],
"GenISA_DCL_GSsystemValue": ["float",["int","int"],"NoMem"],
"GenISA_DCL_input": ["int",["int","int"],"None"],
"GenISA_OUTPUT": ["void",["anyfloat",0,0,0,"int","int"],"NoDuplicate"],
"GenISA_PatchConstantOutput": ["void",["anyfloat",0,0,0,"int","int"],"None"],
"GenISA_PHASE_OUTPUT": ["void",["float","int"],"None"],
"GenISA_PHASE_INPUT": ["float",["int"],"NoMem"],
"GenISA_cycleCounter": ["int2",[],"None"],
"GenISA_PullSampleIndexBarys": ["float2",["int","bool"],"NoMem"],
"GenISA_PullSnappedBarys": ["float2",["int","int","bool"],"NoMem"],
"GenISA_Interpolate": ["float",["int","float2"],"NoMem"],
"GenISA_GradientX": ["anyfloat",[0],"NoMem"],
"GenISA_GradientXfine": ["float",["float"],"NoMem"],
"GenISA_GradientY": ["anyfloat",[0],"NoMem"],
"GenISA_GradientYfine": ["float",["float"],"NoMem"],
"GenISA_discard": ["void",["bool"],"None"],
"GenISA_OUTPUTGS": ["void",["float","float","float","float","int","int","int"],"None"],
"GenISA_OUTPUTGS2": ["void",["float","float","float","float","float","float","float","float","int","int","int"],"None"],
"GenISA_EndPrimitive": ["void",["int"],"None"],
"GenISA_SetStream": ["void",["int","int"],"None"],
"GenISA_GsCutControlHeader": ["void",["int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int"],"None"],
"GenISA_GsStreamHeader": ["void",["int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int"],"None"],
"GenISA_DCL_HSControlPointID": ["int",[],"None"],
"GenISA_OutputTessControlPoint": ["void",["float","float","float","float","int","int","int"],"None"],
"GenISA_OutputTessFactors": ["void",["float","float","float","float","float","float"],"None"],
# Signature: (owordAttributeIndex)->float4
"GenISA_DCL_HSPatchConstInputVec": ["float4",["int"],"ReadMem"],
"GenISA_OuterScalarTessFactors": ["void",["int","float"],"None"],
"GenISA_InnerScalarTessFactors": ["void",["int","float"],"None"],
"GenISA_DCL_DSPatchConstInputVec": ["float4",["int"],"NoMem"],
"GenISA_DCL_DSInputTessFactor": ["float",["int"],"NoMem"],
"GenISA_DCL_DSCntrlPtInputVec": ["float4",["int","int"],"NoMem"],
"GenISA_DCL_HSinputVec": ["float4",["int","int"],"NoMem"],
# Signature: (owordVertexIndex, owordAttributeIndex)->float4
"GenISA_DCL_HSOutputCntrlPtInputVec": ["float4",["int","int"],"ReadMem"],
"GenISA_HSURBPatchHeaderRead": ["float8",[],"ReadMem"],
"GenISA_RenderTargetRead": ["float4",["int"],"ReadMem"],
"GenISA_RenderTargetReadSampleFreq": ["float4",["int","int"],"ReadMem"],
"GenISA_patchInstanceId": ["int",[],"NoMem"],
"GenISA_simdLaneId": ["short",[],"NoMem"],
"GenISA_simdSize": ["int",[],"NoMem"],
"GenISA_simdShuffleDown": ["anyint",[0,0,"int"],"Convergent,NoMem"],
"GenISA_simdBlockRead": ["anyvector",["anyptr"],"ReadMem"],
"GenISA_simdBlockReadBindless": ["anyvector",["anyptr", "int"],"ReadMem"],
"GenISA_simdBlockWrite": ["void",["anyptr","anyvector"],"None"],
"GenISA_simdBlockWriteBindless": ["void",["anyptr","anyvector", "int"],"None"],
"GenISA_MediaBlockRead": ["anyint",["int","int","int","int","int","int"],"None"],
"GenISA_MediaBlockWrite": ["void",["int","int","int","int","int","int","anyint"],"None"],
"GenISA_MediaBlockRectangleRead": ["void",["int","int","int","int","int","int","int"],"None"],
"GenISA_simdMediaBlockRead": ["anyvector",["int","int","int","int"],"None"],
"GenISA_simdMediaBlockWrite": ["void",["int","int","int","int","anyvector"],"None"],
"GenISA_simdMediaRegionCopy": ["void",["int","int","int","int","int","int","int","int","int","int","int","int"],"None"],
"GenISA_vaErode": ["void",["ptr_local","float2","int","int"],"None"],
"GenISA_vaDilate": ["void",["ptr_local","float2","int","int"],"None"],
"GenISA_vaMinMaxFilter": ["void",["ptr_local","float2","int","int"],"None"],
"GenISA_vaConvolve": ["void",["ptr_local","float2","int","int"],"None"],
"GenISA_vaConvolveGRF_16x1": ["short",["float2","int","int"],"None"],
"GenISA_vaConvolveGRF_16x4": ["short4",["float2","int","int"],"None"],
"GenISA_vaMinMax": ["void",["ptr_local","float2","int","int"],"None"],
"GenISA_vaCentroid": ["void",["ptr_local","float2","int2","int","int"],"None"],
"GenISA_vaBoolCentroid": ["void",["ptr_local","float2","int2","int","int"],"None"],
"GenISA_vaBoolSum": ["void",["ptr_local","float2","int2","int","int"],"None"],
"GenISA_vmeSendIME": ["void",["int","int","int","int","int","int","int","int"],"None"],
"GenISA_vmeSendIME2": ["anyvector",["anyvector","int","int","int","int"],"None"],
"GenISA_vmeSendFBR": ["void",["int","int","int","int","int","int","int","int"],"None"],
"GenISA_vmeSendFBR2": ["int4",["int4","int","int","int"],"None"],
"GenISA_vmeSendSIC": ["void",["int","int","int","int","int","int"],"None"],
"GenISA_vmeSendSIC2": ["int4",["int4","int","int","int"],"None"],
"GenISA_source_value": ["void",["int"],"None"],
"GenISA_mov_identity": ["void",["int"],"None"],
"GenISA_movflag": ["int",["int"],"None"],
"GenISA_movcr": ["int",["int"],"None"],
"GenISA_hw_thread_id": ["int",[],"NoMem"],
"GenISA_slice_id": ["int",[],"NoMem"],
"GenISA_subslice_id": ["int",[],"NoMem"],
"GenISA_eu_id": ["int",[],"NoMem"],
"GenISA_getSR0" : ["int", ["int"], "None"],
"GenISA_eu_thread_id": ["int",[],"NoMem"],
"GenISA_eu_thread_pause": ["void",["int"],"None"],
"GenISA_setMessagePhaseX_legacy": ["void",["int","int","int","anyint"],"None"],
"GenISA_setMessagePhase_legacy": ["void",["int","int","int"],"None"],
"GenISA_createMessagePhases": ["int",["int"],"None"],
"GenISA_createMessagePhasesV": ["anyvector",["int"],"None"],
"GenISA_createMessagePhasesNoInit": ["int",["int"],"None"],
"GenISA_createMessagePhasesNoInitV": ["anyvector",["int"],"None"],
"GenISA_getMessagePhaseX": ["anyint",["int","int","int"],"None"],
"GenISA_getMessagePhaseXV": ["anyint",["anyvector","int","int"],"None"],
"GenISA_setMessagePhaseX": ["int",["int","int","int","anyint"],"None"],
"GenISA_setMessagePhaseXV": ["anyvector",[0,"int","int","anyint"],"None"],
"GenISA_getMessagePhase": ["int",["int","int"],"None"],
"GenISA_getMessagePhaseV": ["int",["anyvector","int"],"None"],
"GenISA_setMessagePhase": ["int",["int","int","int"],"None"],
"GenISA_setMessagePhaseV": ["anyvector",[0,"int","int"],"None"],
"GenISA_broadcastMessagePhase": ["anyint",["int","int","int","int"],"None"],
"GenISA_broadcastMessagePhaseV": ["anyint",["anyvector","int","int","int"],"None"],
"GenISA_simdSetMessagePhase": ["int",["int","int","int","int","int","anyint"],"None"],
"GenISA_simdSetMessagePhaseV": ["anyvector",[0,"int","int","int","int","anyint"],"None"],
"GenISA_simdGetMessagePhase": ["anyint",["int","int","int"],"None"],
"GenISA_simdGetMessagePhaseV": ["anyint",["anyvector","int","int"],"None"],
"GenISA_extractMVAndSAD": ["void",["int","int","int","int"],"None"],
"GenISA_cmpSADs": ["void",["int","int","int","int"],"None"],
"GenISA_OWordPtr": ["anyptr",["int"],"NoMem"],
"GenISA_StackAlloca": ["ptr_private",["int"],"NoMem"],
"GenISA_RTDualBlendSource": ["void",["float","bool","anyfloat",0,0,0,0,0,0,0,"float","float",
"int","bool","bool","bool","bool","int"],"None"],
"GenISA_RTWrite": ["void",["anyfloat","float","bool",0,0,0,0,"float","float","int","int","bool",
"bool","bool","bool","int"],"None"],
# Signature: (owordOffset, mask, x1, y1, z1, w1, x2, y2, z2, w2)
"GenISA_URBWrite": ["void",["int","int","float","float","float","float","float","float","float","float"],"None"],
# Signature: (index, owordOffset)->float8
"GenISA_URBRead": ["float8",["int","int"],"NoMem"],
# In-place data read using URB Write Handle. Signature: Signature: (owordOffset)->float8
"GenISA_URBReadOutput": ["float8",["int"],"NoMem"],
"GenISA_SetDebugReg": ["int",["int"],"None"],
"GenISA_add_pair": [["int","int"],["int","int","int","int"],"NoMem"],
"GenISA_sub_pair": [["int","int"],["int","int","int","int"],"NoMem"],
"GenISA_mul_pair": [["int","int"],["int","int","int","int"],"NoMem"],
"GenISA_pair_to_ptr": ["anyptr",["int","int"],"NoMem"],
"GenISA_ptr_to_pair": [["int","int"],["anyptr"],"NoMem"],
# Takes a boolean as input; return a bitfield with 1 for active lane with input true, 0 for the rest.
# All lanes get the same value. Signature: (bool)->bitfield_int32
"GenISA_WaveBallot": ["int",["bool"],"Convergent,InaccessibleMemOnly"],
# For each active lane n, return value of n-th bit from the input bitfield. Signature: (bitfield)->bool
"GenISA_WaveInverseBallot": ["bool",["int"],"Convergent,InaccessibleMemOnly"],
# Read from a specific lane. Signature: (value, lane)->value
"GenISA_WaveShuffleIndex": ["anyint",[0,"int"],"Convergent,NoMem"],
# Accumulate all the active lanes. Signature: (value, op)->result; where op is one of IGC::WaveOps
"GenISA_WaveAll": ["anyint",[0,"char"],"Convergent,InaccessibleMemOnly"],
# Accumulate all active lanes within consecutive input clusters and broadcast the result to associated output clusters.
# A k-cluster is a sequence of values from k consecutive (not necessarily active) lanes, such that: clusters are disjoint,
# size value is of 1 <= 2^p <= maxSubgroupSize, p >= 0.
# Signature: (value, op, size)->result; op is one of IGC::WaveOps; size must be a compile-time constant,
# and it is assumed that size > 1; the result for n-th input cluster is replicated to n-th output cluster.
"GenISA_WaveClustered": ["anyint",[0,"char", "int"],"Convergent,InaccessibleMemOnly"],
# Accumulate and keep the intermediate results in each lane.
# Signature: (value, op, type, mask)->result; op is one of IGC::WaveOps, type is either exclusive(0)
# or invlusive(1) operation; mask specifies a subset of lanes to participate in the computation.
"GenISA_WavePrefix": ["anyint",[0,"char","bool","bool"],"Convergent,InaccessibleMemOnly"],
"GenISA_QuadPrefix": ["anyint",[0,"char","bool"],"Convergent,InaccessibleMemOnly"],
"GenISA_InitDiscardMask": ["bool",[],"None"],
"GenISA_UpdateDiscardMask": ["bool",["bool","bool"],"None"],
"GenISA_GetPixelMask": ["bool",["bool"],"None"],
# Check whether invocation is a helper invocation.
"GenISA_IsHelperInvocation": ["bool",["void"],"Convergent,InaccessibleMemOnly"],
"GenISA_dp4a_ss": ["int",["int","int","int"],"NoMem"],
"GenISA_dp4a_uu": ["int",["int","int","int"],"NoMem"],
"GenISA_dp4a_su": ["int",["int","int","int"],"NoMem"],
"GenISA_dp4a_us": ["int",["int","int","int"],"NoMem"],
"GenISA_is_uniform": ["bool",["any"],"NoMem"],
"GenISA_CatchAllDebugLine": ["void", [], "None"]
}
| [
2,
4770,
1421,
28,
2221,
62,
22163,
4766,
62,
42138,
46111,
28,
198,
198,
2,
15269,
357,
66,
8,
2177,
8180,
10501,
198,
198,
2,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
198,
2,
30073,
286,... | 2.593214 | 8,223 |
'''
Need improvement from the code and knowledge is used to built this.
Sorry for the readableness and the quality of code not as your expectation.
'''
import networkx as nx
import matplotlib.pyplot as plt
from itertools import combinations
# return GCD from pair of nums by modulating the nums iteratively.
# Parent class, define and relationing input of a set.
# Generate pair of divisible relation from Input.
# For not directly pointing to private variable.
# Subclass, to do hasse things.
# Draw hasse diagram by rules.
# Subclass of hasse, Sorting wrapper.
# Subclass, to do poset things.
# Return True if every pair of element ∈ self.definerSet.raw reflective.
# Return True if atleast has a pair by if aRb and cRd, a = b and b = c.
# Return True if aRb and cRd, b = c,
# so that aRc, in every pair of element ∈ self.definerSet.raw.
# True if all of 3 laws are True.
# Subclass of poset, infimum & supremum wrapper.
class bounds(poset):
'''
Algorithm too slow, is there any efficient way or formula to do it?
Bug arise when comparing transitive relations that should not be is.
'''
# Supremum, yield true when j is the most closer to a and b while exist in self.definerSet.raw.
#Return j, For each X that is another upper bound of (a, b), applies j ≤ X.
# Infimum, yield true when j is the most closer to a and b while exist in self.definerSet.raw.
#Return i, For each X that is another lower bound of (a, b), applies X ≤ i.
# Subclass, to do lattice things. for now, only support divisible lattice.
# infimum(a,b) and supremum (a,b) of self.poset is exist for each pair of elements a and b in self.definerSet.raw.
# Compare by its own lcm and gcd if match to the smallest & biggest element.
# Compare gcd & lcm to find meet irreducible and join irreducible.
| [
7061,
6,
198,
23037,
9025,
422,
262,
2438,
290,
3725,
318,
973,
284,
3170,
428,
13,
198,
14385,
329,
262,
1100,
23117,
9449,
290,
262,
3081,
286,
2438,
407,
355,
534,
17507,
13,
198,
7061,
6,
198,
198,
11748,
3127,
87,
355,
299,
8... | 3.157095 | 592 |
import unittest
import os
from programytest.client import TestClient
| [
11748,
555,
715,
395,
198,
11748,
28686,
198,
198,
6738,
1430,
88,
9288,
13,
16366,
1330,
6208,
11792,
628,
628
] | 3.65 | 20 |
import os
import numpy as np
import dmtools
from dmtools import transform, colorspace, arrange
# COMPILE PIECES | XXXX-XX-XX
pieces = [('taughannock_1', 'taughannock_2')]
os.makedirs('output', exist_ok=True)
for source, dest in pieces:
A = dmtools.read(f"input/{source}.png")
B = dmtools.read(f"input/{dest}.png")
image = f(A, B)
path = f"output/{source}_{dest}_composite.png"
dmtools.write_png(image, path, versioning=True)
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
288,
16762,
10141,
198,
6738,
288,
16762,
10141,
1330,
6121,
11,
7577,
10223,
11,
21674,
628,
628,
628,
198,
2,
9440,
11901,
2538,
30434,
2943,
1546,
930,
27713,
55,
12,
8051... | 2.409574 | 188 |
"""Sentiment serializer."""
# Django REST Framework
from rest_framework import serializers
# Models
from feelit.posts.models import Sentiment, Comment
# Utilities
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class CreateSentimentSerializer(serializers.ModelSerializer):
""" Analyze the text from each comment, do GET request to API
and bring results.
"""
pass | [
37811,
31837,
3681,
11389,
7509,
526,
15931,
198,
198,
2,
37770,
30617,
25161,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
2,
32329,
198,
6738,
1254,
270,
13,
24875,
13,
27530,
1330,
11352,
3681,
11,
18957,
198,
198,
2,
... | 3.612903 | 124 |
#!/usr/bin/python
# module_check: supported
# Copyright 2021 VMware, Inc. All rights reserved. VMware Confidential
# SPDX-License-Identifier: Apache License 2.0
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_alertconfig
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of AlertConfig Avi RESTful Object
description:
- This module is used to configure AlertConfig object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
type: str
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
type: str
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete", "remove"]
type: str
avi_patch_path:
description:
- Patch path to use when using avi_api_update_method as patch.
type: str
avi_patch_value:
description:
- Patch value to use when using avi_api_update_method as patch.
type: str
action_group_ref:
description:
- The alert config will trigger the selected alert action, which can send notifications and execute a controlscript.
- It is a reference to an object of type actiongroupconfig.
type: str
alert_rule:
description:
- List of filters matching on events or client logs used for triggering alerts.
required: true
type: dict
autoscale_alert:
description:
- This alert config applies to auto scale alerts.
type: bool
category:
description:
- Determines whether an alert is raised immediately when event occurs (realtime) or after specified number of events occurs within rolling time
- window.
- Enum options - REALTIME, ROLLINGWINDOW, WATERMARK.
- Default value when not specified in API or module is interpreted by Avi Controller as REALTIME.
required: true
type: str
configpb_attributes:
description:
- Protobuf versioning for config pbs.
- Field introduced in 21.1.1.
type: dict
description:
description:
- A custom description field.
type: str
enabled:
description:
- Enable or disable this alert config from generating new alerts.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
expiry_time:
description:
- An alert is expired and deleted after the expiry time has elapsed.
- The original event triggering the alert remains in the event's log.
- Allowed values are 1-31536000.
- Unit is sec.
- Default value when not specified in API or module is interpreted by Avi Controller as 86400.
type: int
name:
description:
- Name of the alert configuration.
required: true
type: str
obj_uuid:
description:
- Uuid of the resource for which alert was raised.
type: str
object_type:
description:
- The object type to which the alert config is associated with.
- Valid object types are - virtual service, pool, service engine.
- Enum options - VIRTUALSERVICE, POOL, HEALTHMONITOR, NETWORKPROFILE, APPLICATIONPROFILE, HTTPPOLICYSET, DNSPOLICY, SECURITYPOLICY, IPADDRGROUP,
- STRINGGROUP, SSLPROFILE, SSLKEYANDCERTIFICATE, NETWORKSECURITYPOLICY, APPLICATIONPERSISTENCEPROFILE, ANALYTICSPROFILE, VSDATASCRIPTSET, TENANT,
- PKIPROFILE, AUTHPROFILE, CLOUD...
type: str
recommendation:
description:
- Recommendation of alertconfig.
type: str
rolling_window:
description:
- Only if the number of events is reached or exceeded within the time window will an alert be generated.
- Allowed values are 1-31536000.
- Unit is sec.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
type: int
source:
description:
- Signifies system events or the type of client logsused in this alert configuration.
- Enum options - CONN_LOGS, APP_LOGS, EVENT_LOGS, METRICS.
required: true
type: str
summary:
description:
- Summary of reason why alert is generated.
type: str
tenant_ref:
description:
- It is a reference to an object of type tenant.
type: str
threshold:
description:
- An alert is created only when the number of events meets or exceeds this number within the chosen time frame.
- Allowed values are 1-65536.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
type: int
throttle:
description:
- Alerts are suppressed (throttled) for this duration of time since the last alert was raised for this alert config.
- Allowed values are 0-31536000.
- Unit is sec.
- Default value when not specified in API or module is interpreted by Avi Controller as 600.
type: int
url:
description:
- Avi controller URL of the object.
type: str
uuid:
description:
- Unique object identifier of the object.
type: str
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- hosts: all
vars:
avi_credentials:
username: "admin"
password: "something"
controller: "192.168.15.18"
api_version: "21.1.1"
- name: Example to create AlertConfig object
avi_alertconfig:
avi_credentials: "{{ avi_credentials }}"
state: present
name: sample_alertconfig
"""
RETURN = '''
obj:
description: AlertConfig (api/alertconfig) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from avi.sdk.utils.ansible_utils import avi_common_argument_spec
from avi.sdk.utils.ansible_utils import (
avi_ansible_api, avi_common_argument_spec)
HAS_AVI = True
except ImportError:
HAS_AVI = False
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
8265,
62,
9122,
25,
4855,
198,
198,
2,
15069,
33448,
37754,
11,
3457,
13,
220,
1439,
2489,
10395,
13,
37754,
7326,
35599,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
13789,... | 2.460205 | 2,827 |
# USAGE
# python /home/nmorales/cxgn/DroneImageScripts/CropToPolygonBulk.py --inputfile_path /export/archive/input.csv
# import the necessary packages
import argparse
import imutils
import cv2
import numpy as np
import json
import csv
import pandas as pd
import CropPolygons.CropPolygonsToSingleImage as CropPolygonsToSingleImage
import CropPolygonsSquareRectangles.CropPolygonsToSingleSquareRectangularImage as CropPolygonsToSingleSquareRectangularImage
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--inputfile_path", required=True, help="complete file path to the image you want to crop to a polygon")
args = vars(ap.parse_args())
inputfile_path = args["inputfile_path"]
input_image_file_data = pd.read_csv(inputfile_path, sep="\t", header=None)
for index, row in input_image_file_data.iterrows():
inputfile_path = row[0]
outputfile_path = row[1]
polygon_json = row[2]
polygon_type = row[3]
image_band_index = row[4]
polygons = json.loads(polygon_json)
img = cv2.imread(inputfile_path, cv2.IMREAD_UNCHANGED)
img_shape = img.shape
if len(img_shape) == 3:
if img_shape[2] == 3:
b,g,r = cv2.split(img)
if image_band_index is not None and not np.isnan(image_band_index):
image_band_index = int(image_band_index)
if image_band_index == 0:
img = b
if image_band_index == 1:
img = g
if image_band_index == 2:
img = r
if polygon_type == 'rectangular_square':
sd = CropPolygonsToSingleSquareRectangularImage.CropPolygonsToSingleSquareRectangularImage()
finalImage = sd.crop(img, polygons)
elif polygon_type == 'rectangular_polygon':
sd = CropPolygonsToSingleImage.CropPolygonsToSingleImage()
finalImage = sd.crop(img, polygons)
cv2.imwrite(outputfile_path, finalImage)
#cv2.imshow("Result", finalImage)
#cv2.waitKey(0)
| [
2,
1294,
11879,
198,
2,
21015,
1220,
11195,
14,
77,
4491,
2040,
14,
66,
87,
4593,
14,
6187,
505,
5159,
7391,
82,
14,
34,
1773,
2514,
34220,
14520,
33,
12171,
13,
9078,
1377,
15414,
7753,
62,
6978,
1220,
39344,
14,
17474,
14,
15414,
... | 2.383079 | 851 |
from ethereum import tester, blocks
import ethereum.utils as utils
import rlp
import ethereum.testutils as testutils
from ethereum.testutils import fixture_to_bytes
import ethereum.config as config
import sys
import os
import json
from ethereum.slogging import get_logger
logger = get_logger()
# customize VM log output to your needs
# hint: use 'py.test' with the '-s' option to dump logs to the console
# configure_logging(':trace')
if __name__ == '__main__':
main()
| [
6738,
304,
17733,
1330,
256,
7834,
11,
7021,
198,
11748,
304,
17733,
13,
26791,
355,
3384,
4487,
198,
11748,
374,
34431,
198,
11748,
304,
17733,
13,
9288,
26791,
355,
1332,
26791,
198,
6738,
304,
17733,
13,
9288,
26791,
1330,
29220,
62,... | 3.265306 | 147 |
from ctypes import windll
| [
198,
6738,
269,
19199,
1330,
2344,
297,
198
] | 3.375 | 8 |
# internal data manipulations
import numpy as np # operate on data from the hdf5 file and image generation
import pandas as pd # Data frames from numpy arrays, especially for output
# image generation, manipulation, and analysis
import cv2
from matplotlib import path # dealing with voronoi facets polylines as paths
import colorsys # color code managment
from scipy import ndimage # image manipulation
# plotting
import seaborn as sns
# simple debug printing, enabled with DEBUG_PRINT
DEBUG_PRINT = False
# probably want to move to a more formal logging at some point
def get_timesteps(trajectory):
"""
Determine valid timesteps for the associated hdf5 dump
Parameters
----------
trajectory : The decoded hdf5 file containing the dumped run data.
Usually from something like h5py.File(infile, 'r')
Returns
-------
A tuple consisting of three entries. The first is a sorted list of
integers for every valid timestep.
The second and third are for convenience and represent the start and
end time of the run. (First and last timesteps.)
"""
trajectory_times = sorted([int(k) for k in trajectory['id'].keys()])
start_time = trajectory_times[0]
end_time = trajectory_times[len(trajectory_times)-1]
return(trajectory_times, start_time, end_time)
# Using numpy arrays for a lot of the work rather than pandas
# Setting up constants to keep column indices correct
CELL_ID_COL = 0
CELL_X_COL = 1
CELL_Y_COL = 2
CELL_Z_COL = 3
CELL_RADIUS_COL = 4
CELL_ANCESTOR_COL = 5
# Magic value indicating a cell has not yet been assigned an ancestor
NO_ANCESTOR_ASSIGNED = -1
# TODO, in a few places, it may be better to move to a more object oriented
# approach. Mainly for a more clear interface, while avoiding state. For
# example, get_timesteps could easily produce an object with 'steps()',
# 'start_time()', and 'end_time()' methods for clarity without affecting
# performance or relying on too much internal state.
# TODO make compatible with the newer, corrected hdf5 radius dump
def radius_key(timestep):
"""
Generate the appropriate key for a radius at a given timestep.
Does not check timestep for validity.
This function exists because current phototroph runs use an older version
of NUFEB which output individual radius keys for each timestep, (e.g.
radius0, radius100, etc) rather than a single radius entry indexed by
timestep.
Parameters
----------
timestep : The numeric time step at which we want radius info
Returns
-------
A string representing the key for the radius information at the given
timestep
"""
return(f'radius{timestep}')
# TODO error out gracefully if time does not exist
def get_cells(trajectory, time=0, scale=1E6):
"""
Provide the scaled location and radius of all cells at a particular
timestep, with each cell associted with a tag id which remains consistent
between timesteps.
Scaling is intended mainly to translate spatial coordinate to image
pixel locations.
Parameters
----------
trajectory : The decoded hdf5 file containing the dumped run data.
Usually from something like h5py.File(infile, 'r')
time : An integer representing the timestep to query for cell locations.
Most runs start at time 0, so this has been left as a
default value.
scale : A value by which to multiply the physical coordinates.
The inteded goal to convert from spatial coordinates to pixel locations
so scale is generally passed a number representing pixels per meter.
The default value returns distance in terms of microns.
WARNING: Because we are return an integer based numpy array, setting
the scale low (as it may be tempting to set the scale to 1) would lead
to most values being 0.
Returns
-------
A five column, multi-row numpy array. Where the columns, in order, are:
1. The consistent atom tag (id)
2. The scaled x, y, and z coordinates of the cell
3. The cell radius
4. The cell's ancestors. This column is intended for later bookkeeping
and is not populated here, beyond initializing to NO_ANCESTOR_ASSIGNED
Each column can be referenced by the defined constants:
CELL_ID_COL = 0
CELL_X_COL = 1
CELL_Y_COL = 2
CELL_Z_COL = 3
CELL_RADIUS_COL = 4
CELL_ANCESTOR_COL = 5
"""
time = str(time)
ret_array = np.column_stack(
(trajectory['id'][time],
scale*np.column_stack((trajectory['x'][time],
trajectory['y'][time],
trajectory['z'][time],
trajectory[radius_key(time)])),
np.full((len(trajectory['id'][time]), 1), NO_ANCESTOR_ASSIGNED)
)).astype(int)
# Occasionally a cell with id == 0 is saved, this is not a valid cell
return( ret_array[ret_array[:,CELL_ID_COL]!= 0])
def get_seeds(trajectory, start_time=0, scale=1E6):
"""
As with get_cells:
Provide the scaled location and radius of all cells at a particular
timestep, with each cell associted with a tag id which remains consistent
between timesteps.
HOWEVER: Also assigns the ancestor id to the same as the cell id, since
these are the initial cells.
Parameters
----------
trajectory : The decoded hdf5 file containing the dumped run data.
Usually from something like h5py.File(infile, 'r')
start_time : An integer representing the initial timestep.
Most runs start at time 0, so this has been left as a
default value.
scale : A value by which to multiply the physical coordinates.
The inteded goal to convert from spatial coordinates to pixel locations
so scale is generally passed a number representing pixels per meter.
The default value returns distance in terms of microns.
WARNING: Because we are return an integer based numpy array, setting
the scale low (as it may be tempting to set the scale to 1) would lead
to most values being 0.
Returns
-------
A five column, multi-row numpy array. Where the columns, in order, are:
1. The consistent atom tag (id)
2. The scaled x, y, and z coordinates of the cell
3. The cell radius
4. The cell's ancestors. Unlike with get_cells, this column is
populated. Specfically, it ought to match the value in the CELL_ID_COL
since these are the initial seeds.
Each column can be referenced by the defined constants:
CELL_ID_COL = 0
CELL_X_COL = 1
CELL_Y_COL = 2
CELL_Z_COL = 3
CELL_RADIUS_COL = 4
CELL_ANCESTOR_COL = 5
"""
seeds = get_cells(trajectory, start_time, scale)
# Since this is the first set of cells, they are their own ancestor
seeds[:, CELL_ANCESTOR_COL] = seeds[:, CELL_ID_COL]
return(seeds)
# %%
def assign_ancestry(trajectory):
"""
Infer the ancestor of all cells during all timesteps.
Since cell ancestors are not necessarily tracked, we have to infer them
as we go. This method steps through each timestep, identifies cells with
unknown ancestors, and assigns them an ancestor based on the nearest cell
with a known/inferred ancestor.
There are many other approaches, but this one has proven to be the least
brittle in practice. Do note however, that the accuracy of the inference
will likely go down if the time between recorded timesteps is too large.
Although this does a brute force nearest-neighbor search, it has not
proven to take very long for the number of cells used in our current
runs (order of 1000). There are internal comments noting where
optimizations could be made.
Parameters
----------
trajectory : The decoded hdf5 file containing the dumped run data.
Usually from something like h5py.File(infile, 'r')
Returns
-------
A dictionary mapping each cell present in the timestep to the id of its
ancestor.
"""
dprint('Infeerring cell ancestries')
trajectory_times, start_time, end_time = get_timesteps(trajectory)
# Do not need to scale these, since we only care about relative distances
seeds = get_seeds(trajectory, start_time=start_time)
# Dictionary which will hold associations between cell ids and ancestors
ancestry = {}
# All seeds have a known ancestry, themselves
for seed in seeds:
ancestry[seed[CELL_ID_COL]] = seed[CELL_ANCESTOR_COL]
for time in trajectory_times:
dprint(f'\tProcessing time: {time}')
# Do not need to scale, we only care about relative distances
cells = get_cells(trajectory, time=time)
# for cells with known ancestors, set the appropriate value in the
# ancestor column. Used to filter cell list for those with unknown
# ancestors
for cell_id, anc_id in ancestry.items():
# Every once in a while a cell leaves the simulation, so make sure
# it actually exists at this timestep
if(len(cells[cells[:, CELL_ID_COL] == cell_id]) > 0):
ancestor_found = cells[cells[:, CELL_ID_COL] == cell_id][0]
ancestor_found[CELL_ANCESTOR_COL] = anc_id
cells[cells[:, CELL_ID_COL] == cell_id] = ancestor_found
# for all the cells with no currently known ancestor, find the
# nearest cell with an ancestor and assign that ancestor to the
# unknown cell
#
# TODO if this gets slow, use kdtree for neighbor search
# could also use some pre-sorting and heuristics, e.g. keep cells list
# sorted by x,y it's highly unlikely any daughter cell is going to be
# hundreds of pixels away from its parent, so don't need to search the
# whole list and, likely, we ought to do something else anyway if it
# is that far away
no_ancestor_found = cells[cells[:, CELL_ANCESTOR_COL] == -1]
for naf in no_ancestor_found:
x_new = naf[CELL_X_COL]
y_new = naf[CELL_Y_COL]
z_new = naf[CELL_Z_COL]
naf_id = naf[CELL_ID_COL]
min_dist = -1
nearest_ancestor = -1
for cell_id, anc2_id in ancestry.items():
ancestor_found = cells[cells[:, CELL_ID_COL] == cell_id]
if(len(ancestor_found) > 0):
x_old = ancestor_found[0, CELL_X_COL]
y_old = ancestor_found[0, CELL_Y_COL]
z_old = ancestor_found[0, CELL_Z_COL]
distance = ((x_old-x_new)*(x_old-x_new)
+ (y_old-y_new)*(y_old-y_new)
+ (z_old-z_new)*(z_old-z_new))
if((min_dist == -1) | (distance < min_dist)):
min_dist = distance
nearest_ancestor = anc2_id
# now that we've found the nearest neighbor cell with a known
# ancestor, update the ancestry dictionary
ancestry[naf_id] = nearest_ancestor
ancestor_found = cells[cells[:, CELL_ID_COL] == cell_id][0]
ancestor_found[CELL_ANCESTOR_COL] = nearest_ancestor
# probably don't need to do this update since cells is
# about to go out of scope
cells[cells[:, CELL_ID_COL] == cell_id] = ancestor_found
return(ancestry)
# TODO this family of functions really ought to have some responsiblities
# split. Basically, there's filtering which colonies we care about and there's
# determining the area(s) of the relevant colonies. As a motivating example
# think about how separating out the filter responsiblity would ease a new
# use case of 'show me only the live heterotrophs while ignoring the
# cyanobacteria and eps components'
def get_colony_morphology_at_time(time, ancestor_id, ancestors, trajectory,
scale, height, width):
"""
Determine the apparent 2D area of a colony at a specific timestep. A
colony is defined as all cells sharing a common ancestor. The 2D apparent
area is the visible biomass looking from the top down. Every cell from
the colony is projected to the x-y plane and is occulded by any non-colony
colony cells above them.
Internally, this function generates a virtual black and white image of
the projected and occluded colony to determine the apparent area. The
scale, height, and width parameters should be set so that the results
are comparable to any associated micrographs from analagous wet-lab
experiments.
This function may be called on its own, but it is originally intended as
the lowest level component of :func: get_colony_morphologies_at_times.
Parameters
----------
ancestor_id : The numeric id of the common ancestor to all colony members.
ancestors : A dictionary mapping each cell present in the timestep to the
id of its ancestor.
trajectory : The decoded hdf5 file containing the dumped run data.
Usually from something like h5py.File(infile, 'r')
time : The numeric timestep of interest.
scale : A value by which to multiply the physical coordinates.
The inteded goal to convert from spatial coordinates to pixel locations
so scale is generally passed a number representing pixels per meter.
height : The height of the virtual image.
width : The width of the virtual image.
Returns
-------
A three-element list containg the timestep, ancestor id, and apparent 2D
area. Although techinically an ancestor id, the second item can also be
thought of as a colony id.
"""
dprint(f'Getting morphology of colony {ancestor_id} at time {time}')
cells = get_cells(trajectory, time, scale)
mask = np.zeros((height, width, 3), dtype="uint8")
sorted_array = cells[np.argsort(cells[:, CELL_Z_COL])]
for cell in sorted_array:
loc = (int(cell[CELL_X_COL]), int(cell[CELL_Y_COL]))
cell_id = cell[CELL_ID_COL]
seed_id = ancestors[cell_id]
if(seed_id == ancestor_id):
color = (255, 255, 55)
else:
color = (0, 0, 0)
cv2.circle(mask, loc, int(cell[CELL_RADIUS_COL]), color, -1)
# for area, we just count white pixels. no need for cv2
area = np.count_nonzero(mask)
return([time, ancestor_id, area])
# %%
def get_colony_morphologies_at_time(time, ancestors, trajectory, scale, height,
width):
"""
Determine the apparent 2D areas of all colonies at a specific timestep. A
colony is defined as all cells sharing a common ancestor. The 2D apparent
area is the visible biomass looking from the top down. Every cell from
the colony is projected to the x-y plane and is occulded by any non-colony
colony cells above them.
Internally, this function relies on a virtual black and white image of
the projected and occluded colony to determine the apparent area. The
scale, height, and width parameters should be set so that the results
are comparable to any associated micrographs from analagous wet-lab
experiments.
This function may be called on its own, but it is originally intended as
a mid-level component of :func: get_colony_morphologies_at_times.
Parameters
----------
ancestors : A dictionary mapping each cell present in the timestep to the
id of its ancestor.
trajectory : The decoded hdf5 file containing the dumped run data.
Usually from something like h5py.File(infile, 'r')
time : The numeric timestep of interest.
scale : A value by which to multiply the physical coordinates.
The inteded goal to convert from spatial coordinates to pixel locations
so scale is generally passed a number representing pixels per meter.
height : The height of the virtual image.
width : The width of the virtual image.
Returns
-------
A list of three-element lists which describes all colonies present at the
given timestep. Each three-element list contains the timestep, ancestor id,
and apparent 2D area. Although techinically an ancestor id, the second item
can also be thought of as a colony id.
"""
# TODO although it's conceptually pleasing to defer to doing one colony at
# a time, it means there is A LOT of extra total drawing calls. Almost
# certainly more efficient to draw all colonies on one image and count
# the number pixels with the right color code
morphologies = []
for vi, v in enumerate(set(ancestors.values())):
morphologies.append(
get_colony_morphology_at_time(time, v, ancestors, trajectory,
scale, height, width))
return(morphologies)
# %%
def get_colony_morphologies_at_times(times, ancestors, trajectory, scale,
height, width):
"""
Determine the apparent 2D areas of all colonies at the specified times.
A colony is defined as all cells sharing a common ancestor. The 2D apparent
area is the visible biomass looking from the top down. Every cell from
the colony is projected to the x-y plane and is occulded by any non-colony
colony cells above them.
Internally, this function relies on a virtual black and white image of
the projected and occluded colony to determine the apparent area. The
scale, height, and width parameters should be set so that the results
are comparable to any associated micrographs from analagous wet-lab
experiments.
This function is intended as the main entry point to getting all colony
areas over all timesteps of the simulation. Note that it may take a while
to run. The subordinate functions :func: get_colony_morphologies_at_time
and :func: get_colony_morphology_at_time can be called directly and may
be useful for either prototyping/debugging or for when only a subset of
colony areas (such as the areas at the final timestep) are of interest.
Parameters
----------
times : A numeric list of all timesteps of interest.
ancestors : A dictionary mapping each cell present in the timestep to the
id of its ancestor.
trajectory : The decoded hdf5 file containing the dumped run data.
Usually from something like h5py.File(infile, 'r')
scale : A value by which to multiply the physical coordinates.
The inteded goal to convert from spatial coordinates to pixel locations
so scale is generally passed a number representing pixels per meter.
height : The height of the virtual image.
width : The width of the virtual image.
Returns
-------
A Pandas dataframe which describes all colonies present at the
requested timesteps. Each row contains the the timestep,
ancestor id, apparent 2D area in pixels, and a record of the scaling factor
between pixels and meters. Although techinically an ancestor id,
the second item can also be thought of as a colony id.
We are returning a dataframe, which is unlike the finer grained related
functions for getting colony morophologies. In all uses so far, the
originally returned list of arrays was immediately converted to a dataframe
so we are incorporating that step.
If dealing with a raw numpy array is required, the returned dataframe
may be converted usings the :func: Pandas.dataframe.to_numpy method
"""
morphologies = []
for time in times:
morphologies.extend(
get_colony_morphologies_at_time(time, ancestors, trajectory,
scale, height, width))
df = pd.DataFrame(morphologies,
columns=['Time (s)', 'Colony ID', 'Area (pixels)'])
df['Scale (pixels per meter)'] = scale
return(df)
# get the raw voroni facets
def calc_voronoi_from_seeds(height, width, seeds):
"""
Generate the voronoi facets for an an experiment area, based on the seeding
cell locations.
Parameters
----------
height : Height in pixels of the experimental area.
seeds : A numpy array conforming the to return value of :func: get_seeds
Specifically, each row corresponds to one seed and the columnts at
CELL_X_COL and CELL_Y_COL give the X and Y coordinates in pixel units
Returns
-------
A list of all Voronoi facets, where each facet is an array of vertices
corresponding to pixel locations.
"""
# prepare the subdivsion area
rect = (0, 0, height, width)
subdiv = cv2.Subdiv2D(rect)
# load all seed locations into subdiv as (x,y) tuples
# TODO probably a more efficient way. convert np columns directly to
# vector of tuples?
for s in seeds[:, [CELL_X_COL, CELL_Y_COL]]:
subdiv.insert((s[0], s[1]))
(facets, centers) = subdiv.getVoronoiFacetList([])
return(facets)
# %%
# %%
# clip the facets to the bounding box and trim to ints
# doesn't just bound existing vertices, also creates new along bounds
# using matplotlib path clipping to handle the math
# TODO change this to require seed_facets, so that we can track seed_ids with
# clipped facets (e.g. edge colonies)
def clip_facets(facets, bound_height, bound_width):
    """Clip facets to a bounding area and round vertices to integers.

    Clipping does not merely clamp existing vertices: new vertices are
    created along the bounds where a facet crosses them (matplotlib's path
    clipping handles the geometry).

    Parameters
    ----------
    facets : A list of Voronoi facets, where each facet is an array of
        vertices corresponding to pixel locations.
    bound_height : Height of the bounding area, same units as those in the
        facet list.
    bound_width : Width of the bounding area, same units as those in the
        facet list.

    Returns
    -------
    A list with one ``[vertices, edge_piece]`` entry per input facet, where
    ``vertices`` is an integer array of the clipped vertex locations and
    ``edge_piece`` is True when the original facet touched or crossed the
    bounding box (e.g. an edge colony).
    """
    ifacets = []
    rect = (0, 0, bound_height, bound_width)
    for f in facets:
        mpp = path.Path(f, closed=True)
        # Flag the facet as an "edge piece" if any vertex lies on or outside
        # the bounding box in either dimension.
        edge_piece = False
        for vert in f:
            vert_under_x = (vert[0] <= rect[0])
            vert_over_x = (vert[0] >= rect[3])
            vert_under_y = (vert[1] <= rect[1])
            vert_over_y = (vert[1] >= rect[2])
            edge_piece = (edge_piece | vert_under_x | vert_over_x |
                          vert_under_y | vert_over_y)
        clipped = mpp.clip_to_bbox(rect)
        point_arr = []
        for points, code in clipped.iter_segments():
            point_arr.append(points)
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin int is the documented replacement.
        pa = np.array(point_arr, int)
        ifacets.append([pa, edge_piece])
    return ifacets
# create a dictionary associated seed points (by id) with each facet
if __name__ == "__main__":
    # This module is a library of helpers; running it directly does nothing
    # useful.  (Fixed typo: "intened" -> "intended".)
    print("This file is not intended to be called directly.")
| [
2,
5387,
1366,
7704,
5768,
198,
11748,
299,
32152,
355,
45941,
220,
1303,
8076,
319,
1366,
422,
262,
289,
7568,
20,
2393,
290,
2939,
5270,
198,
11748,
19798,
292,
355,
279,
67,
220,
1303,
6060,
13431,
422,
299,
32152,
26515,
11,
2592,... | 2.819439 | 8,169 |
import numpy as np
from nose.tools import eq_
from knnimpute import (
knn_impute_few_observed,
knn_impute_with_argpartition,
knn_impute_optimistic,
knn_impute_reference,
)
from low_rank_data import XY_incomplete, missing_mask
| [
11748,
299,
32152,
355,
45941,
198,
6738,
9686,
13,
31391,
1330,
37430,
62,
198,
198,
6738,
638,
77,
11011,
1133,
1330,
357,
198,
220,
220,
220,
638,
77,
62,
11011,
1133,
62,
32146,
62,
672,
45852,
11,
198,
220,
220,
220,
638,
77,
... | 2.505155 | 97 |
# -*- coding: utf-8 -*-
import sys, os
from django.conf import settings
from django.core.management import call_command
# Name of the template directory (relative to this test-runner directory).
TEST_TEMPLATE_DIR = 'templates'
# Directory containing this runner script and its fixtures.
RUNTESTS_DIR = os.path.dirname(__file__)
# Parent directory, added to sys.path so the package under test is importable.
PREVIOUS_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, ".."))
sys.path.insert(0, PREVIOUS_DIR)

# Minimal Django settings for running the django_jinja test suite:
# in-memory-style sqlite DB, the apps under test, and pipeline fixtures.
test_settings = {
    'DATABASES': {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
        }
    },
    'INSTALLED_APPS': [
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.staticfiles',
        'django.contrib.messages',
        'django_jinja',
        'django_jinja_test',
        'pipeline',
        'django_jinja.contrib._pipeline',
    ],
    'ROOT_URLCONF': 'django_jinja_test.urls',
    'STATIC_URL': '/static/',
    'STATIC_ROOT': os.path.join(RUNTESTS_DIR, 'static'),
    'TEMPLATE_DIRS': (
        os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),
    ),
    'USE_I18N': True,
    'USE_TZ': True,
    'LANGUAGE_CODE': 'en',
    # NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name;
    # this suite presumably targets an older Django release.
    'MIDDLEWARE_CLASSES': (
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
    ),
    'MANAGERS': ("niwi@niwi.be",),
    'TEMPLATE_LOADERS': [
        'django_jinja.loaders.AppLoader',
        'django_jinja.loaders.FileSystemLoader',
    ],
    # Pipeline fixtures: one CSS and one JS bundle used by the tests.
    'PIPELINE_CSS': {
        'test': {
            'source_filenames': ["style.css"],
            'output_filename': "style.2.css",
        }
    },
    'PIPELINE_JS': {
        'test': {
            'source_filenames': ['script.js'],
            'output_filename': 'script.2.js',
        }
    }
}

if __name__ == '__main__':
    # Allow specific test labels on the command line; default to the whole
    # django_jinja_test app.
    test_args = sys.argv[1:]
    if not settings.configured:
        settings.configure(**test_settings)
    if not test_args:
        test_args = ['django_jinja_test']
    call_command("test", *test_args, verbosity=2)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
11,
28686,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
869,
62,
21812,
198,
198,
51,
6465,
62,... | 2.003865 | 1,035 |
"""Module with Kytos Events."""
from kytos.core.helpers import now
class KytosEvent:
    """Base Event class.

    Event data travels in the ``content`` attribute, which should be a
    dictionary.
    """

    def __init__(self, name=None, content=None):
        """Create an event to be published.

        Args:
            name (string): The name of the event. You should prepend it with
                the name of the napp.
            content (dict): Dictionary with any extra data for the event.
        """
        self.name = name
        self.content = {} if content is None else content
        self.timestamp = now()

    @property
    def destination(self):
        """Return the destination of KytosEvent."""
        return self.content.get('destination')

    def set_destination(self, destination):
        """Update the destination of KytosEvent.

        Args:
            destination (string): destination of KytosEvent.
        """
        self.content['destination'] = destination

    @property
    def source(self):
        """Return the source of KytosEvent."""
        return self.content.get('source')

    def set_source(self, source):
        """Update the source of KytosEvent.

        Args:
            source (string): source of KytosEvent.
        """
        self.content['source'] = source

    @property
    def message(self):
        """Return the message carried by the event, if it exists.

        Any OpenFlow message on the event is stored under the 'message'
        key of the 'content' attribute.

        Returns:
            A python-openflow message instance if it exists, None otherwise.
        """
        return self.content.get('message')
| [
37811,
26796,
351,
11118,
83,
418,
18715,
526,
15931,
198,
198,
6738,
479,
20760,
418,
13,
7295,
13,
16794,
364,
1330,
783,
628,
198,
4871,
11118,
83,
418,
9237,
25,
198,
220,
220,
220,
37227,
14881,
8558,
1398,
13,
628,
220,
220,
2... | 2.478621 | 725 |
import pytest
from warnings import catch_warnings
import numpy as np
from pandas.util import testing as tm
from pandas import Panel, date_range, DataFrame
| [
11748,
12972,
9288,
198,
6738,
14601,
1330,
4929,
62,
40539,
654,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
19798,
292,
13,
22602,
1330,
4856,
355,
256,
76,
198,
6738,
19798,
292,
1330,
18810,
11,
3128,
62,
9521,
11,
6060,
1... | 3.651163 | 43 |
from django.shortcuts import render, redirect
from op_tasks.models import Product, Dataset
from django.contrib.auth.decorators import login_required
from tasks import user_authorized
from django.contrib.auth.decorators import login_required
from base import LOGIN_URL
@login_required(login_url=LOGIN_URL)
@login_required(login_url=LOGIN_URL)
@login_required(login_url=LOGIN_URL)
@login_required(login_url=LOGIN_URL)
@login_required(login_url=LOGIN_URL)
@login_required(login_url=LOGIN_URL)
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
1034,
62,
83,
6791,
13,
27530,
1330,
8721,
11,
16092,
292,
316,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
1... | 2.94152 | 171 |
#!/usr/bin/python
import string, sys, re
from bb import *
# Python 2 script: compare several bb files pairwise and exit non-zero when
# any pair differs.  Usage: script.py <key1> <file1> [<key2> <file2> ...]
asz=len(sys.argv)-1
if ((asz&1)==1):
    # NOTE(review): the script warns on an odd argument count but does not
    # exit here — it falls through and processes the args anyway; confirm
    # whether an abort was intended.
    print 'even no. of args expected'
args=sys.argv[1:]
bss=[]
# Parse each (key, filename) pair with rbb() from the bb module.
for i in range(asz/2):
    # NOTE(review): file handles are never closed.
    f = open(args[i*2+1], 'r')
    bs=rbb(args[i*2], f)
    bss.append(bs)
wrong=False
# Compare every distinct pair; on mismatch print both set differences.
for i in range(len(bss)-1):
    for j in range(i+1,len(bss)):
        print 'comparing',args[i*2+1],args[j*2+1]
        if bss[i]!=bss[j]:
            print 'differ: ',args[i*2+1],args[j*2+1]
            print bss[i]-bss[j]
            print bss[j]-bss[i]
            wrong=True
# Exit status 100 signals a mismatch, 0 means all inputs agreed.
exit(100 if wrong else 0)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
4731,
11,
25064,
11,
302,
220,
198,
6738,
275,
65,
1330,
1635,
198,
198,
292,
89,
28,
11925,
7,
17597,
13,
853,
85,
13219,
16,
198,
198,
361,
14808,
292,
89,
5,
16,
8,
855,
... | 1.857143 | 294 |
import tensorflow as tf
from Feeder import Feeder
import Modules
# Debug/training script: builds the network, then repeatedly computes the
# sigmoid cross-entropy loss on one fixed pattern batch and applies Adam
# updates while printing per-variable gradient magnitudes.
feeder = Feeder(0, True)

# Symbolic input: variable-length sequence of 256-dim acoustic frames.
acoustics = tf.keras.layers.Input(
    shape= [None, 256],
    dtype= tf.float32
    )
net = Modules.Network()
# NOTE(review): `lo` is created but never used below.
lo = Modules.Loss()
logits, _, _ = net([acoustics, net.get_initial_state()])
model = tf.keras.Model(inputs= acoustics, outputs= logits)

# Third element of the feeder's pattern tuple holds the batch dict with
# 'acoustics' and 'semantics' keys (see usage below).
patterns = feeder.Get_Pattern()[2]

optimizer = tf.keras.optimizers.Adam(
    learning_rate= 0.001,
    beta_1= 0.9,
    beta_2= 0.999,
    epsilon= 1e-7
    )

# NOTE(review): trains indefinitely on the same batch — no stopping
# criterion; presumably intended for overfitting/sanity checks.
while True:
    with tf.GradientTape() as tape:
        logit = model(patterns['acoustics'])
        # Broadcast the per-utterance label across every timestep of the
        # logit sequence before computing the elementwise loss.
        label = tf.expand_dims(patterns['semantics'], axis = 1)
        label = tf.tile(label, [1, tf.shape(logit)[1], 1])
        loss = tf.nn.sigmoid_cross_entropy_with_logits(label, logit)
        loss = tf.reduce_mean(loss)
    print(loss)
    gradients = tape.gradient(
        loss,
        model.trainable_variables
        )
    # Print mean absolute gradient per variable for debugging.
    for gradient, variable in zip(gradients, model.trainable_variables):
        print(variable.name, '\t', tf.reduce_mean(tf.abs(gradient)))
    optimizer.apply_gradients([
        (gradient, variable)
        for gradient, variable in zip(gradients, model.trainable_variables)
        ])
11748,
11192,
273,
11125,
355,
48700,
198,
198,
6738,
18272,
263,
1330,
18272,
263,
198,
11748,
3401,
5028,
198,
198,
12363,
263,
796,
18272,
263,
7,
15,
11,
6407,
8,
198,
198,
330,
23968,
873,
796,
48700,
13,
6122,
292,
13,
75,
696... | 2.23689 | 553 |
#encoding=utf-8
from whoosh.analysis import RegexAnalyzer,LowercaseFilter,StopFilter,StemFilter
from whoosh.analysis import Tokenizer,Token
from whoosh.lang.porter import stem
import jieba
import re
# Stop words excluded from indexing: common English function words plus a
# few Chinese particles (的, 了, 和, 什么).
STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
                        'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
                        'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
                        'to', 'us', 'we', 'when', 'will', 'with', 'yet',
                        'you', 'your',u'的',u'了',u'和',u'什么'))

# Runs of CJK Unified Ideographs (U+4E00..U+9FA5) are accepted as tokens.
# The `ur` string prefix marks this module as Python 2 only.
accepted_chars = re.compile(ur"[\u4E00-\u9FA5]+")
| [
2,
12685,
7656,
28,
40477,
12,
23,
198,
6738,
508,
3768,
13,
20930,
1330,
797,
25636,
37702,
9107,
11,
31426,
7442,
22417,
11,
19485,
22417,
11,
1273,
368,
22417,
198,
6738,
508,
3768,
13,
20930,
1330,
29130,
7509,
11,
30642,
220,
198... | 1.931889 | 323 |
"""Tag model for Uchapishaji.
A tag is a low-level descriptive categorization for posts.
Tags are non-hierarchical and are meant to describe individual
posts that share a common theme.
"An Excellent Blog Engine"
Copyright (c) 2021 by William Ellison. This program is licensed under
the terms of the Do What the Fuck You Want To Public License, version 2
or later, as described in the COPYING file at the root of this
distribution.
William Ellison
<waellison@gmail.com>
October 2021
"""
from slugify import slugify
from . import db
from .WEPPost import WEPPost
# Association (join) table for the many-to-many relationship between posts
# and tags.  The composite primary key (post_id, tag_id) prevents the same
# tag from being attached to the same post twice.
post_tags = db.Table(
    "post_tags",
    db.Column("post_id", db.Integer, db.ForeignKey("posts.id"), primary_key=True),
    db.Column("tag_id", db.Integer, db.ForeignKey("tags.id"), primary_key=True),
)
| [
37811,
24835,
2746,
329,
471,
354,
499,
680,
26436,
13,
201,
198,
201,
198,
32,
7621,
318,
257,
1877,
12,
5715,
35644,
17851,
1634,
329,
6851,
13,
201,
198,
36142,
389,
1729,
12,
71,
959,
998,
605,
290,
389,
4001,
284,
6901,
1981,
... | 3.022642 | 265 |
from __future__ import absolute_import
from semantic_aware_models.models.classification.abstract_classifier import *
from semantic_aware_models.models.classification.bert_classifier import * | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
37865,
62,
9685,
62,
27530,
13,
27530,
13,
4871,
2649,
13,
397,
8709,
62,
4871,
7483,
1330,
1635,
198,
6738,
37865,
62,
9685,
62,
27530,
13,
27530,
13,
4871,
2649,
13,
... | 3.979167 | 48 |
"""
Contains classes and functions related to the authenticated Roblox account.
Not to be confused with users.py or the Account system.
"""
from datetime import date
from .utilities.shared import ClientSharedObject
class AccountProvider:
    """
    Provides methods that control the authenticated user's account.
    """

    def __init__(self, shared: ClientSharedObject):
        """
        Arguments:
            shared: The ClientSharedObject to be used when getting information on an account.
        """
        # Shared client state (authenticated session, URL generator, etc.).
        self._shared: ClientSharedObject = shared

    async def get_birthday(self) -> date:
        """
        Gets the authenticated user's birthday.

        Returns:
            The authenticated user's birthday.
        """
        birthday_response = await self._shared.requests.get(
            url=self._shared.url_generator.get_url("accountinformation", "v1/birthdate")
        )
        birthday_data = birthday_response.json()
        # The API returns the birthday split into month/day/year fields.
        return date(
            month=birthday_data["birthMonth"],
            day=birthday_data["birthDay"],
            year=birthday_data["birthYear"]
        )

    async def set_birthday(
            self,
            birthday: date,
            password: str = None
    ) -> None:
        """
        Changes the authenticated user's birthday.
        This endpoint *may* require your password, and requires an unlocked PIN.

        Arguments:
            birthday: A date object that represents the birthay to update the ClientSharedObject's account to.
            password: The password to the ClientSharedObject's account, this is required when changing the birthday.
        """
        await self._shared.requests.post(
            url=self._shared.url_generator.get_url("accountinformation", "v1/birthdate"),
            json={
                "birthMonth": birthday.month,
                "birthDay": birthday.day,
                "birthYear": birthday.year,
                # May be None when the endpoint does not demand a password.
                "password": password
            }
        )
| [
37811,
198,
198,
4264,
1299,
6097,
290,
5499,
3519,
284,
262,
44529,
3851,
75,
1140,
1848,
13,
198,
3673,
284,
307,
10416,
351,
2985,
13,
9078,
393,
262,
10781,
1080,
13,
198,
198,
37811,
198,
198,
6738,
4818,
8079,
1330,
3128,
198,
... | 2.453086 | 810 |
from pioneer_sdk import Pioneer
import time
# Poll interval for the distance sensor, in seconds.
delta = 0.1
# Scale factor from meters to LED intensity (0..255 range for distances
# up to 0.255 m per band).
m_to_led = 1000

# LED channel intensity bounds.
led_min = 0
led_max = 255

# Current RGB channel values sent to the drone's LEDs.
r = led_min
g = led_min
b = led_min

# Distance band thresholds in meters: red below `low`, yellow up to `mid`,
# green up to `high`, solid blue beyond.
low = 0.25
mid = 0.5
high = 0.75

if __name__ == '__main__':
    pioneer_mini = Pioneer(logger=False)
    curr_time = time.time()
    while True:
        # Rate-limit sensor reads to one every `delta` seconds.
        if time.time()-curr_time > delta:
            tof_data = pioneer_mini.get_dist_sensor_data()
            # None means no valid time-of-flight reading this cycle.
            if tof_data is not None:
                if tof_data <= low:
                    # Near range: red, brightness scales with distance.
                    r = tof_data*m_to_led
                    g = led_min
                    b = led_min
                elif low < tof_data <= mid:
                    # Mid range: yellow (red+green), scaled within the band.
                    r = (tof_data-low)*m_to_led
                    g = (tof_data-low)*m_to_led
                    b = led_min
                elif mid < tof_data <= high:
                    # Far range: green, scaled within the band.
                    r = led_min
                    g = (tof_data-mid) * m_to_led
                    b = led_min
                elif tof_data >= high:
                    # Beyond range: solid blue.
                    r = led_min
                    g = led_min
                    b = led_max
                pioneer_mini.led_control(r=r, g=g, b=b)
            curr_time = time.time()
| [
6738,
29570,
62,
21282,
74,
1330,
31437,
198,
11748,
640,
198,
67,
12514,
796,
657,
13,
16,
198,
76,
62,
1462,
62,
992,
796,
8576,
198,
198,
992,
62,
1084,
796,
657,
198,
992,
62,
9806,
796,
14280,
198,
198,
81,
796,
2957,
62,
1... | 1.611748 | 698 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_custom_entities_async.py
DESCRIPTION:
This sample demonstrates how to recognize custom entities in documents.
Recognizing custom entities is available as an action type through the begin_analyze_actions API.
For information on regional support of custom features and how to train a model to
recognize custom entities, see https://aka.ms/azsdk/textanalytics/customentityrecognition
USAGE:
python sample_recognize_custom_entities_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_LANGUAGE_ENDPOINT - the endpoint to your Language resource.
2) AZURE_LANGUAGE_KEY - your Language subscription key
3) CUSTOM_ENTITIES_PROJECT_NAME - your Language Language Studio project name
4) CUSTOM_ENTITIES_DEPLOYMENT_NAME - your Language deployed model name
"""
import os
import asyncio
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt — the async sample
    # body appears to have been elided; restore it before running.
    asyncio.run(main())
| [
2,
16529,
45537,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321,
13,
198,
2,
16529,
35937,
198,
1... | 3.885449 | 323 |
import configparser
import logging
import os
import re
import sys
from datetime import datetime
from enum import Enum
from pkg_resources import DistributionNotFound, get_distribution
from typing import List, Optional
from qaseio import client
from qaseio.client.models import (
TestCaseInfo,
TestRunCreate,
TestRunResultCreate,
TestRunResultStatus,
TestRunResultStepCreate,
TestRunResultUpdate,
)
# TypedDict is only available from Python 3.8; older interpreters fall back
# to a plain Dict alias (loses per-key typing but keeps annotations valid).
if sys.version_info >= (3, 8):
    from typing import TypedDict
else:
    from typing import Dict as TypedDict

try:
    # Change here if project is renamed and does not equal the package name
    dist_name = "qase-robotframework"
    __version__ = get_distribution(dist_name).version
except DistributionNotFound:
    # Running from a source tree without installed metadata.
    __version__ = "unknown"
finally:
    # Keep the module namespace clean of the one-shot helpers.
    del get_distribution, DistributionNotFound

logger = logging.getLogger("qase-robotframework")

# Map Robot Framework result strings to Qase test-run result statuses.
STATUSES = {
    "PASS": TestRunResultStatus.PASSED,
    "FAIL": TestRunResultStatus.FAILED,
    "SKIP": TestRunResultStatus.SKIPPED,
}
| [
11748,
4566,
48610,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
279,
10025,
62,
37540,
1330,
27484,
3673,
21077,
11,
651,... | 3.026706 | 337 |
"""The PowerCalc integration."""
from __future__ import annotations
import logging
from typing import Optional
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
CONF_FIXED,
CONF_LINEAR,
CONF_MAX_POWER,
CONF_MAX_WATT,
CONF_MIN_POWER,
CONF_MIN_WATT,
CONF_POWER,
CONF_STATES_POWER,
CONF_WATT,
DATA_CALCULATOR_FACTORY,
DOMAIN,
MODE_FIXED,
MODE_LINEAR,
MODE_LUT,
)
from .errors import StrategyConfigurationError, UnsupportedMode
from .light_model import LightModel
from .strategy_fixed import FixedStrategy
from .strategy_interface import PowerCalculationStrategyInterface
from .strategy_linear import LinearStrategy
from .strategy_lut import LutRegistry, LutStrategy
_LOGGER = logging.getLogger(__name__)
| [
37811,
464,
4333,
9771,
66,
11812,
526,
15931,
198,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
18931,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
1363,
562,
10167,
13,
16794,
364,
13,
774,
13886,
1330,
5995,
48902,
6... | 2.673401 | 297 |
import torch
def any_nan(tensor: torch.Tensor) -> bool:
    """Tell whether the tensor holds at least one NaN entry.

    Args:
        tensor (torch.Tensor): the input tensor

    Returns:
        bool: True when any element is NaN, False otherwise
    """
    nan_mask = torch.isnan(tensor)
    return bool(nan_mask.any())
def print_min_max(name, tensor):
    """Print a one-line summary of a tensor: min, max, NaN flag and shape.

    Args:
        name (str): tensor name
        tensor (torch.Tensor): the tensor
    """
    summary = (
        f"{name} | min {tensor.min()} | max {tensor.max()}"
        f" | hasnan {any_nan(tensor)} | shape {tensor.shape}"
    )
    print(summary)
def assert_allclose(tensor, value, tol=1e-5, message=""):
    """Assert that every element of the tensor is within `tol` of `value`.

    Args:
        tensor (torch.Tensor): the tensor
        value: target value(s)
        tol (float, optional): Defaults to 1e-5. tolerance
        message (str, optional): Defaults to "". displayed error message
    """
    deviation = (tensor - value).abs()
    within_tol = deviation < tol
    assert within_tol.all(), message
def assert_proba_distribution(probabilities, tol=1e-5):
    """Assert that the tensor is a probability distribution.

    The entries must sum to 1 (within `tol`) and none may be negative.

    Args:
        probabilities (torch.Tensor): the distribution
        tol (float, optional): Defaults to 1e-5. tolerance on the sum

    Raises:
        AssertionError: when the entries do not sum to 1 or any is negative.
    """
    # Compute the sum once instead of twice as in the original.
    total = probabilities.sum()
    sums_to_one = (total - 1.0).abs() < tol
    non_negative = (probabilities >= 0).all()
    # Fixed typo in the error message ("proability" -> "probability").
    assert sums_to_one and non_negative, (
        "tensor was expected to be a probability distribution "
        "(sum={}, negatives={})".format(total, (probabilities < 0).any())
    )
| [
11748,
28034,
628,
198,
4299,
597,
62,
12647,
7,
83,
22854,
25,
28034,
13,
51,
22854,
8,
4613,
20512,
25,
198,
220,
220,
220,
37227,
35561,
2081,
611,
262,
11192,
273,
4909,
257,
11013,
45,
628,
220,
220,
220,
943,
14542,
25,
198,
... | 2.517065 | 586 |
from collections.abc import Collection
from dataclasses import dataclass
from enum import Enum
from timeit import timeit
from typing import Any
from uuid import UUID, uuid4
import orjson # used in timeit # noqa
from apischema import PassThroughOptions, serialization_default, serialization_method
@dataclass
# orjson supports enums (by values), dataclasses and UUID natively
# Benchmark: serializing with apischema pass-through (orjson handles enums,
# dataclasses and UUID natively) vs. full apischema conversion.
# NOTE(review): the `Data` dataclass and `State` enum definitions are missing
# from this excerpt (only a dangling @dataclass remains above) — restore them
# before running.
pass_through = PassThroughOptions(
    any=True, enums=True, collections=True, dataclasses=True, types={UUID}
)
serialize_data = serialization_method(Data, pass_through=pass_through)
default = serialization_default()
serialize_data2 = serialization_method(Data)  # no pass_through

data = Data(uuid4(), State.ACTIVE, ["foo", "bar"], {"answer": 42})
assert serialize_data(data) is data  # data is passed through
# Timings below were measured on the author's machine.
print(timeit("orjson.dumps(serialize_data(data), default=default)", globals=globals()))
# 1.248541576
print(timeit("orjson.dumps(serialize_data2(data))", globals=globals()))
# 4.826223127 ~ 4x slower without pass_through
| [
6738,
17268,
13,
39305,
1330,
12251,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
640,
270,
1330,
640,
270,
198,
6738,
19720,
1330,
4377,
198,
6738,
334,
27112,
1330,
471,
27586,
11,... | 3.148607 | 323 |
import json
import numpy as np
import tensorflow as tf
import time
import os
from distutils.version import StrictVersion
import PIL
import argparse
import sys
# Make the sibling utils package importable when run from this directory.
# NOTE(review): relative sys.path hack — breaks if run from another cwd.
sys.path.append("../utils/")
from utils.visualization_utils import visualize_boxes_and_labels_on_image_array

if __name__ == "__main__":
    # NOTE(review): main() is not defined in this excerpt — the script body
    # appears to have been elided; restore it before running.
    main()
| [
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
640,
198,
11748,
28686,
198,
6738,
1233,
26791,
13,
9641,
1330,
520,
2012,
14815,
198,
11748,
350,
4146,
198,
11748,
1822,
29572,
198... | 3.173469 | 98 |
import gym
import universe
if __name__ == "__main__":
    # OpenAI Universe Flash game environment; one remote docker runtime.
    env = gym.make('flashgames.DuskDrive-v0')
    env.configure(remotes=1)
    observations = env.reset()
    while True:
        # Hold the up-arrow key in every remote (drive forward constantly).
        action = [[('KeyEvent', 'ArrowUp', True)] for obs in observations]
        observation, reward, done, info = env.step(action)
        env.render()
| [
11748,
11550,
198,
11748,
6881,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
17365,
796,
11550,
13,
15883,
10786,
34167,
19966,
13,
35,
17990,
24825,
12,
85,
15,
11537,
198,
220,
220,
220,
173... | 2.561538 | 130 |
# Generated by Django 3.2 on 2021-07-18 12:53
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
319,
33448,
12,
2998,
12,
1507,
1105,
25,
4310,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.892857 | 28 |
"""
The MIT License (MIT)
Copyright (c) 2016 Jake Lussier (Stanford University)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse, sys, os, pickle, json
from os.path import *
sys.path.append(dirname(dirname(dirname(abspath(__file__)))))
from kitchen import *
from inventory.item_classification.item_classifier import *
from utils.general_utils import *
from db.db_utils import *
# Python 2 script: evaluate a pickled item-classification model against the
# inventory database and dump predictions (plus visualization paths) to JSON.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='.')
    parser.add_argument("--input", help="Input model pickle.", required=True)
    parser.add_argument("--output", help="Output eval file.", required=True)
    parser.add_argument("--data", help="Input data.", default=DATA)
    parser.add_argument("--eval-args", help="Eval args.", nargs='*', default=[])
    parser.add_argument("--start-event-id", help="Start event id.", type=int)
    parser.add_argument("--end-event-id", help="End event id.", type=int)
    args = parser.parse_args()
    # Validate input/output extensions up front.
    assert(exists(args.input))
    assert(splitext(args.input)[1] == ".pkl")
    assert(splitext(args.output)[1] == ".json")
    # Load model.
    # NOTE(review): pickle.load on an arbitrary file executes code on load —
    # only evaluate pickles from trusted sources.
    print "Evaluating model %s"%args.input
    model = pickle.load(open(args.input, "r"))
    # Load database.
    con, cursor = dbConnect()
    items = getItems(cursor, start_event_id=args.start_event_id, end_event_id=args.end_event_id)
    graph = getFoodGraph(cursor)
    ancestors = getFoodAncestors(cursor, graph)
    # Eval.
    start_time = getEventTime(cursor, args.start_event_id)
    eval_args, eval_kwargs = parseArgs(args.eval_args)
    predicted_lbls, model_vis_paths = [], []
    food_names = getFoodNames(cursor)
    # Only keyword eval args are supported; positional args must be empty.
    assert(not eval_args)
    for (i, item) in enumerate(items):
        #print "%d of %d" % (i+1, len(items))
        # Skip items that arrived before the evaluation window starts.
        if item["arrivalevent_time"] < start_time: continue
        fid = model.classify(item, graph, ancestors, args.data, **eval_kwargs)
        item["food_id"], item["food_name"] = fid, food_names[fid]
        predicted_lbls.append(fid)
        model_vis_paths.append(model.vis_im_paths)
    # Save.
    json.dump([predicted_lbls, model_vis_paths], open(args.output, "w"))
| [
37811,
198,
464,
17168,
13789,
357,
36393,
8,
198,
198,
15269,
357,
66,
8,
1584,
14757,
406,
1046,
959,
357,
32140,
3841,
2059,
8,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
19... | 2.888067 | 1,081 |
import collections
import fcntl
import termios
import pathlib
| [
11748,
17268,
198,
11748,
277,
66,
429,
75,
198,
11748,
3381,
4267,
198,
11748,
3108,
8019,
628,
628,
628
] | 3.526316 | 19 |
import json
from django.core.serializers.json import DjangoJSONEncoder
import django_url_framework #for type hinting
import pprint
from abc import ABC, abstractmethod
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
default_charset = "utf8"
| [
11748,
33918,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
46911,
11341,
13,
17752,
1330,
37770,
40386,
27195,
12342,
198,
198,
11748,
42625,
14208,
62,
6371,
62,
30604,
1303,
1640,
2099,
9254,
278,
198,
198,
11748,
279,
4798,
198,
6738,
... | 3.353659 | 82 |