id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3214460 | <gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2022/2/12 7:09 下午
# @Author: zhoumengjie
# @File : shellclient.py
import logging
import subprocess
from wxcloudrun.bond.PageTemplate import PROJECT_DIR
log = logging.getLogger('log')
def generate_deploy_blog(markdown_file):
    """Run the blog generation/deploy shell script on *markdown_file*.

    The script's combined stdout/stderr is streamed line-by-line to the
    module logger.
    """
    script = PROJECT_DIR + '/wxcloudrun/common/blog.sh'
    # Pass the argument as a list without a shell so a file name containing
    # spaces or shell metacharacters cannot break or inject into the command.
    p = subprocess.Popen(args=[script, markdown_file],
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Iterating the pipe (instead of polling p.poll()) drains every line,
    # including output emitted after the process has already exited.
    for line in p.stdout:
        log.info(line)
    p.wait()
if __name__ == '__main__':
generate_deploy_blog('render.html') | StarcoderdataPython |
236572 | <gh_stars>0
"""
@package base
Base Page class implementation
It implements methods which are common to all the pages throughout the application
This class needs to be inherited by all the page classes
This should not be used by creating object instances
Example:
Class PageClassName(BasePage)
"""
from abc import abstractmethod
from base.selenium_driver import SeleniumDriver
from traceback import print_stack
from utilities.util import Util
class BasePage(SeleniumDriver):
    """Base Page class implementation.

    Implements methods which are common to all the pages throughout the
    application.  Inherit from it (``class PageClassName(BasePage)``);
    do not create instances of this class directly.
    """

    def __init__(self, driver):
        """
        Inits BasePage class
        Required Parameters:
            driver: WebDriver Object
        Optional Parameters:
            None
        Returns:
            None
        """
        super(BasePage, self).__init__(driver)
        self._validate_page(driver)
        self.driver = driver
        self.util = Util()
        # NOTE(review): BEConnection is not imported anywhere in this module,
        # so this line raises NameError at runtime -- confirm the intended
        # import before relying on this class.
        self.bkend = BEConnection()

    def verifyFlashMessage(self, textToVerify, timeout=6):
        """
        Validate the flash message after completing an action
        Required Parameters:
            textToVerify: Text on the flash message that needs to be verified
        Optional Parameters:
            timeout: seconds to wait for the flash message (default 6)
        Returns:
            Boolean
        """
        try:
            flashMessageElement = self.waitForElement(locator=".nmbl-flash-message-content", locatorType="css",
                                                      info="flash message", timeout=timeout)
            if flashMessageElement is not None:
                elementText = self.getText(flashMessageElement,
                                           "Getting text on flash message")
                result = self.util.verifyTextContains(elementText, textToVerify)
                flashMessageClose = self.getElementByCss(".nmbl-flash-message-close")
                self.clickElement(flashMessageClose, "Flash message 'X' close button", 1)
                return result
            else:
                # If element does not show up before timeout, return False
                return False
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
            # are not swallowed.
            self.log.error("Failed to get text on flash message")
            print_stack()
            return False

    def verifyModalMessage(self, textToVerify, textLocator, buttonToClickLocator, buttonInfo=""):
        """
        Validate the message on the modal and click Ok/Close button to close the modal
        Required Parameters:
            textToVerify: Text on the modal that needs to be verified
            textLocator: Locator of the message
            buttonToClickLocator: Locator of the button to click on modal
        Optional Parameters:
            buttonInfo: Description of the button used for logging
        Returns:
            Boolean
        """
        try:
            result = False
            elementPresent = self.isElementPresent(textLocator)
            if elementPresent:
                elementText = self.getText(self.getElement(textLocator), "Getting text on modal")
                result = self.util.verifyTextContains(elementText, textToVerify)
                if not result:
                    # Capture a screenshot for debugging before dismissing the modal.
                    self.util.screenShot("FAIL-Modal-Message-Verification")
                self.clickElement(self.getElement(buttonToClickLocator), buttonInfo)
            return result
        except Exception:
            self.log.error("Failed to get text on modal")
            print_stack()
            return False

    def verifyModalConfirmation(self, buttonLocator, info, locatorType="id"):
        """
        Verify the confirmation modal is present and click the 'Confirmation' button
        'Confirmation' button can be OK/Close/Delete
        Required Parameters:
            buttonLocator: Locator of the button on confirmation modal
            info: Information about the button, usually text on the button
        Optional Parameters:
            locatorType: Type of the locator(id(default), xpath, css, className, linkText)
        Returns:
            Boolean
        """
        try:
            elementPresent = self.isElementPresent(buttonLocator, locatorType)
            if elementPresent:
                return self.clickElement(self.getElement(buttonLocator), info)
            return False
        except Exception:
            self.log.error("Failed to find button on confirmation modal")
            print_stack()
            return False

    def verifyFieldErrorMessage(self, locator, textToVerify, locatorType="id"):
        """
        Validate a field-level error message
        Required Parameters:
            locator: Locator of the error message
            textToVerify: Text on the error message that needs to be verified
        Optional Parameters:
            locatorType: Type of the locator, default is 'id'
        Returns:
            Boolean
        """
        try:
            elementPresent = self.isElementPresent(locator, locatorType)
            if elementPresent:
                elementText = self.getText(self.getElement(locator),
                                           "Getting text on field error message")
                result = self.util.verifyTextContains(elementText, textToVerify)
                return result
            return False
        except Exception:
            self.log.error("Failed to get text on field error message")
            print_stack()
            return False

    def verifyPageTitle(self, titleToVerify):
        """
        Verify the page Title
        Required Parameters:
            titleToVerify: Title on the page that needs to be verified
        Optional Parameters:
            None
        Returns:
            Boolean
        """
        try:
            actualTitle = self.getBrowserTitle()
            return self.util.verifyTextContains(actualTitle, titleToVerify)
        except Exception:
            self.log.error("Failed to get page title")
            print_stack()
            return False

    @abstractmethod
    def _validate_page(self, driver):
        """Validate that *driver* is on the expected page.

        Subclasses must implement this; raise InvalidPageException when the
        wrong page is loaded.
        """
        pass

    """ Regions define functionality available through all page objects """
class InvalidPageException(Exception):
    """Raised when page validation determines the wrong page is loaded."""
| StarcoderdataPython |
5046512 | <reponame>prismai/cvat<filename>cvat/apps/engine/services.py<gh_stars>0
import os
import logging
import re
import subprocess
from django.conf import settings
# Util service functions
def get_pts_times(video_file: str) -> list:
    """Return the pts_time value (milliseconds, rounded) of every frame.

    Runs ffmpeg in debug-timestamp mode over *video_file* and parses the
    ``pts_time:`` values out of the filter lines of its output.
    """
    command = 'ffmpeg -i "{video_file}" -an -vsync 0 -debug_ts -f null - 2>&1 | grep filter'.format(
        video_file=video_file)
    completed = subprocess.run(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               shell=True)
    if completed.returncode != 0:
        logging.fatal(completed.stdout)
        raise Exception('Error during executing subprocess command')
    pattern = re.compile(r"(?<=pts_time\:)\S+")
    all_matches = (pattern.findall(line) for line in completed.stdout.decode().split('\n'))
    # Convert seconds to whole milliseconds for every line that carried a timestamp.
    return [round(float(matches[0]) * 1000) for matches in all_matches if matches]
def get_source_path(task_id: int, source: str) -> str:
    """
    Get path to video source.
    :param task_id: task ID.
    :type task_id: int.
    :param source: video source file name.
    :type source: str.
    :return: path to video source.
    :rtype: str.
    """
    upload_dir = os.path.join(settings.DATA_ROOT, str(task_id), '.upload')
    return os.path.join(upload_dir, source)
| StarcoderdataPython |
6549220 | #
# Este arquivo é parte do programa multi_agenda
#
# Esta obra está licenciada com uma
# Licença Creative Commons Atribuição 4.0 Internacional.
# (CC BY 4.0 Internacional)
#
# Para ver uma cópia da licença, visite
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# <NAME> - <EMAIL>
# https://www.linkedin.com/in/wellsampaio/
#
import datetime
"""
CREATE TABLE tiposContasModelos (
codModelo INTEGER,
descricao TEXT,
codTipoGasto INTEGER,
codCategoria INTEGER
);
"""
class TipoContaModelo:
    """In-memory model mirroring one row of the ``tiposContasModelos`` table.

    Columns: codModelo, descricao, codTipoGasto, codCategoria.  Every setter
    validates by converting its argument; when conversion fails the current
    value is kept unchanged.
    """

    # Class-level defaults; each setter rebinds an instance attribute.
    __codModelo = 0
    __descricao = ""
    __codTipoGasto = 0
    __codCategoria = 0

    def __init__(self):
        pass

    def povoarObj(self, array):
        """Populate every column from *array* (ordered like the table); return self."""
        self.setCodModelo(array[0])
        self.setDescricao(array[1])
        self.setCodTipoGasto(array[2])
        self.setCodCategoria(array[3])
        return self

    def getCodModelo(self):
        return int(self.__codModelo)

    def setCodModelo(self, codModelo):
        try:
            novo = int(codModelo)
        except ValueError:
            # Invalid input: keep the value we already hold.
            novo = self.getCodModelo()
        self.__codModelo = novo

    def getDescricao(self):
        return str(self.__descricao)

    def setDescricao(self, descricao):
        try:
            novo = str(descricao)
        except ValueError:
            novo = self.getDescricao()
        self.__descricao = novo

    def getCodTipoGasto(self):
        return int(self.__codTipoGasto)

    def setCodTipoGasto(self, codTipoGasto):
        try:
            novo = int(codTipoGasto)
        except ValueError:
            novo = self.getCodTipoGasto()
        self.__codTipoGasto = novo

    def getCodCategoria(self):
        return int(self.__codCategoria)

    def setCodCategoria(self, codCategoria):
        try:
            novo = int(codCategoria)
        except ValueError:
            novo = self.getCodCategoria()
        self.__codCategoria = novo
| StarcoderdataPython |
1856172 |
from micropsi_core.nodenet.stepoperators import CalculateFlowmodules
class CalculateNumpyFlowmodules(CalculateFlowmodules):
    """Step operator that evaluates the nodenet's flow-module graphs.

    Values produced by each flow module are kept in a ``flowio`` dict
    (node uid -> output name -> value) and fed forward along the module's
    ``outputmap``/``inputmap`` wiring in topological order.
    """

    def execute(self, nodenet, nodes, netapi):
        """Run one step of every requested flow graph of *nodenet*."""
        if not nodenet.flow_module_instances:
            return
        # Reset activity markers; they are set again for every node that runs.
        for uid, item in nodenet.flow_module_instances.items():
            item.is_part_of_active_graph = False
        flowio = {}
        if nodenet.worldadapter_instance:
            if 'datasources' in nodenet.worldadapter_flow_nodes:
                # Seed flowio with the worldadapter's datasource outputs.
                sourcenode = nodenet.get_node(nodenet.worldadapter_flow_nodes['datasources'])
                sourcenode.is_part_of_active_graph = True
                flowio[sourcenode.uid] = {}
                for key in sourcenode.outputs:
                    flowio[sourcenode.uid][key] = self.value_guard(nodenet.worldadapter_instance.get_flow_datasource(key), nodenet.worldadapter, key)
                    for target_uid, target_name in sourcenode.outputmap[key]:
                        # A datasource wired directly to the datatarget node is
                        # forwarded to the worldadapter immediately.
                        datatarget_uid = nodenet.worldadapter_flow_nodes.get('datatargets')
                        if target_uid == datatarget_uid:
                            nodenet.flow_module_instances[datatarget_uid].is_part_of_active_graph = True
                            nodenet.worldadapter_instance.add_to_flow_datatarget(target_name, flowio[sourcenode.uid][key])
        # Only graphs whose terminal node is requested get calculated.
        flow_nodes_to_calculate = set()
        for graph in nodenet.flow_graphs:
            if nodenet.get_node(graph[-1]).is_requested():
                [flow_nodes_to_calculate.add(uid) for uid in graph]
        for uid in nodenet.flow_toposort:
            if uid in flow_nodes_to_calculate:
                skip = False
                inputs = {}
                node = nodenet.get_node(uid)
                for in_name in node.inputs:
                    if not node.inputmap[in_name]:
                        raise RuntimeError("Missing Flow-input %s of node %s" % (in_name, str(node)))
                    source_uid, source_name = node.inputmap[in_name]
                    if flowio[source_uid][source_name] is None:
                        # An upstream node produced no value: propagate None
                        # outputs instead of calling this node.
                        # netapi.logger.debug("Skipping graph bc. empty inputs")
                        skip = True
                        break
                    else:
                        inputs["%s_%s" % (node.uid, in_name)] = flowio[source_uid][source_name]
                if skip:
                    flowio[node.uid] = {}
                    for out_name in node.outputs:
                        flowio[node.uid][out_name] = None
                    continue
                func = node.build()
                inputlist = [inputs["%s_%s" % (node.uid, name)] for name in node.inputs]
                result = func(*inputlist, netapi=nodenet.netapi, node=node, parameters=node.clone_parameters())
                # Normalize a single-output scalar result to a one-item list.
                if len(node.outputs) == 1 and not isinstance(result, list):
                    result = [result]
                node.is_part_of_active_graph = True
                for index, out_name in enumerate(node.outputs):
                    if node.uid not in flowio:
                        flowio[node.uid] = {}
                    if 'datatargets' in nodenet.worldadapter_flow_nodes:
                        targetnode = nodenet.get_node(nodenet.worldadapter_flow_nodes['datatargets'])
                        # NOTE(review): the loop variable ``uid`` below shadows the
                        # outer toposort ``uid``; harmless here because the outer
                        # loop rebinds it, but confirm before refactoring.
                        for uid, name in node.outputmap[out_name]:
                            if uid == targetnode.uid and node.uid != nodenet.worldadapter_flow_nodes.get('datasources', False):
                                targetnode.is_part_of_active_graph = True
                                nodenet.worldadapter_instance.add_to_flow_datatarget(name, result[index])
                    flowio[node.uid][out_name] = self.value_guard(result[index], func, out_name) if result is not None else None
| StarcoderdataPython |
4834906 | <reponame>sethgld/userbotseth
"""No Logic Pligon for @PepeBot
\nCoding by Legend @NeoMatrix90
\nType .logic to see many logical fact
"""
from telethon import events
import asyncio
import random
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern=f"logic", allow_sudo=True))
@borg.on(events.NewMessage(pattern=r"\.logic", outgoing=True))
async def _(event):
    """Replace the ``.logic`` message with a random (Italian) quote about logic."""
    if event.fwd_from:
        return
    await event.edit("`Attendi 2sec sto pensando 🤔...`")
    await asyncio.sleep(2)
    quotes = {
        1: "`La logica è una cosa e il buon senso un’altra.`",
        2: "`Ama la vita più della sua logica.`",
        3: "`La logica vi porterà da A a B. L’immaginazione vi porterà dappertutto.`",
        4: "`Le relazioni tra uomini e donne non si possono spiegare mediante la logica.`",
        5: "`Nella realtà non avviene nulla che corrisponda rigorosamente alla logica.`",
        6: "`Una mente tutta logica è come un coltello tutto lama. Fa sanguinare la mano che lo usa.`",
        7: "`Ciò che sfugge alla logica è quanto v’è di più prezioso in noi stessi.`",
    }
    # randint is inclusive on both ends; the original randrange(1, 7) could
    # never produce 7, so the last quote was unreachable.
    x = random.randint(1, 7)
    await event.edit(quotes[x])
| StarcoderdataPython |
5039846 | <filename>src/pipelining/pipes/pipe_load_all_gold_data.py<gh_stars>0
from etl.gold_data_manager import GoldDataContainer
from pipelining.pipe_root import ConfigRoot
from etl import maxqdata_manager, gold_data_transform_rules, gold_data_manager
from pipelining import data_flow_registry
from IPython import embed
import main
# One config subclass per registered gold-data file (g1..g10); each one only
# overrides the JSON path looked up in the data flow registry.
class ConfigRead1(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g1"]["path"]
class ConfigRead2(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g2"]["path"]
class ConfigRead3(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g3"]["path"]
class ConfigRead4(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g4"]["path"]
class ConfigRead5(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g5"]["path"]
class ConfigRead6(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g6"]["path"]
class ConfigRead7(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g7"]["path"]
class ConfigRead8(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g8"]["path"]
class ConfigRead9(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g9"]["path"]
class ConfigRead10(ConfigRoot):
    gold_data_json_path = data_flow_registry.gold_data["g10"]["path"]
def run():
    """Load every registered gold data container (g1 through g10), in order."""
    configs = (ConfigRead1, ConfigRead2, ConfigRead3, ConfigRead4, ConfigRead5,
               ConfigRead6, ConfigRead7, ConfigRead8, ConfigRead9, ConfigRead10)
    for config in configs:
        main.load_gold_data(config)
| StarcoderdataPython |
3572243 | """
Universal Office Converter - Convert between any document format supported by LibreOffice/OpenOffice.
See:
https://github.com/dagwieers/unoconv
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.adoc'), encoding='utf-8') as f:
    long_description = f.read()

setup(name = "unoconv",
      version = "0.7",
      author = "<NAME>",
      author_email = "<EMAIL>",
      url = "https://github.com/dagwieers/unoconv",
      description = "Universal Office Converter - Convert between any document format supported by LibreOffice/OpenOffice.",
      # Previously ``long_description`` was read from the README but never
      # passed to setup(); include it so PyPI shows the full description.
      long_description = long_description,
      scripts = ["unoconv"],
      # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers = [
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'Intended Audience :: Education',
          'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 3',
          'Topic :: Documentation',
          'Topic :: Office/Business :: Office Suites',
          'Topic :: Utilities',
      ]
      )
| StarcoderdataPython |
5118725 | import os
from urllib import parse
from flask import Flask, redirect, request, make_response
from flask_cors import CORS
from aim.web.utils import Singleton
from aim.web.app.config import config
from aim.web.app.db import Db
# from services.executables.manager import Executables as ExecutablesManager
class App(metaclass=Singleton):
    """Flask application singleton for the Aim web UI.

    The Singleton metaclass ensures the app is constructed only once; the
    built Flask instance is published as the class attribute ``api``.
    """
    api = None
    executables_manager = None

    @classmethod
    def __init__(cls, test_config=None):
        # NOTE(review): ``__init__`` declared as a classmethod is unusual; it
        # appears to rely on the Singleton metaclass invoking it -- confirm.
        api = Flask(__name__, static_folder='html')
        api.url_map.strict_slashes = False

        @api.before_request
        def clear_trailing():
            # Redirect any URL with a trailing slash to its canonical form.
            rp = request.path
            if rp != '/' and rp.endswith('/'):
                return redirect(rp.rstrip('/'))

        @api.before_request
        def set_timezone():
            # Timezone is taken from a client cookie when present.
            tz = request.cookies.get('__AIMDE__:TIMEZONE')
            if tz:
                request.tz = parse.unquote(tz)
            else:
                # Set default timezone to GMT
                request.tz = 'gmt'

        CORS(api,
             origins='*',
             methods=['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'HEAD'],
             allow_headers=['Origin', 'X-Requested-With',
                            'Content-Type', 'Accept', 'Authorization'],
             supports_credentials=True,
             max_age=86400,
             vary_header=True)

        # check environment variables to see which config to load
        env = os.environ.get('FLASK_ENV', 'prod')
        # load config
        if test_config:
            api.config.from_mapping(**test_config)
        else:
            api.config.from_object(config[env])

        Db(api)

        # import and register blueprints (imported here to avoid import cycles)
        from aim.web.app.views import general_bp, serve_wrong_urls
        from aim.web.app.projects.views import projects_bp
        from aim.web.app.commits.views import commits_bp
        from aim.web.app.executables.views import executables_bp
        from aim.web.app.tags.views import tags_bp
        api.register_blueprint(general_bp)
        api.register_blueprint(projects_bp, url_prefix='/api/v1/projects')
        api.register_blueprint(commits_bp, url_prefix='/api/v1/commits')
        api.register_blueprint(executables_bp, url_prefix='/api/v1/executables')
        api.register_blueprint(tags_bp, url_prefix='/api/v1/tags')
        api.register_error_handler(404, serve_wrong_urls)

        cls.api = api

        # Disable executables module
        # if cls.executables_manager is not None:
        #     cls.executables_manager.stop()
        # if cls.executables_manager is None:
        #     cls.executables_manager = ExecutablesManager()
        #     cls.executables_manager.start()
| StarcoderdataPython |
3446504 | #!/usr/bin/env python
"""
Dumps a DCD file to PDBs
"""
from __future__ import print_function, division
import argparse
import logging
import os
import sys
import numpy as np
import mdtraj as md
import simtk.openmm.app as app # necessary for topology reading from mmCIF
# Format logger
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
def check_file(fpath):
    """Return the absolute path of *fpath* if it exists and is a regular file.

    Raises IOError otherwise.
    """
    if not os.path.isfile(fpath):
        raise IOError('File not found/readable: {}'.format(fpath))
    return os.path.abspath(fpath)
if __name__ == '__main__':
    ap = argparse.ArgumentParser(description=__doc__)
    ap.add_argument('topology', help='Topology file corresponding to DCD')
    ap.add_argument('trajectory', help='DCD trajectory file')
    ap.add_argument('--output', default=None,
                    help='Root for naming PDB files: root + _ + frame + .pdb (e.g. trj_1.pdb)')
    ap.add_argument('--stride', default=1, type=int,
                    help='Read only i-th frame. Default: reads all (i=1)')
    cmd = ap.parse_args()

    # Read/Parse Topology: mmCIF goes through OpenMM, anything else through mdtraj.
    topology_fpath = check_file(cmd.topology)
    if topology_fpath.endswith('cif'):
        structure = app.PDBxFile(topology_fpath)
        topology = md.Topology.from_openmm(structure.topology)
    else:
        structure = md.load(cmd.topology)
        topology = structure.topology
    logging.info('Read topology from file: {}'.format(topology_fpath))

    # Read trajectory
    trajectory_fpath = check_file(cmd.trajectory)
    logging.info('Reading trajectory from file: {}'.format(trajectory_fpath))
    trj = md.load(trajectory_fpath, top=topology,
                  stride=cmd.stride)

    logging.info('Removing PBCs and imaging molecules')
    topology.create_standard_bonds()
    anchors = topology.find_molecules()
    # image_molecules requires bonds sorted by first-atom index.
    sorted_bonds = sorted(topology.bonds, key=lambda x: x[0].index)
    sorted_bonds = np.asarray([[b0.index, b1.index] for b0, b1 in sorted_bonds])
    trj.image_molecules(inplace=True, anchor_molecules=anchors, sorted_bonds=sorted_bonds, make_whole=True)

    # Write PDBs, one file per frame, with zero-padded frame numbers.
    logging.info('Writing {} PDB files of {} atoms'.format(trj.n_frames, trj.n_atoms))
    froot = 'frame' if cmd.output is None else cmd.output
    n_frames = len(str(len(trj)))  # 1: 1, 10: 2, 100: 3, ...
    for idx, frame in enumerate(trj, start=1):
        frame_name = froot + '_' + str(idx).zfill(n_frames) + '.pdb'
        frame.save(frame_name, force_overwrite=True)
        logging.info('Wrote frame {}/{} to \'{}\''.format(idx, trj.n_frames, frame_name))
    logging.info('Done')
| StarcoderdataPython |
66920 | #Developer by Bafomet
# -*- coding: utf-8 -*-
import requests
from settings import shodan_api
# color
# ANSI escape sequences used for colored terminal output.
R = "\033[31m"  # red foreground
G = "\033[1;34m"  # bright blue foreground (despite the name)
C = "\033[1;32m"  # bright green foreground
W = "\033[0m"  # reset to default ("white")
O = "\033[45m"  # purple (magenta) background
def honeypot(inp):
    """Query Shodan's honeyscore API for *inp* (an IP address) and print
    the probability that the host is a honeypot.
    """
    url = f"https://api.shodan.io/labs/honeyscore/{inp}"
    try:
        result = requests.get(url, params={"key": shodan_api}).text
    except requests.RequestException:
        # Narrowed from a bare ``except``: only network/HTTP failures mean
        # "no information available"; anything else should propagate.
        print(f"\nНет доступной информации!")
        return
    if "error" in result or "404" in result:
        print("IP не найден")
        return
    elif result:
        # The API returns a numeric score; scaled x10 for display as in the
        # original UI -- presumably a percentage, confirm against the API docs.
        probability = str(float(result) * 10)
        print(f"{G} [ + ]{R} Вероятность что это Honeypot : {probability}%")
        print()
        print(f"{G} На Shodan проверил, там тоже пусто.")
    else:
        print(" Что-то пошло не так ")
| StarcoderdataPython |
8197047 | import picamera
import picamera.array
import png
import math
from pixel_object import PixelObject
"""
Image processor that can find the edges in a PNG image captured by a PiCamera.
"""
class ImageProcessor:
    """Capture PiCamera frames and locate connected edge-pixel objects.

    Pipeline: raw YUV luma -> horizontal edges -> vertical edges -> fused
    magnitude -> black/white threshold -> dilation -> connected-component
    grouping into PixelObject instances.

    NOTE(review): Python 2 code -- ``neighbors`` uses tuple parameter
    unpacking, and rows are built by assigning into ``range()`` results
    (lists under Python 2).  Intermediate images are written under ``img/``.
    """
    def __init__(self, res_width=96, res_height=96):
        # TODO propagate configurable resolution through '96' logic below
        self.camera = picamera.PiCamera(resolution=(res_width, res_height))
        # Camera is flipped both ways, presumably to compensate its mounting
        # orientation -- confirm against the hardware setup.
        self.camera.hflip = True
        self.camera.vflip = True
        self.res_width = res_width
        self.res_height = res_height
        self.stream = picamera.array.PiYUVArray(self.camera)
        self.pixelObjList = []
        # Monotonically increasing id source for PixelObject instances.
        self.object_id_center = 0
        self.pixelObjList.append(PixelObject(self.next_obj_id()))
        # Statistics about the largest detected object.
        self.max_pixel_count = 0
        self.largest_object_id = 0
        self.largest_X = 0
        self.largest_Y = 0
        self.filename = ''
    def close(self):
        """Reset all detection state and release the camera."""
        print('[ImageProcessor.close] flushing')
        self.pixelObjList = []
        self.object_id_center = 0
        self.max_pixel_count = 0
        self.largest_object_id = 0
        self.largest_X = 0
        self.largest_Y = 0
        self.camera.close()
    def next_obj_id(self):
        """Return a fresh object id (increments the internal counter)."""
        self.object_id_center += 1
        return self.object_id_center
    def capture_frame(self):
        """Capture one YUV frame, save the raw luma image and run the pipeline."""
        self.stream = picamera.array.PiYUVArray(self.camera)
        self.camera.capture(self.stream, 'yuv')
        # NOTE(review): _set_led is a private picamera API -- confirm it is
        # still available in the deployed picamera version.
        self.camera._set_led(True)
        self.pixelObjList = []
        self.object_id_center = 0
        self.pixelObjList.append(PixelObject(self.next_obj_id()))
        rows = []
        for _ in range(self.res_height):
            rows.append(range(self.res_width))
        # flip image horizontally
        for j, j_ in enumerate(range(self.res_width-1, -1, -1)):
            # now flip vertically
            for i, i_ in enumerate(range(self.res_height-1, -1, -1)):
                # [0] selects the Y (luma) channel of the YUV pixel.
                rows[j][i] = self.stream.array[j_][i_][0]
        self.filename = self.save_PNG('raw.png', rows)
        self.spread_white_pixels(
            self.make_black_and_white(
                self.fuse_horizontal_and_vertical(
                    self.get_horizontal_edges(rows),
                    self.get_vertical_edges(rows)))
        )
    def get_horizontal_edges(self, raw_rows):
        # get horizontal edges: absolute difference with the right neighbour
        # (left neighbour on the last column).
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if i + 1 <= 95:
                    rows[j][i] = self.difference(raw_rows[j][i],
                                                 raw_rows[j][i + 1])
                else:
                    rows[j][i] = self.difference(raw_rows[j][i],
                                                 raw_rows[j][i - 1])
        self.save_PNG('processed_1.png', rows)
        return rows
    def get_vertical_edges(self, rawrows):
        # get vertical edges: absolute difference with the pixel below
        # (above on the last row).
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if j + 1 <= 95:
                    rows[j][i] = self.difference(
                        rawrows[j][i], rawrows[j + 1][i])
                else:
                    rows[j][i] = self.difference(
                        rawrows[j][i], rawrows[j - 1][i])
        self.save_PNG('processed_2.png', rows)
        return rows
    def fuse_horizontal_and_vertical(self, hrows, vrows):
        # fuse the horizontal edge-image with the vertical edge-image
        # (per-pixel Euclidean magnitude, clamped to 255).
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                rows[j][i] = self.fusion(hrows[j][i], vrows[j][i])
        self.save_PNG('processed_3.png', rows)
        return rows
    def make_black_and_white(self, edge_rows):
        # make the image dual in color (black and white)
        threshold = 18
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if edge_rows[j][i] >= threshold:
                    rows[j][i] = 255
                else:
                    rows[j][i] = 0
        self.save_PNG('processed_4.png', rows)
        return rows
    def spread_white_pixels(self, bw_rows):
        # make all the white pixels spread out one more pixel (dilation with
        # a 3x3 neighbourhood), then hand off to object identification.
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if bw_rows[j][i] == 255:
                    tmp_list = self.neighbors((i, j), 96, 96)
                    for ent in tmp_list:
                        tmp_x, tmp_y = ent
                        rows[tmp_y][tmp_x] = 255
                else:
                    rows[j][i] = 0
        self.save_PNG('processed_4_5.png', rows)
        self.identify_pixel_objects(rows)
    def identify_pixel_objects(self, bw_rows):
        # make PixelObjects when pixels are direct 8-neighbours of each other
        for j in range(96):
            for i in range(96):
                if bw_rows[j][i] == 255:  # if the pixel is white
                    tmp_list = []
                    for ent in self.neighbors((i, j), 96, 96):
                        tmp_x, tmp_y = ent
                        if bw_rows[tmp_y][tmp_x] == 255:  # if pixel is white
                            tmp_list.append(ent)
                    # print tmp_list
                    flag = False
                    for obj in self.pixelObjList:
                        # make a new PixelObj whenever a Pixel isn't connected
                        # to an object
                        if obj.check_xy_set(tmp_list) is True:
                            flag = True
                    if flag is False:
                        self.pixelObjList.append(
                            PixelObject(self.next_obj_id()))
                    # NOTE(review): check_xy_set appears to attach the pixels
                    # as a side effect -- confirm against PixelObject.
                    for obj in self.pixelObjList:
                        obj.check_xy_set(tmp_list)
        for obj in self.pixelObjList:
            rows = []
            for _ in range(96):
                rows.append(range(96))
            for j in range(96):
                for i in range(96):
                    if (i, j) in obj.XYset:
                        rows[j][i] = 255
                    else:
                        rows[j][i] = 0
            # self.save_PNG(string.join([str(obj.id_), 'processed_5.png'], ''), rows)
        self.merge_overlapping_pixel_objects()
    def merge_overlapping_pixel_objects(self):
        # merge objects with overlapping x-y tuples together
        center = 0
        max_entry = len(self.pixelObjList) - 1
        # old_len = len(self.pixelObjList)
        # flag = False
        while center < max_entry:
            tmp = self.check_overlap(center)
            if tmp is False:
                center += 1
            else:
                # Fold the overlapping object into the current one and drop it.
                for ent in self.pixelObjList[tmp].XYset:
                    self.pixelObjList[center].XYset.add(ent)
                del self.pixelObjList[tmp]
                max_entry = len(self.pixelObjList) - 1
        for obj in self.pixelObjList:
            object_pixels = obj.count_pixel()
            # if self.max_pixel_count == 0:
            #     results['max_pixel_count'] = object_pixels
            #     results['object_id'] = obj.id_
            if object_pixels > self.max_pixel_count:
                # Track the largest object and its mean position.
                self.max_pixel_count = object_pixels
                self.largest_object_id = obj.id_
                x, y = obj.compute_mean_coord()
                self.largest_X = x
                self.largest_Y = y
            # print obj.XYset
            rows = []
            for _ in range(96):
                rows.append(range(96))
            for j in range(96):
                for i in range(96):
                    if (i, j) in obj.XYset:
                        rows[j][i] = 255
                    else:
                        rows[j][i] = 0
            # self.save_PNG(string.join([str(obj.id_), 'pixpro.png'], ''), rows)
        # print("nmbr of pre Objects:{0}".format(old_len))
        self.new_one_pixel_png()
    def new_one_pixel_png(self):
        """
        make a new png with 1 pixel per object at their respective center
        :return:
        """
        tmp_pos_list = []
        for obj in self.pixelObjList:
            print("X:{0} Y:{1}".format(obj.coord_x, obj.coord_y))
            tmp_pos_list.append((obj.coord_x, obj.coord_real_y))
        rows = []
        for _ in range(96):
            rows.append(range(96))
        for j in range(96):
            for i in range(96):
                if (i, j) in tmp_pos_list:
                    rows[j][i] = 255
                else:
                    rows[j][i] = 0
        self.save_PNG('PixelObjectPos.png', rows)
    def check_overlap(self, counter):
        """
        check for overlapping x-y tuples in sets in 2 distinct objects
        return the listNumber for the object with overlapping pixels if there
        are overlapping pixels return False if not
        """
        max_entry = len(self.pixelObjList) - 1
        for ent1 in self.pixelObjList[counter].XYset:
            for i in range(counter + 1, max_entry + 1, 1):
                for ent2 in self.pixelObjList[i].XYset:
                    if ent1 == ent2:
                        return i
        return False
    def save_PNG(self, filename, rws):
        """Write *rws* (rows of greyscale values) to img/<filename>; return the path."""
        # print("[save_PNG] filename:{0} rws:{1}".format(filename, rws))
        name = 'img/{0}'.format(filename)
        f = open(name, 'wb')
        w = png.Writer(96, 96, greyscale=True)
        w.write(f, rws)
        f.close()
        return name
    def difference(self, a, b):
        # Absolute difference of two pixel values.
        if a >= b:
            return a - b
        else:
            return b - a
    def fusion(self, a, b):
        # Euclidean magnitude of the two gradient components, clamped to 255.
        a_ = int(a)
        b_ = int(b)
        tmp = round(math.sqrt(a_ * a_ + b_ * b_))
        if tmp <= 255:
            return int(tmp)
        else:
            return 255
    def neighbors(self, (x, y), max_x, max_y):
        # 3x3 neighbourhood of (x, y), including the pixel itself, clipped to
        # the image bounds.  (Tuple parameter unpacking: Python 2 only.)
        n_list = []
        xx, yy = (x, y)
        for y_ in range(-1, 2, 1):
            for x_ in range(-1, 2, 1):
                res_x = xx + x_
                res_y = yy + y_
                if max_x > res_x >= 0 and max_y > res_y >= 0:
                    n_list.append((res_x, res_y))
        return n_list
6657100 | <filename>trough/_download.py
import numpy as np
from datetime import datetime, timedelta
import math
import pathlib
import socket
import abc
import ftplib
from urllib import request
import re
import json
import functools
import logging
import warnings
try:
import h5py
from madrigalWeb import madrigalWeb
import bs4
except ImportError as imp_err:
warnings.warn(f"Packages required for recreating dataset not installed: {imp_err}")
from trough.exceptions import InvalidConfiguration
from trough._arb import parse_arb_fn
logger = logging.getLogger(__name__)
def _doy(date):
return math.floor((date - datetime(date.year, 1, 1)) / timedelta(days=1)) + 1
class Downloader(abc.ABC):
    """Abstract base for dataset downloaders.

    Keeps a JSON cache of server file listings (``file_list.json``) inside
    the download directory.  Subclasses implement listing, downloading and
    verification; :meth:`download` orchestrates them with one retry pass
    for corrupted files.
    """

    def __init__(self, download_dir: pathlib.Path, *args, **kwargs):
        self.download_dir = pathlib.Path(download_dir)
        self.cache_fn = self.download_dir / "file_list.json"
        self.cache = {}
        if self.cache_fn.exists():
            with open(self.cache_fn) as f:
                self.cache = json.load(f)

    @abc.abstractmethod
    def _get_file_list(self, start_date, end_date):
        """get list of files on server

        Parameters
        ----------
        start_date: datetime
        end_date: datetime

        Returns
        -------
        dict[str, list[str]]
            dictionary mapping date id to file list for that date
        """
        ...

    @abc.abstractmethod
    def _download_files(self, files):
        """download list of server files

        Parameters
        ----------
        files: list[str]
            server file paths

        Returns
        -------
        list[str]
            local file paths
        """
        ...

    @abc.abstractmethod
    def _verify_files(self, local_files, files):
        """verify a list of local files, return server file paths corresponding to corrupted local files

        Parameters
        ----------
        local_files: list[str]
        files: list[str]

        Returns
        -------
        list[str]
            bad files (server)
        """
        ...

    def download(self, start_date: datetime, end_date: datetime):
        """Runs download routine

        Parameters
        ----------
        start_date: datetime
        end_date: datetime
        """
        # make sure download dir exists
        self.download_dir.mkdir(parents=True, exist_ok=True)
        logger.info("collecting file information...")
        # get dictionary mapping some id (e.g. date) to file lists
        download_dict = self._get_file_list(start_date, end_date)
        # update and save file list
        self.cache.update(**download_dict)
        with open(self.cache_fn, 'w') as f:
            json.dump(self.cache, f)
        # Flatten all per-date file lists.  Unlike functools.reduce without an
        # initializer, this also handles an empty download_dict instead of
        # raising TypeError.
        server_files = [path for file_list in download_dict.values() for path in file_list]
        logger.info(f"downloading {len(server_files)} files")
        # download files
        local_files = self._download_files(server_files)
        # make sure all files open and have data
        logger.info("verifying...")
        bad_server_files = self._verify_files(local_files, server_files)
        logger.info(f"{len(bad_server_files)} bad files")
        if bad_server_files:
            # One retry pass for corrupted downloads; report what still fails.
            fixed_local_files = self._download_files(bad_server_files)
            still_bad_server_files = self._verify_files(fixed_local_files, bad_server_files)
            for file in still_bad_server_files:
                logger.error(f"unable to properly download {file}")
class MadrigalTecDownloader(Downloader):
    def __init__(self, download_dir, user_name, user_email, user_affil):
        """Downloader for Madrigal GNSS TEC data.

        Madrigal requires a user name, email and affiliation for downloads;
        raises InvalidConfiguration when any of them is missing.
        """
        super().__init__(download_dir)
        if None in [user_name, user_email, user_affil]:
            raise InvalidConfiguration("To download from Madrigal, user name, email, and affiliation must be specified")
        self.user_name = user_name
        self.user_email = user_email
        self.user_affil = user_affil
        logger.info("connecting to server")
        self.server = madrigalWeb.MadrigalData("http://cedar.openmadrigal.org")
    def _get_tec_experiments(self, start_date: datetime, end_date: datetime):
        """Return Madrigal experiments for instrument 8000 in the date window."""
        logger.info(f"getting TEC experiments between {start_date} and {end_date}")
        experiments = self.server.getExperiments(
            8000,  # instrument code -- presumably the world-wide GNSS TEC instrument; confirm
            start_date.year, start_date.month, start_date.day, start_date.hour, start_date.minute, start_date.second,
            end_date.year, end_date.month, end_date.day, end_date.hour, end_date.minute, end_date.second,
        )
        return experiments
def _download_file(self, tec_file, local_path, retries=3):
logger.info(f"downloading TEC file {tec_file} to {local_path}")
for retry in range(retries):
try:
if pathlib.Path(local_path).exists():
logger.info(f"already exists: {local_path}")
else:
return self.server.downloadFile(
tec_file, local_path, self.user_name, self.user_email, self.user_affil, 'hdf5'
)
except(socket.timeout, TimeoutError):
logger.error(f'Failure downloading {tec_file} because it took more than allowed number of seconds')
self.server = madrigalWeb.MadrigalData("http://cedar.openmadrigal.org")
def _download_files(self, files):
local_files = []
for i, file in enumerate(files):
if len(files) > 100 and not (i % (len(files) // 100)):
logger.info(f"{round(100 * i / len(files))}% finished")
server_path = pathlib.PurePosixPath(file)
local_path = str(self.download_dir / f"{server_path.stem}.hdf5")
local_files.append(local_path)
self._download_file(file, local_path)
return local_files
def _get_file_list(self, start_date, end_date):
logger.info("Getting file list...")
experiments = sorted(self._get_tec_experiments(start_date - timedelta(days=1), end_date + timedelta(days=1)))
logger.info(f"found {len(experiments)} experiments")
tec_files = {}
for i, experiment in enumerate(experiments):
if len(experiments) > 100 and not (i % (len(experiments) // 100)):
logger.info(f"{round(100 * i / len(experiments))}% finished")
cache_key = str(experiment.id)
if cache_key in self.cache:
files = self.cache[cache_key]
else:
experiment_files = self.server.getExperimentFiles(experiment.id)
files = [exp.name for exp in experiment_files if exp.kindat == 3500]
tec_files[cache_key] = files
return tec_files
@staticmethod
def _verify_local_file(local_file):
try:
with h5py.File(local_file, 'r') as f:
tec = f['Data']['Array Layout']['2D Parameters']['tec'][()]
timestamps = f['Data']['Array Layout']['timestamps'][()]
except Exception as e:
logger.warning(f"bad local file: {local_file}, error: {e}")
return False
return (timestamps.shape[0] > 10) and (np.sum(np.isfinite(tec)) > 100)
def _verify_files(self, local_files, server_files):
bad_server_files = [
server_file for (server_file, local_file) in zip(server_files, local_files)
if not self._verify_local_file(local_file)
]
return bad_server_files
class OmniDownloader(Downloader):
    """Downloads yearly low-resolution OMNI2 files from NASA SPDF.

    Parameters
    ----------
    download_dir: path-like
        Directory that receives the downloaded files.
    method: str
        Transfer method, either 'ftp' (default, FTPS to spdf.gsfc.nasa.gov)
        or 'http'.
    """

    def __init__(self, download_dir, method='ftp', *args, **kwargs):
        super().__init__(download_dir, *args, **kwargs)
        self.method = method
        if method == 'ftp':
            logger.info("connecting to server")
            self.server = ftplib.FTP_TLS("spdf.gsfc.nasa.gov")
            self.server.login()
            self._download_file = self._download_ftp_file
        elif method == 'http':
            self._download_file = self._download_http_file
        else:
            # BUGFIX: an unrecognized method used to leave self._download_file
            # unset and only fail later with an opaque AttributeError.
            raise InvalidConfiguration(f"unknown download method: {method}")

    def _download_files(self, files):
        """Download every server file in *files*; return the local paths (same order)."""
        logger.info(f"downloading {len(files)} files")
        local_files = []
        for file in files:
            file_name = file.split('/')[-1]
            local_path = str(self.download_dir / file_name)
            local_files.append(local_path)
            self._download_file(file, local_path)
        return local_files

    @staticmethod
    def _download_http_file(file, local_path):
        """Fetch one file over HTTPS from the SPDF web mirror."""
        url = "https://spdf.gsfc.nasa.gov" + file
        _download_http_file(url, local_path)

    def _download_ftp_file(self, file, local_path):
        """Fetch one file over the authenticated FTPS connection."""
        _download_ftp_file(self.server, file, local_path)

    def _get_file_list(self, start_date, end_date):
        """Map year -> [server path of that year's omni2 file], window padded by a day."""
        new_start_date = start_date - timedelta(days=1)
        new_end_date = end_date + timedelta(days=1)
        files = {
            str(year): [f'/pub/data/omni/low_res_omni/omni2_{year:4d}.dat']
            for year in range(new_start_date.year, new_end_date.year + 1)
        }
        return files

    @staticmethod
    def _verify_local_file(local_file):
        """A full yearly OMNI2 file is well over 1 MiB; smaller means truncated."""
        return (pathlib.Path(local_file).stat().st_size / (2 ** 20)) > 1  # file size > 1Mb

    def _verify_files(self, local_files, server_files):
        """Return the server files whose local copies fail verification."""
        bad_server_files = [
            server_file for (server_file, local_file) in zip(server_files, local_files)
            if not self._verify_local_file(local_file)
        ]
        return bad_server_files
class ArbDownloader(Downloader):
    """Downloads DMSP SSUSI EDR-AURORA (auroral boundary) files from JHU/APL.

    File URLs are discovered by scraping the ssusi.jhuapl.edu data-retrieval
    pages, one page per (satellite, day).
    """

    def __init__(self, download_dir, *args, **kwargs):
        super().__init__(download_dir, *args, **kwargs)
        # DMSP spacecraft whose SSUSI products are collected
        self.satellites = ['f16', 'f17', 'f18', 'f19']

    def _download_files(self, files):
        """Download every URL in *files*; return the local paths (same order).

        Files already present on disk are skipped.
        """
        logger.info(f"downloading {len(files)} files")
        local_files = []
        for i, file in enumerate(files):
            # coarse progress logging, only for large batches
            if len(files) > 100 and not (i % (len(files) // 100)):
                logger.info(f"{round(100 * i / len(files))}% finished")
            file_name = file.split('/')[-1]
            local_path = str(self.download_dir / file_name)
            local_files.append(local_path)
            if pathlib.Path(local_path).exists():
                logger.info(f"already exists: {local_path}")
            else:
                _download_http_file(file, local_path)
        return local_files

    def _get_file_list(self, start_date, end_date):
        """Map '{sat}_{year}_{doy}' -> list of file URLs, window padded by a day."""
        date1 = start_date - timedelta(days=1)
        date2 = end_date + timedelta(days=1)
        n_days = math.ceil((date2 - date1) / timedelta(days=1))
        logger.info(f"getting files for {n_days} days")
        days = [date1 + timedelta(days=t) for t in range(n_days)]
        arb_files = {}
        for i, day in enumerate(days):
            if len(days) > 100 and not (i % (len(days) // 100)):
                logger.info(f"{round(100 * i / len(days))}% finished")
            arb_files.update(self._get_files_for_day(day))
        return arb_files

    def _get_files_for_day(self, day):
        """Scrape the JHU/APL listing page for each satellite on *day*.

        Cached keys are answered from ``self.cache`` without a network request.
        Only links matching the EDR-AURORA NetCDF naming pattern whose parsed
        (satellite, date) agree with the request are kept.
        """
        files = {}
        for satellite in self.satellites:
            doy = _doy(day)
            year = day.year
            cache_key = f"{satellite}_{year}_{doy}"
            if cache_key in self.cache:
                files[cache_key] = self.cache[cache_key]
            else:
                files[cache_key] = []
                # NOTE: 'data_retriver' is the server's actual (misspelled) endpoint name
                url = f'https://ssusi.jhuapl.edu/data_retriver?spc={satellite}&type=edr-aur&' \
                      f'year={year:04d}&Doy={doy:03d}'
                with request.urlopen(url) as r:
                    if r.status == 200:
                        soup = bs4.BeautifulSoup(r.read(), 'html.parser')
                        links = soup.find_all('a')
                        for link in links:
                            if 'href' in link.attrs and re.match(r'PS\.APL_.+EDR-AURORA.+\.NC', str(link.string)):
                                sat_name, date = parse_arb_fn(pathlib.Path(link['href']))
                                if date.date() == day.date() and sat_name.lower() == satellite:
                                    files[cache_key].append(f"https://ssusi.jhuapl.edu/{link['href']}")
        return files

    @staticmethod
    def _verify_local_file(local_file):
        """True when the file opens as HDF5/NetCDF and the boundary grid is non-trivial."""
        try:
            with h5py.File(local_file, 'r') as f:
                lon = f['MODEL_NORTH_GEOGRAPHIC_LONGITUDE'][()]
        except Exception as e:
            logger.warning(f"bad local file: {local_file}, error: {e}")
            return False
        return lon.shape[0] > 10

    def _verify_files(self, local_files, server_files):
        """Return the server files whose local copies fail verification."""
        bad_server_files = [
            server_file for (server_file, local_file) in zip(server_files, local_files)
            if not self._verify_local_file(local_file)
        ]
        return bad_server_files
def _download_ftp_file(server, server_file: str, local_path: str, retries=3):
    """Retrieve *server_file* over an open FTP connection, retrying on timeouts."""
    logger.info(f"downloading file {server_file} to {local_path}")
    attempts_left = retries
    while attempts_left > 0:
        attempts_left -= 1
        try:
            with open(local_path, 'wb') as out:
                server.retrbinary(f'RETR {str(server_file)}', out.write)
            return
        except (socket.timeout, TimeoutError):
            logger.error(f'Failure downloading {server_file} because it took more than allowed number of seconds')
def _download_http_file(http_file: str, local_path: str, retries=3):
    """Fetch *http_file* over HTTP(S) and write it to *local_path*, retrying on timeouts."""
    logger.info(f"downloading file {http_file} to {local_path}")
    for _attempt in range(retries):
        try:
            with request.urlopen(http_file, timeout=60) as response:
                with open(local_path, 'wb') as out:
                    out.write(response.read())
            return
        except (socket.timeout, TimeoutError):
            logger.error(f'Failure downloading {http_file} because it took more than allowed number of seconds')
| StarcoderdataPython |
5002191 | import cv2
import numpy as np
# Show a live preview from the default camera until ESC is pressed.
vc = cv2.VideoCapture(0)
while True:
    ret, img = vc.read()
    if not ret:
        # camera disconnected or returned no frame
        break
    cv2.imshow('pyCam', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC key
        break
# BUGFIX: the original `while -1:` loop never exited — after ESC it released
# the capture and destroyed the windows *inside* the loop, then kept calling
# read() on the released device. Release exactly once, after the loop.
vc.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
341675 | from mcscript.lang.Type import Type
from mcscript.lang.atomic_types import MetaType
from mcscript.lang.resource.base.ResourceBase import Resource
class TypeResource(Resource):
    """A resource that wraps a statically known resource :class:`Type`."""

    def __init__(self, static_value: Type):
        # The wrapped type is fixed at construction time.
        self.static_value = static_value

    def type(self) -> Type:
        """Every type value shares the single meta type."""
        return MetaType

    def supports_scoreboard(self) -> bool:
        # Encoding a type on a scoreboard would be technically possible,
        # but it is not implemented.
        return False

    def supports_storage(self) -> bool:
        return False
| StarcoderdataPython |
1736394 | import functools
from django.db import transaction
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from . import models, serializers, utils
@receiver(post_save, sender=models.Message)
def message_post_processing(sender, instance, created, **_kwargs):
    """Fan a newly created message out to every user that should receive it."""
    # pylint: disable=unused-argument
    if not created:
        return
    recipients = []
    if instance.is_personal:
        # personal messages go to exactly one user: the sender for outgoing
        # mail, the recipient otherwise
        principal = instance.sender if instance.is_outgoing else instance.recipient
        recipients.append(models.User.objects.get(principal=principal))
    elif not instance.is_outgoing:
        # class message: deliver to subscribers of this class whose instance
        # key matches exactly or is the '*' wildcard
        matching_subs = models.Subscription.objects.filter(
            class_key=instance.class_key,
            instance_key__in=(instance.instance_key, '*'),
            zrecipient=instance.recipient)
        recipients.extend(sub.user for sub in matching_subs)
    if not recipients:
        return
    instance.users.add(*recipients)
    payload = serializers.MessageSerializer(instance).data
    for user in recipients:
        user.send_to_user_sockets({
            'type': 'incoming_message',
            'message': {
                'id': instance.id,
                'payload': payload,
            }
        })
@receiver(post_save, sender=models.Subscription)
def resync_subscriber_on_subscription_save(sender, instance, created, **_kwargs):
    """After a new subscription is committed, tell the right subscriber process to resync."""
    # pylint: disable=unused-argument
    if not created:
        return
    owner = instance.user
    if instance.zrecipient == owner.principal:
        # personal subscription: notify that user's own subscriber process
        target_group = utils.principal_to_user_subscriber_group_name(owner.principal)
    else:
        target_group = 'ROOST_SERVER_PROCESS'
    notify = functools.partial(
        utils.send_to_group, target_group, {'type': 'resync_subscriptions'})
    transaction.on_commit(notify)
@receiver(post_delete, sender=models.Subscription)
def resync_subscriber_on_subscription_delete(sender, instance, **_kwargs):
    """After a subscription is deleted and committed, tell the right subscriber to resync."""
    # pylint: disable=unused-argument
    owner = instance.user
    if not owner:
        # subscription already orphaned; nobody to notify
        return
    if instance.zrecipient == owner.principal:
        # personal subscription: notify that user's own subscriber process
        target_group = utils.principal_to_user_subscriber_group_name(owner.principal)
    else:
        target_group = 'ROOST_SERVER_PROCESS'
    notify = functools.partial(
        utils.send_to_group, target_group, {'type': 'resync_subscriptions'})
    transaction.on_commit(notify)
@receiver(post_save, sender=models.User)
def start_new_user_subscriber(sender, instance, created, **_kwargs):
    """Register a subscriber process for a freshly created user and seed its subscriptions."""
    # pylint: disable=unused-argument
    if not created:
        return
    payload = {
        'type': 'add_user',
        'principal': instance.principal,
        'uid': instance.id,
    }
    utils.send_to_group('OVERSEER', payload)
    instance.add_default_subscriptions()
@receiver(post_delete, sender=models.User)
def resync_subscriber_on_user_delete(sender, instance, **_kwargs):
    """Tell the overseer to tear down the deleted user's subscriber process."""
    # pylint: disable=unused-argument
    payload = {
        'type': 'del_user',
        'principal': instance.principal,
    }
    utils.send_to_group('OVERSEER', payload)
| StarcoderdataPython |
16069 | <reponame>eclee25/flu-SDI-exploratory-age
#!/usr/bin/python
##############################################
###Python template
###Author: <NAME>
###Date: 10/14/14
###Function: Export zOR retrospective and early warning classifications into csv file format (SDI and ILINet, national and regional for SDI)
### Use nation-level peak-based retrospective classification for SDI region analysis
# 10/14/14 swap OR age groups
###Import data: R_export/OR_zip3_week_outpatient_cl.csv, R_export/allpopstat_zip3_season_cl.csv
#### These data were cleaned with data_extraction/clean_OR_hhsreg_week_outpatient.R and exported with OR_zip3_week.sql
#### allpopstat_zip3_season_cl.csv includes child, adult, and other populations; popstat_zip3_season_cl.csv includes only child and adult populations
###Command Line: python export_zOR_classif_swap.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
## local modules ##
import functions_v2 as fxn
### data structures ###
### called/local plotting parameters ###
nw = fxn.gp_normweeks # number of normalization weeks in baseline period
### functions ###
def print_dict_to_file(dic, filename):
    """Write per-season (mn_retro, mn_early) classification means as a CSV file."""
    lines = ["season,mn_retro,mn_early\n"]
    lines.extend("%s,%s,%s\n" % (season, means[0], means[1])
                 for season, means in dic.items())
    with open(filename, 'w+') as out:
        out.writelines(lines)
def print_dict_to_file2(dic, filename):
    """Write (season, region)-keyed classification means as a CSV file."""
    lines = ["season,region,mn_retro,mn_early\n"]
    lines.extend("%s,%s,%s,%s\n" % (key[0], key[1], means[0], means[1])
                 for key, means in dic.items())
    with open(filename, 'w+') as out:
        out.writelines(lines)
def print_dict_to_file3(dic, filename):
    """Write (season, state)-keyed classification means as a CSV file."""
    lines = ['season,state,mn_retro,mn_early\n']
    lines.extend("%s,%s,%s,%s\n" % (key[0], key[1], means[0], means[1])
                 for key, means in dic.items())
    with open(filename, 'w+') as out:
        out.writelines(lines)
##############################################
# SDI NATIONAL
# national files
# NOTE(review): absolute user-specific paths below must exist for this script to run
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
# standardize weekly ORs to zOR using the baseline normalization period
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
# ##############################################
# # ILINet NATIONAL
# # national files
# incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
# incidin.readline() # remove header
# incid = csv.reader(incidin, delimiter=',')
# popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
# pop = csv.reader(popin, delimiter=',')
# thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
# thanksin.readline() # remove header
# thanks=csv.reader(thanksin, delimiter=',')
# # dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
# d_wk, d_incid, d_OR = fxn.ILINet_week_OR_processing(incid, pop)
# d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# # d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
# d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# # d_ILINet_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
# d_ILINet_classifzOR = fxn.classif_zOR_processing(d_wk, d_incid53ls, d_zOR53ls, thanks)
##############################################
# SDI REGION: region-level peak-basesd retrospective classification
# regional files
# regional zip3-level incidence and population (cleaned R exports)
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# national quantities are recomputed here because the csv readers above were exhausted in the national section
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_reg, d_OR_reg = fxn.week_OR_processing_region(regincid, regpop)
# dict_zOR_reg[(week, hhsreg)] = zOR
d_zOR_reg = fxn.week_zOR_processing_region(d_wk, d_OR_reg)
# dict_incid53ls_reg[(seasonnum, region)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, region)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_reg[(seasonnum, region)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_reg, d_OR53ls_reg, d_zOR53ls_reg = fxn.week_plotting_dicts_region(d_wk, d_incid_reg, d_OR_reg, d_zOR_reg)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index(d_wk, d_incid53ls, d_incid53ls_reg, 'region', thanks)
# d_classifzOR_reg[(seasonnum, region)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_reg = fxn.classif_zOR_region_processing(d_classifindex, d_wk, d_zOR53ls_reg)
##############################################
# SDI STATE: state-level peak-basesd retrospective classification
# import same files as regional files
# state analysis reuses the same zip3-level inputs as the regional section
reg_incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/OR_zip3_week_outpatient_cl.csv', 'r')
reg_incidin.readline()
regincid = csv.reader(reg_incidin, delimiter=',')
reg_popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/allpopstat_zip3_season_cl.csv','r')
reg_popin.readline()
regpop = csv.reader(reg_popin, delimiter=',')
# national files
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
thanksin=open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
thanks=csv.reader(thanksin, delimiter=',')
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
_, d_zip3_reg, d_incid_state, d_OR_state = fxn.week_OR_processing_state(regincid, regpop)
# dict_zOR_state[(week, state)] = zOR
d_zOR_state = fxn.week_zOR_processing_state(d_wk, d_OR_state)
# dict_incid53ls_state[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_state[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls_state, d_OR53ls_state, d_zOR53ls_state = fxn.week_plotting_dicts_state(d_wk, d_incid_state, d_OR_state, d_zOR_state)
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
d_classifindex = fxn.classif_zOR_index_state(d_wk, d_incid53ls, d_incid53ls_state, 'state', thanks)
# d_classifzOR_state[(seasonnum, state)] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR_state = fxn.classif_zOR_state_processing(d_classifindex, d_wk, d_zOR53ls_state)
##############################################
# Python 2 print statements: echo results to stdout before writing files
print d_classifzOR
print d_classifzOR_reg
fn1 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_%s_swap.csv' %(nw)
print_dict_to_file(d_classifzOR, fn1)
# ILINet export kept disabled in step with the commented-out ILINet section above
# fn2 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/ILINet_national_classifications_%s_swap.csv' %(nw)
# print_dict_to_file(d_ILINet_classifzOR, fn2)
fn3 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_regional_classifications_%sreg_swap.csv' %(nw)
print_dict_to_file2(d_classifzOR_reg, fn3)
fn4 = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classifications_%sst_swap.csv' %(nw)
print_dict_to_file3(d_classifzOR_state, fn4)
3280567 | <gh_stars>1-10
__version__ = "0.1.0"  # package version; keep in sync with the release tag
# public API re-exports
from .core import Movie
from .presets import rotating_globe
| StarcoderdataPython |
3326774 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-20 15:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: allow blank values for two Expense date fields.

    NOTE(review): ``DateField(blank=True)`` without ``null=True`` permits
    blank form input but not a NULL database value — confirm the model
    definition matches this intent.
    """
    dependencies = [
        ('lowfat', '0048_auto_20160720_1107'),
    ]
    operations = [
        migrations.AlterField(
            model_name='expense',
            name='asked_for_authorization_date',
            field=models.DateField(blank=True),
        ),
        migrations.AlterField(
            model_name='expense',
            name='send_to_finance_date',
            field=models.DateField(blank=True),
        ),
    ]
| StarcoderdataPython |
11205427 | <filename>ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
#!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import status_params
import os
# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
#RPM versioning support
rpm_version = default("/configurations/cluster-env/rpm_version", None)
stack_version_unformatted = str(config['clusterLevelParams']['stack_version'])
stack_version_formatted = config['clusterLevelParams']['stack_version']
#hadoop params
# rpm_version set -> versioned BigTop layout under /usr/bigtop/current;
# otherwise the legacy flat /usr layout is used
if rpm_version:
  hadoop_bin_dir = "/usr/bigtop/current/hadoop-client/bin"
  hadoop_home = '/usr/bigtop/current/hadoop-client'
  hadoop_streeming_jars = "/usr/bigtop/current/hadoop-mapreduce-client/hadoop-streaming-*.jar"
  hive_bin = '/usr/bigtop/current/hive-client/bin'
  hive_lib = '/usr/bigtop/current/hive-client/lib'
  tez_local_api_jars = '/usr/bigtop/current/tez-client/tez*.jar'
  tez_local_lib_jars = '/usr/bigtop/current/tez-client/lib/*.jar'
  tez_tar_file = "/usr/bigtop/current/tez-client/lib/tez*.tar.gz"
  pig_tar_file = '/usr/bigtop/current/pig-client/pig.tar.gz'
  hive_tar_file = '/usr/bigtop/current/hive-client/hive.tar.gz'
  sqoop_tar_file = '/usr/bigtop/current/sqoop-client/sqoop*.tar.gz'
  hcat_lib = '/usr/bigtop/current/hive/hive-hcatalog/share/hcatalog'
  webhcat_bin_dir = '/usr/bigtop/current/hive-hcatalog/sbin'
else:
  hadoop_bin_dir = "/usr/bin"
  hadoop_home = '/usr'
  hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
  hive_bin = '/usr/lib/hive/bin'
  hive_lib = '/usr/lib/hive/lib/'
  tez_local_api_jars = '/usr/lib/tez/tez*.jar'
  tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
  tez_tar_file = "/usr/lib/tez/tez*.tar.gz"
  pig_tar_file = '/usr/share/HDP-webhcat/pig.tar.gz'
  hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
  sqoop_tar_file = '/usr/share/HDP-webhcat/sqoop*.tar.gz'
  # hcatalog moved to hive-hcatalog after stack 2.0
  if str(stack_version_formatted).startswith('2.0'):
    hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
  # for newer versions
  else:
    hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
hadoop_conf_dir = "/etc/hadoop/conf"
hive_conf_dir = "/etc/hive/conf"
hive_client_conf_dir = "/etc/hive/conf"
hive_server_conf_dir = '/etc/hive/conf.server'
# for newer versions
hcat_conf_dir = '/etc/hive-hcatalog/conf'
config_dir = '/etc/hive-webhcat/conf'
execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
#users
hive_user = config['configurations']['hive-env']['hive_user']
#JDBC driver jar name
hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
# NOTE(review): an unrecognized driver class leaves jdbc_jar_name and
# jdbc_symlink_name undefined, which fails later at the format() calls
if hive_jdbc_driver == "com.mysql.jdbc.Driver":
  jdbc_jar_name = "mysql-connector-java.jar"
  jdbc_symlink_name = "mysql-jdbc-driver.jar"
elif hive_jdbc_driver == "org.postgresql.Driver":
  jdbc_jar_name = "postgresql-jdbc.jar"
  jdbc_symlink_name = "postgres-jdbc-driver.jar"
elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
  jdbc_jar_name = "ojdbc6.jar"
  jdbc_symlink_name = "oracle-jdbc-driver.jar"
check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
#common
hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
hive_var_lib = '/var/lib/hive'
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
smokeuser = config['configurations']['cluster-env']['smokeuser']
smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
fs_root = config['configurations']['core-site']['fs.defaultFS']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hive_metastore_keytab_path = config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
#hive_env
hive_dbroot = config['configurations']['hive-env']['hive_dbroot']
hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
hive_pid_dir = status_params.hive_pid_dir
hive_pid = status_params.hive_pid
#Default conf dir for client
hive_conf_dirs_list = [hive_server_conf_dir, hive_client_conf_dir]
# server-side roles read the protected conf dir, clients the public one
if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
  hive_config_dir = hive_server_conf_dir
else:
  hive_config_dir = hive_client_conf_dir
#hive-site
hive_database_name = config['configurations']['hive-env']['hive_database_name']
#Starting hiveserver2
start_hiveserver2_script = 'startHiveserver2.sh.j2'
##Starting metastore
start_metastore_script = 'startMetastore.sh'
hive_metastore_pid = status_params.hive_metastore_pid
java_share_dir = '/usr/share/java'
driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
user_group = config['configurations']['cluster-env']['user_group']
artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
target = format("{hive_lib}/{jdbc_jar_name}")
jdk_location = config['ambariLevelParams']['jdk_location']
driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
start_metastore_path = format("{tmp_dir}/start_metastore_script")
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
java64_home = config['ambariLevelParams']['java_home']
##### MYSQL
db_name = config['configurations']['hive-env']['hive_database_name']
mysql_user = "mysql"
mysql_group = 'mysql'
mysql_host = config['clusterHostInfo']['hive_mysql_host']
mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
##### POSTGRES
postgresql_adduser_file = "addPostgreSQLUser.sh"
postgresql_adduser_path = format("{tmp_dir}/{postgresql_adduser_file}")
postgresql_host = config['clusterHostInfo']['hive_postgresql_host']
postgresql_pghba_conf_path = "/var/lib/pgsql/data/pg_hba.conf"
postgresql_conf_path = "/var/lib/pgsql/data/postgresql.conf"
postgresql_daemon_name = status_params.postgresql_daemon_name
######## Metastore Schema
init_metastore_schema = True
########## HCAT
hcat_dbroot = hcat_lib
hcat_user = config['configurations']['hive-env']['hcat_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
hcat_pid_dir = status_params.hcat_pid_dir
hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
hcat_env_sh_template = config['configurations']['hcat-env']['content']
#hive-log4j.properties.template
if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
  log4j_props = config['configurations']['hive-log4j']['content']
else:
  log4j_props = None
#hive-exec-log4j.properties.template
if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
  log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
else:
  log4j_exec_props = None
daemon_name = status_params.daemon_name
hive_env_sh_template = config['configurations']['hive-env']['content']
hive_hdfs_user_dir = format("/user/{hive_user}")
hive_hdfs_user_mode = 0700  # Python 2 octal literal (permissions rwx------)
hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
#for create_hdfs_directory
hostname = config["hostname"]
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
# Tez libraries
tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
tez_user = config['configurations']['tez-env']['tez_user']
if System.get_instance().os_family == "ubuntu":
  mysql_configname = '/etc/mysql/my.cnf'
else:
  mysql_configname = '/etc/my.cnf'
# Hive security
hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
# There are other packages that contain /usr/share/java/mysql-connector-java.jar (like libmysql-java),
# trying to install mysql-connector-java upon them can cause packages to conflict.
if os.path.exists(mysql_jdbc_driver_jar):
  hive_exclude_packages = ['mysql-connector-java']
else:
  hive_exclude_packages = []
########################################################
########### WebHCat related params #####################
########################################################
webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
templeton_pid_dir = status_params.hcat_pid_dir
webhcat_pid_file = status_params.webhcat_pid_file
templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
webhcat_apps_dir = "/apps/webhcat"
hcat_hdfs_user_dir = format("/user/{hcat_user}")
hcat_hdfs_user_mode = 0755  # Python 2 octal literal
webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
webhcat_hdfs_user_mode = 0755  # Python 2 octal literal
#for create_hdfs_directory
security_param = "true" if security_enabled else "false"
# older stacks ship Tez jars only; newer ones also stage the tez tarball
if str(stack_version_formatted).startswith('2.0') or str(stack_version_formatted).startswith('2.1'):
  app_dir_files = {tez_local_api_jars:None}
else:
  app_dir_files = {
      tez_local_api_jars:None,
      tez_tar_file:"tez.tar.gz"
  }
import functools
#create partial functions with common arguments for every HdfsDirectory call
#to create hdfs directory we need to call params.HdfsDirectory in code
HdfsDirectory = functools.partial(
    HdfsDirectory,
    conf_dir=hadoop_conf_dir,
    hdfs_user=hdfs_user,
    security_enabled = security_enabled,
    keytab = hdfs_user_keytab,
    kinit_path_local = kinit_path_local,
    bin_dir = hadoop_bin_dir
)
| StarcoderdataPython |
100854 | # Generated by Django 2.2.10 on 2021-07-02 04:01
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds `phone_number` to the `customer` model.
    # The CharField is optional (blank=True) and validated against an
    # E.164-style pattern (optional '+', optional leading 1, 9-15 digits).
    dependencies = [
        ('app', '0003_customer_phone'),
    ]
    operations = [
        migrations.AddField(
            model_name='customer',
            name='phone_number',
            field=models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
        ),
    ]
| StarcoderdataPython |
365140 | import time
import copy
import numpy as np
import matplotlib.pyplot as plt
import math
import os
from shutil import copy2
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpi4py import MPI
import sys
import scipy.io as sio
from pysit import *
from pysit.gallery import horizontal_reflector
from pysit.util.io import *
from pysit.vis.vis import *
from pysit.util.parallel import *
if __name__ == '__main__':
    # Demo script: generate 3D time-domain and frequency-domain seismic data
    # with PySIT over MPI, then plot the velocity model, a shot record and
    # DFT slices of the frequency-domain data.
    # Set up parallel computing environment
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    pwrap = ParallelWrapShot()
    # Set up domain, mesh and velocity model
    pmlx = PML(0.1, 100)
    pmly = PML(0.1, 100)
    pmlz = PML(0.1, 100)
    x_config = (0.1, 1.0, pmlx, pmlx)
    y_config = (0.1, 0.9, pmly, pmly)
    z_config = (0.1, 0.8, pmlz, pmlz)
    d = RectangularDomain(x_config, y_config, z_config)
    m = CartesianMesh(d, 46, 41, 36)
    C, C0, m, d = horizontal_reflector(m)
    # Set up shots: sources/receivers placed at 1/9 of the z extent.
    zmin = d.z.lbound
    zmax = d.z.rbound
    zpos = zmin + (1./9.)*zmax
    # NOTE(review): this binds the tuple (1, 2), i.e. a 1x2 source grid in
    # 3D -- confirm it is intentional and not a typo for a single integer.
    Nshots = 1,2
    shots = equispaced_acquisition(m,
                                   RickerWavelet(10.0),
                                   sources=Nshots,
                                   source_depth=zpos,
                                   source_kwargs={},
                                   receivers='max',
                                   receiver_depth=zpos,
                                   receiver_kwargs={},
                                   parallel_shot_wrap=pwrap,
                                   )
    # Keep an identical copy of the acquisition for the frequency-domain run.
    shots_freq = copy.deepcopy(shots)
    # Define and configure the wave solver
    trange = (0.0,3.0)
    # Define the time-domain wave-equation solver and generate the time-domain data
    solver = ConstantDensityAcousticWave(m,
                                         spatial_accuracy_order=2,
                                         trange=trange,
                                         kernel_implementation='cpp')
    base_model = solver.ModelParameters(m,{'C': C})
    print('Generating time-domain data...')
    tt = time.time()
    generate_seismic_data(shots, solver, base_model)
    print('Time-domain data generation: {0}s'.format(time.time()-tt))
    # Check the result and plot the result
    n_data = (46, 41, 36)
    n_dataplt = (n_data[0], n_data[2], n_data[1])
    origins = [0.1, 0.1, 0.1]
    deltas = [0.02, 0.02, 0.02]
    # Only rank 0 plots the velocity model.
    if rank == 0:
        Cplot = np.reshape(C, n_data)
        Cplot = np.transpose(Cplot, (0, 2, 1))
        axis_ticks = [np.array(list(range(0, n_dataplt[0]-5, (n_data[0]-6)//4))),
                      np.array(list(range(5, n_dataplt[1]-5, (n_data[1]-11)//4))),
                      np.array(list(range(0, n_dataplt[2], (n_data[2]-1)//2)))
                      ]
        axis_tickslabels = [(axis_ticks[0] * deltas[0] * 1000.0 + origins[0] * 1000.0).astype(int),
                            (axis_ticks[1] * deltas[1] * 1000.0 + origins[1] * 1000.0).astype(int),
                            (axis_ticks[2] * deltas[2] * 1000.0 + origins[2] * 1000.0).astype(int)
                            ]
        plot_3D_panel(Cplot, slice3d=(22, 18, 20),
                      axis_label=['x [m]', 'z [m]', 'y [m]'],
                      axis_ticks=axis_ticks,
                      axis_tickslabels=axis_tickslabels,
                      )
        plt.title('Slice at \n x = 540 m, y = 500 m, z = 440 m')
        plt.show()
    # All ranks must reach the barrier together.
    comm.Barrier()
    # Reshape the receiver data of the first local shot and plot it.
    data = shots[0].receivers.data
    t_smp = np.linspace(trange[0], trange[1], data.shape[0])
    fig=plt.figure()
    n_recdata = [len(t_smp), n_data[0], n_data[1]]
    n_recdataplt = [n_data[0], len(t_smp), n_data[1]]
    data = np.reshape(data, n_recdata)
    dataplt = np.transpose(data, (1, 0, 2))
    deltas_data = [deltas[0], solver.dt, deltas[2]]
    origins_data = [origins[0], 0.0,origins[2]]
    axis_ticks = [np.array(list(range(0, n_recdataplt[0]-5, (n_recdataplt[0]-1)//4))),
                  np.array(list(range(0, n_recdataplt[1]-5, (n_recdataplt[1]-1)//4))),
                  np.array(list(range(0, n_recdataplt[2], (n_recdataplt[2]-1)//2)))
                  ]
    axis_tickslabels = [np.round(axis_ticks[0] * deltas_data[0] + origins_data[0], 2),
                        np.round(axis_ticks[1] * deltas_data[1] + origins_data[1], 2),
                        np.round(axis_ticks[2] * deltas_data[2] + origins_data[2], 2)
                        ]
    plot_3D_panel(dataplt, slice3d=(22, 900, 20),
                  axis_label=[ 'x [km]', 'Time [s]', 'y [km]'],
                  axis_ticks=axis_ticks,
                  axis_tickslabels=axis_tickslabels,
                  width_ratios=[1,1], height_ratios=[1,1],cmap='seismic', vmin=-0.2,vmax=0.2
                  )
    plt.show()
    # Define the frequency-domain wave-equation solver and generate the frequency-domain data
    # (compact PML variant rebuilds domain/mesh/model for the Helmholtz solver).
    pmlx = PML(0.1, 100, compact=True)
    pmly = PML(0.1, 100, compact=True)
    pmlz = PML(0.1, 100, compact=True)
    x_config = (0.1, 1.0, pmlx, pmlx)
    y_config = (0.1, 0.9, pmly, pmly)
    z_config = (0.1, 0.8, pmlz, pmlz)
    d = RectangularDomain(x_config, y_config, z_config)
    m = CartesianMesh(d, 46, 41, 36)
    C, C0, m, d = horizontal_reflector(m)
    solver = ConstantDensityHelmholtz(m,
                                      spatial_accuracy_order=4,
                                      parallel_shot_wrap=pwrap,)
    frequencies = [2.0,3.0]
    print('Generating frequency-domain data...')
    tt = time.time()
    generate_seismic_data(shots_freq, solver, base_model, frequencies=frequencies)
    print('Frequency-domain data generation: {0}s'.format(time.time()-tt))
    # Check the result and plot the result: real/imaginary parts at 2 and 3 Hz.
    xrec = np.linspace(0.1,1.0,46)
    yrec = np.linspace(0.1,0.9,41)
    data1 = shots_freq[0].receivers.data_dft[2.0]
    data2 = shots_freq[0].receivers.data_dft[3.0]
    data1 = np.reshape(data1, (len(xrec),len(yrec)))
    data2 = np.reshape(data2, (len(xrec),len(yrec)))
    plt.figure(figsize=(12,12))
    plt.subplot(2,2,1)
    vmax = np.abs(np.real(data1)).max()
    clim=np.array([-vmax, vmax])
    plt.imshow(np.real(data1).transpose(),cmap='seismic',clim=clim,
               extent=[xrec[0], xrec[-1], yrec[-1], yrec[0]])
    plt.xlabel('X [km]')
    plt.ylabel('Y [km]')
    plt.title('Real part of data at 2 Hz')
    plt.colorbar()
    plt.subplot(2,2,2)
    vmax = np.abs(np.imag(data1)).max()
    clim=np.array([-vmax, vmax])
    plt.imshow(np.imag(data1).transpose(),cmap='seismic',clim=clim,
               extent=[xrec[0], xrec[-1], yrec[-1], yrec[0]])
    plt.xlabel('X [km]')
    plt.ylabel('Y [km]')
    plt.title('Imaginary part of data at 2 Hz')
    plt.colorbar()
    plt.subplot(2,2,3)
    vmax = np.abs(np.real(data2)).max()
    clim=np.array([-vmax, vmax])
    plt.imshow(np.real(data2).transpose(),cmap='seismic',clim=clim,
               extent=[xrec[0], xrec[-1], yrec[-1], yrec[0]])
    plt.xlabel('X [km]')
    plt.ylabel('Y [km]')
    plt.title('Real part of data at 3 Hz')
    plt.colorbar()
    plt.subplot(2,2,4)
    vmax = np.abs(np.imag(data2)).max()
    clim=np.array([-vmax, vmax])
    plt.imshow(np.imag(data2).transpose(),cmap='seismic',clim=clim,
               extent=[xrec[0], xrec[-1], yrec[-1], yrec[0]])
    plt.xlabel('X [km]')
    plt.ylabel('Y [km]')
    plt.title('Imaginary part of data at 3 Hz')
    plt.colorbar()
    plt.show()
| StarcoderdataPython |
8008045 | from enum import Enum
class State(Enum):
    """German federal states (Bundesländer).

    Member names are kept exactly as originally published (including the
    historic misspellings BADEN_WUERTTENBERG and NIEDERSACHEN) because
    callers reference them by name.  The *values* are corrected to the
    proper German spellings; if old values were ever persisted, migrate
    stored data accordingly.
    """
    BADEN_WUERTTENBERG = "Baden-Württemberg"  # fixed: was "Baden-Württenberg"
    BAYERN = "Bayern"
    BERLIN = "Berlin"
    BRANDENBURG = "Brandenburg"
    BREMEN = "Bremen"
    HAMBURG = "Hamburg"
    HESSEN = "Hessen"
    MECKLENBURG_VORPOMMERN = "Mecklenburg-Vorpommern"
    NIEDERSACHEN = "Niedersachsen"  # fixed: was "Niedersachen"
    NORDRHEIN_WESTFALEN = "Nordrhein-Westfalen"
    RHEINLAND_PFALZ = "Rheinland-Pfalz"
    SAARLAND = "Saarland"
    SACHSEN = "Sachsen"
    SACHSEN_ANHALT = "Sachsen-Anhalt"  # fixed: was "Sachen-Anhalt"
    SCHLESWIG_HOLSTEIN = "Schleswig-Holstein"  # fixed: was "Schleswig_Holstein"
    THUERINGEN = "Thüringen"
| StarcoderdataPython |
9618576 | <reponame>fswzb/autotrade
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import abstract as tech_factor
import talib.abstract
# Re-export everything from the local `abstract` module plus every public
# upper-case attribute of talib.abstract (the TA-Lib indicator names).
__all__ = tech_factor.__all__ + list(
    filter(lambda x: x.isupper() and not x.startswith('_'), dir(
        talib.abstract)))
| StarcoderdataPython |
8096450 | #python 3.5.2
def makeChange(coinValueList, change, debug):
    """Make change greedily, largest coin first.

    Args:
        coinValueList: coin denominations; sorted in place, descending.
        change: non-negative amount to make change for.
        debug: when True, print a step-by-step trace of the computation.

    Returns:
        dict mapping each denomination to the number of coins used.
    """
    count = 0
    coinValueList.sort(reverse=True)
    result = {}
    for coin in coinValueList:
        result[coin] = 0
    if debug:
        print('COIN VALUES: ', coinValueList)
    while len(coinValueList) > count:
        if change - coinValueList[count] >= 0:
            if debug:
                print(str(change) + ' - ' + '[ ' + str(coinValueList[count]) + ' ]')
            result[coinValueList[count]] = result[coinValueList[count]] + 1
            change = change - coinValueList[count]
            if debug:
                # Bug fix: this trace line used to print unconditionally,
                # even with debug=False; it belongs to the debug trace above.
                print(' = ' + str(change))
        else:
            count = count + 1
    return result
# Demo: greedy change for 63 cents with US coin values, debug tracing on.
print(makeChange([10, 25, 5, 1], 63, True))
print('-' * 20)
# Recursion
class Coins:
    """Stateful greedy change-maker.

    Holds the denominations (sorted descending), a cursor into that list,
    and a tally of coins used.  ``makeCoinChange`` walks the amount down to
    zero, printing a trace of every step exactly as the recursive original.
    """

    def __init__(self, coinValueList):
        self.counter = 0
        # Sorts the caller's list in place (descending), as before.
        coinValueList.sort(reverse=True)
        self.coinValueList = list(coinValueList)
        self.results = {coin: 0 for coin in coinValueList}

    @property
    def coin(self):
        """Denomination currently under consideration."""
        return self.coinValueList[self.counter]

    def __repr__(self):
        parts = [
            'counter : ' + str(self.counter),
            'results : ' + str(self.results),
            'coinValueList : ' + str(self.coinValueList),
            'coin: ' + str(self.coinValueList[self.counter]),
        ]
        return 'Coins [ ' + ', '.join(parts) + ' ] '

    def makeCoinChange(self, change):
        """Reduce ``change`` to zero greedily, returning the coin tally.

        Iterative rewrite of the original tail recursion: each loop pass
        corresponds to one recursive call, so the printed trace and the
        final state are identical.
        """
        while True:
            print('change: ' + str(change))
            print(self.__repr__())
            # Current coin too large for the remainder: advance the cursor.
            if change - self.coin < 0:
                self.counter += 1
            if change == 0:
                break
            if self.coin <= change:
                self.results[self.coin] += 1
                change -= self.coin
        return self.results
# Demo: the same 63-cent computation via the Coins class.
change = Coins([10, 25, 5, 1])
print('\n')
print(change.makeCoinChange(63))
print('-' * 20)
'''
OUTPUT:
COIN VALUES: [25, 10, 5, 1]
63 - [ 25 ]
= 38
38 - [ 25 ]
= 13
13 - [ 10 ]
= 3
3 - [ 1 ]
= 2
2 - [ 1 ]
= 1
1 - [ 1 ]
= 0
{25: 2, 10: 1, 5: 0, 1: 3}
--------------------
change: 63
Coins [ counter : 0, results : {25: 0, 10: 0, 5: 0, 1: 0}, coinValueList : [25, 10, 5, 1], coin: 25 ]
change: 38
Coins [ counter : 0, results : {25: 1, 10: 0, 5: 0, 1: 0}, coinValueList : [25, 10, 5, 1], coin: 25 ]
change: 13
Coins [ counter : 0, results : {25: 2, 10: 0, 5: 0, 1: 0}, coinValueList : [25, 10, 5, 1], coin: 25 ]
change: 3
Coins [ counter : 1, results : {25: 2, 10: 1, 5: 0, 1: 0}, coinValueList : [25, 10, 5, 1], coin: 10 ]
change: 3
Coins [ counter : 2, results : {25: 2, 10: 1, 5: 0, 1: 0}, coinValueList : [25, 10, 5, 1], coin: 5 ]
change: 2
Coins [ counter : 3, results : {25: 2, 10: 1, 5: 0, 1: 1}, coinValueList : [25, 10, 5, 1], coin: 1 ]
change: 1
Coins [ counter : 3, results : {25: 2, 10: 1, 5: 0, 1: 2}, coinValueList : [25, 10, 5, 1], coin: 1 ]
change: 0
Coins [ counter : 3, results : {25: 2, 10: 1, 5: 0, 1: 3}, coinValueList : [25, 10, 5, 1], coin: 1 ]
{25: 2, 10: 1, 5: 0, 1: 3}
--------------------
'''
| StarcoderdataPython |
11367046 | from colouring.colour import Colour
class Node(object):
    """A coloured graph node with an integer identifier."""

    def __init__(self, colour: Colour, id: int):
        # ``id`` shadows the builtin, but both attribute names are public API.
        self.id = id
        self._colour = colour

    def get_colour(self) -> Colour:
        """Return the colour assigned to this node."""
        return self._colour
4932507 | <reponame>lupyuen/RaspberryPiImage<filename>usr/share/pyshared/ajenti/plugins/samba/smbusers.py
import subprocess
class SambaUser (object):
    """Plain record for one Samba account: username and SID.

    Both fields start as None and are filled in by SambaUsers.load().
    """
    def __init__(self):
        self.username = self.sid = None
class SambaUsers (object):
    """Manage Samba accounts by shelling out to the ``pdbedit`` CLI.

    NOTE(review): check_output(...).split('...') treats the output as str,
    which is Python 2 behaviour; under Python 3 check_output returns bytes
    -- confirm the target interpreter.
    """
    def load(self):
        # Populate self.users with one SambaUser per account pdbedit lists;
        # the username is the first ':'-separated field of `pdbedit -L`.
        self.users = []
        for un in [s.split(':')[0] for s in subprocess.check_output(['pdbedit', '-L', '-d0']).split('\n')]:
            # Skip blank lines, names containing spaces, and warning output.
            if un and not ' ' in un and not un.startswith('WARNING'):
                lines = subprocess.check_output(['pdbedit', '-Lv', '-d0', '-u', un]).split('\n')
                # Parse "Key: value" detail lines into a dict.
                fields = {}
                for l in lines:
                    if l and ':' in l:
                        l = l.split(':', 1)
                        fields[l[0]] = l[1].strip()
                u = SambaUser()
                u.username = un
                u.sid = fields['User SID']
                self.users.append(u)
    def create(self, un):
        # Create an account; the newlines are meant to answer pdbedit's
        # interactive prompts.
        # NOTE(review): Popen is created without stdin=PIPE, so communicate()
        # cannot actually deliver this input -- verify against upstream.
        p = subprocess.Popen(['pdbedit', '-at', '-u', un])
        p.communicate('\n\n\n')
    def delete(self, un):
        # Remove the account from the Samba password database.
        subprocess.call(['pdbedit', '-x', '-u', un])
    def set_password(self, un, pw):
        # Feed the new password twice to pdbedit's prompt; True on success.
        # NOTE(review): same missing stdin=PIPE concern as in create().
        p = subprocess.Popen(['pdbedit', '-at', '-u', un])
        p.communicate('%s\n%s\n' % (pw, pw))
        return p.returncode == 0
| StarcoderdataPython |
5090167 | <reponame>danieldennett/gap_sdk
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite_schema_head
import flatbuffers
class DimensionMetadata(object):
    """Generated FlatBuffers accessor for a TFLite DimensionMetadata table.

    Emitted by the FlatBuffers compiler ("do not modify"); comments below
    only describe the generated API.  Each getter returns the schema
    default (0) when its field is absent from the buffer.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsDimensionMetadata(cls, buf, offset):
        # Dereference the root uoffset and wrap the table at that position.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = DimensionMetadata()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is "TFL3", the TensorFlow Lite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
    # DimensionMetadata
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # DimensionMetadata
    def Format(self):
        # int8 field at vtable offset 4; default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0
    # DimensionMetadata
    def DenseSize(self):
        # int32 field at vtable offset 6; default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0
    # DimensionMetadata
    def ArraySegments(self, j):
        # j-th int32 element of the vector at vtable offset 8.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0
    # DimensionMetadata
    def ArraySegmentsAsNumpy(self):
        # Whole segments vector as a numpy int32 array (0 when absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0
    # DimensionMetadata
    def ArraySegmentsLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # DimensionMetadata
    def ArrayIndices(self, j):
        # j-th int32 element of the vector at vtable offset 10.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0
    # DimensionMetadata
    def ArrayIndicesAsNumpy(self):
        # Whole indices vector as a numpy int32 array (0 when absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0
    # DimensionMetadata
    def ArrayIndicesLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
# Generated builder helpers for serializing a DimensionMetadata table
# (4 vtable slots: format, dense_size, array_segments, array_indices).
def DimensionMetadataStart(builder): builder.StartObject(4)
def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0)
def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0)
def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0)
def DimensionMetadataStartArraySegmentsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0)
def DimensionMetadataStartArrayIndicesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def DimensionMetadataEnd(builder): return builder.EndObject()
| StarcoderdataPython |
8103235 | import argparse
import pandas as pd
from family import *
from utils import *
if __name__ == '__main__':
    # CLI driver: filter genetic variants per family, optionally restricted
    # to genes relevant to each family's phenotype.
    argp = argparse.ArgumentParser()
    argp.add_argument('-p', '--pedfile', default="Test_Ped.txt")
    argp.add_argument('-d', '--data', default="Test_cleaned.txt")
    argp.add_argument('-o', '--output', default="filtered.csv")
    argp.add_argument('-op', '--output_phen', default="filtered_phen.csv")
    argp.add_argument('-f', '--family', default="")
    argp.add_argument('-ph', '--phenfile', default="Test_Phen.txt")
    argp.add_argument('-m', '--mapfile', default="phenotype_to_genes.txt")
    argp.add_argument('--nophen', default = False, action = 'store_true')
    args = argp.parse_args()
    # get a dict of families from the pedfile
    families = get_families(args.pedfile)
    if not args.nophen:
        print("Getting relevant genes for family phenotypes...")
        # give each family a list of genes relevant to their phenotype
        load_phen(families, args.phenfile, args.mapfile)
    # read in the file containing variants
    df = pd.read_csv(args.data, sep='\t')
    #check that there are no errors, and remove rows with errors.
    df = verify(df)
    # csv with variants in one family
    if args.family != "":
        fam = families[args.family]
        fam_variants = df.copy()
        for person in fam.people:
            # Unaffected members keep "0/0" genotypes; affected exclude them.
            filt = filter_zyg if person.phen == "Unaffected" else exclude_zyg
            fam_variants = filt(fam_variants, person.ID, "0/0")
        fam_variants.to_csv(fam.ID + ".csv")
    # empty dataframes for results with and without phenotype filter
    result = pd.DataFrame()
    result_p = pd.DataFrame()
    for fam in families.values():
        print("Filtering", fam.ID + '...')
        # get a dataframe of variants for the family,
        # without phenotype filter
        famresult = filter_family(df, fam, phenfilter = False)
        # append it to the results
        result = pd.concat([result,famresult])
        if not args.nophen:
            # get a dataframe of variants for the family,
            # with phenotype filter
            famresult_p = filter_family(df, fam, phenfilter = True)
            # append it to the results
            result_p = pd.concat([result_p,famresult_p])
    # organize result first by sample and then by inh model
    result = result.sort_values(['sample', 'inh model'])
    #save result
    result.to_csv(args.output)
    print(result)
    #save result with phenotype filter
    if not args.nophen:
        result_p = result_p.sort_values(['family','phens_matched','sample'])
        result_p.to_csv(args.output_phen)
        print(result_p)
| StarcoderdataPython |
1881349 | # Copyright 2015 redisapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
import mock
import os
import json
from redisapi.storage import Instance, MongoStorage
class DockerManagerTest(unittest.TestCase):
    """Unit tests for redisapi.managers.DockerManager.

    Docker clients, Redis and the health checker are mocked; MongoStorage
    is the only real collaborator and is cleaned up in tearDown().
    """
    def remove_env(self, env):
        # Cleanup helper: drop an environment variable if present.
        if env in os.environ:
            del os.environ[env]
    def setUp(self):
        # Configure the environment DockerManager reads, registering
        # cleanups so each test starts from a pristine environment.
        os.environ["SENTINEL_HOSTS"] = '["http://host1.com:4243", \
            "http://localhost:4243", "http://host2.com:4243"]'
        self.addCleanup(self.remove_env, "SENTINEL_HOSTS")
        os.environ["REDIS_SERVER_HOST"] = "localhost"
        self.addCleanup(self.remove_env, "REDIS_SERVER_HOST")
        os.environ["REDIS_IMAGE"] = "redisapi"
        self.addCleanup(self.remove_env, "REDIS_IMAGE")
        os.environ["DOCKER_HOSTS"] = '["http://host1.com:4243", \
            "http://localhost:4243"]'
        self.addCleanup(self.remove_env, "DOCKER_HOSTS")
        from redisapi.managers import DockerManager
        self.manager = DockerManager()
        client_mock = mock.Mock()
        client_mock.return_value = mock.Mock()
        self.manager.client = client_mock
        self.manager.health_checker = mock.Mock()
        self.storage = MongoStorage()
    def tearDown(self):
        # Drop any instances persisted during the test.
        self.storage.db().instances.remove()
    def test_client(self):
        # client() connects to one of the configured docker hosts.
        os.environ["DOCKER_HOSTS"] = '["http://host1.com:4243", \
            "http://localhost:4243"]'
        self.addCleanup(self.remove_env, "DOCKER_HOSTS")
        from redisapi.managers import DockerManager
        manager = DockerManager()
        client = manager.client()
        hosts = ["http://host1.com:4243", "http://localhost:4243"]
        self.assertIn(client.base_url, hosts)
    def test_extract_hostname(self):
        # extract_hostname() strips scheme and port from a docker URL.
        from redisapi.managers import DockerManager
        manager = DockerManager()
        url = manager.extract_hostname("http://host.com:4243")
        self.assertEqual(url, "host.com")
    def test_docker_host_from_hostname(self):
        # docker_url_from_hostname() rebuilds the full docker URL.
        from redisapi.managers import DockerManager
        manager = DockerManager()
        url = manager.docker_url_from_hostname("host.com")
        self.assertEqual(url, "http://host.com:4243")
    def test_client_with_value(self):
        # An explicit host overrides the configured pool.
        from redisapi.managers import DockerManager
        manager = DockerManager()
        host = "http://myhost.com"
        client = manager.client(host=host)
        self.assertEqual(client.base_url, host)
    def test_hc(self):
        # Default health checker is the fake implementation.
        from redisapi.hc import FakeHealthCheck
        from redisapi.managers import DockerManager
        manager = DockerManager()
        self.assertIsInstance(manager.health_checker(), FakeHealthCheck)
    def test_docker_hosts(self):
        # docker_hosts reflects the DOCKER_HOSTS environment variable.
        hosts = ["http://host1.com:4243", "http://localhost:4243"]
        self.assertListEqual(self.manager.docker_hosts, hosts)
    @mock.patch("pyzabbix.ZabbixAPI")
    def test_hc_zabbix(self, zabix_mock):
        # HEALTH_CHECKER=zabbix selects the Zabbix-backed health checker.
        os.environ["ZABBIX_URL"] = "url"
        os.environ["ZABBIX_USER"] = "url"
        os.environ["ZABBIX_PASSWORD"] = "<PASSWORD>"
        os.environ["HEALTH_CHECKER"] = "zabbix"
        self.addCleanup(self.remove_env, "HEALTH_CHECKER")
        os.environ["ZABBIX_HOST"] = "2"
        os.environ["ZABBIX_INTERFACE"] = "1"
        from redisapi.hc import ZabbixHealthCheck
        from redisapi.managers import DockerManager
        manager = DockerManager()
        self.assertIsInstance(manager.health_checker(), ZabbixHealthCheck)
    def test_add_instance(self):
        # add_instance() creates/starts a container, registers the endpoint
        # with the health checker and configures the sentinels.
        add_mock = mock.Mock()
        self.manager.config_sentinels = mock.Mock()
        self.manager.health_checker.return_value = add_mock
        client_mock = mock.Mock()
        client_mock.return_value = mock.Mock(base_url="http://localhost:4243")
        self.manager.client = client_mock
        self.manager.client().create_container.return_value = {"Id": "12"}
        self.manager.client().inspect_container.return_value = {
            'NetworkSettings': {
                u'Ports': {u'6379/tcp': [{u'HostPort': u'49154'}]}}}
        instance = self.manager.add_instance("name")
        self.manager.client().create_container.assert_called_with(
            self.manager.image_name,
            command="",
            environment={'REDIS_PORT': 49153},
            ports=[49153]
        )
        self.manager.client().start.assert_called_with(
            "12",
            port_bindings={49153: ('0.0.0.0', 49153)}
        )
        add_mock.add.assert_called_with("localhost", 49153)
        endpoint = instance.endpoints[0]
        self.assertEqual(instance.name, "name")
        self.assertEqual(endpoint["container_id"], "12")
        self.assertEqual(endpoint["host"], "localhost")
        self.assertEqual(endpoint["port"], 49153)
        self.assertEqual(instance.plan, "basic")
        self.manager.config_sentinels.assert_called_with(
            "name", endpoint)
    def test_remove_instance(self):
        # remove_instance() deregisters the endpoint, stops/removes the
        # container and drops the sentinel configuration.
        remove_mock = mock.Mock()
        self.manager.remove_from_sentinel = mock.Mock()
        self.manager.health_checker.return_value = remove_mock
        instance = Instance(
            name="name",
            plan="basic",
            endpoints=[{"host": "host", "port": 123, "container_id": "12"}],
        )
        self.storage.add_instance(instance)
        self.manager.remove_instance(instance)
        remove_mock.remove.assert_called_with("host", 123)
        self.manager.client.assert_called_with("http://host:4243")
        self.manager.client().stop.assert_called_with(
            instance.endpoints[0]["container_id"])
        self.manager.client().remove_container.assert_called(
            instance.endpoints[0]["container_id"])
        self.storage.remove_instance(instance)
        self.manager.remove_from_sentinel.assert_called_with(
            instance.name)
    def test_bind(self):
        # bind() exposes redis and sentinel connection info as env vars.
        instance = Instance(
            name="name",
            plan='basic',
            endpoints=[{"host": "localhost", "port": 4242,
                        "container_id": "12"}],
        )
        result = self.manager.bind(instance)
        self.assertEqual(result['REDIS_HOST'], "localhost")
        self.assertEqual(result['REDIS_PORT'], "4242")
        expected_redis = json.dumps(['localhost:4242'])
        expected_sentinels = json.dumps([
            u'http://host1.com:4243',
            u'http://localhost:4243',
            u'http://host2.com:4243'
        ])
        self.assertEqual(result['REDIS_HOSTS'], expected_redis)
        self.assertEqual(result['SENTINEL_HOSTS'], expected_sentinels)
        self.assertEqual(result['REDIS_MASTER'], instance.name)
    def test_running_without_the_REDIS_IMAGE_variable(self):
        # Constructing the manager without REDIS_IMAGE must fail loudly.
        del os.environ["REDIS_IMAGE"]
        with self.assertRaises(Exception) as cm:
            from redisapi.managers import DockerManager
            DockerManager()
        exc = cm.exception
        self.assertEqual(
            (u"You must define the REDIS_IMAGE environment variable.",),
            exc.args,
        )
    @mock.patch("redis.StrictRedis")
    def test_config_sentinels(self, redis_mock):
        # config_sentinels() issues monitor/set commands on every sentinel.
        master = {"host": "localhost", "port": "3333"}
        self.manager.config_sentinels("master_name", master)
        calls = []
        sentinels = [
            {"host": u"host1.com", "port": u"4243"},
            {"host": u"localhost", "port": u"4243"},
            {"host": u"host2.com", "port": u"4243"},
        ]
        for sentinel in sentinels:
            host, port = sentinel["host"], sentinel["port"]
            sentinel_calls = [
                mock.call(host=host, port=port),
                mock.call().sentinel(
                    'monitor', 'master_name', 'localhost', '3333', '1'),
                mock.call().sentinel(
                    'set', 'master_name', 'down-after-milliseconds', '5000'),
                mock.call().sentinel(
                    'set', 'master_name', 'failover-timeout', '60000'),
                mock.call().sentinel(
                    'set', 'master_name', 'parallel-syncs', '1'),
            ]
            calls.extend(sentinel_calls)
        redis_mock.assert_has_calls(calls)
    @mock.patch("redis.StrictRedis")
    def test_remove_from_sentinel(self, redis_mock):
        # remove_from_sentinel() issues 'remove' on every sentinel.
        self.manager.remove_from_sentinel("master_name")
        calls = []
        sentinels = [
            {"host": u"host1.com", "port": u"4243"},
            {"host": u"localhost", "port": u"4243"},
            {"host": u"host2.com", "port": u"4243"},
        ]
        for sentinel in sentinels:
            host, port = sentinel["host"], sentinel["port"]
            sentinel_calls = [
                mock.call(host=host, port=port),
                mock.call().sentinel(
                    'remove', 'master_name'),
            ]
            calls.extend(sentinel_calls)
        redis_mock.assert_has_calls(calls)
    def test_port_range_start(self):
        self.assertEqual(49153, self.manager.port_range_start)
    def test_get_port_new_host(self):
        # A host with no containers yet starts at the range base port.
        self.assertEqual(49153, self.manager.get_port_by_host("newhost"))
    def test_get_port_host_with_containers(self):
        # A host with an existing endpoint gets the next port.
        instance = Instance(
            name="name",
            plan="basic",
            endpoints=[{"host": "newhost", "port": 49153,
                        "container_id": "12"}],
        )
        self.storage.add_instance(instance)
        self.assertEqual(49154, self.manager.get_port_by_host("newhost"))
| StarcoderdataPython |
12832426 | # -*- coding: utf-8 -*-
__author__ = 'luckydonald'
from .exceptions import NoResponse, IllegalResponseException
from .encoding import to_unicode as u
from time import sleep
import atexit
import logging
logger = logging.getLogger(__name__)
__all__ = ["receiver", "sender", "Telegram"]
class Telegram(object):
"""
To have the sender and the receiver in one handsome object.
Also is able to start the CLI, and stop it respectivly.
"""
def __init__(self, host="127.0.0.1", port=4458, telegram = None, pubkey_file = None, custom_cli_args = None):
from .sender import Sender
from .receiver import Receiver
self._proc = None
if telegram and pubkey_file:
if host not in ["127.0.0.1", "localhost","",None]:
raise ValueError("Can only start the cli at localhost. You may not provide a different host.")
host = "127.0.0.1"
self.startCLI(telegram=telegram, pubkey_file=pubkey_file, custom_cli_args=custom_cli_args, port=port)
elif telegram is not None or pubkey_file is not None or custom_cli_args is not None:
logger.warn("cli related parameter given, but not cli and pubkey path not present.")
self.sender = Sender(host=host,port=port)
self.receiver = Receiver(host=host,port=port)
while self._proc is not None and self._proc.returncode is None:
self._proc.poll()
try:
result = self.sender.raw(u("help"), retry_connect=False)
if result and u("Prints this help") in result:
logger.info("CLI available.")
else:
logger.warn("CLI does not responde correctly. (Debug: {})".format(result))
break
except:
logger.info("CLI did not responde.")
sleep(1)
else:
raise AssertionError("CLI Process died.")
def startCLI(self, telegram=None, pubkey_file=None, custom_cli_args=None, port=4458):
"""
Start the telegram process.
:type telegram: builtins.str
:type pubkey_file: builtins.str
:type custom_cli_args: list | tuple
:return: (int) process id of telegram.
:rtype int:
"""
if not telegram or not pubkey_file:
raise ValueError("telegram and/or pubkey_file not defined.")
self._tg = telegram
self._pub = pubkey_file
import subprocess
def preexec_function():
import os
os.setpgrp()
atexit.register(self.stopCLI)
args = [self._tg, '-R', '-W', '-P', str(port), '-k', self._pub, '--json']
if custom_cli_args is not None:
if not isinstance(custom_cli_args, (list, tuple)):
raise TypeError("custom_cli_args should be a list or a tuple.")
args.extend(custom_cli_args)
logger.info("Starting Telegram Executable: \"{cmd}\"".format(cmd=" ".join(args)))
self._proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, preexec_fn = preexec_function)
if self._check_stopped():
raise AssertionError("CLI did stop, should be running...")
#return pid
#raise NotImplementedError("I Have to figure out processes in Python first...")
def stopCLI(self):
"""
Stop the telegram process.
:return: (int) returncode of the cli process.
:rtype int:
"""
logger.info("Asking to CLI to stop.")
if self._proc is not None:
if self.sender._do_quit:
logger.warn("Sender already stopped. Unable to issue safe_quit or quit to exit the cli nicely.")
else:
try:
self.sender.safe_quit()
except (NoResponse, IllegalResponseException, AssertionError):
logger.debug("safe_quit Exception", exc_info=True)
if self._check_stopped(): return self._proc.returncode
logger.debug("safe_quit did not terminate.")
try:
self.sender.quit()
except (NoResponse, IllegalResponseException, AssertionError):
logger.debug("quit Exception", exc_info=True)
if self._check_stopped(): return self._proc.returncode
logger.debug("quit did not terminate.")
self.sender.stop() # quit and safe quit are done, we don't need the sender any longer.
#end if-else: self.sender._do_quit
if self._check_stopped(): return self._proc.returncode
try:
self._proc.terminate()
except Exception as e: #todo: ProcessLookupError does not exist before python 3
logger.debug("terminate Exception", exc_info=True)
if self._check_stopped(): return self._proc.returncode
logger.debug("terminate did not terminate.")
try:
self._proc.kill()
except Exception as e: #todo: ProcessLookupError does not exist before python 3
logger.debug("kill Exception", exc_info=True)
if self._check_stopped(): return self._proc.returncode
logger.debug("kill did not terminate.")
logger.warn("CLI kinda didn't die... Will wait (block) for termination.")
self._proc.wait()
self._check_stopped()
return self._proc.returncode
else:
logger.warn("No CLI running.")
raise AssertionError("No CLI running.")
def _check_stopped(self):
self._proc.poll()
if self._proc.returncode is not None:
logger.info("CLI did stop ({return_code}).".format(return_code=self._proc.returncode))
if hasattr(self, "sender") and self.sender is not None:
self.sender.stop()
return True | StarcoderdataPython |
1848991 | <reponame>malyvsen/unifit
import scipy.stats
# some distributions were excluded because they were:
# * deprecated
# * raising errors during fitting
# * taking ages to fit (levy_stable)
# Candidate scipy.stats continuous distribution names.  (Per the module
# notes above, some were excluded for being deprecated, raising errors
# during fitting, or taking ages to fit, e.g. levy_stable.)
names = [
    'alpha',
    'anglit',
    'arcsine',
    'argus',
    'beta',
    'betaprime',
    'bradford',
    'burr',
    'burr12',
    'cauchy',
    'chi',
    'chi2',
    'cosine',
    'crystalball',
    'dgamma',
    'dweibull',
    'expon',
    'exponnorm',
    'exponpow',
    'exponweib',
    'f',
    'fatiguelife',
    'fisk',
    'foldcauchy',
    'foldnorm',
    'gamma',
    'gausshyper',
    'genexpon',
    'genextreme',
    'gengamma',
    'genhalflogistic',
    'geninvgauss',
    'genlogistic',
    'gennorm',
    'genpareto',
    'gilbrat',
    'gompertz',
    'gumbel_l',
    'gumbel_r',
    'halfcauchy',
    'halfgennorm',
    'halflogistic',
    'halfnorm',
    'hypsecant',
    'invgamma',
    'invgauss',
    'invweibull',
    'johnsonsb',
    'johnsonsu',
    'kappa3',
    'kappa4',
    'laplace',
    'levy',
    'levy_l',
    'loggamma',
    'logistic',
    'loglaplace',
    'lognorm',
    'loguniform',
    'lomax',
    'maxwell',
    'mielke',
    'moyal',
    'nakagami',
    'ncf',
    'nct',
    'ncx2',
    'norm',
    'norminvgauss',
    'pareto',
    'pearson3',
    'powerlaw',
    'powerlognorm',
    'powernorm',
    'rayleigh',
    'rdist',
    'recipinvgauss',
    'reciprocal',
    'rice',
    'semicircular',
    'skewnorm',
    't',
    'trapz',
    'triang',
    'truncexpon',
    'truncnorm',
    'tukeylambda',
    'uniform',
    'vonmises',
    'vonmises_line',
    'wald',
    'weibull_max',
    'weibull_min',
    'wrapcauchy',
]

# Map each known name to its scipy.stats distribution object.  Older scipy
# versions are missing some distributions, hence the hasattr() guard.
distributions = {}
for _name in names:
    if hasattr(scipy.stats, _name):
        distributions[_name] = getattr(scipy.stats, _name)
del _name
| StarcoderdataPython |
3483867 | # Copyright (c) 2021 The Trade Desk, Inc
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import datetime as dt
import sys
import time
from uid2_client import EncryptionKeysAutoRefresher
from uid2_client import Uid2Client
from uid2_client import decrypt_token
def _usage():
print('Usage: python3 sample_auto_refresh.py <base_url> <auth_key> <ad_token>', file=sys.stderr)
sys.exit(1)
# Require all three positional arguments; otherwise print usage and exit.
if len(sys.argv) <= 3:
    _usage()
base_url = sys.argv[1]
auth_key = sys.argv[2]
ad_token = sys.argv[3]
client = Uid2Client(base_url, auth_key)
# Poll for ~20 seconds, decrypting the token whenever fresh keys are available.
# NOTE(review): the two timedeltas presumably configure refresh interval and
# key staleness for the auto refresher — confirm against uid2_client docs.
with EncryptionKeysAutoRefresher(client, dt.timedelta(seconds=4), dt.timedelta(seconds=7)) as refresher:
    for i in range(0, 20):
        refresh_result = refresher.current_result()
        if refresh_result.ready:
            print('Keys are ready, last refreshed (UTC):', refresh_result.last_success_time, flush=True)
            result = decrypt_token(ad_token, refresh_result.keys)
            print('UID2 =', result.uid2, flush=True)
        else:
            print('Keys are not ready yet, last error:', refresh_result.last_error[1], flush=True)
        time.sleep(1)
| StarcoderdataPython |
5084887 | <filename>example.py<gh_stars>0
from yaml2args import yaml2args
import argparse
# Build the CLI: a YAML config path plus two ordinary argparse options.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('-c', '--config', default='config.yaml', type=str, help='path to config file.')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
args = parser.parse_args()
# Merge the keys found in the YAML file into the parsed namespace.
args = yaml2args(args.config, args)
# NOTE(review): 'xxx' is presumably a key defined in config.yaml — confirm.
print(args.xxx)
| StarcoderdataPython |
288287 | from fontTools.cffLib import PrivateDict
from fontTools.cffLib.specializer import stringToProgram
from fontTools.misc.testTools import getXML, parseXML
from fontTools.misc.psCharStrings import (
T2CharString,
encodeFloat,
encodeFixed,
read_fixed1616,
read_realNumber,
)
from fontTools.pens.recordingPen import RecordingPen
import unittest
def hexenc(s):
    """Render a byte sequence as space-separated two-digit lowercase hex."""
    return " ".join(format(byte, "02x") for byte in s)
class T2CharStringTest(unittest.TestCase):
    """Tests for Type 2 charstrings: bounds computation, compile/decompile
    round-trips, fixed/real number encoding, XML (de)serialisation and
    pen drawing behaviour."""
    @classmethod
    def stringToT2CharString(cls, string):
        """Build a T2CharString from a space-separated charstring source string."""
        return T2CharString(program=stringToProgram(string), private=PrivateDict())
    def test_calcBounds_empty(self):
        # A glyph with no outline has no bounding box.
        cs = self.stringToT2CharString("endchar")
        bounds = cs.calcBounds(None)
        self.assertEqual(bounds, None)
    def test_calcBounds_line(self):
        # Bounds of a two-segment open line path.
        cs = self.stringToT2CharString("100 100 rmoveto 40 10 rlineto -20 50 rlineto endchar")
        bounds = cs.calcBounds(None)
        self.assertEqual(bounds, (100, 100, 140, 160))
    def test_calcBounds_curve(self):
        # Curve bounds include extrema lying off the control points.
        cs = self.stringToT2CharString("100 100 rmoveto -50 -150 200 0 -50 150 rrcurveto endchar")
        bounds = cs.calcBounds(None)
        self.assertEqual(bounds, (91.90524980688875, -12.5, 208.09475019311125, 100))
    def test_charstring_bytecode_optimization(self):
        # Compile/decompile must round-trip the program, both for CFF
        # (with trailing 'endchar') and CFF2 (without it).
        cs = self.stringToT2CharString(
            "100.0 100 rmoveto -50.0 -150 200.5 0.0 -50 150 rrcurveto endchar")
        cs.isCFF2 = False
        cs.private._isCFF2 = False
        cs.compile()
        cs.decompile()
        self.assertEqual(
            cs.program, [100, 100, 'rmoveto', -50, -150, 200.5, 0, -50, 150,
                         'rrcurveto', 'endchar'])
        cs2 = self.stringToT2CharString(
            "100.0 rmoveto -50.0 -150 200.5 0.0 -50 150 rrcurveto")
        cs2.isCFF2 = True
        cs2.private._isCFF2 = True
        cs2.compile(isCFF2=True)
        cs2.decompile()
        self.assertEqual(
            cs2.program, [100, 'rmoveto', -50, -150, 200.5, 0, -50, 150,
                          'rrcurveto'])
    def test_encodeFloat(self):
        # Each float must encode to the expected nibble stream and decode
        # back to the same value (at 8 significant digits).
        testNums = [
            # value expected result
            (-9.399999999999999, '1e e9 a4 ff'),  # -9.4
            (9.399999999999999999, '1e 9a 4f'),  # 9.4
            (456.8, '1e 45 6a 8f'),  # 456.8
            (0.0, '1e 0f'),  # 0
            (-0.0, '1e 0f'),  # 0
            (1.0, '1e 1f'),  # 1
            (-1.0, '1e e1 ff'),  # -1
            (98765.37e2, '1e 98 76 53 7f'),  # 9876537
            (1234567890.0, '1e 1a 23 45 67 9b 09 ff'),  # 1234567890
            (9.876537e-4, '1e a0 00 98 76 53 7f'),  # 9.876537e-24
            (9.876537e+4, '1e 98 76 5a 37 ff'),  # 9.876537e+24
        ]
        for sample in testNums:
            encoded_result = encodeFloat(sample[0])
            # check to see if we got the expected bytes
            self.assertEqual(hexenc(encoded_result), sample[1])
            # check to see if we get the same value by decoding the data
            decoded_result = read_realNumber(
                None,
                None,
                encoded_result,
                1,
            )
            self.assertEqual(decoded_result[0], float('%.8g' % sample[0]))
            # We limit to 8 digits of precision to match the implementation
            # of encodeFloat.
    def test_encode_decode_fixed(self):
        # 16.16 fixed-point encode/decode round-trip with known byte patterns.
        testNums = [
            # value expected hex expected float
            (-9.399999999999999, 'ff ff f6 99 9a', -9.3999939),
            (-9.4, 'ff ff f6 99 9a', -9.3999939),
            (9.399999999999999999, 'ff 00 09 66 66', 9.3999939),
            (9.4, 'ff 00 09 66 66', 9.3999939),
            (456.8, 'ff 01 c8 cc cd', 456.8000031),
            (-456.8, 'ff fe 37 33 33', -456.8000031),
        ]
        for (value, expected_hex, expected_float) in testNums:
            encoded_result = encodeFixed(value)
            # check to see if we got the expected bytes
            self.assertEqual(hexenc(encoded_result), expected_hex)
            # check to see if we get the same value by decoding the data
            decoded_result = read_fixed1616(
                None,
                None,
                encoded_result,
                1,
            )
            self.assertAlmostEqual(decoded_result[0], expected_float)
    def test_toXML(self):
        # XML output reproduces the program, one operator group per line.
        program = [
            '107 53.4004 166.199 hstem',
            '174.6 163.801 vstem',
            '338.4 142.8 rmoveto',
            '28 0 21.9 9 15.8 18 15.8 18 7.9 20.79959 0 23.6 rrcurveto',
            'endchar'
        ]
        cs = self.stringToT2CharString(" ".join(program))
        self.assertEqual(getXML(cs.toXML), program)
    def test_fromXML(self):
        # Parsing XML back yields the program with fixed-point rounded values.
        cs = T2CharString()
        for name, attrs, content in parseXML(
            [
                '<CharString name="period">'
                '  338.4 142.8 rmoveto',
                '  28 0 21.9 9 15.8 18 15.8 18 7.9 20.79959 0 23.6 rrcurveto',
                '  endchar'
                '</CharString>'
            ]
        ):
            cs.fromXML(name, attrs, content)
        expected_program = [
            338.3999939, 142.8000031, 'rmoveto',
            28, 0, 21.8999939, 9, 15.8000031,
            18, 15.8000031, 18, 7.8999939,
            20.7995911, 0, 23.6000061, 'rrcurveto',
            'endchar'
        ]
        self.assertEqual(len(cs.program), len(expected_program))
        for arg, expected_arg in zip(cs.program, expected_program):
            if isinstance(arg, str):
                self.assertIsInstance(expected_arg, str)
                self.assertEqual(arg, expected_arg)
            else:
                self.assertNotIsInstance(expected_arg, str)
                self.assertAlmostEqual(arg, expected_arg)
    def test_pen_closePath(self):
        # Test CFF2/T2 charstring: it does NOT end in "endchar"
        # https://github.com/fonttools/fonttools/issues/2455
        cs = self.stringToT2CharString("100 100 rmoveto -50 -150 200 0 -50 150 rrcurveto")
        pen = RecordingPen()
        cs.draw(pen)
        self.assertEqual(pen.value[-1], ('closePath', ()))
| StarcoderdataPython |
373003 | from contextlib import contextmanager
import enum
import typing
import pymongo
from energuide import dwelling
from energuide import logger
LOGGER = logger.get_logger(__name__)
class EnvVariables(enum.Enum):
    """Names of the environment variables that configure the DB connection."""
    username = 'ENERGUIDE_USERNAME'
    # NOTE(review): value looks redacted/truncated — the other members are all
    # 'ENERGUIDE_*' env-var names; confirm this should be 'ENERGUIDE_PASSWORD'.
    password = '<PASSWORD>'
    host = 'ENERGUIDE_HOST'
    port = 'ENERGUIDE_PORT'
    database = 'ENERGUIDE_DBNAME'
    collection = 'ENERGUIDE_COLLECTION'
    production = 'ENERGUIDE_PRODUCTION'
class EnvDefaults(enum.Enum):
    """Fallback values used when the corresponding EnvVariables are unset."""
    username = ''
    password = ''
    host = 'localhost'
    port = 27017
    database = 'energuide'
    collection = 'dwellings'
    production = False
class _DatabaseCoordinates(typing.NamedTuple):
    """Raw connection parameters for the MongoDB instance."""
    username: str
    password: str
    host: str
    port: int
    production: bool = False  # True selects the mongodb+srv:// cluster scheme
class DatabaseCoordinates(_DatabaseCoordinates):
    """Connection parameters plus a derived MongoDB connection string."""
    @property
    def connection_string(self) -> str:
        """Build the connection string: mongodb+srv with credentials in
        production, otherwise host:port with optional credentials."""
        if self.production:
            return f'mongodb+srv://{self.username}:{self.password}@{self.host}'
        credentials = f'{self.username}:{self.password}@' if self.username and self.password else ''
        return f'{credentials}{self.host}:{self.port}'
@contextmanager  # type: ignore
def mongo_client(database_coordinates: DatabaseCoordinates) -> typing.Iterable[pymongo.MongoClient]:
    """Yield a pymongo client connected according to *database_coordinates*,
    closing it when the with-block exits."""
    mode = 'production' if database_coordinates.production else 'local'
    LOGGER.info(
        f'Connecting to {database_coordinates.host}, using method {mode}'
    )
    with pymongo.MongoClient(database_coordinates.connection_string) as client:
        yield client
def load(coords: DatabaseCoordinates,
         database_name: str,
         collection_name: str,
         data: typing.Iterable[dwelling.Dwelling],
         update: bool = True) -> None:
    """Upsert dwellings into MongoDB, keyed by ``houseId``.

    :param coords: connection parameters for the target server
    :param database_name: database to write into
    :param collection_name: collection to write into
    :param data: dwellings to persist; each is serialised via ``to_dict()``
    :param update: when False the collection is dropped first (full reload)
    """
    client: pymongo.MongoClient
    with mongo_client(coords) as client:
        database = client[database_name]
        collection = database[collection_name]
        if not update:
            collection.drop()
        num_rows = 0
        for row in data:
            num_rows += 1
            data_row = row.to_dict()
            # replace_one(..., upsert=True) is the supported whole-document
            # upsert; Collection.update() is deprecated and removed in pymongo 4.
            collection.replace_one({'houseId': data_row['houseId']}, data_row, upsert=True)
        LOGGER.info(f"updated {num_rows} rows in the database")
| StarcoderdataPython |
8014426 |
from swingers import models
| StarcoderdataPython |
3361095 | from collections import Counter
from anytree import Node, PostOrderIter
class Circus:
    """Advent of Code 2017 day 7: build the program tower from the puzzle
    input, find its root (part 1) and the corrected weight of the one
    unbalanced program (part 2)."""
    day = 7
    test = 2  # 1 = part one (root name), 2 = part two (corrected weight)
    def process(self, raw_input):
        """Build the tree from the input rows and solve the selected part."""
        nodes = {new_node.name: new_node for new_node in self.parseInput(raw_input)}
        # Wire up parent/child links by resolving child names to nodes.
        for node in nodes.values():
            if len(node.childrenNames) > 0:
                children = [nodes.get(childrenName) for childrenName in node.childrenNames]
                node.children = children
        # Any node reaches the root via .root once the tree is connected.
        root = nodes.popitem()[1].root
        if self.test == 2:
            # Post-order: children's tower weights are known before the parent.
            for node in PostOrderIter(root):
                if len(node.children) == 0:
                    node.towerWeight = node.weight
                else:
                    childrenWeights = [child.towerWeight for child in node.children]
                    if len(set(childrenWeights)) > 1:
                        # Exactly one child is off-balance: the majority weight
                        # is correct, so adjust the outlier's own weight.
                        weight_count = Counter(childrenWeights)
                        right_weight = weight_count.most_common(1)[0][0]
                        wrong_child = [child for child in node.children if child.towerWeight != right_weight][0]
                        return wrong_child.weight + (right_weight - wrong_child.towerWeight)
                    else:
                        node.towerWeight = node.weight + sum(childrenWeights)
        # print(root.parent)
        # print(root.children)
        # print(RenderTree(root))
        return root.name
    def parseInput(self, raw_input):
        """Convert each input row into an unconnected anytree Node."""
        return [self.makeNode(row) for row in raw_input]
    def makeNode(self, row):
        """Parse 'name (weight) -> child, child' into a Node with metadata."""
        data = row.split()
        name = data[0]
        weight = int(data[1].strip('()'))  # remove braces
        children = []
        if len(data) > 1:
            # Tokens after the '->' arrow (index 3 onward) are child names.
            children = [name.strip(',') for name in data[3:]]  # remove commas
        return Node(name, weight=weight, childrenNames=children)
    def executeTestOnFile(self, input_filename):
        """Run the solver on a file and write the result next to it."""
        with open(input_filename) as input_file:
            raw_input = input_file.readlines()
        result = self.process(raw_input)
        print(result)
        with open(self.get_output_filename(), 'w') as output_file:
            output_file.write(str(result))
    def get_output_filename(self):
        """Output file name, e.g. 'day07.02.output'."""
        return "day" + str(self.day).zfill(2) + "." + str(self.test).zfill(2) + ".output"
    def get_input_filename(self):
        """Input file name, e.g. 'day07.input'."""
        return "day" + str(self.day).zfill(2) + ".input"
# Script entry point: solve the puzzle input file in the working directory.
if __name__ == "__main__":
    exercise = Circus()
    exercise.executeTestOnFile(exercise.get_input_filename())
| StarcoderdataPython |
1737709 | <filename>shaape/cairobackend.py
import cairo
import os
import errno
import math
from drawingbackend import DrawingBackend
import networkx as nx
from translatable import Translatable
from rotatable import Rotatable
from node import Node
import pangocairo
import pango
import warnings
class CairoBackend(DrawingBackend):
    """Cairo/Pango rendering backend for shaape drawings.

    Maintains a stack of ARGB32 surfaces; shapes, shadows and text are
    painted onto the top surface, which is composited down on pop.
    NOTE(review): several expressions (e.g. ``map(...) + [...]``) rely on
    Python 2 semantics where ``map`` returns a list — confirm target version.
    """
    DEFAULT_MARGIN = (10, 10, 10, 10)  # left, right, top, bottom (pixels)
    SHADOW_OPAQUENESS = 0.4
    def __init__(self, image_scale = 1.0, image_width = None, image_height = None):
        """Set up margins, the surface stack and the Pango font list."""
        super(CairoBackend, self).__init__(image_scale, image_width, image_height)
        self.set_margin(*(CairoBackend.DEFAULT_MARGIN))
        self.set_image_size(0, 0)
        self.__surfaces = []
        self.__ctx = None
        self.__drawn_graph = None
        self.__font_map = pangocairo.cairo_font_map_get_default()
        self.__available_font_names = [f.get_name() for f in self.__font_map.list_families()]
        return
    def blur_surface(self):
        """Gaussian-blur the top surface in place (used for soft shadows).
        Silently does nothing when numpy/scipy are not installed."""
        try:
            import numpy as np
            from scipy import ndimage
            blurred_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(math.ceil(self.__image_size[0])), int(math.ceil(self.__image_size[1])))
            top_surface = self.__surfaces[-1]
            width = top_surface.get_width()
            height = top_surface.get_height()
            src = np.frombuffer(top_surface.get_data(), np.uint8)
            src.shape = (height, width, 4)
            dst = np.frombuffer(blurred_surface.get_data(), np.uint8)
            dst.shape = (height, width, 4)
            # Blur each of the four channels (BGRA) independently.
            dst[:,:,3] = ndimage.gaussian_filter(src[:,:,3], sigma=3 * self.scale())
            dst[:,:,0] = ndimage.gaussian_filter(src[:,:,0], sigma=3 * self.scale())
            dst[:,:,1] = ndimage.gaussian_filter(src[:,:,1], sigma=3 * self.scale())
            dst[:,:,2] = ndimage.gaussian_filter(src[:,:,2], sigma=3 * self.scale())
            blurred_image = cairo.ImageSurface.create_for_data(dst, cairo.FORMAT_ARGB32, width, height)
            self.__ctx.set_source_surface(blurred_image)
            self.__ctx.set_operator(cairo.OPERATOR_SOURCE)
            self.__ctx.paint()
        except ImportError:
            pass
    def new_surface(self, name = None):
        """Create a fresh ARGB32 surface sized to the current image."""
        return cairo.ImageSurface(cairo.FORMAT_ARGB32, int(math.ceil(self.image_size()[0])), int(math.ceil(self.image_size()[1])))
    def push_surface(self):
        """Push a new blank surface and redirect drawing onto it."""
        surface = self.new_surface()
        self.__surfaces.append(surface)
        self.__ctx = cairo.Context(surface)
        self.__drawn_graph = nx.Graph()
    def pop_surface(self):
        """Composite the top surface onto the one below and discard it."""
        surface = self.__surfaces.pop()
        self.__ctx = cairo.Context(self.__surfaces[-1])
        self.__ctx.set_source_surface(surface)
        self.__ctx.set_operator(cairo.OPERATOR_OVER)
        self.__ctx.paint()
        self.__drawn_graph = None
    def surfaces(self):
        """Return the current surface stack (bottom first)."""
        return self.__surfaces
    def set_image_size(self, width, height):
        """Set the image size, clamping zero/None dimensions to 1 pixel."""
        if not width:
            width = 1
        if not height:
            height = 1
        self.__image_size = (width, height)
    def image_size(self):
        """Return the (width, height) of the output image."""
        return self.__image_size
    def set_margin(self, left, right, top, bottom):
        """Store the outer margin as (left, right, top, bottom)."""
        self.__margin = (left, right, top, bottom)
    def margin(self):
        """Return the (left, right, top, bottom) margin tuple."""
        return self.__margin
    def create_canvas(self):
        """Size the image to the canvas and lay down a white background."""
        self.set_image_size(self._canvas_size[0], self._canvas_size[1])
        self.push_surface()
        self.__ctx.set_source_rgb(1, 1, 1)
        self.__ctx.rectangle(0.0, 0.0, self.__image_size[0] + self.__margin[0] + self.__margin[1], self.__image_size[1] + self.__margin[2] + self.__margin[3])
        return
    def apply_dash(self, drawable):
        """Translate the drawable's fill type into a cairo dash pattern."""
        if drawable.style().fill_type() == 'dashed':
            width = drawable.style().width() * self._scale
            dash_list = [ width * 4, width]
            self.__ctx.set_dash(dash_list)
        elif drawable.style().fill_type() == 'dotted':
            width = drawable.style().width() * self._scale
            dash_list = [width, width]
            self.__ctx.set_dash(dash_list)
        elif drawable.style().fill_type() == 'dash-dotted':
            width = drawable.style().width() * self._scale
            dash_list = [ width * 4, width, width, width]
            self.__ctx.set_dash(dash_list)
        else:
            # solid (or unknown): no dashing
            self.__ctx.set_dash([])
    def apply_line(self, drawable, opaqueness = 1.0, shadow = False):
        """Configure stroke colour, width, caps and dash for the drawable."""
        self.__ctx.set_line_cap(cairo.LINE_CAP_BUTT)
        self.__ctx.set_line_join (cairo.LINE_JOIN_ROUND)
        width = max(1, math.floor(drawable.style().width() * self._scale))
        color = drawable.style().color()[0]
        if len(color) == 3:
            # Expand RGB to RGBA with full alpha.
            color = tuple(color) + tuple([1])
        if shadow:
            # Darken proportionally to transparency for the shadow pass
            # (Python 2 list semantics of map assumed here).
            color = map(lambda x: (1 - color[3]) * x, color[:3]) + [color[3]]
        adapted_color = tuple(color[:3]) + tuple([color[3] * opaqueness])
        self.__ctx.set_source_rgba(*adapted_color)
        self.apply_dash(drawable)
        self.__ctx.set_line_width(width)
        return
    def apply_fill(self, drawable, opaqueness = 1.0, shadow = False):
        """Set the fill source: a linear gradient across the drawable's
        bounding box when multiple colours are given, else a solid colour."""
        minimum = drawable.min()
        maximum = drawable.max()
        colors = drawable.style().color()
        if len(colors) > 1:
            linear_gradient = cairo.LinearGradient(minimum[0], minimum[1], maximum[0], maximum[1])
            n = 0
            for color in colors:
                # Evenly spaced gradient stops from 0.0 to 1.0.
                stop = n * (1.0 / (len(colors) - 1))
                if len(color) == 3:
                    color = tuple(color) + tuple([1])
                if shadow:
                    color = map(lambda x: (1 - color[3]) * x, color[:3]) + [color[3]]
                adapted_color = tuple(color[:3]) + tuple([color[3] * opaqueness])
                linear_gradient.add_color_stop_rgba(stop, *adapted_color)
                n = n + 1
            self.__ctx.set_source(linear_gradient)
        else:
            color = colors[0]
            if len(color) == 3:
                color = tuple(color) + tuple([1])
            if shadow:
                color = map(lambda x: (1 - color[3]) * x, color[:3]) + [color[3]]
            adapted_color = tuple(color[:3]) + tuple([color[3] * opaqueness])
            self.__ctx.set_source_rgba(*adapted_color)
        self.__ctx.set_line_width(1)
    def draw_polygon(self, polygon):
        """Fill a closed polygon using its style's fill."""
        self.__ctx.save()
        self.apply_fill(polygon)
        self.apply_transform(polygon)
        nodes = polygon.nodes()
        self.apply_path(nodes)
        self.__ctx.fill()
        self.__ctx.restore()
        return
    def draw_polygon_shadow(self, polygon):
        """Fill the polygon's shadow (darkened, partly transparent)."""
        self.__ctx.save()
        self.apply_fill(polygon, opaqueness = self.SHADOW_OPAQUENESS, shadow = True)
        self.__ctx.set_operator(cairo.OPERATOR_SOURCE)
        self.apply_transform(polygon)
        nodes = polygon.nodes()
        self.apply_path(nodes)
        self.__ctx.fill()
        self.__ctx.restore()
        return
    def draw_open_graph_shadow(self, open_graph):
        """Stroke the shadow for every path of an open graph."""
        self.apply_line(open_graph, opaqueness = self.SHADOW_OPAQUENESS, shadow = True)
        self.__ctx.save()
        self.apply_transform(open_graph)
        self.__ctx.set_operator(cairo.OPERATOR_SOURCE)
        paths = open_graph.paths()
        if len(paths) > 0:
            for path in paths:
                # Duplicate the end nodes so the stroke fully covers the path
                # ends (closed paths reuse the penultimate node instead).
                if path[0] == path[-1]:
                    nodes = [path[-2]] + path
                else:
                    nodes = [path[0]] + path + [path[-1]]
                self.apply_line(open_graph, opaqueness = self.SHADOW_OPAQUENESS, shadow = True)
                self.apply_path(nodes)
                self.__ctx.set_operator(cairo.OPERATOR_SOURCE)
                self.__ctx.stroke()
        self.__ctx.restore()
        return
    def _transform_to_sharp_space(self, direction, node):
        """Shift axis-aligned coordinates by half a pixel for odd line widths
        so 1px strokes land on pixel centres and stay crisp."""
        if self.__ctx.get_line_width() % 2 == 1:
            result = Node(node[0], node[1])
            if direction[0] == 0:
                result.set_position(result[0] + 0.5, result[1])
            if direction[1] == 0:
                result.set_position(result[0], result[1] + 0.5)
            return result
        else:
            return node
    def apply_path(self, nodes):
        """Trace the node sequence into the cairo context, emitting bezier
        segments around nodes styled 'curve' and straight lines otherwise."""
        cycle = (nodes[0] == nodes[-1])
        if cycle and nodes[0].style() == 'curve':
            # Start halfway along the first edge so the closing curve joins up.
            line_end = nodes[1] + ((nodes[0] - nodes[1]) * 0.5)
        else:
            line_end = nodes[0]
        self.__ctx.move_to(*self._transform_to_sharp_space(nodes[1] - nodes[0], line_end))
        for i in range(1, len(nodes)):
            if nodes[i].style() == 'curve':
                if i == len(nodes) - 1:
                    if cycle == True:
                        next_i = 1
                    else:
                        next_i = i
                else:
                    next_i = i + 1
                if i == len(nodes) - 1:
                    direction = nodes[next_i] - nodes[i]
                else:
                    direction = Node(0, 0)
                if i > 0 and nodes[i - 1].style() == 'miter':
                    # Line up to the midpoint before bending into the curve.
                    temp_end = nodes[i - 1] + ((nodes[i] - nodes[i - 1]) * 0.5)
                    self.__ctx.line_to(*self._transform_to_sharp_space(Node(0, 0), temp_end))
                line_end = nodes[i] + ((nodes[next_i] - nodes[i]) * 0.5)
                # Control points pulled 97% towards the corner node.
                cp1 = nodes[i - 1] + ((nodes[i] - nodes[i - 1]) * 0.97)
                cp2 = nodes[next_i] + ((nodes[i] - nodes[next_i]) * 0.97)
                cp1 = self._transform_to_sharp_space(direction, cp1)
                cp2 = self._transform_to_sharp_space(direction, cp2)
                self.__ctx.curve_to(cp1[0], cp1[1], cp2[0], cp2[1], *self._transform_to_sharp_space(direction, line_end))
            else:
                if i == len(nodes) - 1:
                    direction = nodes[i] - nodes[i - 1]
                else:
                    direction = Node(0, 0)
                self.__ctx.line_to(*self._transform_to_sharp_space(direction, nodes[i]))
                line_end = nodes[i]
        return
    def draw_open_graph(self, open_graph):
        """Stroke each path of the graph: first punch a clear (erase) pass,
        then stroke the styled line on top."""
        self.__ctx.save()
        self.apply_transform(open_graph)
        paths = open_graph.paths()
        if len(paths) > 0:
            for path in paths:
                self.apply_line(open_graph)
                self.apply_path(path)
                self.__ctx.set_operator(cairo.OPERATOR_CLEAR)
                self.__ctx.set_dash([])
                self.__ctx.stroke_preserve()
                self.apply_line(open_graph)
                self.__ctx.set_operator(cairo.OPERATOR_SOURCE)
                self.__ctx.stroke()
        self.__ctx.restore()
        return
    def __draw_text(self, text_obj, shadow = False):
        """Render text one glyph per grid cell, centring each glyph inside
        the cell given by global_scale(); shared by text and shadow passes."""
        text = text_obj.text()
        self.__ctx.save()
        pangocairo_context = pangocairo.CairoContext(self.__ctx)
        layout = pangocairo_context.create_layout()
        font = pango.FontDescription(text_obj.style().font().name())
        if not font.get_family() in self.__available_font_names:
            warnings.warn("Couldn't find font family for font name \"" + font.get_family() + "\". Using default font. Available fonts are: " + str(self.__available_font_names), RuntimeWarning)
        font_size = font.get_size()
        if font_size == 0:
            # Fall back to 10pt when the description carries no size.
            font_size = 10 * pango.SCALE
        font.set_size(int(font_size * self._scale))
        layout.set_font_description(font)
        layout.set_text(text_obj.text())
        if shadow == True:
            self.apply_fill(text_obj, opaqueness = self.SHADOW_OPAQUENESS, shadow = True)
        else:
            self.apply_fill(text_obj, shadow = False)
        self.__ctx.translate(*(text_obj.position()))
        letter_width, letter_height = layout.get_pixel_size()
        unit_width, unit_height = self.global_scale()
        diff_height = (unit_height - letter_height) / 2
        diff_width = (unit_width - letter_width) / 2
        self.__ctx.translate(0, diff_height)
        for cx, letter in enumerate(text):
            layout.set_text(letter)
            letter_width, letter_height = layout.get_pixel_size()
            unit_width, unit_height = self.global_scale()
            diff_height = (unit_height - letter_height) / 2
            diff_width = (unit_width - letter_width) / 2
            self.__ctx.translate(diff_width, 0)
            fwidth, fheight = layout.get_pixel_size()
            pangocairo_context.update_layout(layout)
            pangocairo_context.layout_path(layout)
            self.__ctx.translate(-diff_width, 0)
            self.__ctx.translate(unit_width, 0)
        self.__ctx.fill()
        self.__ctx.restore()
        return
    def draw_text(self, text_obj):
        """Render the text normally."""
        self.__draw_text(text_obj, shadow = False)
    def draw_text_shadow(self, text_obj):
        """Render the text's shadow pass."""
        self.__draw_text(text_obj, shadow = True)
    def apply_transform(self, obj):
        """Apply the object's translation and/or rotation to the context."""
        if isinstance(obj, Translatable):
            self.__ctx.translate(obj.position()[0], obj.position()[1])
        if isinstance(obj, Rotatable):
            self.__ctx.rotate(math.radians(obj.angle()))
        return
    def export_to_file(self, filename):
        """Write the top surface out as PNG, creating directories as needed."""
        path = os.path.dirname(filename)
        if path != '':
            try:
                os.makedirs(path)
            except OSError as exception:
                # Ignore "already exists"; re-raise anything else.
                if exception.errno != errno.EEXIST:
                    raise
        self.__surfaces[-1].write_to_png(filename)
        return
    def ctx(self):
        """Return the active cairo context."""
        return self.__ctx
    def translate(self, x, y):
        """Translate the active context by (x, y)."""
        self.ctx().translate(x, y)
6519702 | <filename>crawling_update7/crawlKoreaData_Seoul.py
import requests
from bs4 import BeautifulSoup
from utils import write_data
import time
import datetime
from datetime import date, timedelta
def get_data(url):
    """Fetch the Seoul COVID status page; return (map spans, update text)."""
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    updated = soup.find('p', class_='txt-status').text  # page's update timestamp
    entries = soup.select('.seoul-map-wrap>.seoul-map.seoul-map-all>span')
    entries.pop()  # last span is discarded (not a district entry) — mirrors original behaviour
    return entries, updated
def parse_data(data, updated):
    """Turn scraped map spans into per-district confirmed-case records,
    appending the update timestamp as a trailing record."""
    records = []
    for tag in data:  # one span per district
        region = tag.find_all('em', class_='sr-only')[0].text
        count = int(tag.find_all('span', class_='num')[0].text)
        records.append({
            '지역이름': region,
            '확진자수': count
        })
    records.append({'업데이트날짜': updated})
    return records
# ISO date strings used to name the output file.
today = date.today()
yesterday = date.today() - timedelta(1)
a = str(today)
b = str(yesterday)  # NOTE(review): unused below — confirm before removing
def run():
    """Scrape today's Seoul district data and persist it as a JS data file."""
    data, updated = get_data(
        "https://www.seoul.go.kr/coronaV/coronaStatus.do")
    confirmed_region = parse_data(data, updated)
    # print(confirmed_region)
    save_dir = 'koreaData_Seoul_' + a + '.js'
    crawler_name = 'crawlKoreaData_Seoul.py'
    var_name = 'koreaData_Seoul'
    write_data(confirmed_region, save_dir, crawler_name, var_name)
# Run the crawl on import/execution of this module.
run()
| StarcoderdataPython |
8197153 | from collections import defaultdict
from decimal import Decimal
from _datetime import datetime, timedelta
from enum import Enum
import math
import random
import re
import requests
import time
from vnpy.app.algo_trading import AlgoTemplate
from vnpy.trader.utility import round_to
from vnpy.trader.constant import Direction, Status, OrderType
from vnpy.trader.object import AccountData, OrderData, TradeData, TickData
from vnpy.trader.engine import BaseEngine
class AutoMarketMakerAlgo(AlgoTemplate):
    """Automated market maker for exchange stable-coin pairs.

    Quotes a bid and an ask at ``price_offset`` percent around mid price,
    hedges each fill at a fee-adjusted price, and halts when the combined
    balance drifts more than ``max_loss`` below the starting assets.
    """
    display_name = "交易所稳定币对 流动性挖坑"
    default_setting = {
        "vt_symbol": "",
        "base_asset": 0.0,
        "quote_asset": 0.0,
        "price_offset": 1,
        "price_tolerance": 5,
        "interval": 10,
        "volume": 1000,
        "min_order_level": 5,
        "max_loss": 10
    }
    variables = [
        "pos",
        "timer_count",
        "vt_ask_orderid",
        "vt_bid_orderid"
    ]
    def __init__(
        self,
        algo_engine: BaseEngine,
        algo_name: str,
        setting: dict
    ):
        """Read and validate settings, then initialise working state."""
        super().__init__(algo_engine, algo_name, setting)
        # Parameters
        self.vt_symbol = setting["vt_symbol"]
        self.base_asset = setting["base_asset"]
        self.quote_asset = setting["quote_asset"]
        assert self.base_asset != 0 and self.quote_asset != 0
        self.x = self.base_asset   # running base-asset balance
        self.y = self.quote_asset  # running quote-asset balance
        self.price_offset = setting["price_offset"]          # quote distance from mid, in percent
        self.price_tolerance = setting["price_tolerance"]    # max allowed distance from 1.0, in percent
        self.interval = setting["interval"]                  # seconds between quoting attempts
        self.volume = setting["volume"]
        self.min_order_level = setting["min_order_level"]    # order-book depth level quotes must clear
        self.max_loss = setting.get("max_loss", 0.1 * (self.base_asset + self.quote_asset))
        self.fee_rate = setting.get("fee_rate", 0.0015)
        assert 0 <= self.fee_rate < 1
        # validate setting
        assert 0 <= self.min_order_level <= 5
        # Variables
        self.pos = 0
        self.timer_count = 0
        self.vt_ask_orderid = ""
        self.vt_ask_price = 0.0
        self.vt_bid_orderid = ""
        self.vt_bid_price = 0.0
        self.hedge_ask_orderids = []
        self.hedge_bid_orderids = []
        self.last_tick = None
        self.stopped = True
        self.subscribe(self.vt_symbol)
        self.put_parameters_event()
        self.put_variables_event()
    def on_start(self):
        """Activate the algo and cache the contract's price tick."""
        random.seed(time.time())
        self.write_log("开始 自动做市")
        self.stopped = False
        self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick if self.algo_engine.main_engine is not None else 0.00000001
        assert self.pricetick > 0
    def on_tick(self, tick: TickData):
        """Record the latest tick and cancel quotes that drifted off target."""
        self.last_tick = tick
        self.prune_ask_orders(tick)
        self.prune_bid_orders(tick)
    def prune_ask_orders(self, tick: TickData):
        """Cancel the resting ask if it now sits above the target ask price."""
        #TODO: prune hedge orders
        market_price = (tick.ask_price_1 + tick.bid_price_1) / 2
        if self.vt_ask_orderid != "":
            target_ask_price = round_to(market_price * ((100 + self.price_offset)/100), self.pricetick)
            if self.vt_ask_price > target_ask_price:
                self.write_log(f"当前卖单{self.vt_ask_price} 超出目标价 {target_ask_price},取消{self.vt_ask_orderid}")
                self.cancel_order(self.vt_ask_orderid)
    def prune_bid_orders(self, tick: TickData):
        """Cancel the resting bid if it now sits below the target bid price."""
        #TODO: prune hedge orders
        market_price = (tick.ask_price_1 + tick.bid_price_1) / 2
        if self.vt_bid_orderid != "":
            target_bid_price = round_to(market_price * ((100 - self.price_offset)/100), self.pricetick)
            if self.vt_bid_price < target_bid_price:
                self.write_log(f"当前买单{self.vt_bid_price} 超出目标价 {target_bid_price},取消{self.vt_bid_orderid}")
                self.cancel_order(self.vt_bid_orderid)
    def on_timer(self):
        """Every ``interval`` seconds, place missing quotes around mid price,
        subject to the depth-level and price-tolerance constraints."""
        if not self.last_tick or self.stopped:
            return
        if not self.check_assets_balance():
            self.cancel_all()
            self.stopped = True
            return
        self.timer_count += 1
        if self.timer_count < self.interval:
            self.put_variables_event()
            return
        self.timer_count = 0
        market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2
        # Only quote the ask side when no AMM ask or ask hedge is working.
        if self.vt_ask_orderid == "" and len(self.hedge_ask_orderids) == 0:
            min_ask_price = getattr(self.last_tick, f"ask_price_{self.min_order_level}") if self.min_order_level > 0 else market_price
            vt_ask_price = round_to(market_price * ((100 + self.price_offset)/100), self.pricetick)
            if vt_ask_price >= min_ask_price and math.fabs(vt_ask_price - 1)*100 <= self.price_tolerance:
                self.vt_ask_price = vt_ask_price
                self.vt_ask_volume = self.volume
                if self.vt_ask_volume > 0:
                    self.write_log(f"委托AMM卖单,价格:{self.vt_ask_price}, 下单量: {self.vt_ask_volume}")
                    self.vt_ask_orderid = self.sell(self.vt_symbol, self.vt_ask_price, self.vt_ask_volume)
        # Only quote the bid side when no AMM bid or bid hedge is working.
        if self.vt_bid_orderid == "" and len(self.hedge_bid_orderids) == 0:
            max_bid_price = getattr(self.last_tick, f"bid_price_{self.min_order_level}") if self.min_order_level > 0 else market_price
            vt_bid_price = round_to(market_price * ((100 - self.price_offset)/100), self.pricetick)
            if vt_bid_price <= max_bid_price and math.fabs(vt_bid_price - 1)*100 <= self.price_tolerance:
                self.vt_bid_price = vt_bid_price
                self.vt_bid_volume = self.volume
                if self.vt_bid_volume > 0:
                    self.write_log(f"委托AMM买单,价格:{self.vt_bid_price},下单量: {self.vt_bid_volume}")
                    self.vt_bid_orderid = self.buy(self.vt_symbol, self.vt_bid_price, self.vt_bid_volume)
        #self.write_log(f"{self.vt_ask_orderid}, {self.vt_bid_orderid}")
        self.put_variables_event()
    def on_order(self, order: OrderData):
        """Track fills: update balances, and hedge finished AMM orders."""
        if order.vt_orderid == self.vt_ask_orderid:
            if not order.is_active():
                if order.traded > 0:
                    self.write_log(f"AMM卖单成交,价格:{order.price},成交量: {order.traded}")
                    self.hedge(order)
                self.vt_ask_orderid = ""
                self.vt_ask_price = 0.0
                self.x-=order.traded
                self.y+=order.traded*order.price
        elif order.vt_orderid == self.vt_bid_orderid:
            if not order.is_active():
                if order.traded > 0:
                    self.write_log(f"AMM买单成交,价格:{order.price},成交量: {order.traded}")
                    self.hedge(order)
                self.vt_bid_orderid = ""
                self.vt_bid_price = 0.0
                self.x+=order.traded
                self.y-=order.traded*order.price
        elif order.vt_orderid in self.hedge_ask_orderids:
            if not order.is_active():
                self.write_log(f"对冲卖单成交,价格:{order.price},成交量: {order.traded}")
                self.hedge_ask_orderids.remove(order.vt_orderid)
                self.x-=order.traded
                self.y+=order.traded*order.price
        elif order.vt_orderid in self.hedge_bid_orderids:
            if not order.is_active():
                self.write_log(f"对冲买单成交,价格:{order.price},成交量: {order.traded}")
                self.hedge_bid_orderids.remove(order.vt_orderid)
                self.x+=order.traded
                self.y-=order.traded*order.price
        self.put_variables_event()
    def on_trade(self, trade: TradeData):
        """Trade callback: just refresh the published variables."""
        self.put_variables_event()
    def hedge(self, order: OrderData):
        """Place the opposite-side order at a fee-adjusted price so the
        round trip is flat after fees."""
        volume = order.traded
        if order.direction == Direction.SHORT:
            hedge_price = round_to(order.price * (1-self.fee_rate), self.pricetick)
            # Buy back slightly more to cover the fee taken on the fill.
            volume = volume/(1-self.fee_rate)
            vt_hedge_bid_orderid = self.buy(
                self.vt_symbol,
                hedge_price,
                volume
            )
            if vt_hedge_bid_orderid != "":
                self.write_log(f"委托AMM对冲买单,价格:{order.price}, 下单量: {volume}")
                self.hedge_bid_orderids.append(vt_hedge_bid_orderid)
        elif order.direction == Direction.LONG:
            hedge_price = round_to(order.price / (1-self.fee_rate), self.pricetick)
            vt_hedge_ask_orderid = self.sell(
                self.vt_symbol,
                hedge_price,
                volume
            )
            if vt_hedge_ask_orderid != "":
                self.write_log(f"委托AMM对冲卖单,价格:{order.price}, 下单量: {volume}")
                self.hedge_ask_orderids.append(vt_hedge_ask_orderid)
    def on_stop(self):
        """Log shutdown and give in-flight requests a moment to settle."""
        self.write_log("停止 流动性挖矿")
        # self.write_log(f"账户状态:{self.algo_engine.main_engine.get_all_accounts()}")
        time.sleep(5)
    def check_assets_balance(self):
        """Return False when losses exceed max_loss; also flags negative
        balances. NOTE(review): the negative-balance branch sets
        ``self.stopped`` but does not return False — confirm that is intended."""
        x, y = self.x, self.y
        if x < 0 or y < 0:
            self.write_log(f"当前持仓: {x}*{y} < 0 ,停止自动做市机器人")
            self.stopped = True
        if (x + y) - (self.base_asset + self.quote_asset) < -self.max_loss:
            self.write_log(f"当前持仓: {x}+{y} < {self.base_asset+self.quote_asset} - {self.max_loss} ,停止自动做市机器人")
            return False
        return True
| StarcoderdataPython |
6678927 | <gh_stars>0
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# Load the source image as single-channel grayscale.
img = cv.imread('D:\@Semester 06\Digital Image Processing\Lab\Manuals\Figures\lab4\_img3.tif', 0)
[w, h] = img.shape  # Get image rows & cols
# 256-bin tables for the histogram, PDF, CDF and transformation function.
histogram = [0] * 256
pdf = [0.0] * 256
cdf = [0.0] * 256
tf = [0] * 256
for i in range(0, w):  # Histogram of the input image
    for j in range(0, h):
        histogram[img[i][j]] += 1
plt.plot(histogram)
plt.show()
for i in range(0, 256):  # PDF: counts normalised by total pixel count
    pdf[i] = histogram[i] / (w * h)
plt.plot(pdf)
plt.show()
# CDF as a running *inclusive* cumulative sum. BUGFIX: the original used
# sum(pdf[0:i]) which (a) excluded pdf[i] (off-by-one, so cdf[255] < 1)
# and (b) was O(n^2); a running sum fixes both.
running = 0.0
for i in range(0, 256):
    running += pdf[i]
    cdf[i] = running
plt.plot(cdf)
plt.show()
for i in range(0, 256):  # Transformation function: scale CDF to [0, 255]
    tf[i] = round(cdf[i] * 255)
plt.plot(tf)
plt.show()
for i in range(w):  # Apply the transform -> contrast-enhanced image
    for j in range(0, h):
        img[i][j] = tf[img[i][j]]
# NOTE(review): plt.plot on a 2D image draws one line per column;
# plt.imshow may have been intended — left unchanged.
plt.plot(img)
plt.show()
# BUGFIX: reset the bins before recounting. The original reused the filled
# histogram, so the second plot showed the sum of both passes.
histogram = [0] * 256
for i in range(0, w):  # Histogram of the equalised image
    for j in range(0, h):
        histogram[img[i][j]] += 1
plt.plot(histogram)
plt.show()
cv.imshow('Enhanced', img)  # Show enhanced image
cv.waitKey(20000)
| StarcoderdataPython |
394299 | # -*- coding: utf-8 -*-
import sys
from ..decorators import linter
from ..parsers.base import ParserBase
@linter(
    name="black",
    install=[[sys.executable, "-m", "pip", "install", "-U", "black"]],
    help_cmd=["black", "-h"],
    run=["black"],
    rundefault=["black"],
    dotfiles=[],
    language="python",
    autorun=False,
    run_per_file=True,
    autofix=True,
)
class BlackParser(ParserBase):
    """Black isn't actually a linter, so no-op."""
    def parse(self, lint_data):
        # Black only reformats code; it never produces lint findings.
        return []
| StarcoderdataPython |
5126834 | <gh_stars>0
from game_states import GameStates
def kill_player(player, colors):
    """Turn the player entity into a corpse and end the game.

    Returns a (message, game_state) tuple with GameStates.PLAYER_DEAD.
    """
    player.char = '%'
    player.color = colors.get('dark_red')
    return 'You died!', GameStates.PLAYER_DEAD
def kill_monster(monster, colors):
    """Replace a slain monster with a passive, walkable corpse.

    Strips its combat and AI components and returns the message to show
    in the game log.
    """
    message = '{0} is dead!'.format(monster.name.capitalize())
    corpse_color = colors.get('dark_red')
    monster.char = '%'
    monster.color = corpse_color
    monster.blocks = False
    monster.fighter = None
    monster.ai = None
    monster.name = 'remains of ' + monster.name
    return message
| StarcoderdataPython |
1879517 | """Helper functions used across this library."""
import os
import re
from functools import partial
from itertools import islice
from typing import Tuple
# Match filenames ending in a .py, .zip or .egg extension. BUGFIX: the
# original pattern r".+py$|.+zip$|.+egg$" had no literal dot, so any name
# merely *ending* in those letters (e.g. "happy", "numpy") also matched.
EXTENSIONS = re.compile(r".+\.(py|zip|egg)$")
def take(n, iterable):
    """
    Collect the first n items of the iterable into a list.

    Notes
    -----
    From itertools recipes:
    https://docs.python.org/3.6/library/itertools.html#itertools-recipes
    """
    prefix = islice(iterable, n)
    return list(prefix)
def chunked(iterable, n):
    """Break *iterable* into successive lists of at most *n* items.

    The final chunk is shorter when the input length is not evenly
    divisible by *n*:

    >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
    [[1, 2, 3], [4, 5, 6]]
    >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
    [[1, 2, 3], [4, 5, 6], [7, 8]]

    Notes
    -----
    Reimplemented from more-itertools so the package is not a dependency:
    https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.chunked
    """
    source = iter(iterable)
    next_chunk = partial(take, n, source)
    # iter(callable, sentinel) keeps yielding chunks until one comes back empty.
    return iter(next_chunk, [])
def get_py_zip_egg_files(path: str) -> Tuple[str, ...]:
    """
    Return the paths of all .py, .zip and .egg entries directly under *path*.

    This method is a workaround needed for Glue2.0 as of 2020-05-11.
    """
    matches = []
    for entry in os.scandir(path):
        if EXTENSIONS.match(entry.name):
            matches.append(entry.path)
    return tuple(matches)
12830500 | """
ml_digit_recognition.py
Trains a neural network to recognize handwritten digits from the MNIST database.
Author: <NAME>
Created: 10 - 28 - 2017
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data as mnist_input_data
from random import randint
from tqdm import tqdm
from time import time
# -------------------------------- HYPER PARAMS --------------------------------
LEARNING_RATE = 0.2 # How quickly the network learns (sensitivity to error)
BATCH_SIZE = 500 # The number of samples in a batch in each training epoch
TRAINING_EPOCHS = 5000 # The number of training epochs
LOGPATH = 'logs' # Directory where TensorBoard summaries are written
# --------------------------------- MNIST Data ---------------------------------
# Download (if needed) and load MNIST; one_hot=True encodes labels as 10-vectors
print('Getting MNIST digit data')
mnist = mnist_input_data.read_data_sets('MNIST_data/', one_hot=True)
# --------------------------- Neural Network System ----------------------------
"""
Convolution Neural Network:
input(784)
reshape(28,28,1)
convolve(4, 4)
relu(4, 4)
reshape(49)
softmax(10)
output(10)
Determines the numerical digit that the given image represents.
"""
# Initialize random seed
tf.set_random_seed(int(time()))
# Input z
z = tf.placeholder(tf.float32, [None, 784], name='z')
# Reshape layer
s0 = tf.reshape(z, [-1, 28, 28, 1])
# Convolution layer
Kc0 = tf.Variable(tf.random_uniform([4, 4, 1, 1]), name='Kc0')
c0 = tf.nn.conv2d(s0, Kc0, strides=[1, 4, 4, 1], padding='SAME')
# RELU layer
r0 = tf.nn.relu(c0)
# Reshape layer
s1 = tf.reshape(c0, [-1, 49])
# Fully-Connected Layer to Output P
Wp = tf.Variable(tf.random_uniform([49, 10]), name='Wp')
bp = tf.Variable(tf.random_uniform([10]), name='bp')
p = tf.nn.softmax(tf.matmul(s1, Wp) + bp)
# Training p
p_ = tf.placeholder(tf.float32, [None, 10], name='p_')
# Error function (Least-Squares)
error = tf.losses.mean_squared_error(labels=p_, predictions=p)
# Trainer
trainer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(error)
# Summary
tf.summary.scalar('error', error)
tf.summary.histogram('Kc0', Kc0)
tf.summary.histogram('Wp', Wp)
tf.summary.histogram('bp', bp)
summary = tf.summary.merge_all()
# ----------------------------- Show Sample Helper -----------------------------
def show_sample(name, images, labels, predicts, error):
    """
    HELPER FUNCTION
    Shows a sample of the given MNIST data and the resulting predictions
    from the neural network. Plots images, labels, and predictions in a
    2x2 subplot grid starting at a random offset, then prints the error.
    :param name: the name of the dataset (used in plot titles)
    :param images: the images of the MNIST data (Kx784 array; reshaped here)
    :param labels: the one-hot labels of the MNIST data
    :param predicts: the softmax predictions from the Neural Network
    :param error: the error of the prediction from the Neural Network
    """
    # Title formatters
    plot_title = '{name} Sample Digits'
    subplot_title = 'Expected: {expected}, Predicted: {predicted}'
    error_title = '{name} error: {error}'
    # Rows and columns of subplot
    rows = 2
    cols = 2
    # Randomized samples start (upper bound leaves room for rows*cols images)
    start = randint(0, images.shape[0] - (rows*cols))
    # Get formatted data: 28x28 images, argmax turns one-hot/softmax into digits
    formatted_images = np.reshape(images, (-1, 28, 28))
    formatted_labels = np.argmax(labels, axis=1)
    formatted_predicts = np.argmax(predicts, axis=1)
    # Create subplot plot
    plt.figure(plot_title.format(name=name))
    for index in range(rows*cols):
        # Create subplot of each sample
        splt = plt.subplot(rows, cols, index+1)
        splt.set_title(subplot_title.format(
            expected=formatted_labels[start+index],
            predicted=formatted_predicts[start+index]
        ))
        splt.axis('off')
        splt.imshow(formatted_images[start+index,:,:], cmap='gray')
    # Show plot (blocks until the window is closed) and then print error
    plt.show()
    print(error_title.format(name=name, error=error))
# ----------------------------------- Session ----------------------------------
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    # Create session writer for TensorBoard summaries
    writer = tf.summary.FileWriter(LOGPATH,
                                   graph=tf.get_default_graph())
    # ---------------------- Initial Run -----------------------
    # Compute predictions/error on the test set before any training
    print('Compute initial prediction')
    predicts_0 = sess.run(p, feed_dict={z:mnist.test.images})
    error_0 = sess.run(error, feed_dict={z:mnist.test.images,
                                         p_:mnist.test.labels})
    # Plot initial sample
    print('Plot initial prediction sample')
    show_sample(name='Initial',
                images=mnist.test.images,
                labels=mnist.test.labels,
                predicts=predicts_0,
                error=error_0)
    # --------------------- Training Step ----------------------
    print('Training Neural Network...')
    for i in tqdm(range(TRAINING_EPOCHS), desc='Training'):
        # Train batch and add summary
        batch_zs, batch_ps = mnist.train.next_batch(BATCH_SIZE)
        _, summ = sess.run([trainer, summary],
                           feed_dict={z:batch_zs, p_:batch_ps})
        writer.add_summary(summ, i)
    # ----------------------- Final Run ------------------------
    # Re-evaluate the test set after training to show the improvement
    print('Compute final prediction')
    predicts_1 = sess.run(p, feed_dict={z:mnist.test.images})
    error_1 = sess.run(error, feed_dict={z:mnist.test.images,
                                         p_:mnist.test.labels})
    # Plot final samples
    print('Plot final prediction sample')
    show_sample(name='Final',
                images=mnist.test.images,
                labels=mnist.test.labels,
                predicts=predicts_1,
                error=error_1)
    # ---------------------- Close Writer ----------------------
    writer.close()
| StarcoderdataPython |
11261740 | <reponame>spp2/PythonStdioGames
import pytest
import sys
import io
from gamesbyexample import mancala
def test_getNewBoard():
    # A fresh board: both stores ('1', '2') empty, every pit A-L holds 4 seeds.
    assert mancala.getNewBoard() == {'1': 0, '2': 0, 'A': 4, 'B': 4, 'C': 4, 'D': 4, 'E': 4, 'F': 4, 'G': 4, 'H': 4, 'I': 4, 'J': 4, 'K': 4, 'L': 4}
def test_displayBoard(capsys):
    """A fresh board renders twelve '4' pits and two '0' stores."""
    gameBoard = mancala.getNewBoard()
    mancala.displayBoard(gameBoard)
    captured = capsys.readouterr()
    assert captured.out.count('4') == 12
    assert captured.out.count('0') == 2
def test_makeMove():
    """makeMove sows seeds counter-clockwise and returns the next player."""
    gameBoard = mancala.getNewBoard()
    # Sowing from C ends in player 1's store, so player 1 moves again.
    result = mancala.makeMove(gameBoard, '1', 'C')
    assert result == '1'
    assert gameBoard == {'1': 1, '2': 0, 'A': 4, 'B': 4, 'C': 0, 'D': 5, 'E': 5, 'F': 5, 'G': 4, 'H': 4, 'I': 4, 'J': 4, 'K': 4, 'L': 4}
    # Sowing from D does not end in the store, so the turn passes to player 2.
    result = mancala.makeMove(gameBoard, '1', 'D')
    assert result == '2'
    assert gameBoard == {'1': 2, '2': 0, 'A': 4, 'B': 4, 'C': 0, 'D': 0, 'E': 6, 'F': 6, 'G': 4, 'H': 4, 'I': 4, 'J': 4, 'K': 5, 'L': 5}
def test_quit():
    """Entering QUIT in any casing/whitespace variant exits the game."""
    board = mancala.getNewBoard()
    # Test quitting:
    with pytest.raises(SystemExit):
        sys.stdin = io.StringIO('QUIT\n')
        mancala.getPlayerMove('1', board)
    with pytest.raises(SystemExit):
        sys.stdin = io.StringIO('quit\n')
        mancala.getPlayerMove('1', board)
    with pytest.raises(SystemExit):
        sys.stdin = io.StringIO('qUiT\n')
        mancala.getPlayerMove('1', board)
    with pytest.raises(SystemExit):
        sys.stdin = io.StringIO(' quit \n')
        mancala.getPlayerMove('1', board)
def test_main(capsys):
    """Drive full games through main() via scripted stdin and check output."""
    # A scripted sequence of moves that leads to a player 1 victory.
    sys.stdin = io.StringIO('\nC\nD\nG\nC\nK\nI\nB\nH\nB\nJ\nD\nA\nI\nE\nH\nG\nJ\nF\nG\nH\nD\nI\nJ\nE\nH\nB\nG\n')
    with pytest.raises(SystemExit):
        mancala.main()
    captured = capsys.readouterr()
    assert 'Player 1 has won!' in captured.out
    # Tests having so many seeds in a pocket that you skip the other player's store:
    sys.stdin = io.StringIO('\nA\nG\nB\nH\nE\nI\nD\nG\nC\nH\nE\nF\nQUIT\n')
    with pytest.raises(SystemExit):
        mancala.main()
    captured = capsys.readouterr()
    assert '| 1 | 3 |' in captured.out
    # Invalid selections (wrong side, stores, empty pit) produce error prompts.
    sys.stdin = io.StringIO('\nG\n1\n2\nC\nC\n\nQUIT\n')
    with pytest.raises(SystemExit):
        mancala.main()
    captured = capsys.readouterr()
    assert 'Please pick a letter on your side of the board.' in captured.out
    assert 'Please pick a non-empty pit.' in captured.out
4891416 | """Fingerprint
generate fingerprints from molecules.
fp = Fingerprint(numIntegers)
Generate a fingerprint object that stores fingerprints
in an array of numIntegers.
fp.addPath(path)
add a path to the fingerprint. path is any str'able
value.
fp in fp2
returns 1 if the fingerprint fp is a contained in the fingerprint
fp2.
This might be a bastardazation of __contains__ but I sort
of like it.
The method for creating fingerprints is remarkably simple.
a sequence of non branching paths is extracted from a molecule.
A path is a string value named atoms and named bonds
through the traversal. For instance:
'C-C-C-C-N' or even 'Carbon through single bond to Carbon'.
Any string will do as long as the string is always the
same for the same path through the same or other molecules.
For each path
1 convert the string to an integer value and use it to
seed a random number generator
(random.seed can use any hashable value as the seed!
python, again, is cool!)
random.seed(path)
2 pull out two random integers from the seeded generator
index = int(random.random() * NUM_INTS)
bit = int(random.random() * INT_SIZE)
fingerprint[index] = fingerprint[index] | 1<<bit
we store a fingerprint as an array of integers. Each integer
has a certain number of bits that can be flipped. The process
of adding a path to a fingerprint is simply choosing the index
and bit position for a path. The above procedure does this
in a deterministic fashion.
"""
import random
# To Do
# we can add an ordering operation for __contains__
# that compares the fingerprint integers in a different
# order. Ideally the most likely integer for failure
# should be chosen first.
# We also need a good way to cache these to disk. I suppose
# cPickle will work just fine for now...
class Fingerlist:
    """Count-based fingerprint stored as a list of integer buckets.

    Each path string is hashed to a deterministic bucket index (by seeding
    ``random`` with the path), and that bucket's count is incremented.
    ``a in b`` asks whether b's counts cover a's counts bucket-by-bucket,
    i.e. whether b contains a.

    NOTE(review): the ``fingerprint`` constructor argument is accepted for
    backward compatibility but is currently ignored.
    """
    def __init__(self, numIntegers=1, fingerprint=None):
        """Create an empty fingerprint with *numIntegers* buckets."""
        self.fingerprint = [0] * numIntegers
    def addPath(self, path):
        """Hash *path* to a bucket deterministically and bump its count."""
        # Seeding with the path string makes the derived bucket index
        # reproducible for identical paths across molecules and runs.
        random.seed(path)
        index = int(random.random() * len(self.fingerprint))
        self.fingerprint[index] += 1
    def __contains__(self, other):
        """Return 1 if *self*'s counts cover *other*'s in every bucket, else 0.

        Raises:
            ValueError: if the two fingerprints differ in length.
        """
        # Bug fix: the original raised a plain string, which is itself a
        # TypeError at runtime in Python 3; raise a proper exception instead.
        if len(other.fingerprint) != len(self.fingerprint):
            raise ValueError("Fingerlists not the same size!")
        for mine, theirs in zip(self.fingerprint, other.fingerprint):
            if theirs > mine:
                return 0
        return 1
    def to_list(self):
        """Return a shallow copy of the bucket counts."""
        return self.fingerprint[:]
class SplitFingerlist:
    """A bank of Fingerlists, one per path length from 2 up to *maxdepth*."""
    def __init__(self, maxdepth=7, integersPerPrint=[4]*6):
        # One sub-fingerprint per supported path length (lengths 2..maxdepth).
        assert maxdepth-1 == len(integersPerPrint)
        self.paths = [Fingerlist(numIntegers=size) for size in integersPerPrint]
    def addPath(self, length, path):
        """Record *path* in the sub-fingerprint for paths of *length* (>= 2)."""
        bucket = self.paths[length-2]
        bucket.addPath(path)
    def __contains__(self, other):
        """Return 1 when every sub-fingerprint of self contains other's, else 0."""
        assert len(self.paths) == len(other.paths)
        return int(all(theirs in ours
                       for theirs, ours in zip(other.paths, self.paths)))
    def to_list(self):
        """Concatenate all sub-fingerprint counts into one flat list."""
        flat = []
        for sub in self.paths:
            flat.extend(sub.to_list())
        return flat
def test():
    """Smoke test for Fingerlist insertion-order independence and containment.

    XXX FIX ME: better tests are needed of course :)
    """
    words = ["my", "dog", "has", "fleas"]
    fp = Fingerlist(32)
    fp2 = Fingerlist(32)
    for word in words:
        fp.addPath(word)
    for word in reversed(words):
        fp2.addPath(word)
    # Insertion order must not change the resulting fingerprint.
    assert fp.fingerprint == fp2.fingerprint
    assert fp in fp2
    fp2.addPath("yuma")
    # A superset still contains the subset, but not the other way around.
    assert fp in fp2
    assert fp2 not in fp
if __name__ == "__main__":
test()
| StarcoderdataPython |
9756440 | """
Manual Control
==============
Module storing an implementation of a controller and values associated with it.
The `Controller` class is the implementation of a closed loop control system for the ROV, with the hardware readings
executed in separate process. The readings are forwarded to the shared memory in a previously agreed format.
"""
from ..common import data_manager as _dm, Log as _Log
from .utils import DrivingMode as _DrivingMode, normalise as _normalise, \
NORM_IDLE as _IDLE, NORM_MAX as _MAX, NORM_MIN as _MIN
import multiprocessing as _mp
import inputs as _inputs
import time as _time
# Initialise the controller, has to be a global variable due to multiprocessing
_CONTROLLER = _inputs.devices.gamepads[0] if _inputs.devices.gamepads else None
# Create the hardware to class value dispatcher: maps `inputs` event codes to
# Controller attribute names (assignment goes through the property setters).
_DISPATCH_MAP = {
    "ABS_X": "left_axis_x",
    "ABS_Y": "left_axis_y",
    "ABS_RX": "right_axis_x",
    "ABS_RY": "right_axis_y",
    "ABS_Z": "left_trigger",
    "ABS_RZ": "right_trigger",
    "ABS_HAT0X": "hat_x",
    "ABS_HAT0Y": "hat_y",
    "BTN_SOUTH": "button_A",
    "BTN_EAST": "button_B",
    "BTN_WEST": "button_X",
    "BTN_NORTH": "button_Y",
    "BTN_TL": "button_LB",
    "BTN_TR": "button_RB",
    "BTN_THUMBL": "button_left_stick",
    "BTN_THUMBR": "button_right_stick",
    # NOTE(review): BTN_START maps to button_select and BTN_SELECT to
    # button_start -- presumably compensating for a label/event mismatch on
    # the target gamepad; confirm against the actual hardware.
    "BTN_START": "button_select",
    "BTN_SELECT": "button_start"
}
# Declare the max and min values - the hardware and the expected ones
_DEADZONE = 1025
_HARDWARE_AXIS_MAX = 32767
_HARDWARE_AXIS_MIN = -32768
_HARDWARE_TRIGGER_MAX = 255
_HARDWARE_TRIGGER_MIN = 0
_INTENDED_AXIS_MAX = _MAX
_INTENDED_AXIS_MIN = _MIN
_INTENDED_TRIGGER_MAX = _MAX
_INTENDED_TRIGGER_MIN = _IDLE
def _normalise_axis(value: float) -> float:
    """
    Map a raw hardware axis reading onto the intended axis range.

    Readings inside the hardware dead zone are clamped to 0 before
    normalisation, to ignore too-small values (hardware limitations).

    :param value: Raw axis value to be normalised
    :raises: ValueError
    :return: Normalised value
    """
    if -_DEADZONE < value < _DEADZONE:
        value = 0
    return _normalise(value, _HARDWARE_AXIS_MIN, _HARDWARE_AXIS_MAX,
                      _INTENDED_AXIS_MIN, _INTENDED_AXIS_MAX)
def _normalise_trigger(value: float) -> float:
    """
    Helper function used to normalise the controller trigger values into a common range.
    :param value: Value to be normalised
    :raises: ValueError
    :return: Normalised value
    """
    # Triggers have no dead zone: their hardware idle value is exactly 0.
    return _normalise(value, _HARDWARE_TRIGGER_MIN, _HARDWARE_TRIGGER_MAX, _INTENDED_TRIGGER_MIN, _INTENDED_TRIGGER_MAX)
class Controller:
    """
    Controller used to handle manual control systems.
    Uses the global `_CONTROLLER` reference to interact with hardware.
    Functions
    ---------
    The following list shortly summarises each function:
    * __init__ - standard constructor, returns early if the global `_CONTROLLER` reference is invalid
    * __bool__ - standard truthiness method, returns state of the global `_CONTROLLER` reference
    * _dispatch_event - a method to update the controller's state using hardware readings
    * state - a getter to get the current state of the controller
    * _update - a method to update the shared memory
    * _read - a wrapper method to execute everything in a separate process
    * start - a method to start the controller readings
    Controller mappings
    -------------------
    The following list shortly summarises each property (apart from state) within the class:
    Axis
    ++++
    - left_axis_x
    - left_axis_y
    - right_axis_x
    - right_axis_y
    Triggers
    ++++++++
    - left_trigger
    - right_trigger
    Hat
    +++
    - hat_y
    - hat_x
    Buttons
    +++++++
    - button_A
    - button_B
    - button_X
    - button_Y
    - button_LB
    - button_RB
    - button_left_stick
    - button_right_stick
    - button_select
    - button_start
    Model values
    ++++++++++++
    - mode
    - yaw
    - pitch
    - roll
    - sway
    - surge
    - heave
    Usage
    -----
    After creating an instance of the class, `start` method will either return a process ID of the started process, or
    -1 if the controller wasn't initialised correctly.
    """
    def __init__(self):
        """
        Constructor method used to initialise all fields or return early if no controller devices were detected.
        Most fields are internal values used by the class, however the `_delay` should be adjusted to modify
        the frequency of hardware readings.
        """
        # Stop the initialisation early if failed to recognise the controller
        if not _CONTROLLER:
            _Log.error("No game controllers detected")
            return
        self._process = _mp.Process(target=self._read, name="Controller")
        self._mode = _DrivingMode.MANUAL
        # Pause (seconds) between consecutive hardware reads in `_read`
        self._delay = 0.01
        # Last snapshot pushed to shared memory; `_update` diffs against it
        self._data = {
            "mode": 0,
            "yaw": 0,
            "pitch": 0,
            "roll": 0,
            "sway": 0,
            "surge": 0,
            "heave": 0,
        }
        # Initialise the axis
        self._left_axis_x = 0
        self._left_axis_y = 0
        self._right_axis_x = 0
        self._right_axis_y = 0
        # Initialise the triggers
        self._left_trigger = 0
        self._right_trigger = 0
        # Initialise the hat
        self._hat_y = 0
        self._hat_x = 0
        # Initialise the buttons
        self.button_A = False
        self.button_B = False
        self.button_X = False
        self.button_Y = False
        self.button_LB = False
        self.button_RB = False
        self.button_left_stick = False
        self.button_right_stick = False
        self._button_select = False
        self._button_start = False
    def __bool__(self):
        # Truthy only when a physical gamepad was detected at import time
        return _CONTROLLER is not None
    @property
    def state(self) -> dict:
        """
        Getter function used to fetch all (normalised) degrees of freedom in a dictionary format.
        Note that the "mode" key resolves to the current `_DrivingMode` enum
        member, while the remaining keys resolve to normalised floats.
        :return: Dictionary with normalised degrees of freedom
        """
        return {k: self.__getattribute__(k) for k in self._data}
    @property
    def left_axis_x(self):
        return self._left_axis_x
    @left_axis_x.setter
    def left_axis_x(self, value):
        self._left_axis_x = _normalise_axis(value)
    @property
    def left_axis_y(self):
        return self._left_axis_y
    @left_axis_y.setter
    def left_axis_y(self, value):
        self._left_axis_y = _normalise_axis(value)
    @property
    def right_axis_x(self):
        return self._right_axis_x
    @right_axis_x.setter
    def right_axis_x(self, value):
        self._right_axis_x = _normalise_axis(value)
    @property
    def right_axis_y(self):
        return self._right_axis_y
    @right_axis_y.setter
    def right_axis_y(self, value):
        self._right_axis_y = _normalise_axis(value)
    @property
    def left_trigger(self):
        return self._left_trigger
    @left_trigger.setter
    def left_trigger(self, value):
        self._left_trigger = _normalise_trigger(value)
    @property
    def right_trigger(self):
        return self._right_trigger
    @right_trigger.setter
    def right_trigger(self, value):
        self._right_trigger = _normalise_trigger(value)
    @property
    def hat_x(self):
        return self._hat_x
    @hat_x.setter
    def hat_x(self, value):
        self._hat_x = value
    @property
    def hat_y(self):
        return self._hat_y
    @hat_y.setter
    def hat_y(self, value):
        # Invert the raw vertical hat reading so its polarity matches the
        # other axes -- presumably the hardware reports "up" as negative;
        # confirm against the target gamepad.
        self._hat_y = value * (-1)
    @property
    def button_start(self):
        return self._button_start
    @button_start.setter
    def button_start(self, value):
        """
        Setter which selects next driving mode (wrapping)
        :param value: State of the button
        """
        self._button_start = bool(value)
        if value:
            try:
                # noinspection PyTypeChecker
                self._mode = _DrivingMode(self._mode.value + 1)
            except ValueError:
                # Walked past the last mode - wrap around to the first one
                self._mode = _DrivingMode(0)
    @property
    def button_select(self):
        return self._button_select
    @button_select.setter
    def button_select(self, value):
        """
        Setter which selects next previous mode (wrapping)
        :param value: State of the button
        """
        self._button_select = bool(value)
        if value:
            try:
                # noinspection PyTypeChecker
                self._mode = _DrivingMode(self._mode.value - 1)
            except ValueError:
                # Walked before the first mode - wrap around to the last one
                self._mode = _DrivingMode(len(_DrivingMode.__members__) - 1)
    @property
    def mode(self) -> _DrivingMode:
        """
        Getter for the driving mode. Must be one of the `DrivingMode`-s defined in the statics module.
        :return: Current driving mode
        """
        return self._mode
    @property
    def yaw(self) -> float:
        """
        Yaw is determined by both triggers.
        The right trigger takes precedence when both are pressed.
        :return: -1.0 for full port turn, 1.0 for full starboard turn
        """
        if self.right_trigger:
            return self.right_trigger
        elif self.left_trigger:
            return -self.left_trigger
        else:
            return 0
    @property
    def pitch(self) -> float:
        """
        Pitch is determined by the vertical right axis.
        :return: 1.0 for full bow pitch, -1.0 for full stern pitch
        """
        return self.right_axis_y
    @property
    def roll(self) -> float:
        """
        Roll is determined by the buttons X and B.
        B takes precedence when both are pressed.
        :return: -1.0 for full port roll, 1.0 for full starboard roll, 0 otherwise
        """
        if self.button_B:
            return 1.0
        elif self.button_X:
            return -1.0
        else:
            return 0
    @property
    def sway(self) -> float:
        """
        Sway is determined by the horizontal right axis.
        :return: -1.0 for full port sway, 1.0 for full starboard sway
        """
        return self._right_axis_x
    @property
    def surge(self) -> float:
        """
        Surge is determined by the vertical left axis.
        :return: 1.0 for full forward surge, -1.0 for full backwards surge
        """
        return self.left_axis_y
    @property
    def heave(self) -> float:
        """
        Heave is determined by the buttons RB and LB.
        RB takes precedence when both are pressed.
        :return: 1.0 for full up heave, -1.0 for full down heave, 0 otherwise
        """
        if self.button_RB:
            return 1.0
        elif self.button_LB:
            return -1.0
        else:
            return 0
    def _dispatch_event(self, event: _inputs.InputEvent):
        """
        Dispatcher method used to update the controller's state with the hardware readings.
        Assigning through `__setattr__` routes the raw value through the
        matching property setter (normalisation, inversion, mode switching).
        :param event: Hardware reading event
        """
        # Ignore syncing events
        if event.code == "SYN_REPORT":
            return
        if event.code in _DISPATCH_MAP:
            _Log.debug(f"New controller event read - {event.code}")
            self.__setattr__(_DISPATCH_MAP[event.code], event.state)
            self._update()
        else:
            _Log.error(f"Event not registered in the dispatch map - {event.code}")
    def _update(self):
        """
        Method used to update the shared memory controller data using the current controller's state.
        Only the values that changed since the previous call are forwarded,
        prefixed with "manual_" (except "mode", which keeps its plain key).
        """
        _Log.debug("Updating manual control shared memory")
        # When entering the autonomous driving mode, reset the manual control values
        if self._mode == _DrivingMode.AUTONOMOUS:
            data = {k: (_DrivingMode.AUTONOMOUS if k == "mode" else 0) for k in self._data}
        else:
            data = self.state
        # Calculate the data differences and override the data to check for differences next time
        difference = {"manual_" + k: v for k, v in data.items() if v != self._data[k]}
        self._data = data
        # Make sure the mode is using a correct key and a correct value
        if "manual_mode" in difference:
            difference["mode"] = difference.pop("manual_mode").value
        # Finally update the data manager with a collection of values
        _dm.control.update(difference)
    def _read(self):
        """
        Wrapper method used as a target for the process spawning.
        NOTE(review): assumes `_CONTROLLER.read()` blocks until an event is
        available; `_delay` then throttles the polling frequency.
        """
        while True:
            self._dispatch_event(_CONTROLLER.read()[0])
            _time.sleep(self._delay)
    def start(self) -> int:
        """
        Method used to start the hardware readings in a separate process.
        :return: -1 on errors or process id
        """
        if not _CONTROLLER:
            _Log.error("Can not start - no game controllers detected")
            return -1
        else:
            self._process.start()
            _Log.info("Controller reading process started, pid {}".format(self._process.pid))
            return self._process.pid
| StarcoderdataPython |
359098 | #!/bin/env python3
import argparse
import time
import sqlite3
def search(c, target, threshold):
    # NOTE(review): dead/broken leftover -- `count`, `t1` and `t2` are never
    # defined here, so calling this raises NameError.  See tanimoto_count
    # below for the working implementation; this stub should likely be removed.
    return count, t2-t1
def tanimoto_count(connection, target, threshold):
    """Count ChEMBL molecules whose Morgan fingerprint is at least
    *threshold* Tanimoto-similar to the *target* SMILES, printing the
    result and the elapsed query time.
    """
    print('Target structure:', target)
    print('Minimum Tanimoto similarity:', threshold)
    # rdtree_tanimoto / mol_morgan_bfp come from the ChemicaLite extension.
    query = (
        "SELECT count(*) FROM "
        "morgan_idx_chembl_molecule as idx WHERE "
        "idx.id match rdtree_tanimoto(mol_morgan_bfp(mol_from_smiles(?), 2, 1024), ?)"
    )
    started = time.time()
    rows = connection.execute(query, (target, threshold)).fetchall()
    elapsed = time.time() - started
    count = rows[0][0]
    print('Found {0} matching objects in {1} seconds'.format(count, elapsed))
if __name__=="__main__":
    # Command-line entry point: database path, query SMILES and threshold.
    parser= argparse.ArgumentParser(
        description='Find the number of records similar to the input structure')
    parser.add_argument('chembldb',
                        help='The path to the SQLite database w/ the ChEMBL compounds')
    parser.add_argument('smiles', help='The input structure in SMILES format')
    parser.add_argument('threshold', type=float,
                        help='The minimum similarity of the matching objects')
    parser.add_argument('--chemicalite', default='chemicalite',
                        help='The name or path to the ChemicaLite extension module')
    args = parser.parse_args()
    connection = sqlite3.connect(args.chembldb)
    # Load the ChemicaLite cheminformatics extension, then lock extension
    # loading again so untrusted SQL cannot load further modules.
    connection.enable_load_extension(True)
    connection.load_extension(args.chemicalite)
    connection.enable_load_extension(False)
    tanimoto_count(connection, args.smiles, args.threshold)
    connection.close()
| StarcoderdataPython |
5048925 | #!/usr/bin/env python
import argparse
import json
import logging
parser = argparse.ArgumentParser(description=(
    'Converts text profile data into JSON format compatible with Chrome.'))
parser.add_argument('profile', type=str, nargs='+', help='profile files')
args = parser.parse_args()
records = []
# Each input file holds one JSON record per line; unparseable lines are
# logged (with traceback) and skipped rather than aborting the conversion.
for fname in args.profile:
    with open(fname, 'r') as f:
        for line in f:
            try:
                record = json.loads(line)
            except ValueError as e:
                logging.exception(e)
            else:
                records.append(record)
# Chrome's tracing UI (chrome://tracing) expects a top-level traceEvents list.
print(json.dumps({'traceEvents': records}))
| StarcoderdataPython |
8136737 | # Aula 13 (Estrutura de Repetição for)
# Read a number and print its multiplication table from 1 to 10.
numero = int(input('Tabuada de: '))
for multiplicador in range(1, 11):
    produto = numero * multiplicador
    print('{} X {}: {}'.format(numero, multiplicador, produto))
| StarcoderdataPython |
class Analyze(
    Demon,
):
    # NOTE(review): placeholder subclass -- inherits all behaviour from
    # Demon (defined elsewhere); no overrides yet.
    pass
| StarcoderdataPython |
3336263 | from PIL import Image
import subprocess
def cleanFile(filePath, newFilePath):
    """Threshold an image to pure black/white, OCR it with tesseract, and
    print the recognised text.

    :param filePath: path of the source image
    :param newFilePath: path where the thresholded copy is written
    """
    image = Image.open(filePath)
    # Set a threshold value for the image, and save: pixels below 143 become
    # black and the rest white, which helps tesseract on noisy scans.
    image = image.point(lambda x: 0 if x < 143 else 255)
    image.save(newFilePath)
    # Call tesseract to do OCR on the newly created image; it writes its
    # result to output.txt ("output" base name + ".txt").
    subprocess.call(["tesseract", newFilePath, "output"])
    # Open and read the resulting data file.  Bug fix: use a context manager
    # so the handle is closed even if read()/print() raises (the original
    # leaked the file descriptor on error).
    with open("output.txt", 'r') as outputFile:
        print(outputFile.read())
outputFile.close()
cleanFile("text_2.png", "text_2_clean.png")
4926791 | <reponame>Nerd-Bear/DSM-API<filename>app_app/admin.py
from django.contrib import admin
from . import models
class AppModelAdmin(admin.ModelAdmin):
    """Django admin configuration for AppModel."""
    # Columns shown in the admin change-list for registered apps.
    list_display = ('name', 'description', 'owner')
admin.site.register(models.AppModel, AppModelAdmin)
| StarcoderdataPython |
12832400 | <reponame>Badger-Finance/python-keepers
import pytest
import responses
from requests import HTTPError
from config.enums import Network
from src.token_utils import get_token_price
@responses.activate
def test_get_token_price_prod():
    """The production prices endpoint response is resolved to the token's price."""
    currency = "usd"
    price = 8.75
    # Stub the production Badger API so no real network call is made.
    responses.add(
        responses.GET,
        f"https://api.badger.finance/v2/prices?currency={currency}&chain={Network.Ethereum}",
        json={
            "0x3472a5a71965499acd81997a54bba8d852c6e53d": 8.75,
        },
        status=200,
    )
    token_price = get_token_price(
        token_address="0x3472a5a71965499acd81997a54bba8d852c6e53d",
        currency="usd",
        chain=Network.Ethereum,
    )
    assert token_price == price
@responses.activate
def test_get_token_price_staging():
    """With use_staging=True the staging API host is queried instead."""
    currency = "usd"
    price = 8.75
    responses.add(
        responses.GET,
        f"https://staging-api.badger.finance/v2/prices?currency={currency}"
        f"&chain={Network.Ethereum}",
        json={
            "0x3472a5a71965499acd81997a54bba8d852c6e53d": 8.75,
        },
        status=200,
    )
    token_price = get_token_price(
        token_address="0x3472a5a71965499acd81997a54bba8d852c6e53d",
        currency="usd",
        chain=Network.Ethereum,
        use_staging=True,
    )
    assert token_price == price
@responses.activate
def test_get_token_price_raises():
    """A non-2xx API response makes get_token_price raise HTTPError."""
    currency = "usd"
    # Stub a 403 so the underlying requests call fails its status check.
    responses.add(
        responses.GET,
        f"https://staging-api.badger.finance/v2/prices?currency={currency}"
        f"&chain={Network.Ethereum}",
        json={
            "0x3472a5a71965499acd81997a54bba8d852c6e53d": 8.75,
        },
        status=403,
    )
    with pytest.raises(HTTPError):
        get_token_price(
            token_address="0x3472a5a71965499acd81997a54bba8d852c6e53d",
            currency="usd",
            chain=Network.Ethereum,
            use_staging=True,
        )
| StarcoderdataPython |
1851780 | <filename>tests/conftest.py
import os
import pytest
@pytest.fixture
def os_environ(monkeypatch):
    """Replace ``os.environ`` with a plain-dict copy for the test's duration.

    Tests can mutate the returned dict freely without leaking changes into
    the real process environment; monkeypatch restores the original on teardown.

    NOTE(review): the replacement is a plain ``dict``, not an ``os._Environ``,
    so code relying on ``putenv`` side effects behaves differently -- confirm
    this is acceptable for the tests using this fixture.
    """
    mock_environ = dict(os.environ)
    monkeypatch.setattr(os, 'environ', mock_environ)
    return mock_environ
| StarcoderdataPython |
205269 | <filename>autosklearn/experimental/hyperboost/acquistion_function.py
import numpy as np
from smac.optimizer.acquisition import AbstractAcquisitionFunction
class ScorePlusDistance(AbstractAcquisitionFunction):
    """Acquisition function combining predicted loss and closeness.

    The surrogate model's ``predict`` is expected to return a
    ``(loss, closeness)`` pair; the acquisition value is
    ``1 - (loss + closeness) / 2``, so configurations with low predicted
    loss and low closeness are preferred.
    """
    def __init__(self, model):
        """:param model: surrogate whose ``predict(X)`` yields (loss, closeness)."""
        super().__init__(model)
    def _compute(self, X: np.ndarray):
        """Return the acquisition value for each candidate in *X*."""
        loss, closeness = self.model.predict(X)
        # Removed leftover commented-out debug print from the original.
        return 1 - (loss + closeness) / 2
| StarcoderdataPython |
3513561 | # -*- coding: utf-8 -*-
from flask import render_template, Response, redirect, url_for, flash
from flask_restful import Resource, reqparse
from airports import db
class Airport(Resource):
    """REST resource rendering details for a single airport by IATA code."""
    def get(self, iata_code):
        """Render the airport page, or flash "Not found" and redirect home."""
        if iata_code:
            # Parameterised query -- the user-supplied code is never
            # interpolated into the SQL string.
            query = db.engine.execute("SELECT * FROM airports WHERE iata_code=?", (iata_code.upper(),))
            first_element = query.first()
            if first_element:
                result = dict((x, y) for x, y in zip(query.keys(), first_element))
                # Stored as "lon, lat" text; reversed to (lat, lon) for the map.
                coord = list(map(float, result.get("coordinates", ("0, 0")).split(", ")))
                return Response(render_template('airport.html', result=result, coord=coord[::-1]))
        flash("Not found")
        return redirect(url_for('hello'))
    def post(self, iata_code):
        """Redirect the search form submission to the matching GET route."""
        parser = reqparse.RequestParser()
        parser.add_argument('iata_code', type=str)
        args = parser.parse_args()
        return redirect(url_for('airport', iata_code=args['iata_code']))
| StarcoderdataPython |
165931 | import torch
import torch.utils.data
from rlkit.torch.pytorch_util import from_numpy
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from rlkit.pythonplusplus import identity
from rlkit.torch import pytorch_util as ptu
import numpy as np
class RefinementNetwork(nn.Module):
def __init__(
self,
input_width,
input_height,
input_channels,
output_size,
kernel_sizes,
n_channels,
strides,
paddings,
hidden_sizes,
lstm_size,
lstm_input_size,
added_fc_input_size=0,
batch_norm_conv=False,
batch_norm_fc=False,
init_w=1e-4,
hidden_init=nn.init.xavier_uniform_,
hidden_activation=nn.ReLU(),
output_activation=identity,
):
if hidden_sizes is None:
hidden_sizes = []
assert len(kernel_sizes) == \
len(n_channels) == \
len(strides) == \
len(paddings)
super().__init__()
self.hidden_sizes = hidden_sizes
self.input_width = input_width
self.input_height = input_height
self.input_channels = input_channels
self.lstm_size = lstm_size
self.output_size = output_size
self.output_activation = output_activation
self.hidden_activation = hidden_activation
self.batch_norm_conv = batch_norm_conv
self.batch_norm_fc = batch_norm_fc
self.added_fc_input_size = added_fc_input_size
self.conv_input_length = self.input_width * self.input_height * self.input_channels
self.conv_layers = nn.ModuleList()
self.conv_norm_layers = nn.ModuleList()
self.fc_layers = nn.ModuleList()
self.fc_norm_layers = nn.ModuleList()
self.lstm = nn.LSTM(lstm_input_size, lstm_size, num_layers=1, batch_first=True)
for out_channels, kernel_size, stride, padding in \
zip(n_channels, kernel_sizes, strides, paddings):
conv = nn.Conv2d(input_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding)
hidden_init(conv.weight)
conv.bias.data.fill_(0)
conv_layer = conv
self.conv_layers.append(conv_layer)
input_channels = out_channels
# find output dim of conv_layers by trial and add normalization conv layers
test_mat = torch.zeros(1, self.input_channels, self.input_width,
self.input_height) # initially the model is on CPU (caller should then move it to GPU if
for conv_layer in self.conv_layers:
test_mat = conv_layer(test_mat)
#self.conv_norm_layers.append(nn.BatchNorm2d(test_mat.shape[1]))
fc_input_size = int(np.prod(test_mat.shape))
# used only for injecting input directly into fc layers
fc_input_size += added_fc_input_size
for idx, hidden_size in enumerate(hidden_sizes):
fc_layer = nn.Linear(fc_input_size, hidden_size)
#norm_layer = nn.BatchNorm1d(hidden_size)
fc_layer.weight.data.uniform_(-init_w, init_w)
fc_layer.bias.data.uniform_(-init_w, init_w)
self.fc_layers.append(fc_layer)
#self.fc_norm_layers.append(norm_layer)
fc_input_size = hidden_size
self.last_fc = nn.Linear(lstm_size, output_size)
#self.last_fc.weight.data.uniform_(-init_w, init_w)
#self.last_fc.bias.data.uniform_(-init_w, init_w)
self.last_fc2 = nn.Linear(lstm_size, output_size)
xcoords = np.expand_dims(np.linspace(-1, 1, self.input_width), 0).repeat(self.input_height, 0)
ycoords = np.repeat(np.linspace(-1, 1, self.input_height), self.input_width).reshape((self.input_height, self.input_width))
self.coords = from_numpy(np.expand_dims(np.stack([xcoords, ycoords], 0), 0))
def forward(self, input, hidden1, hidden2, extra_input=None):
    """Run the conv stack, fc stack, and one LSTM step; emit two output heads.

    Returns (head1, head2, h_n, c_n) where both heads are produced from the
    same LSTM output through separate final linear layers.
    """
    batch = input.shape[0]
    # Append the fixed per-pixel coordinate channels to the image input.
    h = torch.cat([input, self.coords.repeat(batch, 1, 1, 1)], 1)
    h = self.apply_forward(h, self.conv_layers, self.conv_norm_layers,
                           use_batch_norm=self.batch_norm_conv)
    # Flatten conv feature maps into a single vector per sample.
    h = h.view(h.size(0), -1)
    features = self.apply_forward(h, self.fc_layers, self.fc_norm_layers,
                                  use_batch_norm=self.batch_norm_fc)
    if extra_input is not None:
        features = torch.cat([features, extra_input], dim=1)
    # Accept 2-D hidden states by adding the num_layers dimension.
    if len(hidden1.shape) == 2:
        hidden1, hidden2 = hidden1.unsqueeze(0), hidden2.unsqueeze(0)
    self.lstm.flatten_parameters()
    out, (h_n, c_n) = self.lstm(features.unsqueeze(1), (hidden1, hidden2))
    squeezed = out.squeeze()
    head1 = self.output_activation(self.last_fc(squeezed))
    head2 = self.output_activation(self.last_fc2(squeezed))
    return head1, head2, h_n, c_n
def initialize_hidden(self, bs):
    """Build a zeroed (h_0, c_0) pair for the LSTM, each shaped (1, bs, lstm_size)."""
    def zero_state():
        # Fresh array per state so h_0 and c_0 never share storage.
        return Variable(ptu.from_numpy(np.zeros((1, bs, self.lstm_size))))
    return (zero_state(), zero_state())
def apply_forward(self, input, hidden_layers, norm_layers,
                  use_batch_norm=False):
    """Apply each layer in sequence, following each with the hidden activation.

    norm_layers/use_batch_norm are accepted for interface compatibility but
    unused (batch norm is disabled in this model).
    """
    h = input
    for hidden_layer in hidden_layers:
        h = self.hidden_activation(hidden_layer(h))
    return h
| StarcoderdataPython |
6476774 | """
@brief Flask blueprint are registered in _init__.py file.
@details The file contains a blueprint registered for Ticket Management app which is
apis_blueprint.
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
from flask_migrate import Migrate
from flask_security import SQLAlchemyUserDatastore, Security
from config import app_config
# Set configuration with help of env variable in bash
# Select the config profile from the `env` shell variable (defaults to development).
config_env = os.getenv('env', 'development')
app = Flask(__name__)
app.config.from_object(app_config[config_env])
db = SQLAlchemy(app)
# database migration
migrate = Migrate(app, db)
# Models and blueprint are imported only after `db` exists to avoid a circular import.
from .APIs.models import User, Role, UserRoles
from .APIs import APIs as apis_blueprint
# Register blueprints
app.register_blueprint(apis_blueprint)
# Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
# NOTE(review): `security` is assigned the return value of init_app(); confirm
# init_app returns a usable object in the installed Flask-Security version,
# otherwise `security` may be None and the Security() instance is discarded.
security = Security().init_app(app, user_datastore, register_blueprint=False)
| StarcoderdataPython |
def findtwo2020(filename):
    """Return the product of the first two entries in *filename* summing to 2020.

    The file holds one integer per line. Returns the string "Did not find it"
    when no such pair exists (kept for interface compatibility). The pair
    order matches the original nested-loop scan (i, then j > i).
    """
    from itertools import combinations
    # `with` guarantees the handle is closed even if int() raises.
    with open(filename) as fh:
        data = [int(line.strip()) for line in fh]
    for first, second in combinations(data, 2):
        if first + second == 2020:
            return first * second
    return "Did not find it"
def findthree2020(filename):
    """Return the product of the first three entries in *filename* summing to 2020.

    The file holds one integer per line. Returns the string "Did not find it"
    when no such triple exists. combinations() yields triples in the same
    i < j < k order as the original triple-nested loop.
    """
    from itertools import combinations
    # `with` guarantees the handle is closed even if int() raises.
    with open(filename) as fh:
        data = [int(line.strip()) for line in fh]
    for a, b, c in combinations(data, 3):
        if a + b + c == 2020:
            return a * b * c
    return "Did not find it"
| StarcoderdataPython |
6657638 | <reponame>adamcvj/SatelliteTracker<gh_stars>1-10
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought naming package component>
#------------------------------------------------------------------------------
""" State factory for referenceable objects. """
# Local imports.
from .referenceable import Referenceable
from .state_factory import StateFactory
class ReferenceableStateFactory(StateFactory):
    """State factory for referenceable objects."""

    ###########################################################################
    # 'StateFactory' interface.
    ###########################################################################

    def get_state_to_bind(self, obj, name, context):
        """Return the state used to bind *obj*, or None if it is not referenceable.

        Objects implementing `Referenceable` know how to describe themselves
        as a reference, so that reference is handed back as the state.
        """
        if isinstance(obj, Referenceable):
            return obj.reference
        return None
### EOF #######################################################################
| StarcoderdataPython |
1948923 | <reponame>cjw296/carly
import json
from collections import Counter
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.task import LoopingCall
class CollectorProtocol(DatagramProtocol):
    """Tallies incoming JSON ping datagrams by their sender id."""

    def __init__(self):
        # Maps sender id -> number of datagrams received from it.
        self.counts = Counter()

    def datagramReceived(self, data, addr):
        message = json.loads(data)
        self.counts[message['id']] += 1
class SenderProtocol(DatagramProtocol):
    """Connects to a fixed UDP target and sends a JSON ping every second."""

    def __init__(self, targetIp, targetPort):
        self.counts = {}
        self.targetIp = targetIp
        self.targetPort = targetPort
        # Fires self.ping once per interval after startProtocol() starts it.
        self.loop = LoopingCall(self.ping)

    def startProtocol(self):
        self.transport.connect(self.targetIp, self.targetPort)
        self.loop.start(1)

    def ping(self):
        payload = json.dumps({'id': 'client' + str(id(self))})
        self.transport.write(payload.encode('ascii'))
| StarcoderdataPython |
3272804 | <reponame>t4d-classes/python_03222021_afternoon
from statistics import mean, median, stdev
# Load one closing price per line from the local data file.
with open("gme_price.txt", "r") as price_file:
    prices = [float(line) for line in price_file]

# Print a summary of the price series.
print(f"Max: {max(prices)}")
print(f"Min: {min(prices)}")
print(f"Mean: {mean(prices)}")
print(f"Median: {median(prices)}")
print(f"StDev: {stdev(prices)}")
| StarcoderdataPython |
9647991 | from parsl.config import Config
from parsl.executors import WorkQueueExecutor
# Single Work Queue executor listening on port 50055 for worker connections.
config = Config(
executors=[WorkQueueExecutor(port=50055,
# init_command='source /home/yadu/src/wq_parsl/setup_parsl_env.sh;
# echo "Ran at $date" > /home/yadu/src/wq_parsl/parsl/tests/workqueue_tests/ran.log',
)]
)
| StarcoderdataPython |
12863794 | <reponame>sawood14012/fabric8-analytics-jobs<filename>f8a_jobs/handlers/flow.py<gh_stars>1-10
"""Schedule multiple flows of a type."""
from .base import BaseHandler
class FlowScheduling(BaseHandler):
    """Schedule multiple flows of a type."""

    def execute(self, flow_name, flow_arguments):
        """Schedule one flow per argument set, expanding filter queries.

        :param flow_name: flow name that should be scheduled
        :param flow_arguments: a list of flow arguments per flow
        """
        for node_args in flow_arguments:
            if not self.is_filter_query(node_args):
                self.run_selinon_flow(flow_name, node_args)
                continue
            # A filter query fans out into one flow per expanded argument set.
            for expanded_args in self.expand_filter_query(node_args):
                self.run_selinon_flow(flow_name, expanded_args)
| StarcoderdataPython |
1780577 | <reponame>meow464/pyobjus
__version__ = '1.2.0'
from .pyobjus import *
| StarcoderdataPython |
3255077 | <gh_stars>0
#!/usr/bin/env python3
from SuPyModes.Geometry import Geometry, Circle
from SuPyModes.Solver import SuPySolver
from SuPyModes.sellmeier import Fused_silica
import time
# Fiber cross-section: a large cladding disk plus one slightly higher-index core
# centered on the cladding. NOTE(review): units are presumably microns and 1.55
# the wavelength passed to the Sellmeier model - confirm against SuPyModes docs.
Clad = Circle(Radi=62.5, Position=(0, 0), Index=Fused_silica(1.55))
Core0 = Circle(Position=Clad.C[0], Radi=4.2, Index=Fused_silica(1.55)+0.005)
# Simulation window (+-70 in both axes) and mesh resolution (150x150 points).
Geo = Geometry(Objects=[Clad, Core0],
Xbound=[-70, 70],
Ybound=[-70, 70],
Nx=150,
Ny=150)
#Geo.Plot()
# Mode solver: compute 8 modes, keep 6 (nMode/sMode semantics per SuPyModes).
Sol = SuPySolver(Coupler=Geo,
Tolerance=1e-8,
MaxIter=10000,
nMode=8,
sMode=6)
# Time the mode computation for a quick performance check.
start = time.time()
SuperModes = Sol.GetModes(wavelength=1.55,
Nstep=2,
ITRf=0.9)
end = time.time()
print('compute time:', end - start)
SuperModes.Plot(Input=['All'], iter=[0])
| StarcoderdataPython |
4853204 | <filename>plexmonitor/tasks/email_task.py<gh_stars>0
import email
from sparts.tasks.periodic import PeriodicTask # type: ignore
from plexmonitor.lib.command import Command
from plexmonitor.lib.email import Inbox
class EmailTask(PeriodicTask):
    """Periodic task that polls the email inbox for new commands.

    Only the single most recent unread mail is examined per iteration (a
    cheap guard against potential DOS via mail floods); when that mail has
    already been processed, the iteration is a no-op.
    """

    INTERVAL = 10.0  # seconds between polls

    def initTask(self) -> None:
        super(EmailTask, self).initTask()
        self.inbox = Inbox()
        self.inbox.connect()
        # Id of the last mail we processed, or None before the first poll.
        self.last_mail_id = None  # type: str

    def execute(self) -> None:
        newest_id = self.inbox.get_last_unread_mail_id()
        previous_id = self.last_mail_id
        if previous_id is not None and int(newest_id) <= int(previous_id):
            self.logger.info("Nothing to fetch")
            return
        self.logger.info("Going to fetch mail ID {}".format(newest_id))
        mail = self.inbox.fetch(newest_id)  # type: email.message.Message
        self.last_mail_id = newest_id
        cmd = Command.from_email(mail)
        if not cmd:
            self.logger.info("No valid command")
            return
        self.logger.info("Got command {action} from {sender}"
                         .format(action=cmd.action,
                                 sender=cmd.context['sender']))
| StarcoderdataPython |
8199923 | <gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetFileSystemsResult',
'AwaitableGetFileSystemsResult',
'get_file_systems',
]
@pulumi.output_type
class GetFileSystemsResult:
"""
A collection of values returned by getFileSystems.
"""
# NOTE: generated by the Pulumi Terraform bridge (see file header); keep
# substantive edits in the generator, not by hand.
def __init__(__self__, description_regex=None, descriptions=None, id=None, ids=None, output_file=None, protocol_type=None, storage_type=None, systems=None):
# Each argument is type-checked, then stored via pulumi.set so the
# @pulumi.getter properties below can retrieve it by name.
if description_regex and not isinstance(description_regex, str):
raise TypeError("Expected argument 'description_regex' to be a str")
pulumi.set(__self__, "description_regex", description_regex)
if descriptions and not isinstance(descriptions, list):
raise TypeError("Expected argument 'descriptions' to be a list")
pulumi.set(__self__, "descriptions", descriptions)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ids and not isinstance(ids, list):
raise TypeError("Expected argument 'ids' to be a list")
pulumi.set(__self__, "ids", ids)
if output_file and not isinstance(output_file, str):
raise TypeError("Expected argument 'output_file' to be a str")
pulumi.set(__self__, "output_file", output_file)
if protocol_type and not isinstance(protocol_type, str):
raise TypeError("Expected argument 'protocol_type' to be a str")
pulumi.set(__self__, "protocol_type", protocol_type)
if storage_type and not isinstance(storage_type, str):
raise TypeError("Expected argument 'storage_type' to be a str")
pulumi.set(__self__, "storage_type", storage_type)
if systems and not isinstance(systems, list):
raise TypeError("Expected argument 'systems' to be a list")
pulumi.set(__self__, "systems", systems)
@property
@pulumi.getter(name="descriptionRegex")
def description_regex(self) -> Optional[str]:
"""
The regex string used to filter results by description, as passed in.
"""
return pulumi.get(self, "description_regex")
@property
@pulumi.getter
def descriptions(self) -> Sequence[str]:
"""
A list of FileSystem descriptions.
"""
return pulumi.get(self, "descriptions")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def ids(self) -> Sequence[str]:
"""
A list of FileSystem Id.
"""
return pulumi.get(self, "ids")
@property
@pulumi.getter(name="outputFile")
def output_file(self) -> Optional[str]:
"""
The output file path used to save results, as passed in.
"""
return pulumi.get(self, "output_file")
@property
@pulumi.getter(name="protocolType")
def protocol_type(self) -> Optional[str]:
"""
ProtocolType block of the FileSystem
"""
return pulumi.get(self, "protocol_type")
@property
@pulumi.getter(name="storageType")
def storage_type(self) -> Optional[str]:
"""
StorageType block of the FileSystem.
"""
return pulumi.get(self, "storage_type")
@property
@pulumi.getter
def systems(self) -> Sequence['outputs.GetFileSystemsSystemResult']:
"""
A list of VPCs. Each element contains the following attributes:
"""
return pulumi.get(self, "systems")
class AwaitableGetFileSystemsResult(GetFileSystemsResult):
# Generator-based __await__ shim: yields nothing (the `if False` branch is
# never taken) and immediately returns the already-resolved result, so the
# value works both with and without `await`.
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFileSystemsResult(
description_regex=self.description_regex,
descriptions=self.descriptions,
id=self.id,
ids=self.ids,
output_file=self.output_file,
protocol_type=self.protocol_type,
storage_type=self.storage_type,
systems=self.systems)
def get_file_systems(description_regex: Optional[str] = None,
ids: Optional[Sequence[str]] = None,
output_file: Optional[str] = None,
protocol_type: Optional[str] = None,
storage_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFileSystemsResult:
"""
This data source provides FileSystems available to the user.
> **NOTE**: Available in 1.35.0+
## Example Usage
```python
import pulumi
import pulumi_alicloud as alicloud
fs = alicloud.nas.get_file_systems(description_regex=alicloud_nas_file_system["foo"]["description"],
protocol_type="NFS")
pulumi.export("alicloudNasFileSystemsId", fs.systems[0].id)
```
:param str description_regex: A regex string to filter the results by the :FileSystem description.
:param Sequence[str] ids: A list of FileSystemId.
:param str protocol_type: Filter results by a specific ProtocolType. Valid values: `NFS` and `SMB`.
:param str storage_type: Filter results by a specific StorageType. Valid values: `Capacity` and `Performance`.
"""
# Marshal arguments into the camelCase keys the provider invoke expects.
__args__ = dict()
__args__['descriptionRegex'] = description_regex
__args__['ids'] = ids
__args__['outputFile'] = output_file
__args__['protocolType'] = protocol_type
__args__['storageType'] = storage_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
# Perform the provider invoke and unwrap the typed result.
__ret__ = pulumi.runtime.invoke('alicloud:nas/getFileSystems:getFileSystems', __args__, opts=opts, typ=GetFileSystemsResult).value
return AwaitableGetFileSystemsResult(
description_regex=__ret__.description_regex,
descriptions=__ret__.descriptions,
id=__ret__.id,
ids=__ret__.ids,
output_file=__ret__.output_file,
protocol_type=__ret__.protocol_type,
storage_type=__ret__.storage_type,
systems=__ret__.systems)
| StarcoderdataPython |
4882632 | # Given a parent-child list, filter remove all tuples with a term where another tuple # has a child term
# Group by first column
import sys
if len(sys.argv) < 2:
print sys.argv[0], "[parent-child file]"
print "Filter all tuples having a parent term when a tuple with a child term exists"
exit(1)
sep="|"
parents = { }
file=open(sys.argv[1], 'r')
# Load term-child data from stdin
for line in file:
cols=line.strip().split(sep)
term=cols[0]
child=cols[1]
if child in parents:
parents[child].append(term)
else:
parents[child]= [ term ]
file=sys.stdin
curr_group = None
curr_lines = {}
covered = set( [] )
for line in file:
fields = line.strip().split(sep)
group_field=fields[0]
if (curr_group == None):
curr_group = group_field
if (curr_group != group_field):
# new group - print the old result
for i,j in curr_lines.iteritems():
print j,
# reset for next group
curr_group=group_field
curr_lines = {}
covered = set ( [] )
term=fields[1]
if term not in covered:
# remove all existing term lines that parents of the term
for parent_term in parents[term]:
if parent_term in curr_lines:
del curr_lines[parent_term]
# add and update the covered terms set
covered.update(parents[term])
curr_lines[term]=line
# otherwise, term is already covered == don't add
# print the last group
for i,j in curr_lines.iteritems():
print j, | StarcoderdataPython |
1853231 | #!/usr/bin/env python3
#
# How to run: python3 ocr_to_csv.py [filename]
#
import io, json, sys, os, psycopg2
from PIL import Image, ImageDraw
from PIL import ImagePath
from pathlib import Path
from ocr import ocr_tesseract
from ocr import ocr_google_vision
# Require exactly one argument: the image file to OCR.
if len(sys.argv) != 2:
print("Error: filename missing.")
sys.exit(1)
path = sys.argv[1]
# Pixels of padding added around the detected text region when cropping.
crop_buffer = 30
##Import database settings from settings.py file
import settings
# NOTE(review): `<PASSWORD>` is a redaction placeholder left by the dataset
# export and is not valid Python; restore `settings.db_password` (or similar).
conn = psycopg2.connect(host = settings.db_host, database = settings.db_db, user = settings.db_user, password = <PASSWORD>, connect_timeout = 60)
conn.autocommit = True
db_cursor = conn.cursor()
#Create entry for file
db_cursor.execute("INSERT INTO ocr_documents (project_id, filename, ocr_source) VALUES (%(project_id)s, %(filename)s, %(source)s) RETURNING document_id", {'project_id': settings.project_id, 'filename': Path(path).name, 'source': settings.source})
document_id = db_cursor.fetchone()
# NOTE(review): `vision`, `client`, and `content` are never defined in this
# file - the google.cloud.vision import and client/image setup appear to have
# been lost in a merge. This section cannot run as written.
image = vision.types.Image(content=content)
# Language hint codes for handwritten OCR:
# en-t-i0-handwrit, mul-Latn-t-i0-handwrit
# Note: Use only one language hint code per request for handwritten OCR.
image_context = vision.types.ImageContext(
language_hints=['en-t-i0-handwrit'])
response = client.document_text_detection(image=image, image_context=image_context)
# Persist the raw API response for debugging/auditing.
if os.path.exists('response') == False:
os.mkdir('response')
with open('response/{}.txt'.format(Path(path).stem), 'w') as out:
out.write(str(response))
ocr_text = response.full_text_annotation.text.split("\n")
print('Full Text: \n=============\n{}\n=============\n'.format(response.full_text_annotation.text))
# Persist the plain extracted text as well.
if os.path.exists('fulltext') == False:
os.mkdir('fulltext')
with open('fulltext/{}.txt'.format(Path(path).stem), 'w') as out:
out.write(response.full_text_annotation.text)
#word, confidence, coords
if os.path.exists('csv') == False:
os.mkdir('csv')
data_file = 'csv/{}.csv'.format(Path(path).stem)
if os.path.exists('images') == False:
os.mkdir('images')
img_file = 'images/{}.jpg'.format(Path(path).stem)
wordfile = open(data_file, "w")
wordfile.write("word_text,block,page,word,word_line,confidence,vertices_x_0,vertices_y_0,vertices_x_1,vertices_y_1,vertices_x_2,vertices_y_2,vertices_x_3,vertices_y_3\n")
word_list = []
# Running counters: p = paragraph, b = block, w = word.
p = 0
b = 0
w = 0
for page in response.full_text_annotation.pages:
for block in page.blocks:
#print('\nBlock confidence: {}\n'.format(block.confidence))
b += 1
for paragraph in block.paragraphs:
#print('Paragraph confidence: {}'.format(
#paragraph.confidence))
p += 1
word_line = 0
for word in paragraph.words:
word_text = ''.join([
symbol.text for symbol in word.symbols
])
#Should ignore?
if word_text in settings.text_ignore:
continue
if settings.ignore_text(word_text, Path(path).stem):
continue
w += 1
# Bounding-box vertices are parsed out of the protobuf's string
# representation ("x: N\ny: N") - fragile but functional.
#0
wrd_vertices = [str(word.bounding_box.vertices[0]).split('\n')]
wrd_vertices_x_0 = wrd_vertices[0][0].replace('x: ', '')
wrd_vertices_y_0 = wrd_vertices[0][1].replace('y: ', '')
#1
wrd_vertices = [str(word.bounding_box.vertices[1]).split('\n')]
wrd_vertices_x_1 = wrd_vertices[0][0].replace('x: ', '')
wrd_vertices_y_1 = wrd_vertices[0][1].replace('y: ', '')
#2
wrd_vertices = [str(word.bounding_box.vertices[2]).split('\n')]
wrd_vertices_x_2 = wrd_vertices[0][0].replace('x: ', '')
wrd_vertices_y_2 = wrd_vertices[0][1].replace('y: ', '')
#3
wrd_vertices = [str(word.bounding_box.vertices[3]).split('\n')]
wrd_vertices_x_3 = wrd_vertices[0][0].replace('x: ', '')
wrd_vertices_y_3 = wrd_vertices[0][1].replace('y: ', '')
#print([word_text, word.confidence, [[wrd_vertices_x_0, wrd_vertices_y_0], [wrd_vertices_x_1, wrd_vertices_y_1], [wrd_vertices_x_2, wrd_vertices_y_2], [wrd_vertices_x_3, wrd_vertices_y_3]]])
#Find which line
for i in range(word_line, len(ocr_text)):
#print("{}-{}-{} {}".format(i, word_text, ocr_text[i], word_text in ocr_text[i]))
if word_text in ocr_text[i]:
word_line = i
break
print("{}-{}-{}-{} {}".format(i, word_line, word_text, ocr_text[i], word_text in ocr_text[i]))
wordfile.write("\"%s\",%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % (word_text, b, p, w, word_line, word.confidence, wrd_vertices_x_0, wrd_vertices_y_0, wrd_vertices_x_1, wrd_vertices_y_1, wrd_vertices_x_2, wrd_vertices_y_2, wrd_vertices_x_3, wrd_vertices_y_3))
#write box to image
# Color the box by confidence bucket: green > 0.9, yellow > 0.8,
# orange > 0.7, red otherwise.
if word.confidence > 0.9:
linecolor = "#66ff33"
elif word.confidence <= 0.9 and word.confidence > 0.8:
linecolor = "#ffdb4d"
elif word.confidence <= 0.8 and word.confidence > 0.7:
linecolor = "#ffa366"
elif word.confidence <= 0.7:
linecolor = "#ff6666"
# NOTE(review): `im` is only assigned near the end of the file
# (Image.open in the crop section), so this use raises NameError.
draw = ImageDraw.Draw(im)
draw.line([(int(wrd_vertices_x_0), int(wrd_vertices_y_0)), (int(wrd_vertices_x_1), int(wrd_vertices_y_1))], fill = linecolor, width = 3)
draw.line([(int(wrd_vertices_x_1), int(wrd_vertices_y_1)), (int(wrd_vertices_x_2), int(wrd_vertices_y_2))], fill = linecolor, width = 3)
draw.line([(int(wrd_vertices_x_2), int(wrd_vertices_y_2)), (int(wrd_vertices_x_3), int(wrd_vertices_y_3))], fill = linecolor, width = 3)
draw.line([(int(wrd_vertices_x_3), int(wrd_vertices_y_3)), (int(wrd_vertices_x_0), int(wrd_vertices_y_0))], fill = linecolor, width = 3)
del draw
word_list.append([word_text, word.confidence, [[wrd_vertices_x_0, wrd_vertices_y_0], [wrd_vertices_x_1, wrd_vertices_y_1], [wrd_vertices_x_2, wrd_vertices_y_2], [wrd_vertices_x_3, wrd_vertices_y_3]]])
db_cursor.execute("INSERT INTO ocr_entries (document_id, word_text, block, page, word, word_line, confidence, vertices_x_0, vertices_y_0, vertices_x_1, vertices_y_1, vertices_x_2, vertices_y_2, vertices_x_3, vertices_y_3) VALUES (%(document_id)s, %(word_text)s, %(block)s, %(page)s, %(word)s, %(word_line)s, %(confidence)s, %(vertices_x_0)s, %(vertices_y_0)s, %(vertices_x_1)s, %(vertices_y_1)s, %(vertices_x_2)s, %(vertices_y_2)s, %(vertices_x_3)s, %(vertices_y_3)s)", {'document_id': document_id, 'word_text': word_text, 'block': b, 'page': p, 'word': w, 'word_line': word_line, 'confidence': word.confidence, 'vertices_x_0': wrd_vertices_x_0, 'vertices_y_0': wrd_vertices_y_0, 'vertices_x_1': wrd_vertices_x_1, 'vertices_y_1': wrd_vertices_y_1, 'vertices_x_2': wrd_vertices_x_2, 'vertices_y_2': wrd_vertices_y_2, 'vertices_x_3': wrd_vertices_x_3, 'vertices_y_3': wrd_vertices_y_3})
#Crop image
# Compute the overall text bounding box, padded by crop_buffer on each side
# (clamped to the image bounds).
results_poly = response.text_annotations[0].bounding_poly
results_minx = min(results_poly.vertices[0].x, results_poly.vertices[1].x, results_poly.vertices[2].x, results_poly.vertices[3].x)
results_miny = min(results_poly.vertices[0].y, results_poly.vertices[1].y, results_poly.vertices[2].y, results_poly.vertices[3].y)
results_maxx = max(results_poly.vertices[0].x, results_poly.vertices[1].x, results_poly.vertices[2].x, results_poly.vertices[3].x)
results_maxy = max(results_poly.vertices[0].y, results_poly.vertices[1].y, results_poly.vertices[2].y, results_poly.vertices[3].y)
if results_minx > crop_buffer:
results_minx = results_minx - crop_buffer
if results_miny > crop_buffer:
results_miny = results_miny - crop_buffer
if (results_maxx + crop_buffer) < im.size[0]:
results_maxx = results_maxx + crop_buffer
if (results_maxy + crop_buffer) < im.size[1]:
results_maxy = results_maxy + crop_buffer
print("Cropping image to ({},{}), ({},{})".format(results_minx, results_miny, results_maxx, results_maxy))
#image for PIL
# NOTE(review): the image is opened only here, after `im` was already
# referenced in the drawing loop above - this assignment needs to move up.
im = Image.open(path)
im1 = im.crop((results_minx, results_miny, results_maxx, results_maxy))
#Save cropped image
im1.save(img_file, "JPEG")
sys.exit(0)
| StarcoderdataPython |
9769524 | <reponame>philroche/teamclock<filename>teamclock/clocks/models.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from timezone_field import TimeZoneField
@python_2_unicode_compatible
class Clock(models.Model):
# Display name for the clock.
name = models.CharField(_("Name of Clock"), blank=False, max_length=255)
# External identifier used in __str__. NOTE(review): the label says "Unique"
# but the field has no unique=True constraint - confirm intent.
uid = models.CharField(_("Unique identifier of Clock"), blank=False, max_length=255)
def __str__(self):
return '%s-%s' % (self.uid, self.name)
@python_2_unicode_compatible
class TeamMember(models.Model):
# Member display name.
name = models.CharField(_("Name of Team Member"), blank=False, max_length=255)
# Every member belongs to exactly one clock; deleting semantics use the
# framework default for this ForeignKey.
clock = models.ForeignKey(Clock, null=False, blank=False)
# IANA timezone for rendering the member's local time.
timezone = TimeZoneField(default='Europe/London') # defaults supported
city = models.CharField(_("City of Team Member"), blank=False, max_length=255)
# Hex color used to distinguish the member in the UI; light grey by default.
color = models.CharField(_("Color (HEX)"), blank=False, max_length=7, default="#EEEEEE")
def __str__(self):
return '%s from clock %s' % (self.name, self.clock.name)
| StarcoderdataPython |
12839361 | <gh_stars>100-1000
import requests
import json

# Scoring endpoint of a locally served model (e.g. `mlflow models serve`).
url = 'http://localhost:5000/invocations'

# Three example rows in column-oriented form.
data = {
    'Pclass': [3, 3, 3],
    'Sex': ['male', 'female', 'male'],
    'Age': [4, 22, 28]
}
j_data = json.dumps(data)
headers = {'Content-Type': 'application/json'}

print("Sending request for model...")
print(f"Data: {j_data}")
# Send the pre-serialized JSON as the raw request body. The original passed
# `json=j_data`, which re-encodes the string and posts a quoted JSON string
# literal instead of the JSON object the server expects; the manual
# Content-Type header confirms `data=` was intended.
r = requests.post(url, data=j_data, headers=headers)
print(f"Response: {r.text}")
4975847 | <filename>src/index.py
from __future__ import print_function, division
import os
import sys
root = os.getcwd().split("MAR_test")[0] + "MAR_test/src/util"
sys.path.append(root)
from flask import Flask, url_for, render_template, request, jsonify, Response, json
from pdb import set_trace
from mar import MAR
app = Flask(__name__,static_url_path='/static')
# NOTE(review): `global` at module scope is a no-op; `target` below is already
# a module-level name that the route handlers re-declare with `global`.
global target
target=MAR()
@app.route('/hello/')
# GET /hello/ - serve the single-page labeling UI template.
def hello():
return render_template('hello.html')
@app.route('/load',methods=['POST'])
def load():
    """Create or restore a MAR session for the posted file and report progress."""
    global target
    filename = request.form['file']
    target = target.create(filename)
    pos, neg, total = target.get_numbers()
    progress = {
        "hasLabel": target.hasLabel,
        "flag": target.flag,
        "pos": pos,
        "done": pos + neg,
        "total": total,
    }
    return jsonify(progress)
@app.route('/load_old',methods=['POST'])
def load_old():
    """Load a previously labeled dataset; clear the flag when it has no positives."""
    global target
    filename = request.form['file']
    target.create_old(filename)
    if target.last_pos == 0:
        target.flag = False
    return jsonify({"flag": target.flag})
@app.route('/export',methods=['POST'])
def export():
    """Export the labeled results; report success/failure as a JSON flag."""
    try:
        target.export()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as an export failure.
        return jsonify({"flag": False})
    return jsonify({"flag": True})
@app.route('/plot',methods=['POST'])
def plot():
    """Regenerate the progress plot, clearing previously rendered images first."""
    image_dir = "./static/image"
    for stale_name in os.listdir(image_dir):
        os.remove(os.path.join(image_dir, stale_name))
    return jsonify({"path": target.plot()})
@app.route('/labeling',methods=['POST'])
def labeling():
    """Record a human label for one document and return updated progress."""
    doc_id = int(request.form['id'])
    target.code(doc_id, request.form['label'])
    pos, neg, total = target.get_numbers()
    return jsonify({"flag": target.flag, "pos": pos,
                    "done": pos + neg, "total": total})
@app.route('/auto',methods=['POST'])
def auto():
    """Auto-label every posted document id with its stored ground-truth label."""
    for raw_id in request.form.values():
        doc_id = int(raw_id)
        target.code(doc_id, target.body["label"][doc_id])
    pos, neg, total = target.get_numbers()
    return jsonify({"flag": target.flag, "pos": pos,
                    "done": pos + neg, "total": total})
@app.route('/restart',methods=['POST'])
def restart():
    """Delete cached session state and rebuild the session from scratch."""
    global target
    os.remove("./memory/" + target.name + ".pickle")
    target = target.create(target.filename)
    pos, neg, total = target.get_numbers()
    return jsonify({"hasLabel": target.hasLabel, "flag": target.flag,
                    "pos": pos, "done": pos + neg, "total": total})
@app.route('/train',methods=['POST'])
def train():
    """Run one active-learning round and return candidate documents.

    Always returns a random candidate; adds certainty-based candidates once
    any positives exist, and reuse-based candidates when prior positives exist.
    """
    pos, neg, total = target.get_numbers()
    res = {"random": target.format(target.random())}
    if pos > 0 or target.last_pos > 0:
        uncertain_id, uncertain_prob, certain_id, certain_prob = target.train()
        res["certain"] = target.format(certain_id, certain_prob)
    if target.last_pos > 0:
        uncertain_id, uncertain_prob, reuse_id, reuse_prob = target.train_reuse()
        res["reuse"] = target.format(reuse_id, reuse_prob)
    target.save()
    return jsonify(res)
if __name__ == "__main__":
# Debugger and reloader deliberately disabled for direct runs.
app.run(debug=False,use_debugger=False)
| StarcoderdataPython |
151689 | <gh_stars>10-100
# If you have not yet seen the source in basic/main.py, please take a look.
# In this sample we override the ProtoRPC message schema of MyModel in both the
# request and response of MyModelInsert and in the response of MyModelList.
# This is used to randomly set the value of attr2 based on attr1.
import random
import endpoints
from google.appengine.ext import ndb
from protorpc import remote
from endpoints_proto_datastore.ndb import EndpointsModel
# These are used as extra phrases to randomly add to the value of attr1 when
# setting attr2.
PHRASES = ['I', 'AM', 'RANDOM', 'AND', 'ARBITRARY']
class MyModel(EndpointsModel):
# attr1/attr2 are free-form strings; created is set server-side on first put()
# thanks to auto_now_add.
attr1 = ndb.StringProperty()
attr2 = ndb.StringProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
@endpoints.api(name='myapi', version='v1', description='My Little API')
class MyApi(remote.Service):
# Sample API demonstrating request/response schema overrides on top of the
# basic/main.py example.
# In addition to the arguments used in the MyModel.method decorator in
# basic/main.py, we also use request_fields and response_fields to override
# the schema of the ProtoRPC request message and response message,
# respectively.
# Since request_fields is ('attr1',), instead of the three string fields
# attr1, attr2 and created, the request message schema will contain a single
# string field corresponding to the NDB property attr1. Similarly, since
# response_fields is ('created',), the response message schema will contain a
# single string field corresponding to the NDB property created.
@MyModel.method(request_fields=('attr1',),
response_fields=('created',),
path='mymodel',
http_method='POST',
name='mymodel.insert')
def MyModelInsert(self, my_model):
# Insert handler: derives attr2 from attr1 plus a random phrase, persists,
# and returns the entity (serialized per response_fields).
# We use a random value from PHRASES to set attr2 in terms of attr1. Since
# the request message can only contain a value for attr1, we need to also
# provide a value for attr2.
my_model.attr2 = '%s-%s' % (my_model.attr1, random.choice(PHRASES))
# As in basic/main.py, since created is auto_now_add, the entity gets a new
# value for created and an ID after being persisted.
my_model.put()
return my_model
# As above, in addition to the arguments used in the MyModel.query_method
# decorator in basic/main.py, we also use collection_fields to override
# the schema of the ProtoRPC messages that are listed in the "items" fields
# of the query response. As in basic/main.py, there are no query arguments.
# Since collection_fields is ('attr2', 'created'), each value in the "items"
# list will contain the two string fields corresponding to the NDB properties
# attr2 and created.
@MyModel.query_method(collection_fields=('attr2', 'created'),
path='mymodels', name='mymodel.list')
def MyModelList(self, query):
# As in basic/main.py, no filters are applied.
return query
application = endpoints.api_server([MyApi], restricted=False)
| StarcoderdataPython |
1894108 | <reponame>nima1999nikkhah/SimSiam_gMLP
import argparse
import os
import time
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torch.utils.data import Dataset, DataLoader
import numpy as np
from einops.layers.torch import Rearrange
class Residual(nn.Module):
    """Residual wrapper with stochastic depth.

    With probability ``survival_prob`` (sampled per forward pass, training
    mode only) the wrapped branch is applied and added to the input;
    otherwise the block is skipped and the input passes through unchanged.

    Fixes two defects in the original:
    * the survival draw was made once in ``__init__``, permanently freezing
      each block as "kept" or "dropped" instead of re-sampling per pass;
    * the non-surviving path returned ``fn(x)`` (dropping the identity),
      whereas stochastic depth drops the *branch* and keeps the identity.
    """

    def __init__(self, survival_prob, fn):
        super().__init__()
        self.survival_prob = survival_prob
        self.fn = fn

    def forward(self, x):
        # Only drop during training; evaluation is deterministic.
        if self.training and torch.rand(1).item() > self.survival_prob:
            return x
        return self.fn(x) + x
class PreNorm(nn.Module):
    """Normalize the input with LayerNorm, then apply the wrapped callable."""

    def __init__(self, dim, fn, **kwargs):
        super().__init__(**kwargs)
        self.norm = nn.LayerNorm(normalized_shape=dim, eps=1e-6)
        self.fn = fn

    def forward(self, x, **kwargs):
        # Extra kwargs are accepted but intentionally not forwarded,
        # matching the original behavior.
        normalized = self.norm(x)
        return self.fn(normalized)
class SpatialGatingUnit(nn.Module):
    """gMLP spatial gating: gate half the channels with a cross-token projection.

    Input (batch, seq, dim_ff) is split channel-wise into a residual half and
    a gate half; the gate half is LayerNorm-ed, linearly mixed across the
    sequence dimension, and multiplied element-wise with the residual half.
    The sequence projection is initialized to the identity-like zeros/ones
    scheme from the gMLP paper so the unit starts as a near-pass-through.
    """

    def __init__(self, dim_seq, dim_ff):
        super().__init__()
        self.proj = nn.Linear(dim_seq, dim_seq)
        nn.init.zeros_(self.proj.weight)
        nn.init.ones_(self.proj.bias)
        self.norm = nn.LayerNorm(normalized_shape=dim_ff // 2, eps=1e-6)
        self.dim_ff = dim_ff
        self.activation = nn.GELU()  # kept for state compatibility; unused

    def forward(self, x):
        half = self.dim_ff // 2
        res, gate = x[..., :half], x[..., half:]
        gate = self.norm(gate)
        # Mix the gate across the sequence axis: transpose so dim_seq is last,
        # apply the linear projection, then transpose back.
        gate = self.proj(gate.transpose(1, 2)).transpose(1, 2)
        return gate * res
class gMLPBlock(nn.Module):
    """One gMLP block: channel expansion, GELU, spatial gating, projection back.

    Shapes: (B, seq, dim) -> (B, seq, dim_ff) -> (B, seq, dim_ff // 2)
    -> (B, seq, dim).
    """

    def __init__(self, dim, dim_ff, seq_len):
        super().__init__()
        self.proj_in = nn.Linear(dim, dim_ff)
        self.activation = nn.GELU()
        self.sgu = SpatialGatingUnit(seq_len, dim_ff)
        self.proj_out = nn.Linear(dim_ff // 2, dim)

    def forward(self, x):
        expanded = self.activation(self.proj_in(x))
        gated = self.sgu(expanded)
        return self.proj_out(gated)
class gMLPFeatures(nn.Module):
    """gMLP backbone: patchify, embed, prepend a learnable query token,
    run stacked stochastic-depth gMLP blocks, and return the final
    features of the query token (CLS-style readout).
    """
    def __init__(self, survival_prob=0.5, image_size=256, patch_size=16, dim=128, depth=30, ff_mult=2, num_classes=196):
        super().__init__()
        self.image_size = image_size
        self.patch_size = patch_size
        # Non-overlapping patch extraction: (b, c, H, W) -> (b, num_patches, c*p*q).
        self.patch_rearrange = Rearrange('b c (h p) (w q) -> b (h w) (c p q)', p=self.patch_size, q=self.patch_size) #(b, 3 , 256, 256) -> (b, 16*16, 3*16*16)
        dim_ff = dim * ff_mult
        initial_dim = 3 * (patch_size ** 2)
        num_patches = (image_size // patch_size) ** 2
        self.patch_embed = nn.Linear(initial_dim , dim) #shape=(B, seq, dim)
        # Learnable query token prepended to the patch sequence.
        self.query = torch.nn.Parameter(torch.randn(1,1,dim))
        self.query.requires_grad = True
        self.dim = dim
        # depth stochastic-depth residual blocks; +1 sequence slot for the query.
        module_list = [Residual(survival_prob,
                        PreNorm(dim,
                        gMLPBlock(dim=dim,
                                  dim_ff=dim_ff,
                                  seq_len=num_patches+1,
                                  ))) for i in range(depth)]
        self.glayers = nn.Sequential(*module_list)
        self.norm = nn.LayerNorm(normalized_shape=dim, eps=1e-5)
        self.classification_rearrange = Rearrange('b s d -> b (s d)')
        # NOTE(review): attribute name is misspelled ("clssification") and the
        # head is never used in forward(); kept as-is for checkpoint compatibility.
        self.clssification_head = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(),
            nn.Linear(dim, num_classes)
        )
    def extract_patches(self, images):
        # Split *images* into flattened non-overlapping patches.
        batch_size = images.size(0)  # NOTE(review): unused local
        patches = self.patch_rearrange(images)
        return patches
    def forward(self, x):
        x = self.extract_patches(x) #shape=(B, num_patches, patch_size**2 * C)
        x = self.patch_embed(x) #shape=(B, num_patches, dim)
        B, seq, dim = x.shape
        # Prepend one query token per batch element.
        query_1 = self.query.repeat((B, 1, 1))
        x = torch.cat([query_1, x], dim=1)
        x = self.glayers(x) #shape=(B, num_patches, dim)
        x = self.norm(x) #shape=(B, num_patches, dim)
        # Keep only the query token's features as the representation.
        x = x[:,0:1,:]
        x = self.classification_rearrange(x)
        return x
| StarcoderdataPython |
4807869 | """This is the package which contains all the cogs"""
from .community import Community
from .logs import Logs
from .misc import Cmds
from .sudo import Sudo
# Yes this entire package will be one extension
# This will speed up the launch!
def setup(bot):
    """Extension entry point: attach every cog in this package to *bot*."""
    for cog_class in (Community, Logs, Cmds, Sudo):
        bot.add_cog(cog_class(bot))
| StarcoderdataPython |
import logging

logging.basicConfig(level=logging.INFO,
                    format="[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S")
logger = logging.getLogger(__name__)


def is_positive_number(value):
    """Return True if *value* parses as an integer greater than zero.

    Unparseable inputs are logged and reported as False.  The previous
    version implicitly returned None on ValueError and let TypeError
    (e.g. for None input) escape uncaught.
    """
    try:
        return int(value) > 0
    except (ValueError, TypeError) as e:
        logger.exception(e)
        return False
| StarcoderdataPython |
9780616 | <reponame>jshridha/home-assistant
"""Support for Lutron lights."""
import logging
from homeassistant.components.light import ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light
from . import LUTRON_CONTROLLER, LUTRON_DEVICES, LutronDevice
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Lutron lights."""
    entities = [
        LutronLight(area_name, device, hass.data[LUTRON_CONTROLLER])
        for (area_name, device) in hass.data[LUTRON_DEVICES]["light"]
    ]
    # Request an initial update for every entity (second argument True).
    add_entities(entities, True)
def to_lutron_level(level):
    """Convert the given HASS light level (0-255) to Lutron (0.0-100.0)."""
    # True division already yields a float in Python 3.
    return level * 100 / 255
def to_hass_level(level):
    """Convert the given Lutron (0.0-100.0) light level to HASS (0-255)."""
    scaled = level * 255 / 100
    return int(scaled)  # truncates toward zero, as before
class LutronLight(LutronDevice, Light):
    """Representation of a Lutron Light, including dimmable."""
    def __init__(self, area_name, lutron_device, controller):
        """Initialize the light."""
        # Last known non-zero brightness, used to restore level on turn_on.
        self._prev_brightness = None
        self._is_on = False
        super().__init__(area_name, lutron_device, controller)
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS
    @property
    def brightness(self):
        """Return the brightness of the light."""
        # NOTE(review): this getter has a side effect — it caches the last
        # non-zero level so turn_on can restore it after the light was off.
        new_brightness = to_hass_level(self._lutron_device.last_level())
        if new_brightness != 0:
            self._prev_brightness = new_brightness
        return new_brightness
    def turn_on(self, **kwargs):
        """Turn the light on."""
        if ATTR_BRIGHTNESS in kwargs and self._lutron_device.is_dimmable:
            brightness = kwargs[ATTR_BRIGHTNESS]
        elif self._prev_brightness == 0:
            # No usable previous level: fall back to 50% brightness.
            brightness = 255 / 2
        else:
            brightness = self._prev_brightness
        self._prev_brightness = brightness
        self._lutron_device.level = to_lutron_level(brightness)
    def turn_off(self, **kwargs):
        """Turn the light off."""
        self._lutron_device.level = 0
    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attr = {"lutron_integration_id": self._lutron_device.id}
        return attr
    @property
    def is_on(self):
        """Return true if device is on."""
        # NOTE(review): derived directly from last_level(); the _is_on flag
        # maintained in update() is never consulted here.
        return self._lutron_device.last_level() > 0
    def update(self):
        """Call when forcing a refresh of the device."""
        self._is_on = self._lutron_device.level > 0
        # Seed the brightness cache on the first refresh.
        if self._prev_brightness is None:
            self._prev_brightness = to_hass_level(self._lutron_device.level)
| StarcoderdataPython |
3479151 | #!/usr/bin/env python3
class Solution:
    def findPeakElement(self, nums: [int]) -> int:
        """Return the index of a peak element via binary search.

        A peak is an element greater than its right neighbour on the
        descent path chosen by the search; returns -1 for an empty list.

        Bug fix: the right bound was previously ``len(nums)``, so the
        ``nums[mid + 1]`` probe raised IndexError for strictly increasing
        input such as [1, 2, 3]; it must be the last valid index.
        """
        if not nums:
            return -1
        left = 0
        right = len(nums) - 1  # last valid index, so mid + 1 stays in range
        while left < right:
            mid = (left + right) // 2
            if nums[mid] < nums[mid + 1]:
                # Ascending slope: a peak lies strictly to the right.
                left = mid + 1
            else:
                right = mid
        # Post-processing — end condition: left == right is the peak index.
        return left
# Quick manual check: [1, 3, 2, 1] has its peak (value 3) at index 1.
a = Solution()
print(Solution.findPeakElement(a, [1,3,2,1]))
1621732 | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: reset the custom managers on Member.

    AlterModelManagers with an empty list removes any previously recorded
    managers from the migration state for the 'member' model.
    """
    dependencies = [
        ('open_humans', '0015_auto_20150410_0042'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='member',
            managers=[
            ],
        ),
    ]
| StarcoderdataPython |
3446888 | <gh_stars>100-1000
import smtplib
import ssl
"""Make sure that the gmail you are using have enabled
less secure app otherwise email will not be sent"""
def send_email(message):
    """Send *message* through Gmail over SMTP/SSL.

    Returns True on success, False when login or delivery fails.  The
    account placeholders below must be filled in and "less secure app"
    access enabled on the account.
    """
    smtp_server = "smtp.gmail.com"
    port = 465  # For SSL
    sender_email = "Enter your Email"
    receiver_email = "Again Enter Same Email"
    password = "<PASSWORD>"
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
        try:
            server.login(sender_email, password)
            server.sendmail(sender_email, receiver_email, message)
        except Exception as e:
            print(e)
            print("could not login or send the mail.")
            return False
        print('email sent!')
        return True
| StarcoderdataPython |
11276282 | <filename>fall_down_detecter.py
#!/usr/bin/env python
# encoding: utf-8
'''
@author: zhaoyuxin
@connect: <EMAIL>
@file: thinter_s.py
@time: 2020/8/4 20:02
'''
import tkinter as tk
from tkinter import messagebox
import sys
import os
import threading
import fileinput
from demo import detect_main
address_dir = 'Video_Information/url_address.txt'
name_dir = 'Video_Information/id_name.txt'
def add():
    """Callback for the *add* button: append the entered camera id and
    stream URL to the text widgets and persist them to the data files.

    Fixes: reuse the module-level path constants (name_dir / address_dir)
    instead of re-hardcoding the paths, and close the files via context
    managers instead of leaking handles on an exception.
    """
    var_id = id_name.get()
    t_name.insert('insert', var_id)
    t_name.insert('end', '\n')
    with open(name_dir, 'a+', encoding='utf-8') as h:
        h.write(var_id + '\n')
    var_adress = url_address.get()
    t_address.insert('insert', var_adress)
    t_address.insert('end', '\n')
    with open(address_dir, 'a+', encoding='utf-8') as f:
        f.write(var_adress + '\n')
def stop():
    # Raises SystemExit; presumably intended to terminate the whole GUI
    # (daemon detection threads die with the interpreter) — note Tk routes
    # callback exceptions specially, so verify this actually exits.
    sys.exit()
def test(txt):
    # Thread target: run fall detection for one camera.  *txt* is a dict
    # with 'url_address' (stream source) and 'id_name' (display name).
    detect_main(video_source=txt['url_address'],video_name=txt['id_name'])
def run():
    """Callback for the *run* button: start one daemon detection thread
    per (name, url) pair stored in the data files.

    Fix: the two data files were opened but never closed; context
    managers now release the handles once the threads are spawned.
    """
    with open(name_dir, 'r', encoding='utf-8') as name_txt, \
            open(address_dir, 'r', encoding='utf-8') as adress_txt:
        # zip pairs line i of the name file with line i of the url file.
        for names in zip(name_txt, adress_txt):
            txts = {'id_name': names[0].strip(), 'url_address': names[1].strip()}
            thread_ = threading.Thread(target=test, args=(txts,))
            thread_.setDaemon(True)  # do not block interpreter shutdown
            thread_.start()
def delete():
    """Callback for the *delete* button: remove the entered id (and its
    paired URL line) from both data files, then refresh the text widgets.
    """
    delete_name = delete_id_name.get()
    # 1-based line number of the matching id; 0 means "not found".
    line_num = 0
    with open('Video_Information/id_name.txt', 'r') as a:
        is_exist = False
        for i,id_names in enumerate(a):
            id_names = id_names.strip()
            if id_names == delete_name:
                is_exist = True
                line_num = i + 1
        if not is_exist:
            tk.messagebox.showerror('Error', 'Your id name is wrong,try it again!')
    # NOTE(review): on a missing name the code still falls through and
    # rewrites both files; with line_num == 0 no line matches, so nothing
    # is actually removed.  The close() below is redundant after `with`.
    a.close()
    # fileinput with inplace=1 redirects print() into the file being edited;
    # printing nothing for the matching line deletes it.
    f = fileinput.input('Video_Information/url_address.txt', inplace=1, mode='rU')
    for line in f:
        if f.filelineno() == line_num:
            print(end='')
        else:
            print(line,end='')
    f.close()
    g = fileinput.input('Video_Information/id_name.txt', inplace=1, mode='rU' )
    for line in g:
        if g.filelineno() == line_num:
            print(end='')
        else:
            print(line,end='')
    g.close()
    # Reload both text widgets from the (possibly rewritten) data files.
    if os.path.exists(name_dir):
        a = open(name_dir, 'r', encoding='utf-8')
        t_name.delete(0.0, index2='end')
        for id_names in a:
            t_name.insert('insert', id_names)
        a.close()
        b = open(address_dir, 'r', encoding='utf-8')
        t_address.delete(0.0, index2='end')
        for id_names in b:
            t_address.insert('insert', id_names)
        b.close()
if __name__ == '__main__':
# prosess_pool = Pool(processes=2)
#
# parser = argparse.ArgumentParser(
# description='''Lightweight human pose estimation python demo.
# This is just for quick results preview.
# Please, consider c++ demo for the best performance.''')
# parser.add_argument('--checkpoint-path', type=str, default='openpose.jit',
# help='path to the checkpoint')
# parser.add_argument('--height-size', type=int, default=256, help='network input layer height size')
# parser.add_argument('--prosess-pool', type=Pool, default=None)
#
# args = parser.parse_args()
# args.prosess_pool = prosess_pool
if not os.path.exists('Video_Information'):
os.makedirs('Video_Information')
window = tk.Tk()
window.title('Passenger fall alarm system')
window.geometry('550x500')
# window.iconbitmap('px.ico')
tk.Label(window, text='please input id name:').place(x=10, y=10)
tk.Label(window, text='please input url address:').place(x=10, y=40)
tk.Label(window, text='please input delete id name:').place(x=10, y=70)
tk.Label(window, text='id names').place(x=20, y=160)
tk.Label(window, text='url addresses').place(x=250, y=160)
var_id_name = tk.StringVar()
id_name = tk.Entry(window, textvariable=var_id_name)
id_name.place(x=200, y=10)
var_url_address = tk.StringVar()
url_address = tk.Entry(window, textvariable=var_url_address)
url_address.place(x=200, y=40)
var_delete_name = tk.StringVar()
delete_id_name = tk.Entry(window, textvariable=var_delete_name)
delete_id_name.place(x=200, y=70)
add = tk.Button(window, text='add', width=10, command=add)
add.place(x=70, y=120)
stop = tk.Button(window, text='stop', width=10, command=stop, background='red')
stop.place(x=315, y=120)
run = tk.Button(window, text='run', width=10, command=run)
run.place(x=195, y=120)
t_name = tk.Text(window, width=10, height=15, font=14)
t_name.place(x=20, y=180)
t_address = tk.Text(window, width=50, height=15, font=14)
t_address.place(x=100, y=180)
if os.path.exists(name_dir):
a = open(name_dir, 'r', encoding='utf-8')
for id_names in a:
t_name.insert('insert', id_names)
a.close()
b = open(address_dir, 'r', encoding='utf-8')
for id_names in b:
t_address.insert('insert', id_names)
b.close()
delete = tk.Button(window, text='delete', width=10, command=delete)
delete.place(x=430, y=120)
window.mainloop()
| StarcoderdataPython |
from setuptools import setup, find_packages

# Packaging metadata for deploying the Keras redshift model to
# Google Cloud ML Engine; install_requires is resolved on the workers.
setup(
    name='redshift_nn',
    version='1.0',
    packages=find_packages(),
    include_package_data=True,
    # Fix: description previously misspelled "Redshfit".
    description='Redshift NN using a keras model on Cloud ML Engine',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    install_requires=[
        'keras',
        'h5py'
    ],
    zip_safe=False
)
| StarcoderdataPython |
11320486 | <reponame>vinnamkim/pytorch-cifar<filename>only_test.py
'''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from utils import progress_bar
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--epoch', default=100, type=int, help='length of epochs to train')
parser.add_argument('--batch_size', default=128, type=int, help='training batch size')
parser.add_argument('--random_seed', default=None, type=int, help='random seed')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--model', default='norm', type=str, help='model to train')
parser.add_argument('--spectral-penalty', default=-1., type=float, help='spectral_penalty')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
if args.random_seed is not None:
print('Set manual seed :', args.random_seed)
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
num_workers = 2
if os.name == 'nt':
num_workers = 0
print('batch_size : ', args.batch_size)
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=num_workers)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=num_workers)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
#from torchvision.models import resnet18
if args.model == 'lasso':
from my_models.lasso_resnet import resnet18
net = resnet18()
elif args.model == 'norm':
from my_models.norm_resnet import resnet18
net = resnet18()
elif args.model == 'ws':
from my_models.ws_resnet import resnet18
net = resnet18()
dir_name = args.model + '_18_' + str(args.batch_size)
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
print('==> Resuming from checkpoint..')
assert os.path.isdir(dir_name), 'Error: no checkpoint directory found!'
checkpoint = torch.load(os.path.join(dir_name, 'ckpt.pth'))
#net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
def get_d_params(ckpt, percentage):
    """Build a low-rank approximation of a state dict via truncated SVD.

    Every tensor with more than one dimension is flattened to 2-D,
    decomposed, and rebuilt from the top ``percentage`` fraction of its
    singular values; 1-D tensors pass through unchanged.

    Returns (new_state_dict, original_param_count, factorized_param_count),
    where the latter counts the storage needed for the U/S/V factors.
    """
    from collections import OrderedDict
    d_params = OrderedDict()
    o_num_params = 0
    n_num_params = 0
    with torch.no_grad():
        for key, tensor in ckpt.items():
            o_num_params += tensor.shape.numel()
            if len(tensor.shape) > 1:
                U, S, V = torch.svd(tensor.flatten(1))
                rank = int(len(S) * percentage)
                U, S, V = U[:, :rank], S[:rank], V[:, :rank]
                # Reconstruct U @ diag(S) @ V^T and restore the original shape.
                d_params[key] = U.mm(S.diag()).mm(V.T).reshape_as(tensor)
                n_num_params += U.shape.numel() + S.shape.numel() + V.shape.numel()
            else:
                d_params[key] = tensor
                n_num_params += tensor.shape.numel()
    return d_params, o_num_params, n_num_params
criterion = nn.CrossEntropyLoss()
def only_test(percentage):
    """Evaluate the network after replacing its weights with a rank-truncated
    (SVD) copy keeping the top *percentage* of singular values per layer.

    Uses module globals: checkpoint, net, device, testloader, criterion.
    Returns (percentage, mean_test_loss, accuracy_percent, param_ratio).
    """
    d_params, o_num_params, n_num_params = get_d_params(checkpoint['net'], percentage)
    net.load_state_dict(d_params)
    # Fraction of parameters retained by the factorization.
    print(n_num_params / o_num_params)
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            print(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    return percentage, test_loss/(batch_idx+1), 100. * correct / total, n_num_params / o_num_params
stats = []
import numpy as np
for i in np.linspace(0.5, 1.0, 20):
print("Percentage : ", i)
stats.append(only_test(i))
torch.save(stats, args.model + '_reduction.stats')
| StarcoderdataPython |
3582586 | <filename>examples/a_unicorn_examples/solve.py
import unicorn
import random
import string
import capstone
import re
import globalData
import binascii
def ranstr(num):
salt = ''.join(random.sample(string.ascii_letters + string.digits, num))
return salt
cs = capstone.Cs(capstone.CS_ARCH_ARM64, capstone.CS_MODE_ARM)
cs.detail = True
all_regs = None
reg_names = {
"X0": unicorn.arm64_const.UC_ARM64_REG_X0,
"X1": unicorn.arm64_const.UC_ARM64_REG_X1,
"X2": unicorn.arm64_const.UC_ARM64_REG_X2,
"X3": unicorn.arm64_const.UC_ARM64_REG_X3,
"X4": unicorn.arm64_const.UC_ARM64_REG_X4,
"X5": unicorn.arm64_const.UC_ARM64_REG_X5,
"X6": unicorn.arm64_const.UC_ARM64_REG_X6,
"X7": unicorn.arm64_const.UC_ARM64_REG_X7,
"X8": unicorn.arm64_const.UC_ARM64_REG_X8,
"X9": unicorn.arm64_const.UC_ARM64_REG_X9,
"X10": unicorn.arm64_const.UC_ARM64_REG_X10,
"X11": unicorn.arm64_const.UC_ARM64_REG_X11,
"X12": unicorn.arm64_const.UC_ARM64_REG_X12,
"X13": unicorn.arm64_const.UC_ARM64_REG_X13,
"X14": unicorn.arm64_const.UC_ARM64_REG_X14,
"X15": unicorn.arm64_const.UC_ARM64_REG_X15,
"X16": unicorn.arm64_const.UC_ARM64_REG_X16,
"X17": unicorn.arm64_const.UC_ARM64_REG_X17,
"X18": unicorn.arm64_const.UC_ARM64_REG_X18,
"X19": unicorn.arm64_const.UC_ARM64_REG_X19,
"X20": unicorn.arm64_const.UC_ARM64_REG_X20,
"X21": unicorn.arm64_const.UC_ARM64_REG_X21,
"X22": unicorn.arm64_const.UC_ARM64_REG_X22,
"X23": unicorn.arm64_const.UC_ARM64_REG_X23,
"X24": unicorn.arm64_const.UC_ARM64_REG_X24,
"X25": unicorn.arm64_const.UC_ARM64_REG_X25,
"X26": unicorn.arm64_const.UC_ARM64_REG_X26,
"X27": unicorn.arm64_const.UC_ARM64_REG_X27,
"X28": unicorn.arm64_const.UC_ARM64_REG_X28,
"W0": unicorn.arm64_const.UC_ARM64_REG_W0,
"W1": unicorn.arm64_const.UC_ARM64_REG_W1,
"W2": unicorn.arm64_const.UC_ARM64_REG_W2,
"W3": unicorn.arm64_const.UC_ARM64_REG_W3,
"W4": unicorn.arm64_const.UC_ARM64_REG_W4,
"W5": unicorn.arm64_const.UC_ARM64_REG_W5,
"W6": unicorn.arm64_const.UC_ARM64_REG_W6,
"W7": unicorn.arm64_const.UC_ARM64_REG_W7,
"W8": unicorn.arm64_const.UC_ARM64_REG_W8,
"W9": unicorn.arm64_const.UC_ARM64_REG_W9,
"W10": unicorn.arm64_const.UC_ARM64_REG_W10,
"W11": unicorn.arm64_const.UC_ARM64_REG_W11,
"W12": unicorn.arm64_const.UC_ARM64_REG_W12,
"W13": unicorn.arm64_const.UC_ARM64_REG_W13,
"W14": unicorn.arm64_const.UC_ARM64_REG_W14,
"W15": unicorn.arm64_const.UC_ARM64_REG_W15,
"W16": unicorn.arm64_const.UC_ARM64_REG_W16,
"W17": unicorn.arm64_const.UC_ARM64_REG_W17,
"W18": unicorn.arm64_const.UC_ARM64_REG_W18,
"W19": unicorn.arm64_const.UC_ARM64_REG_W19,
"W20": unicorn.arm64_const.UC_ARM64_REG_W20,
"W21": unicorn.arm64_const.UC_ARM64_REG_W21,
"W22": unicorn.arm64_const.UC_ARM64_REG_W22,
"W23": unicorn.arm64_const.UC_ARM64_REG_W23,
"W24": unicorn.arm64_const.UC_ARM64_REG_W24,
"W25": unicorn.arm64_const.UC_ARM64_REG_W25,
"W26": unicorn.arm64_const.UC_ARM64_REG_W26,
"W27": unicorn.arm64_const.UC_ARM64_REG_W27,
"W28": unicorn.arm64_const.UC_ARM64_REG_W28,
"SP": unicorn.arm64_const.UC_ARM64_REG_SP,
}
# Initialize the module-level trace state shared with hook_code().
def initGlobalData():
    # No buffered instruction from a previous hook call yet.
    globalData.has_pre = False
    globalData.pre_codestr = ""
    globalData.pre_regname = ""
    # Watch list: memory addresses whose contents are printed whenever they
    # change during tracing (address -> last seen hex string).
    globalData.watch_addrs = {0x7290e3e060: ""}
    # globalData.watch_addrs = {0x7290e3e6e8: ""}
def hook_code(uc: unicorn.Uc, address, size, user_data):
    """Per-instruction Unicorn hook: disassemble, trace register values and
    watched memory.

    The value written by an instruction is only known after it executes,
    so each line is buffered in globalData.pre_* and printed on the *next*
    hook call, augmented with the new value of its destination register.

    Fix: the no-register branch previously assigned a dead local
    ``has_pre`` instead of ``globalData.has_pre``.
    """
    inst_code = uc.mem_read(address, size)
    for inst in cs.disasm(inst_code, size):
        # Flush the buffered previous instruction, appending the updated
        # value of its first (destination) register.
        if globalData.has_pre and globalData.pre_regname:
            regindex = reg_names[globalData.pre_regname.upper()]
            regvalue = uc.reg_read(regindex)
            globalData.pre_codestr += "\t//%s=0x%x" % (globalData.pre_regname, regvalue)
            print(globalData.pre_codestr)
            globalData.pre_codestr = ""
            globalData.has_pre = False
        # Dump any watched memory region whose contents changed.
        if len(globalData.watch_addrs) > 0:
            for i, v in globalData.watch_addrs.items():
                idata = uc.mem_read(i, 0x10)
                buf = binascii.b2a_hex(idata)
                hexstr = buf.decode(encoding="utf-8")
                if globalData.watch_addrs[i] == hexstr:
                    continue
                globalData.watch_addrs[i] = hexstr
                print("0x%x\t%s" % (i, hexstr))
        # Assemble the current line of disassembly.
        opstr = "0x%x:\t%s\t%s" % (address, inst.mnemonic, inst.op_str)
        # Collect every w/x register referenced by the operands.
        res = re.findall(r'[^0]([wx][0-9]+)', " " + inst.op_str, re.I | re.M)
        # The first register named is treated as the destination register.
        if len(res) > 0:
            globalData.pre_regname = res[0]
        res = list(set(res))
        # The stack pointer does not match the pattern above; add it separately.
        if "sp" in inst.op_str:
            res.append("sp")
        # No registers involved: nothing to defer, print immediately.
        if len(res) <= 0:
            globalData.has_pre = False  # fix: was a no-op local assignment
            print(opstr)
            continue
        # Buffer this line together with the registers' current values.
        fenge = "\t\t------"
        curreg = ""
        for regname in res:
            regindex = reg_names[regname.upper()]
            regvalue = uc.reg_read(regindex)
            curreg += "%s=0x%x\t" % (regname, regvalue)
        globalData.pre_codestr = opstr + fenge + curreg
        globalData.has_pre = True
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
initGlobalData()
# 创建uc对象
uc = unicorn.Uc(unicorn.UC_ARCH_ARM64, unicorn.UC_MODE_ARM)
# 从内存中dump下来so的基址
code_addr = 0x7290e07000
# 用来存放so代码的大小,尽量大一点。内存不值钱
code_size = 8 * 0x1000 * 0x1000
# 创建一块内存
uc.mem_map(code_addr, code_size)
# 在上面那块内存后面继续划一片内存来当做栈空间
stack_addr = code_addr + code_size
stack_size = 0x1000
# 栈顶的位置,这里是64位的,所以偏移8个字节
stack_top = stack_addr + stack_size - 0x8
# 申请一块栈空间
uc.mem_map(stack_addr, stack_size)
# 栈空间往后继续划一块空间用来存放参数
args_addr = stack_addr + stack_size
args_size = 0x1000
uc.mem_map(args_addr, args_size)
# 设置每句汇编执行都会调用hook_code
uc.hook_add(unicorn.UC_HOOK_CODE, hook_code)
# 读取
# witopen("libnative-lib.so_0x7eae047000_0x38000.so", "rb") as f:
with open("libnative-lib.so_0x7290e07000_0x38000.so", "rb") as f:
sodata = f.read()
# 给前面创建的空间写入so的数据
uc.mem_write(code_addr, sodata)
# 要执行的代码开始位置
start_addr = code_addr + 0xFCB4
# 要执行的代码结束位置
end_addr = code_addr + 0xFF2C
# 随机生成一个入参
# input_str = ranstr(36)
input_str = "GKzRvnqywrgUfN0XZ5Bo9Juek3E8b2YtV74Q"
print("input:%s input_addr:0x%x" % (input_str, args_addr))
input_byte = str.encode(input_str)
# 将生成的入参写入前面创建的内存空间
uc.mem_write(args_addr, input_byte)
# ida中看到的函数有参数1、2,然后分别对应X0和X1,写入对应数据,栈寄存器给一个栈顶的地址
uc.reg_write(unicorn.arm64_const.UC_ARM64_REG_X0, args_addr)
uc.reg_write(unicorn.arm64_const.UC_ARM64_REG_X1, len(input_str))
uc.reg_write(unicorn.arm64_const.UC_ARM64_REG_SP, stack_top)
# 开始执行代码段
uc.emu_start(start_addr, end_addr)
# ida中看到返回值是直接写在入参中,所以结果我们直接从入参的内存中读取
result = uc.mem_read(args_addr, args_size)
print("result:", result.decode(encoding="utf-8"))
# 最后释放创建的内存
uc.mem_unmap(args_addr, args_size)
uc.mem_unmap(stack_addr, stack_size)
uc.mem_unmap(code_addr, code_size) | StarcoderdataPython |
5127984 | import sys
from data_manager import DataManager
# from flight_search import FlightSearch
# from flight_data import FlightData
# from notification_manager import NotificationManager
# Interactive sign-up flow: collect the user's details and register them.
dm = DataManager()
first_name = input("Firstname: ")
last_name = input("Lastname: ")
email = input("Email: ")
confirm_email = input("Confirm Email: ")
# Abort on a mistyped e-mail address rather than storing a bad one.
if email != confirm_email:
    print("Wooow....")
    sys.exit(1)
dm.add_user(first_name, last_name, email)
print("You're in the club!")
# The flight-search half of the project is currently disabled:
# fs = FlightSearch()
# nm = NotificationManager()
# for trip in dm.trips:
#     found = fs.upcoming("MSP", trip["iataCode"], trip["lowestPrice"])
#     if found["price"] < trip["lowestPrice"]:
#         nm.send(f"Found a cheap ${found['price']} flight from MSP to {trip['iataCode']}!")
6424808 | <gh_stars>0
#! /usr/bin/env python3
"""
multiple-well master equations
"""
import sys, os, time
import ctypes
import numpy as np
import scipy.linalg
from . import __version__
from . import constants
from .solverlib import load_library
from .solverlib import set_num_threads
from .solverlib import restore_num_threads
from .solverlib import get_num_threads
from .readfile import read1d
from .readfile import read2d
from .singlewell import ME1D
from .singlewell import ME2D
def prepare_multi1d(well_list, maxE=None):
    """ prepare well objects with aligned top-energy grains for multiple-well 1D ME
    well_list: list of tuple (well_name, rrkm_file, relative_energy)
    maxE: optional cap on the common top energy (same units as relative_energy)
    returns (names, wells): well names and the corresponding ME1D objects,
    all truncated at a common top-energy grain.
    """
    names = []
    tmpl = []
    dE = None
    # Common top-of-grid grain index (units of dE), limited by the shallowest data set.
    topEbin = None
    for well in well_list:
        name, fn, relE = well
        if name in names: raise ValueError("Duplicated well name: %s" % name)
        names.append(name)
        dE_this, rho, kl = read1d(fn)
        # All wells must share the same energy grain size.
        if dE is None: dE = dE_this
        elif dE != dE_this: raise ValueError("inconsistent dE: %s, %s" % (dE, dE_this))
        # Grain offset of this well's energy origin relative to the common zero.
        offset0 = int(relE/dE + 0.5)
        if (topEbin is None) or (topEbin > offset0 + len(rho)): topEbin = offset0 + len(rho)
        tmpl.append([rho, kl, offset0])
    # Optionally truncate the common top energy at maxE.
    if (maxE is not None) and ((topEbin-1) * dE > maxE): topEbin = int(maxE / dE) + 1
    wells = []
    for x in tmpl:
        rho, kl, offset0 = x
        # Truncate densities and rate coefficients at the aligned top grain.
        nsiz_this = topEbin - offset0
        rho = rho[:nsiz_this]
        for ich in range(len(kl)): kl[ich] = kl[ich][:nsiz_this]
        wells.append(ME1D(dE, rho, kl, offset0))
    return names, wells
def prepare_multi2d(well_list, dJ, maxE=None, maxJ=None):
    """ prepare well objects with aligned top-energy grains for multiple-well 2D ME
    well_list: list of tuple (well_name, rrkmEJ_file, relative_energy)
    dJ: J-grain interval passed to read2d; maxE/maxJ: optional caps on the
    common top energy and on J.
    returns (names, wells): well names and the corresponding ME2D objects,
    all truncated at a common top-energy grain.
    """
    names = []
    tmpl = []
    dE = None
    # Common top-of-grid grain index (units of dE), limited by the shallowest data set.
    topEbin = None
    for well in well_list:
        name, fn, relE = well
        if name in names: raise ValueError("Duplicated well name: %s" % name)
        names.append(name)
        dE_this, B2D, Jl, offsetl, rhol, kll = read2d(fn, dJ, maxJ=maxJ)
        # All wells must share the same energy grain size.
        if dE is None: dE = dE_this
        elif dE != dE_this: raise ValueError("inconsistent dE: %s, %s" % (dE, dE_this))
        # Grain offset of this well's energy origin relative to the common zero.
        offset0 = int(relE/dE + 0.5)
        sizE = len(rhol[0])
        if (topEbin is None) or (topEbin > offset0 + sizE): topEbin = offset0 + sizE
        tmpl.append([B2D, Jl, offsetl, rhol, kll, offset0])
    # Optionally truncate the common top energy at maxE.
    if (maxE is not None) and ((topEbin-1) * dE > maxE): topEbin = int(maxE / dE) + 1
    wells = []
    for x in tmpl:
        B2D, Jl, offsetl, rhol, kll, offset0 = x
        # Truncate each J-block in place at the aligned top grain.
        for iJ in range(len(Jl)):
            sizE_this = topEbin - offset0 - offsetl[iJ]
            rhol[iJ] = rhol[iJ][:sizE_this]
            for ich in range(len(kll)): kll[ich][iJ] = kll[ich][iJ][:sizE_this]
        wells.append(ME2D(dE, B2D, Jl, offsetl, rhol, kll, offset0))
    return names, wells
class MEBaseMW(object):
""" Base class for multiple-well master equations """
    def __init__(self):
        # Number of wells, their names and the per-well objects (filled later).
        self.nwell = None
        self.names = None
        self.wells = None
        # List of ((name1, ch1), (name2, ch2)) isomerization connections.
        self.connections = None
        # Vector layout: total size, per-well sizes and per-well start offsets.
        self.nsiz = None
        self.nsizl = None
        self.posl = None
        # Concatenated per-grain densities of states, rate coefficients and energies.
        self.rhoa = None
        self.ka = None
        self.Ea = None
        # channels[iwell][ich] = destination well index (None = dissociation).
        self.channels = None
        # Symmetrized isomerization rate coefficients and their matrix indices.
        self.kisom_sym = None
        self.kisom_i = None
        self.kisom_j = None
        self.is1d = False
        # Native solver library handle and its file name.
        self.lib, self.libfn = load_library()
def __getitem__(self, name):
""" returns well object """
if name in self.names:
return self.wells[self.names.index(name)]
else:
raise IndexError("Unknown well name: %s" % (name))
    def set_kisom(self, iwell1, ich1, iwell2, ich2):
        """Virtual method: store the symmetrized isomerization rate data
        for the connected channel pair.  Subclasses must override this.
        """
        raise TypeError("method set_kisom not provided")
def set_channels(self):
# self.channels[iwell_from][ich] = iwell_to; None for dissociation
self.channels = [[None for ich in range(well.nchan)] for well in self.wells]
self.kisom_sym = np.array([]) # array of symmetrized k for isomerization
self.kisom_i = np.array([], dtype=np.int64) # array of index i
self.kisom_j = np.array([], dtype=np.int64) # array of index j (j>i)
for icon in range(len(self.connections)):
(name1, ch1), (name2, ch2) = self.connections[icon]
ich1 = ch1 - 1
ich2 = ch2 - 1
if name1 not in self.names: raise ValueError("Well not found: %s" % name1)
if name2 not in self.names: raise ValueError("Well not found: %s" % name2)
if name1 == name2: raise ValueError("Invalid connection: %s" % (name1, name2))
iwell1 = self.names.index(name1)
iwell2 = self.names.index(name2)
if ich1 < 0 or ich1 >= self.wells[iwell1].nchan:
raise ValueError("Invalid channel %s (well %s)" % (ich1+1, name1))
if ich2 < 0 or ich2 >= self.wells[iwell2].nchan:
raise ValueError("Invalid channel %s (well %s)" % (ich2+1, name2))
if self.channels[iwell1][ich1] is not None:
raise ValueError("Duplicated channel %s (well %s)" % (ich1+1, name1))
if self.channels[iwell2][ich2] is not None:
raise ValueError("Duplicated channel %s (well %s)" % (ich2+1, name2))
self.channels[iwell1][ich1] = iwell2
self.channels[iwell2][ich2] = iwell1
self.set_kisom(iwell1, ich1, iwell2, ich2)
if self.nwell > 1:
isolated = True
for iwell in range(self.nwell):
if all(x is None for x in self.channels[iwell]):
raise ValueError("isolated well: %s" % (self.names[iwell]))
def get_channel_strings(self):
kstrs = []
for iwell in range(self.nwell):
well = self.wells[iwell]
for ich in range(well.nchan):
if self.channels[iwell][ich] is not None: continue
s = "%s-k%d" % (self.names[iwell], ich+1)
if self.channels[iwell][ich] is None: s += "(dis)"
else: s += "(to-%s)" % (self.names[self.channels[iwell][ich]])
kstrs.append(s)
xstrs = []
for iwell in range(self.nwell):
s = "x(%s)" % (self.names[iwell])
xstrs.append(s)
return kstrs, xstrs
def get_channel_strings_phnm(self):
dischl = []
for iwell in range(self.nwell):
well = self.wells[iwell]
for ich in range(well.nchan):
if self.channels[iwell][ich] is None:
dischl.append("%s-ch%d" % (self.names[iwell], ich+1))
kdstrl = [[] for x in dischl]
for i in range(len(dischl)):
for j in range(self.nwell):
kdstrl[i].append("%s->%s" % (self.names[j], dischl[i]))
kwstrl = [[] for i in range(self.nwell)]
for i in range(self.nwell):
jc = 0
for j in range(self.nwell):
if i == j: continue
kwstrl[i].append("%s->%s" % (self.names[j], self.names[i]))
return kdstrl, kwstrl
    def kphnm_from_ss(self, kll, popll):
        """ phenomenological rate constants from steady-state solutions
        kll: channel-specific overall decomposition rate constants in
        the steady-state decomposition of wells
        popll: steady-state populations during the steady-state
        decomposition of wells
        returns lists of the rate constants for dissociation and isomerization,
        kdl and kwl, corresponding to kdstrl and kwstrl, respectively, of the
        get_channel_strings_phnm() method
        """
        ndisch = len(kll[0])
        # G[i, j]: steady-state population of well j while well i decomposes.
        G = np.zeros((self.nwell, self.nwell))
        for i in range(self.nwell):
            for j in range(self.nwell): G[i,j] = popll[i][j]
        kdl = [np.zeros(self.nwell) for ich in range(ndisch)]
        # Solve G * kdl = kss for each dissociation channel, sharing one LU factorization.
        G_LU = scipy.linalg.lu_factor(G)
        for i in range(ndisch):
            kssl = np.zeros(self.nwell)
            for j in range(self.nwell): kssl[j] = kll[j][i]
            kdl[i] = scipy.linalg.lu_solve(G_LU, kssl)
        # Linear system for the nwell*(nwell-1) well-to-well rate constants.
        nm1 = self.nwell-1
        GW = np.zeros((self.nwell*nm1, self.nwell*nm1))
        dvec = np.zeros(self.nwell*nm1)
        for i in range(self.nwell):
            jc = 0
            for j in range(self.nwell):
                if i == j: continue
                dvec[i*nm1+jc] = G[j][i] * sum(kdl[ich][i] for ich in range(ndisch))
                kc = 0
                for k in range(self.nwell):
                    if k == i: continue
                    GW[i*nm1+jc][i*nm1+kc] = G[j][k]
                    # Column index of the reverse (i within block k) rate constant.
                    ic = i
                    if i > k: ic -= 1
                    GW[i*nm1+jc][k*nm1+ic] = -G[j][i]
                    kc += 1
                jc += 1
        GW_LU = scipy.linalg.lu_factor(GW)
        kw_all = scipy.linalg.lu_solve(GW_LU, dvec)
        # Split the flat solution vector into per-destination-well arrays.
        kwl = [np.copy(kw_all[iwell*nm1:(iwell+1)*nm1]) for iwell in range(self.nwell)]
        return kdl, kwl
    def get_channel_strings_phnm_ca(self, chemact_well_ch):
        """Return label strings for chemical-activation phenomenological rates.
        chemact_well_ch: (well name or well index, 1-based channel) of the
        entrance (chemically activating) channel.
        krstrl: reactant -> well labels; kbstrl: reactant -> fragments labels
        (the entrance channel itself is labelled "(no-rxn)").
        """
        chemact_well = chemact_well_ch[0]
        chemact_ch = chemact_well_ch[1]
        # Accept either a well name or an already-resolved well index.
        if chemact_well in self.names: chemact_well = self.names.index(chemact_well)
        chemact = "%s-ch%d" % (self.names[chemact_well], chemact_ch)
        # One entry per dissociation channel over all wells.
        dischl = []
        for iwell in range(self.nwell):
            well = self.wells[iwell]
            for ich in range(well.nchan):
                if self.channels[iwell][ich] is None:
                    dischl.append("%s-ch%d" % (self.names[iwell], ich+1))
        krstrl = ["%s->%s" % (chemact, self.names[i]) for i in range(self.nwell)]
        kbstrl = []
        for x in dischl:
            if x == chemact: kbstrl.append("%s(no-rxn)" % (chemact))
            else: kbstrl.append("%s->%s" % (chemact, x))
        return krstrl, kbstrl
def kphnm_from_cass(self, khpl, kl, popl, kdl, kwl):
    """ phenomenological rate constants from chemical activation steady-state solution
    khpl: HPL bimolecular rate constant
    kl: channel-specific apparent decomposition rate constants in
    the chemical activation steady state
    popl: steady-state populations during the chemical activation steady state
    kdl, kwl: outputs of kphnm_from_ss()
    returns lists of the rate constants for reactant-to-well and reactant-to-fragments,
    krl and kbl, corresponding to krstrl and kbstrl, respectively, of the
    get_channel_strings_phnm_ca() method
    """
    ndisch = len(kl)  # number of dissociation channels
    krl = [0. for i in range(self.nwell)]  # reactant -> well rate constants
    kbl = [0. for i in range(ndisch)]      # reactant -> fragments rate constants
    sumkl = sum(kl)
    for j in range(self.nwell):
        for i in range(self.nwell):
            if i == j: continue
            # jc/ic are off-diagonal indices into kwl[i] / kwl[j]: the
            # diagonal element is absent, so indices past it shift down by one
            jc = j
            ic = i
            if j > i: jc -= 1
            if i > j: ic -= 1
            # net isomerization balance between wells i and j
            # (presumably in-flux minus out-flux weighted by populations)
            krl[j] += popl[j]*kwl[i][jc] - popl[i]*kwl[j][ic]
        # add dissociative loss of well j over all dissociation channels
        for l in range(ndisch): krl[j] += popl[j] * kdl[l][j]
        krl[j] *= khpl / sumkl
    for l in range(ndisch):
        # direct reactant->fragments part: apparent channel rate minus the
        # contribution mediated by the steady-state well populations
        kbl[l] = kl[l]
        for j in range(self.nwell): kbl[l] -= popl[j] * kdl[l][j]
        kbl[l] *= khpl / sumkl
    return krl, kbl
def hpl(self, T):
    """High-pressure-limit thermal rate constants at temperature T (K).

    Returns (kdis, kl, ga, popl):
        kdis: total dissociation rate constant,
        kl:   per-dissociation-channel rate constants,
        ga:   normalized Boltzmann population vector over all grains,
        popl: total population of each well.
    """
    # Boltzmann distribution rho(E) * exp(-E/kT) over all grains, normalized
    ga = self.rhoa * np.exp(- self.Ea * constants.cm2k / T)
    ga /= ga.sum()
    kdis = 0.
    kl = []
    popl = []
    for iwell in range(self.nwell):
        well = self.wells[iwell]
        # slice of the population vector belonging to this well
        ga_this = ga[self.posl[iwell]:self.posl[iwell]+self.nsizl[iwell]]
        popl.append(ga_this.sum())
        for ich in range(well.nchan):
            # skip isomerization channels (those with a connection entry)
            if self.channels[iwell][ich] is not None: continue
            # thermally averaged microcanonical rate constant of this channel
            k = (well.kchl[ich] * ga_this).sum()
            kl.append(k)
            if self.channels[iwell][ich] is None: # dissoc
                # NOTE(review): always true here, since isomerization
                # channels were skipped by the continue above
                kdis += k
    return kdis, kl, ga, popl
def solve(self, T, p, gguess=None, solver="", bandpcrit=1e-9, neig=1,
          reactant=None, chemact_well_ch=None,
          verbose=False, nthreads=None, maxmemGB=None):
    """ solve ME by calling solve1d or solve2d function of the library
    T: temperature in K
    p: pressure in bar
    gguess: initial guess for iterative solver
    solver: see me2d.show_solvers()
    bandpcrit: truncation threshold for banded matrix (None to use dense matrix)
    neig: number of eigenpairs to be computed
    reactant: name of the reactant well (only for InvIter solver for strady-state decomposition)
    chemact_well_ch: recombination (well-name, channel) (for chemical activation with solver=LinEq; gguess has to be None)
    verbose: verbose flag (True/False or integer)
    nthreads: number of threads to be used in the computation
    maxmemGB: max memory size used by the solver in GB
    """
    logfp = sys.stdout
    # normalize optional arguments to the sentinel values the library expects
    if bandpcrit is None: bandpcrit = -1.
    if reactant is None: reactant = -1
    elif reactant in self.names: reactant = self.names.index(reactant)
    if chemact_well_ch is not None:
        chemact_well = chemact_well_ch[0]
        chemact_ch = chemact_well_ch[1]
        # the well may be given by name or by index
        if chemact_well in self.names: chemact_well = self.names.index(chemact_well)
    if nthreads is not None:
        # remember current thread settings so they can be restored afterwards
        max_threads_orig = get_num_threads()
        set_num_threads(nthreads)
    if maxmemGB is not None: self.lib.set_me_maxmem_GB(maxmemGB)
    if verbose:
        logfp.write("%s ver.%s: %s.solve started at %s\n"
                    % (__package__, __version__, self.__class__.__name__,
                       time.strftime("%a, %d %b %Y %H:%M:%S")))
        logfp.write("Library: %s\n" % (self.libfn))
        max_threads = get_num_threads()
        if len(max_threads) > 0:
            for x in max_threads: logfp.write("%s max threads = %s\n" % (x[0], x[1]))
        if maxmemGB is not None:
            logfp.write("Max memory size = %s GB\n" % (self.lib.get_me_maxmem_GB()))
        # summarize the effective solver options for the log
        solverstr = solver
        if bandpcrit >= 0: solverstr += " (banded, pcrit=%.1e)" % (bandpcrit)
        if reactant >= 0: solverstr += " (reactant = %s)" % (self.names[reactant])
        if chemact_well_ch is not None:
            solverstr += " (chemact: well = %s, ch = %s)" % (self.names[chemact_well], chemact_ch)
        logfp.write("T=%.0f, p=%.2e, solver=%s\n" % (T, p, solverstr))
        logfp.flush()
    nsiz = np.array(self.nsizl, dtype=np.int64)
    # NOTE: bandpcrit is re-bound here from scalar to a per-grain array
    bandpcrit = np.full(self.nsiz, bandpcrit)
    # p is given in bar
    dens = p * 0.1 / constants.kb / T # molecule/cm3
    ZM = np.array([well.Z * dens for well in self.wells]) # s^-1
    kbT = T / constants.cm2k # cm^-1
    # the library takes verbosity as an integer
    if verbose is True: verbose = 1
    elif verbose is False: verbose = 0
    # initial vector: explicit guess > chemical-activation flux > thermal distribution
    if gguess is not None: vec = np.array(gguess)
    elif chemact_well_ch is not None:
        # chemical activation flux
        if self.channels[chemact_well][chemact_ch-1] is not None:
            logfp.write("WARNING: THIS IS AN ISOMERIZATION CHANNEL!\n")
            logfp.flush()
        ga = self.rhoa * np.exp(- self.Ea * constants.cm2k / T)
        ga_sel = ga[self.posl[chemact_well]:self.posl[chemact_well]+self.nsizl[chemact_well]]
        flx = ga_sel * self.wells[chemact_well].kchl[chemact_ch-1]
        # flux enters only the grains of the recombination well
        vec = np.zeros(len(ga))
        vec[self.posl[chemact_well]:self.posl[chemact_well]+self.nsizl[chemact_well]] = flx
    else: vec = self.rhoa * np.exp(- self.Ea * constants.cm2k / T) # thermal distrib
    vals = np.zeros(neig)
    # the library stores neig solution vectors contiguously in vec; pad if needed
    if len(vec) < neig*self.nsiz: vec = np.append(vec, np.zeros(neig*self.nsiz - len(vec)))
    if self.is1d:
        y_e = np.array([well.y_e for well in self.wells])
        ainv_ea = np.concatenate([well.ainv_ea for well in self.wells])
        ptype = np.full(self.nsiz, -1, dtype=np.int64) # downward prob. given
        res = self.lib.solve1d_mw(self.nwell, nsiz, neig, vals, vec,
                                  self.Ea, self.rhoa, self.ka,
                                  y_e, ainv_ea, ptype,
                                  len(self.kisom_sym), self.kisom_sym,
                                  self.kisom_i, self.kisom_j,
                                  bandpcrit, ZM, kbT, solver.encode(), reactant, verbose)
    else:
        y_e = np.array([well.y_e for well in self.wells])
        y_J = np.array([well.y_J for well in self.wells])
        ainv_ea = np.concatenate([well.ainv_ea for well in self.wells])
        ainv_Ja = np.concatenate([well.ainv_Ja for well in self.wells])
        ptype = np.full(self.nsiz, 0, dtype=np.int64) # symmetrized prob. given
        res = self.lib.solve2d_mw(self.nwell, nsiz, neig, vals, vec,
                                  self.Ea, self.ea, self.Ja, self.rhoa, self.ka,
                                  y_e, y_J, ainv_ea, ainv_Ja, ptype,
                                  len(self.kisom_sym), self.kisom_sym,
                                  self.kisom_i, self.kisom_j,
                                  bandpcrit, ZM, kbT, solver.encode(), reactant, verbose)
    if nthreads is not None:
        restore_num_threads(max_threads_orig)
    if res < 0: raise ValueError("ERROR in solver: res = %g" % res)
    # first eigenvalue gives the overall decomposition rate constant
    ksol = -vals[0]
    # first solution vector is the (steady-state) population distribution
    ga = vec[:self.nsiz]
    kdis = 0.
    kl = []
    popl = []
    for iwell in range(self.nwell):
        well = self.wells[iwell]
        ga_this = ga[self.posl[iwell]:self.posl[iwell]+self.nsizl[iwell]]
        popl.append(ga_this.sum())
        for ich in range(well.nchan):
            # skip isomerization channels
            if self.channels[iwell][ich] is not None: continue
            k = (well.kchl[ich] * ga_this).sum()
            kl.append(k)
            if self.channels[iwell][ich] is None: # dissoc
                kdis += k
    # consistency check between eigenvalue and flux-summed rate constants
    # (not meaningful for the LinEq steady-state solvers)
    kdiff = abs((kdis - ksol) / kdis)
    if (kdiff > 0.01) and (not solver.startswith("LinEq")):
        logfp.write("WARNING: |kdis-ksol|/|kdis| = %.2e (kdis=%.6e, ksol=%.6e)\n"
                    % (kdiff, kdis, ksol))
        logfp.flush()
    return kdis, kl, ga, popl, vals, vec
class ME1DMW(MEBaseMW):
    """ multiple-well 1D master equation """

    @classmethod
    def read_from(cls, well_list, connections, maxE=None):
        """ read well_list and return an instance of the class
        arguments:
          well_list: list of tuple (well_name, rrkm_file, relative_energy)
          connections: list of isomerization channels ((well_name, ichan), (well_name, ichan))
                       (note: channel index starts from 1)
        """
        names, wells = prepare_multi1d(well_list, maxE=maxE)
        return cls(names, wells, connections)

    def __init__(self, names, wells, connections):
        """
        names: list of well names
        wells: list of ME1D objects
        connections: list of isomerization channels ((well_name, ichan), (well_name, ichan))
                     (note: channel index starts from 1)
        """
        super().__init__()
        self.is1d = True
        self.nwell = len(names)
        self.names = names
        self.wells = wells
        self.connections = connections
        self.dE = None    # common grain size shared by all wells
        self.topE = None  # common top energy shared by all wells
        self.nsizl = []   # number of grains of each well
        self.posl = []    # offset of each well in the concatenated arrays
        for iwell in range(self.nwell):
            # all wells must share the same grain size ...
            dE_this = self.wells[iwell].dE
            if self.dE is None: self.dE = dE_this
            elif self.dE != dE_this:
                raise ValueError("inconsistent dE: %s, %s" % (self.dE, dE_this))
            # ... and the same top energy
            topE_this = self.wells[iwell].Ea[-1]
            if self.topE is None: self.topE = topE_this
            elif self.topE != topE_this:
                raise ValueError("inconsistent topE: %s, %s" % (self.topE, topE_this))
            self.nsizl.append(self.wells[iwell].nsiz)
            if iwell == 0: self.posl.append(0)
            else: self.posl.append(self.posl[iwell-1] + self.wells[iwell-1].nsiz)
        self.nsiz = sum(self.nsizl)
        # concatenated per-grain arrays over all wells
        self.Ea = np.concatenate([well.Ea for well in self.wells])
        self.rhoa = np.concatenate([well.rhoa for well in self.wells])
        self.ka = np.concatenate([well.ka for well in self.wells])
        self.set_channels()

    def set_kisom(self, iwell1, ich1, iwell2, ich2):
        """Store symmetrized isomerization rate constants coupling two wells.

        ich1/ich2 are the (0-based) channel indices of the mutual
        isomerization channel in well iwell1 and iwell2, respectively.
        """
        well1 = self.wells[iwell1]
        well2 = self.wells[iwell2]
        # align the two energy grids via their grain offsets; the overlapping
        # range contains nk grains
        if (well1.offset0 > well2.offset0):
            start1 = 0
            start2 = well1.offset0 - well2.offset0
            nk = well1.nsiz
        else:
            start1 = well2.offset0 - well1.offset0
            start2 = 0
            nk = well2.nsiz
        rho1 = well1.rhoa[start1:]
        rho2 = well2.rhoa[start2:]
        k1 = well1.kchl[ich1][start1:]
        k2 = well2.kchl[ich2][start2:]
        # nh1 and nh2 should agree grain by grain (presumably detailed
        # balance: rho1*k1 == rho2*k2); nh is their geometric mean
        nh1 = rho1 * k1
        nh2 = rho2 * k2
        nh = np.sqrt(nh1) * np.sqrt(nh2)
        # check symmetry
        rdiffmax = 0.
        for ik in range(nk):
            # skip check for small k
            if (k1[ik] < 1e-6) and (k2[ik] < 1e-6): continue
            # +/- 1 grain tolerance (for numerical discretization error)
            diff = None
            if ik == 0:
                if nh1[ik] > nh2[ik+1]: diff = nh1[ik] - nh2[ik+1]
            elif ik == nk-1:
                if nh1[ik] < nh2[ik-1]: diff = nh2[ik-1] - nh1[ik]
            else:
                if nh1[ik] < nh2[ik-1]: diff = nh2[ik-1] - nh1[ik]
                elif nh1[ik] > nh2[ik+1]: diff = nh1[ik] - nh2[ik+1]
            if diff is not None:
                rdiff = abs(diff) / nh[ik]
                if rdiff > rdiffmax: rdiffmax = rdiff
        if rdiffmax > 0.3:
            raise ValueError("asymmetry detected: %s %% between %s and %s"
                             % (rdiffmax*100, self.names[iwell1], self.names[iwell2]))
        ksym = nh / (np.sqrt(rho1) * np.sqrt(rho2)) # store symmetrized k (= k_i * sqrt(rho_i/rho_j))
        self.kisom_sym = np.append(self.kisom_sym, ksym)
        # global grain indices of the coupled elements
        pos1 = self.posl[iwell1] + start1
        pos2 = self.posl[iwell2] + start2
        pos1a = pos1 + np.arange(nk, dtype=np.int64)
        pos2a = pos2 + np.arange(nk, dtype=np.int64)
        if pos1 < pos2:
            self.kisom_i = np.append(self.kisom_i, pos1a) # array of index i
            self.kisom_j = np.append(self.kisom_j, pos2a) # array of index j (j>i)
        else:
            self.kisom_i = np.append(self.kisom_i, pos2a) # array of index i
            self.kisom_j = np.append(self.kisom_j, pos1a) # array of index j (j>i)
        return
class ME2DMW(MEBaseMW):
    """ multiple-well 2D master equation """

    @classmethod
    def read_from(cls, well_list, connections, dJ, maxE=None, maxJ=None):
        """ read well_list and return an instance of the class
        arguments:
          well_list: list of tuple (well_name, rrkmEJ_file, relative_energy)
          connections: list of isomerization channels ((well_name, ichan), (well_name, ichan))
                       (note: channel index starts from 1)
        """
        names, wells = prepare_multi2d(well_list, dJ, maxE=maxE, maxJ=maxJ)
        return cls(names, wells, connections)

    def __init__(self, names, wells, connections):
        """
        names: list of well names
        wells: list of ME2D objects
        connections: list of isomerization channels ((well_name, ichan), (well_name, ichan))
                     (note: channel index starts from 1)
        """
        super().__init__()
        self.is1d = False
        self.nwell = len(names)
        self.names = names
        self.wells = wells
        self.connections = connections
        self.dE = None    # common grain size shared by all wells
        self.topE = None  # common top energy shared by all wells
        self.Jl = None    # union J grid (longest consistent per-well J list)
        self.nsizl = []   # number of (E,J) grains of each well
        self.posl = []    # offset of each well in the concatenated arrays
        for iwell in range(self.nwell):
            # all wells must share the same grain size ...
            dE_this = self.wells[iwell].dE
            if self.dE is None: self.dE = dE_this
            elif self.dE != dE_this:
                raise ValueError("inconsistent dE: %s, %s" % (self.dE, dE_this))
            # ... and the same top energy
            topE_this = max(self.wells[iwell].Ea)
            if self.topE is None: self.topE = topE_this
            elif self.topE != topE_this:
                raise ValueError("inconsistent topE: %s, %s" % (self.topE, topE_this))
            # J grids must agree on their common prefix; keep the longest one
            Jl_this = self.wells[iwell].Jl
            if self.Jl is None: self.Jl = Jl_this[:]
            else:
                Jlen = min(len(self.Jl), len(Jl_this))
                for iJ in range(Jlen):
                    if self.Jl[iJ] != Jl_this[iJ]:
                        raise ValueError("inconsistent Jl: %s, %s" % (self.Jl, Jl_this))
                if len(Jl_this) > len(self.Jl): self.Jl = Jl_this[:]
            self.nsizl.append(self.wells[iwell].nsiz)
            if iwell == 0: self.posl.append(0)
            else: self.posl.append(self.posl[iwell-1] + self.wells[iwell-1].nsiz)
        self.nsiz = sum(self.nsizl)
        # concatenated per-grain arrays over all wells
        self.Ea = np.concatenate([well.Ea for well in self.wells])
        self.ea = np.concatenate([well.ea for well in self.wells])
        self.Ja = np.concatenate([well.Ja for well in self.wells])
        self.rhoa = np.concatenate([well.rhoa for well in self.wells])
        self.ka = np.concatenate([well.ka for well in self.wells])
        self.set_channels()

    def set_kisom(self, iwell1, ich1, iwell2, ich2):
        """Store symmetrized isomerization rate constants coupling two wells,
        resolved for each rotational quantum number J."""
        well1 = self.wells[iwell1]
        well2 = self.wells[iwell2]
        Jlen = min(len(well1.Jl), len(well2.Jl))
        for iJ in range(Jlen):
            J = well1.Jl[iJ]
            # align the two energy grids of this J via their offsets;
            # the overlapping range contains nk grains
            off1 = well1.offset0 + well1.offsetl[iJ]
            off2 = well2.offset0 + well2.offsetl[iJ]
            if (off1 > off2):
                start1 = 0
                start2 = off1 - off2
                nk = len(well1.rhol[iJ])
            else:
                start1 = off2 - off1
                start2 = 0
                nk = len(well2.rhol[iJ])
            rho1 = well1.rhol[iJ][start1:]
            rho2 = well2.rhol[iJ][start2:]
            k1 = well1.kll[ich1][iJ][start1:]
            k2 = well2.kll[ich2][iJ][start2:]
            # nh1 and nh2 should agree grain by grain (presumably detailed
            # balance: rho1*k1 == rho2*k2); nh is their geometric mean
            nh1 = rho1 * k1
            nh2 = rho2 * k2
            nh = np.sqrt(nh1) * np.sqrt(nh2)
            # check symmetry
            rdiffmax = 0.
            for ik in range(nk):
                # skip check for small k
                if (k1[ik] < 1e-6) and (k2[ik] < 1e-6): continue
                # +/- 1 grain tolerance (for numerical discretization error)
                diff = None
                if ik == 0:
                    if nh1[ik] > nh2[ik+1]: diff = nh1[ik] - nh2[ik+1]
                elif ik == nk-1:
                    if nh1[ik] < nh2[ik-1]: diff = nh2[ik-1] - nh1[ik]
                else:
                    if nh1[ik] < nh2[ik-1]: diff = nh2[ik-1] - nh1[ik]
                    elif nh1[ik] > nh2[ik+1]: diff = nh1[ik] - nh2[ik+1]
                if diff is not None:
                    rdiff = abs(diff) / nh[ik]
                    if rdiff > rdiffmax: rdiffmax = rdiff
            if rdiffmax > 0.3:
                raise ValueError("asymmetry detected: %s %% between %s and %s"
                                 % (rdiffmax*100, self.names[iwell1], self.names[iwell2]))
            ksym = nh / (np.sqrt(rho1) * np.sqrt(rho2)) # store symmetrized k (= k_i * sqrt(rho_i/rho_j))
            self.kisom_sym = np.append(self.kisom_sym, ksym)
            # map each (E, J) grain to its index in the flattened layout:
            # grains are stored E-major with a variable number of J entries
            # per E (sizJl), and the J index runs in reverse within each E
            # (iJr = sizJl[iE] - 1 - iJ)
            pos1a = np.zeros(nk, dtype=np.int64)
            iEstart = 0
            for iE in range(well1.offsetl[iJ] + start1):
                iEstart += well1.sizJl[iE]
            for ik in range(nk):
                iE = well1.offsetl[iJ] + start1 + ik
                iJr = well1.sizJl[iE] - 1 - iJ
                pos1a[ik] = self.posl[iwell1] + iEstart + iJr
                iEstart += well1.sizJl[iE]
            pos2a = np.zeros(nk, dtype=np.int64)
            iEstart = 0
            for iE in range(well2.offsetl[iJ] + start2):
                iEstart += well2.sizJl[iE]
            for ik in range(nk):
                iE = well2.offsetl[iJ] + start2 + ik
                iJr = well2.sizJl[iE] - 1 - iJ
                pos2a[ik] = self.posl[iwell2] + iEstart + iJr
                iEstart += well2.sizJl[iE]
            # sanity check: coupled grains must have identical (E, J)
            for ik in range(nk):
                if well1.Ea[pos1a[ik]-self.posl[iwell1]] != well2.Ea[pos2a[ik]-self.posl[iwell2]]:
                    raise ValueError("Error in set_kisom: inconsistent Ea")
                if well1.Ja[pos1a[ik]-self.posl[iwell1]] != well2.Ja[pos2a[ik]-self.posl[iwell2]]:
                    raise ValueError("Error in set_kisom: inconsistent Ja")
            if self.posl[iwell1] < self.posl[iwell2]:
                self.kisom_i = np.append(self.kisom_i, pos1a) # array of index i
                self.kisom_j = np.append(self.kisom_j, pos2a) # array of index j (j>i)
            else:
                self.kisom_i = np.append(self.kisom_i, pos2a) # array of index i
                self.kisom_j = np.append(self.kisom_j, pos1a) # array of index j (j>i)
        return
| StarcoderdataPython |
4891551 | <filename>chainerrl/v_function.py<gh_stars>100-1000
from abc import ABCMeta
from abc import abstractmethod
class VFunction(object, metaclass=ABCMeta):
    """Abstract base class for state-value functions V(x)."""

    @abstractmethod
    def __call__(self, x):
        """Evaluate the value function for the given input batch."""
        raise NotImplementedError
| StarcoderdataPython |
3292461 | from django.core.exceptions import PermissionDenied
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
class BackendPermissionViewMixin(object):
    '''
    Base mixin for views that are checking permissions.

    NOTE(review): legacy Python 2 / old-Django codebase (``unicode`` builtin,
    callable ``is_authenticated()``); do not port piecemeal.
    '''

    def get_required_permissions(self):
        # Permissions required for any access; override to add more.
        return ['django_backend.access_backend']

    def get_required_object_permissions(self):
        # Object-level permissions; empty by default, override as needed.
        return []

    def has_perm(self, perm, obj=None):
        '''
        Checks if the request's user has the given permission. The permission
        will actually be checked by the backend. You can give a shortcut like::

            self.has_perm('change')

        Instead of including the model name::

            self.has_perm('change_page')
        '''
        return self.backend.has_perm(self.request.user, perm, obj)

    def check_permission(self):
        '''
        Override this if you need to implement a sophisticated hook that is not
        just done with ``user.has_perm`` checks.
        Ideally you don't need this and everything is implemented by using
        callable perms.
        This will only be called if the other permission checks were successfull.
        '''
        return True

    def _check_permissions(self):
        # Returns a redirect response for anonymous users, raises
        # PermissionDenied on failed checks, and returns None on success.
        # Do not allow access for any non logged in user.
        if not self.request.user.is_authenticated():
            return self.redirect_to_login()
        # Do not allow access for any non-staff user.
        if not self.request.user.has_perm('django_backend.access_backend'):
            raise PermissionDenied
        # First check the permissions given by the
        # ``get_required_permissions`` method.
        for permission in self.get_required_permissions():
            if not self.has_perm(permission):
                raise PermissionDenied
        # If there is a object set for this view, we need to check if we are
        # allowed to access it.
        if hasattr(self, 'get_object'):
            obj = self.get_object()
            if obj is not None:
                for permission in self.get_required_object_permissions():
                    if not self.has_perm(permission, obj):
                        raise PermissionDenied
        # Now this is a hook we can implement more sophisticated checks.
        if not self.check_permission():
            raise PermissionDenied

    def pre_dispatch(self, request, *args, **kwargs):
        # Run the permission checks before dispatching; short-circuit with the
        # login redirect if one was returned.
        response = self._check_permissions()
        if response:
            return response
        return super(BackendPermissionViewMixin, self).pre_dispatch(request, *args, **kwargs)

    def redirect_to_login(self):
        # Redirect to the backend login view, preserving the requested path.
        urlname = self.backend.base.urlnames.views['login'].name
        return HttpResponseRedirect('{0}?next={1}'.format(
            reverse(urlname),
            self.request.path))

    def get_permission_denied_redirect_url(self):
        # NOTE(review): uses ``self.reverse`` (presumably backend-aware),
        # unlike redirect_to_login which uses the global ``reverse`` — confirm
        # this asymmetry is intentional.
        urlname = self.backend.base.urlnames.views['index'].name
        return self.reverse(urlname)

    def handle_permission_denied(self, request, exception):
        '''
        Display an error message and redirect to a url defined by
        ``get_permission_denied_redirect_url``.
        '''
        # Only redirect the user to the backend index if the user can access
        # it.
        if self.request.user.has_perm('django_backend.access_backend'):
            message = unicode(exception)  # Python 2 builtin (legacy)
            if not message:
                message = _("You don't have the required permissions.")
            messages.error(request, _('Sorry, you cannot access this page. %(message)s') % {
                'message': message,
            })
            return HttpResponseRedirect(self.get_permission_denied_redirect_url())
        # Re-raise to return a 403 response.
        raise exception
| StarcoderdataPython |
9699721 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for start game related functionality
"""
class StartGameScreen():
    """
    Screen to start game: lets the player pick a character class.

    .. versionadded:: 0.9
    """

    def __init__(self, generator, application, screen):
        """
        Default constructor.

        :param generator: creature generator exposing ``configuration``
            (mapping of class name to configuration) and
            ``generate_creature(class_name)``
        :param application: owning application (kept for interface
            compatibility; currently unused)
        :param screen: curses-style window used for drawing and key input
        """
        super().__init__()
        self.generator = generator
        # snapshot the class names so indexing is stable and cheap
        self.class_names = list(self.generator.configuration.keys())
        self.screen = screen

    def show(self):
        """
        Show this screen and block until the player picks a valid class.

        :returns: the creature generated for the selected class
        """
        self.draw_screen()
        selection = None
        while selection is None:
            key = self.screen.getch()
            try:
                selection = int(chr(key))
            except ValueError:
                # Non-numeric key pressed (or no input); keep waiting.
                # BUGFIX: previously ``selection`` was left as None and then
                # compared with ``<``, raising TypeError under Python 3.
                continue
            if selection < 0 or selection > len(self.class_names) - 1:
                selection = None
        return self.generator.generate_creature(self.class_names[selection])

    def draw_screen(self):
        """Clear the screen and list the selectable classes with their indices."""
        self.screen.clear()
        for index, class_name in enumerate(self.class_names):
            self.screen.addstr(5 + index, 20,
                               '{0}. {1}'.format(index, class_name))
        self.screen.refresh()
| StarcoderdataPython |
1921636 | """Process management module."""
from .manager import ProcessManager
from .process import ManagedProcess, ProcessInfo
__all__ = [
"ManagedProcess",
"ProcessInfo",
"ProcessManager",
]
| StarcoderdataPython |
3578625 | import sys
import matplotlib.pyplot as plt
import csv
import numpy
# Figure title, output file name and y-axis label for the latency plot.
title = "Server latency vs Number of clients"
output = "comparison_latency.pdf"
y_label = "Latency (μs)"
def setBoxColors(bp):
    """Color the four boxes of one boxplot group, one color per server.

    bp: the dict returned by ``Axes.boxplot`` for a group of four box plots.
    Each box i owns artists boxes[i], medians[i], and the pairs
    whiskers[2i:2i+2] / caps[2i:2i+2].
    """
    # Fixed per-server palette: libzmq, JeroMQ, Mirage-zmq (unikernel),
    # Mirage-zmq (executable) -- must match the legend built in plot_box().
    colors = ['red', 'black', 'blue', 'green']
    for i, color in enumerate(colors):
        plt.setp(bp['boxes'][i], color=color)
        plt.setp(bp['medians'][i], color=color)
        plt.setp(bp['caps'][2 * i], color=color)
        plt.setp(bp['caps'][2 * i + 1], color=color)
        plt.setp(bp['whiskers'][2 * i], color=color)
        plt.setp(bp['whiskers'][2 * i + 1], color=color)
def read(input, numThreads):
    """Read a latency CSV file and return per-row averages.

    input: path to a CSV file whose first row is a header; each data row is
        a label followed by one or more numeric latency samples.
    numThreads: divisor used to average the per-row sum across threads.
    Returns a numpy array with one averaged value per data row.
    """
    with open(input, 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        y = []
        for index, row in enumerate(rows):
            if index == 0:
                continue  # skip the header row
            # BUGFIX: the original summed float(row[1]) once per column,
            # instead of summing the actual column values.
            total = 0.0
            for value in row[1:]:
                total += float(value)
            y.append(total / numThreads)
    return numpy.array(y)
def plot_box(info_list, colors):
    """Draw grouped latency box plots for several servers and save the figure.

    info_list: list of (server_name, csv_prefix) pairs; for every prefix the
        files <prefix>_single/two/ten/hundred.csv are read.
    colors: mapping of server name to plot color.
        NOTE(review): currently unused -- the colors are hard-coded both in
        setBoxColors() and in the legend below.
    """
    data = {}
    data['1 client'] = []
    data['2 clients'] = []
    data['10 clients'] = []
    data['100 clients'] = []
    no_of_messages = 1000  # NOTE(review): unused local
    server_names = []
    plots = ['1 client', '2 clients', '10 clients', '100 clients']
    # collect, per client count, one latency series per server
    for server_name, prefix in info_list:
        prefix = prefix + "_"
        server_names.append(server_name)
        data['1 client'].append(read(prefix + "single.csv", 1))
        data['2 clients'].append(read(prefix + "two.csv", 1))
        data['10 clients'].append(read(prefix + "ten.csv", 1))
        data['100 clients'].append(read(prefix + "hundred.csv", 1))
    fig = plt.figure()
    ax = plt.axes()
    plt.title(title)
    ax.set_ylabel(y_label)
    ax.set_yscale('log')
    number_per_group = len(server_names) #3
    # one box group per client count, one box per server within a group;
    # groups are separated by one empty position on the x axis
    for index, plot in enumerate(plots):
        bp = ax.boxplot(data[plot], positions=range(
            index * (number_per_group + 1) + 1, (index + 1) * (number_per_group + 1) ), widths=0.6, sym='')
        setBoxColors(bp)
    ax.set_xticklabels(plots)
    ax.set_xticks([2 + (number_per_group + 1) * i for i in range(0, len(plots))])
    plt.xlim(0, (number_per_group + 1) * len(plots) + 1)
    # empty plots only serve to populate the legend with the server colors
    plt.plot([], c='red', label='libzmq')
    plt.plot([], c='black', label='JeroMQ')
    plt.plot([], c='blue', label='Mirage-zmq (unikernel)')
    plt.plot([], c='green', label='Mirage-zmq (executable)')
    plt.legend()
    plt.savefig(output)
# Build the comparison plot for all four server implementations.  The second
# argument maps display names to colors (currently unused by plot_box).
plot_box(
    [("libzmq", "cpp"), ("JeroMQ", "java"), ("Mirage-zmq (unikernel)",
        "unikernel"), ("Mirage-zmq (executable)", "exe")],
    {'libzmq': 'red', 'JeroMQ': 'black', 'Mirage-zmq (unikernel)': 'blue', "Mirage-zmq (executable)": 'green'})
| StarcoderdataPython |
333026 | <reponame>jonasvj/TFDE<filename>datasets/synthetic.py
import datasets
import numpy as np
class EightGaussians:
    """Normalised 8-Gaussians toy dataset with train/val/test splits."""

    class Data:
        """Wraps a sample matrix as float32 ``x`` with row count ``N``."""

        def __init__(self, data):
            self.x = data.astype(np.float32)
            self.N = self.x.shape[0]

    def __init__(self):
        path = datasets.root + 'synthetic/8gaussians.npy'
        splits = load_data_normalised(path)
        self.trn, self.val, self.tst = (self.Data(split) for split in splits)
        self.n_dims = self.trn.x.shape[1]
class Checkerboard:
    """Normalised checkerboard toy dataset with train/val/test splits."""

    class Data:
        """Wraps a sample matrix as float32 ``x`` with row count ``N``."""

        def __init__(self, data):
            self.x = data.astype(np.float32)
            self.N = self.x.shape[0]

    def __init__(self):
        path = datasets.root + 'synthetic/checkerboard.npy'
        splits = load_data_normalised(path)
        self.trn, self.val, self.tst = (self.Data(split) for split in splits)
        self.n_dims = self.trn.x.shape[1]
class TwoSpirals:
    """Normalised two-spirals toy dataset with train/val/test splits."""

    class Data:
        """Wraps a sample matrix as float32 ``x`` with row count ``N``."""

        def __init__(self, data):
            self.x = data.astype(np.float32)
            self.N = self.x.shape[0]

    def __init__(self):
        path = datasets.root + 'synthetic/2spirals.npy'
        splits = load_data_normalised(path)
        self.trn, self.val, self.tst = (self.Data(split) for split in splits)
        self.n_dims = self.trn.x.shape[1]
def load_data(root_path):
    """Load a .npy sample file, shuffle it deterministically (seed 42) and
    split it into train/validation/test thirds."""
    rng = np.random.RandomState(seed=42)
    samples = np.load(root_path)
    rng.shuffle(samples)
    # keep the original float-based split sizes for exact compatibility
    n_train = int((1 / 3) * samples.shape[0])
    n_val = int((1 / 3) * samples.shape[0])
    train = samples[:n_train]
    val = samples[n_train:n_train + n_val]
    test = samples[n_train + n_val:]
    return train, val, test
def load_data_normalised(root_path):
    # Standardise every split with statistics of the *training* split only,
    # to avoid leaking information from validation/test data.
    data_train, data_val, data_test = load_data(root_path)
    mu = data_train.mean(axis=0)
    s = data_train.std(axis=0)
    data_train = (data_train - mu) / s
    data_val = (data_val - mu) / s
    data_test = (data_test - mu) / s
    return data_train, data_val, data_test | StarcoderdataPython |
5159739 | <gh_stars>0
"""Add Envelope Functionality
Revision ID: b3c0c76ac2e6
Revises: <PASSWORD>
Create Date: 2018-04-06 20:17:17.556595
"""
# revision identifiers, used by Alembic.
revision = 'b3c0c76ac2e6'  # id of this migration
down_revision = '<PASSWORD>'  # id of the parent migration (value scrubbed in this dump)

from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the envelope_templates and envelopes tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('envelope_templates',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('agency_ein', sa.String(length=4), nullable=True),
        sa.Column('title', sa.String(), nullable=False),
        sa.Column('template_name', sa.String(), nullable=False),
        sa.ForeignKeyConstraint(['agency_ein'], ['agencies.ein'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('envelopes',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('latex', sa.String(), nullable=True),
        # id is both PK and FK: envelopes is a one-to-one extension of responses
        sa.ForeignKeyConstraint(['id'], ['responses.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the tables created by upgrade(), children first."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('envelopes')
    op.drop_table('envelope_templates')
    ### end Alembic commands ###
| StarcoderdataPython |
3201346 | <filename>openmldefaults/config_spaces/svc.py<gh_stars>1-10
import ConfigSpace
def get_hyperparameter_search_space_small(seed):
    """
    Small version of svm config space, featuring important hyperparameters
    based on https://arxiv.org/abs/1710.04725

    Parameters
    ----------
    seed: int
        Random seed that will be used to sample random configurations

    Returns
    -------
    cs: ConfigSpace.ConfigurationSpace
        The configuration space object
    """
    cs = ConfigSpace.ConfigurationSpace('sklearn.svm.SVC', seed)
    # C and gamma span the classic libsvm grid (2^-15 .. 2^15-ish, log scale)
    C = ConfigSpace.UniformFloatHyperparameter(
        name='svc__C', lower=0.03125, upper=32768, log=True, default_value=1.0)
    kernel = ConfigSpace.CategoricalHyperparameter(
        name='svc__kernel', choices=['rbf', 'poly', 'sigmoid'], default_value='rbf')
    degree = ConfigSpace.UniformIntegerHyperparameter(
        name='svc__degree', lower=1, upper=5, default_value=3)
    gamma = ConfigSpace.UniformFloatHyperparameter(
        name='svc__gamma', lower=3.0517578125e-05, upper=8, log=True, default_value=0.1)
    coef0 = ConfigSpace.UniformFloatHyperparameter(
        name='svc__coef0', lower=-1, upper=1, default_value=0)
    cs.add_hyperparameters([
        C,
        kernel,
        degree,
        gamma,
        coef0
    ])
    # degree only matters for the polynomial kernel;
    # coef0 only affects the poly and sigmoid kernels
    degree_depends_on_poly = ConfigSpace.EqualsCondition(degree, kernel, 'poly')
    coef0_condition = ConfigSpace.InCondition(coef0, kernel, ['poly', 'sigmoid'])
    cs.add_condition(degree_depends_on_poly)
    cs.add_condition(coef0_condition)
    return cs
def get_hyperparameter_search_space_micro(seed):
    """
    Small version of svm config space, featuring important hyperparameters
    as used by:
    http://metalearning.ml/2018/papers/metalearn2018_paper70.pdf

    Parameters
    ----------
    seed: int
        Random seed that will be used to sample random configurations

    Returns
    -------
    cs: ConfigSpace.ConfigurationSpace
        The configuration space object
    """
    cs = ConfigSpace.ConfigurationSpace('sklearn.svm.SVC', seed)
    # Fixed RBF kernel; only C and gamma are tuned (log-uniform ranges).
    hyperparameters = [
        ConfigSpace.Constant(name='svc__kernel', value='rbf'),
        ConfigSpace.UniformFloatHyperparameter(
            name='svc__C', lower=0.03125, upper=32768, log=True,
            default_value=1.0),
        ConfigSpace.UniformFloatHyperparameter(
            name='svc__gamma', lower=3.0517578125e-05, upper=8, log=True,
            default_value=0.1),
    ]
    cs.add_hyperparameters(hyperparameters)
    return cs
| StarcoderdataPython |
11350627 | <reponame>floatofmath/floatofmath.github.com
"""This is the simple python script that does all the work for setting up the beautiful
bioinformatics site. First we look into site.info were all pages are defined by the
triple of page name, which will determine the file names, page title, which determines
the link text shown on the page, and page description, which determines the tool tip that
is shown on mouse over i.e. what the title property of the html tag. Then we get the
main.tpl template.
each page defined in site.info must have a corresponding file called <file_name>.page
the html code in this file will then be inerted into the main template
we also generate a list of links for all pages defined in site.info which we insert into
all the generated pages.
finally each page is written to <file_name>.html, if this file already exists it is
ERASED (!) and replaced"""
"""To change a site (e.g. index.html) change what you want in the .page (e.g. index.page) file
and run python bioinf_cms.py"""
"""To create a page (e.g. test.html) start a new .page document (e.g. test.page) and just write some text in html
you don't need to bother about header footer whatsoever. then add an entry on a new line to site.info (e.g. test, this is a test, Test)
then run bioinf_cms.py.
the new page (e.g. test.html) will be generated and a link added to the sidebar (on all pages)
e.g. link to test.html, with text: this is a test, and on mouse over your browser will show you a tooltip with the text: Test,"""
"""To hide a page simply remove its entry from the site.info and remove <file_name>.html,
this could be useful if you write something but don't want to show it until some time later.
if you also remove <file_name>.page the page will be completely deleted."""
"""Software written by <NAME> <float_at_lefant.net>
Use as you like, credit where due"""
import os,re
def getLinks(info='site.info'):
    """Read the site description file and return the page definitions.

    The first line of the file is a header and is skipped; every other line
    is a comma-separated triple (file name, link text, tooltip).  Returns a
    list of stripped [file_name, link_text, tooltip] lists.
    """
    with open(info) as handle:
        rows = handle.readlines()[1:]
    return [[field.strip() for field in row.split(',')] for row in rows]
def mergeLists(lists):
    """Flatten a list of lists into a single list (one level deep).

    Replaces the hand-rolled double loop with the equivalent comprehension.
    """
    return [item for sublist in lists for item in sublist]
def buildLinkHtml(links):
    """Take the link list and build the sidebar anchor tags.

    Each entry is [file_name, link_text, tooltip]; the tooltip becomes the
    anchor's title attribute.
    """
    pattern = '<p><a href="%s.html" title="%s">%s</a></p>'
    return [pattern % (entry[0], entry[2], entry[1]) for entry in links]
def buildPagesHtml(links, template='main.tpl', altlinks=''):
    """Build the full html for every page defined in links.

    links: list of [file_name, link_text, tooltip] triples; for each entry a
        corresponding <file_name>.page file must exist.
    template: template file containing the [[content]] and [[links]] markers.
    altlinks: optional alternative link list for the sidebar; when empty the
        sidebar is built from ``links`` itself.
    Returns one list of html lines per page.
    """
    def getTemplate(template):
        templateFile = open(template)
        templateHtml = templateFile.readlines()
        templateFile.close()
        return templateHtml

    def getContentLine(templateHtml):
        # Position of the [[content]] marker; fall back to the top of the
        # file when the marker is missing.
        # BUGFIX: removed a stray debug ``print`` (which was also a Python 3
        # syntax error) and narrowed the bare except to ValueError.
        try:
            return templateHtml.index('[[content]]\n')
        except ValueError:
            return 0

    def getLinkLine(templateHtml):
        return templateHtml.index('[[links]]\n')

    def getPageHtml(link):
        pageFile = open(link + '.page')
        pageHtml = pageFile.readlines()
        pageFile.close()
        return pageHtml

    def buildPage(page, templateHtml):
        # Splice the page body in place of the [[content]] marker.
        contentLine = getContentLine(templateHtml)
        return mergeLists([templateHtml[:contentLine],
                           getPageHtml(page),
                           templateHtml[contentLine + 1:]])

    def addLinks(page, linkHtml):
        # Splice the sidebar links in place of the [[links]] marker.
        linkLine = getLinkLine(page)
        return mergeLists([page[:linkLine], linkHtml, page[linkLine + 1:]])

    if altlinks == '':
        altlinks = links
    templateHtml = getTemplate(template)
    pages = [page for [page, a, b] in links]
    pagesHtml = [buildPage(page, templateHtml) for page in pages]
    pagesHtml = [addLinks(page, buildLinkHtml(altlinks)) for page in pagesHtml]
    return pagesHtml
def makeFiles(links, pagesHtml, suf='.html'):
    """Write each page's html lines to <file_name><suf>.

    Existing files are deleted first and then replaced.
    """
    for (name, _text, _title), html in zip(links, pagesHtml):
        target = name + suf
        try:
            os.remove(target)
        except OSError:
            pass
        with open(target, 'w') as out:
            out.writelines(html)
if __name__ == "__main__":
    # Regenerate the whole site: read the page definitions from site.info,
    # build the html for every page (sidebar links included) and write the
    # <file_name>.html files.
    links = getLinks()
    pagesHtml = buildPagesHtml(links)
    # footerInc = buildPagesHtml([['footer','',''],],template='footer.tpl',altlinks=links)
    makeFiles(links,pagesHtml)
    # makeFiles([['includes/footer','',''],],footerInc,'.inc')
| StarcoderdataPython |
8058512 | import random
def write_file(path: str):
    """Randomly sample about 1% of the lines of ``path`` into reduced_data.list.

    The first 14 lines of the input (header) are skipped.  The output file
    in the current directory is overwritten.
    BUGFIX: the output handle was previously never closed; both files are
    now managed by context managers.
    """
    with open("reduced_data.list", "w+") as new_file:
        with open(path, "r", errors='ignore') as file:
            for line in file.readlines()[14:]:
                # keep a line with probability ~0.01
                if random.uniform(0, 1) > 0.99:
                    new_file.write(line)
def make_film_lst(path: str) -> list:
    """Parse an IMDb-style list file into [title, year, trailing-info] triples.

    Each line is expected to look like ``Title (Year) ... trailing fields``.
    Entries with a non-numeric year are dropped, parenthesised annotations in
    the trailing field are stripped, and the result is randomly trimmed to at
    most 50 entries.
    NOTE(review): lines without "(" or ")" raise ValueError via str.index --
    callers must guarantee well-formed input.
    """
    films = []
    with open(path, "r+") as file:
        for line in file.readlines():
            # [title, year string, remainder]; for episode lines containing
            # "{...}" the remainder starts after the closing "}"
            info = [line[:line.index("(")].strip(), line[line.index("(") + 1:line.index(")")].strip()]
            if "{" in line:
                info.append(line[line.index("}") + 1:].strip())
            else:
                info.append(line[line.index(")") + 1:].strip())
            films.append(info)
    # keep only entries whose year field is purely numeric (converted to int)
    clear_films_info = []
    for film in films:
        if film[1].isnumeric():
            film[1] = int(film[1])
            clear_films_info.append(film)
    for film in clear_films_info:
        # strip parenthesised annotations from the trailing field
        while "(" in film[-1]:
            film[-1] = film[-1].replace(film[-1][film[-1].index("("): film[-1].index(")") + 1], "")
        if "\t" in film[-1]:
            film[-1] = film[-1].replace("\t", "")
        # keep only the last three comma-separated values
        film[-1] = ", ".join(film[-1].split(", ")[-3:])
    # randomly discard entries until at most 50 remain
    while len(clear_films_info) > 50:
        clear_films_info.remove(clear_films_info[random.randint(0, len(clear_films_info) - 1)])
    return clear_films_info
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.