id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5183179 | <reponame>chidanandpujar/python_pyez_scripts
from jnpr.junos import Device
from jnpr.junos.utils.config import Config

# Connect to the device over NETCONF; facts gathering is skipped for speed.
dev = Device(host='xx.xx.xx.xx', user='xyz', password='<PASSWORD>', gather_facts=False)
dev.open()
dev.timeout = 300  # allow slow configuration operations on the device
try:
    # Load an XML configuration into the ephemeral DB instance 'test1'.
    with Config(dev, mode='ephemeral', ephemeral_instance='test1') as cu:
        try:
            cu.load(path="test.xml", format='xml')
        except Exception as err:
            # Report load failures without aborting session teardown.
            print(err)
finally:
    dev.close()  # always release the NETCONF session
| StarcoderdataPython |
3321854 | <reponame>ChrisTimperley/roswire
# -*- coding: utf-8 -*-
__all__ = ("ROS2LaunchManager",)
import os
import shlex
import typing
from typing import Collection, List, Mapping, Optional, Sequence, Tuple, Union
import attr
from loguru import logger
from .reader import ROS2LaunchFileReader
from ... import exceptions as exc
from ...common.launch.config import LaunchConfig
from ...common.launch.controller import ROSLaunchController
if typing.TYPE_CHECKING:
from ... import AppInstance
@attr.s(eq=False)
class ROS2LaunchManager:
    """
    Provides access to `ros2 launch
    <design.ros2.org/articles/roslaunch.html>`_ for an
    associated ROS2 system. This interface is used to locate, read,
    and write launch python files and to launch ROS nodes using those
    files.
    """
    # Application instance whose filesystem and shell back all operations.
    _app_instance: "AppInstance" = attr.ib()
    @classmethod
    def for_app_instance(
        cls, app_instance: "AppInstance"
    ) -> "ROS2LaunchManager":
        """Constructs a launch manager for the given app instance."""
        return ROS2LaunchManager(app_instance=app_instance)
    def read(
        self,
        filename: str,
        *,
        package: Optional[str] = None,
        argv: Optional[Sequence[str]] = None,
    ) -> LaunchConfig:
        """Produces a summary of the effects of a launch file.
        Parameters
        ----------
        filename: str
            The name of the launch file, or an absolute path to the launch
            file inside the container.
        package: str, optional
            The name of the package to which the launch file belongs.
        argv: Sequence[str], optional
            An optional sequence of command-line arguments that should be
            supplied to :code:`roslaunch`.
        Raises
        ------
        PackageNotFound
            If the given package could not be found.
        LaunchFileNotFound
            If the given launch file could not be found in the package.
        """
        # Resolve the (possibly package-relative) filename to an absolute path
        # before handing it to the reader.
        filename = self.locate(filename, package=package)
        reader = ROS2LaunchFileReader(self._app_instance)
        return reader.read(filename, argv)
    def write(
        self, config: LaunchConfig, *, filename: Optional[str] = None
    ) -> str:
        """Writes a given launch configuration to disk as an XML launch file.
        Parameters
        ----------
        config: LaunchConfig
            A launch configuration.
        filename: str, optional
            The name of the file to which the configuration should be written.
            If no filename is given, a temporary file will be created. It is
            the responsibility of the caller to ensure that the temporary file
            is appropriately destroyed.
        Returns
        -------
        str: The absolute path to the generated XML launch file.
        """
        # Serialization of ROS2 launch configurations is not supported yet.
        raise NotImplementedError
    def locate(self, filename: str, *, package: Optional[str] = None) -> str:
        """Locates a given launch file.
        Parameters
        ----------
        filename: str
            The name of the launch file, or an absolute path to the launch
            file inside the container.
        package: str, optional
            Optionally specifies the name of the package to which the launch
            file belongs.
        Returns
        -------
        The absolute path to the launch file, if it exists.
        Raises
        ------
        PackageNotFound
            If the given package could not be found.
        LaunchFileNotFound
            If the given launch file could not be found in the package.
        """
        if not package:
            # Without a package, an absolute in-container path is required.
            assert os.path.isabs(filename)
            return filename
        else:
            app_description = self._app_instance.app.description
            package_description = app_description.packages[package]
            package_location = package_description.path
            paths = self._app_instance.files.find(package_location, filename)
            for path in paths:
                # NOTE(review): substring test — a package name appearing
                # elsewhere in the path would also match; confirm intended.
                if package in path:
                    logger.debug(
                        "determined location of launch file"
                        f" [(unknown)] in package [{package}]: "
                        f"{path}"
                    )
                    return path
            raise exc.LaunchFileNotFound(path=filename)
    def launch(
        self,
        filename: str,
        *,
        package: Optional[str] = None,
        args: Optional[Mapping[str, Union[int, str]]] = None,
        prefix: Optional[str] = None,
        launch_prefixes: Optional[Mapping[str, str]] = None,
        node_to_remappings: Optional[
            Mapping[str, Collection[Tuple[str, str]]]
        ] = None,
    ) -> ROSLaunchController:
        """Provides an interface to the roslaunch command.
        Parameters
        ----------
        filename: str
            The name of the launch file, or an absolute path to the launch
            file inside the container.
        package: str, optional
            The name of the package to which the launch file belongs.
        args: Dict[str, Union[int, str]], optional
            Keyword arguments that should be supplied to roslaunch.
        prefix: str, optional
            An optional prefix to add before the roslaunch command.
        launch_prefixes: Mapping[str, str], optional
            An optional mapping from nodes, given by their names, to their
            individual launch prefix.
        node_to_remappings: Mapping[str, Collection[Tuple[str, str]]], optional
            A collection of name remappings for each node, represented as a
            mapping from node names to a collection of remappings for that
            node, where each remapping is a tuple of the
            form :code:`(to, from)`.
        Returns
        -------
        ROSLaunchController
            An interface for inspecting and managing the launch process.
        Raises
        ------
        PackageNotFound
            If the given package could not be found.
        LaunchFileNotFound
            If the given launch file could not be found in the package.
        """
        shell = self._app_instance.shell
        if not args:
            args = {}
        if not launch_prefixes:
            launch_prefixes = {}
        # Per-node prefixes/remappings would require rewriting the launch
        # file (via self.read), which is not supported yet.
        if node_to_remappings or launch_prefixes:
            m = "Requires self.read: not yet implemented"
            raise NotImplementedError(m)
        if package:
            filename_without_path = os.path.basename(filename)
            cmd = [
                "ros2 launch",
                shlex.quote(package),
                shlex.quote(filename_without_path),
            ]
        else:
            m = "Not yet implemented when package is None"
            raise NotImplementedError(m)
        # Encode roslaunch-style key:=value arguments.
        launch_args: List[str] = [f"{arg}:={val}" for arg, val in args.items()]
        cmd += launch_args
        if prefix:
            cmd = [prefix] + cmd
        cmd_str = " ".join(cmd)
        # Stream stdout/stderr so the controller can expose process output.
        popen = shell.popen(cmd_str, stdout=True, stderr=True)
        return ROSLaunchController(filename=filename, popen=popen)
    # Calling the manager directly is equivalent to calling launch().
    __call__ = launch
| StarcoderdataPython |
6590878 |
# Connection settings for the local MySQL test database.
db = dict(
    host='localhost',
    port=3306,
    user='root',
    password='',
    db='testdb',
)
| StarcoderdataPython |
1912902 | #!/usr/bin/env python
import glob
import os
import subprocess
import sys
from distutils.command.build_ext import build_ext
from setuptools import setup, find_packages, Extension
# Top-level bindings directory (the directory containing this setup.py).
BINDINGS_DIR = os.path.dirname(os.path.abspath(__file__))
# Top-level repository directory (two levels up from the bindings dir).
TOPDIR = os.path.dirname(os.path.dirname(BINDINGS_DIR))
# Location of the library sources, used for headers and linking below.
SRCDIR = os.path.join(TOPDIR, 'src')
class build_ext_compiler_check(build_ext):
    """Add custom compile flags for compiled extensions."""
    def build_extensions(self):
        # Compiler type is e.g. 'unix', 'mingw32' or 'msvc'.
        compiler = self.compiler.compiler_type
        cxxflags = []
        if compiler != 'msvc':
            # The C++ sources require C++11; MSVC does not take this flag.
            cxxflags.append('-std=c++11')
        for ext in self.extensions:
            ext.extra_compile_args.extend(cxxflags)
        # Delegate the actual build to the stock implementation.
        return build_ext.build_extensions(self)
def pkgconfig(*packages, **kw):
    """Translate pkg-config data to compatible Extension parameters.

    :param packages: pkg-config package names to query
    :param kw: existing Extension keyword arguments to extend
    :return: the (mutated) keyword-argument dict with include_dirs,
        library_dirs, libraries and extra_compile_args filled in
    """
    flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
    try:
        tokens = subprocess.check_output(
            ['pkg-config', '--libs', '--cflags'] + list(packages)).split()
    except OSError as e:
        sys.stderr.write('running pkg-config failed: {}\n'.format(e.strerror))
        sys.exit(1)
    for token in tokens:
        # BUG FIX: check_output returns bytes on Python 3; without decoding,
        # token[:2] never matched the str keys of flag_map and bytes leaked
        # into the Extension kwargs.
        token = token.decode()
        if token[:2] in flag_map:
            kw.setdefault(flag_map[token[:2]], []).append(token[2:])
        else:
            # Anything unrecognised (e.g. -pthread) is passed to the compiler.
            kw.setdefault('extra_compile_args', []).append(token)
    return kw
# Base Extension kwargs: headers and link path come from the library sources.
ext_kwargs = dict(
    include_dirs=[SRCDIR],
    library_dirs=[SRCDIR],
)
# The import library is named differently on Windows.
if sys.platform == 'win32':
    ext_kwargs['libraries'] = ['libsmu']
else:
    ext_kwargs['libraries'] = ['smu']
# Merge in libusb flags reported by pkg-config.
ext_kwargs = pkgconfig('libusb-1.0', **ext_kwargs)
extensions = []
extensions.extend([
    Extension(
        'pysmu._pysmu',
        [os.path.join(BINDINGS_DIR, 'src', 'pysmu.cpp')], **ext_kwargs),
])
setup(
    name='pysmu',
    version='0.88',
    description='python library for the m1k device',
    url='https://github.com/analogdevicesinc/libsmu',
    license='BSD',
    maintainer='Analog Devices, Inc.',
    packages=find_packages(),
    ext_modules=extensions,
    scripts=glob.glob('bin/*'),
    # Inject the C++11 flag during extension builds (see class above).
    cmdclass={'build_ext': build_ext_compiler_check},
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
    ],
)
| StarcoderdataPython |
1934033 | <filename>dipferromagtheory/reso.py
"""
"""
### IMPORTS
import numpy as np
import json
from dipferromagtheory import resdir
def sigma_lambda(q, q_mean, dlam):
    """Standard deviation of the wavelength distribution in q space.

    Parameters
    ----------
    q : float
        momentum/q transfer value
    q_mean : float
        average momentum/q transfer value
    dlam : float
        wavelength spread in percent

    Return
    ------
    : float
        standard deviation of the wavelength spread in q space

    Note
    ----
    A triangular wavelength spread converts to a gaussian sigma via
    (sig_lam / lam)**2 = 1/6 (delta lam / lam)**2, hence the 1/sqrt(6).
    """
    offset = q_mean - q
    return offset * dlam / np.sqrt(6)
#-------------------------------------------------------------------------------------
def sigma_Q_all(q, q_mean, r1, l1, r2, l2, dx, dy, lam, dlam):
    """Combined q-resolution sigma from geometric and wavelength spread.

    Parameters
    ----------
    q : float
        momentum/q transfer value
    q_mean : float
        average momentum/q transfer value
    r1 : float
        circular radius of source aperture
    l1 : float
        source-sample distance
    r2 : float
        circular radius of sample aperture
    l2 : float
        sample-detector distance
    dx : float
        horizontal pixel width
    dy : float
        vertical pixel width
    lam : float
        wavelength
    dlam : float
        wavelength spread in percent (delta lambda/lambda)
        for a triangular distribution

    Return
    ------
    : float
        standard deviation of the q resolution in gaussian approximation
    """
    # Aperture contribution projected onto the detector plane.
    aperture_term = (l2 / l1 * r1 * 0.5) ** 2 + ((l1 + l2) / l1 * r2 * 0.5) ** 2
    # Finite pixel size adds a uniform-distribution variance of w**2 / 12.
    pixel_term = (dx ** 2 + dy ** 2) / 12.0
    var_geom = (2 * np.pi / lam / l2) ** 2 * (2 * aperture_term + pixel_term)
    var_wave = sigma_lambda(q, q_mean, dlam) ** 2
    return np.sqrt(var_geom + var_wave)
#-------------------------------------------------------------------------------------
def resolution_Q(q, q_mean, sigma_q):
    """Gaussian approximation of the q-resolution function.

    Parameters
    ----------
    q : float
        momentum/q transfer value (integration parameter)
    q_mean : float
        average momentum/q transfer value
    sigma_q : float
        standard deviation of the q resolution
    """
    z = (q_mean - q) / sigma_q
    # Normalised gaussian density evaluated at distance z from the mean.
    return np.exp(-0.5 * z * z) / np.sqrt(2 * np.pi * sigma_q ** 2)
#------------------------------------------------------------------------------
class Maskqdists:
    """Class-level cache of mask q distributions loaded from a JSON file.

    The cache starts empty; call :meth:`init` to populate it from
    :attr:`path`, or :meth:`update` to switch to a different file.
    """
    # JSON file holding the distributions.
    path = f"{resdir}/qdists_Ni2019.json"
    # Cached distributions, keyed by distribution name.
    dists = {}
    @classmethod
    def keys(cls):
        """Return the names of the cached distributions."""
        return cls.dists.keys()
    @classmethod
    def get(cls, key):
        """Return the distribution stored under *key*."""
        return cls.dists[key]
    @classmethod
    def init(cls):
        """Load the distributions from ``cls.path`` into the cache."""
        with open(cls.path, "r") as jsonfile:
            cls.dists = json.load(jsonfile)
    @classmethod
    def update(cls, path):
        """Point the cache at a new JSON file and reload it."""
        cls.path = path
        Maskqdists.init()
11333551 | import math
import numpy as np
from digital_image_processing.algorithms.edge_detection_algorithms.threshold.threshold_based_edge_detection import (
fbc_threshold,
)
from digital_image_processing.tools.logger_base import log as log_message
def forward_difference(img_to_forward: np.ndarray) -> np.ndarray:
    """Runs the Forward Difference algorithm
    Reference:
        <NAME>., & <NAME>. (2016). Comparison and Evaluation of First Derivatives
        Estimation. Computer Vision and Graphics, 121–133. https://doi.org/10.1007/978-3-319-46418-3_11
    :param img_to_forward: The input image. Must be a gray scale image
    :type img_to_forward: ndarray
    :return: The estimated local operator for each pixel
    :rtype: ndarray
    """
    log_message.info('========Forward Difference==========')
    # Vectorised replacement for the original per-pixel double loop: work in
    # float to avoid uint8 overflow when differencing neighbouring pixels.
    as_float = img_to_forward.astype(np.float64)
    # dx[i, j] = img[i+1, j] - img[i, j]; dy[i, j] = img[i, j+1] - img[i, j],
    # both defined only for i < h-1 and j < w-1.
    dx = as_float[1:, :-1] - as_float[:-1, :-1]
    dy = as_float[:-1, 1:] - as_float[:-1, :-1]
    # Same rounding/cast as the original np.uint8(np.round(sqrt(...))).
    magnitude = np.round(np.sqrt(dx ** 2 + dy ** 2)).astype(np.uint8)
    ret = np.copy(img_to_forward)
    if ret.size:
        ret[:-1, :-1] = magnitude
        # Gradient is undefined on the last row/column: zero them out,
        # matching the original implementation.
        ret[-1, :] = 0
        ret[:, -1] = 0
    _, forward = fbc_threshold(ret)
    assert not np.logical_and(forward > 0, forward < 255).any(), 'Image forward operator isn\'t monochrome'
    return forward
| StarcoderdataPython |
8152320 | #!/usr/bin/env python3
import os, json
import requests
from datetime import datetime
from flask import Flask, jsonify, request, url_for, make_response, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# SQLite database file lives alongside this module.
db_path = os.path.join(os.path.dirname(__file__), 'app.db')
db_uri = 'sqlite:///{}'.format(db_path)
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
db = SQLAlchemy(app)
#-------------------------------------------------------------------------------------------------------------------------------------------
# Data Models
# Migrations: from app import db --> db.create_all()
class AppReview(db.Model):
    '''Data model for submitted app reviews, storing both the
    user-supplied rating/sentiment and the model predictions.
    '''
    review_id = db.Column(db.Integer, primary_key=True)
    # Timestamp defaults to row-creation time.
    review_date = db.Column(db.DateTime, unique=False, nullable=False, default=datetime.now)
    app_name = db.Column(db.String(20), unique=False, nullable=True)
    review_title = db.Column(db.String(180), unique=False, nullable=True)
    review = db.Column(db.String(180), unique=False, nullable=True)
    # Sentiment predicted by the model vs. the one submitted by the user.
    predicted_sent = db.Column(db.String(10), unique=False, nullable=True)
    sent = db.Column(db.String(10), unique=False, nullable=True)
    # Star rating predicted by the model vs. the one submitted by the user.
    predicted_stars = db.Column(db.Integer)
    stars = db.Column(db.Integer)
    def __init__(self, app_name:str, review_title: str, review: str, predicted_sent: str, sent: str, predicted_stars: int, stars: int):
        self.app_name = app_name
        self.review_title = review_title
        self.review = review
        self.predicted_sent = predicted_sent
        self.sent = sent
        self.predicted_stars = predicted_stars
        self.stars = stars
    def __repr__(self):
        # Compact dict-style representation for debugging/logging.
        return str({'title': self.review_title, 'body': self.review, 'stars': self.stars, 'sent': self.sent})
@app.route('/')
def review_form():
    '''Render and process the app-review submission form.

    Without parameters, renders an empty form. When a review is present,
    the two scoring services (ports 81 and 80) are queried for a predicted
    star rating and sentiment. When both a review and submitted stars are
    present, the review plus predictions is persisted to the database.
    '''
    review_title = request.args.get('review_title', False)
    review = request.args.get('review', False)
    sub_stars = request.args.get('stars', False)
    predicted_stars = request.args.get('pred_stars', False)
    sub_pos = request.args.get('pos', False)
    predicted_pos = request.args.get('pred_pos', False)
    # Renamed from `app` to avoid shadowing the module-level Flask app object.
    app_name = request.args.get('app_name')
    apps = ['App 1']
    pred_stars = False
    pos = False
    prob = False
    if review and sub_stars:
        # Final submission: store the review together with the predictions.
        new_review = AppReview(app_name, review_title.strip(), review.strip(), predicted_pos.strip(), sub_pos.strip(), predicted_stars, sub_stars)
        db.session.add(new_review)
        db.session.commit()
        return render_template("review_form.html", app_names=apps,
                            review_title=None, review=None,
                            pred_stars=None, pred_pos=None
        )
    if review:
        # Score the review against the two prediction services.
        headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        data = {'title': review_title, 'review': review}
        req = requests.post("http://127.0.0.1:81/score", data=json.dumps(data), headers=headers)
        pred_stars = req.json()['stars']
        req = requests.post("http://127.0.0.1:80/score", data=json.dumps(data), headers=headers)
        pos = req.json()['pos_neg']
        prob = req.json()['prob']
        pos = f'{pos}, {prob}'
    return render_template("review_form.html", app_names=apps,
                        review_title=review_title if review_title else None,
                        review=review if review else None,
                        pred_stars=pred_stars if pred_stars else None,
                        pred_pos=pos if pos else None
    )
if __name__ == "__main__":
    # Run the development server; debug mode reloads on source changes.
    app.debug = True
    app.run(port=5000)
1674749 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from apps.node_man.constants import GSE_PORT_DEFAULT_VALUE, IamActionType
from apps.node_man.handlers.iam import IamHandler
from apps.node_man.models import AccessPoint
from apps.utils.local import get_request_username
class ListSerializer(serializers.ModelSerializer):
    """
    Access point (AP) representation returned by list endpoints.
    """
    id = serializers.IntegerField(label=_("接入点ID"))
    name = serializers.CharField(label=_("接入点名称"))
    ap_type = serializers.CharField(label=_("接入点类型"))
    region_id = serializers.CharField(label=_("区域id"))
    city_id = serializers.CharField(label=_("城市id"))
    btfileserver = serializers.JSONField(label=_("GSE BT文件服务器列表"))
    dataserver = serializers.JSONField(label=_("GSE 数据服务器列表"))
    taskserver = serializers.JSONField(label=_("GSE 任务服务器列表"))
    zk_hosts = serializers.JSONField(label=_("ZK服务器列表"))
    zk_account = serializers.CharField(label=_("ZK账号"))
    package_inner_url = serializers.CharField(label=_("安装包内网地址"))
    package_outer_url = serializers.CharField(label=_("安装包外网地址"))
    agent_config = serializers.JSONField(label=_("Agent配置信息"))
    status = serializers.CharField(label=_("接入点状态"))
    description = serializers.CharField(label=_("接入点描述"), allow_blank=True)
    is_enabled = serializers.BooleanField(label=_("是否启用"))
    is_default = serializers.BooleanField(label=_("是否默认接入点,不可删除"))
    proxy_package = serializers.JSONField(label=_("Proxy上的安装包"))
    def to_representation(self, instance):
        # Attach per-object IAM permission flags for the requesting user.
        ret = super(ListSerializer, self).to_representation(instance)
        perms = IamHandler().fetch_policy(
            get_request_username(),
            [IamActionType.ap_edit, IamActionType.ap_delete, IamActionType.ap_create, IamActionType.ap_view],
        )
        ret["permissions"] = {
            "edit": ret["id"] in perms[IamActionType.ap_edit],
            "delete": ret["id"] in perms[IamActionType.ap_delete],
            "view": ret["id"] in perms[IamActionType.ap_view],
        }
        return ret
    class Meta:
        model = AccessPoint
        # Never expose the ZK password through the API.
        exclude = ("zk_password",)
class UpdateOrCreateSerializer(serializers.ModelSerializer):
    """
    Serializer for creating or updating an access point (AP).
    """
    class ServersSerializer(serializers.Serializer):
        # One GSE server entry with its inner and outer IP.
        inner_ip = serializers.CharField(label=_("内网IP"))
        outer_ip = serializers.CharField(label=_("外网IP"))
    class ZKSerializer(serializers.Serializer):
        # One ZooKeeper host entry.
        zk_ip = serializers.CharField(label=_("ZK IP地址"))
        zk_port = serializers.CharField(label=_("ZK 端口"))
    btfileserver = serializers.ListField(child=ServersSerializer())
    dataserver = serializers.ListField(child=ServersSerializer())
    taskserver = serializers.ListField(child=ServersSerializer())
    zk_hosts = serializers.ListField(child=ZKSerializer())
    zk_account = serializers.CharField(label=_("ZK账号"), required=False, allow_blank=True)
    zk_password = serializers.CharField(label=_("ZK密码"), required=False, allow_blank=True)
    agent_config = serializers.DictField(label=_("Agent配置"))
    description = serializers.CharField(label=_("接入点描述"), allow_blank=True)
    # BUG FIX: the first positional argument of a DRF Field is ``read_only``,
    # not ``label``; passing the label positionally silently made these
    # fields read-only. Pass label= explicitly.
    creator = serializers.JSONField(label=_("接入点创建者"), required=False)
    port_config = serializers.DictField(default=GSE_PORT_DEFAULT_VALUE)
    proxy_package = serializers.ListField()
    bscp_config = serializers.DictField(label=_("BSCP配置"), required=False)
    outer_callback_url = serializers.CharField(label=_("节点管理外网回调地址"), required=False)
    class Meta:
        fields = "__all__"
        model = AccessPoint
| StarcoderdataPython |
4859535 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
class FileObject(object):
    """Abstract class for a generic file."""
    # NOTE(review): ``__metaclass__`` is Python-2-only syntax; on Python 3
    # this attribute has no effect and the class is instantiable — confirm
    # whether Python 2 support is still required here.
    __metaclass__ = ABCMeta
    @abstractmethod
    def close(self):
        # type: () -> None
        """
        Close the file.
        """
        pass
    @abstractmethod
    def read(self, size):
        # type: (int) -> bytes
        """
        Reads a byte string from the file-like object at the current offset.
        The function will read a byte string of the specified size or all of the remaining data
        if no size was specified.
        :param size: number of bytes to read, where None is all remaining data
        :return: data read
        """
        pass
    @abstractmethod
    def seek(self, offset, whence):
        # type: (int, int) -> None
        """
        Set a position in the file for future reading or writing.
        :param offset: the position as an offset
        :param whence: from where the position should be reached (the beginning, the end, etc.)
        """
        pass
| StarcoderdataPython |
12847151 | <gh_stars>1-10
import time
import json
import sys
from pathlib import Path
from pprint import pprint
import wikidata_utils
from graphviz import Digraph
NL = "\n"
def find_subclasses_between(subclass, superclass):
    """Return the QIDs of classes on the path between two Wikidata classes.

    Queries Stardog for all subclasses of *superclass* that are superclasses
    of *subclass*, and returns their QIDs ordered from nearest *superclass*
    down towards *subclass* (the superclass itself is removed).

    :param subclass: QID of the lower class
    :param superclass: QID of the upper class
    :return: list of intermediate QIDs, reversed for graph traversal
    """
    # Query Stardog for subclasses
    subclassesJSON = wikidata_utils.query_subclasses_stardog(superclass, subclass)[
        "results"
    ]["bindings"]
    subclassesList = []
    try:
        # Parse JSON for results
        subclassesList = [result["entity"]["value"] for result in subclassesJSON]
        # Look for QID in all the strings
        subclassesList = wikidata_utils.regex_match_QID(subclassesList)
    except (KeyError, TypeError):
        # Malformed binding: fall back to an empty result instead of
        # silently swallowing every possible exception (was a bare except).
        subclassesList = []
    print(f"Subclasses between '{subclass}' and '{superclass}':\n{subclassesList}")
    try:
        # Remove superclass from the list (it is included by SPARQL)
        subclassesList.remove(superclass)
    except ValueError:
        # Superclass was not among the results; nothing to remove.
        pass
    # Return reversed list so we can use it immediately in the right order with graphviz
    return list(reversed(subclassesList))
def graph_from_superclasses_dict(treesDictFilename, **kwargs):
    """Build and render Graphviz digraphs of subclass relations.

    Given a JSON dictionary mapping entities to their superclasses and
    subclasses, builds one digraph per "root" entity (an entity without
    superclasses, or whose only superclass is itself), connecting each
    subclass up through its intermediate classes. Graphs are rendered to
    ``output/dots/dots_<timestamp>/``. Interactively prompts the user to
    restrict rendering to specific root entities.

    :param treesDictFilename: path to the JSON file with the entity trees
    :keyword rankingEntities: if given, only entities from this ranking are
        included as intermediate nodes
    :keyword useRandomColors: if truthy, each subclass chain gets a random
        edge/node color
    """
    dotsTime = int(time.time())
    # Optional argument; if it exists, will include only entities from the ranking
    rankingEntities = kwargs.get("rankingEntities", None)
    useRandomColors = kwargs.get("useRandomColors", None)
    remainingEntities = set(rankingEntities)
    totalEntities = len(remainingEntities)
    with open(Path(treesDictFilename), "r+", encoding="utf8") as dictFile:
        entitiesDict = json.load(dictFile)
    # Filter out entities without any subclasses in the ranking
    # Entities of interest here are entities without superclasses or whose superclasses are themselves
    entitiesDict = dict(
        filter(
            lambda x: x[1]["subclasses"] != []
            and (x[1]["superclasses"] == [] or [x[0]] == x[1]["superclasses"]),
            entitiesDict.items(),
        )
    )
    # Interactive selection loop: the user may pick a subset of root
    # entities; pressing Enter immediately keeps all of them.
    keepEntity = "1"
    keptDict = {}
    pprint(entitiesDict.keys())
    while(len(keepEntity) > 0):
        if not keptDict:
            keepEntity = input("What entity to generate graphs for? [Enter] for All: ")
        else:
            keepEntity = input("What entity to generate graphs for? [Enter] to leave: ")
        if keepEntity:
            kept = entitiesDict.pop(keepEntity)
            keptDict[keepEntity] = kept
        else:
            break
        print(f"Kept {keepEntity}")
    if keptDict:
        entitiesDict = keptDict
    # Number of entities to be processed
    print(f"{len(entitiesDict)} superclasses")
    # Tracks which QIDs already have a node, to avoid duplicate dot.node calls.
    nodesDict = {}
    for entity in entitiesDict.items():
        # Get label for each main entity
        entityLabel = wikidata_utils.get_entity_label(entity[0])
        nSubclasses = len(entity[1]["subclasses"])
        print(f"\nBuilding graph for {entity[0]} ({entityLabel}).")
        print(f"{entityLabel.capitalize()} has at least {nSubclasses} subclasses from the ranking.\n")
        # Create graph for each main entity; widen spacing for big graphs.
        nodesep = "0.1"
        ranksep = "0.5"
        if nSubclasses > 50:
            nodesep = "0.15"
            ranksep = "1"
        dot = Digraph(
            comment=entityLabel,
            strict=True,
            encoding="utf8",
            graph_attr={"nodesep": nodesep, "ranksep": ranksep, "rankdir": "BT"},
        )
        # Create a bigger node for each main entity
        dot.node(f"{entityLabel}\n{entity[0]}", fontsize="24")
        # Add entity QID to nodes' dict
        nodesDict[entity[0]] = True
        print(
            f"{totalEntities - len(remainingEntities)} entities (of {totalEntities}) from the ranking processed so far."
        )
        for subclass in entity[1]["subclasses"]:
            # Get label for each subclass
            subclassLabel = wikidata_utils.get_entity_label(subclass)
            # If label is unavailable, use ID
            if subclassLabel != "Label unavailable":
                subclassNodeLabel = f"{subclassLabel}\n{subclass}"
            else:
                subclassNodeLabel = subclass
            print(
                f'Finding subclasses between "{subclassLabel}" and "{entityLabel}"...'
            )
            # Get random color for nodes and edges
            argsColor = "#111111"
            if useRandomColors:
                argsColor = wikidata_utils.random_color_hex()
            edgeLabel = None
            if not nodesDict.get(subclass, False):
                # Create subclass node
                dot.node(f"{subclassLabel}\n{subclass}", color=argsColor)
                # Add subclass QID to nodes' dict
                nodesDict[subclass] = True
            # Query intermediary entities between "subclass" and "entity" (returns ordered list)
            subclassesBetween = find_subclasses_between(subclass, entity[0])
            # Default styling for intermediary subclasses
            subclassNodeArgs = {
                "shape": "square",
                "color": "#777777",
                "fontsize": "10",
                "fontcolor": "#555555",
            }
            # remainingEntitiesLastIteration = {totalEntities - len(remainingEntities)}
            if rankingEntities:
                # Filter out subclasses that aren't from the ranking
                subclassesBetween = {
                    subclass: True
                    for subclass in subclassesBetween
                    if subclass in rankingEntities
                }
                print(f"Subclasses between: {subclassesBetween}")
                # Use no particular styling instead
                subclassNodeArgs = {}
                # edgeLabel = "P279+"
            if subclassesBetween:
                # Get labels for each subclass in between
                subclassLabels = [
                    wikidata_utils.get_entity_label(subclass)
                    for subclass in list(subclassesBetween)
                ]
                # Connect "main" subclass to its immediate superclass
                print(
                    f"(First) Marking {subclassNodeLabel.split(NL)[0]} ({subclassNodeLabel.split(NL)[1]}) as subclass of {subclassLabels[-1]} ({list(subclassesBetween)[-1]})"
                )
                dot.edge(
                    subclassNodeLabel,
                    f"{subclassLabels[-1]}\n{list(subclassesBetween)[-1]}",
                    label=edgeLabel,
                    color=argsColor,
                    arrowhead="o",
                )
                try:
                    remainingEntities.remove(list(subclassesBetween)[-1])
                except KeyError:
                    pass
                for i, subclassBetween in enumerate(subclassesBetween):
                    if not nodesDict.get(subclassBetween, False):
                        # Create node for each subclass
                        dot.node(
                            f"{subclassLabels[i]}\n{subclassBetween}",
                            **subclassNodeArgs,
                            color=argsColor,
                        )
                        # Add intermediary entity QID to nodes' dict
                        nodesDict[subclassBetween] = True
                for i, subclassBetween in enumerate(list(subclassesBetween)[:-1]):
                    # Connect each subclass to its immediate superclass
                    # First, check if they should be connected
                    for j, entityAbove in enumerate(list(subclassesBetween)[i:]):
                        checkSubclass = list(subclassesBetween)[i]
                        checkSubclassLabel = subclassLabels[i]
                        if i == 0:
                            checkSubclass = subclass
                            checkSubclassLabel = subclassLabel
                        isSubclass = wikidata_utils.query_subclass_stardog(
                            entityAbove, checkSubclass, transitive=True
                        )["results"]["bindings"][0]["isSubclass0"]["value"]
                        isSubclass = isSubclass.lower() == "true"
                        print(
                            f"    (For) Is {checkSubclass} subclass of {entityAbove}? {isSubclass}"
                        )
                        if isSubclass:
                            print(
                                f"        Marking {checkSubclassLabel} ({checkSubclass}) as subclass of {subclassLabels[i + j]} ({entityAbove})"
                            )
                            dot.edge(
                                f"{checkSubclassLabel}\n{checkSubclass}",
                                f"{subclassLabels[i + j]}\n{entityAbove}",
                                label=edgeLabel,
                                color=argsColor,
                                arrowhead="o",
                            )
                            try:
                                remainingEntities.remove(checkSubclass)
                            except KeyError:
                                pass
                            try:
                                remainingEntities.remove(entityAbove)
                            except KeyError:
                                pass
                # if totalEntities - len(remainingEntities) > remainingEntitiesLastIteration:
                print(
                    f"{totalEntities - len(remainingEntities)} entities (of {totalEntities}) from the ranking processed so far."
                )
                # Connect the topmost superclass to the main superclass, i.e., the entity
                print(
                    f"(Last) Marking {subclassLabels[0]} as subclass of {entityLabel}"
                )
                dot.edge(
                    f"{subclassLabels[0]}\n{list(subclassesBetween)[0]}",
                    f"{entityLabel}\n{entity[0]}",
                    label=edgeLabel,
                    color=argsColor,
                    arrowhead="o",
                )
            else:
                # If there are no subclasses in between, connect subclass and entity directly
                print(
                    f"Joining {subclassNodeLabel.split(NL)[0]} ({subclassNodeLabel.split(NL)[1]}) and {entityLabel} ({entity[0]})"
                )
                dot.edge(
                    subclassNodeLabel,
                    f"{entityLabel}\n{entity[0]}",
                    label=edgeLabel,
                    color=argsColor,
                    arrowhead="o",
                )
                try:
                    remainingEntities.remove(subclass)
                except KeyError:
                    pass
        # Not having graphviz properly installed might raise an exception
        # NOTE(review): bare except below swallows every error, including
        # KeyboardInterrupt — consider narrowing to Exception.
        try:
            if rankingEntities:
                u = dot.unflatten(stagger=5)  # Break graphs into more lines
                u.render(f"output/dots/dots_{dotsTime}/AP1_{dot.comment}.gv")
            else:
                u = dot.unflatten(stagger=5)  # Break graphs into more lines
                u.render(
                    f"output/dots/dots_{dotsTime}/AP1_{dot.comment}_intermediary.gv"
                )
        except:
            print("\nVerify your Graphviz installation or Digraph args!\n")
            pass
        try:
            remainingEntities.remove(entity[0])
        except KeyError:
            pass
    print(remainingEntities)
def get_ranking_entity_set(rankingFile):
    """Return the unique QIDs found in an open ranking file."""
    return set(parse_ranking_file(rankingFile))
def parse_ranking_file(rankingFile):
    """Extract the QIDs listed in an open ranking file, in file order."""
    stripped_lines = [line.strip() for line in rankingFile.readlines()]
    # Look for the QID in all strings
    return wikidata_utils.regex_match_QID(stripped_lines)
if __name__ == "__main__":
    # Ranking file may be supplied as the second CLI argument; otherwise a
    # default path is used. NOTE(review): bare except also hides bad paths.
    try:
        fileIn = Path(sys.argv[2])
    except:
        fileIn = Path("output/ranking/AP1_minus_Q23958852_ranking.txt")
    with open(fileIn, "r") as rankingFile:
        entities = parse_ranking_file(rankingFile)
        # entitiesSet = get_ranking_entity_set(rankingFile)
        # graph_from_superclasses_dict(
        #     "output/AP1_occurrence.json", rankingEntities=entities
        # )
        graph_from_superclasses_dict(
            "output/AP1_trees.json", rankingEntities=entities
        )
| StarcoderdataPython |
5194768 | <filename>applaud/endpoints/app_preview_sets.py
from __future__ import annotations
from .base import Endpoint, IDEndpoint, SortOrder, endpoint
from ..fields import *
from typing import Union
from ..schemas.models import *
from ..schemas.responses import *
from ..schemas.requests import *
from ..schemas.enums import *
class AppPreviewSetsEndpoint(Endpoint):
    # Collection endpoint for App Store Connect app preview sets.
    path = '/v1/appPreviewSets'
    def create(self, request: AppPreviewSetCreateRequest) -> AppPreviewSetResponse:
        '''Create the resource.
        :param request: AppPreviewSet representation
        :type request: AppPreviewSetCreateRequest
        :returns: Single AppPreviewSet
        :rtype: AppPreviewSetResponse
        :raises: :py:class:`applaud.schemas.responses.ErrorResponse`: if a request or a HTTP error occurred.
        '''
        # POST the payload and parse the API response into the schema model.
        json = super()._perform_post(request)
        return AppPreviewSetResponse.parse_obj(json)
class AppPreviewSetEndpoint(IDEndpoint):
path = '/v1/appPreviewSets/{id}'
@endpoint('/v1/appPreviewSets/{id}/appPreviews')
def app_previews(self) -> AppPreviewsOfAppPreviewSetEndpoint:
return AppPreviewsOfAppPreviewSetEndpoint(self.id, self.session)
@endpoint('/v1/appPreviewSets/{id}/relationships/appPreviews')
def app_previews_linkages(self) -> AppPreviewsLinkagesOfAppPreviewSetEndpoint:
return AppPreviewsLinkagesOfAppPreviewSetEndpoint(self.id, self.session)
def fields(self, *, app_preview_set: Union[AppPreviewSetField, list[AppPreviewSetField]]=None, app_preview: Union[AppPreviewField, list[AppPreviewField]]=None) -> AppPreviewSetEndpoint:
'''Fields to return for included related types.
:param app_preview_set: the fields to include for returned resources of type appPreviewSets
:type app_preview_set: Union[AppPreviewSetField, list[AppPreviewSetField]] = None
:param app_preview: the fields to include for returned resources of type appPreviews
:type app_preview: Union[AppPreviewField, list[AppPreviewField]] = None
:returns: self
:rtype: applaud.endpoints.AppPreviewSetEndpoint
'''
if app_preview_set: self._set_fields('appPreviewSets',app_preview_set if type(app_preview_set) is list else [app_preview_set])
if app_preview: self._set_fields('appPreviews',app_preview if type(app_preview) is list else [app_preview])
return self
class Include(StringEnum):
APP_PREVIEWS = 'appPreviews'
APP_STORE_VERSION_LOCALIZATION = 'appStoreVersionLocalization'
def include(self, relationship: Union[Include, list[Include]]) -> AppPreviewSetEndpoint:
'''Relationship data to include in the response.
:returns: self
:rtype: applaud.endpoints.AppPreviewSetEndpoint
'''
if relationship: self._set_includes(relationship if type(relationship) is list else [relationship])
return self
def limit(self, *, app_previews: int=None) -> AppPreviewSetEndpoint:
'''Number of included related resources to return.
:param app_previews: maximum number of related appPreviews returned (when they are included). The maximum limit is 50
:type app_previews: int = None
:returns: self
:rtype: applaud.endpoints.AppPreviewSetEndpoint
'''
if app_previews and app_previews > 50:
raise ValueError(f'The maximum limit of app_previews is 50')
if app_previews: self._set_limit(app_previews, 'appPreviews')
return self
    def get(self) -> AppPreviewSetResponse:
        '''Get the resource.

        :returns: Single AppPreviewSet
        :rtype: AppPreviewSetResponse
        :raises: :py:class:`applaud.schemas.responses.ErrorResponse`: if an error response returned.
                 :py:class:`requests.RequestException`: if a connection or a HTTP error occurred.
        '''
        json = super()._perform_get()
        return AppPreviewSetResponse.parse_obj(json)
    def delete(self):
        '''Delete the resource.

        :raises: :py:class:`applaud.schemas.responses.ErrorResponse`: if a request or a HTTP error occurred.
        '''
        super()._perform_delete()
class AppPreviewsLinkagesOfAppPreviewSetEndpoint(IDEndpoint):
    '''Endpoint for the appPreviews linkages of an app preview set.'''
    path = '/v1/appPreviewSets/{id}/relationships/appPreviews'

    def limit(self, number: int=None) -> AppPreviewsLinkagesOfAppPreviewSetEndpoint:
        '''Number of resources to return.

        :param number: maximum resources per page. The maximum limit is 200
        :type number: int = None

        :returns: self
        :rtype: applaud.endpoints.AppPreviewsLinkagesOfAppPreviewSetEndpoint

        :raises ValueError: if number exceeds the API maximum of 200
        '''
        if number and number > 200:
            # Plain string literal: no placeholders, so no f-string is needed.
            raise ValueError('The maximum limit of number is 200')
        if number:
            self._set_limit(number)
        return self

    def get(self) -> AppPreviewSetAppPreviewsLinkagesResponse:
        '''Get one or more resources.

        :returns: List of related linkages
        :rtype: AppPreviewSetAppPreviewsLinkagesResponse
        :raises: :py:class:`applaud.schemas.responses.ErrorResponse`: if an error response returned.
                 :py:class:`requests.RequestException`: if a connection or a HTTP error occurred.
        '''
        json = super()._perform_get()
        return AppPreviewSetAppPreviewsLinkagesResponse.parse_obj(json)

    def update(self, request: AppPreviewSetAppPreviewsLinkagesRequest):
        '''Modify one or more related linkages.

        :param request: List of related linkages
        :type request: AppPreviewSetAppPreviewsLinkagesRequest

        :raises: :py:class:`applaud.schemas.responses.ErrorResponse`: if a request or a HTTP error occurred.
        '''
        super()._perform_patch(request)
class AppPreviewsOfAppPreviewSetEndpoint(IDEndpoint):
    '''Endpoint for the appPreviews related resources of an app preview set.'''
    path = '/v1/appPreviewSets/{id}/appPreviews'

    def fields(self, *, app_preview: Union[AppPreviewField, list[AppPreviewField]]=None) -> AppPreviewsOfAppPreviewSetEndpoint:
        '''Fields to return for included related types.

        :param app_preview: the fields to include for returned resources of type appPreviews
        :type app_preview: Union[AppPreviewField, list[AppPreviewField]] = None

        :returns: self
        :rtype: applaud.endpoints.AppPreviewsOfAppPreviewSetEndpoint
        '''
        # Normalize a single field to a one-element list before registering.
        if app_preview:
            self._set_fields('appPreviews', app_preview if isinstance(app_preview, list) else [app_preview])
        return self

    def limit(self, number: int=None) -> AppPreviewsOfAppPreviewSetEndpoint:
        '''Number of resources to return.

        :param number: maximum resources per page. The maximum limit is 200
        :type number: int = None

        :returns: self
        :rtype: applaud.endpoints.AppPreviewsOfAppPreviewSetEndpoint

        :raises ValueError: if number exceeds the API maximum of 200
        '''
        if number and number > 200:
            # Plain string literal: no placeholders, so no f-string is needed.
            raise ValueError('The maximum limit of number is 200')
        if number:
            self._set_limit(number)
        return self

    def get(self) -> AppPreviewsResponse:
        '''Get one or more resources.

        :returns: List of related resources
        :rtype: AppPreviewsResponse
        :raises: :py:class:`applaud.schemas.responses.ErrorResponse`: if an error response returned.
                 :py:class:`requests.RequestException`: if a connection or a HTTP error occurred.
        '''
        json = super()._perform_get()
        return AppPreviewsResponse.parse_obj(json)
| StarcoderdataPython |
11298377 | import cv2
import numpy as np
from sklearn.cluster import KMeans
from skimage.feature import greycomatrix, greycoprops
from skimage import data
# Default file for persisting the k-means histogram centers.
# NOTE(review): identifier keeps the original "histsogram" spelling because
# other modules may reference it by this name.
histsogram_centers_file_name = 'HistogramCenters.npy'
# Size of the indexed-color palette (cv2.calcHist works on uint8, hence 256).
n_indexed_colors=256
# Bins per channel for the plain per-channel color histogram.
n_color_histogram_categories=64
# Side length of the low-frequency DCT block kept as a feature.
dct2_size = 100
# Image side length and square-patch step used for GLCM texture features.
GLCM_resize_size = 200
GLCM_step = 20
# speed-up opencv using multithreads
cv2.setUseOptimized(True)
cv2.setNumThreads(8)
def CreateIndexedColorClasses(feature_vectors):
    """Cluster pixel colors into ``n_indexed_colors`` palette entries.

    Args:
        feature_vectors: array of pixel color vectors, one row per pixel.

    Returns:
        The ``(n_indexed_colors, 3)`` array of k-means cluster centers.
    """
    # an approximation is more than enough, no need to have high accuracy and waste computing time
    kmeans = KMeans(n_clusters=n_indexed_colors, n_init=1, tol=0.001, max_iter=100, random_state=0, n_jobs=1, algorithm='full')
    # Bug fix: the original called kmeans.fit(pixels_vector), which is an
    # undefined global and raised NameError; fit the actual argument instead.
    kmeans.fit(feature_vectors)
    return kmeans.cluster_centers_
def ImgPathToPixelVector(img_path):
    """Load an image from disk and flatten it into a float32 pixel list.

    Args:
        img_path: path to an image file readable by ``cv2.imread``.

    Returns:
        A ``(40000, 3)`` float32 array (the image resized to 200x200, one
        row per pixel).
    """
    img = cv2.imread(img_path)
    # a 200*200 image preserves most color information and it's enough for indexing colors
    img = cv2.resize(img,(200,200))
    reshaped_image = img.reshape((-1,3))
    reshaped_image = np.float32(reshaped_image)
    return reshaped_image
def RGBToIndex(img, color_classes):
    """Quantize a color image to palette indices using precomputed centers.

    Args:
        img: color image of shape ``(H, W, 3)``.
        color_classes: ``(n_indexed_colors, 3)`` k-means cluster centers.

    Returns:
        An ``(H, W)`` array of palette indices (one per pixel).
    """
    # reconstruct the kmeans from center information
    kmeans = KMeans(n_clusters=n_indexed_colors, random_state=0)
    kmeans.cluster_centers_ = color_classes
    # Reshape the image into a vector of pixels
    pixel_vector = img.reshape(-1, 3)
    # Get the nearest class for each pixel
    labels = kmeans.predict(pixel_vector)
    # Reshape the indexed image to the height and width of the original
    return_img = labels
    rows, cols, channels = img.shape
    return return_img.reshape(rows, cols)
def IndexedImageToRGB(indexed_img, color_classes):
    """Expand an indexed (palette) image back into a 3-channel color image.

    Args:
        indexed_img: ``(H, W)`` array of palette indices.
        color_classes: palette array whose rows are 3-component colors.

    Returns:
        An ``(H, W, 3)`` array of colors looked up from the palette.
    """
    height, width = indexed_img.shape
    # Fancy-index the palette with the flattened index image, then restore shape.
    palette_pixels = color_classes[indexed_img.flatten()]
    return palette_pixels.reshape(height, width, 3)
def CreateColorHistogram(img):
    """Compute normalized 64-bin histograms per channel, stacked as one column.

    Returns:
        A ``(3 * n_color_histogram_categories, 1)`` feature vector.
    """
    # Calculate and normalize the histogram for each channel then append them
    histogram = cv2.calcHist([img],[0],None,[n_color_histogram_categories],[0,256])
    histogram = cv2.normalize(histogram, None)
    ch1_histogram = cv2.calcHist([img],[1],None,[n_color_histogram_categories],[0,256])
    ch1_histogram = cv2.normalize(ch1_histogram, None)
    histogram = np.vstack((histogram, ch1_histogram))
    ch2_histogram = cv2.calcHist([img],[2],None,[n_color_histogram_categories],[0,256])
    ch2_histogram = cv2.normalize(ch2_histogram, None)
    histogram = np.vstack((histogram, ch2_histogram))
    return histogram
def CreateIndexedColorHistogram(img, color_classes):
    """Compute a normalized histogram over the indexed-color (palette) image.

    Returns:
        An ``(n_indexed_colors, 1)`` feature vector.
    """
    indexed_img = RGBToIndex(img, color_classes)
    # cv2 calcHist only works with uint8 arrays (?) so we are limited to 256 colors
    indexed_img = indexed_img.astype(np.uint8)
    histogram = cv2.calcHist([indexed_img],[0],None,[n_indexed_colors],[0,n_indexed_colors])
    histogram = cv2.normalize(histogram, None)
    return histogram
def CreateDCT2(img):
    """Keep the low-frequency ``dct2_size`` x ``dct2_size`` corner of the DCT.

    Returns:
        A ``(dct2_size * dct2_size, 1)`` column vector of DCT coefficients.
    """
    # only the gray image is needed for the dct
    grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # normalise the image content
    NormImg = np.float32(grey_img)/255.0
    Dct2 = cv2.dct(NormImg)
    # The zeros buffer is immediately overwritten by the slice below.
    Dct2Out = np.zeros([dct2_size,dct2_size])
    Dct2Out = Dct2[:dct2_size,:dct2_size]
    return Dct2Out.reshape(-1, 1)
def CreateGLCM(img):
    """Compute per-patch GLCM energy and contrast texture features.

    The image is converted to gray, resized to ``GLCM_resize_size`` squared,
    and split into ``GLCM_step`` x ``GLCM_step`` patches; each patch yields one
    energy and one contrast value.

    Returns:
        A column vector with all energy values followed by all contrast values.
    """
    grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    resized_img = cv2.resize(grey_img,(GLCM_resize_size, GLCM_resize_size))
    energy_features = []
    contrast_features = []
    sz = resized_img.shape
    for i in range(0,sz[0],GLCM_step):
        for j in range(0,sz[1],GLCM_step):
            patch = resized_img[i:i+GLCM_step,j:j+GLCM_step]
            #greycomatrix parameters:
            # 1) patch : part of image to generate co-occurance matrix for
            # 2 & 3): separation vector neighbor [1] and angle in radians [0] "1 to the right"
            # 4) symmetric = True: add the matrix to its transpose
            # 5) normed = True: divide each element in matrix by number of elements in it
            glcm = greycomatrix(patch, [1], [0], 256, symmetric=True, normed=True)
            energy_features.append(greycoprops(glcm, 'energy')[0, 0])
            contrast_features.append(greycoprops(glcm, 'contrast')[0, 0])
    out_glsm_features = np.array(energy_features)
    out_glsm_features = np.vstack((out_glsm_features, contrast_features))
    return out_glsm_features.reshape(-1, 1)
def CreateImageFeaturesVector(img, colors_classes):
    """Concatenate all feature families into one column feature vector.

    Order: color histogram, indexed-color histogram, DCT block, GLCM features.
    """
    feature_parts = [
        CreateColorHistogram(img),
        CreateIndexedColorHistogram(img, colors_classes),
        CreateDCT2(img),
        CreateGLCM(img),
    ]
    # Every part is an (n, 1) column, so one vstack yields the full vector.
    return np.vstack(feature_parts)
| StarcoderdataPython |
1680954 | #!/usr/bin/python
# Copyright (C) 2014-2020 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <https://www.gnu.org/licenses/>.
"""Benchmark output validator
Given a benchmark output file in json format and a benchmark schema file,
validate the output against the schema.
"""
from __future__ import print_function
import json
import sys
import os
try:
import import_bench as bench
except ImportError:
print('Import Error: Output will not be validated.')
# Return success because we don't want the bench target to fail just
# because the jsonschema module was not found.
sys.exit(os.EX_OK)
def print_and_exit(message, exitcode):
    """Report *message* on stderr and hand *exitcode* back to the caller.

    Args:
        message: The message to print
        exitcode: The exit code to return

    Returns:
        The passed exit code
    """
    sys.stderr.write(f"{message}\n")
    return exitcode
def main(args):
    """Main entry point

    Args:
        args: The command line arguments to the program

    Returns:
        0 on success or a non-zero failure code

    Exceptions:
        Exceptions thrown by validate_bench
    """
    # Exactly two arguments are required: the output file and its schema.
    if len(args) != 2:
        return print_and_exit("Usage: %s <bench.out file> <bench.out schema>"
                % sys.argv[0], os.EX_USAGE)

    # Map each failure mode to the conventional sysexits.h exit code.
    try:
        bench.parse_bench(args[0], args[1])
    except IOError as e:
        return print_and_exit("IOError(%d): %s" % (e.errno, e.strerror),
                os.EX_OSFILE)

    except bench.validator.ValidationError as e:
        return print_and_exit("Invalid benchmark output: %s" % e.message,
            os.EX_DATAERR)

    except bench.validator.SchemaError as e:
        return print_and_exit("Invalid schema: %s" % e.message, os.EX_DATAERR)

    print("Benchmark output in %s is valid." % args[0])
    return os.EX_OK
# Script entry point: exit with the validator's status code.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| StarcoderdataPython |
5156341 | """This module is for thread start."""
import state
from bitmessagemain import main
if __name__ == '__main__':
    # Signal to the shared state that we are running under the Kivy UI
    # before handing control to the bitmessage main loop.
    state.kivy = True
    print("Kivy Loading......")
    main()
| StarcoderdataPython |
3417539 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import datetime
from datetime import timezone
import time
import json
import unittest
import os
from tests.common import async_test, TestHelper
from tests.adls_test_helper import AdlsTestHelper
from cdm.storage.adls import ADLSAdapter
from cdm.utilities.network.token_provider import TokenProvider
from cdm.objectmodel import CdmCorpusDefinition
def IfRunTestsFlagNotSet():
    """Return True when the ADLS_RUNTESTS environment flag is absent."""
    return "ADLS_RUNTESTS" not in os.environ
class FakeTokenProvider(TokenProvider):
    """Token provider stub that always yields a fixed token for tests."""
    def get_token(self) -> str:
        return 'TOKEN'
class AdlsStorageAdapterTestCase(unittest.TestCase):
    """Tests for the ADLS storage adapter.

    The ``test_adls_*`` methods talk to a live Data Lake account and only run
    when the ADLS_RUNTESTS environment variable is set; the remaining tests
    are pure path/config round-trips and always run.
    """

    # Sub-folder under the shared test-data root holding this suite's inputs.
    test_subpath = 'Storage'

    def create_dummy_adapter(self):
        """Build an offline adapter with placeholder credentials and no retries."""
        adapter = ADLSAdapter(root='/fs', hostname='dummy.dfs.core.windows.net', tenant='dummyTenant', resource='dummyResource',
                              client_id='dummyClientId', secret='dummySecret')
        adapter.number_of_retries = 0
        return adapter

    async def run_write_read_test(self, adapter):
        """Write a user/machine-unique file and verify it reads back unchanged."""
        filename = 'WriteReadTest/' + os.environ.get('USERNAME') + '_' + os.environ.get('COMPUTERNAME') + '_Python.txt'
        write_contents = str(datetime.datetime.now()) + '\n' + filename
        await adapter.write_async(filename, write_contents)
        read_contents = await adapter.read_async(filename)
        self.assertEqual(write_contents, read_contents)

    async def run_check_filetime_test(self, adapter):
        """Verify modified-time lookup agrees with and without a leading slash."""
        offset1 = await adapter.compute_last_modified_time_async('/FileTimeTest/CheckFileTime.txt')
        offset2 = await adapter.compute_last_modified_time_async('FileTimeTest/CheckFileTime.txt')
        self.assertTrue(offset1)
        self.assertTrue(offset2)
        self.assertTrue(offset1 == offset2)
        # A stored file's timestamp must be in the past relative to now (UTC).
        utc_now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
        self.assertTrue(offset1 < utc_now)

    async def run_file_enum_test(self, adapter):
        """Enumerate files under every path spelling and exercise the query cache."""
        context = adapter.create_file_query_cache_context()
        try:
            files1 = await adapter.fetch_all_files_async('/FileEnumTest/')
            files2 = await adapter.fetch_all_files_async('/FileEnumTest')
            files3 = await adapter.fetch_all_files_async('FileEnumTest/')
            files4 = await adapter.fetch_all_files_async('FileEnumTest')
            # expect 100 files to be enumerated
            self.assertTrue(len(files1) == 100 and len(files2)== 100 and len(files3) == 100 and len(files4) == 100)
            # these calls should be fast due to cache
            start = time.time()
            for i in range(0,len(files1) - 1):
                self.assertTrue(files1[i] == files2[i] and files1[i] == files3[i] and files1[i] == files4[i])
                await adapter.compute_last_modified_time_async(files1[i]);
            stop = time.time()
            self.assertLess(stop - start, .1, 'Checking cached file modified times took too long')
        finally:
            # Always dispose the cache context, even if an assertion fails.
            context.dispose()

    async def run_special_characters_test(self, adapter):
        """Check that partition paths containing '=' and spaces resolve correctly."""
        corpus = CdmCorpusDefinition()
        corpus.storage.mount('adls', adapter)
        corpus.storage.default_namespace = 'adls'
        manifest = await corpus.fetch_object_async('default.manifest.cdm.json')
        await manifest.file_status_check_async()
        self.assertEqual(len(manifest.entities), 1)
        self.assertEqual(len(manifest.entities[0].data_partitions), 2)
        self.assertEqual(manifest.entities[0].data_partitions[0].location, 'TestEntity-With=Special Characters/year=2020/TestEntity-partition-With=Special Characters-0.csv')
        self.assertEqual(manifest.entities[0].data_partitions[1].location, 'TestEntity-With=Special Characters/year=2020/TestEntity-partition-With=Special Characters-1.csv')

    @async_test
    @unittest.skipIf(IfRunTestsFlagNotSet(), "ADLS environment variables not set up")
    async def test_adls_write_read_shared_key(self):
        await self.run_write_read_test(AdlsTestHelper.create_adapter_with_shared_key())

    @async_test
    @unittest.skipIf(IfRunTestsFlagNotSet(), "ADLS environment variables not set up")
    async def test_adls_write_read__client_id(self):
        await self.run_write_read_test(AdlsTestHelper.create_adapter_with_client_id())

    @async_test
    @unittest.skipIf(IfRunTestsFlagNotSet(), "ADLS environment variables not set up")
    async def test_adls_check_filetime_shared_key(self):
        await self.run_check_filetime_test(AdlsTestHelper.create_adapter_with_shared_key())

    @async_test
    @unittest.skipIf(IfRunTestsFlagNotSet(), "ADLS environment variables not set up")
    async def test_adls_check_filetime_client_id(self):
        await self.run_check_filetime_test(AdlsTestHelper.create_adapter_with_client_id())

    @async_test
    @unittest.skipIf(IfRunTestsFlagNotSet(), "ADLS environment variables not set up")
    async def test_adls_file_enum_shared_key(self):
        await self.run_file_enum_test(AdlsTestHelper.create_adapter_with_shared_key())

    @async_test
    @unittest.skipIf(IfRunTestsFlagNotSet(), "ADLS environment variables not set up")
    async def test_adls_file_enum_client_id(self):
        await self.run_file_enum_test(AdlsTestHelper.create_adapter_with_client_id())

    @async_test
    @unittest.skipIf(IfRunTestsFlagNotSet(), "ADLS environment variables not set up")
    async def test_adls_special_characters(self):
        await self.run_special_characters_test(AdlsTestHelper.create_adapter_with_client_id('PathWithSpecialCharactersAndUnescapedStringTest/Root-With=Special Characters:'))

    def test_create_corpus_and_adapter_path(self):
        """Round-trip adapter paths and corpus paths, including ports and escaping."""
        host_1 = 'storageaccount.dfs.core.windows.net'
        root = '/fs'
        adls_adapter = ADLSAdapter(root=root, hostname=host_1, tenant='dummyTenant',
                                   client_id='dummyClientId', secret='dummySecret')
        adapter_path_1 = 'https://storageaccount.dfs.core.windows.net/fs/a/1.csv'
        adapter_path_2 = 'https://storageaccount.dfs.core.windows.net:443/fs/a/2.csv'
        adapter_path_3 = 'https://storageaccount.blob.core.windows.net/fs/a/3.csv'
        adapter_path_4 = 'https://storageaccount.blob.core.windows.net:443/fs/a/4.csv'
        corpus_path_1 = adls_adapter.create_corpus_path(adapter_path_1)
        corpus_path_2 = adls_adapter.create_corpus_path(adapter_path_2)
        corpus_path_3 = adls_adapter.create_corpus_path(adapter_path_3)
        corpus_path_4 = adls_adapter.create_corpus_path(adapter_path_4)
        self.assertEqual(corpus_path_1, '/a/1.csv')
        self.assertEqual(corpus_path_2, '/a/2.csv')
        self.assertEqual(corpus_path_3, '/a/3.csv')
        self.assertEqual(corpus_path_4, '/a/4.csv')
        self.assertEqual(adls_adapter.create_adapter_path(corpus_path_1), adapter_path_1)
        self.assertEqual(adls_adapter.create_adapter_path(corpus_path_2), adapter_path_2)
        self.assertEqual(adls_adapter.create_adapter_path(corpus_path_3), adapter_path_3)
        self.assertEqual(adls_adapter.create_adapter_path(corpus_path_4), adapter_path_4)
        # Check that an adapter path is correctly created from a corpus path with any namespace
        corpus_path_with_namespace_1 = 'adls:/test.json'
        corpus_path_with_namespace_2 = 'mylake:/test.json'
        expected_adapter_path = 'https://storageaccount.dfs.core.windows.net/fs/test.json'
        self.assertEqual(expected_adapter_path, adls_adapter.create_adapter_path(corpus_path_with_namespace_1))
        self.assertEqual(expected_adapter_path, adls_adapter.create_adapter_path(corpus_path_with_namespace_2))
        # Check that an adapter path is correctly created from a corpus path with colons
        corpus_path_with_colons = 'namespace:/a/path:with:colons/some-file.json'
        self.assertEqual('https://storageaccount.dfs.core.windows.net/fs/a/path%3Awith%3Acolons/some-file.json', adls_adapter.create_adapter_path(corpus_path_with_colons))
        self.assertEqual('/a/path:with:colons/some-file.json', adls_adapter.create_corpus_path('https://storageaccount.dfs.core.windows.net/fs/a/path%3Awith%3Acolons/some-file.json'))
        self.assertEqual('/a/path:with:colons/some-file.json', adls_adapter.create_corpus_path('https://storageaccount.dfs.core.windows.net/fs/a/path%3awith%3acolons/some-file.json'))
        # Check other special characters
        self.assertEqual('https://storageaccount.dfs.core.windows.net/fs/a/path%20with%3Dspecial%3Dcharacters/some-file.json', adls_adapter.create_adapter_path('namespace:/a/path with=special=characters/some-file.json'))
        self.assertEqual('/a/path with=special=characters/some-file.json', adls_adapter.create_corpus_path('https://storageaccount.dfs.core.windows.net/fs/a/path%20with%3dspecial%3dcharacters/some-file.json'))
        self.assertEqual('/a/path with=special=characters/some-file.json', adls_adapter.create_corpus_path('https://storageaccount.dfs.core.windows.net/fs/a/path%20with%3dspecial%3Dcharacters/some-file.json'))
        # Check that an adapter path is null if the corpus path provided is null
        self.assertIsNone(adls_adapter.create_adapter_path(None))
        host_2 = 'storageaccount.blob.core.windows.net:8888'
        adls_adapter = ADLSAdapter(root=root, hostname=host_2, tenant='11111111-1111-1111-1111-111111111111',
                                   client_id='dummyClientId', secret='dummySecret')
        adapter_path_5 = 'https://storageaccount.blob.core.windows.net:8888/fs/a/5.csv'
        adapter_path_6 = 'https://storageaccount.dfs.core.windows.net:8888/fs/a/6.csv'
        adapter_path_7 = 'https://storageaccount.blob.core.windows.net/fs/a/7.csv'
        self.assertEqual(adls_adapter.create_corpus_path(adapter_path_5), '/a/5.csv')
        self.assertEqual(adls_adapter.create_corpus_path(adapter_path_6), '/a/6.csv')
        self.assertEqual(adls_adapter.create_corpus_path(adapter_path_7), None)

    def test_config_and_update_config_without_secret(self):
        """
        The secret property is not saved to the config.json file for security reasons.
        When constructing and ADLS adapter from config, the user should be able to set the secret after the adapter is constructed.
        """
        config = {
            'root': 'root',
            'hostname': 'hostname',
            'tenant': 'tenant',
            'clientId': 'clientId',
        }
        # Credentials may be assigned either after or before update_config;
        # neither order should raise.
        try:
            adls_adapter1 = ADLSAdapter()
            adls_adapter1.update_config(json.dumps(config))
            adls_adapter1.client_id = 'clientId'
            adls_adapter1.secret = 'secret'
            adls_adapter1.shared_key = 'sharedKey'
            adls_adapter1.token_provider = FakeTokenProvider()
        except Exception:
            self.fail('adls_adapter initialized without secret shouldn\'t throw exception when updating config.')
        try:
            adls_adapter2 = ADLSAdapter()
            adls_adapter2.client_id = 'clientId'
            adls_adapter2.secret = 'secret'
            adls_adapter2.shared_key = 'sharedKey'
            adls_adapter2.token_provider = FakeTokenProvider()
            adls_adapter2.update_config(json.dumps(config))
        except Exception:
            self.fail('adls_adapter initialized without secret shouldn\'t throw exception when updating config.')

    def test_initialize_hostname_and_root(self):
        """
        Test initialize hostname and root for adls adapter.
        """
        host1 = 'storageaccount.dfs.core.windows.net'
        # Roots without/with leading and trailing slashes must all normalize
        # to a single leading slash and no trailing slash.
        adlsAdapter1 = ADLSAdapter(hostname=host1, root='root-without-slash', shared_key='')
        self.assertEqual(adlsAdapter1.hostname, 'storageaccount.dfs.core.windows.net')
        self.assertEqual(adlsAdapter1.root, '/root-without-slash')
        adapterPath1 = 'https://storageaccount.dfs.core.windows.net/root-without-slash/a/1.csv'
        corpusPath1 = adlsAdapter1.create_corpus_path(adapterPath1)
        self.assertEqual(corpusPath1, '/a/1.csv')
        self.assertEqual(adlsAdapter1.create_adapter_path(corpusPath1), adapterPath1)
        adlsAdapter1WithFolders = ADLSAdapter(hostname=host1, root='root-without-slash/folder1/folder2', shared_key='')
        self.assertEqual(adlsAdapter1WithFolders.root, '/root-without-slash/folder1/folder2')
        adapterPath2 = 'https://storageaccount.dfs.core.windows.net/root-without-slash/folder1/folder2/a/1.csv'
        corpusPath2 = adlsAdapter1WithFolders.create_corpus_path(adapterPath2)
        self.assertEqual(corpusPath2, '/a/1.csv')
        self.assertEqual(adlsAdapter1WithFolders.create_adapter_path(corpusPath2), adapterPath2)
        adlsAdapter2 = ADLSAdapter(hostname=host1, root='/root-starts-with-slash', shared_key='')
        self.assertEqual(adlsAdapter2.root, '/root-starts-with-slash')
        adlsAdapter2WithFolders = ADLSAdapter(hostname=host1, root='/root-starts-with-slash/folder1/folder2', shared_key='')
        self.assertEqual(adlsAdapter2WithFolders.root, '/root-starts-with-slash/folder1/folder2')
        adlsAdapter3 = ADLSAdapter(hostname=host1, root='root-ends-with-slash/', shared_key='')
        self.assertEqual(adlsAdapter3.root, '/root-ends-with-slash')
        adlsAdapter3WithFolders = ADLSAdapter(hostname=host1, root='root-ends-with-slash/folder1/folder2/', shared_key='')
        self.assertEqual(adlsAdapter3WithFolders.root, '/root-ends-with-slash/folder1/folder2')
        adlsAdapter4 = ADLSAdapter(hostname=host1, root='/root-with-slashes/', shared_key='')
        self.assertEqual(adlsAdapter4.root, '/root-with-slashes')
        adlsAdapter4WithFolders = ADLSAdapter(hostname=host1, root='/root-with-slashes/folder1/folder2', shared_key='')
        self.assertEqual(adlsAdapter4WithFolders.root, '/root-with-slashes/folder1/folder2')
        # Mount from config
        config = TestHelper.get_input_file_content(self.test_subpath, 'test_initialize_hostname_and_root', 'config.json')
        corpus = CdmCorpusDefinition()
        corpus.storage.mount_from_config(config)
        self.assertEqual(corpus.storage.fetch_adapter('adlsadapter1').root, '/root-without-slash')
        self.assertEqual(corpus.storage.fetch_adapter('adlsadapter2').root, '/root-without-slash/folder1/folder2')
        self.assertEqual(corpus.storage.fetch_adapter('adlsadapter3').root, '/root-starts-with-slash/folder1/folder2')
        self.assertEqual(corpus.storage.fetch_adapter('adlsadapter4').root, '/root-ends-with-slash/folder1/folder2')
        self.assertEqual(corpus.storage.fetch_adapter('adlsadapter5').root, '/root-with-slashes/folder1/folder2')
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1601517 | import json
import random
import string
from paypal_client import PayPalClient
from paypalpayoutssdk.payouts import PayoutsPostRequest
from paypalhttp.serializers.json_serializer import Json
class CreatePayouts(PayPalClient):
    """ Creates a payout batch with 5 payout items
    Calls the create batch api (POST - /v1/payments/payouts)
    A maximum of 15000 payout items are supported in a single batch request"""

    @staticmethod
    def build_request_body():
        """Build the payouts request body: one batch header plus five 1 USD items."""
        # Random 7-character batch id so repeated test runs do not collide.
        senderBatchId = str(''.join(random.sample(
            string.ascii_uppercase + string.digits, k=7)))
        return \
            {
                "sender_batch_header": {
                    "recipient_type": "EMAIL",
                    "email_message": "SDK payouts test txn",
                    "note": "Enjoy your Payout!!",
                    "sender_batch_id": senderBatchId,
                    "email_subject": "This is a test transaction from SDK"
                },
                # The five items differ only in sender_item_id, so generate
                # them instead of repeating the literal five times.
                "items": [
                    {
                        "note": "Your 5$ Payout!",
                        "amount": {
                            "currency": "USD",
                            "value": "1.00"
                        },
                        "receiver": "<EMAIL>",
                        "sender_item_id": "Test_txn_{}".format(item_number)
                    }
                    for item_number in range(1, 6)
                ]
            }

    def create_payouts(self, debug=False):
        """Create a payout batch via the Payouts API.

        :param debug: when True, print the batch id, status, and HATEOAS links
        :returns: the API response object
        """
        request = PayoutsPostRequest()
        request.request_body(self.build_request_body())
        response = self.client.execute(request)
        if debug:
            print("Status Code: ", response.status_code)
            print("Payout Batch ID: " +
                  response.result.batch_header.payout_batch_id)
            print("Payout Batch Status: " +
                  response.result.batch_header.batch_status)
            print("Links: ")
            for link in response.result.links:
                print('\t{}: {}\tCall Type: {}'.format(
                    link.rel, link.href, link.method))
            # To toggle print the whole body comment/uncomment the below line
            #json_data = self.object_to_json(response.result)
            #print("json_data: ", json.dumps(json_data, indent=4))
        return response
"""This is the driver function which invokes the create_payouts function to create
a Payouts Batch."""
if __name__ == "__main__":
CreatePayouts().create_payouts(debug=True)
| StarcoderdataPython |
3481921 | <filename>vrd/dbhandler.py
import hashlib
import io
import sqlite3
from multiprocessing import Pool
import numpy as np
from tqdm import tqdm
from .image_preprocessing import process_image
class VRDDatabase:
    """A sqlite3-backed store for per-frame fingerprints and thumbnails.

    Holds two tables: ``saved_dl_fingerprints`` (hash -> serialized numpy
    layer) and ``processed_thumbnails`` (hash -> compressed frame). Usually
    used with the ``with`` keyword to ensure it is properly committed and
    closed.
    """

    # Declared attribute types for readers; assigned in __init__.
    database_file: str
    cursor: sqlite3.Cursor
    connection: sqlite3.Connection

    def __init__(self, database_file: str) -> None:
        """Open (or create) the database file and ensure its schema exists.

        Args:
            database_file (str): Path to sqlite3 database file
        """
        self.connection = sqlite3.connect(database_file)
        self.cursor = self.connection.cursor()
        self.initialize_db()

    def __enter__(self) -> "VRDDatabase":
        """Support the ``with`` statement; return the open database handle."""
        return self

    def __exit__(self, _type, _value, _traceback) -> None:
        """Commit pending changes and close the connection on scope exit."""
        self.commit()
        self.close()

    def initialize_db(self) -> None:
        """Create the fingerprint/thumbnail tables and unique indexes if missing."""
        cursor = self.cursor
        conn = self.connection
        if not self.table_exists("saved_dl_fingerprints"):
            cursor.execute(
                """CREATE TABLE saved_dl_fingerprints (hash text, layer text)"""
            )
            # Unique index: one fingerprint row per frame hash.
            cursor.execute(
                "CREATE UNIQUE INDEX fingerprint_index ON saved_dl_fingerprints(hash)"
            )
            conn.commit()
        if not self.table_exists("processed_thumbnails"):
            cursor.execute(
                """CREATE TABLE processed_thumbnails (hash text NOT NULL, thumbnail blob NOT NULL)"""
            )
            cursor.execute(
                "CREATE UNIQUE INDEX thumbnail_index ON processed_thumbnails(hash)"
            )
            conn.commit()

    def table_exists(self, tablename: str) -> bool:
        """Return True if the given table exists, False otherwise."""
        cursor = self.cursor
        cursor.execute(
            """SELECT count(name) FROM sqlite_master WHERE type='table' AND name=? """,
            (tablename,),
        )
        return cursor.fetchone()[0] == 1

    def get_frame_list(self):
        """Return the list of frame hashes stored in the thumbnail table."""
        cursor = self.cursor
        cursor.execute("""SELECT hash FROM processed_thumbnails""")
        return [row[0] for row in cursor.fetchall()]

    @staticmethod
    def hash_string(string_to_hash) -> str:
        """Calculate the md5 hex digest of a string (or raw bytes).

        Args:
            string_to_hash: The string (or bytes) to hash

        Returns:
            str: The md5 hash of the input
        """
        # Bug fix: hashlib.md5 requires bytes; the original raised TypeError
        # for str input, which the docstring explicitly promised to accept.
        if isinstance(string_to_hash, str):
            string_to_hash = string_to_hash.encode("utf-8")
        return hashlib.md5(string_to_hash).hexdigest()

    def add_processed_frame(
        self, file_name_hash: str, frame, processed_frame=None
    ) -> None:
        """Store the (compressed) frame image for the given hash.

        This is the image actually analyzed by the neural network.

        Args:
            file_name_hash (str): The hash representing the file name
            frame: An np-array corresponding to the frame (ignored when
                ``processed_frame`` is supplied)
            processed_frame: Optional pre-compressed frame bytes
        """
        cursor = self.cursor
        if processed_frame is None:
            processed_frame = VRDDatabase.get_processed_frame(frame)
        cursor.execute(
            "REPLACE INTO processed_thumbnails VALUES (?,?)",
            (file_name_hash, processed_frame),
        )

    @staticmethod
    def get_processed_frame(frame):
        """Compress a frame with ``np.savez_compressed`` and return the bytes.

        Args:
            frame: The frame as a numpy array

        Returns:
            The compressed frame (in byte format)
        """
        output = io.BytesIO()
        np.savez_compressed(output, x=frame)
        return output.getvalue()

    def add_layer_data(self, file_name_hash: str, layer) -> None:
        """Store the fingerprint values for the given hash.

        The fingerprint is generally from a CNN layer - therefore the name -,
        but could be any numpy array.

        Args:
            file_name_hash (str): The hash representing the file name
            layer: An np-array holding the fingerprint
        """
        cursor = self.cursor
        output = io.BytesIO()
        np.savez(output, x=layer)
        np_string = output.getvalue()
        cursor.execute(
            "REPLACE INTO saved_dl_fingerprints VALUES (?,?)",
            (file_name_hash, np_string),
        )

    def add_many_layer_data(self, hash_layer_list):
        """Add several layers at once, which improves execution speed.

        Args:
            hash_layer_list: A list of tuples, containing hashes and values.
        """
        cursor = self.cursor
        # TODO: convert secondary value correctly (serialize like
        # add_layer_data) before executing!
        cursor.executemany(
            "INSERT INTO saved_dl_fingerprints VALUES (?, ?)", hash_layer_list
        )

    def get_frame(self, file_name_hash: str):
        """Get the decompressed frame for the specified hash, if any.

        Args:
            file_name_hash (str): A file hash representing the requested frame

        Returns:
            None if it doesn't exist, otherwise the stored numpy array.
        """
        cursor = self.cursor
        cursor.execute(
            "SELECT * FROM processed_thumbnails WHERE hash=?", (file_name_hash,)
        )
        row = cursor.fetchone()
        # Bug fix: a missing row is the only expected miss; the original bare
        # `except:` also hid real errors (e.g. corrupt data).
        if row is None:
            return None
        data = np.load(io.BytesIO(row[1]))
        return data["x"]

    def get_layer_data(self, file_name_hash: str):
        """Get the fingerprint data for the specified hash, if any.

        Args:
            file_name_hash (str): A file hash representing the requested frame

        Returns:
            None if it doesn't exist, otherwise the stored numpy array.
        """
        cursor = self.cursor
        cursor.execute(
            "SELECT * FROM saved_dl_fingerprints WHERE hash=?",
            (file_name_hash,),
        )
        row = cursor.fetchone()
        if row is None:
            return None
        data = np.load(io.BytesIO(row[1]))
        return data["x"]

    def commit(self):
        """Commits to the database"""
        self.connection.commit()

    def close(self):
        """Closes the database (safe to call more than once)."""
        try:
            self.connection.close()
        except Exception:
            # Closing an already-closed connection is harmless.
            pass

    def fill_with_processed_frames(self, frames, neural_network, pool_size=6):
        """Compress and store every frame image not yet in the database.

        Args:
            frames: object exposing ``all_images`` (iterable of frame names)
            neural_network: object exposing ``target_size`` for preprocessing
            pool_size (int, optional): worker process count. Defaults to 6.
        """
        # Set membership is O(1); the original checked a list per frame.
        already_exist = set(self.get_frame_list())
        to_process = set()
        for img in tqdm(frames.all_images):
            if img in already_exist:
                continue
            to_process.add(img)
        job_args = ((img, neural_network.target_size) for img in to_process)
        # Bug fix: the original referenced the bare (name-mangled) __calc_fun,
        # which raised NameError; also close the pool deterministically.
        with Pool(pool_size) as pool:
            for img, compressed_image in pool.imap_unordered(
                VRDDatabase._calc_fun, job_args
            ):
                self.add_processed_frame(img, None, compressed_image)

    @staticmethod
    def _calc_fun(args):
        """Worker: preprocess one image and return (name, compressed bytes)."""
        img, target_size = args
        # TODO: Verify these settings, perhaps we need to grab them from elsewhere? I.e. no trim etc
        processed_img = process_image(img, target_size, trim=True)
        compressed_img = VRDDatabase.get_processed_frame(processed_img)
        return (img, compressed_img)
| StarcoderdataPython |
9613997 | import pytz
import datetime
def convert_utc_to_cst(utc_datetime: datetime.datetime) -> datetime.datetime:
    """Interpret *utc_datetime* as UTC and convert it to China Standard Time."""
    shanghai = pytz.timezone("Asia/Shanghai")
    return utc_datetime.replace(tzinfo=pytz.utc).astimezone(shanghai)
| StarcoderdataPython |
1741139 | <filename>core/connection/connection.py<gh_stars>0
import socket
import os
import tempfile
import glob
import json
import string
import zipfile
HEADER_SIZE = 10
CHUNK_SIZE = 4 * 1024
END_DELIMETER = "*END_OF_FILE*"
COMMAND_DELIMETER = "<END_OF_COMMAND>"
def zip_it(to_download):
    """Zip a file or directory and return the archive's path.

    For a directory ``d`` the archive is created next to it as ``d.zip``;
    for a file the archive is created in the current working directory,
    named after the file's stem (``report.txt`` -> ``report.zip``).

    Args:
        to_download (str): Path of the file or directory to compress.

    Returns:
        str: Path of the zip archive that was written.
    """
    if os.path.isdir(to_download):
        zipped_name = to_download + ".zip"
        # Context manager guarantees the archive is closed/flushed.
        with zipfile.ZipFile(zipped_name, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for root, dirs, files in os.walk(to_download):
                for file in files:
                    zipf.write(os.path.join(root, file))
    else:
        base_name = os.path.basename(to_download)
        name, ext = os.path.splitext(base_name)
        zipped_name = name + '.zip'
        # Bugfix: open the *full* path (the bare basename fails when the
        # source lives outside the cwd) but store it under its base name.
        with zipfile.ZipFile(zipped_name, mode='w') as zf:
            zf.write(to_download, arcname=base_name)
    return zipped_name
class ClientConnection:
    """TCP client used by the remote-shell protocol in this module.

    Framing conventions (must match the server side exactly):
      * files end with ``END_DELIMETER``,
      * command output ends with ``COMMAND_DELIMETER``,
      * plain messages are raw UTF-8 chunks of up to 1024 bytes.
    """

    def __init__(self):
        # IPv4 / TCP stream socket; connected later via Connect().
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def Connect(self, server_ip, server_port):
        """Connect to the server and remember the endpoint."""
        self.socket.connect((server_ip, server_port))
        self.server_ip = server_ip
        self.server_port = server_port

    def receive_data(self):
        """Receive one chunk (<=1024 bytes) and return it decoded as UTF-8.

        NOTE(review): assumes a whole logical message fits in one recv()
        call — confirm against the server's send sizes.
        """
        self.data_in_bytes = self.socket.recv(1024)
        self.data = self.data_in_bytes.decode("utf-8")
        return self.data

    def send_data(self, data):
        """Encode *data* as UTF-8 and send it in one call."""
        self.data_in_bytes = bytes(data, "utf-8")
        self.socket.send(self.data_in_bytes)

    def receive_file(self, filename):
        """Receive a file stream into *filename* until END_DELIMETER.

        NOTE(review): the delimiter is only detected when it arrives at
        the very end of a chunk; a delimiter straddling two recv() chunks
        would be written into the file — verify with the sender's framing.
        """
        with open(filename, "wb") as file:
            while True:
                chunk = self.socket.recv(CHUNK_SIZE)
                if chunk.endswith(END_DELIMETER.encode()):
                    # Strip the trailing delimiter before the final write.
                    chunk = chunk[:-len(END_DELIMETER)]
                    file.write(chunk)
                    print("[+] Completed Transfer")
                    break
                if "NOT_FOUND".encode() in chunk:
                    # Server-side sentinel: the requested file does not exist.
                    print("[-] Unable to locate file")
                    break
                file.write(chunk)

    def send_command_result(self, command_result):
        """Send command output terminated by COMMAND_DELIMETER."""
        print("[+] Sending Command Result")
        chunk = command_result + COMMAND_DELIMETER
        chunk_bytes = chunk.encode()
        self.socket.sendall(chunk_bytes)

    def send_file(self, file2download):
        """Zip *file2download*, announce the archive name, then stream it."""
        print("[+] File/Folder selected : ", file2download)
        zipped_name = zip_it(file2download)
        file_content = b''
        with open(zipped_name, "rb") as file:
            file_content = file.read()
        # First message carries the archive name, then the payload plus
        # the end-of-file delimiter in a single sendall-less send.
        self.send_data(zipped_name)
        self.socket.send(file_content+END_DELIMETER.encode())
        # The temporary archive is no longer needed once transmitted.
        os.remove(zipped_name)

    def change_dir(self):
        """Interactive cd loop: apply remote `cd` commands, echo the cwd.

        NOTE(review): `command.strip("cd ")` strips the *characters*
        'c', 'd' and ' ' from both ends, not the "cd " prefix — paths
        beginning/ending with those characters will be mangled.
        """
        curr_dir = os.getcwd()
        self.send_data(curr_dir)
        while True:
            command = self.receive_data()
            if command == "quit" or command == "stop" or command == "exit":
                print("[-] Exiting menu")
                break
            if command.startswith("cd"):
                path2move = command.strip("cd ")
                if os.path.exists(path2move):
                    os.chdir(path2move)
                    pwd = os.getcwd()
                    self.send_data(pwd)
                else:
                    # Target does not exist: report the unchanged cwd.
                    self.send_data(os.getcwd())
            else:
                # Unknown command: just echo the current directory.
                self.send_data(os.getcwd())

    def Close(self):
        """Close the underlying socket."""
        self.socket.close()
| StarcoderdataPython |
3402343 | <reponame>jfthuong/pydpf-core
"""
to_elemental_fc
===============
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class to_elemental_fc(Operator):
    """Transform input fields into Elemental fields using an averaging
    process, result is computed on a given elements scoping.

    Parameters
    ----------
    fields_container : FieldsContainer
    mesh : MeshedRegion, optional
    mesh_scoping : Scoping, optional
    smoothen_values : bool, optional
        If it is set to true, elemental nodal fields
        are first averaged on nodes and then
        averaged on elements (default is
        false)
    collapse_shell_layers : bool, optional
        If true shell layers are averaged as well
        (default is false)


    Examples
    --------
    >>> from ansys.dpf import core as dpf

    >>> # Instantiate operator
    >>> op = dpf.operators.averaging.to_elemental_fc()

    >>> # Make input connections
    >>> my_fields_container = dpf.FieldsContainer()
    >>> op.inputs.fields_container.connect(my_fields_container)
    >>> my_mesh = dpf.MeshedRegion()
    >>> op.inputs.mesh.connect(my_mesh)
    >>> my_mesh_scoping = dpf.Scoping()
    >>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
    >>> my_smoothen_values = bool()
    >>> op.inputs.smoothen_values.connect(my_smoothen_values)
    >>> my_collapse_shell_layers = bool()
    >>> op.inputs.collapse_shell_layers.connect(my_collapse_shell_layers)

    >>> # Instantiate operator and connect inputs in one line
    >>> op = dpf.operators.averaging.to_elemental_fc(
    ...     fields_container=my_fields_container,
    ...     mesh=my_mesh,
    ...     mesh_scoping=my_mesh_scoping,
    ...     smoothen_values=my_smoothen_values,
    ...     collapse_shell_layers=my_collapse_shell_layers,
    ... )

    >>> # Get output data
    >>> result_fields_container = op.outputs.fields_container()
    """

    # NOTE: this class is autogenerated (see the module docstring); keep
    # manual edits to comments only so regeneration does not lose them.

    def __init__(
        self,
        fields_container=None,
        mesh=None,
        mesh_scoping=None,
        smoothen_values=None,
        collapse_shell_layers=None,
        config=None,
        server=None,
    ):
        super().__init__(name="to_elemental_fc", config=config, server=server)
        self._inputs = InputsToElementalFc(self)
        self._outputs = OutputsToElementalFc(self)
        # Eagerly connect any inputs supplied to the constructor.
        if fields_container is not None:
            self.inputs.fields_container.connect(fields_container)
        if mesh is not None:
            self.inputs.mesh.connect(mesh)
        if mesh_scoping is not None:
            self.inputs.mesh_scoping.connect(mesh_scoping)
        if smoothen_values is not None:
            self.inputs.smoothen_values.connect(smoothen_values)
        if collapse_shell_layers is not None:
            self.inputs.collapse_shell_layers.connect(collapse_shell_layers)

    @staticmethod
    def _spec():
        """Return the pin specification (inputs on pins 0, 1, 3, 7, 10;
        output on pin 0)."""
        description = """Transform input fields into Elemental fields using an averaging
            process, result is computed on a given elements scoping."""
        spec = Specification(
            description=description,
            map_input_pin_spec={
                0: PinSpecification(
                    name="fields_container",
                    type_names=["fields_container"],
                    optional=False,
                    document="""""",
                ),
                1: PinSpecification(
                    name="mesh",
                    type_names=["abstract_meshed_region"],
                    optional=True,
                    document="""""",
                ),
                3: PinSpecification(
                    name="mesh_scoping",
                    type_names=["scoping"],
                    optional=True,
                    document="""""",
                ),
                7: PinSpecification(
                    name="smoothen_values",
                    type_names=["bool"],
                    optional=True,
                    document="""If it is set to true, elemental nodal fields
            are first averaged on nodes and then
            averaged on elements (default is
            false)""",
                ),
                10: PinSpecification(
                    name="collapse_shell_layers",
                    type_names=["bool"],
                    optional=True,
                    document="""If true shell layers are averaged as well
            (default is false)""",
                ),
            },
            map_output_pin_spec={
                0: PinSpecification(
                    name="fields_container",
                    type_names=["fields_container"],
                    optional=False,
                    document="""""",
                ),
            },
        )
        return spec

    @staticmethod
    def default_config(server=None):
        """Returns the default config of the operator.

        This config can then be changed to the user needs and be used to
        instantiate the operator. The Configuration allows to customize
        how the operation will be processed by the operator.

        Parameters
        ----------
        server : server.DPFServer, optional
            Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
        """
        return Operator.default_config(name="to_elemental_fc", server=server)

    @property
    def inputs(self):
        """Enables to connect inputs to the operator

        Returns
        --------
        inputs : InputsToElementalFc
        """
        return super().inputs

    @property
    def outputs(self):
        """Enables to get outputs of the operator by evaluating it

        Returns
        --------
        outputs : OutputsToElementalFc
        """
        return super().outputs
class InputsToElementalFc(_Inputs):
    """Intermediate class used to connect user inputs to
    to_elemental_fc operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.averaging.to_elemental_fc()
    >>> my_fields_container = dpf.FieldsContainer()
    >>> op.inputs.fields_container.connect(my_fields_container)
    >>> my_mesh = dpf.MeshedRegion()
    >>> op.inputs.mesh.connect(my_mesh)
    >>> my_mesh_scoping = dpf.Scoping()
    >>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
    >>> my_smoothen_values = bool()
    >>> op.inputs.smoothen_values.connect(my_smoothen_values)
    >>> my_collapse_shell_layers = bool()
    >>> op.inputs.collapse_shell_layers.connect(my_collapse_shell_layers)
    """

    def __init__(self, op: Operator):
        super().__init__(to_elemental_fc._spec().inputs, op)
        # One Input wrapper per pin; pin numbers match _spec() above
        # (0, 1, 3, 7, 10 — the gaps are reserved by the operator).
        self._fields_container = Input(to_elemental_fc._spec().input_pin(0), 0, op, -1)
        self._inputs.append(self._fields_container)
        self._mesh = Input(to_elemental_fc._spec().input_pin(1), 1, op, -1)
        self._inputs.append(self._mesh)
        self._mesh_scoping = Input(to_elemental_fc._spec().input_pin(3), 3, op, -1)
        self._inputs.append(self._mesh_scoping)
        self._smoothen_values = Input(to_elemental_fc._spec().input_pin(7), 7, op, -1)
        self._inputs.append(self._smoothen_values)
        self._collapse_shell_layers = Input(
            to_elemental_fc._spec().input_pin(10), 10, op, -1
        )
        self._inputs.append(self._collapse_shell_layers)

    @property
    def fields_container(self):
        """Allows to connect fields_container input to the operator.

        Parameters
        ----------
        my_fields_container : FieldsContainer

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.averaging.to_elemental_fc()
        >>> op.inputs.fields_container.connect(my_fields_container)
        >>> # or
        >>> op.inputs.fields_container(my_fields_container)
        """
        return self._fields_container

    @property
    def mesh(self):
        """Allows to connect mesh input to the operator.

        Parameters
        ----------
        my_mesh : MeshedRegion

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.averaging.to_elemental_fc()
        >>> op.inputs.mesh.connect(my_mesh)
        >>> # or
        >>> op.inputs.mesh(my_mesh)
        """
        return self._mesh

    @property
    def mesh_scoping(self):
        """Allows to connect mesh_scoping input to the operator.

        Parameters
        ----------
        my_mesh_scoping : Scoping

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.averaging.to_elemental_fc()
        >>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
        >>> # or
        >>> op.inputs.mesh_scoping(my_mesh_scoping)
        """
        return self._mesh_scoping

    @property
    def smoothen_values(self):
        """Allows to connect smoothen_values input to the operator.

        If it is set to true, elemental nodal fields
        are first averaged on nodes and then
        averaged on elements (default is
        false)

        Parameters
        ----------
        my_smoothen_values : bool

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.averaging.to_elemental_fc()
        >>> op.inputs.smoothen_values.connect(my_smoothen_values)
        >>> # or
        >>> op.inputs.smoothen_values(my_smoothen_values)
        """
        return self._smoothen_values

    @property
    def collapse_shell_layers(self):
        """Allows to connect collapse_shell_layers input to the operator.

        If true shell layers are averaged as well
        (default is false)

        Parameters
        ----------
        my_collapse_shell_layers : bool

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.averaging.to_elemental_fc()
        >>> op.inputs.collapse_shell_layers.connect(my_collapse_shell_layers)
        >>> # or
        >>> op.inputs.collapse_shell_layers(my_collapse_shell_layers)
        """
        return self._collapse_shell_layers
class OutputsToElementalFc(_Outputs):
    """Intermediate class used to get outputs from
    to_elemental_fc operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.averaging.to_elemental_fc()
    >>> # Connect inputs : op.inputs. ...
    >>> result_fields_container = op.outputs.fields_container()
    """

    def __init__(self, op: Operator):
        super().__init__(to_elemental_fc._spec().outputs, op)
        # Single output pin 0: the averaged fields container.
        self._fields_container = Output(to_elemental_fc._spec().output_pin(0), 0, op)
        self._outputs.append(self._fields_container)

    @property
    def fields_container(self):
        """Allows to get fields_container output of the operator

        Returns
        ----------
        my_fields_container : FieldsContainer

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.averaging.to_elemental_fc()
        >>> # Connect inputs : op.inputs. ...
        >>> result_fields_container = op.outputs.fields_container()
        """  # noqa: E501
        return self._fields_container
| StarcoderdataPython |
54284 | import string
from rt import *
from listener import Listener
import traceback
import pytz
from datetime import datetime
class Ticket:
    """Slack bot component that links RT tickets mentioned in chat and
    answers statistics commands (!response, !update, !untagged, !touch...)."""

    def __init__(self, client):
        self.client = client
        Listener.register(self.on_ready, "on_ready")
        Listener.register(self.on_message, "on_message")
        Listener.register(self.on_loop, "on_loop")
        self.rt_stat = RT_Stat()
        self.ticket_url = "https://support.oit.pdx.edu/Ticket/Display.html?id="
        self.update_thread = None
        self.current_day = 0  # To detect day change
        self.updated_today = 0  # To check if the bot have updated today.

    def send_message(self, *args):
        """ Shortener. """
        return self.client.rtm_send_message(*args)

    def on_ready(self):
        pass

    def on_loop(self):
        """Periodic housekeeping: start the daily cache update and report
        completion of a previously started update thread."""
        if not self.updated_today:
            self.updated_today = 1
            self.update_thread = RT.update_cache()
        current_time = datetime.now(pytz.timezone('US/Pacific'))
        if current_time.weekday() != self.current_day:
            self.current_day = current_time.weekday()
        if self.update_thread and not self.update_thread.is_alive():
            # Bugfix: the original cleared update_thread *before* checking
            # for the bundled channel, so the completion message was never
            # sent; report first, then clear.
            finished = self.update_thread
            self.update_thread = None
            if hasattr(finished, 'channel'):
                # A channel is bundled when the update was triggered by
                # a manual !update; report back to that channel.
                error_count = finished.result.get()
                response = "Done updating\n"
                if error_count:
                    response += "There were {} errors found. Check the error log to see what they were.".format(error_count)
                self.send_message(finished.channel, response)
            print("Done updating!")

    def on_message(self, ctx):
        """Dispatch one incoming chat message to the matching handler."""
        try:  # Don't exit the bot when an error happens.
            if ctx.command and ctx.command[0] != '!':
                # Ticket linker: expand every "#1234" in plain chat.
                try:
                    ticket_list = self.parse_message_for_tickets(ctx.message)
                    response = ""
                    for ticket_number in ticket_list:
                        ticket = RT.get_ticket(ticket_number)
                        response += self.ticket_url + str(ticket_number) + "\n" + \
                                    "Subject: " + ticket.content['Subject'] + "\n"
                    self.send_message(ctx.channel, response)
                except:
                    traceback.print_exc()

            if ctx.command in ["!response"]:
                if len(ctx.args) == 1:
                    try:
                        days_ago = int(ctx.args[0])
                    except ValueError:
                        traceback.print_exc()
                        # Bugfix: `channel_id` was undefined here (NameError)
                        # and execution fell through with days_ago unbound.
                        self.send_message(ctx.channel, "Invalid value. Please enter amount of days.")
                    else:
                        self.response_command(ctx.channel, days_ago)

            if ctx.command in ["!update"]:
                pre_response = "Updating {} tickets since {}".format(RT.get_amount_to_update(), RT.get_last_updated())
                self.send_message(ctx.channel, pre_response)
                self.update_thread = RT.update_cache()
                # Tag the thread with the channel so on_loop can report back.
                self.update_thread.channel = ctx.channel

            if ctx.command in ["!last_updated"]:
                response = "There are {} tickets to update since {}".format(RT.get_amount_to_update(), RT.get_last_updated())
                self.send_message(ctx.channel, response)

            if ctx.command in ["!untagged"]:
                untagged = self.rt_stat.untag_blame()
                if not untagged:
                    response = ":smile: Woo! All the tickets are tagged! :smile:"
                    self.send_message(ctx.channel, response)
                    return
                response = ":angry: Hey! You guys didn't tag your tickets!!! :angry:\n"
                for person in untagged.keys():
                    response += "{}: {}.\n".format(person, ", ".join(map(str, untagged[person])))
                response += "(This is only for fun, it's not designed to place blame on anyone!)"
                self.send_message(ctx.channel, response)

            if ctx.command in ['!touch', '!touches', '!tt']:
                # Bugfix: the original indexed ctx.args[1] after only checking
                # len >= 1 (IndexError) and messaged an undefined `channel_id`.
                if len(ctx.args) >= 2:
                    username = ctx.args[0]
                    try:
                        days_ago = int(ctx.args[1])
                    except ValueError:
                        traceback.print_exc()
                        self.send_message(ctx.channel, "Invalid value. Please enter amount of days.")
                    else:
                        self.ticket_touch_command(ctx.channel, days_ago, username)
        except:
            traceback.print_exc()
            self.send_message(ctx.channel, "An error has occured in the bot... :thinking_face:")

    def response_command(self, channel_id, days_ago):
        """Report average/slowest/fastest response times over *days_ago* days."""
        # Bugfix: the validation result was previously ignored.
        if not self.validate_days_ago(channel_id, days_ago):
            return
        response = self.rt_stat.get_average_response_time(days_ago)
        if response == None:
            self.send_message(channel_id, "No tickets found for the last {} days. Do !update to update cache.".format(days_ago))
            return
        avg_time, slowest, fastest, no_response, no_response_list = response
        avg_time = self.hms(int(avg_time))
        response = "Response time in the last " + str(days_ago) + " days:\n" + \
                   "Average time: {:.0f}h, {:.0f}m, {:.0f}s.".format(*avg_time) + \
                   "\nSlowest time: {:.0f}h, {:.0f}m, {:.0f}s, ticket #{}\n".format(*self.hms(slowest[1]), slowest[0]) + \
                   "Fastest time: {:.0f}h, {:.0f}m, {:.0f}s, ticket #{}\n".format(*self.hms(fastest[1]), fastest[0]) + \
                   "No response: {} out of {}.\n".format(*no_response) + \
                   "No response tickets: {}.\n".format(' '.join(["#" + str(s) for s in no_response_list])) + \
                   "(Note that this does not include weekends while calculating time)"
        self.send_message(channel_id, response)

    def ticket_touch_command(self, channel_id, days_ago, username=None):
        """Report how many tickets *username* touched in the last *days_ago* days."""
        # Bugfix: the validation result was previously ignored.
        if not self.validate_days_ago(channel_id, days_ago):
            return
        touch_dict = self.rt_stat.ticket_touches(days_ago, username)
        if username:
            touch_count = touch_dict  # Rename: per-user call returns a count.
            response = "{} ticket touches for {}".format(touch_count, username)
            self.send_message(channel_id, response)

    def validate_days_ago(self, channel_id, days_ago):
        """ Generic responder for invalid days_ago input. Returns false for invalid input."""
        if days_ago < 0:
            self.send_message(channel_id, "Positive numbers please!")
            return False
        if days_ago > 365:
            self.send_message(channel_id, "Sorry I only have tickets up to 1 year old... :cry:")
            return False
        return True

    def parse_message_for_tickets(self, message):
        """ Parse a message and create an integer list of ticket numbers. """
        message_split = message.split(" ")
        ticket_list = []
        for word in message_split:
            if not word:
                continue
            if word[0] == '#':
                try:
                    # Strip punctuation ("#123," -> 123); non-numeric words
                    # after '#' are ignored.
                    ticket_number = int(word[1:].translate(str.maketrans('', '', string.punctuation)))
                except ValueError:
                    continue
                if ticket_number < 0 or ticket_number in ticket_list:
                    continue
                ticket_list.append(ticket_number)
        ticket_list.sort()
        return ticket_list

    def hms(self, seconds):
        """
        Convert seconds to H:M:S in a tuple.
        """
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        return (h, m, s)
| StarcoderdataPython |
5059974 | <gh_stars>1-10
import keras
import util
from datetime import datetime
def train(args, preprocess_manager):
    """Build, compile and fit a recurrent next-activity model.

    Args:
        args: Config namespace; uses dnn_architecture (0=LSTM, 1=GRU,
            2=SimpleRNN), learning_rate, checkpoint_dir, num_folds,
            batch_size_train and dnn_num_epochs.
        preprocess_manager: Supplies the encoded training set and the
            current cross-validation fold index.

    Returns:
        float: Training wall-clock time in seconds.
    """
    util.llprint("Loading Data starts... \n")
    X, y, sequence_max_length, num_features_all, num_features_activities = preprocess_manager.create_and_encode_training_set(
        args)
    util.llprint("Loading Data done!\n")

    print('Build model...')
    # Pick the recurrent cell family; all variants share the same shape:
    # input -> recurrent(100) -> batch-norm -> softmax over activities.
    # LSTM
    if args.dnn_architecture == 0:
        main_input = keras.layers.Input(shape=(sequence_max_length, num_features_all), name='main_input')
        l1 = keras.layers.recurrent.LSTM(100, implementation=2, activation="tanh", kernel_initializer='glorot_uniform',
                                         return_sequences=False, dropout=0.2)(main_input)
        b1 = keras.layers.normalization.BatchNormalization()(l1)
    # GRU
    elif args.dnn_architecture == 1:
        main_input = keras.layers.Input(shape=(sequence_max_length, num_features_all), name='main_input')
        l1 = keras.layers.recurrent.GRU(100, implementation=2, activation="tanh", kernel_initializer='glorot_uniform',
                                        return_sequences=False, dropout=0.2)(main_input)
        b1 = keras.layers.normalization.BatchNormalization()(l1)
    # RNN
    elif args.dnn_architecture == 2:
        main_input = keras.layers.Input(shape=(sequence_max_length, num_features_all), name='main_input')
        l1 = keras.layers.recurrent.SimpleRNN(100, implementation=2, activation="tanh",
                                              kernel_initializer='glorot_uniform', return_sequences=False, dropout=0.2)(
            main_input)
        b1 = keras.layers.normalization.BatchNormalization()(l1)

    # +1 output unit: activities plus an end-of-sequence class.
    activity_output = keras.layers.Dense(num_features_activities + 1, activation='softmax', name='activity_output',
                                         kernel_initializer='glorot_uniform')(b1)
    model = keras.models.Model(inputs=[main_input], outputs=[activity_output])

    optimizer = keras.optimizers.Nadam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8,
                                       schedule_decay=0.004, clipvalue=3)
    model.compile(loss={'activity_output': 'categorical_crossentropy'}, optimizer=optimizer)

    # Early stopping + best-only checkpointing per cross-validation fold,
    # with learning-rate reduction on validation-loss plateaus.
    early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
    model_checkpoint = keras.callbacks.ModelCheckpoint(
        '%smodel_%s.h5' % (args.checkpoint_dir, preprocess_manager.iteration_cross_validation), monitor='val_loss',
        verbose=0, save_best_only=True, save_weights_only=False, mode='auto')
    lr_reducer = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=0, mode='auto',
                                                   min_delta=0.0001, cooldown=0, min_lr=0)
    model.summary()

    start_training_time = datetime.now()
    # One fold's worth of data is held out for validation.
    model.fit(X, {'activity_output': y}, validation_split=1 / args.num_folds,
              callbacks=[early_stopping, model_checkpoint, lr_reducer], verbose=1, batch_size=args.batch_size_train,
              epochs=args.dnn_num_epochs)
    training_time = datetime.now() - start_training_time

    return training_time.total_seconds()
| StarcoderdataPython |
4823118 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Component: YLPF
# Package : battm
# Name : UKF
# Author : NBIC
# License : BSD 2-Clause
# -------------------------------------------------------------------
#
# Copyright (C) 2019 NEC Brain Inspired Computing
# Research Alliance Laboratories
# All Rights Reserved.
# -------------------------------------------------------------------
#
# -------------------------------------------------------------------
# Provider: NEC Brain Inspired Computing Research Alliance Laboratories
# DOI: https://doi.org/10.1371/journal.pcbi.1004442
# Title: A Bayesian Attractor Model for Perceptual Decision Making
# Class: UKF
# Role: UKF Class.
# -------------------------------------------------------------------
#
import math
import numpy as np
from hopfield import tanh_sigmoid
# unscented kalman filter(UKF)
# UKF の論文: https://doi.org/10.1109/ASSPCC.2000.882463
# 予測結果
class PredictedResult(object):
    """Result of the UKF prediction step.

    Attributes:
        predx: Predicted state mean x_k|k-1.
        predP: Predicted state covariance P_k|k-1.
        samples: Transformed sigma points used for the prediction.
    """

    def __init__(self):
        # Bugfix: the initializer was misspelled `__init` and never ran;
        # attributes are now reliably initialized to None.
        self.predx = None
        self.predP = None
        self.samples = None
# 推定結果
class EstimationResult(object):
    """Result of the UKF update step: estimated mean and covariance."""

    def __init__(self):
        # Estimated state mean x_k|k and covariance P_k|k.
        self.estx = None
        self.estP = None
# system model
# x' = f(x,w) ; n-vector
# y = h(x) + v ; m-vector
#
# given:
# E[w]=0, V[w]=Q
# E[v]=0, V[v]=R
# initial state: x0|0, P0|0
class UKFModel(object):
    """Container for the UKF system model.

    x' = f(x, w)   (n-vector state transition, noise w ~ N(0, Q))
    y  = h(x) + v  (m-vector observation,       noise v ~ N(0, R))
    """

    def __init__(self, n, m, J, obs, Q, R, x0, P0):
        self.n = n      # state dimension
        self.m = m      # observation dimension
        self.J = J      # coupling matrix (passed instead of a lambda f)
        self.obs = obs  # observation spec in list form (instead of h)
        self.Q = Q      # process-noise covariance
        self.R = R      # observation-noise covariance
        self.x0 = x0    # initial state mean x0|0
        self.P0 = P0    # initial state covariance P0|0
def UKF_makeModel(n, m, J, obs, Q, R, x0, P0):
    """Convenience factory wrapping the UKFModel constructor."""
    return UKFModel(n, m, J, obs, Q, R, x0, P0)
# xe : x_k-1|k-1
# Pe : P_k-1|k-1
def UKF_predict(cfg, xe, Pe):
    """UKF prediction step.

    Args:
        cfg: Config object; reads cfg.BAttM_model (the UKFModel, which also
            carries a `_logger`).
        xe: Previous estimate mean x_k-1|k-1.
        Pe: Previous estimate covariance P_k-1|k-1.

    Returns:
        PredictedResult with predicted mean, covariance and sigma samples.
    """
    model = cfg.BAttM_model
    logger = model._logger
    # Dimension of the process noise, derived from Q's size (Q is square).
    num_w = int(math.sqrt(model.Q.size))
    # Augment the state with the process-noise mean (zeros).
    estX = np.hstack((xe, np.zeros(num_w)))
    # Augment the covariance into block-diagonal [[Pe, 0], [0, Q]].
    # NOTE(review): the zero block is created as (num_w, n); hstack only
    # lines up when num_w == n (true when Q is n x n) — confirm intent.
    estP = np.hstack((Pe, np.zeros((num_w, model.n))))
    estP = np.vstack((estP, np.hstack((np.zeros((model.n, num_w)), model.Q))))
    # Generate the sigma points and their weights.
    samples_X = UKF_sample(cfg, estX, estP, (model.n + num_w))
    weights_s = UKF_weights_s(cfg, (model.n + num_w))
    weights_c = UKF_weights_c(cfg, (model.n + num_w))
    # Unscented transform through the state-transition function f.
    samples_x2 = UKF_Utransformation_f(cfg, samples_X)
    # Predicted mean: weighted sum of the transformed sigma points.
    predx = list_weightedSum(samples_x2, weights_s)
    # Predicted covariance: weighted outer products of the deviations.
    p = [*map(lambda x: x - predx, samples_x2)]
    predP = list_weightedSquareSum(cfg, p, weights_c)
    res = PredictedResult()
    res.predx = predx
    res.predP = predP
    res.samples = samples_x2
    return res
#UKF.update = function(model,pred,y){
def UKF_update(cfg, pred, y):
    """UKF update (correction) step.

    Args:
        cfg: Config object; reads cfg.BAttM_model.
        pred: PredictedResult from UKF_predict.
        y: Observation vector (m elements).

    Returns:
        EstimationResult with the corrected mean and covariance.
    """
    model = cfg.BAttM_model
    logger = model._logger
    # Noise dimensions derived from the (square) covariance sizes.
    num_v = int(math.sqrt(model.R.size))
    num_w = int(math.sqrt(model.Q.size))
    num_x = int(pred.predx.size)
    logger.debug("UKF Update v:{} w:{} x:{}".format(num_v, num_w, num_x))
    # Reuse the sigma points produced by the prediction step.
    samples_X = pred.samples
    weights_s = UKF_weights_s(cfg, (model.n + num_w))
    weights_c = UKF_weights_c(cfg, (model.n + num_w))
    # Unscented transform through the observation function h (list form).
    samples_y = UKF_Utransformation_h(samples_X, model.obs)
    predy = list_weightedSum(samples_y, weights_s)
    # Innovation covariance Pyy (plus observation noise R).
    pyy = [*map(lambda x: x - predy, samples_y)]
    wpyy = list_weightedSquareSum(cfg, pyy, weights_c)
    predPyy = wpyy + model.R
    # Cross covariance Pxy between state and observation deviations.
    pxy_l1 = [*map(lambda x: x - pred.predx, pred.samples)]
    pxy_l2 = [*map(lambda x: x - predy, samples_y)]
    predPxy = list_weightedCrossSum(cfg, pxy_l1, pxy_l2, weights_c)
    # Kalman gain and the standard mean/covariance correction.
    K = np.dot(predPxy, np.linalg.inv(predPyy))
    estx = pred.predx + np.dot(K, (y - predy))
    estP = pred.predP - np.dot(np.dot(K, predPyy), K.T)
    res = EstimationResult()
    res.estx = estx
    res.estP = estP
    return res
# Compute the 2n+1 sigma points for the unscented transform.
def UKF_sample(Cfg, estX, estP, n):
    """Return the sigma points [x, x +/- sqrt(n+lambda) * sqrt(P) rows].

    Follows eq. (15) of the UKF paper (Wan & van der Merwe, 2000), with
    lambda = alpha^2 (L + kappa) - L read from Cfg.UKF.
    """
    logger = Cfg.BAttM_model._logger
    x = list()
    # Matrix square root of the covariance (SVD-based, see matrix_sqrt).
    estSigma = matrix_sqrt(estP)
    # lambda = alpha^2 * (n + kappa) - n  (spread parameter)
    lambd = math.pow(Cfg.UKF.alpha, 2) * (n + Cfg.UKF.kappa) - n
    # Center point first, then the symmetric +/- pairs per sqrt-row.
    x.append(estX)
    for i in np.arange(0, n):
        x.append(estX + math.sqrt(n + lambd) * estSigma[i, ])
        x.append(estX - math.sqrt(n + lambd) * estSigma[i, ])
    logger.debug("lambd:{}".format(lambd))
    return x
# Unscented transform through the state-transition function f.
def UKF_Utransformation_f(Cfg, samples):
    """Propagate each augmented sigma point through the dynamics.

    Each sample is [z (n state dims) | w (process noise)]; the transform
    computes z + f(z) + w where f is the Hopfield-style drift ff_sigmoid.

    NOTE(review): reads model.UKF.J, model.tau and model.rambda, which are
    not set by UKFModel.__init__ — they must be attached elsewhere; confirm.
    """
    model = Cfg.BAttM_model
    logger = model._logger
    ukf = model.UKF
    n = model.n
    res = list()
    # Apply the transition to every sigma point in turn.
    for i in np.arange(0, len(samples)):
        # Split the augmented vector: state part zw[:n], noise part zw[n:].
        zw = samples[i]
        f = ff_sigmoid(zw[:n], ukf.J, model.tau, model.rambda)
        ff = zw[:n] + f + zw[n:]
        res.append(ff)
    return np.array(res)
# f = lambda z: ((-1/tau) * z) + (J * tanh(rambda * z))
def ff_sigmoid(x, J, tau, rambda):
    """Hopfield-style drift: (-1/tau) * x + J @ tanh(rambda * x)."""
    decay = (-1.0 / tau) * x
    coupling = np.dot(J, np.tanh(rambda * x))
    return decay + coupling
# Unscented transform through the observation function h (list form `obs`).
def UKF_Utransformation_h(samples_X, obs):
    """Map each sigma point to observation space.

    `obs` is a 4-tuple (obs_list, state_list, r, o): per-attractor
    observation values, per-attractor +/-1 state patterns, and the two
    sigmoid parameters passed to hopfield.tanh_sigmoid.
    """
    obs_list = obs[0]
    state_list = obs[1]
    r = obs[2]
    o = obs[3]

    result = list()
    # Observation function definition (as used by the UKF).
    n = len(obs_list)
    for c in np.arange(0, len(samples_X)):
        ans = 0
        z = samples_X[c]
        # Squash the state through the sigmoid defined in hopfield.py.
        res_sig = tanh_sigmoid(z, r, o)
        # For every attractor: weight its observation value by how well
        # the squashed state matches the attractor's +/-1 pattern
        # (eq. (5) of the referenced paper).
        for i in np.arange(0, n):
            list_1 = state_list[i] == 1
            list_m1 = state_list[i] == -1
            idx = np.prod(res_sig[list_1]) * np.prod(1 - res_sig[list_m1])
            ans += obs_list[i] * idx
        result.append(ans)
    return result
# Mean weights for the sigma points.
def UKF_weights_s(Cfg, n):
    """Return the 2n+1 mean weights W^(m) (UKF paper, eq. 15)."""
    lambd = Cfg.UKF.alpha ** 2 * (n + Cfg.UKF.kappa) - n
    side_weight = 1.0 / (2.0 * (n + lambd))
    # Center point first, then one identical weight per +/- sigma point.
    return [lambd / (n + lambd)] + [side_weight] * (2 * n)
# Covariance weights for the sigma points.
def UKF_weights_c(Cfg, n):
    """Return the 2n+1 covariance weights W^(c) (UKF paper, eq. 15)."""
    lambd = Cfg.UKF.alpha ** 2 * (n + Cfg.UKF.kappa) - n
    # W^(c)_0 adds the (1 - alpha^2 + beta) correction to the mean weight.
    center = lambd / (n + lambd) + (1 - Cfg.UKF.alpha ** 2 + Cfg.UKF.beta)
    return [center] + [1.0 / (2.0 * (n + lambd))] * (2 * n)
# 行列の平方根を求める
#matrix.sqrt = function(A){
# Dbg.fbegin(match.call()[[1]])
#
# # 特異値分解(SVD)を行う
# # X = U %*% D %*% V を満たす U, D, V を求める
# tmp = svd(A)
#
# # 直行行列 U
# U = tmp$u
#
# # 直行行列 V
# V = tmp$v
#
# # X の特異値を対角成分とする対角行列 D
# # 単位行列を作成する
# D = diag(sqrt(tmp$d))
#
# Dbg.fend(match.call()[[1]])
# return( U %*% D %*% t(V))
#}
def matrix_sqrt(A):
    """Matrix square root via SVD: A = U diag(d) V^T -> U diag(sqrt(d)) V^T."""
    U, singular_values, Vt = np.linalg.svd(A, full_matrices=True)
    root_diag = np.diag(np.sqrt(singular_values))
    return U @ root_diag @ Vt
# リストの重み付き和を計算する
# ans = \sum^{n}_{i=1} l_i \times w_i
def list_weightedSum(l, weights):
    """Return sum_i l[i] * weights[i] (weighted sum of the list entries)."""
    ans = 0
    for i, value in enumerate(l):
        ans = ans + value * weights[i]
    return ans
# リストの重み付き平方和をもとめる
# ans = \sum^{n}_{i=1} l_i * {}^t l_i \times w_i
def list_weightedSquareSum(cfg, l, weights):
    """Return sum_i weights[i] * outer(l[i], l[i]); `cfg` is unused (kept for API)."""
    total = None
    for i, vec in enumerate(l):
        term = weights[i] * np.outer(vec, vec)
        total = term if total is None else total + term
    return total
# リストの重み付き直交和をもとめる
# ans = \sum^n_{i=1} (l1_i * {}^t l2_i ) \times w_i
def list_weightedCrossSum(cfg, l1, l2, weights):
    """Return sum_i weights[i] * outer(l1[i], l2[i]); `cfg` is unused (kept for API)."""
    total = None
    for i, v1 in enumerate(l1):
        term = weights[i] * np.outer(v1, l2[i])
        total = term if total is None else total + term
    return total
| StarcoderdataPython |
# Using the root title function to change the title
# of the GUI

# Import the tkinter widget classes (Tk, Label, ...).
from tkinter import *

# Initialise the root (top-level) window.
root = Tk()

# Use the title function to set the window title.
root.title("Simple Title ")

# A simple label packed into the window.
myLabel1 = Label(root, text="This the lable of the GUI")
myLabel1.pack()

# Run the main GUI event loop (blocks until the window is closed).
root.mainloop()
| StarcoderdataPython |
133329 | <filename>week3/exercises/how-many.py<gh_stars>0
'''
write a procedure, called how_many, which returns the sum of the number of values associated with a dictionary
'''
def how_many(aDict):
    """
    aDict: A dictionary, where all the values are lists.
    returns: int, how many values are in the dictionary.
    """
    # Each value is a list; the answer is the total element count.
    return sum(len(value) for value in aDict.values())
| StarcoderdataPython |
8071905 | # -*- coding: utf-8 -*-
"""JSON-L parser plugin related functions and classes for testing."""
import json
from dfvfs.helpers import text_file
from plaso.storage.fake import writer as fake_writer
from tests.parsers import test_lib
class JSONLPluginTestCase(test_lib.ParserTestCase):
  """JSON-L parser plugin test case."""

  def _ParseJSONLFileWithPlugin(
      self, path_segments, plugin, knowledge_base_values=None):
    """Parses a file as a JSON-L log file and returns a storage writer.

    This method will first test if a JSON-L log file has the required format
    using plugin.CheckRequiredFormat() and then extracts events using
    plugin.Process().

    Args:
      path_segments (list[str]): path segments inside the test data directory.
      plugin (JSONLPlugin): JSON-L log file plugin.
      knowledge_base_values (Optional[dict[str, object]]): knowledge base
          values.

    Returns:
      FakeStorageWriter: storage writer.
    """
    storage_writer = fake_writer.FakeStorageWriter()
    storage_writer.Open()

    file_entry = self._GetTestFileEntry(path_segments)
    parser_mediator = self._CreateParserMediator(
        storage_writer, file_entry=file_entry,
        knowledge_base_values=knowledge_base_values)

    file_object = file_entry.GetFileObject()
    text_file_object = text_file.TextFile(file_object)

    # Only the first JSON-L line is needed to probe the plugin's format.
    line = text_file_object.readline()
    json_dict = json.loads(line)

    required_format = plugin.CheckRequiredFormat(json_dict)
    self.assertTrue(required_format)

    # Process() re-reads the file object from its current position.
    plugin.Process(parser_mediator, file_object=file_object)

    return storage_writer
| StarcoderdataPython |
1603342 | <reponame>mfkiwl/OpenCGRA-1<filename>fu/single/test/PhiRTL_test.py
"""
==========================================================================
PhiRTL_test.py
==========================================================================
Test cases for functional unit Phi.
Author : <NAME>
Date : November 27, 2019
"""
from pymtl3 import *
from pymtl3.stdlib.test import TestSinkCL
from pymtl3.stdlib.test.test_srcs import TestSrcRTL
from ..PhiRTL import PhiRTL
from ....lib.opt_type import *
from ....lib.messages import *
#-------------------------------------------------------------------------
# Test harness
#-------------------------------------------------------------------------
class TestHarness( Component ):
  """Wires test sources and a sink around the Phi functional unit.

  Two data sources drive the FU's input operands, one source drives the
  control word, and a sink checks the single output stream.
  """

  def construct( s, FunctionUnit, DataType, CtrlType, num_inports, num_outports,
                 data_mem_size, src0_msgs, src1_msgs, src_opt, sink_msgs ):

    s.src_in0  = TestSrcRTL( DataType, src0_msgs  )
    s.src_in1  = TestSrcRTL( DataType, src1_msgs  )
    s.src_opt  = TestSrcRTL( CtrlType, src_opt    )
    s.sink_out = TestSinkCL( DataType, sink_msgs  )

    s.dut = FunctionUnit( DataType, CtrlType, num_inports, num_outports,
                          data_mem_size )

    # Sources feed the DUT's recv ports; the DUT's first output feeds the sink.
    connect( s.src_in0.send,     s.dut.recv_in[0] )
    connect( s.src_in1.send,     s.dut.recv_in[1] )
    connect( s.src_opt.send,     s.dut.recv_opt   )
    connect( s.dut.send_out[0],  s.sink_out.recv  )

  def done( s ):
    # Simulation is complete once all sources drained and the sink matched.
    return s.src_in0.done() and s.src_in1.done() and\
           s.src_opt.done() and s.sink_out.done()

  def line_trace( s ):
    return s.dut.line_trace()
def run_sim( test_harness, max_cycles=100 ):
  """Elaborate and simulate a harness until done, asserting on timeout."""
  test_harness.elaborate()
  test_harness.apply( SimulationPass() )
  test_harness.sim_reset()

  # Tick until the harness reports completion, tracing every cycle.
  cycle = 0
  print()
  print( "{}:{}".format( cycle, test_harness.line_trace() ) )
  while not test_harness.done() and cycle < max_cycles:
    test_harness.tick()
    cycle += 1
    print( "{}:{}".format( cycle, test_harness.line_trace() ) )

  # Check timeout
  assert cycle < max_cycles

  # A few extra ticks to let trailing activity settle.
  for _ in range( 3 ):
    test_harness.tick()
def test_Phi():
  """Drive PhiRTL with three operand pairs; the unit should select the
  operand whose predicate bit is set."""
  FU = PhiRTL
  # 16-bit payload with 1-bit predicate and 1-bit bypass fields.
  DataType = mk_data( 16, 1, 1)
  CtrlType = mk_ctrl()
  num_inports = 2
  num_outports = 1
  data_mem_size = 8
  FuInType = mk_bits( clog2( num_inports + 1 ) )
  # Route input i of the FU from register/port i+1 (1-based encoding).
  pickRegister = [ FuInType( x+1 ) for x in range( num_inports ) ]
  # DataType(payload, predicate): exactly one of each pair is valid.
  src_in0 = [ DataType(1, 0), DataType(3, 1), DataType(6, 0) ]
  src_in1 = [ DataType(0, 0), DataType(5, 0), DataType(2, 1) ]
  src_opt = [ CtrlType( OPT_PHI, pickRegister ),
      CtrlType( OPT_PHI, pickRegister ),
      CtrlType( OPT_PHI, pickRegister ) ]
  # Expected: first cycle picks in0 by default, then whichever predicate is 1.
  sink_out = [ DataType(1, 1), DataType(3, 1), DataType(2, 1) ]
  th = TestHarness( FU, DataType, CtrlType, num_inports, num_outports,
      data_mem_size, src_in0, src_in1, src_opt, sink_out )
  run_sim( th )
| StarcoderdataPython |
11339956 | <gh_stars>1-10
'''
Write a program that accepts a sentence and calculate the number of upper case letters and lower case letters.
Suppose the following input is supplied to the program:
Hello world!
Then, the output should be:
UPPER CASE 1
LOWER CASE 9
'''
def count_cases(text):
    """Return an ``(upper, lower)`` tuple counting ASCII letters in *text*.

    Only ASCII letters are counted, matching the original ``ord()``-range
    checks (non-ASCII letters are ignored).
    """
    upper = sum(1 for ch in text if 'A' <= ch <= 'Z')
    lower = sum(1 for ch in text if 'a' <= ch <= 'z')
    return upper, lower


if __name__ == "__main__":
    # Guarding under __main__ makes the module importable without prompting.
    # Prompt text kept identical to the original script.
    ip = input('Enter a sentence fow which upper and lower case letter need to be counted : ')
    upper, lower = count_cases(ip)
    print('UPPER CASE ' + str(upper))
    print('LOWER CASE ' + str(lower))
11370620 | <gh_stars>1-10
import cx_Freeze

# Executable target: the game's entry-point script.
executables = [cx_Freeze.Executable("./src/RexRun.py")]

cx_Freeze.setup(
    name="T-Rex Run",
    options = {
        "build_exe": {
            # pygame must be listed explicitly so cx_Freeze bundles it.
            "packages":["pygame"],
            # Game modules and sprite assets copied next to the frozen exe.
            "include_files":[
                "./src/ObstacleClass.py",
                "./src/PlayerClass.py",
                "./src/settings.py",
                "./src/imgs/dino.png",
                "./src/imgs/dino_left_foot_up.png",
                "./src/imgs/dino_right_foot_up.png",
            ]
        }
    },
    executables = executables
)
4995937 | # Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoding schemes to encode/decode numpy data types non supported by JAX, e.g.
`datetime64` and `string_` dtypes."""
import datetime
from typing import Any, Callable, Dict, NamedTuple, TypeVar, Union
import numpy as onp
import pandas as pd
from jax import numpy as jnp
from pandas.tseries.offsets import DateOffset
from sklearn.preprocessing import LabelEncoder
DTypeLike = TypeVar("DTypeLike")
def encode_int64(seed: int) -> onp.ndarray:
    """Encode a 64-bit integer as two 32-bit limbs.

    Args:
        seed: a 64- or 32-bit integer scalar (python int, NumPy scalar or
            0-d ndarray).

    Returns:
        an array of shape (2,) and dtype uint32 holding (high, low) words.

    References
        See
        - jax implementation of PRNGKey.
          https://jax.readthedocs.io/en/latest/_autosummary/jax.random.PRNGKey.html # noqa
        - https://codereview.stackexchange.com/questions/80386/packing-and-unpacking-two-32-bit-integers-into-an-unsigned-64-bit-integer # noqa

    Note:
        0xFFFFFFFF = 2**32 - 1 masks the low 32 bits.
    """
    if onp.shape(seed):
        raise TypeError("seed must be a scalar.")
    if isinstance(seed, onp.ndarray):
        # BUG FIX: np.asscalar() was deprecated and removed (NumPy 1.23);
        # ndarray.item() is the supported replacement.
        seed = seed.item()
    if not isinstance(seed, (int, onp.int32, onp.uint32, onp.int64)):
        raise TypeError(f"seed must be an int, got {type(seed)}")

    def _convert(k):
        return onp.reshape(k.astype(onp.uint32), [1])

    if isinstance(seed, (int, onp.ndarray, onp.int32, onp.uint32)):
        # Special handling of raw integer values, which may be 64-bit even
        # when jax_enable_x64=False; mask after shifting so the top 32 bits
        # are not silently dropped.
        high = _convert(onp.bitwise_and(onp.right_shift(seed, 32), 0xFFFFFFFF))
    else:
        high = _convert(onp.right_shift(seed, onp.full_like(seed, 32)))
    low = _convert(onp.bitwise_and(seed, 0xFFFFFFFF))
    return onp.concatenate([high, low], 0)
def decode_int64(code):
    """Inverse of :func:`encode_int64`: rebuild an int64 from (high, low) limbs.

    See https://codereview.stackexchange.com/questions/80386/packing-and-unpacking-two-32-bit-integers-into-an-unsigned-64-bit-integer # noqa
    """
    high_word, low_word = code

    def _to_int64(k):
        return onp.reshape(k.astype(onp.int64), [1])

    shifted_high = _to_int64(onp.left_shift(high_word, 32))
    return _to_int64(low_word) + shifted_high
def encode_timestamp(date):
    """Encode a pandas Timestamp as two uint32 limbs (see encode_int64)."""
    if isinstance(date, pd.Timestamp):
        # Timestamp.value is the epoch offset in nanoseconds (int64).
        return encode_int64(date.value)
    raise TypeError(f"seed must be a pandas Timestamp, got {type(date)}")
def decode_timestamp(code):
    """Inverse of :func:`encode_timestamp`: limbs -> pandas Timestamp."""
    nanoseconds = decode_int64(code)[0]
    return pd.to_datetime(nanoseconds)
def encode_datetime(date):
    """Encode a datetime.datetime as two uint32 limbs via datetime64."""
    if isinstance(date, datetime.datetime):
        return encode_datetime64(onp.datetime64(date))
    raise TypeError(f"seed must be a datetime, got {type(date)}")
def decode_datetime(code):
    """Inverse of :func:`encode_datetime`: limbs -> datetime.datetime."""
    timestamp = pd.to_datetime(decode_int64(code)[0])
    return timestamp.to_pydatetime()
def encode_datetime64(date):
    """Encode a numpy datetime64 (ns/us resolution) as two uint32 limbs."""
    supported_dtypes = (
        onp.dtype("datetime64[ns]"),
        onp.dtype("<M8[ns]"),
        onp.dtype("<M8[us]"),
    )
    if date.dtype not in supported_dtypes:
        raise TypeError(f"seed must be a np.datetime64, got {type(date)}")
    # Normalize to nanoseconds, then encode the raw int64 epoch offset.
    return encode_int64(date.astype("<M8[ns]").astype(onp.int64))
def decode_datetime64(code):
    """Inverse of :func:`encode_datetime64`: limbs -> np.datetime64[ns]."""
    nanoseconds = decode_int64(code)[0]
    return nanoseconds.astype("<M8[ns]")
class Encoder(NamedTuple):
    """Pair of callables converting values to and from an encoded form."""

    # Maps raw values (e.g. strings, datetime64) to a numeric representation.
    encode: Callable
    # Inverse of ``encode``.
    decode: Callable
class DecodeDataset(NamedTuple):
    """Bundle of coordinate maps, encoders and data used to decode a dataset.

    NOTE(review): field semantics inferred from names only -- the visible
    code never constructs this type; confirm against the call sites.
    """

    iter_coords: Dict
    embed_map: Dict
    event_map: Dict
    embedding_coords: Dict
    event_coords: Dict
    other_coords: Dict
    # Per-dimension Encoder instances (see encode_dataset / decode_dataset).
    encoders: Dict
    constant_data: Dict
    iter_data: Dict
    embed_data: Dict
    event_data: Dict
def floor_datetime(time, freq: Union[str, DateOffset] = "d"):
    r"""Floor datetime64 value(s) to the specified frequency.

    Args:
        time : time(s) to floor, as (an array of) numpy ``datetime64``.
        freq : frequency level to floor to; only ``"d"`` (day) is
            currently supported.

    Returns:
        The floored time(s) with dtype ``datetime64[ns]``.

    Note:
        Implemented with plain int64 nanosecond arithmetic, which
        benchmarks faster than the ``pd.Series.dt.floor`` route discussed in
        https://stackoverflow.com/questions/5476065/how-to-truncate-the-time-on-a-datetime-object-in-python # noqa
        (previous pandas-based variants were kept here as dead commented-out
        code and have been removed).
    """
    assert freq == "d", f"other formats than '{freq}' not yet supported."
    # Epoch offset in nanoseconds.
    time_int64 = time.astype("<M8[ns]").astype(onp.int64)
    # One day in nanoseconds (86_400e9 is exactly representable in float64).
    day_ns = onp.array(24 * 60 * 60 * 1.0e9, onp.int64)
    # Integer division truncates to the start of the day.
    return ((time_int64 // day_ns) * day_ns).astype("<M8[ns]")
def string_encoder(values: Any) -> Encoder:
    """Build an Encoder mapping string labels to integer codes (sklearn).

    Arrays of rank > 1 are flattened before fitting and restored to their
    original shape on decode.
    """
    needs_ravel = values.ndim > 1
    original_shape = values.shape if needs_ravel else None
    flat_values = values.ravel() if needs_ravel else values
    label_encoder = LabelEncoder().fit(flat_values)

    def encode(value):
        flat = value.ravel() if needs_ravel else value
        return label_encoder.transform(flat)

    def decode(code):
        decoded = label_encoder.inverse_transform(code)
        return decoded.reshape(original_shape) if needs_ravel else decoded

    return Encoder(encode, decode)
def datetime64_encoder(values: Any) -> Encoder:
    """Build an Encoder for sequences of datetime64 values (limb encoding)."""

    def encode(value):
        return onp.stack([encode_datetime64(item) for item in value])

    def decode(code):
        return onp.stack(
            [decode_datetime64(code[i]) for i in range(len(code))]
        )

    return Encoder(encode, decode)
def less(t1, t2):
    """Lexicographic "strictly less than" on (high, low) limb pairs.

    Compares the high words first and falls back to the low words on a
    tie -- i.e. plain tuple ordering, which replaces the original
    hand-rolled if/elif ladder.
    """
    return bool(tuple(t1) < tuple(t2))
def floor_jax_datekey(datekey):
    """Floor a date represented as a (high, low) limb datekey to the day.

    TODO: find an efficient implementation that does not need to pass
    through int64 conversions, so it can be used smoothly in 32-bit JAX
    workflows.
    """
    # Decode limbs back to an int64 nanosecond offset (shape (1,)).
    date = decode_int64(onp.array(datekey))
    day = onp.array(24 * 60 * 60 * 1.0e9, onp.int64)
    # Floor to the day in plain int64 arithmetic; .item() extracts the scalar.
    floor_date = onp.ndarray.item((date // day) * day)
    # Re-encode as uint32 limbs and hand back a JAX array.
    datekey = encode_int64(floor_date)
    datekey = jnp.array(datekey)
    return datekey
def encode_dataset(encoders, dataset):
    """Apply per-dimension encoders to a dict of variables.

    Dimensions without an encoder pass through unchanged.
    """
    return {
        dim: encoders[dim].encode(var) if dim in encoders else var
        for dim, var in dataset.items()
    }
def decode_dataset(encoders, dataset):
    """Apply per-dimension decoders to a dict of variables.

    Dimensions without an encoder pass through unchanged.
    """
    return {
        dim: encoders[dim].decode(var) if dim in encoders else var
        for dim, var in dataset.items()
    }
| StarcoderdataPython |
40642 | <gh_stars>1-10
#!/usr/bin/python
import sys
import re
# Test application to check whether EMR pipeline and reading the data works
# This code is from the EMR example:
# https://s3.amazonaws.com/elasticmapreduce/samples/wordcount/wordSplitter.py
def main(argv):
    """Emit "LongValueSum:<word>\t1" for every word read from stdin.

    Streaming-mapper half of the EMR word-count sample; *argv* is accepted
    for CLI compatibility but unused.
    """
    pattern = re.compile("[a-zA-Z][a-zA-Z0-9]*")
    for line in sys.stdin:
        for word in pattern.findall(line):
            # BUG FIX: the original used the Python-2-only print statement,
            # which is a SyntaxError on Python 3. print() with a single
            # pre-concatenated argument emits identical text on both.
            print("LongValueSum:" + word.lower() + "\t" + "1")


if __name__ == "__main__":
    main(sys.argv)
| StarcoderdataPython |
3356320 | <reponame>pchtsp/combiLima<filename>app/db.py
import json
import app.aux as aux
def search_company(company_id):
    """Return the company name matching *company_id*, or "" if unknown."""
    for cid, name in get_companies():
        if cid == company_id:
            return name
    return ""
def get_companies():
    """Read data/empresas.txt and return (id, name) pairs.

    Ids are 0-based line numbers as strings; the header line (id "0") is
    dropped by the trailing slice, so returned ids start at "1". The last
    two characters of each line are stripped (presumably "\r\n" line
    endings -- TODO confirm file encoding).
    """
    with open('data/empresas.txt', 'r') as f:
        return [(str(n), l[:-2]) for n, l in enumerate(f.readlines())][1:]
def get_stops():
    """Load data/stops.json and index the stop records by their "id" field."""
    with open('data/stops.json') as f:
        return {elem['id']: elem for elem in json.load(f)}
def get_stops_in_range(north_east, south_west):
    """Return the stops whose lat/lon fall inside the given bounding box.

    Both corners are dicts with 'lat' and 'lon' keys; bounds are inclusive.
    """
    def _inside(stop):
        return (south_west['lat'] <= stop['lat'] <= north_east['lat']
                and south_west['lon'] <= stop['lon'] <= north_east['lon'])

    return {stop_id: stop for stop_id, stop in get_stops().items() if _inside(stop)}
def get_stops_around_center(center, dist=0.05):
    """Return stops within a square box of half-width *dist* degrees
    centred on *center* (a dict with 'lat' and 'lon')."""
    north_east = {'lat': center['lat'] + dist, 'lon': center['lon'] + dist}
    south_west = {'lat': center['lat'] - dist, 'lon': center['lon'] - dist}
    return get_stops_in_range(north_east, south_west)
def get_stop_routes(stop_id):
    """Return the agency names of the routes serving *stop_id*.

    Reads data/stops/<cleaned-id>_routes.json, whose entries carry a
    JSON-encoded string under "routeDesc"; missing agency names become "".
    """
    # NOTE: `file` shadows the (py2) builtin; kept for byte-compatibility.
    file = aux.clean_chars(stop_id)
    with open('data/stops/{}_routes.json'.format(file)) as f:
        content = json.load(f)
    # "routeDesc" holds JSON-in-a-string, hence the second json.loads.
    routes = [json.loads(elem["routeDesc"]) for elem in content]
    return [route.get('agency_name', '') for route in routes]
if __name__ == "__main__":
    # Ad-hoc smoke test: 0.05-degree bounding box around central Lima.
    lat = -12.100345
    lng = -77.042943
    ne = {
        'lat': lat + 0.05,
        'lon': lng + 0.05
    }
    sw = {
        'lat': lat - 0.05,
        'lon': lng - 0.05
    }
    # NOTE(review): the result is computed but never printed or used.
    stops = get_stops_in_range(ne, sw)
# Read two integers and report their sum. The prompts carry ANSI
# background-colour escape codes (45 = magenta, 42 = green); note the
# colours are never reset afterwards.
n1 = int(input('\033[1;45mDigite um número: '))
n2 = int(input('\033[1;42mDigite outro número: '))
soma = n1 + n2
print('A soma de {} e {} é igual a {}'.format(n1, n2, soma))
1948759 | <gh_stars>1-10
# Documentation example: two ways to obtain a pyvista Camera.
#
# Create a camera at the pyvista module level.
#
import pyvista
camera = pyvista.Camera()
#
# Access the active camera of a plotter and get the position of the
# camera.
#
pl = pyvista.Plotter()
pl.camera.position
# Expected:
## (1.0, 1.0, 1.0)
1905775 | <reponame>RKalaj/moviebase
from flask_script import Manager
from moviebase import app, db, Director, Movie
manager = Manager(app)
@manager.command
def deploy():
    """Rebuild the database schema and seed it with sample directors/movies."""
    db.drop_all()
    db.create_all()

    # Sample directors.
    nolan = Director(name='<NAME>', about='<NAME> is an English film director, screenwriter, and producer who holds both British and American citizenship. He is one of the highest-grossing directors in history, and among the most acclaimed filmmakers of the 21st century.')
    jackson = Director(name='<NAME>', about='Sir <NAME> is a New Zealand film director, screenwriter and film producer. He is best known as the director, writer, and producer of The Lord of the Rings trilogy (2001-03) and The Hobbit trilogy (2012-14), both of which are adapted from the novels of the same name by <NAME>.')
    tarantino = Director(name='<NAME>', about='<NAME> is an American film director, writer, and actor. His filmsare characterized by nonlinear storylines, satirical subject matter, an aestheticization of violence, extended scenes of dialogue, ensemble casts consisting of established and lesser-known performers, references to popular culture, soundtracksprimarily containing songs and score pieces from the 1960s to the 1980s, and features of neo-noir film. He is widely considered one of the greatest filmmakers of his generation.')

    # One sample movie per director.
    interstellar = Movie(name='Interstellar', year=2014, about="A team of explorers travel through a wormhole in space in an attempt to ensure humanity's survival.", director=nolan)
    lotr3 = Movie(name='The Lord of the Rings: The Return of the King', year=2003, about="Gandalf and Aragorn lead the World of Men against Sauron's army to draw his gaze from Frodo and Sam as they approach Mount Doom with the One Ring.", director=jackson)
    inglourious = Movie(name='<NAME>', year=2009, about="In Nazi-occupied France during World War II, a plan to assassinate Nazi leaders by a group of Jewish U.S. soldiers coincides with a theatre owner's vengeful plans for the same.", director=tarantino)

    db.session.add_all([
        nolan, jackson, tarantino,
        interstellar, lotr3, inglourious,
    ])
    db.session.commit()
if __name__=='__main__':
manager.run()
| StarcoderdataPython |
5118103 | <reponame>startakovsky/docarray<gh_stars>100-1000
from .document import DocumentArray
from .storage.elastic import StorageMixins, ElasticConfig
__all__ = ['DocumentArrayElastic', 'ElasticConfig']
class DocumentArrayElastic(StorageMixins, DocumentArray):
    """This is a :class:`DocumentArray` that uses Elasticsearch as
    vector search engine and storage.
    """

    def __new__(cls, *args, **kwargs):
        """``__new__`` method for :class:`DocumentArrayElastic`

        :param *args: list of args to instantiate the object
        :param **kwargs: dict of args to instantiate the object
        :return: the instantiated :class:`DocumentArrayElastic` object

        .. note::
            args/kwargs are accepted here but consumed by ``__init__``
            (and the mixin hierarchy), not by ``__new__`` itself.
        """
        return super().__new__(cls)
| StarcoderdataPython |
import numpy as np
import torch
from torch import nn
from torch.nn.init import xavier_uniform_
import torch.nn.functional as F
class Flatten(nn.Module):
    """Collapse all non-batch dimensions into one: (B, ...) -> (B, N)."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class Reshape(nn.Module):
    """View the input as a fixed target shape given at construction time."""

    def __init__(self, *shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(*self.shape)
class Permute(nn.Module):
    """Permute input dimensions into the order given at construction time.

    The result is made contiguous so downstream ``view`` calls work.
    """

    def __init__(self, *order):
        super().__init__()
        self.order = order

    def forward(self, x):
        return x.permute(*self.order).contiguous()
class PositionEncoding(nn.Module):
    """
    Simple sinusoidal positional encoding from "Attention Is All You Need".

    Note: the feature dimensionality of the input (last dimension) must be
    even. The file previously used ``torch.*`` without importing ``torch``
    (only ``torch.nn`` was imported), so every forward() raised NameError;
    the top-level ``import torch`` fixes that.
    """

    def __init__(self, max_len=10000):
        super(PositionEncoding, self).__init__()
        # Base of the geometric frequency progression (the 10000 of the paper).
        self.max_len = max_len

    def forward(self, x, start=0):
        """Add positional encodings for positions [start, start+length) to x.

        Args:
            x: tensor of shape (batch, length, dim), dim even.
            start: index of the first position (supports chunked sequences).
        """
        length = x.shape[1]
        dim = x.shape[2]
        with torch.no_grad():
            encoding = torch.zeros((length, dim)).to(device=x.device, dtype=x.dtype)
            # pos[i, j] = (start + i) / max_len ** (2j / dim)
            pos = torch.arange(start, start + length).view(-1, 1).float() / (
                self.max_len ** (2 * torch.arange(dim // 2).view(1, -1) / dim)
            ).float()
            # Even feature indices carry sin, odd indices carry cos.
            encoding[:, ::2] = torch.sin(pos)
            encoding[:, 1::2] = torch.cos(pos)
        return x + encoding
| StarcoderdataPython |
6646333 | <filename>pgen/gen_animation.py<gh_stars>0
import argparse
import pickle
import gym
from pgen import MLP
import pybullet
import pybullet_envs
import numpy as np
import skimage
import skimage.io
import matplotlib.pyplot as plt
def gen_animation(env_name, generator_file, latent_steps=10, save_figs=False,\
        render=False):
    """Sweep a 2-D latent space of a pickled policy generator and plot fitness.

    For every point on a latent_steps x latent_steps grid, the generator
    produces MLP policy weights, the policy is rolled out for several
    episodes in the given gym environment, and the episode rewards are
    collected into a heatmap shown with matplotlib.

    Args:
        env_name: gym environment id (e.g. "InvertedPendulumBulletEnv-v0").
        generator_file: pickle whose last element is the generator MLP.
        latent_steps: grid resolution per latent dimension.
        save_figs: if True, save a rendered frame per step of episode 0.
        render: if True, render episodes interactively.
    """
    env = gym.make(env_name)
    # The pickle holds a sequence of generator snapshots; use the latest.
    with open(generator_file, "rb") as file:
        pgen = pickle.load(file)[-1]
    input_dim = env.observation_space.shape[0]
    hid_dim = 16
    output_dim = env.action_space.shape[0]
    num_weights = input_dim*hid_dim + hid_dim*output_dim
    #pgen = MLP(weights, input_dim, hid_dim, num_weights*2)
    # walk the latent space, dimension 0
    latent_range = 1.
    latent_walk = np.arange(-latent_range/2, latent_range/2, \
        latent_range / latent_steps)[:,np.newaxis]
    fitness_landscape = []
    epds = 8
    # Bullet environments must be told to render before the first reset().
    if render and "BulletEnv" in env_name:
        env.render()
    for latent_walk0 in latent_walk:
        for latent_walk1 in latent_walk:
            latent_space = np.array([[latent_walk0, latent_walk1]]).reshape(1,2)
            agent_params = pgen.forward(latent_space)
            # The first num_weights generator outputs are the (squashed)
            # policy weights.
            agent_mean = np.tanh(agent_params[:1, 0:num_weights])
            # NOTE(review): output_dim is hard-coded to 1 although the env's
            # action dimension was computed above -- confirm intended.
            agent = MLP(agent_mean, input_dim, hid_dim, output_dim=1)
            for epd in range(epds):
                obs = env.reset()
                done = False
                epd_reward = 0.0
                step = 0
                while not done:
                    act = agent.forward(obs)
                    act = np.tanh(act)
                    if act.shape[1] > 1:
                        act = act.squeeze()
                    obs, reward, done, info = env.step(act)
                    epd_reward += reward
                    if render and "BulletEnv" not in env_name:
                        env.render()
                    if epd == 0 and save_figs:
                        # Bump the off-screen render resolution before grabbing
                        # an RGB frame for the saved animation figures.
                        env.unwrapped._render_width = 640
                        env.unwrapped._render_height = 480
                        img = env.render(mode="rgb_array")
                        skimage.io.imsave("./results/figs/{}_step{}_l{}.png"\
                            .format(env_name, str(step).zfill(3), \
                            str(int(10*latent_walk0)) \
                            + str(int(10*latent_walk1))),\
                            np.uint8(img))
                    step += 1
                fitness_landscape.append(epd_reward)
                print("reward : {}".format(epd_reward), " latent space ", latent_space)
    # NOTE(review): one reward is appended per episode (epds per grid point),
    # so this reshape expects latent_steps**2 elements and should fail for
    # epds > 1 -- confirm whether a per-point mean was intended.
    fitness_landscape_plot = np.array(fitness_landscape).reshape(latent_steps, latent_steps)
    plt.figure(figsize=(10,10))
    plt.imshow(fitness_landscape_plot)
    plt.colorbar()
    plt.show()
if __name__ == "__main__":
    # CLI entry point: pick environment, generator pickle and sweep options.
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--env_name", type=str, default="InvertedPendulumBulletEnv-v0")
    parser.add_argument("-g", "--generator_file", type=str, default="results/InvPend_generators.pickle")
    parser.add_argument("-l", "--latent_steps", type=int, default=3)
    # NOTE(review): type=bool on argparse options is misleading -- any
    # non-empty string (including "False") parses as True; consider
    # action="store_true" instead.
    parser.add_argument("-s", "--save_figs", type=bool, default=False)
    parser.add_argument("-r", "--render", type=bool, default=False)
    args = parser.parse_args()
    env_name = args.env_name
    generator_file = args.generator_file
    gen_animation(env_name, generator_file, latent_steps=args.latent_steps, save_figs=args.save_figs, render=args.render)
| StarcoderdataPython |
115057 | """
Useful tools for plotting with matplotlib
"""
| StarcoderdataPython |
4967549 | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Annotated Enron Subject Line Corpus Dataset."""
import os

import tensorflow_datasets as tfds
import tensorflow as tf
class Ami(tfds.core.GeneratorBasedBuilder):
    """Annotated Enron Subject Line Corpus Dataset.

    NOTE(review): the class is named ``Ami`` while the docstring describes
    the Enron subject-line corpus -- confirm which dataset is intended.
    """

    VERSION = tfds.core.Version("1.0.0")

    def _info(self) -> tfds.core.DatasetInfo:
        # Each example is a (document, summary) pair of plain-text features.
        return tfds.core.DatasetInfo(
            builder=self,
            features=tfds.features.FeaturesDict({
                '_DOCUMENT': tfds.features.Text(),
                '_SUMMARY': tfds.features.Text()
            })
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Download the data and define splits."""
        # NOTE(review): hard-coded absolute path to local files; dl_manager
        # is unused -- presumably intended for a local-only build. Requires
        # the top-level `import os`.
        path_ = '/media/data_dump/hemant/hemant/nlp/pegasus/junk/pegasus/TF_Create/'
        return {
            'validation': self._generate_examples(path=os.path.join(path_,"val.csv")),
            'train': self._generate_examples(path= os.path.join(path_,"train.csv")),
            'test': self._generate_examples(path=os.path.join(path_,"test.csv")),
        }

    def _generate_examples(self, path):
        # Files are tab-separated despite the .csv extension: "source\ttarget".
        with tf.io.gfile.GFile(path) as f:  # path to custom data
            for i, line in enumerate(f):
                source, target = line.split('\t')
                yield i, {'_DOCUMENT': source, '_SUMMARY': target}
| StarcoderdataPython |
1821947 | #This has some success although not 100%
# import the necessary packages
import numpy as np
import argparse
import imutils
import glob
import cv2
# construct the argument parser and parse the arguments
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--template", default="./test-images/logos/titleist.jpg", help="Path to template image")
ap.add_argument("-i", "--images", default="./test-images/titleist",
    help="Path to images where template will be matched")
# NOTE(review): --visualize takes a value rather than acting as a flag;
# action="store_true" was probably intended -- confirm before changing CLI.
ap.add_argument("-v", "--visualize",
    help="Flag indicating whether or not to visualize each iteration")
args = vars(ap.parse_args())

# load the template image, convert it to grayscale, and detect edges;
# matching is done on Canny edge maps so it is robust to lighting changes
template = cv2.imread(args["template"])
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
cv2.imshow("Template", template)

# loop over the images to find the template in
for imagePath in glob.glob(args["images"] + "/*.jpg"):
    print(f'Testing {imagePath} against template {args["template"]}')
    # load the image, convert it to grayscale, and initialize the
    # bookkeeping variable (best score, location, scale ratio) to keep
    # track of the matched region
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    found = None
    # loop over the scales of the image (multi-scale matching: 20 sizes
    # from 100% down to 20% of the original width)
    for scale in np.linspace(0.2, 1.0, 20)[::-1]:
        # resize the image according to the scale, and keep track
        # of the ratio of the resizing
        resized = imutils.resize(gray, width = int(gray.shape[1] * scale))
        r = gray.shape[1] / float(resized.shape[1])
        # if the resized image is smaller than the template, then break
        # from the loop
        if resized.shape[0] < tH or resized.shape[1] < tW:
            break
        # detect edges in the resized, grayscale image and apply template
        # matching to find the template in the image
        edged = cv2.Canny(resized, 50, 200)
        result = cv2.matchTemplate(edged, template, cv2.TM_CCOEFF)
        (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
        # check to see if the iteration should be visualized
        if args.get("visualize", False):
            # draw a bounding box around the detected region
            clone = np.dstack([edged, edged, edged])
            cv2.rectangle(clone, (maxLoc[0], maxLoc[1]),
                (maxLoc[0] + tW, maxLoc[1] + tH), (0, 0, 255), 2)
            cv2.imshow("Visualize", clone)
            cv2.waitKey(0)
        # if we have found a new maximum correlation value, then update
        # the bookkeeping variable
        if found is None or maxVal > found[0]:
            found = (maxVal, maxLoc, r)
    # unpack the bookkeeping variable and compute the (x, y) coordinates
    # of the bounding box based on the resized ratio (scale coordinates
    # back to the original image size)
    (_, maxLoc, r) = found
    (startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
    (endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))
    # draw a bounding box around the detected result and display the image
    cv2.rectangle(image, (startX, startY), (endX, endY), (255, 0, 0), 2)
    cv2.imshow("Image", image)
    cv2.waitKey(0)
cv2.waitKey(0)
cv2.destroyAllWindows()
67791 | <reponame>tf-encrypted/big-tensors
from typing import Optional
import numpy as np
import tensorflow as tf
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.keras.utils import tf_utils
import tf_big.python.ops.big_ops as ops
class Tensor(object):
    """Arbitrary-precision integer tensor backed by a tf.variant tensor.

    Arithmetic delegates to the big-integer kernels in ``ops``; operands
    are first imported and broadcast to a common shape.
    """

    is_tensor_like = True  # needed to pass tf.is_tensor, new as of TF 2.2+

    def __init__(self, value):
        # The wrapped value must be a variant tensor produced by the big ops.
        assert isinstance(value, tf.Tensor), type(value)
        assert value.dtype is tf.variant, value.dtype
        self._raw = value

    @property
    def shape(self):
        """Static shape of the underlying variant tensor."""
        return self._raw.shape

    @property
    def name(self):
        return self._raw.name

    @property
    def dtype(self):
        # Logical dtype advertised to TF machinery; values can also be
        # exported as tf.string (see export_tensor).
        return tf.int32
        # return tf.string

    def eval(self, session=None, dtype=None):
        """Evaluate by exporting to a regular tf.Tensor.

        String exports are converted from bytes to numpy str.
        """
        tf_tensor = export_tensor(self, dtype=dtype)
        evaluated = tf_tensor.eval(session=session)
        if tf_tensor.dtype is tf.string:
            return evaluated.astype(str)
        return evaluated

    def __add__(self, other):
        other = import_tensor(other)
        # TODO (Yann) This broadcast should be implemented
        # in big_kernels.cc
        self, other = broadcast(self, other)
        res = ops.big_add(self._raw, other._raw)
        return Tensor(res)

    def __radd__(self, other):
        # Addition is commutative, so reuse the same kernel call.
        other = import_tensor(other)
        # TODO (Yann) This broadcast should be implemented
        # in big_kernels.cc
        self, other = broadcast(self, other)
        res = ops.big_add(self._raw, other._raw)
        return Tensor(res)

    def __sub__(self, other):
        other = import_tensor(other)
        # TODO (Yann) This broadcast should be implemented
        # in big_kernels.cc
        self, other = broadcast(self, other)
        res = ops.big_sub(self._raw, other._raw)
        return Tensor(res)

    def __mul__(self, other):
        other = import_tensor(other)
        # TODO (Yann) This broadcast should be implemented
        # in big_kernels.cc
        self, other = broadcast(self, other)
        res = ops.big_mul(self._raw, other._raw)
        return Tensor(res)

    def __floordiv__(self, other):
        other = import_tensor(other)
        # TODO (Yann) This broadcast should be implemented
        # in big_kernels.cc
        self, other = broadcast(self, other)
        res = ops.big_div(self._raw, other._raw)
        return Tensor(res)

    def pow(self, exponent, modulus=None, secure=None):
        """Exponentiation, optionally modular and constant-time ("secure").

        When ``secure`` is None the module-wide default applies
        (see get_secure_default / set_secure_default).
        """
        # TODO (Yann) This broadcast should be implemented
        # in big_kernels.cc
        exponent = import_tensor(exponent)
        modulus = import_tensor(modulus)
        self, exponent = broadcast(self, exponent)
        res = ops.big_pow(
            base=self._raw,
            exponent=exponent._raw,
            modulus=modulus._raw if modulus else None,
            secure=secure if secure is not None else get_secure_default(),
        )
        return Tensor(res)

    def __pow__(self, exponent):
        return self.pow(exponent)

    def __mod__(self, modulus):
        modulus = import_tensor(modulus)
        res = ops.big_mod(val=self._raw, mod=modulus._raw)
        return Tensor(res)

    def inv(self, modulus):
        """Modular inverse of self modulo ``modulus``."""
        modulus = import_tensor(modulus)
        res = ops.big_inv(val=self._raw, mod=modulus._raw)
        return Tensor(res)
def _fetch_function(big_tensor):
    """Session.run fetch hook: export as tf.string, re-wrap to numpy str."""
    unwrapped = [export_tensor(big_tensor, dtype=tf.string)]
    rewrapper = lambda components_fetched: components_fetched[0].astype(str)
    return unwrapped, rewrapper
def _feed_function(big_tensor, feed_value):
    """Session.run feed hook: feed values against the raw variant tensor."""
    return [(big_tensor._raw, feed_value)]
def _feed_function_for_partial_run(big_tensor):
    """Partial-run feed hook: expose the raw variant tensor as feedable."""
    return [big_tensor._raw]
# this allows tf_big.Tensor to be passed directly to tf.Session.run,
# unwrapping and converting the result as needed via the three hook
# functions defined above
tf_session.register_session_run_conversion_functions(
    tensor_type=Tensor,
    fetch_function=_fetch_function,
    feed_function=_feed_function,
    feed_function_for_partial_run=_feed_function_for_partial_run,
)
def _tensor_conversion_function(tensor, dtype=None, name=None, as_ref=False):
    """TF conversion hook: export a tf-big Tensor when TF needs a tf.Tensor.

    Only dtype tf.int32 (or unspecified) is supported; name/as_ref are not
    implemented.
    """
    assert name is None, "Not implemented, name='{}'".format(name)
    assert not as_ref, "Not implemented, as_ref={}".format(as_ref)
    assert dtype in [tf.int32, None], dtype
    return export_tensor(tensor, dtype=dtype)
# TODO(Morten)
# this allows implicit conversion of tf_big.Tensor to tf.Tensor,
# but since the output dtype is determined by the outer context
# we essentially have to export with the implied risk of data loss
tf_ops.register_tensor_conversion_function(Tensor, _tensor_conversion_function)

# this allows tf_big.Tensor to be plumbed through Keras layers
# but seems only truly useful when used in conjunction with
# `register_tensor_conversion_function`
tf_utils.register_symbolic_tensor_type(Tensor)
def constant(tensor):
    """Create a tf-big Tensor from a numpy array or nested list/tuple."""
    assert isinstance(tensor, (np.ndarray, list, tuple)), type(tensor)
    return import_tensor(tensor)
def _convert_to_numpy_tensor(tensor):
if isinstance(tensor, np.ndarray):
return tensor
if isinstance(tensor, (int, str)):
return np.array([[tensor]])
if isinstance(tensor, (list, tuple)):
return np.array(tensor)
raise ValueError("Cannot convert to NumPy tensor: '{}'".format(type(tensor)))
def _import_tensor_numpy(tensor):
    """Import a rank-2 numpy tensor (int32/int64/str/object) as a tf-big Tensor.

    int64 and object arrays are first converted to byte strings, since the
    kernel accepts int32 and string inputs only.
    """
    tensor = _convert_to_numpy_tensor(tensor)
    if np.issubdtype(tensor.dtype, np.int64) or np.issubdtype(tensor.dtype, np.object_):
        tensor = tensor.astype(np.string_)
    elif not (
        np.issubdtype(tensor.dtype, np.int32)
        or np.issubdtype(tensor.dtype, np.string_)
        or np.issubdtype(tensor.dtype, np.unicode_)
    ):
        raise ValueError("Unsupported dtype '{}'.".format(tensor.dtype))
    if len(tensor.shape) != 2:
        raise ValueError("Tensors must have rank 2.")
    return Tensor(ops.big_import(tensor))
def _import_tensor_tensorflow(tensor):
    """Import a rank-2 tf.Tensor (uint8/int32/int64/string) as a tf-big Tensor.

    int64 values are first converted to their decimal string representation,
    since the kernel accepts uint8, int32 and string inputs only.
    """
    if tensor.dtype in [tf.int64]:
        tensor = tf.as_string(tensor)
    elif tensor.dtype not in [tf.uint8, tf.int32, tf.string]:
        raise ValueError("Unsupported dtype '{}'".format(tensor.dtype))
    if len(tensor.shape) != 2:
        raise ValueError("Tensor must have rank 2.")
    return Tensor(ops.big_import(tensor))
def import_tensor(tensor):
    """Wrap any supported input as a tf-big Tensor.

    tf-big Tensors pass through unchanged; tf.Tensors and numpy-convertible
    values are dispatched to the matching import helper.
    """
    if isinstance(tensor, Tensor):
        return tensor
    if isinstance(tensor, tf.Tensor):
        return _import_tensor_tensorflow(tensor)
    return _import_tensor_numpy(tensor)
def export_tensor(tensor, dtype=None):
    """Export a tf-big Tensor to a regular tf.Tensor.

    Args:
        tensor: the tf-big Tensor to export.
        dtype: tf.int32 or tf.string (defaults to tf.string).

    Returns:
        A tf.Tensor holding the exported values.
    """
    # BUG FIX: the assert message referenced an undefined name `value`,
    # raising NameError instead of a useful AssertionError on bad input.
    assert isinstance(tensor, Tensor), type(tensor)
    dtype = dtype or tf.string
    if dtype not in [tf.int32, tf.string]:
        raise ValueError("Unsupported dtype '{}'".format(dtype))
    return ops.big_export(tensor._raw, dtype=dtype)
def _import_limbs_tensor_tensorflow(limbs_tensor):
    """Import a rank-3 tf.Tensor of limbs (uint8 or int32) as a tf-big Tensor.

    The innermost axis holds the per-element limb sequence.
    """
    if limbs_tensor.dtype not in [tf.uint8, tf.int32]:
        raise ValueError(
            "Not implemented limb conversion for dtype {}".format(limbs_tensor.dtype)
        )
    if len(limbs_tensor.shape) != 3:
        raise ValueError("Limbs tensors must be rank 3.")
    return Tensor(ops.big_import_limbs(limbs_tensor))
def _import_limbs_tensor_numpy(limbs_tensor):
    """Import a rank-3 numpy limbs tensor (int32 or uint8) as a tf-big Tensor.

    The innermost axis holds the per-element limb sequence.

    BUG FIX: the original body referenced an undefined name ``tensor``
    instead of ``limbs_tensor`` in the rank check, the dtype checks and the
    error message, so every call raised NameError.
    """
    limbs_tensor = _convert_to_numpy_tensor(limbs_tensor)
    if len(limbs_tensor.shape) != 3:
        raise ValueError("Limbs tensors must have rank 3.")
    if not (
        np.issubdtype(limbs_tensor.dtype, np.int32)
        or np.issubdtype(limbs_tensor.dtype, np.uint8)
    ):
        raise ValueError(
            "Not implemented limb conversion for dtype {}".format(limbs_tensor.dtype)
        )
    return Tensor(ops.big_import_limbs(limbs_tensor))
def import_limbs_tensor(limbs_tensor):
    """Wrap a rank-3 limbs tensor (tf or numpy-convertible) as a tf-big Tensor."""
    if isinstance(limbs_tensor, tf.Tensor):
        return _import_limbs_tensor_tensorflow(limbs_tensor)
    return _import_limbs_tensor_numpy(limbs_tensor)
def export_limbs_tensor(tensor, dtype=None, max_bitlen=None):
    """Export a tf-big Tensor to a rank-3 tf.Tensor of limbs.

    Args:
        tensor: the tf-big Tensor to export.
        dtype: tf.uint8 (default) or tf.int32 limb dtype.
        max_bitlen: optional bound on the bit length; -1 signals
            "unspecified" to the kernel.
    """
    # BUG FIX: the assert message referenced an undefined name `value`,
    # raising NameError instead of a useful AssertionError on bad input.
    assert isinstance(tensor, Tensor), type(tensor)
    # Indicate missing value as negative.
    # NOTE(review): `or` also coerces an explicit max_bitlen of 0 to -1;
    # presumably 0 is not meaningful -- confirm.
    max_bitlen = max_bitlen or -1
    dtype = dtype or tf.uint8
    if dtype not in [tf.uint8, tf.int32]:
        raise ValueError("Unsupported dtype '{}'".format(dtype))
    return ops.big_export_limbs(tensor._raw, dtype=dtype, max_bitlen=max_bitlen)
# Process-wide default for the `secure` flag of pow(); mutable via
# set_secure_default().
_SECURE = True


def set_secure_default(value):
    """Set the process-wide default for secure (constant-time) exponentiation."""
    global _SECURE
    _SECURE = value
def get_secure_default():
    """Return the current process-wide default for the `secure` flag."""
    return _SECURE
def random_uniform(shape, maxval):
    """Sample a big-int tensor of `shape` uniformly from [0, maxval)."""
    if not isinstance(maxval, Tensor):
        maxval = import_tensor(maxval)
    r_raw = ops.big_random_uniform(shape, maxval._raw)
    return Tensor(r_raw)
def random_rsa_modulus(bitlength):
    """Sample primes p, q and their RSA modulus n = p*q of the given bitlength."""
    p_raw, q_raw, n_raw = ops.big_random_rsa_modulus(bitlength)
    return Tensor(p_raw), Tensor(q_raw), Tensor(n_raw)
def add(x, y):
    """Element-wise addition; delegates to the operands' `+` operator."""
    # TODO(Morten) lifting etc
    return x + y
def sub(x, y):
    """Element-wise subtraction; delegates to the operands' `-` operator."""
    # TODO(Morten) lifting etc
    return x - y
def mul(x, y):
    """Element-wise multiplication; delegates to the operands' `*` operator."""
    # TODO(Morten) lifting etc
    return x * y
def pow(base, exponent, modulus=None, secure=None):
    """Modular exponentiation; see Tensor.pow. Shadows the builtin `pow`."""
    # TODO(Morten) lifting etc
    assert isinstance(base, Tensor)
    return base.pow(exponent=exponent, modulus=modulus, secure=secure)
def matmul(x, y):
    """Matrix multiplication, delegated to x.matmul(y)."""
    # TODO(Morten) lifting etc
    # NOTE(review): the Tensor class above defines no `matmul` method, so
    # this would raise AttributeError for tf-big tensors -- confirm the
    # intended target of this delegation.
    return x.matmul(y)
def mod(x, n):
    """Return x modulo n.

    BUG FIX: the original called ``x.mod(n)``, but ``Tensor`` implements the
    modulus operation as ``__mod__`` and has no ``mod`` method, so every call
    raised AttributeError. The ``%`` operator works for tf-big tensors and
    plain numeric types alike.
    """
    return x % n
def inv(x, n):
    """Modular inverse of x modulo n; delegates to Tensor.inv."""
    return x.inv(n)
def broadcast(x, y):
    """Broadcast the smaller of `x` and `y` to the other's shape.

    If the ranks differ, the lower-rank operand is broadcast to the other's
    shape (e.g. [1] against [1, 1]).  If the ranks match but the element
    counts differ, the smaller operand is broadcast (e.g. [1, 1] against
    [1, 2]).  Otherwise both operands are returned unchanged.
    """

    def _stretch(small, target_shape):
        # Round-trip through tf to reuse tf.broadcast_to on raw values.
        stretched = export_tensor(small)
        stretched = tf.broadcast_to(stretched, target_shape)
        return import_tensor(stretched)

    x_rank = x.shape.rank
    y_rank = y.shape.rank
    if x_rank != y_rank:
        if x_rank < y_rank:
            return _stretch(x, y.shape), y
        return x, _stretch(y, x.shape)
    x_count = x.shape.num_elements()
    y_count = y.shape.num_elements()
    if x_count < y_count:
        return _stretch(x, y.shape), y
    if x_count > y_count:
        return x, _stretch(y, x.shape)
    return x, y
| StarcoderdataPython |
1643881 | from setuptools import setup, find_packages
# Use the README as the long description rendered on PyPI.
with open('README.md') as fp:
    long_description = fp.read()
# Package metadata for the Typeform API client.
# NOTE(review): `url` points at a personal fork while `author` is Typeform —
# confirm which repository is canonical.
setup(
    name='typeform',
    version='1.1.0',
    description='Python Client wrapper for Typeform API',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords=[
        'type',
        'form',
        'typeform',
        'api',
    ],
    author='Typeform',
    author_email='<EMAIL>',
    url='https://github.com/MichaelSolati/typeform-python-sdk',
    packages=find_packages(),
    install_requires=['requests'],
    test_suite='typeform.test.suite.test_suite',
    license='MIT',
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python',
    ]
)
| StarcoderdataPython |
3300402 | from .base import Property
from .decorators import function_as_method, method_as_function
| StarcoderdataPython |
3473741 | <reponame>dohmatob/pytorch-playground
import argparse
import os
import time
from utee import misc
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
from IPython import embed
# ---- Command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch CIFAR-X Example')
parser.add_argument('--type', default='cifar10', help='cifar10|cifar100')
parser.add_argument('--channel', type=int, default=128, help='first conv channel (default: 32)')
parser.add_argument('--wd', type=float, default=0.00, help='weight decay')
parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=150, help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=2, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=100, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')
args = parser.parse_args()
# Resolve the log directory relative to this script and route print() through
# the project logger so all console output is also written to the log file.
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
    print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# data loader and model
assert args.type in ['cifar10', 'cifar100'], args.type
if args.type == 'cifar10':
    train_loader, test_loader = dataset.get10(batch_size=args.batch_size, num_workers=1)
    model = model.cifar10(n_channel=args.channel)
else:
    train_loader, test_loader = dataset.get100(batch_size=args.batch_size, num_workers=1)
    model = model.cifar100(n_channel=args.channel)
# NOTE(review): from here on the name `model` shadows the imported `model`
# module; the module is no longer reachable, which happens to be harmless here.
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
    model.cuda()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
    # ready to go
    for epoch in range(args.epochs):
        model.train()
        # Step the learning rate down by 10x at each configured epoch.
        if epoch in decreasing_lr:
            optimizer.param_groups[0]['lr'] *= 0.1
        for batch_idx, (data, target) in enumerate(train_loader):
            indx_target = target.clone()
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            # NOTE(review): Variable/volatile and loss.data[0] below are
            # pre-0.4 PyTorch idioms; this script targets old PyTorch.
            data, target = Variable(data), Variable(target)
            optimizer.zero_grad()
            output = model(data)
            loss = F.cross_entropy(output, target)
            loss.backward()
            optimizer.step()
            if batch_idx % args.log_interval == 0 and batch_idx > 0:
                pred = output.data.max(1)[1]  # get the index of the max log-probability
                correct = pred.cpu().eq(indx_target).sum()
                acc = correct * 1.0 / len(data)
                print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    loss.data[0], acc, optimizer.param_groups[0]['lr']))
        # Per-epoch timing / ETA bookkeeping.
        elapse_time = time.time() - t_begin
        speed_epoch = elapse_time / (epoch + 1)
        speed_batch = speed_epoch / len(train_loader)
        eta = speed_epoch * args.epochs - elapse_time
        print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
            elapse_time, speed_epoch, speed_batch, eta))
        misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
        # Periodic evaluation on the held-out test split.
        if epoch % args.test_interval == 0:
            model.eval()
            test_loss = 0
            correct = 0
            for data, target in test_loader:
                indx_target = target.clone()
                if args.cuda:
                    data, target = data.cuda(), target.cuda()
                data, target = Variable(data, volatile=True), Variable(target)
                output = model(data)
                test_loss += F.cross_entropy(output, target).data[0]
                pred = output.data.max(1)[1]  # get the index of the max log-probability
                correct += pred.cpu().eq(indx_target).sum()
            test_loss = test_loss / len(test_loader)  # average over number of mini-batch
            acc = 100. * correct / len(test_loader.dataset)
            print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
                test_loss, correct, len(test_loader.dataset), acc))
            # Keep only the best checkpoint; the previous best file is replaced.
            if acc > best_acc:
                new_file = os.path.join(args.logdir, 'best-{}.pth'.format(epoch))
                misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
                best_acc = acc
                old_file = new_file
except Exception as e:
    import traceback
    traceback.print_exc()
finally:
    print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(time.time()-t_begin, best_acc))
| StarcoderdataPython |
213364 | <reponame>AlbertiPot/unnas<filename>pycls/models/nas/operations.py<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""NAS ops (adopted from DARTS)."""
import torch
import torch.nn as nn
# Registry of candidate NAS operations keyed by name.  Every entry is a
# factory `lambda C, stride, affine -> nn.Module` where C is the channel
# count (input == output), stride the spatial stride, and affine whether
# BatchNorm layers learn scale/shift parameters.
OPS = {
    # Zero op: outputs zeros (optionally strided), effectively "no connection".
    'none': lambda C, stride, affine:
        Zero(stride),
    # Pooling ops (channel count unchanged).
    'avg_pool_2x2': lambda C, stride, affine:
        nn.AvgPool2d(2, stride=stride, padding=0, count_include_pad=False),
    'avg_pool_3x3': lambda C, stride, affine:
        nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
    'avg_pool_5x5': lambda C, stride, affine:
        nn.AvgPool2d(5, stride=stride, padding=2, count_include_pad=False),
    'max_pool_2x2': lambda C, stride, affine:
        nn.MaxPool2d(2, stride=stride, padding=0),
    'max_pool_3x3': lambda C, stride, affine:
        nn.MaxPool2d(3, stride=stride, padding=1),
    'max_pool_5x5': lambda C, stride, affine:
        nn.MaxPool2d(5, stride=stride, padding=2),
    'max_pool_7x7': lambda C, stride, affine:
        nn.MaxPool2d(7, stride=stride, padding=3),
    # Identity when stride is 1; otherwise a stride-2 factorized reduction.
    'skip_connect': lambda C, stride, affine:
        Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
    # Plain ReLU-Conv-BN blocks.
    'conv_1x1': lambda C, stride, affine:
        nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C, C, 1, stride=stride, padding=0, bias=False),
            nn.BatchNorm2d(C, affine=affine)
        ),
    'conv_3x3': lambda C, stride, affine:
        nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C, C, 3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(C, affine=affine)
        ),
    # Depthwise-separable and dilated convolutions (classes defined below).
    'sep_conv_3x3': lambda C, stride, affine:
        SepConv(C, C, 3, stride, 1, affine=affine),
    'sep_conv_5x5': lambda C, stride, affine:
        SepConv(C, C, 5, stride, 2, affine=affine),
    'sep_conv_7x7': lambda C, stride, affine:
        SepConv(C, C, 7, stride, 3, affine=affine),
    'dil_conv_3x3': lambda C, stride, affine:
        DilConv(C, C, 3, stride, 2, 2, affine=affine),
    'dil_conv_5x5': lambda C, stride, affine:
        DilConv(C, C, 5, stride, 4, 2, affine=affine),
    'dil_sep_conv_3x3': lambda C, stride, affine:
        DilSepConv(C, C, 3, stride, 2, 2, affine=affine),
    # Asymmetric (factorized) convolutions: k x 1 followed by 1 x k.
    'conv_3x1_1x3': lambda C, stride, affine:
        nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C, C, (1,3), stride=(1, stride), padding=(0, 1), bias=False),
            nn.Conv2d(C, C, (3,1), stride=(stride, 1), padding=(1, 0), bias=False),
            nn.BatchNorm2d(C, affine=affine)
        ),
    'conv_7x1_1x7': lambda C, stride, affine:
        nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C, C, (1,7), stride=(1, stride), padding=(0, 3), bias=False),
            nn.Conv2d(C, C, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),
            nn.BatchNorm2d(C, affine=affine)
        ),
}
class ReLUConvBN(nn.Module):
    """ReLU -> Conv2d -> BatchNorm2d block used as a basic NAS op."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(ReLUConvBN, self).__init__()
        # Same layer order as a hand-written Sequential, so state_dict
        # keys (op.0, op.1, op.2) stay stable.
        layers = [
            nn.ReLU(inplace=False),
            nn.Conv2d(
                C_in, C_out, kernel_size, stride=stride,
                padding=padding, bias=False
            ),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the ReLU-Conv-BN pipeline to `x`."""
        return self.op(x)
class DilConv(nn.Module):
    """Dilated separable conv: ReLU -> depthwise dilated conv -> 1x1 conv -> BN."""

    def __init__(
        self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True
    ):
        super(DilConv, self).__init__()
        # Depthwise (groups=C_in) dilated conv followed by a pointwise
        # projection; layer order matches the original Sequential.
        stages = [
            nn.ReLU(inplace=False),
            nn.Conv2d(
                C_in, C_in, kernel_size=kernel_size, stride=stride,
                padding=padding, dilation=dilation, groups=C_in, bias=False
            ),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*stages)

    def forward(self, x):
        """Run the dilated separable convolution over `x`."""
        return self.op(x)
class SepConv(nn.Module):
    """Separable conv applied twice: (ReLU, depthwise, pointwise, BN) x 2.

    Only the first depthwise conv applies the stride; the second pass keeps
    stride 1 and maps to C_out.
    """

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(SepConv, self).__init__()
        first_pass = [
            nn.ReLU(inplace=False),
            nn.Conv2d(
                C_in, C_in, kernel_size=kernel_size, stride=stride,
                padding=padding, groups=C_in, bias=False
            ),
            nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_in, affine=affine),
        ]
        second_pass = [
            nn.ReLU(inplace=False),
            nn.Conv2d(
                C_in, C_in, kernel_size=kernel_size, stride=1,
                padding=padding, groups=C_in, bias=False
            ),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*(first_pass + second_pass))

    def forward(self, x):
        """Run both separable-conv passes over `x`."""
        return self.op(x)
class DilSepConv(nn.Module):
    """Dilated separable conv applied twice: (ReLU, dilated depthwise, pointwise, BN) x 2.

    The stride is applied only in the first depthwise conv; the second pass
    uses stride 1 and projects to C_out.
    """

    def __init__(
        self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True
    ):
        super(DilSepConv, self).__init__()
        first_pass = [
            nn.ReLU(inplace=False),
            nn.Conv2d(
                C_in, C_in, kernel_size=kernel_size, stride=stride,
                padding=padding, dilation=dilation, groups=C_in, bias=False
            ),
            nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_in, affine=affine),
        ]
        second_pass = [
            nn.ReLU(inplace=False),
            nn.Conv2d(
                C_in, C_in, kernel_size=kernel_size, stride=1,
                padding=padding, dilation=dilation, groups=C_in, bias=False
            ),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*(first_pass + second_pass))

    def forward(self, x):
        """Run both dilated separable-conv passes over `x`."""
        return self.op(x)
class Identity(nn.Module):
    """No-op module: forward returns its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        """Return `x` untouched."""
        return x
class Zero(nn.Module):
    """Outputs zeros shaped like the (optionally strided) input.

    With stride > 1 the spatial dimensions are subsampled before zeroing,
    matching the output shape of strided ops.
    """

    def __init__(self, stride):
        super(Zero, self).__init__()
        self.stride = stride

    def forward(self, x):
        """Return a zero tensor with the (possibly strided) shape of `x`."""
        if self.stride == 1:
            return x.mul(0.)
        strided = x[:, :, ::self.stride, ::self.stride]
        return strided.mul(0.)
class FactorizedReduce(nn.Module):
    """Halve spatial resolution without information loss at a single offset.

    Two stride-2 1x1 convolutions each produce half of the output channels;
    the second one reads the input shifted by one pixel so that both even and
    odd spatial positions contribute. Their outputs are concatenated and
    batch-normalized.
    """

    def __init__(self, C_in, C_out, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        self.relu = nn.ReLU(inplace=False)
        self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) # stride=2 halves spatial resolution; each of the two convs yields half the channels, concatenated below
        self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)
        self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0) # pad (left, right, top, bottom) = (0, 1, 0, 1) with value 0
    def forward(self, x):
        x = self.relu(x)
        y = self.pad(x)
        out = torch.cat([self.conv_1(x), self.conv_2(y[:,:,1:,1:])], dim=1) # e.g. x [1,32,32,32] padded becomes y [1,32,33,33] (one extra column right/bottom); conv_2 reads y offset by one pixel so odd positions are covered
        out = self.bn(out)
        return out
| StarcoderdataPython |
3401128 | <gh_stars>1-10
# pytest --html=tests/report/test-report.html
# above command runs tests and test reports generates in tests/report location.
# nosetests --with-coverage --cover-html
# clean all the .pyc files
# find . -name \*.pyc -delete
# nosetests --with-coverage --cover-html
# pytest --cov=contentstack
# pytest -v --cov=contentstack --cov-report=html
# pytest --html=tests/report/test-report.html
import unittest
from unittest import TestLoader, TestSuite
from .test_assets import TestAsset
from .test_entry import TestEntry
from .test_query import TestQuery
from .test_stack import TestStack
def all_tests():
    """Aggregate all contentstack test cases into one unittest TestSuite.

    Returns:
        unittest.TestSuite covering the stack, asset, entry and query tests.
    """
    test_module_stack = TestLoader().loadTestsFromTestCase(TestStack)
    test_module_asset = TestLoader().loadTestsFromTestCase(TestAsset)
    test_module_entry = TestLoader().loadTestsFromTestCase(TestEntry)
    test_module_query = TestLoader().loadTestsFromTestCase(TestQuery)
    suite = TestSuite([
        test_module_stack,
        test_module_asset,
        test_module_entry,
        test_module_query,
    ])
    # BUG FIX: the suite was built but never returned, so callers
    # (e.g. a test runner invoking all_tests()) received None.
    return suite
| StarcoderdataPython |
11317877 | <gh_stars>1-10
import abc
import os
from typing import List, Tuple
from bopt.basic_types import Hyperparameter
from bopt.hyperparam_values import HyperparamValues
from bopt.sample import SampleCollection
from bopt.models.parameters import ModelParameters
class Model(abc.ABC):
    """Abstract base class for surrogate models used by the optimizer.

    Concrete models must be serializable into ModelParameters so that an
    experiment's state can be persisted and restored.
    """

    @abc.abstractmethod
    def to_model_params(self) -> ModelParameters:
        """Serialize this model's configuration into ModelParameters."""
        raise NotImplementedError
| StarcoderdataPython |
287955 | <reponame>hkamran80/youtubedl-web
# youtube-dl Web Interface -- Threaded
import json
import os
import subprocess
import threading

from flask import Flask, render_template, redirect, request, url_for
app = Flask(__name__)
# In-memory cache of the persisted download list ({"links": [...]}).
# NOTE(review): the name `file` shadows a builtin; kept for compatibility.
file = json.loads(open("web.json").read())
class dwnl(threading.Thread):
    """Background worker that downloads one URL with youtube-dl.

    Args:
        dwnl_url: the URL to download.
        dwnl_type: "mp4" for video or "mp3" for audio extraction.
    """

    def __init__(self, dwnl_url, dwnl_type):
        threading.Thread.__init__(self)
        self.dwnl_url = dwnl_url
        self.dwnl_type = dwnl_type

    def run(self):
        location = ""  # Must set this!
        output_template = "{}/%(title)s.%(ext)s".format(location)
        # SECURITY FIX: the URL comes from a web form; pass argv as a list
        # (no shell) instead of interpolating it into an os.system() string.
        if "youtube" in self.dwnl_url:
            if "playlist" in self.dwnl_url:
                subprocess.run([
                    "youtube-dl", "-i", "--yes-playlist",
                    "-o", output_template,
                    "-f", "bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4",
                    self.dwnl_url,
                ])
            else:
                if self.dwnl_type == "mp4":
                    subprocess.run([
                        "youtube-dl",
                        "-o", output_template,
                        "-f", "bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4",
                        self.dwnl_url,
                    ])
                elif self.dwnl_type == "mp3":
                    # BUG FIX: the original formatted the URL into the
                    # `--audio-format` slot and never passed the URL itself.
                    subprocess.run([
                        "youtube-dl",
                        "-o", "{}/%(title)s_audio.%(ext)s".format(location),
                        "--extract-audio", "--audio-format", "mp3",
                        self.dwnl_url,
                    ])
        else:
            subprocess.run(["youtube-dl", self.dwnl_url])
def write_json(link):
    """Append *link* to web.json on disk and refresh the in-memory cache."""
    global file
    with open("web.json", "w") as handle:
        links = file["links"]
        links.append(link)
        handle.write(json.dumps({"links": links}))
    file = json.loads(open("web.json").read())
@app.route("/", methods=["GET"])
def index():
    # Render the main page with the list of previously requested links.
    return render_template("primary.html", links_dwnl=file["links"])
@app.route("/new", methods=["POST"])
def new():
    """Record the submitted URL and start a background download for it."""
    if request.form["new_url"]:
        write_json(request.form["new_url"])
        # BUG FIX: the original did `dwnl = dwnl(url).start()`, which both
        # shadows the class name (UnboundLocalError) and omits the required
        # dwnl_type argument. Default to mp4 when the form gives no type.
        worker = dwnl(request.form["new_url"], request.form.get("new_type", "mp4"))
        worker.start()
    return redirect(url_for("index"))
@app.route("/clear", methods=["GET", "POST"])
def clear():
    """Reset web.json to an empty link list and reload the cache."""
    global file
    with open("web.json", "w") as handle:
        handle.write(json.dumps({"links": []}))
    file = json.loads(open("web.json").read())
    return redirect(url_for("index"))
# Listen on all interfaces, port 8008 (debug disabled for production use).
app.run("0.0.0.0", 8008, debug=False)
| StarcoderdataPython |
3275252 | # This Function formating the data to plot in Line Chart ...
def get_LineChartData(date_time, actualPower):
    """Pair timestamps with power readings for a line chart.

    Args:
        date_time: sequence of timestamp labels.
        actualPower: sequence of power values.

    Returns:
        List of [timestamp, power] pairs, truncated to the shorter input
        (zip semantics, matching the original behavior).
    """
    # Idiom fix: a comprehension replaces the manual append loop, and
    # [stamp, power] replaces the roundabout list((i, j)).
    return [[stamp, power] for stamp, power in zip(date_time, actualPower)]
# This Function formating the data to plot in Bar Chart ...
def get_BarChartData(date_time, wind_speed, wind_deg, humidity):
    """Build bar-chart rows: a header followed by one row per reading.

    Args:
        date_time: sequence of timestamp labels.
        wind_speed: wind speed readings (m/s).
        wind_deg: wind direction readings (degrees).
        humidity: humidity readings (%).

    Returns:
        List whose first element is the column-header row, followed by
        [timestamp, speed, direction, humidity] rows (zip semantics).
    """
    header = ['Date-Time ', 'Wind Speed (m/s)', 'Wind Direction (°)', 'Humidity (%)']
    rows = [
        [stamp, speed, direction, hum]
        for stamp, speed, direction, hum in zip(date_time, wind_speed, wind_deg, humidity)
    ]
    return [header] + rows
| StarcoderdataPython |
243475 | <reponame>iphilpot/DoE_Bloom_Filter
#!/usr/bin/env python
from model.bloom_filter import BloomFilter
from model.users import PresentUsers, AbsentUsers
import matplotlib.pyplot as plt
import numpy as np
import math
# This is the last experiment, a 2 factorial designed experiment. The goal is to understand
# the main effect of each variable, but also the interaction between the to variables we control
# and how they affect the respons variable (false positives).
def main():
    """Run the 2x2 factorial bloom-filter experiment and plot the results."""
    # Present file contains 10,000 generated usernames that are added to the bloom filter.
    present_users_file = './src/resources/present.txt'
    # Absent file contains 1,000 generated usernames that are not in the bloom filter.
    absent_users_file = './src/resources/absent.txt'
    # Read files into models
    present_users = PresentUsers(present_users_file)
    absent_users = AbsentUsers(absent_users_file)
    # Loop over a specified range of ints to adjust both the bit array size
    # and the hash pass count for the bloom filter. M Range is 50,000 to 70,000 with
    # a step of 10,000. This should surround the right sized value of 62352. k range is 3 to
    # 5 and also should surround the right sized value of 4.
    # NOTE(review): both ranges are half-open, so the factor levels actually
    # tested are k in {3, 4} and m in {50000, 60000} — four cells in total.
    # TODO: O(n^2) - refactor to more be efficient using a memoization pattern.
    cnt_size = []
    cnt_passes = []
    cnt_fp = []
    for hash_count in range(3, 5):
        for bit_arr_size in range(50000, 70000, 10000):
            # Bloom filter with varying values for both hash passes and bit array sizes
            # for 10,000 items
            bloom_filter = BloomFilter(bit_arr_size, hash_count)
            # Add present users to the bloom filter.
            for i in range(len(present_users)):
                bloom_filter += present_users[i]
            # Test for absent users and count the false positives.
            false_positive_count = 0
            for user in absent_users:
                if user in bloom_filter:
                    false_positive_count += 1
            cnt_fp.append(false_positive_count)
            cnt_passes.append(hash_count)
            cnt_size.append(bit_arr_size)
            print('There are {} false positives when bit array size is {} and hash count is {}'
                  .format(false_positive_count, bit_arr_size, hash_count))
    # TODO: Refactor into something else
    # Cube Plot: one point per factorial cell, annotated with its FP count.
    plt.plot(cnt_passes, cnt_size, 'o')
    plt.xticks(ticks=[3, 4])
    plt.yticks(ticks=[50000, 60000])
    plt.xlabel('Hash Pass Count')
    plt.ylabel('Bit Array Size')
    plt.title('Cube Plot')
    for i in range(4):
        plt.text(cnt_passes[i], cnt_size[i], ' ' + str(cnt_fp[i]), size='15')
    # Hash Pass Main Effect + Plot
    # cnt_fp is ordered [(k=3,m=50k), (k=3,m=60k), (k=4,m=50k), (k=4,m=60k)],
    # so the first two entries are the k=3 cells and the last two are k=4.
    hash_pass_x_axis = cnt_passes[:3:2]
    hash_pass_fp_3 = cnt_fp[:2]
    hash_pass_fp_4 = cnt_fp[2:4]
    hash_pass_y_axis = []
    hash_pass_y_axis.append(np.sum(hash_pass_fp_3)/len(hash_pass_fp_3))
    hash_pass_y_axis.append(np.sum(hash_pass_fp_4)/len(hash_pass_fp_4))
    main_diff_hash = np.absolute(hash_pass_y_axis[0]-hash_pass_y_axis[1])
    print('The main effect for hash passes is {}'.format(main_diff_hash))
    plt.figure(2)
    plt.plot(hash_pass_x_axis, hash_pass_y_axis)
    plt.xticks(ticks=[3, 4])
    plt.yticks(ticks=[40, 100])
    plt.xlabel('Hash Pass Count')
    plt.ylabel('False Positive Avg')
    plt.title('Hash Pass Count Main Effect')
    # Bit Array Size Main Effect + Plot
    # Strided slices pick the m=50k cells (indices 0, 2) and m=60k cells (1, 3).
    bit_size_x_axis = cnt_size[:2]
    bit_size_fp_5 = cnt_fp[:3:2]
    bit_size_fp_6 = cnt_fp[1:4:2]
    bit_size_y_axis = []
    bit_size_y_axis.append(np.sum(bit_size_fp_5)/len(bit_size_fp_5))
    bit_size_y_axis.append(np.sum(bit_size_fp_6)/len(bit_size_fp_6))
    main_diff_size = np.absolute(bit_size_y_axis[0]-bit_size_y_axis[1])
    print('The main effect for bit array size is {}'.format(main_diff_size))
    plt.figure(3)
    plt.plot(bit_size_x_axis, bit_size_y_axis)
    plt.xticks(ticks=[50000, 60000])
    plt.yticks(ticks=[40, 100])
    plt.xlabel('Bit Array Size Count')
    plt.ylabel('False Positive Avg')
    plt.title('Bit Array Size Main Effect')
    # Hash Pass/Bit Array Size Interaction Effect + Plot
    # Compares the two diagonals of the 2x2 design: cells (0, 3) vs (1, 2).
    interaction_fp_1 = [cnt_fp[0], cnt_fp[-1]]
    interaction_fp_2 = [cnt_fp[1], cnt_fp[-2]]
    interaction_effect = np.absolute((np.sum(interaction_fp_1)/len(interaction_fp_1))-(np.sum(interaction_fp_2)/len(interaction_fp_2)))
    print('The Hash Pass / Bit Array Size interaction effect is {}'.format(interaction_effect))
    plt.figure(4)
    plt.xticks(ticks=[])
    plt.yticks(ticks=[])
    plt.plot(interaction_fp_1, '-o')
    plt.plot(interaction_fp_2, '-o')
    plt.title('Hash Pass / Bit Array Size interaction effect')
    plt.show()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5017046 | <reponame>eltld/ezEngine<gh_stars>0
import sys

import pysvn
import requests
from flask import Flask, json

from DBAccess import DBAccess
class DBWriter:
    """Class that encapsulates the conversion of json build results into database records.
    Also handles the retrival of revision information from the SVN server (commits message etc)."""

    ########################################################################
    ## DBWriter constructor
    ########################################################################
    def __init__(self, dbAccess, app):
        self.DB = dbAccess
        # BUG FIX: keep a reference to the Flask app so instance methods can
        # reach app.logger (CheckToSendMail previously used an undefined
        # module-level `app`).
        self.app = app
        self.config = app.config
        self.callback_SendMail = None

    ########################################################################
    ## DBWriter public functions
    ########################################################################
    def AddBuildProcessResult(self, json):
        """Store one complete build result and return its row id.

        The parameter 'json' is the entire json output of a build, i.e. the files
        generated by the build machines and sent to the CNC tool which in turn
        sends them here. An identical existing result (same duration) is kept;
        a differing one is replaced (deletes cascade to sub-results)."""
        db = self.DB.getDB()
        # Get or add build machine
        buildMachineId = self.AddBuildMachine(json['Settings'])
        # Revision / Timestamp
        revision = json['Settings']['Revision']
        timestamp = json['Settings']['Timestamp']
        # Get old entry if it exists
        cur = db.execute('SELECT * FROM BuildProcessResults WHERE Revision=? AND BuildMachine=?',
            (revision, buildMachineId))
        buildProcessResult = cur.fetchall()
        if buildProcessResult:
            # If the duration is equal this BuildProcessResult is already present in the DB and we can early out.
            if buildProcessResult[0]['Duration'] == json['Duration']:
                return buildProcessResult[0][0]
            # If the new BuildProcessResult is different, we delete the old one (cascades)
            res = db.execute('DELETE FROM BuildProcessResults WHERE id=?', (buildProcessResult[0][0],))
        # Insert new entry
        res = db.execute('INSERT INTO BuildProcessResults (Revision, Timestamp, Success, Duration, Errors, BuildMachine) values (?, ?, ?, ?, ?, ?)',
            (revision, timestamp, json['Success'], json['Duration'], json['Errors'], buildMachineId) )
        buildProcessId = res.lastrowid
        # Add all sub-entries
        self.AddSVNResult(json['SVNResult'], buildProcessId)
        self.AddCMakeResult(json['CMakeResult'], buildProcessId)
        self.AddBuildResult(json['BuildResult'], buildProcessId)
        self.AddTestResult(json['TestResult'], buildProcessId)
        # Sync revision information from the SVN server.
        self.UpdateSVNRevisions()
        # Commit to DB, this should be the only commit in the entire class as all other functions are called from here.
        db.commit()
        self.CheckToSendMail(revision)
        return buildProcessId

    ########################################################################
    ## DBWriter private functions
    ########################################################################
    def CheckToSendMail(self, rev):
        """Notify the commit author by mail once every build machine has
        reported a result for revision *rev* (if a mail callback is set)."""
        if (not self.callback_SendMail):
            return
        db = self.DB.getDB()
        cur = db.execute('SELECT COUNT(*) FROM BuildProcessResults WHERE Revision=?', (rev,))
        buildResultCount = cur.fetchall()[0][0]
        cur = db.execute('SELECT COUNT(*) FROM BuildMachines')
        buildMachineCount = cur.fetchall()[0][0]
        if (buildResultCount == buildMachineCount):
            # Sanity check: we do not send an email if the current revision is more than 10
            # revisions behind the head revision. Just to be sure this doesn't end in an
            # unfortunate mail spam meltdown.
            cur = db.execute('SELECT MAX(id) FROM Revisions')
            entries = cur.fetchall()
            if (rev + 10 < entries[0][0]):
                return
            cur = db.execute('SELECT Author FROM Revisions WHERE id=?', (rev,))
            AuthorEntry = cur.fetchall()
            if (not AuthorEntry):
                return
            try:
                # Get the email address of the author and call the send mail callback.
                Author = AuthorEntry[0][0]
                response = requests.get(self.config['SVN_USER_TO_MAIL_REQUEST_ADDRESS'] + Author)
                responseJson = json.loads(response.content)
                AuthorMail = responseJson[Author]
                self.callback_SendMail(rev, AuthorMail)
            except:
                # BUG FIX: the original referenced an undefined module-level
                # `app`; log through the stored Flask app instead.
                self.app.logger.debug('*** CheckToSendMail: Unexpected error: %s', sys.exc_info()[0])
                return

    def UpdateSVNRevisions(self):
        """Updates the 'Revisions' table to have an entry for every revision that we have build data for."""
        # SVN setup
        svn_api = pysvn.Client()
        svn_api.callback_ssl_server_trust_prompt = self.ssl_server_trust_prompt
        svn_api.callback_get_login = self.svn_get_login
        db = self.DB.getDB()
        # Test if there is a revision for which we currently have build data but no revision data.
        cur = db.execute('SELECT DISTINCT Revision FROM BuildProcessResults WHERE Revision NOT IN (SELECT id FROM Revisions) ORDER BY Revision ASC')
        revisionList = cur.fetchall()
        if (not revisionList):
            return
        # Get all from the first entry that we are missing up to HEAD.
        revStart = pysvn.Revision( pysvn.opt_revision_kind.number, revisionList[0][0] )
        revHead = pysvn.Revision( pysvn.opt_revision_kind.head )
        revlog = svn_api.log(self.config['SVN_ROOT'], revision_start=revStart, revision_end=revHead, discover_changed_paths=True)
        # Add SVN results to the database
        for pySvnLog in revlog:
            # Test whether we already have data for this revision
            cur = db.execute('SELECT id FROM Revisions WHERE id=?', (pySvnLog.data['revision'].number,))
            rev = cur.fetchall()
            if not rev:
                changedPaths = '';
                # Concatenate all touched files with their action into one string.
                for entry in pySvnLog.data['changed_paths']:
                    changedPaths += (entry.data['action'] + ' ' + entry.data['path'] + "\n")
                # Insert into table
                # NOTE(review): `long(...)` implies this module targets Python 2.
                res = db.execute('INSERT INTO Revisions (id, Date, Author, Message, ChangedPaths) values (?, ?, ?, ?, ?)',
                    (pySvnLog.data['revision'].number, long(pySvnLog.data['date']), pySvnLog.data['author'], pySvnLog.data['message'], changedPaths) )
        # This function is only called by 'AddBuildProcessResult', so no DB commit here.
        return

    # BUG FIX: both pysvn callbacks below are accessed as bound methods
    # (self.ssl_server_trust_prompt / self.svn_get_login), so they must take
    # `self` as the first parameter; the originals lacked it, which made
    # pysvn's invocation fail (and svn_get_login could never reach self.config).
    def ssl_server_trust_prompt(self, trust_dict):
        # Unconditionally trust the server certificate (internal server).
        return True, 5, True

    def svn_get_login(self, realm, username, may_save):
        # Supply the configured service credentials; do not let pysvn cache them.
        return True, self.config['SVN_USER'], self.config['SVN_PASS'], False

    def AddBuildMachine(self, jsonConfiguration):
        """Returns the id of the BuildMachine referenced by the given configuration,
        if no entry exists yet it is created."""
        db = self.DB.getDB()
        # Test whether we already have this build machine
        cur = db.execute('SELECT id FROM BuildMachines WHERE ConfigurationName=?', (jsonConfiguration['ConfigurationName'],))
        machine = cur.fetchall()
        machineId = 0
        if not machine:
            res = db.execute('INSERT INTO BuildMachines (Configuration, ConfigurationName, DirectHardwareAccess) values (?, ?, ?)',
                (jsonConfiguration['Configuration'], jsonConfiguration['ConfigurationName'], jsonConfiguration['DirectHardwareAccess']) )
            machineId = res.lastrowid
        else:
            machineId = machine[0][0]
        return machineId

    def AddSVNResult(self, jsonSVNResult, buildProcessId):
        """Store the SVN update step result (plus raw process output, if any)."""
        db = self.DB.getDB()
        res = db.execute('INSERT INTO SVNResults (Success, Duration, Errors, BuildProcessResult) values (?, ?, ?, ?)',
            (jsonSVNResult['Success'], jsonSVNResult['Duration'], jsonSVNResult['Errors'], buildProcessId) )
        SVNResultId = res.lastrowid
        jsonProcessRes = jsonSVNResult['ProcessRes']
        if (not jsonProcessRes):
            return
        res = db.execute('INSERT INTO ProcessResults (ExitCode, StdOut, ErrorOut, Duration, SVNResult) values (?, ?, ?, ?, ?)',
            (jsonProcessRes['ExitCode'], jsonProcessRes['StdOut'], jsonProcessRes['ErrorOut'], jsonProcessRes['Duration'], SVNResultId) )
        return

    def AddCMakeResult(self, jsonCMakeResult, buildProcessId):
        """Store the CMake configure step result (plus raw process output, if any)."""
        db = self.DB.getDB()
        res = db.execute('INSERT INTO CMakeResults (Success, Duration, Errors, BuildProcessResult) values (?, ?, ?, ?)',
            (jsonCMakeResult['Success'], jsonCMakeResult['Duration'], jsonCMakeResult['Errors'], buildProcessId) )
        CMakeResultId = res.lastrowid
        jsonProcessRes = jsonCMakeResult['ProcessRes']
        if (not jsonProcessRes):
            return
        res = db.execute('INSERT INTO ProcessResults (ExitCode, StdOut, ErrorOut, Duration, CMakeResult) values (?, ?, ?, ?, ?)',
            (jsonProcessRes['ExitCode'], jsonProcessRes['StdOut'], jsonProcessRes['ErrorOut'], jsonProcessRes['Duration'], CMakeResultId) )
        return

    def AddBuildResult(self, jsonBuildResult, buildProcessId):
        """Store the compile step result with one entry per build target."""
        db = self.DB.getDB()
        res = db.execute('INSERT INTO BuildResults (Success, Duration, Errors, BuildProcessResult) values (?, ?, ?, ?)',
            (jsonBuildResult['Success'], jsonBuildResult['Duration'], jsonBuildResult['Errors'], buildProcessId) )
        buildResultId = res.lastrowid
        # Add all build targets
        for jsonBuildTargetResult in jsonBuildResult['BuildTargetResults']:
            # Add BuildTargetResult
            res = db.execute('INSERT INTO BuildTargetResults (Name, Experimental, Success, Duration, Errors, BuildResult) values (?, ?, ?, ?, ?, ?)',
                (jsonBuildTargetResult['Name'], jsonBuildTargetResult['Experimental'], jsonBuildTargetResult['Success'],
                jsonBuildTargetResult['Duration'], jsonBuildTargetResult['Errors'], buildResultId) )
            buildTargetResultId = res.lastrowid
            # Add ProcessRes
            jsonProcessRes = jsonBuildTargetResult['ProcessRes']
            if (not jsonProcessRes):
                continue
            res = db.execute('INSERT INTO ProcessResults (ExitCode, StdOut, ErrorOut, Duration, BuildTargetResult) values (?, ?, ?, ?, ?)',
                (jsonProcessRes['ExitCode'], jsonProcessRes['StdOut'], jsonProcessRes['ErrorOut'], jsonProcessRes['Duration'], buildTargetResultId) )
        return

    def AddTestResult(self, jsonTestResult, buildProcessId):
        """Store the test step result with one entry per test target."""
        db = self.DB.getDB()
        res = db.execute('INSERT INTO TestResults (Success, Duration, Errors, BuildProcessResult) values (?, ?, ?, ?)',
            (jsonTestResult['Success'], jsonTestResult['Duration'], jsonTestResult['Errors'], buildProcessId) )
        testResultId = res.lastrowid
        # Add all test targets
        for jsonTestTargetResult in jsonTestResult['TestTargetResults']:
            # Add TestTargetResult
            res = db.execute('INSERT INTO TestTargetResults (Name, NeedsHardwareAccess, Experimental, Success, Duration, Errors, TestResult) values (?, ?, ?, ?, ?, ?, ?)',
                (jsonTestTargetResult['Name'], jsonTestTargetResult['NeedsHardwareAccess'], jsonTestTargetResult['Experimental'], jsonTestTargetResult['Success'],
                jsonTestTargetResult['Duration'], jsonTestTargetResult['Errors'], testResultId) )
            testTargetResultId = res.lastrowid
            # Add ProcessRes
            jsonProcessRes = jsonTestTargetResult['ProcessRes']
            if (not jsonProcessRes):
                continue
            res = db.execute('INSERT INTO ProcessResults (ExitCode, StdOut, ErrorOut, Duration, TestTargetResult) values (?, ?, ?, ?, ?)',
                (jsonProcessRes['ExitCode'], jsonProcessRes['StdOut'], jsonProcessRes['ErrorOut'], jsonProcessRes['Duration'], testTargetResultId) )
        return

    ########################################################################
    ## DBWriter private fields
    ########################################################################
    DB = None
    config = None
    callback_SendMail = None
3396424 | <reponame>StichtingIAPC/swipe
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-29 18:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the cross-app FK fields deferred from the two initial migrations
    (receipt -> sales.Transaction, invoice line -> CustomCustInvoice)."""

    initial = True

    dependencies = [
        ('sales', '0001_initial'),
        ('customer_invoicing', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='receiptcustinvoice',
            name='receipt',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='sales.Transaction'),
        ),
        migrations.AddField(
            model_name='custominvoiceline',
            name='custom_invoice',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='customer_invoicing.CustomCustInvoice'),
        ),
    ]
| StarcoderdataPython |
9774585 | from squeezeSeg import SqueezeSeg
| StarcoderdataPython |
# ---------------------------------------------------------------------------
# Tutorial-style snippets on finalizing scikit-learn models and making
# predictions.  Each "# example of ..." section is an independent,
# self-contained snippet; imports and model fitting are intentionally
# repeated in every section.
# NOTE(review): the `[[...], [...]]` literals are Ellipsis placeholders for
# real feature rows -- calling predict() on them raises at runtime.
# NOTE(review): sklearn.datasets.samples_generator and sklearn.externals
# were removed in newer scikit-learn releases -- presumably this targets an
# old (<0.22) version; confirm before running.
# ---------------------------------------------------------------------------
# example of training a final classification model
from sklearn.linear_model import LogisticRegression
from sklearn.datasets.samples_generator import make_blobs
# generate 2d classification dataset
X, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=1)
# fit final model
model = LogisticRegression()
model.fit(X, y)
# placeholder input -- replace with real feature rows before running
Xnew = [[...], [...]]
ynew = model.predict(Xnew)
# example of training a final classification model
from sklearn.linear_model import LogisticRegression
from sklearn.datasets.samples_generator import make_blobs
# generate 2d classification dataset
X, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=1)
# fit final model
model = LogisticRegression()
model.fit(X, y)
# new instances where we do not know the answer
Xnew, _ = make_blobs(n_samples=3, centers=2, n_features=2, random_state=1)
# make a prediction
ynew = model.predict(Xnew)
# show the inputs and predicted outputs
for i in range(len(Xnew)):
    print("X=%s, Predicted=%s" % (Xnew[i], ynew[i]))
# example of making a single class prediction
from sklearn.linear_model import LogisticRegression
from sklearn.datasets.samples_generator import make_blobs
# generate 2d classification dataset
X, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=1)
# fit final model
model = LogisticRegression()
model.fit(X, y)
# define one new instance
Xnew = [[-0.79415228, 2.10495117]]
# make a prediction
ynew = model.predict(Xnew)
print("X=%s, Predicted=%s" % (Xnew[0], ynew[0]))
# placeholder input for probability prediction -- replace before running
Xnew = [[...], [...]]
ynew = model.predict_proba(Xnew)
# example of making multiple probability predictions
from sklearn.linear_model import LogisticRegression
from sklearn.datasets.samples_generator import make_blobs
# generate 2d classification dataset
X, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=1)
# fit final model
model = LogisticRegression()
model.fit(X, y)
# new instances where we do not know the answer
Xnew, _ = make_blobs(n_samples=3, centers=2, n_features=2, random_state=1)
# make a prediction
ynew = model.predict_proba(Xnew)
# show the inputs and predicted probabilities
for i in range(len(Xnew)):
    print("X=%s, Predicted=%s" % (Xnew[i], ynew[i]))
# example of training a final regression model
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
# generate regression dataset
X, y = make_regression(n_samples=100, n_features=2, noise=0.1, random_state=1)
# fit final model
model = LinearRegression()
model.fit(X, y)
# example of training a final regression model
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
# generate regression dataset (no random_state: data differs every run)
X, y = make_regression(n_samples=100, n_features=2, noise=0.1)
# fit final model
model = LinearRegression()
model.fit(X, y)
# new instances where we do not know the answer
Xnew, _ = make_regression(n_samples=3, n_features=2, noise=0.1, random_state=1)
# make a prediction
ynew = model.predict(Xnew)
# show the inputs and predicted outputs
for i in range(len(Xnew)):
    print("X=%s, Predicted=%s" % (Xnew[i], ynew[i]))
# example of training a final regression model
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
# generate regression dataset
X, y = make_regression(n_samples=100, n_features=2, noise=0.1)
# fit final model
model = LinearRegression()
model.fit(X, y)
# define one new data instance
Xnew = [[-1.07296862, -0.52817175]]
# make a prediction
ynew = model.predict(Xnew)
# show the inputs and predicted outputs
print("X=%s, Predicted=%s" % (Xnew[0], ynew[0]))
# Save Model Using Pickle
# NOTE: requires network access to download the Pima Indians diabetes CSV.
import pandas
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
import pickle
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = pandas.read_csv(url, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
test_size = 0.33
seed = 7
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=test_size, random_state=seed)
# Fit the model on 33%
model = LogisticRegression()
model.fit(X_train, Y_train)
# save the model to disk
filename = 'finalized_model.sav'
pickle.dump(model, open(filename, 'wb'))
# some time later...
# load the model from disk
loaded_model = pickle.load(open(filename, 'rb'))
result = loaded_model.score(X_test, Y_test)
print(result)
# Save Model Using joblib
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# modern code imports joblib directly.
import pandas
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = pandas.read_csv(url, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
test_size = 0.33
seed = 7
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=test_size, random_state=seed)
# Fit the model on 33%
model = LogisticRegression()
model.fit(X_train, Y_train)
# save the model to disk
filename = 'finalized_model.sav'
joblib.dump(model, filename)
# some time later...
# load the model from disk
loaded_model = joblib.load(filename)
result = loaded_model.score(X_test, Y_test)
print(result)
11306287 | <reponame>rubasov/opensub-utils<gh_stars>1-10
import errno
import os
import sys
import tempfile
import unittest
from os.path import join
# Make it possible to run out of the working copy.
sys.path.insert(
0,
os.path.join(
os.path.dirname(__file__),
os.pardir,
"lib",
))
import opensub # noqa
import opensub.main # noqa
def _test_data_dir():
return os.path.join(
os.path.dirname(__file__),
"test-data",
)
class LookIntoArchive(unittest.TestCase):
    """Tests reading member names out of a subtitle zip archive."""

    def test__extract_filenames_from_zip(self):
        """Should see filenames in the archive."""
        expected = [
            "Birdman of Alcatraz - 1.srt",
            "Birdman of Alcatraz - 2.srt",
        ]
        test_file = os.path.join(_test_data_dir(), "4130212.zip")
        with open(test_file, "rb") as tfile:
            # The archive normally downloads from its URL; injecting the
            # already-open local zip as .tempfile skips the network.
            archive = opensub.SubtitleArchive(url="http://127.0.0.1/dummy/")
            archive.tempfile = tfile
            subtitle_names = [sfile.name for sfile in archive.yield_open()]
        self.assertEqual(subtitle_names, expected)


# Yeah, I know that multiple asserts are not recommended in a single
# test method, but I couldn't bear the repetitive code. In the
# traceback you'll see which assert failed anyway... -- rubasov
class DefaultTemplate(unittest.TestCase):
    """Tests for the default output-filename template.

    The template keeps the video's directory and base name and takes the
    subtitle's extension.
    """

    def setUp(self):
        self.template = "{video/dir}{video/base}{subtitle/ext}"

    def _assertEqual(self, video, subtitle, expected):
        # Build a filename from the fixture template and compare after
        # normalization, so separators are platform-independent.
        builder = opensub.FilenameBuilder(template=self.template)
        fname = builder.build(video=video, subtitle=subtitle)
        self.assertEqual(os.path.normpath(fname), expected)

    def test__combinations(self):
        """Zillion combinations of templating input."""
        # Table-driven instead of eleven copy-pasted calls:
        # (video, subtitle, expected output).
        cases = [
            ("video.avi", "subtitle.srt", "video.srt"),
            ("video", "subtitle.srt", "video.srt"),
            ("video.avi", "subtitle", "video"),
            ("foo.bar.avi", "baz.qux.srt", "foo.bar.srt"),
            (".video.avi", ".subtitle.srt", ".video.srt"),
            (join("dir", "video.avi"), "subtitle.srt",
             join("dir", "video.srt")),
            ("video.avi", join("dir", "subtitle.srt"), "video.srt"),
            (join("", "dir", "video.avi"), "subtitle.srt",
             join("", "dir", "video.srt")),
            ("video.avi", join("", "dir", "subtitle.srt"), "video.srt"),
            (join("", "video.avi"), "subtitle.srt", join("", "video.srt")),
            ("video.avi", join("", "subtitle.srt"), "video.srt"),
        ]
        for video, subtitle, expected in cases:
            self._assertEqual(video, subtitle, expected)

    def _assertRaises(self, video, subtitle, expected):
        # Building from an invalid path must raise `expected`.
        builder = opensub.FilenameBuilder(template=self.template)
        with self.assertRaises(expected):
            builder.build(video=video, subtitle=subtitle)

    def test__empty_string_is_invalid_path(self):
        """Fail on empty string."""
        self._assertRaises("", "junk", Exception)
        self._assertRaises("junk", "", Exception)
class RoundTrip(unittest.TestCase):
    """Template that reuses every subtitle component must round-trip."""

    def setUp(self):
        self.template = "{subtitle/dir}{subtitle/base}{subtitle/ext}"

    def _assertEqual(self, video, subtitle, expected):
        builder = opensub.FilenameBuilder(template=self.template)
        fname = builder.build(video=video, subtitle=subtitle)
        self.assertEqual(os.path.normpath(fname), expected)

    def test__roundtrip_safety(self):
        """A break-to-pieces-assemble cycle should result in the original."""
        # The video argument is irrelevant for this template ("junk").
        self._assertEqual(
            "junk",
            "subtitle.srt",
            "subtitle.srt",
        )
        self._assertEqual(
            "junk",
            join("dir", "subtitle.srt"),
            join("dir", "subtitle.srt"),
        )
        self._assertEqual(
            "junk",
            join("", "dir", "subtitle.srt"),
            join("", "dir", "subtitle.srt"),
        )
        self._assertEqual(
            "junk",
            join("", "subtitle.srt"),
            join("", "subtitle.srt"),
        )
class Extract(unittest.TestCase):
    """Template with only base+ext extracts into the current directory."""

    def setUp(self):
        self.template = "{subtitle/base}{subtitle/ext}"

    def test__extract_to_current_dir(self):
        """Extract subtitles by their original names."""
        builder = opensub.FilenameBuilder(template=self.template)
        # Any directory components of the subtitle path must be dropped.
        fname = builder.build(
            video="junk",
            subtitle=join("", "dir", "subdir", "subtitle.srt"),
        )
        self.assertEqual(os.path.normpath(fname), "subtitle.srt")
class NumberedTemplate(unittest.TestCase):
    """Templates may contain extra format fields such as {num:02}."""

    def setUp(self):
        self.template = "episode{num:02}{subtitle/ext}"

    def test__number_formatting(self):
        """Can use numbered templates."""
        builder = opensub.FilenameBuilder(template=self.template)
        fname = builder.build(
            video="junk",
            subtitle="subtitle.srt",
            num=7,
        )
        # {num:02} zero-pads to two digits.
        self.assertEqual(os.path.normpath(fname), "episode07.srt")

    def test__missing_value_for_template_variable(self):
        """Fail on missing value for template variable."""
        builder = opensub.FilenameBuilder(template=self.template)
        # 'num' is required by the template but not supplied here.
        with self.assertRaises(Exception):
            builder.build(
                video="junk",
                subtitle="subtitle.srt",
            )
class SafeOpen(unittest.TestCase):
    """Tests for opensub.main.safe_open."""

    def test__no_overwrite(self):
        """Do not overwrite existing files by default."""
        tmpfile = tempfile.NamedTemporaryFile()
        # safe_open must refuse a path that already exists (EEXIST).
        with self.assertRaises(OSError) as cm:
            opensub.main.safe_open(tmpfile.name)
        self.assertEqual(cm.exception.errno, errno.EEXIST)
        tmpfile.close()


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
8120467 | <gh_stars>100-1000
import multiprocessing
import os
import queue
import signal
import threading
import time
from rohmu import get_transfer
from rohmu.rohmufile import create_sink_pipeline
from pghoard.common import get_object_storage_config
from pghoard.config import key_lookup_for_site
class FileFetchManager:
    """Manages (potentially) multiprocessing related assets for fetching file contents from
    object storage. If a multiprocess.Manager instance is provided, the fetch is performed
    in a subprocess to avoid GIL related performance constraints, otherwise file is fetched
    in current process."""

    def __init__(self, app_config, mp_manager, transfer_provider):
        self.config = app_config
        self.last_activity = time.monotonic()
        self.lock = threading.RLock()
        # Seconds of inactivity after which the helper process is stopped.
        self.max_idle_age = 10 * 60
        self.mp_manager = mp_manager
        self.process = None
        self.result_queue = None
        self.task_queue = None
        # Callable: site -> transfer object (used for in-process fetches).
        self.transfer_provider = transfer_provider

    def check_state(self):
        """Stop the helper process if it has been idle for too long."""
        if self.process and time.monotonic() - self.last_activity > self.max_idle_age:
            self.stop()

    def fetch_file(self, site, key, target_path):
        """Fetch `key` for `site` into `target_path`.

        Returns a (file_size, metadata) tuple.  Re-raises any exception
        the worker hit; raises queue.Empty if the worker died mid-fetch.
        """
        self.last_activity = time.monotonic()
        self._start_process()
        if self.mp_manager:
            # Worker protocol: we enqueue (site, key, target_path); the worker
            # replies with (task, file_size, metadata) or (task, exception).
            self.task_queue.put((site, key, target_path))
            result = self.result_queue.get()
            if result is None:
                # Should only happen if the process is terminated while we're waiting for
                # a result, which is pretty much the same as timeout
                raise queue.Empty
            elif isinstance(result[1], Exception):
                raise result[1]
            return result[1], result[2]
        else:
            # No manager configured: fetch synchronously in this process.
            transfer = self.transfer_provider(site)
            return FileFetcher(self.config, transfer).fetch(site, key, target_path)

    def stop(self):
        """Shut down the helper process (SIGKILL if it ignores the sentinel)."""
        with self.lock:
            if not self.process:
                return
            # None sentinels unblock both the worker loop and any waiter.
            self.task_queue.put(None)
            self.result_queue.put(None)
            process = self.process
            self.process = None
            self.task_queue = None
            self.result_queue = None
            process.join(timeout=0.1)
            if process.exitcode is None:
                os.kill(process.pid, signal.SIGKILL)
            process.join()

    def _start_process(self):
        """Lazily start the fetch subprocess (no-op without an mp_manager)."""
        with self.lock:
            if not self.mp_manager or self.process:
                return
            self.result_queue = self.mp_manager.Queue()
            self.task_queue = self.mp_manager.Queue()
            self.process = multiprocessing.Process(
                target=_remote_file_fetch_loop, args=(self.config, self.task_queue, self.result_queue)
            )
            self.process.start()
class FileFetcher:
    """Fetches a file from object storage and strips possible encryption and/or compression away."""

    def __init__(self, app_config, transfer):
        self.config = app_config
        self.transfer = transfer

    def fetch(self, site, key, target_path):
        """Download `key` into `target_path`, decrypting/decompressing via rohmu.

        Returns (file_size, metadata).  On any failure the partially
        written target file is removed before the exception propagates.
        """
        try:
            lookup = key_lookup_for_site(self.config, site)
            data, metadata = self.transfer.get_contents_to_string(key)
            if isinstance(data, str):
                # Some backends return str; latin1 maps code points 0-255
                # back to the original bytes one-to-one.
                data = data.encode("latin1")
            file_size = len(data)
            with open(target_path, "wb") as target_file:
                # The sink pipeline undoes encryption/compression as it writes.
                output = create_sink_pipeline(
                    output=target_file, file_size=file_size, metadata=metadata, key_lookup=lookup, throttle_time=0
                )
                output.write(data)
            return file_size, metadata
        except Exception:
            # Don't leave a truncated/corrupt file behind.
            if os.path.isfile(target_path):
                os.unlink(target_path)
            raise
def _remote_file_fetch_loop(app_config, task_queue, result_queue):
    """Subprocess worker: serve fetch tasks until a falsy sentinel arrives.

    Tasks are (site, key, target_path) tuples; replies are either
    (task, file_size, metadata) or (task, exception).  Transfer objects
    are cached per site across tasks.
    """
    transfers = {}
    while True:
        task = task_queue.get()
        if not task:
            # None sentinel from FileFetchManager.stop() -> exit cleanly.
            return
        try:
            site, key, target_path = task
            transfer = transfers.get(site)
            if not transfer:
                transfer = get_transfer(get_object_storage_config(app_config, site))
                transfers[site] = transfer
            file_size, metadata = FileFetcher(app_config, transfer).fetch(site, key, target_path)
            result_queue.put((task, file_size, metadata))
        except Exception as e:  # pylint: disable=broad-except
            # Ship the exception back so the parent can re-raise it.
            result_queue.put((task, e))
| StarcoderdataPython |
4977405 | import geonomics as gnx
import numpy as np
# FLAG DETERMINING WHETHER TO TEST TRAIT MUTATION OR DELETERIOUS MUTATION
mutate_trait = False

# Build a model from the default parameter file, burn it in, then take one
# real step so a fresh cohort of offspring (age == 0) exists to mutate.
mod = gnx.make_model('./GNX_default_model_params.py')
mod.walk(10000, 'burn')
mod.walk(1)
spp = mod.comm[0]
ga = spp.gen_arch
re = ga.recombinations  # NOTE(review): shadows the stdlib 're' module; unused below
trt = ga.traits[0]
# Indices of this step's offspring; the last one receives the mutation.
off = [i.idx for i in spp.values() if i.age == 0]
ga.mutables = [*ga.neut_loci]
np.random.shuffle(ga.mutables)
if mutate_trait:
    # PRINT STUFF BEFOREHAND
    print('ga.nonneut_loci', ga.nonneut_loci)
    print('trait loci', ga.traits[0].loci)
    print('trait locus index', ga.traits[0].loc_idx)
    print('unmutated genome:\n', spp[off[0]].g)
    print('mutated genome:\n', spp[off[-1]].g)
    nonneut_loci_b4 = set([*ga.nonneut_loci])
    gnx.ops.mutation._do_nonneutral_mutation(spp, [off[-1]], trait_nums=[0])
    # PRINT STUFF AFTERWARD
    print('trait loci', ga.traits[0].loci)
    print('trait locus index', ga.traits[0].loc_idx)
    print('unmutated genome:\n', spp[off[0]].g)
    print('mutated genome:\n', spp[off[-1]].g)
    nonneut_loci_af = set([*ga.nonneut_loci])
    # The one locus that became non-neutral is the mutated one.
    new_locus = [*nonneut_loci_af.difference(nonneut_loci_b4)][0]
    mut_loc_idx = np.where(trt.loc_idx == np.where(
        trt.loci == new_locus)[0][0])[0][0]
    # Homologue (0/1) carrying the derived allele in the mutated individual.
    mut_homol = np.where(spp[off[-1]].g[mut_loc_idx, :] == 1)[0][0]
    #mutated_homologue = np.where(spp[off[-1]].g[np.where(
    #    trt.loc_idx == np.where(
    #        trt.loci == new_locus)[0][0])[0][0],:] == 1)[0][0]
    # MAKE SURE THE INFO IN GEONOMICS' NATIVE DATA STRUCTURES MATCHES THAT IN THE
    # TSKIT STRUCTURES
    print(spp._tc.mutations, '\n')
    print('last row should read:\n%i\t%i\t%i\t1\t-1' % (
        spp._tc.mutations.num_rows - 1,
        new_locus,
        spp[off[-1]]._nodes_tab_ids[mut_homol]))
else:
    # PRINT STUFF BEFOREHAND
    print('delet loci', ga.delet_loci)
    print('unmutated genome:\n', spp[off[0]].g)
    print('mutated genome:\n', spp[off[-1]].g)
    nonneut_loci_b4 = set([*ga.nonneut_loci])
    # Deleterious (rather than trait) mutation with selection coeff 0.1.
    gnx.ops.mutation._do_nonneutral_mutation(spp, [off[-1]], delet_s=0.1)
    # PRINT STUFF AFTERWARD
    print('delet loci', ga.delet_loci)
    print('unmutated genome:\n', spp[off[0]].g)
    print('mutated genome:\n', spp[off[-1]].g)
    nonneut_loci_af = set([*ga.nonneut_loci])
    new_locus = [*nonneut_loci_af.difference(nonneut_loci_b4)][0]
    mut_loc_idx = ga.delet_loc_idx[np.where(
        ga.delet_loci == new_locus)[0][0]]
    mut_homol = np.where(spp[off[-1]].g[mut_loc_idx, :] == 1)[0][0]
    #mutated_homologue = np.where(spp[off[-1]].g[np.where(
    #    trt.loc_idx == np.where(
    #        trt.loci == new_locus)[0][0])[0][0],:] == 1)[0][0]
    # MAKE SURE THE INFO IN GEONOMICS' NATIVE DATA STRUCTURES MATCHES THAT IN THE
    # TSKIT STRUCTURES
    print(spp._tc.mutations, '\n')
    print('last row should read:\n%i\t%i\t%i\t1\t-1' % (
        spp._tc.mutations.num_rows - 1,
        new_locus,
        spp[off[-1]]._nodes_tab_ids[mut_homol]))
| StarcoderdataPython |
4809894 | <reponame>suomesta/ken3
# -*- coding: utf-8 -*-
"""errortest does tests of an intended compile error.
run compiler with test code, which causes compile error.
"""
import glob
import os
import subprocess
import sys
__author__ = 'toda'
__version__ = '1.0.0'
# compile command
COMMAND = 'g++'
# options of compile command
OPTIONS = ['--std=c++11', '-c', '-I../../']
# '#elif defined D0001_TEST_FUNC_ACTIVATED'
DEF_LENGTH = 39
DEF_PREFIX = '#elif defined D'
DEF_SUFFIX = '_TEST_FUNC_ACTIVATED'
def filenames(targets, exclude):
    """Pick cpp files from current directory.

    param[in] targets: appointed targets file names.
                       if None, pick all files with extension '.cpp'
                       should be string-list or None
    param[in] exclude: appointed file names to be excluded.
                       should be string-list or None
    return targer file names in string-list
    """
    if targets:
        candidates = list(targets)
    else:
        # Top-level *.cpp plus one directory level below.
        candidates = glob.glob('*.cpp') + glob.glob('*/*.cpp')
    excluded = set(exclude) if exclude else set()
    return sorted(name for name in candidates if name not in excluded)
def execute(name, full_description):
    """Execute test program.

    param[in] name: appointed target file name. normally .cpp file.
    param[in] full_description: a flag to show detailed message and
                                compiler's message.
    return names of NG defines in string-list.
    """
    # open file and get lines
    with open(name, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    # pick up defines: lines shaped like
    # '#elif defined Dxxxx_TEST_FUNC_ACTIVATED' (see DEF_* constants)
    defines = set()
    for line in lines:
        line = line.strip('\r\n')
        if (len(line) == DEF_LENGTH and
                line.startswith(DEF_PREFIX) and
                line.endswith(DEF_SUFFIX)):
            defines.add(line.split(' ')[2])
    # output file name: foo.cpp -> foo.o
    out = '.o'.join(name.rsplit('.cpp', 1))
    # try to compile each define; this is an *error* test, so a compile
    # FAILURE is the expected (OK) outcome and a success is NG.
    oks = []
    ngs = []
    for define in sorted(defines):
        cmd = ' '.join([COMMAND, name, '-D', define, '-o', out] + OPTIONS)
        proc = subprocess.Popen(cmd, shell=True, cwd=None,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        # record a program if its result is NG
        if proc.returncode != 0:
            oks.append(define)
        else:  # success to compile (unexpected)
            ngs.append(define)
            # remove object file left behind by the successful compile
            # NOTE(review): placement inferred -- on failure no object
            # file exists, so removal must happen on this branch only.
            os.remove(out)
        if full_description:  # print compiler's output for this define
            print(stdout.decode('utf-8'))
            print(stderr.decode('utf-8'))
    if full_description:
        print('Result in ' + name)
        print('OK: {0} ({1})'.format(len(oks), ', '.join(oks)))
        print('NG: {0} ({1})'.format(len(ngs), ', '.join(ngs)))
    return ngs
def run_tests(names, full_description):
    """Run each test program and print a pass/fail summary.

    param[in] names: appointed target file names. normally .cpp files.
    param[in] full_description: a flag to show detailed message and
                                compiler's message
    return number of NG (failed) programs.
    """
    print('Start errortest!')
    failed = []
    for index, name in enumerate(names, start=1):
        if full_description:
            print()
            print('{0}/{1}: {2}'.format(index, len(names), name))
        # A non-empty NG list from execute() marks the whole file as failed.
        if execute(name, full_description):
            failed.append(name)
    print()
    print('Results:')
    print(' OK:' + str(len(names) - len(failed)))
    print(' NG:' + str(len(failed)))
    if failed:
        print()
        print('NG Programs:')
        print('\n'.join(' ' + entry for entry in failed))
    return len(failed)
def main():
    """Build the CLI, parse sys.argv, and run every requested test."""
    import argparse

    parser = argparse.ArgumentParser(description='errortest')
    parser.add_argument('-v', '--version', action='version',
                        version=('%(prog)s ' + __version__))
    parser.add_argument('-f', '--full_description', action='store_true',
                        help='full description')
    parser.add_argument('-e', '--exclude', type=str, nargs='+',
                        help='file names to be excluded')
    parser.add_argument('-t', '--targets', type=str, nargs='+',
                        help='target file names (default: all)')
    options = parser.parse_args()

    selected = filenames(options.targets, options.exclude)
    return run_tests(selected, options.full_description)


if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
1716012 | <filename>flaskmovie/config.py
import os
# Database connection URL for SQLAlchemy.
# NOTE(review): credentials and SECRET_KEY are hardcoded here -- move to
# environment variables / an untracked config before deploying.
# SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:ty158917@192.168.3.11:3306/movie"
SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:zw123456@127.0.0.1:3306/movie"
# If set to True (the default), Flask-SQLAlchemy will track object
# modifications and emit signals.
SQLALCHEMY_TRACK_MODIFICATIONS = True
REDIS_URL = "redis://127.0.0.1:6379/0"
SECRET_KEY = 'mtianyan_movie'
UP_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "app/static/uploads/")
P_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "app/static/uploads/pmovies/")  # upload path for movie trailers
M_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "app/static/uploads/movies/")  # upload path for movies
FC_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "app/static/uploads/users/")  # upload path for user avatars -- TODO confirm
362131 | <filename>heimdall/models/__init__.py
"""Models package."""
from flask import Blueprint
from heimdall import db
bp = Blueprint("database", __name__)


@bp.after_app_request
def commit_session(response):
    """Commit the current database session after each successful request.

    Responses with a 4xx/5xx status pass through without committing, so a
    failed request never persists partial changes.
    """
    if response.status_code >= 400:
        return response
    # TODO: Might have to catch exceptions here
    db.session.commit()
    return response
| StarcoderdataPython |
9671240 | <filename>pymir/Spectrum.py<gh_stars>100-1000
"""
Spectrum class
ndarray subclass for spectral data
Last updated: 17 December 2012
"""
from __future__ import division
import math
import numpy
from numpy import *
import scipy.stats
import scipy.stats.mstats
import matplotlib.pyplot as plt
import pymir
from pymir import MFCC, Pitch, Transforms
class Spectrum(numpy.ndarray):
    """A one-dimensional spectrum with spectral-feature helpers.

    Subclasses ``numpy.ndarray`` and carries the originating ``sampleRate``
    (Hz) so bin indices can be converted to center frequencies: bin *k* of
    an *n*-point spectrum represents ``k * (sampleRate / 2) / n`` Hz.

    The per-bin Python loops of the original ``centroid``, ``spread`` and
    ``rolloff`` implementations are replaced with vectorized NumPy
    operations (same results, O(n) C-speed instead of interpreted loops).
    """

    def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
                strides=None, order=None):
        """Create a Spectrum; same signature as ``numpy.ndarray``."""
        # Delegate to the standard ndarray constructor; this also triggers
        # __array_finalize__ on the new object.
        obj = numpy.ndarray.__new__(subtype, shape, dtype, buffer, offset,
                                    strides, order)
        obj.sampleRate = 0  # caller is expected to set the real rate
        return obj

    def __array_finalize__(self, obj):
        """Propagate ``sampleRate`` through construction, view casting and slicing."""
        if obj is None:
            return
        self.sampleRate = getattr(obj, 'sampleRate', None)

    #####################
    # Internal helpers
    #####################

    def _frequencies(self):
        """Center frequency (Hz) of every bin, as a plain ndarray."""
        bin_width = (self.sampleRate / 2.0) / len(self)
        return numpy.arange(len(self)) * bin_width

    def _magnitudes(self):
        """Magnitude of every bin, as a plain ndarray (subclass stripped)."""
        return numpy.abs(numpy.asarray(self))

    #####################
    # Spectrum methods
    #####################

    def centroid(self):
        """Compute the spectral centroid.

        Characterizes the "center of gravity" of the spectrum.
        Approximately related to timbral "brightness".
        """
        mags = self._magnitudes()
        return float(numpy.sum(self._frequencies() * mags) / numpy.sum(mags))

    def chroma(self):
        """Compute the 12-ET chroma vector from this spectrum."""
        return Pitch.chroma(self)

    def crest(self):
        """Spectral crest factor: ratio of the spectral maximum to the
        sum of the spectrum."""
        mags = self._magnitudes()
        return float(numpy.max(mags) / numpy.sum(mags))

    def flatness(self):
        """Spectral flatness: ratio between geometric and arithmetic means."""
        geometricMean = scipy.stats.mstats.gmean(abs(self))
        arithmeticMean = self.mean()
        return geometricMean / arithmeticMean

    def idct(self):
        """Compute the Inverse Discrete Cosine Transform (IDCT)."""
        return Transforms.idct(self)

    def ifft(self):
        """Compute the Inverse FFT."""
        return Transforms.ifft(self)

    def kurtosis(self):
        """Spectral kurtosis (fourth spectral moment) of the magnitudes."""
        return scipy.stats.kurtosis(abs(self))

    def mean(self):
        """Spectral mean (first spectral moment).

        NOTE: deliberately overrides ``numpy.ndarray.mean`` -- it averages
        the bin *magnitudes*, not the raw (possibly complex) bin values.
        """
        return float(numpy.sum(self._magnitudes()) / len(self))

    def mfcc(self, m, NumFilters=48):
        """Compute the Mth Mel-Frequency Cepstral Coefficient."""
        return MFCC.mfcc(self, m, NumFilters)

    def mfcc2(self, numFilters=32):
        """Vectorized MFCC implementation."""
        return MFCC.mfcc2(self, numFilters)

    def plot(self):
        """Plot the magnitude spectrum using matplotlib."""
        plt.plot(abs(self))
        plt.xlim(0, len(self))
        plt.show()

    def rolloff(self):
        """Spectral rolloff: the frequency below which 85% of the
        spectrum's energy is located."""
        mags = self._magnitudes()
        threshold = 0.85 * numpy.sum(mags)
        # First bin whose cumulative magnitude exceeds the threshold;
        # 0 if none does (matches the original loop's default).
        above = numpy.nonzero(numpy.cumsum(mags) > threshold)[0]
        rolloff_index = int(above[0]) if above.size else 0
        return rolloff_index * (self.sampleRate / 2.0) / len(self)

    def skewness(self):
        """Spectral skewness (third spectral moment) of the magnitudes."""
        return scipy.stats.skew(abs(self))

    def spread(self):
        """Spectral spread: magnitude-weighted variance of frequency
        around the spectral centroid (returned as a standard deviation)."""
        center = self.centroid()
        mags = self._magnitudes()
        weighted = numpy.sum(((self._frequencies() - center) ** 2) * mags)
        return math.sqrt(float(weighted / numpy.sum(mags)))

    def variance(self):
        """Spectral variance (second spectral moment) of the magnitudes."""
        return numpy.var(abs(self))
| StarcoderdataPython |
338786 | <reponame>hoostus/beancount-price-sources<filename>hoostus_sources/openexchange.py
"""Fetch prices from Morningstar's JSON 'api'
"""
import datetime
import logging
import re
import json
from urllib import parse
from urllib import error
from beancount.core.number import D
from beancount.prices import source
from beancount.utils import net_utils
"""
bean-price -e 'USD:openexchange/<app_id>:USD_VND'
"""
class Source(source.Source):
"OpenExchange API exchange rate extractor."
def get_url(self, url_template, ticker):
app_id, currencies = ticker.split(':')
from_currency, to_currency = currencies.split('_')
url = url_template.format(app_id, from_currency, to_currency)
logging.info("Fetching %s", url)
try:
response = net_utils.retrying_urlopen(url)
if response is None:
return None
response = response.read().decode('utf-8').strip()
response = json.loads(response)
except error.HTTPError:
return None
# we use quantize because otherwise the conversion from an float to a Decimal
# leaves tons of cruft (i.e. dozens of digits of meaningless precision) that
# just clutters up the price file
price = D(response['rates'][to_currency]).quantize(D('1.000000'))
trade_date = datetime.datetime.fromtimestamp(response['timestamp'], datetime.timezone.utc)
return source.SourcePrice(price, trade_date, from_currency)
def get_historical_price(self, ticker, date):
template = 'https://openexchangerates.org/api/historical/{}.json'.format(date.strftime('%Y-%m-%d'))
template += '?app_id={}&base={}&symbols={}'
return self.get_url(template, ticker)
def get_latest_price(self, ticker):
template = 'https://openexchangerates.org/api/latest.json?app_id={}&base={}&symbols={}'
return self.get_url(template, ticker)
| StarcoderdataPython |
# Lookup tables mapping a base material (block id -> data value) to the
# block [id, data] (or bare stair id) used for each structural role.
wallBlocks = {}
stairBlocks = {}
doorBlocks = {}
supportBlocks = {}
fenceBlocks = {}
slabBlocks = {}


def init():
    """Populate the material lookup tables (call once before building)."""
    # --- wood: id 17 = oak/spruce/birch/jungle logs, 162 = acacia/dark oak ---
    wallBlocks[17] = {d: [5, d] for d in range(4)}       # planks
    wallBlocks[162] = {0: [5, 4], 1: [5, 5]}

    supportBlocks[17] = {d: [17, d] for d in range(4)}   # matching log
    # NOTE(review): ids 160/161 look like typos for [162, 0] / [162, 1]
    # (the acacia/dark-oak log block) -- confirm before changing.
    supportBlocks[162] = {0: [160, 0], 1: [161, 1]}

    fenceBlocks[17] = {0: [85, 0], 1: [188, 0], 2: [189, 0], 3: [190, 0]}
    fenceBlocks[162] = {0: [191, 0], 1: [192, 0]}

    stairBlocks[17] = {0: 53, 1: 134, 2: 135, 3: 136}
    stairBlocks[162] = {0: 163, 1: 164}

    doorBlocks[17] = {0: 64, 1: 193, 2: 194, 3: 195}
    doorBlocks[162] = {0: 196, 1: 197}

    # --- stone: 1 = stone, 4 = cobblestone, 24 = sandstone ---
    stone_roles = {
        # material: (wall, support, stair, slab)
        1: ([43, 5], [1, 0], 109, [44, 0]),
        4: ([43, 3], [4, 0], 67, [44, 3]),
        24: ([24, 2], [24, 1], 128, [44, 1]),
    }
    for material, (wall, support, stair, slab) in stone_roles.items():
        wallBlocks[material] = {0: wall}
        supportBlocks[material] = {0: support}
        stairBlocks[material] = {0: stair}
        slabBlocks[material] = {0: slab}
def mainHouseBox(width, length,material1,material2):
    """Build the 3-D block grid for the main (U-shaped) house.

    Python 2 code: relies on ``xrange`` and on integer division (e.g.
    ``(length - 1)/2``) for its layout arithmetic -- do not port blindly.

    param[in] width, length: footprint in blocks.
    param[in] material1: (block_id, data) pair for walls/doors.
    param[in] material2: (block_id, data) pair for supports/stairs.
    return house[x][z][y] -> [block_id, data]; each column has
           lv*4 + 1 vertical slots (4 per storey plus the roof slab).
    """
    (m1,d1)=material1;
    (m2,d2)=material2;
    # Resolve concrete block ids from the material lookup tables.
    wallId = wallBlocks[m1][d1][0];
    wallValue = wallBlocks[m1][d1][1];
    supportId = supportBlocks[m2][d2][0];
    supportValue = supportBlocks[m2][d2][1]
    stair = stairBlocks[m2][d2];
    door = doorBlocks[m1][d1];
    # Number of storeys: one per 5 blocks of footprint, capped at 3.
    lv = int((max(width,length)-2) / 5);
    lv = min(lv,3);
    # Allocate the empty house grid: 4 slots per storey + 1 roof slot.
    # NOTE(review): slot layout reconstructed from the l*4+he indexing
    # below (he in 1..4); confirm against the original source.
    house = [];
    for i in range(width):
        house.append([]);
        for j in range(length):
            house[i].append([]);
            for k in range(lv):
                house[i][j].append([0,0]);
                house[i][j].append([0,0]);
                house[i][j].append([0,0]);
                house[i][j].append([0,0]);
            house[i][j].append([0,0]);
    # Courtyard geometry: a subLength-deep notch of width (w2 - w1) is cut
    # out of the far (high-z) side, between x = w1 and x = w2.
    subWidth = 5;
    subLength = (length - 1)/2;
    w1 = subWidth + 1;
    w2 = width - subWidth - 1
    l1 = length - subLength;
    #Ground (skip the courtyard notch)
    for x in xrange(1, width - 1):
        for z in xrange( 1, length - 1):
            if z > l1:
                if w1 <= x < w2:
                    continue
            house[x][z][0][0]=supportId;
            house[x][z][0][1]=supportValue;
    # Corner-pillar positions: four outer corners plus the four corners
    # created by the courtyard notch.
    table = ((1,1),(1,length-2),(width-2,1),(width-2,length-2),
             (w1-1,length-2),(w2,length-2),(w1-1,l1),(w2,l1));
    for l in range(lv):
        #eight support pillars, 3 blocks tall per storey
        for (x,z) in table:
            for he in xrange(1,4):
                house[x][z][l*4+he][0]=supportId;
                house[x][z][l*4+he][1]=supportValue;
        #wall: front (z == 1), with a glass pane (id 20) at mid height
        #on every odd column
        for x in xrange(2, width - 2):
            z = 1
            for he in xrange(1,4):
                house[x][z][l*4+he][0]=wallId;
                house[x][z][l*4+he][1]=wallValue
            if x % 2 == 1:
                house[x][z][l*4+2][0]=20
        # side walls (x == 1 and x == width-2)
        for x in (1,width-2):
            for z in xrange(2,length-2):
                for he in xrange(1,4):
                    house[x][z][l*4+he][0]=wallId;
                    house[x][z][l*4+he][1]=wallValue
                if z % 2 == 1:
                    house[x][z][l*4+2][0]=20;
        # back walls left and right of the courtyard opening
        for x in xrange(2, w1-1):
            z = length - 2;
            for he in xrange(1,4):
                house[x][z][l*4+he][0]=wallId;
                house[x][z][l*4+he][1]=wallValue
            if x % 2 == 1:
                house[x][z][l*4+2][0]=20;
        for x in xrange(w2+1, width - 2):
            z = length - 2;
            for he in xrange(1,4):
                house[x][z][l*4+he][0]=wallId;
                house[x][z][l*4+he][1]=wallValue
            if x % 2 == 1:
                house[x][z][l*4+2][0]=20;
        # inner wall at the bottom of the courtyard notch (holds the door)
        for x in xrange(w1,w2):
            z = l1
            for he in xrange(1,4):
                house[x][z][l*4+he][0]=wallId;
                house[x][z][l*4+he][1]=wallValue
            if x % 2 == 1:
                house[x][z][l*4+2][0]=20;
        # courtyard side walls
        for z in xrange(l1+1, length - 2):
            for x in (w1-1,w2):
                for he in xrange(1,4):
                    house[x][z][l*4+he][0]=wallId;
                    house[x][z][l*4+he][1]=wallValue
                if z % 2 == 1:
                    house[x][z][l*4+2][0]=20;
        #floor/ceiling slab for this storey (skip the courtyard notch)
        for x in xrange(1, width - 1):
            for z in xrange(1, length - 1):
                if z > l1:
                    if w1 <= x < w2:
                        continue
                house[x][z][l*4+4][0]=supportId;
                house[x][z][l*4+4][1]=supportValue;
        # decorative stair trim around the slab edges (data = facing)
        for x in xrange(1, width-1):
            z = 0;
            house[x][z][l*4+4][0]=stair;
            house[x][z][l*4+4][1]=0;
            z = 0 + length - 1;
            if x == w1:
                continue;
            if x == w2-1:
                continue;
            if w1 <= x < w2:
                house[x][l1+1][l*4+4][0]=stair;
                house[x][l1+1][l*4+4][1]=2;
            else:
                house[x][length-1][l*4+4][0]=stair;
                house[x][length-1][l*4+4][1]=2;
        for z in xrange(1, length - 1):
            x = 0;
            house[x][z][l*4+4][0]=stair;
            house[x][z][l*4+4][1]=3;
            x = width - 1;
            house[x][z][l*4+4][0]=stair;
            house[x][z][l*4+4][1]=1;
        for z in xrange(l1+1, length - 1):
            x = w1;
            house[x][z][l*4+4][0]=stair;
            house[x][z][l*4+4][1]=1;
            x = w2-1;
            house[x][z][l*4+4][0]=stair;
            house[x][z][l*4+4][1]=3;
    #door: two-block door in the inner wall, centered on x (py2 int div),
    #with a step in front and a path (id 333) leading away from it
    x = (width -1)/2;
    z = l1;
    house[x][z][1][0]=door;
    house[x][z][1][1]=0;
    house[x][z][2][0]=door;
    house[x][z][2][1]=8;
    house[x][z+1][0][0]=stair;
    house[x][z+1][0][1]=2;
    z=z+2;
    while (z<length):
        house[x][z][0][0]=333;
        z = z+1;
    return house
def simpleHouseBox(width,length,material1,material2):
    """Build a rectangular multi-storey house as a nested block matrix.

    The result is indexed ``house[x][z][y] == [block_id, block_data]``
    with ``len(house) == width`` and ``len(house[x]) == length``.  Each
    storey occupies four vertical slots (three of wall plus a floor/roof
    slab) on top of a single ground layer, so every column holds
    ``lv*4 + 5`` entries.

    width, length -- footprint of the house in blocks.
    material1 -- (id, data) key selecting the wall and door material.
    material2 -- (id, data) key selecting support/floor/stair material.

    NOTE(review): relies on the module-level palette tables wallBlocks,
    supportBlocks, stairBlocks and doorBlocks, and on Python 2 integer
    division (``/`` on ints) for the storey count and door position --
    confirm before porting to Python 3.
    """
    (m1,d1)=material1;
    (m2,d2)=material2;
    wallId = wallBlocks[m1][d1][0];
    wallValue = wallBlocks[m1][d1][1];
    supportId = supportBlocks[m2][d2][0];
    supportValue = supportBlocks[m2][d2][1]
    stair = stairBlocks[m2][d2];
    door = doorBlocks[m1][d1];
    # Number of storeys grows with the footprint, capped at three.
    lv = int((max(width,length)-2) / 5);
    lv = min(lv,3);
    # Allocate the width x length x (lv*4 + 5) matrix of [id, data] cells.
    house = [];
    for i in range(width):
        house.append([]);
        for j in range(length):
            house[i].append([]);
            for k in range(lv):
                house[i][j].append([0,0]);
                house[i][j].append([0,0]);
                house[i][j].append([0,0]);
                house[i][j].append([0,0]);
            house[i][j].append([0,0]);
    #Ground
    for x in range(1,width-1):
        for z in xrange(1,length-1):
            house[x][z][0][0] = supportId;
            house[x][z][0][1] = supportValue;
    # Positions of the four corner pillars.
    table = ((1,1),(1,length-2),(width-2,1),(width-2,length-2))
    for l in range(lv):
        #four support
        for (x,z) in table:
            for he in xrange(1,4):
                house[x][z][l*4+he][0]=supportId;
                house[x][z][l*4+he][1]=supportValue;
        #wall
        for x in xrange(2,width-2):
            for z in (1,length-2):
                for he in xrange(1,4):
                    house[x][z][l*4+he][0]=wallId;
                    house[x][z][l*4+he][1]=wallValue;
        for x in (1,width-2):
            for z in xrange(2,length-2):
                for he in xrange(1,4):
                    house[x][z][l*4+he][0]=wallId;
                    house[x][z][l*4+he][1]=wallValue;
        #window -- id 20 (glass) every other wall block at mid-wall height
        for x in xrange(3,width-2,2):
            z = 1;
            house[x][z][l*4+2][0]=20;
            z = length - 2;
            house[x][z][l*4+2][0]=20;
        for z in xrange(3,length-2,2):
            x = 1;
            house[x][z][l*4+2][0]=20;
            x = width - 2;
            house[x][z][l*4+2][0]=20;
        #Floor: solid slab over the storey footprint...
        for x in xrange(1, width -1):
            for z in xrange(1,length -1):
                house[x][z][l*4+4][0]=supportId;
                house[x][z][l*4+4][1]=supportValue;
        # ...ringed by stairs on the outer edge; the data value (0-3)
        # selects the stair facing for each side.
        for x in xrange(1, width-1):
            z = 0;
            house[x][z][l*4+4][0]=stair;
            house[x][z][l*4+4][1]=0;
            z = length - 1;
            house[x][z][l*4+4][0]=stair;
            house[x][z][l*4+4][1]=2;
        for z in xrange(1, length - 1):
            x = 0;
            house[x][z][l*4+4][0]=stair;
            house[x][z][l*4+4][1]=3;
            x = width - 1;
            house[x][z][l*4+4][0]=stair;
            house[x][z][l*4+4][1]=1;
    #door -- two-high door (lower half data 0, upper half data 8) in the
    # middle of the front wall, with a single stair step just outside.
    x = (width -1)/2;
    z = length-2;
    house[x][z][1][0]=door;
    house[x][z][1][1]=0;
    house[x][z][2][0]=door;
    house[x][z][2][1]=8;
    house[x][z+1][0][0]=stair;
    house[x][z+1][0][1]=2;
    return house;
def farmBox(material1):
    """Build a fixed 7x9 farm plot as a nested block matrix.

    Layout: a one-block border of the given material surrounding tilled
    farmland (id 60), a central still-water channel (id 9) and wheat
    crops (id 59, growth data 4) on the layer above the farmland.

    material1 -- (block_id, block_data) pair used for the border.
    Returns ``house[x][z][y] == [block_id, block_data]`` with two layers
    (ground plate plus the crop layer).

    Fix over the original: uses ``range`` and floor division ``//``
    instead of the Python-2-only ``xrange`` and truncating ``/`` --
    behaviour is unchanged under Python 2 and the function now also runs
    under Python 3.
    """
    border_id, border_data = material1
    width, length = 7, 9
    house = [[[[0, 0] for _ in range(2)] for _ in range(length)]
             for _ in range(width)]
    mid = (width - 1) // 2  # column holding the irrigation channel
    for x in range(width):
        for z in range(length):
            if x in (0, width - 1) or z in (0, length - 1):
                house[x][z][0] = [border_id, border_data]
            elif x == mid:
                house[x][z][0][0] = 9           # still water
            else:
                house[x][z][0] = [60, 0]        # farmland
                house[x][z][1] = [59, 4]        # wheat crop
    return house
def smithBox(material1,material2):
    """Early draft of the blacksmith builder.

    NOTE(review): this definition is shadowed by the floor-plan based
    ``smithBox`` declared later in this module, and it is unfinished --
    it builds only part of the structure and falls through returning
    ``None``.  Kept for reference with two defects repaired:

    * the ``lenth = 10`` typo left ``length`` undefined (NameError when
      called);
    * the block matrix was initialised twice, doubling its first
      dimension.
    """
    (m1,d1)=material1;
    (m2,d2)=material2;
    wallId = wallBlocks[m1][d1][0];
    wallValue = wallBlocks[m1][d1][1];
    # NOTE(review): the "support" palette is looked up in wallBlocks, not
    # supportBlocks -- possibly intentional in this draft; confirm.
    supportId = wallBlocks[m2][d2][0];
    supportValue = wallBlocks[m2][d2][1]
    stair = stairBlocks[m2][d2];
    width = 8;
    length = 10;  # was "lenth", which left length undefined
    # Single initialisation, six layers high (the draft below only uses
    # layers 0 and 1).
    house = []
    for x in range(width):
        house.append([]);
        for z in range(length):
            house[x].append([])
            for y in range(6):
                house[x][z].append([0,0]);
    # Ground plate, leaving the last column for the entrance stairs.
    for x in range(width-1):
        for z in range(length):
            house[x][z][0][0] = wallId;
            house[x][z][0][1] = wallValue;
    house[width-1][1][0][0] = stair;
    house[width-1][1][0][1] = 0;
    house[width-1][2][0][0] = stair;
    house[width-1][2][0][1] = 0;
    house[width-1][3][0][0] = stair;
    house[width-1][3][0][1] = 0;
    # Forge corner: a 3x4 slab with a 1x2 lava (id 11) inset.
    for x in range(3):
        for z in range(4):
            house[x][z][1][0] = supportId;
            house[x][z][1][1] = supportValue;
            if x == 1:
                if z in [1,2]:
                    house[x][z][1][0] = 11;
                    house[x][z][1][1] = 0;
    # Partial wall outline on layer 1.
    x = 0;
    for z in xrange(4,length-1):
        house[x][z][1][0]=wallId;
        house[x][z][1][1]=wallValue;
    z=length-1;
    for x in xrange(1,width-1):
        house[x][z][1][0]=wallId;
        house[x][z][1][1]=wallValue;
    x = width-2;
    for z in xrange(7,length-1):
        house[x][z][1][0]=wallId;
        house[x][z][1][1]=wallValue;
def buildFloor(floors, themap):
    """Translate an ASCII floor plan into a nested block matrix.

    floors -- list of levels, each level being a list of row strings.
    themap -- maps plan characters to block descriptors; characters not
              present in the map leave the default ``[0, 0]`` cell.
    Returns ``house`` indexed as ``house[column][row][level]``, sized to
    the widest row and longest level found in the plan.
    """
    height = len(floors)
    width = 0
    length = 0
    for level in floors:
        length = max(length, len(level))
        for row in level:
            width = max(width, len(row))
    # Default-fill every cell, then overwrite the mapped characters.
    house = [[[[0, 0] for _ in range(height)] for _ in range(length)]
             for _ in range(width)]
    for y, level in enumerate(floors):
        for x, row in enumerate(level):
            for z, char in enumerate(row):
                if char in themap:
                    house[z][x][y] = themap[char]
    return house
def readFloors(fileName):
    """Parse a structure layout file into a list of floors.

    A line starting with ``|`` begins a new floor; every other line is a
    row of that floor with a single trailing CR or LF stripped.

    Fixes over the original: the file was opened twice, leaking the
    first handle and shadowing the builtin ``file``; the unused ``now``
    variable is removed.
    """
    floors = []
    with open(fileName) as handle:
        for line in handle:
            if line[0] == '|':
                floors.append([])
            else:
                floors[-1].append(line)
                # Strip one trailing newline character, as the original did.
                if floors[-1][-1][-1] == '\r' or floors[-1][-1][-1] == '\n':
                    floors[-1][-1] = floors[-1][-1][:-1]
    return floors
#material1:mainpart, material2:sidepart
def smithBox(material1,material2):
    """Build the blacksmith shop from its ASCII floor plan.

    material1 -- (id, data) key for the main material palette.
    material2 -- (id, data) key for the secondary material palette.
    """
    floor = readFloors("stock-filters/structures/smith.txt")
    (m1, d1) = material1
    (m2, d2) = material2
    # Resolve both palettes up front.
    mainWall = wallBlocks[m1][d1]
    mainSupport = supportBlocks[m1][d1]
    sideWall = wallBlocks[m2][d2]
    sideSupport = supportBlocks[m2][d2]
    mainStair = stairBlocks[m1][d1]
    sideStair = stairBlocks[m2][d2]
    fence = fenceBlocks[m2][d2]
    # Plan character -> [block_id, block_data]
    themap = {
        'C': mainSupport,
        'O': [mainStair, 2],
        'P': sideWall,
        'W': sideSupport,
        'L': [11, 0],            # lava (forge)
        'S': [sideStair, 3],
        's': [sideStair, 1],
        'F': fence,
        'D': mainWall,
        'Q': [54, 3],            # chest
        'N': [102, 0],           # glass pane
        'n': [102, 0],
        'I': [101, 0],           # iron bars
        'B': [61, 0],            # furnace
        'R': [72, 0],            # wooden pressure plate
        '$': slabBlocks[m1][d1],
    }
    return buildFloor(floor, themap)
def butcherBox(material1,material2):
    """Build the butcher shop from its ASCII floor plan.

    material1 -- (id, data) key for the main material palette.
    material2 -- (id, data) key for the secondary material palette.

    Fix over the original: the unused ``mainStair`` lookup is removed.
    """
    floor = readFloors("stock-filters/structures/butcher.txt")
    (m1, d1) = material1
    (m2, d2) = material2
    mainWall = wallBlocks[m1][d1]
    mainSupport = supportBlocks[m1][d1]
    sideWall = wallBlocks[m2][d2]
    sideSupport = supportBlocks[m2][d2]
    door = doorBlocks[m2][d2]
    sideStair = stairBlocks[m2][d2]
    slab = slabBlocks[m1][d1]
    fence = fenceBlocks[m2][d2]
    # Plan character -> [block_id, block_data]
    themap = {
        'I': [2, 0],             # grass
        'C': mainSupport,
        'P': sideWall,
        'S': [sideStair, 2],
        'N': slab,
        'F': fence,
        'O': [door, 0],          # door, lower half
        'Y': [door, 8],          # door, upper half
        'T': [sideStair, 2],
        't': [sideStair, 0],
        'D': mainWall,
        'G': [102, 0],           # glass pane
        'g': [102, 0],
        'W': sideSupport,
        'L': [72, 0],            # wooden pressure plate
        '!': [50, 2],            # torches (data = facing)
        'h': [50, 0],
    }
    return buildFloor(floor, themap)
def churchBox(material1,material2):
    """Build the church from its ASCII floor plan.

    material1 -- (id, data) key for the main material palette.
    material2 -- (id, data) key for the secondary material palette
                 (only its door table is used by this plan).

    Fix over the original: six palette lookups whose results were never
    used (mainWall, sideWall, sideSupport, sideStair, slab, fence) are
    removed.
    """
    floor = readFloors("stock-filters/structures/church.txt")
    (m1, d1) = material1
    (m2, d2) = material2
    mainSupport = supportBlocks[m1][d1]
    mainStair = stairBlocks[m1][d1]
    door = doorBlocks[m2][d2]
    # Plan character -> [block_id, block_data]
    themap = {
        'C': mainSupport,
        'S': [mainStair, 2],
        's': [mainStair, 3],
        '$': [mainStair, 0],
        'L': [65, 0],            # ladder
        'D': [door, 0],          # door, lower half
        'G': [102, 0],           # glass pane
        'g': [102, 0],
        'O': [door, 8],          # door, upper half
        'T': [50, 2],            # torches (data = facing)
        't': [50, 3],
        'H': [50, 0],
        'h': [50, 1],
    }
    return buildFloor(floor, themap)
def lampBox(material2):
    """Build a street lamp from its ASCII floor plan.

    material2 -- (block_id, block_data) pair; used both directly for the
    lamp post ('W') and as the key into the fence table ('F').
    """
    floor = readFloors("stock-filters/structures/lamp.txt")
    (m2, d2) = material2
    # Plan character -> [block_id, block_data]
    themap = {
        'F': fenceBlocks[m2][d2],
        'W': [m2, d2],
        'T': [50, 2],            # torches, one per facing
        't': [50, 3],
        'H': [50, 0],
        'h': [50, 1],
    }
    return buildFloor(floor, themap)
def libraryBox(material1,material2):
    """Build the library from its ASCII floor plan.

    material1 -- (id, data) key for the main material palette.
    material2 -- (id, data) key for the secondary material palette.

    Fix over the original: three palette lookups whose results were
    never used (mainWall, sideSupport, slab) are removed.
    """
    floor = readFloors("stock-filters/structures/library.txt")
    (m1, d1) = material1
    (m2, d2) = material2
    mainSupport = supportBlocks[m1][d1]
    sideWall = wallBlocks[m2][d2]
    door = doorBlocks[m2][d2]
    mainStair = stairBlocks[m1][d1]
    sideStair = stairBlocks[m2][d2]
    fence = fenceBlocks[m2][d2]
    # Plan character -> [block_id, block_data]
    themap = {
        'c': mainSupport,
        'o': [mainStair, 2],
        'p': sideWall,
        's': [sideStair, 2],
        'S': [sideStair, 0],
        'd': [door, 0],          # door, lower half
        'a': [door, 8],          # door, upper half
        'e': [58, 0],            # crafting table
        'f': fence,
        'g': [102, 0],           # glass pane
        'G': [102, 0],
        'r': [72, 0],            # wooden pressure plate
        'l': [47, 0],            # bookshelf
    }
    return buildFloor(floor, themap)
def wellBox(material1,material2):
    """Build the village well from its ASCII floor plan.

    material1 -- (id, data) key for the support material.
    material2 -- (id, data) key for the fence material.

    Fix over the original: seven palette lookups whose results were
    never used (mainWall, sideWall, sideSupport, door, mainStair,
    sideStair, slab) are removed.
    """
    floor = readFloors("stock-filters/structures/well.txt")
    (m1, d1) = material1
    (m2, d2) = material2
    mainSupport = supportBlocks[m1][d1]
    fence = fenceBlocks[m2][d2]
    # Plan character -> [block_id, block_data]
    themap = {
        'C': mainSupport,
        'W': [8, 0],             # water
        'F': fence,
    }
    return buildFloor(floor, themap)
| StarcoderdataPython |
11218135 | <gh_stars>0
import random
def main():
    """Pick five random numbers in [0, 25) and search for a pair summing
    to 5 using a single-pass hash lookup.

    Prints the numbers, then returns the first ``(value, complement)``
    pair found, or ``(-1, -1)`` when no such pair exists.
    """
    numbers = [random.randint(0, 25) % 25 for _ in range(5)]
    print('numbers ', numbers)
    target = 5
    seen = {}
    for value in numbers:
        complement = target - value
        if complement in seen:
            return value, seen[complement]
        seen[value] = value
    return -1, -1
if __name__ == '__main__':
print(main())
| StarcoderdataPython |
8171174 | import logging
from .base import LitecordObject
from ..utils import dt_to_json
log = logging.getLogger(__name__)
class User(LitecordObject):
    """A general user object.

    Parameters
    ----------
    server: :class:`LitecordServer`
        Server instance
    raw: dict
        Raw user data.

    Attributes
    ----------
    _raw: dict
        Raw user data.
    id: int
        Snowflake ID of this user.
    username: str
        User's username.
    discriminator: str
        User's discriminator.
    avatar_hash: str
        User's avatar hash, used to retrieve the user's avatar data.
    email: str
        User's email, can be :py:const:`None`
    admin: bool
        Flag that shows if the user is an admin user.
    bot: bool
        If this is a bot user.
    flags: int
        The flags of this user. Usually used to indicate staff and partner.
    verified: bool
        Used to show that a user has verified via e-mail.
    """

    # NOTE(review): the original tuple omitted 'bot', 'flags' and
    # 'verified' although __init__ assigns them; this only has an effect
    # if LitecordObject also declares __slots__ -- confirm.
    __slots__ = ('_raw', 'id', 'username', 'discriminator', 'avatar_hash',
                 'email', 'admin', 'bot', 'flags', 'verified')

    def __init__(self, server, raw):
        super().__init__(server)
        self._raw = raw

        self.id = int(raw['user_id'])
        self.username = raw['username']
        self.discriminator = raw['discriminator']
        self.avatar_hash = raw['avatar']

        self.email = raw.get('email')
        self.admin = raw.get('admin', False)
        self.bot = raw.get('bot', False)
        self.flags = raw.get('flags', 7)
        self.verified = raw.get('verified', True)

    def __str__(self):
        return f'{self.username}#{self.discriminator}'

    def __repr__(self):
        return f'<User id={self.id} name={self.username} discriminator={self.discriminator} ' \
               f'bot={self.bot} admin={self.admin}>'

    def __eq__(self, other):
        return isinstance(other, User) and other.id == self.id

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None under Python 3,
        # which made User instances unhashable (unusable in sets/dict
        # keys).  Hash on the same key __eq__ compares.
        return hash(self.id)

    @property
    def guilds(self):
        """Yield all guilds a user is in."""
        # TODO: make this use AsyncIterator
        for guild in self.guild_man.all_guilds():
            if self.id in guild.member_ids:
                yield guild

    @property
    def members(self):
        """Yield all members a user has attached."""
        for guild in self.guilds:
            yield guild.members[self.id]

    @property
    def as_json_private(self):
        """JSON version of the user object but with private info."""
        return {**self.as_json, **{
            'password': self._raw['password'],
            'verified': self._raw['verified'],
            'email': self._raw['email'],
        }}

    @property
    def as_json(self):
        """Remove sensitive data from `User._raw` and make it JSON serializable"""
        return {
            'id': str(self.id),
            'username': self.username,
            'discriminator': self.discriminator,
            'avatar': self.avatar_hash,
            'bot': self.bot,
            'mfa_enabled': False,
            'flags': self.flags,
            'verified': self.verified
        }

    @property
    def connections(self):
        """Yield all connections that are related to this user."""
        for conn in self.server.get_connections(self.id):
            yield conn

    @property
    def online(self):
        """Returns boolean if the user has at least 1 connection attached to it"""
        # any() short-circuits on the first connection instead of
        # materialising the whole list just to take its length.
        return any(True for _ in self.server.get_connections(self.id))

    async def dispatch(self, evt_name, evt_data):
        """Dispatch an event to all connections a user has.

        Parameters
        ----------
        evt_name: str
            Event name.
        evt_data: any
            Event data.

        Returns
        -------
        bool
            ``False`` when the user has no connections, ``True`` otherwise.
        """
        log.debug(f"Dispatching {evt_name} to {self.id}")
        connections = list(self.connections)
        if len(connections) < 1:
            return False

        for conn in connections:
            try:
                await conn.dispatch(evt_name, evt_data)
                log.debug('Dispatched to %r', conn.session_id)
            except Exception:
                # Narrowed from a bare except: so KeyboardInterrupt /
                # SystemExit are no longer swallowed here.
                log.exception('Failed to dispatch event to %r', conn.session_id)

        return True
def default_game():
    """Return the default presence payload: online, no game being played."""
    return dict(status='online', type=0, name=None, url=None)
class Presence:
    """A presence object.

    Presence objects are used to signal clients that someone is playing
    a game, or that someone went Online, Idle/AFK or DnD (Do not
    Disturb).

    Parameters
    ----------
    guild: :class:`Guild`
        Guild that this presence object relates to.
    user: :class:`User`
        User that this presence object relates to.
    status: dict, optional
        Status data to load into the presence object.

    Attributes
    ----------
    game: dict
        The currently playing game/status.
    user: :class:`User`
        The user that this presence object is linked to.
    guild: :class:`Guild`
        Guild that this presence object relates to.
    """
    __slots__ = ('game', 'user', 'guild')

    def __init__(self, guild, user, status):
        self.game = None
        self._update(guild, user, status)

    def _update(self, guild, user, status):
        # Merge the new status on top of the previous game state, or on
        # top of the defaults when this is the first update.
        previous = self.game if self.game is not None else default_game()
        overrides = {} if status is None else status
        self.game = {**previous, **overrides}

        self.user = user
        self.guild = guild

        if self.game['status'] not in ('online', 'offline', 'idle', 'dnd'):
            log.warning(f'Presence for {self.user!r} with unknown status')

    def __repr__(self):
        return f'<Presence user={self.user!s} status={self.game["status"]!r} game.name={self.game["name"]}>'

    @property
    def as_json(self):
        member = self.guild.members.get(self.user.id)
        game = self.game
        return {
            # Discord sends an incomplete user object with all optional fields(excluding id)
            # we are lazy, so we send the same user object you'd receive in other normal events :^)
            'user': self.user.as_json,
            'guild_id': str(self.guild.id),
            'roles': [role.id for role in member.roles],
            'game': {
                'type': game.get('type'),
                'name': game.get('name'),
                'url': game.get('url'),
            },
            'status': game.get('status'),
        }
| StarcoderdataPython |
3467062 | <reponame>sfindeisen/skryba
#!/usr/bin/python3
import unittest
from collection import DictionaryCollection, ListCollection
class TestListCollection(unittest.TestCase):
    """Behavioural checks for ListCollection."""

    def test_empty(self):
        coll = ListCollection(None, [])
        self.assertEqual(coll.all(), [])

    def test_singleton(self):
        coll = ListCollection(None, [5])
        self.assertEqual(coll.all(), [5])

    def test_double(self):
        coll = ListCollection(None, [5, 7])
        self.assertEqual(coll.all(), [5, 7])

    def test_getitem(self):
        coll = ListCollection(None, [5, 7])
        self.assertEqual(coll[0], 5)
        self.assertEqual(coll[1], 7)

    def test_map(self):
        tripled = ListCollection(None, [5, 7]).map(lambda x: 3 * x)
        self.assertEqual(tripled.all(), [15, 21])

    def test_reverse_dict(self):
        # Grouping by parity must yield a DictionaryCollection.
        grouped = ListCollection(None, [5, 7, 8]).reverse_dict(
            lambda x: ["odd"] if (x % 2) else ["even"])
        self.assertTrue(isinstance(grouped, DictionaryCollection))
        self.assertEqual(grouped.all(), {"even": [8], "odd": [5, 7]})
class TestDictionaryCollection(unittest.TestCase):
    """Behavioural checks for DictionaryCollection."""

    def test_empty(self):
        coll = DictionaryCollection(None, {})
        self.assertEqual(coll.all(), {})

    def test_singleton(self):
        coll = DictionaryCollection(None, {"A": 5})
        self.assertEqual(coll.all(), {"A": 5})

    def test_double(self):
        coll = DictionaryCollection(None, {5: "Ala", 7: "Ela"})
        self.assertEqual(coll.all(), {5: "Ala", 7: "Ela"})

    def test_getitem(self):
        coll = DictionaryCollection(None, {5: "Ala", 7: "Ela"})
        self.assertEqual(coll[5], "Ala")
        self.assertEqual(coll[7], "Ela")

    def test_map_values(self):
        mapped = DictionaryCollection(None, {19: "Alice", 20: "Bob"}).map_values(
            lambda x: (len(x), x))
        self.assertEqual(mapped.all(), {19: (5, "Alice"), 20: (3, "Bob")})

    def test_map_keys_uq(self):
        mapped = DictionaryCollection(None, {19: "Alice", 20: "Bob"}).map_keys_uq(
            lambda x: 3 * x)
        self.assertEqual(mapped.all(), {57: "Alice", 60: "Bob"})

    def test_map_keys_uq_fail(self):
        # Mapping distinct keys onto the same new key must raise.
        coll = DictionaryCollection(
            None, {19: "Alice", 20: "Bob", 21: "Eve"}).map_values(lambda x: [x])
        with self.assertRaises(ValueError):
            coll.map_keys_uq(lambda x: (x % 2))

    def test_map_keys(self):
        merged = DictionaryCollection(None, {19: "Alice", 20: "Bob", 21: "Eve"}) \
            .map_values(lambda x: [x]) \
            .map_keys(lambda x: (x % 2), lambda y, z: (y + z))
        self.assertEqual(merged.all(), {1: ["Alice", "Eve"], 0: ["Bob"]})
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
25623 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""connect() module function unit tests."""
import unittest
from unittest import mock
import google.auth.credentials
INSTANCE = "test-instance"
DATABASE = "test-database"
PROJECT = "test-project"
USER_AGENT = "user-agent"
def _make_credentials():
    """Return a mock credentials object whose spec satisfies both the
    base and the scoped google-auth credential interfaces."""

    class _ScopedCredentials(
        google.auth.credentials.Credentials, google.auth.credentials.Scoped
    ):
        pass

    return mock.Mock(spec=_ScopedCredentials)
@mock.patch("google.cloud.spanner_v1.Client")
class Test_connect(unittest.TestCase):
    """Tests for the module-level ``connect()`` factory.

    The class-level patch replaces ``google.cloud.spanner_v1.Client``
    for every test, so each test method receives the patched class as
    its ``mock_client`` argument.
    """

    def test_w_implicit(self, mock_client):
        """Only instance/database given: client, project and pool are defaulted."""
        from google.cloud.spanner_dbapi import connect
        from google.cloud.spanner_dbapi import Connection

        client = mock_client.return_value
        instance = client.instance.return_value
        database = instance.database.return_value

        connection = connect(INSTANCE, DATABASE)

        self.assertIsInstance(connection, Connection)

        self.assertIs(connection.instance, instance)
        client.instance.assert_called_once_with(INSTANCE)

        self.assertIs(connection.database, database)
        instance.database.assert_called_once_with(DATABASE, pool=None)
        # Database constructs its own pool
        self.assertIsNotNone(connection.database._pool)

    def test_w_explicit(self, mock_client):
        """Explicit project, credentials, pool and user agent are passed through."""
        from google.cloud.spanner_v1.pool import AbstractSessionPool
        from google.cloud.spanner_dbapi import connect
        from google.cloud.spanner_dbapi import Connection
        from google.cloud.spanner_dbapi.version import PY_VERSION

        credentials = _make_credentials()
        pool = mock.create_autospec(AbstractSessionPool)
        client = mock_client.return_value
        instance = client.instance.return_value
        database = instance.database.return_value

        connection = connect(
            INSTANCE, DATABASE, PROJECT, credentials, pool=pool, user_agent=USER_AGENT,
        )

        self.assertIsInstance(connection, Connection)

        mock_client.assert_called_once_with(
            project=PROJECT, credentials=credentials, client_info=mock.ANY
        )
        # Inspect the client_info actually handed to the Client ctor.
        client_info = mock_client.call_args_list[0][1]["client_info"]
        self.assertEqual(client_info.user_agent, USER_AGENT)
        self.assertEqual(client_info.python_version, PY_VERSION)

        self.assertIs(connection.instance, instance)
        client.instance.assert_called_once_with(INSTANCE)

        self.assertIs(connection.database, database)
        instance.database.assert_called_once_with(DATABASE, pool=pool)

    def test_w_instance_not_found(self, mock_client):
        """A missing instance makes connect() raise ValueError."""
        from google.cloud.spanner_dbapi import connect

        client = mock_client.return_value
        instance = client.instance.return_value
        instance.exists.return_value = False

        with self.assertRaises(ValueError):
            connect(INSTANCE, DATABASE)

        instance.exists.assert_called_once_with()

    def test_w_database_not_found(self, mock_client):
        """A missing database makes connect() raise ValueError."""
        from google.cloud.spanner_dbapi import connect

        client = mock_client.return_value
        instance = client.instance.return_value
        database = instance.database.return_value
        database.exists.return_value = False

        with self.assertRaises(ValueError):
            connect(INSTANCE, DATABASE)

        database.exists.assert_called_once_with()

    def test_w_credential_file_path(self, mock_client):
        """A string credentials path routes through from_service_account_json."""
        from google.cloud.spanner_dbapi import connect
        from google.cloud.spanner_dbapi import Connection
        from google.cloud.spanner_dbapi.version import PY_VERSION

        credentials_path = "dummy/file/path.json"

        connection = connect(
            INSTANCE,
            DATABASE,
            PROJECT,
            credentials=credentials_path,
            user_agent=USER_AGENT,
        )

        self.assertIsInstance(connection, Connection)

        factory = mock_client.from_service_account_json
        factory.assert_called_once_with(
            credentials_path, project=PROJECT, client_info=mock.ANY,
        )
        client_info = factory.call_args_list[0][1]["client_info"]
        self.assertEqual(client_info.user_agent, USER_AGENT)
        self.assertEqual(client_info.python_version, PY_VERSION)
| StarcoderdataPython |
6431204 | <reponame>fraunhoferfokus/WMAS<gh_stars>1-10
from __future__ import absolute_import
from ..data.session import Session, UNKNOWN
def deserialize_sessions(session_dicts):
    """Deserialize a list of session dictionaries into Session objects."""
    return [deserialize_session(entry) for entry in session_dicts]
def deserialize_session(session_dict):
    """Deserialize a single session dictionary into a :class:`Session`.

    Every field is optional; a missing field falls back to the same
    default the original field-by-field ``if key in dict`` checks used.
    A legacy ``u"path"`` entry, when present, is split on ``", "`` and
    appended to the ``include`` list of the tests specification.

    The twenty repetitive presence checks were collapsed into
    ``dict.get`` calls with equivalent defaults.
    """
    # The tests spec needs special handling because u"path" mutates it.
    tests = session_dict.get(u"tests", {"include": [], "exclude": []})
    if u"path" in session_dict:
        test_paths = session_dict[u"path"].split(", ")
        tests[u"include"] = tests[u"include"] + test_paths

    return Session(
        token=session_dict.get(u"token", u""),
        tests=tests,
        types=session_dict.get(u"types", []),
        user_agent=session_dict.get(u"user_agent", u""),
        labels=session_dict.get(u"labels", []),
        timeouts=session_dict.get(u"timeouts", {}),
        pending_tests=session_dict.get(u"pending_tests"),
        running_tests=session_dict.get(u"running_tests"),
        status=session_dict.get(u"status", UNKNOWN),
        test_state=session_dict.get(u"test_state"),
        last_completed_test=session_dict.get(u"last_completed_test"),
        date_started=session_dict.get(u"date_started"),
        date_finished=session_dict.get(u"date_finished"),
        is_public=session_dict.get(u"is_public", False),
        reference_tokens=session_dict.get(u"reference_tokens", []),
        browser=session_dict.get(u"browser"),
        webhook_urls=session_dict.get(u"webhook_urls", []),
        expiration_date=session_dict.get(u"expiration_date"),
        malfunctioning_tests=session_dict.get(u"malfunctioning_tests", [])
    )
| StarcoderdataPython |
8009412 | <filename>lstm_model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class LSTM(nn.Module):
    """Sequence-to-sequence model: stacked LSTM layers followed by a
    per-timestep linear projection to ``output_size`` features.
    """

    def __init__(self, input_size, hidden_size, layer_size, output_size, dropout):
        """
        input_size -- number of input features per timestep.
        hidden_size -- width of the LSTM hidden state.
        layer_size -- number of stacked LSTM layers.
        output_size -- number of output features per timestep.
        dropout -- inter-layer dropout probability (PyTorch ignores it
                   when layer_size == 1).
        """
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.layer_size = layer_size
        self.output_size = output_size
        self.lstm = nn.LSTM(input_size, hidden_size, layer_size,
                            dropout=dropout, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, placeholder):
        """Run the LSTM over ``x`` of shape (batch, seq, input_size) and
        return a (batch, seq, output_size) tensor.

        ``placeholder`` is unused; kept for call-signature compatibility
        with the training loop.
        """
        # Fresh zero initial states per call.  Allocated via new_zeros so
        # they inherit x's device and dtype -- the original hard-coded
        # CPU tensors, which raised a device mismatch on CUDA inputs.
        # new_zeros tensors do not require grad, so no detach() needed.
        h0 = x.new_zeros(self.layer_size, x.size(0), self.hidden_size)
        c0 = x.new_zeros(self.layer_size, x.size(0), self.hidden_size)

        out, (hn, cn) = self.lstm(x, (h0, c0))

        # Linearly transform each timestep to the output size.
        return self.linear(out)
# not used
# class RNN(nn.Module):
# def __init__(self, input_size, hidden_size, layer_size, output_size, dropout):
# super(RNN, self).__init__()
# self.hidden_dim = hidden_size
# self.layer_dim = layer_size
# self.rnn = nn.RNN(input_size, hidden_size, layer_size, batch_first=True, nonlinearity='relu', dropout=dropout)
# self.fc = nn.Linear(hidden_size, output_size)
# def forward(self, x):
# h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_()
# out, hn = self.rnn(x, h0.detach())
# out = self.fc(out)
# return out
| StarcoderdataPython |
1847570 | import getpass
class IO:
    """Input/Output related utilities."""

    @staticmethod
    def input_secret(msg):
        """Prompt the user for an input using a given message.  The input
        will not be visible on the terminal.

        Args:
            msg: The message to display to the user prompting for an input.
        """
        return getpass.getpass(msg)

    @staticmethod
    def read_file(filename, *, binary=False):
        """Read and return a file's contents -- as text by default, or as
        a byte string when ``binary`` is true.

        Args:
            filename: The name of the file to be read.
            binary: Whether to read the file as binary or not.
        """
        with open(filename, 'rb' if binary else 'r') as handle:
            return handle.read()

    @staticmethod
    def write_file(filename, content, *, binary=False):
        """Write the given content to a file -- as text by default, or as
        bytes when ``binary`` is true.

        Args:
            filename: The name of the file to be written.
            content: The content to write to the specified file.
            binary: Whether to write the file as binary or not.
        """
        with open(filename, 'wb' if binary else 'w') as handle:
            handle.write(content)
| StarcoderdataPython |
5122491 | <reponame>xenein/froide<filename>froide/comments/__init__.py
def get_model():
    """Return the comment model class used by this app.

    The import is deferred to call time -- presumably so the models
    module is not loaded before the app registry is ready; confirm
    before hoisting it to module level.
    """
    from .models import FroideComment

    return FroideComment
def get_form():
    """Return the comment form class used by this app.

    Imported lazily at call time, mirroring :func:`get_model` --
    presumably to avoid early imports; confirm before hoisting.
    """
    from .forms import CommentForm

    return CommentForm
| StarcoderdataPython |
4849269 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
# Copyright 2011-2016 <NAME>
# License: BSD (see file COPYING for details)
"""This module contains mainly a utility function :func:`html2odf`
which converts an ElementTree object generated using
:mod:`etgen.html` to a fragment of ODF.
.. This is part of the Lino test suite. To test it individually, run:
$ python lino/utils/html2odf.py
This is not trivial. The challenge is that HTML and ODF are quite
different document representations. But something like this seems
necessary. Lino uses it in order to generate .odt documents which
contain (among other) chunks of html that have been entered using
TinyMCE and stored in database fields.
TODO: is there really no existing library for this task? I saw
approaches which call libreoffice in headless mode to do the
conversion, but this sounds inappropriate for our situation where we
must glue together fragments from different sources. Also note that we
use :mod:`appy.pod` to do the actual generation.
Usage examples:
>>> from etgen.html import E, tostring
>>> def test(e):
... print (tostring(e))
... print (toxml(html2odf(e)))
>>> test(E.p("This is a ", E.b("first"), " test."))
... #doctest: +NORMALIZE_WHITESPACE
<p>This is a <b>first</b> test.</p>
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0">This
is a <text:span text:style-name="Strong Emphasis">first</text:span>
test.</text:p>
>>> test(E.p(E.b("This")," is another test."))
... #doctest: +NORMALIZE_WHITESPACE
<p><b>This</b> is another test.</p>
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0"><text:span
text:style-name="Strong Emphasis">This</text:span> is another test.</text:p>
>>> test(E.p(E.strong("This")," is another test."))
... #doctest: +NORMALIZE_WHITESPACE
<p><strong>This</strong> is another test.</p>
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0"><text:span
text:style-name="Strong Emphasis">This</text:span> is another test.</text:p>
>>> test(E.p(E.i("This")," is another test."))
... #doctest: +NORMALIZE_WHITESPACE
<p><i>This</i> is another test.</p>
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0"><text:span
text:style-name="Emphasis">This</text:span> is another test.</text:p>
>>> test(E.td(E.p("This is another test.")))
... #doctest: +NORMALIZE_WHITESPACE
<td><p>This is another test.</p></td>
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0">This
is another test.</text:p>
>>> test(E.td(E.p(E.b("This"), " is another test.")))
... #doctest: +NORMALIZE_WHITESPACE
<td><p><b>This</b> is another test.</p></td>
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0"><text:span
text:style-name="Strong Emphasis">This</text:span> is another test.</text:p>
>>> test(E.ul(E.li("First item"),E.li("Second item")))
... #doctest: +NORMALIZE_WHITESPACE
<ul><li>First item</li><li>Second item</li></ul>
<text:list xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0"
text:style-name="podBulletedList"><text:list-item><text:p
text:style-name="podBulletItem">First item</text:p></text:list-item><text:list-item><text:p
text:style-name="podBulletItem">Second item</text:p></text:list-item></text:list>
N.B.: the above chunk is obviously not correct since Writer doesn't display it.
(How can I debug a generated odt file whose content.xml is syntactically
valid but which Writer still refuses to display?)
Idea: validate it against the ODF specification using lxml
Here is another HTML fragment which doesn't yield a valid result:
>>> from lxml import etree
>>> html = '<td><div><p><b>Bold</b></p></div></td>'
>>> print(toxml(html2odf(etree.fromstring(html))))
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0"/>
:func:`html2odf` converts bold text to a span with a style named
"Strong Emphasis". That's currently a hard-coded name, and the caller
must make sure that a style of that name is defined in the document.
The text formats `<i>` and `<em>` are converted to a style "Emphasis".
Edge case:
>>> print (toxml(html2odf("Plain string")))
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0">Plain string</text:p>
>>> print (toxml(html2odf(u"Ein schöner Text")))
<text:p xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0">Ein schöner Text</text:p>
Not yet supported
=================
The following is an example for :ticket:`788`. Conversion fails if a
sequence of paragraph-level items are grouped using a div:
>>> test(E.div(E.p("Two numbered items:"),
... E.ol(E.li("first"), E.li("second"))))
... #doctest: +NORMALIZE_WHITESPACE +IGNORE_EXCEPTION_DETAIL +ELLIPSIS
Traceback (most recent call last):
...
IllegalText: The <text:section> element does not allow text
>>> from lxml import etree
>>> test(etree.fromstring('<ul type="disc"><li>First</li><li>Second</li></ul>'))
<ul type="disc"><li>First</li><li>Second</li></ul>
<text:list xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0" text:style-name="podBulletedList"><text:list-item><text:p text:style-name="podBulletItem">First</text:p></text:list-item><text:list-item><text:p text:style-name="podBulletItem">Second</text:p></text:list-item></text:list>
>>> test(E.p(E.dl(E.dt("Foo"), E.dl("A foobar without bar."))))
Traceback (most recent call last):
...
NotImplementedError: <dl> inside <text:p>
"""
from __future__ import unicode_literals
# from future import standard_library
# standard_library.install_aliases()
from builtins import str
import six
import logging
logger = logging.getLogger(__name__)
from io import StringIO
from lxml import etree
from etgen.html import E, tostring
def toxml(node):
    """Render an ODF node (and its children) into its XML text form."""
    sink = StringIO()
    # toXml serialises the element into the writable `sink`,
    # starting at indentation level 0.
    node.toXml(0, sink)
    xml_text = sink.getvalue()
    return xml_text
from odf import text

#~ PTAGS = ('p','td','li')
# HTML tags treated as paragraph-level containers: html2odf writes their
# content straight into the current ODF container instead of a new span.
PTAGS = ('p', 'td', 'div', 'table', 'tr')
def html2odf(e, ct=None, **ctargs):
    """
    Convert a :mod:`etgen.html` element to an ODF text element.
    Most formats are not implemented.
    There's probably a better way to do this...

    :ct: the root element ("container"). If not specified, we create one.

    Recurses over the children of *e*; each recursive call appends its
    output to the shared container, and the (possibly replaced) container
    is returned.
    """
    # NOTE(review): sections_counter is never incremented, so every <div>
    # becomes a Section named "S1" -- likely related to the documented
    # failure on grouped paragraph-level items; verify.
    sections_counter = 1
    #~ print "20120613 html2odf()", e.tag, e.text
    if ct is None:
        ct = text.P(**ctargs)
        #~ if e.tag in PTAGS:
            #~ oe = text.P(**ctargs)
        #~ else:
            #~ oe = text.P(**ctargs)
            #~ logger.info("20130201 %s", tostring(e))
            #~ raise NotImplementedError("<%s> without container" % e.tag)
    # Plain strings carry no markup: append the text and stop recursing.
    if isinstance(e, six.string_types):
        ct.addText(e)
        #~ oe = text.Span()
        #~ oe.addText(e)
        #~ yield oe
        return ct
    # A <ul> replaces the container itself so that the <li> children are
    # appended to the list, with a matching paragraph style for items.
    if e.tag == 'ul':
        ct = text.List(stylename='podBulletedList')
        ctargs = dict(stylename='podBulletItem')
        #~ ctargs = dict()
    text_container = None
    # Map the HTML tag onto an ODF element.  Style names such as
    # "Strong Emphasis" are hard-coded; the caller must make sure the
    # document defines them (see module docs).
    if e.tag in ('b', 'strong'):
        #~ oe = text.Span(stylename='Bold Text')
        oe = text.Span(stylename='Strong Emphasis')
    elif e.tag == 'a':
        oe = text.Span(stylename='Strong Emphasis')
        #~ oe = text.Span(stylename='Bold Text')
    elif e.tag in ('i', 'em'):
        oe = text.Span(stylename='Emphasis')
    elif e.tag == 'span':
        oe = text.Span()
    elif e.tag == 'br':
        oe = text.LineBreak()
    elif e.tag == 'h1':
        """
        <text:h text:style-name="Heading_20_1" text:outline-level="1">
        """
        # Headings become the new container: their children are inline.
        oe = ct = text.H(stylename="Heading 1", outlinelevel=1)
    elif e.tag == 'h2':
        oe = ct = text.H(stylename="Heading 2", outlinelevel=2)
    elif e.tag == 'h3':
        oe = ct = text.H(stylename="Heading 3", outlinelevel=3)
    elif e.tag == 'div':
        oe = ct = text.Section(name="S" + str(sections_counter))
    elif e.tag == 'img':
        # NOTE(review): returns None rather than ct; harmless for the
        # recursive calls (their return value is ignored) but surprising
        # for a top-level call on an <img> -- verify.
        return # ignore images
    elif e.tag == 'ul':
        oe = ct
    #~ elif e.tag in ('ul','ol'):
        #~ oe = text.List(stylename=e.tag.upper())
        #~ ctargs = dict(stylename=e.tag.upper()+"_P")
    elif e.tag == 'li':
        #~ oe = ct
        # List items need an inner paragraph to receive the text.
        oe = text.ListItem()
        text_container = text.P(**ctargs)
        oe.appendChild(text_container)
    elif e.tag in PTAGS:
        # Paragraph-level tags reuse the current container directly.
        oe = ct
        #~ if ct.tagName == 'p':
            #~ oe = ct
        #~ else:
            #~ oe = text.P(**ctargs)
    else:
        logger.info("20130201 %s", tostring(e))
        raise NotImplementedError("<%s> inside <%s>" % (e.tag, ct.tagName))
        #~ oe = text.Span()
    if text_container is None:
        text_container = oe
    if e.text:
        text_container.addText(e.text)
    for child in e:
        #~ html2odf(child,oe)
        html2odf(child, text_container, **ctargs)
        #~ for oc in html2odf(child,oe):
            # ~ # oe.addElement(oc)
            #~ oe.appendChild(oc)
    #~ if not True:
        #~ if e.tail:
            #~ oe.addText(e.tail)
    # Only attach oe when it is a freshly created child element; when the
    # tag reused the container, appending would create a cycle.
    if oe is not ct:
        ct.appendChild(oe)
        #~ yield oe
    #~ if True:
    if e.tail:
        #~ yield e.tail
        #~ yield text.Span(text=e.tail)
        #~ yield Text(e.tail)
        ct.addText(e.tail)
    return ct
def _test():
import doctest
doctest.testmod()
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    _test()
| StarcoderdataPython |
"""Dump every saved Wi-Fi profile and its stored key via Windows ``netsh``."""
import subprocess
import re
from colorama import init
from colorama import Fore, Style

init()
print(f"{Fore.RED}" +
      """
 @@@@@@@ @@@@@@@@ @@@@@@@ @@@ @@@ @@@ @@@@@@@
@gam3@@@ @@fac3@@ @@@@@@@@ @@@ @@@ @@@ @@@@@@@
@@! @@@ @@! @@! @@@ @@! !@@ @@! @@!
!@! @!@ !@! !@! @!@ !@! @!! !@! !@!
@!@!!@! @!!!:! @!@ !@! @!@@!@! !!@ @!!
!!@!@! !!!!!: !@! !!! !!@!!! !!! !!!
!!: :!! !!: !!: !!! !!: :!! !!: !!:
:!: !:! :!: :!: !:! :!: !:! :!: :!:
:: ::: :: :::: :::: :: :: ::: :: ::
: : : : :: :: :: : : : ::: : :
"""
      + f"{Style.RESET_ALL}"
      )
print("\033[1;32;40m")

# Ask netsh for every WLAN profile saved on this machine (Windows only).
# NOTE(review): real netsh output pads the colon with several spaces
# ("All User Profile     :"); verify these patterns against live output.
command_output = subprocess.run(
    ["netsh", "wlan", "show", "profiles"], capture_output=True
).stdout.decode()
profile_names = re.findall("All User Profile : (.*)\r", command_output)

wifi_list = []
if profile_names:
    for name in profile_names:
        wifi_profile = {}
        profile_info = subprocess.run(
            ["netsh", "wlan", "show", "profile", name], capture_output=True
        ).stdout.decode()
        # Open networks store no key; skip them.
        if re.search("Security key : Absent", profile_info):
            continue
        wifi_profile["ssid"] = name
        # "key=clear" makes netsh print the password in plain text.
        profile_info_pass = subprocess.run(
            ["netsh", "wlan", "show", "profile", name, "key=clear"],
            capture_output=True,
        ).stdout.decode()
        password = re.search("Key Content : (.*)\r", profile_info_pass)
        if password is None:
            wifi_profile["password"] = None
        else:
            # BUG FIX: this assignment was corrupted in the original
            # ("<PASSWORD>]", a syntax error); the captured group of the
            # regex match object is the stored key.
            wifi_profile["password"] = password[1]
        wifi_list.append(wifi_profile)

for wifi_profile in wifi_list:
    print(wifi_profile)
# Project Euler problem 5: smallest number evenly divisible by 1..20.
from math import gcd


def smallest_multiple(limit=20):
    """Return the least common multiple of the integers 1..limit.

    Folds lcm(a, b) = a * b // gcd(a, b) over the range, replacing the
    original's two duplicated hand-rolled Euclidean-algorithm loops with
    the standard-library ``math.gcd``.
    """
    lcm = 1
    for n in range(2, limit + 1):
        # Integer division is exact because gcd(lcm, n) divides lcm * n.
        lcm = lcm * n // gcd(lcm, n)
    return lcm


print(smallest_multiple(20))
11300055 | <gh_stars>1-10
from precise_bbcode.bbcode import get_parser
from .models import Comment
def convert_all_comments():
    """Re-render the cached HTML for every comment that has BBCode text."""
    renderer = get_parser()
    nonempty = Comment.objects.exclude(text__exact='')
    for comment in nonempty:
        comment.html_text = renderer.render(comment.text)
        comment.save()
"""The Applicants object is a container of Applicant objects."""
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable
if TYPE_CHECKING:
from .applicant_factory import ApplicantFactory
from .applicant_abc import Applicant
class ApplicantCollection(Iterable):
    """Owns the Applicant objects built from a sequence of raw dicts."""

    def __init__(self, applicant_dicts, applicant_factory: ApplicantFactory):
        self._applicant_dicts = applicant_dicts
        self._applicant_factory = applicant_factory
        # Both caches are populated by assemble_applicant_objects().
        self._applicants = None
        self._wwid_dict = None

    def assemble_applicant_objects(self) -> None:
        """Build one Applicant per input dict and index them by wwid."""
        built = []
        for raw in self._applicant_dicts:
            built.append(self._applicant_factory.build_applicant(raw))
        self._applicants = built
        index = {}
        for applicant in built:
            index[applicant.wwid] = applicant
        self._wwid_dict = index

    def __iter__(self):
        return iter(self._applicants)

    def get_applicants_by_wwid(self, wwids: Iterable[int]) -> Iterable[Applicant]:
        """Yield the applicants whose wwid appears in *wwids*, skipping misses."""
        lookup = self._wwid_dict.get
        return filter(None, (lookup(wwid) for wwid in wwids))
| StarcoderdataPython |
from ast_monitor.gps_sensor import GpsSensor

# File the GPS readings are written to.
path = 'sensor_data/gps.txt'

# Build the sensor around that file and trigger one data acquisition.
gps = GpsSensor(gps_path=path)
gps.get_gps_data()
388009 | <reponame>ProhardONE/python_primer
# Exercise 9.1
# Author: <NAME>
class Line:
    """A first-degree polynomial c0 + c1*x, callable like a function."""

    def __init__(self, c0, c1):
        self.c0 = c0
        self.c1 = c1

    def __call__(self, x):
        # Evaluate the line at x.
        return self.c1 * x + self.c0

    def table(self, L, R, n):
        """Return a table with n points for L <= x <= R."""
        import numpy as np
        rows = []
        for x in np.linspace(L, R, n):
            rows.append('%12g %12g\n' % (x, self(x)))
        return ''.join(rows)
class Parabola(Line):
    # Deliberately empty subclass: it inherits __init__, __call__ and
    # table unchanged from Line, which is what the dir()/__dict__ demo
    # in this exercise illustrates.
    pass
# Demonstrate that the subclass exposes exactly the same attribute names
# as the parent: dir() includes inherited members, while __dict__ holds
# only the per-instance data set in __init__.
l = Line(2, 5)
p = Parabola(2, 5)
print 'Line: ', dir(l)
print 'Parabola:', dir(p)
print 'Line: ', l.__dict__
print 'Parabola:', p.__dict__
"""
Sample run:
python dir_subclass.py
Line: ['__call__', '__doc__', '__init__', '__module__', 'c0', 'c1', 'table']
Parabola: ['__call__', '__doc__', '__init__', '__module__', 'c0', 'c1', 'table']
Line: {'c1': 5, 'c0': 2}
Parabola: {'c1': 5, 'c0': 2}
"""
| StarcoderdataPython |
6568226 | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. All advertising materials mentioning features or use of this
# software must display the following acknowledgment:
# "This product includes software developed by
# <NAME> <<EMAIL>>."
#
# 4. Redistributions of any form whatsoever must retain the following
# acknowledgment:
# "This product includes software developed by
# <NAME> <<EMAIL>>."
#
# THIS SOFTWARE IS PROVIDED BY <NAME> ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <NAME> OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
__rev_id__ = """$Id: Cell.py,v 1.3 2005/08/11 08:53:48 rvk Exp $"""
import struct
import BIFFRecords
from ExcelFormula import ErrorCode
class StrCell(object):
    """A string cell; the text itself lives in the shared string table (SST)."""
    # FIX: __slots__ may only list data attributes.  The original also
    # listed the method names ("__init__", "get_biff_data"), which
    # conflicts with the method definitions and raises
    # "ValueError: '__init__' in __slots__ conflicts with class variable"
    # when the class is created.
    __slots__ = ["__parent", "__idx", "__xf_idx", "__sst_idx"]

    def __init__(self, parent, idx, xf_idx, sst_idx):
        self.__parent = parent    # owning row object, provides get_index()
        self.__idx = idx          # column index
        self.__xf_idx = xf_idx    # XF (cell format) record index
        self.__sst_idx = sst_idx  # index into the shared string table

    def get_biff_data(self):
        """Serialise this cell as a LABELSST BIFF record."""
        return BIFFRecords.LabelSSTRecord(
            self.__parent.get_index(), self.__idx, self.__xf_idx,
            self.__sst_idx).get()
class BlankCell(object):
    """An empty cell that only carries formatting."""
    # FIX: only data attributes belong in __slots__; the original's method
    # names conflicted with the defs (ValueError at class creation).
    __slots__ = ["__parent", "__idx", "__xf_idx"]

    def __init__(self, parent, idx, xf_idx):
        self.__parent = parent  # owning row object, provides get_index()
        self.__idx = idx        # column index
        self.__xf_idx = xf_idx  # XF (cell format) record index

    def get_biff_data(self):
        """Serialise this cell as a BLANK BIFF record."""
        return BIFFRecords.BlankRecord(
            self.__parent.get_index(), self.__idx, self.__xf_idx).get()
class MulBlankCell(object):
    """A run of blank cells sharing one format, covering columns
    col1..col2 of a single row."""
    # FIX: only data attributes belong in __slots__; the original's method
    # names conflicted with the defs (ValueError at class creation).
    __slots__ = ["__parent", "__col1", "__col2", "__xf_idx"]

    def __init__(self, parent, col1, col2, xf_idx):
        # The record is only meaningful for a real range of columns.
        assert col1 < col2, '%d < %d is false'%(col1, col2)
        self.__parent = parent
        self.__col1 = col1
        self.__col2 = col2
        self.__xf_idx = xf_idx

    def get_biff_data(self):
        """Serialise this run as a MULBLANK BIFF record."""
        return BIFFRecords.MulBlankRecord(
            self.__parent.get_index(), self.__col1, self.__col2,
            self.__xf_idx).get()
class NumberCell(object):
    """A numeric cell.  Serialised as a compact 4-byte RK record when the
    value fits one of the four RK encodings, otherwise as a full 8-byte
    NUMBER record."""
    # FIX: only data attributes belong in __slots__; the original's method
    # names conflicted with the defs (ValueError at class creation).
    __slots__ = ["__parent", "__idx", "__xf_idx", "__number"]

    def __init__(self, parent, idx, xf_idx, number):
        self.__parent = parent
        self.__idx = idx
        self.__xf_idx = xf_idx
        self.__number = float(number)

    def get_biff_data(self):
        """Pick the most compact BIFF encoding for the value.

        The four RK variants are: 0 = truncated IEEE double, 2 = 30-bit
        integer, and 1/3 = the same two applied to value*100.
        (The Python-2 ``L`` long-literal suffixes were dropped; the masked
        values are identical on both Python 2 and 3.)
        """
        row = self.__parent.get_index()
        packed = struct.pack('<d', self.__number)
        w0, w1, w2, w3 = struct.unpack('<4H', packed)
        if w0 == 0 and w1 == 0 and w2 & 0xFFFC == w2:
            # The 34 least-significant mantissa bits are zero: the double
            # survives truncation to 30 bits ("float RK", type 0).
            rk_encoded = (w3 << 16) | w2
            return BIFFRecords.RKRecord(row, self.__idx, self.__xf_idx, rk_encoded).get()
        if abs(self.__number) < 0x20000000 and int(self.__number) == self.__number:
            # Integral and fits in 30 signed bits ("integer RK", type 2).
            rk_encoded = (2 | (int(self.__number) << 2)) & 0xffffffff
            return BIFFRecords.RKRecord(row, self.__idx, self.__xf_idx, rk_encoded).get()
        temp = self.__number * 100
        packed100 = struct.pack('<d', temp)
        w0, w1, w2, w3 = struct.unpack('<4H', packed100)
        if w0 == 0 and w1 == 0 and w2 & 0xFFFC == w2:
            # value*100 is a truncatable double ("float RK * 100", type 1).
            rk_encoded = 1 | (w3 << 16) | w2
            return BIFFRecords.RKRecord(row, self.__idx, self.__xf_idx, rk_encoded).get()
        if abs(temp) < 0x20000000 and int(temp) == temp:
            # value*100 fits in 30 bits ("integer RK * 100", type 3).
            rk_encoded = (3 | (int(temp) << 2)) & 0xffffffff
            return BIFFRecords.RKRecord(row, self.__idx, self.__xf_idx, rk_encoded).get()
        # No compact form applies: write the full IEEE double.
        return BIFFRecords.NumberRecord(row, self.__idx, self.__xf_idx, self.__number).get()
class MulNumberCell(object):
    """Placeholder for a multi-number (MULRK) cell; serialisation is not
    implemented yet."""
    # FIX: the original __slots__ listed only the method names, so every
    # assignment in __init__ raised AttributeError (and the method names
    # themselves conflict with the defs, a ValueError at class creation).
    # List the data attributes instead, matching the sibling cell classes.
    __slots__ = ["__parent", "__idx", "__xf_idx", "__sst_idx"]

    def __init__(self, parent, idx, xf_idx, sst_idx):
        self.__parent = parent
        self.__idx = idx
        self.__xf_idx = xf_idx
        self.__sst_idx = sst_idx

    def get_biff_data(self):
        # More explicit than the original bare ``raise Exception`` -- and
        # still an Exception subclass for any existing callers.
        raise NotImplementedError("MulNumberCell.get_biff_data")
class FormulaCell(object):
    """A formula cell: holds the compiled RPN token stream plus the cached
    8-byte result field that BIFF stores alongside the formula."""
    # FIX: __slots__ must list only data attributes.  The original also
    # listed "__init__", "get_biff_data" and "result", which conflicts
    # with the identically named class attributes (methods / property)
    # and raises ValueError at class-creation time.
    __slots__ = ["__parent", "__idx", "__xf_idx", "__result", "__opts",
                 "__frmla", "__str"]

    def __init__(self, parent, idx, xf_idx, frmla):
        self.__str = None
        self.__parent = parent
        self.__idx = idx
        self.__xf_idx = xf_idx
        # Goes through the ``result`` property and fills self.__result.
        self.result = frmla.default
        # A falsy per-formula opts value falls back to the sheet default;
        # this matches the original "x != None and x or y" idiom exactly.
        self.__opts = frmla.opts or self.__parent.frmla_opts
        self.__frmla = frmla

    def get_biff_data(self):
        """Serialise as a FORMULA record, followed by a STRING record when
        the cached result is a non-empty string."""
        frmla_block_data = BIFFRecords.FormulaRecord(
            self.__parent.get_index(), self.__idx, self.__xf_idx,
            self.__result, self.__opts, self.__frmla.rpn()).get()
        if self.__str:
            frmla_block_data += BIFFRecords.StringRecord(self.__str).get()
        return frmla_block_data

    def set_result(self, value):
        self.__result = self._convertToResult(value)

    def get_result(self):
        return self.__result

    result = property(get_result, set_result)

    def _convertToResult(self, val):
        """Encode *val* into the FORMULA record's 8-byte result field and
        return it as an unsigned 64-bit integer."""
        self.__str = ''
        # FIX: bool must be tested before int/float.  bool is a subclass
        # of int, so the original ordering sent booleans down the numeric
        # branch and the boolean encoding below was unreachable.
        if isinstance(val, bool):
            ret = struct.pack('BxB3x', 0x01, val and 0x01 or 0x00)
            ret += struct.pack('<H', 0xFFFF)
        elif isinstance(val, (int, float)):
            # Plain numbers are stored directly as an IEEE double.
            ret = struct.pack('<d', val)
        elif isinstance(val, ErrorCode):
            ret = struct.pack('BxB3x', 0x02, val.int())
            ret += struct.pack('<H', 0xFFFF)
        elif (isinstance(val, (unicode, str)) and val) or bool(val):
            # Truthy string/object: the text goes into a trailing STRING
            # record (see get_biff_data); only a marker is stored here.
            # NOTE: ``unicode`` keeps this module Python-2 only, matching
            # the rest of the file.
            ret = struct.pack('B5x', 0x00)
            self.__str = unicode(val)
            ret += struct.pack('<H', 0xFFFF)
        else:
            # Falsy non-numeric result: empty-string marker.
            ret = struct.pack('B5x', 0x03)
            ret += struct.pack('<H', 0xFFFF)
        return struct.unpack('<Q', ret)[0]
| StarcoderdataPython |
3546626 | from __future__ import print_function
import scipy
class ElectrodeModuleMaker(object):
    """Builds a KiCad module (footprint) file for a three-electrode cell.

    The module contains a circular working electrode at the origin, a
    small circular reference electrode, and an arc-shaped counter
    electrode approximated by many rotated rectangular pads.  All input
    dimensions in ``param`` are in inches; the emitted file uses mm.

    NOTE(review): the scipy.cos/sin/linspace calls below are the old
    top-level NumPy aliases that were removed from recent SciPy releases;
    on a modern environment they should become numpy calls -- verify the
    installed SciPy version.
    """

    def __init__(self, param):
        self.param = param
        self.template_lines = []
        self.module_lines = []

    def run(self):
        """Generate the module: load the template, add the pads, write out."""
        self.load_template()
        self.module_lines = list(self.template_lines)
        self.add_ref_elect()
        self.add_wrk_elect()
        self.add_ctr_elect()
        self.write_module()

    def load_template(self):
        """Read and strip the template module file named in the params."""
        # BUG FIX: the original read the module-level global ``param``
        # here instead of ``self.param``, so the class only worked when a
        # global of that name happened to exist (as in the __main__ demo).
        with open(self.param['module_template'], 'r') as f:
            self.template_lines = [line.strip() for line in f.readlines()]

    def find_end_module_index(self):
        """Return the index of the '$EndMODULE' line, or -1 if absent."""
        for i, line in enumerate(self.module_lines):
            if '$EndMODULE' in line:
                return i
        return -1

    def _insert_before_end_module(self, pad_lines):
        """Splice *pad_lines* into the module just before '$EndMODULE'.

        Shared by the three add_*_elect methods, which previously
        duplicated this splice verbatim.
        """
        end_module_index = self.find_end_module_index()
        module_lines_new = self.module_lines[:end_module_index]
        module_lines_new.extend(pad_lines)
        module_lines_new.extend(self.module_lines[end_module_index:])
        self.module_lines = module_lines_new

    def add_ref_elect(self):
        self._insert_before_end_module(self.create_ref_elect())

    def create_ref_elect(self):
        """Return pad lines for the circular reference electrode."""
        elect_param = self.param['reference_electrode']
        pin = elect_param['pin']
        radius = elect_param['radius']
        radial_pos = elect_param['radial_pos']
        angle = elect_param['angle']
        angle_rad = deg_to_rad(angle)
        # Negative angle: the board's y axis points downward.
        x_pos = radial_pos*scipy.cos(-angle_rad)
        y_pos = radial_pos*scipy.sin(-angle_rad)
        elect_lines = []
        elect_lines.append('$PAD')
        elect_lines.append('Sh "{0}" C {1:1.3f} {1:1.3f} 0 0 {2}'.format(pin, in_to_mm(2.0*radius), -int(10*angle)))
        elect_lines.append('Dr 0 0 0')
        elect_lines.append('At SMD N 00888000')
        elect_lines.append('Ne 0 ""')
        elect_lines.append('Po {0:1.3f} {1:1.3f}'.format(in_to_mm(x_pos), in_to_mm(y_pos)))
        elect_lines.append('$EndPAD')
        return elect_lines

    def add_wrk_elect(self):
        self._insert_before_end_module(self.create_wrk_elect())

    def create_wrk_elect(self):
        """Return pad lines for the circular working electrode at the origin."""
        elect_param = self.param['working_electrode']
        pin = elect_param['pin']
        radius = elect_param['radius']
        elect_lines = []
        elect_lines.append('$PAD')
        elect_lines.append('Sh "{0}" C {1:1.3f} {1:1.3f} 0 0 0'.format(pin, in_to_mm(2.0*radius)))
        elect_lines.append('Dr 0 0 0')
        elect_lines.append('At SMD N 00888000')
        elect_lines.append('Ne 0 ""')
        elect_lines.append('Po 0 0')
        elect_lines.append('$EndPAD')
        return elect_lines

    def add_ctr_elect(self):
        self._insert_before_end_module(self.create_ctr_elect())

    def create_ctr_elect(self):
        """Return pad lines approximating the counter-electrode arc with
        ``segments`` rotated rectangular pads."""
        # BUG FIX: as in load_template, the original read the global
        # ``param`` here instead of ``self.param``.
        elect_param = self.param['counter_electrode']
        radial_pos = elect_param['radial_pos']
        min_angle = elect_param['angle_range'][0]
        max_angle = elect_param['angle_range'][1]
        thickness = elect_param['thickness']
        num_segments = elect_param['segments']
        pin = elect_param['pin']
        arc_oversize = elect_param['arc_oversize']
        min_angle_rad = deg_to_rad(min_angle)
        max_angle_rad = deg_to_rad(max_angle)
        delta_angle_rad = max_angle_rad - min_angle_rad
        arc_length = radial_pos*delta_angle_rad/float(num_segments)
        height = thickness
        # Oversize each segment slightly so that adjacent pads overlap.
        width = arc_oversize*arc_length
        angle_list = -1.0*scipy.linspace(min_angle_rad, max_angle_rad, num_segments)
        x_pos_list = [radial_pos*scipy.cos(ang) for ang in angle_list]
        y_pos_list = [radial_pos*scipy.sin(ang) for ang in angle_list]
        elect_lines = []
        for ang, x_pos, y_pos in zip(angle_list, x_pos_list, y_pos_list):
            ang_deg = rad_to_deg(ang)
            elect_lines.append('$PAD')
            elect_lines.append('Sh "{0}" R {1:1.3f} {2:1.3f} 0 0 {3}'.format(pin, in_to_mm(height), in_to_mm(width), -int(10*ang_deg)))
            elect_lines.append('Dr 0 0 0')
            elect_lines.append('At SMD N 00888000')
            elect_lines.append('Ne 0 ""')
            elect_lines.append('Po {0:1.4f} {1:1.4f}'.format(in_to_mm(x_pos), in_to_mm(y_pos)))
            elect_lines.append('$EndPAD')
        return elect_lines

    def print_template(self):
        for line in self.template_lines:
            print(line)

    def print_module(self):
        for line in self.module_lines:
            print(line)

    def write_module(self):
        """Write the assembled module lines to the configured output file."""
        with open(self.param['output_file'], 'w') as f:
            for line in self.module_lines:
                f.write('{0}\n'.format(line))
# Utility functions
# ---------------------------------------------------------------------------
def deg_to_rad(val):
    """Convert an angle from degrees to radians."""
    # Stdlib replacement for the original ``val*scipy.pi/180.0``:
    # scipy.pi was a deprecated NumPy alias that has been removed from
    # the scipy top-level namespace in recent releases.
    import math
    return math.radians(val)
def rad_to_deg(val):
    """Convert an angle from radians to degrees."""
    # Stdlib replacement for the original ``val*180.0/scipy.pi`` (the
    # scipy.pi alias is gone from modern SciPy); accepts numpy scalars too.
    import math
    return math.degrees(val)
def in_to_mm(val):
    """Convert a length from inches to millimetres."""
    return 25.4 * val
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Demo configuration.  All dimensions are in inches; the generator
    # converts them to mm when writing the KiCad module.
    #working_electrode_radius = 0.05
    working_electrode_radius = 0.1
    param = {
        'module_template': 'template_ELECTRODE.mod',
        'working_electrode': {
            'pin': 2,
            'radius': working_electrode_radius,
        },
        # Reference electrode sits diagonally off the working electrode.
        'reference_electrode': {
            'pin': 1,
            'radius': 0.15*working_electrode_radius,
            'radial_pos': 1.414*working_electrode_radius ,
            'angle': 135,
        },
        # Counter electrode: an arc of 100 overlapping rectangular pads.
        'counter_electrode' : {
            'pin': 3,
            'radial_pos': 1.7*working_electrode_radius,
            'angle_range': (-160,90),
            'thickness': 0.6*working_electrode_radius,
            'segments': 100,
            'arc_oversize': 1.5,
        },
        'output_file': 'ELECTRODE.mod',
    }
    maker = ElectrodeModuleMaker(param)
    maker.run()
| StarcoderdataPython |
def resolve_multiple_matches(matches):
    """Prompt the user to choose one of *matches*; return the chosen index."""
    max_index = len(matches) - 1
    print(f"Multiple matches found, enter index of desired result (0 to {max_index}):")
    for i, v in enumerate(matches):
        print(f"\t{i}: {v}")
    while True:
        index = input("")
        if not index.isnumeric() or not (0 <= int(index) <= max_index):
            print(f"Invalid index, enter a value between 0 and {max_index}")
            continue
        return int(index)
def get_user_foods(foods, plot_food_ids):
    """Interactive loop for editing the list of food ids to plot.

    *foods* maps food_id -> record with at least a "name" key;
    *plot_food_ids* is the current selection.  Returns the updated list,
    or None if the user quits with an empty selection.
    """
    def print_foods():
        print("Current foods to plot:")
        print("\t" + "\n".join(sorted(["\t" + foods[plot_food_id]['name'] for plot_food_id in plot_food_ids])))

    if plot_food_ids:
        print_foods()
    find_exact_match = True
    while True:
        no_foods = len(plot_food_ids) == 0
        # The 'q'/'c' key doubles as quit (empty list) or clear (non-empty).
        if no_foods:
            clear_quit_text = "Quit: q"
        else:
            clear_quit_text = "Clear list: c"
        if find_exact_match:
            find_exact_text = "on"
        else:
            find_exact_text = "off"
        find_food = input(f"Find food:\n"
                          f"\tToggle exact ({find_exact_text}): !\n"
                          f"\t{clear_quit_text}\n"
                          f"\tContinue: <Enter>\n").lower()
        if find_food == "":
            break
        elif find_food == "!":
            find_exact_match = not find_exact_match
            continue
        elif find_food == "q" and no_foods:
            return None
        elif find_food == "c" and not no_foods:
            plot_food_ids = []
            continue
        found_list = []
        for food_id in foods:
            food_name = foods[food_id]["name"].lower()
            if find_exact_match:
                if food_name == find_food:
                    # check an exact match isn't already in the found list
                    # NOTE(review): compares the lower-cased name against the
                    # original-case names of earlier hits, so duplicates that
                    # differ only in case slip through -- verify intended.
                    if not any([food_name == foods[already_found_id]["name"] for already_found_id in found_list]):
                        found_list.append(food_id)
            else:
                # Substring search, but only accept matches aligned on
                # word boundaries (space-delimited on both sides).
                find_food_i = food_name.find(find_food)
                if find_food_i > -1:
                    if find_food_i == 0 or food_name[find_food_i - 1] == " ":
                        end_i = find_food_i + len(find_food)
                        if end_i == len(food_name) or food_name[end_i] == " ":
                            found_list.append(food_id)
        if len(found_list) == 0:
            print("Can't find " + find_food)
        else:
            if len(found_list) == 1:
                found_id = found_list[0]
            else:
                found_names = [foods[food_id]["name"] for food_id in found_list]
                found_id = found_list[resolve_multiple_matches(found_names)]
            # Selecting an already selected food toggles it off.
            if found_id in plot_food_ids:
                plot_food_ids.remove(found_id)
            else:
                plot_food_ids.append(found_id)
            print_foods()
    return plot_food_ids
def get_user_nutrient(nutrients):
    """Interactively pick one nutrient id from the {id: name} mapping.

    Returns the chosen nutrient id, or None if the user enters 'q'.
    """
    def print_nutrients():
        print("Available nutrients:")
        # BUG FIX: the original wrote ``sorted("\t" + [ ... ])``, i.e. it
        # concatenated a str with a list and raised TypeError every time
        # the nutrient list was printed.  Prefix each name individually,
        # matching what get_user_foods does for foods.
        print("\n".join(sorted("\t" + nutrients[nut_id] for nut_id in nutrients)))

    print_nutrients()
    while True:
        find_nutrient = input("Find nutrient:\n\tQuit: q\n").lower()
        if find_nutrient == "":
            print("Enter a search term")
            continue
        elif find_nutrient == "q":
            return None
        found_list = []
        for nutrient_id in nutrients:
            nutrient_name = nutrients[nutrient_id].lower()
            find_nutrient_i = nutrient_name.find(find_nutrient)
            # Accept only matches that start a word.
            if find_nutrient_i == 0 or (find_nutrient_i > 0 and nutrient_name[find_nutrient_i - 1] == " "):
                found_list.append(nutrient_id)
        if len(found_list) == 0:
            print("Can't find " + find_nutrient)
            print_nutrients()
        else:
            if len(found_list) == 1:
                return found_list[0]
            else:
                found_names = [nutrients[nutrient_id] for nutrient_id in found_list]
                return found_list[resolve_multiple_matches(found_names)]
| StarcoderdataPython |
3254913 | <gh_stars>1-10
def convert_statcan_xml_to_csv(infile, outfile):
    """Convert a Statistics Canada SDMX-ML 2.0 dataset to a flat CSV.

    *infile* is either a directory containing the extracted Structure/
    Generic XML pair or a zip archive of them; *outfile* is the CSV path.
    The Structure file supplies human-readable names for every concept
    code; the (potentially huge) Generic file is streamed with
    lxml's iterparse so memory stays bounded.
    """
    import os
    if os.path.isdir(infile):
        data_dir = infile
    else:
        # Zip archive: unpack into a scratch directory (removed at the end).
        import tempfile
        import zipfile
        data_dir = tempfile.gettempdir() + "/data"
        zip_ref = zipfile.ZipFile(infile, 'r')
        zip_ref.extractall(data_dir)
        zip_ref.close()
    from os import listdir
    from os.path import isfile, join
    onlyfiles = [f for f in listdir(data_dir) if isfile(join(data_dir, f))]
    # Locate the metadata (Structure) and data (Generic) files by name.
    sf = list(filter(lambda x: 'Structure' in x, onlyfiles))[0]
    gf = list(filter(lambda x: 'Generic' in x, onlyfiles))[0]
    # import xml.etree.ElementTree as ET
    from lxml import etree
    tree = etree.parse(data_dir + "/" + sf)
    root = tree.getroot()
    ns = {'structure': 'http://www.SDMX.org/resources/SDMXML/schemas/v2_0/structure',
          'ns': 'http://www.w3.org/XML/1998/namespace',
          'generic': 'http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic'}
    # English display name for every concept declared in the structure file.
    all_concepts = [{elem.attrib['id']: elem.find('structure:Name[@ns:lang="en"]', ns).text} for elem in
                    root.iter('{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/structure}Concept')]
    # Dimension concepts = everything except the fixed bookkeeping ones.
    concepts = list(
        set(map(lambda x: list(x.keys())[0], all_concepts)) - set(["GEO", "OBS_VALUE", "OBS_STATUS", "TIME"]))
    print("Extracting concepts: " + ', '.join(map(str, concepts)), flush=True)

    def get_description(elem):
        # Return [code value, English description] for one Code element.
        value = elem.attrib['value']
        desc = elem.find("{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/structure}Description").text
        return ([value, desc])

    def get_description_for_key(c):
        # Build a {code: description} dict from the CL_<concept> code list.
        # A trailing "0" is stripped and an upper-case fallback tried, to
        # cope with naming inconsistencies in the source files.
        c = re.sub("0$", "", c)
        e = root.find(
            "{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message}CodeLists/{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/structure}CodeList[@id='CL_" + c + "']")
        if e == None:
            e = root.find(
                "{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message}CodeLists/{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/structure}CodeList[@id='CL_" + c.upper() + "']")
        cs = e.findall("{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/structure}Code")
        a = list(map(lambda e: get_description(e), cs))
        return ({key: value for (key, value) in a})

    # NOTE: ``re`` is imported after get_description_for_key is defined but
    # before it is first called, so the late import is safe.
    import re
    concept_lookup = {}
    geo_lookup = get_description_for_key("GEO")
    for c in concepts:
        concept_lookup[c] = get_description_for_key(c)

    def extract(elem, concepts):
        # Flatten one <generic:Series> element into a CSV row.
        geo = elem.find(
            "{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}SeriesKey/{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}Value[@concept='GEO']").attrib[
            'value']
        geo_name = geo_lookup[geo]
        if (len(geo) == 9): geo = geo[:7] + "." + geo[7:] # for CTs
        cs = list(map(lambda c: elem.find(
            "{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}SeriesKey/{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}Value[@concept='" + c + "']").attrib[
            'value'], concepts))
        cs_names = list(map(lambda c: concept_lookup[concepts[c]][cs[c]], range(0, len(concepts))))
        time = elem.find(
            "{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}Obs/{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}Time").text
        value = elem.find(
            "{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}Obs/{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}ObsValue")
        if value != None:
            value = value.attrib['value']
        else:
            # No numeric observation: fall back to the status flag, if any.
            value = elem.find(
                "{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}Obs/{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}Attributes/{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}Value[@concept='OBS_STATUS']")
            if value != None:
                value = value.attrib['value']
                # print("Found status "+str(value))
        return [geo, geo_name] + cs + cs_names + [time, value]

    import csv
    # Stream the Generic file series-by-series to keep memory bounded.
    context = etree.iterparse(data_dir + "/" + gf, events=('end',),
                              tag='{http://www.SDMX.org/resources/SDMXML/schemas/v2_0/generic}Series')
    count = 0
    with open(outfile, 'w') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(["GeoUID", "Name"] + list(map(lambda x: x + " ID", concepts)) + concepts + ["Year", "Value"])
        for event, elem in context:
            row = extract(elem, concepts)
            csvwriter.writerow(row)
            count += 1
            # Free the parsed element to keep iterparse memory flat.
            elem.clear()
            if count % 100000 == 0:
                print("Done with row " + str(count / 1000) + "k", flush=True)
                csvfile.flush()
        csvfile.flush()
    import shutil
    # Remove the scratch directory (also removes *infile* itself when a
    # directory was passed in -- NOTE(review): verify that is intended).
    shutil.rmtree(data_dir, ignore_errors=True)
#convert_statcan_xml_to_csv('/Users/jens/stats canada/2001/95F0495XCB2001002', "test.csv")
| StarcoderdataPython |
3451788 | <reponame>suddrey-qut/armer
"""
Armer Class
.. codeauthor:: <NAME>
"""
from __future__ import annotations
from typing import List, Dict, Any, Tuple
import timeit
import importlib
import rospy
import tf2_ros
import yaml
import roboticstoolbox as rtb
from roboticstoolbox.backends.swift import Swift
from spatialmath.base.argcheck import getvector
from armer.utils import populate_transform_stamped
from armer.robots import ROSRobot
from armer.timer import Timer
class Armer:
"""
The Armer Driver.
:param robot: [description], List of robots to be managed by the driver
:type robots: List[rtb.robot.Robot], optional
:param backend: [description], defaults to None
:type backend: rtb.backends.Connector, optional
.. codeauthor:: <NAME>
.. sectionauthor:: <NAME>
"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
robots: List[rtb.robot.Robot] = None,
backend: rtb.backends.Connector = None,
backend_args: Dict[str, Any] = None,
readonly_backends: List[Tuple[rtb.backends.Connector, Dict[str, Any]]] = None,
publish_transforms: bool = False,
logging: dict[str, bool] = None) -> None:
self.robots: List[ROSRobot] = robots
self.backend: rtb.backends.Connector = backend
self.readonly_backends : List[rtb.backends.Connector] = readonly_backends \
if readonly_backends else []
if not self.robots:
self.robots = [ROSRobot(self, rtb.models.URDF.UR5())]
if not self.backend:
self.backend = Swift()
self.is_publishing_transforms = publish_transforms
self.broadcaster: tf2_ros.TransformBroadcaster = None
if self.is_publishing_transforms:
self.broadcaster = tf2_ros.TransformBroadcaster()
self.frequency = min([r.frequency for r in self.robots])
self.rate = rospy.Rate(self.frequency)
self.last_tick = timeit.default_timer()
# Launch backend
self.backend.launch(**(backend_args if backend_args else dict()))
for robot in self.robots:
self.backend.add(robot)
for readonly, args in self.readonly_backends:
readonly.launch(**args)
for robot in self.robots:
readonly.add(robot, readonly=True)
# Logging
self.log_frequency = logging and 'frequency' in logging and logging['frequency']
def close(self):
"""
Close backend and stop action servers
"""
self.backend.close()
for robot in self.robots:
robot.close()
def publish_transforms(self) -> None:
"""[summary]
"""
if not self.is_publishing_transforms:
return
for robot in self.robots:
joint_positions = getvector(robot.q, robot.n)
for link in robot.elinks:
if link.parent is None:
continue
if link.isjoint:
transform = link.A(joint_positions[link.jindex], fast=True)
else:
transform = link.A(fast=True)
self.broadcaster.sendTransform(populate_transform_stamped(
link.parent.name,
link.name,
transform
))
for gripper in robot.grippers:
joint_positions = getvector(gripper.q, gripper.n)
for link in gripper.links:
if link.parent is None:
continue
if link.isjoint:
transform = link.A(joint_positions[link.jindex], fast=True)
else:
transform = link.A(fast=True)
self.broadcaster.sendTransform(populate_transform_stamped(
link.parent.name,
link.name,
transform
))
@staticmethod
def load(path: str) -> Armer:
"""
Generates an Armer Driver instance from the configuration file at path
:param path: The path to the configuration file
:type path: str
:return: An Armer driver instance
:rtype: Armer
"""
with open(path, 'r') as handle:
config = yaml.load(handle, Loader=yaml.SafeLoader)
robots: List[rtb.robot.Robot] = []
for spec in config['robots']:
module_name, model_name = spec['model'].rsplit('.', maxsplit=1)
robot_cls = getattr(importlib.import_module(module_name), model_name)
del spec['model']
wrapper = ROSRobot
if 'type' in spec:
module_name, model_name = spec['type'].rsplit('.', maxsplit=1)
wrapper = getattr(importlib.import_module(module_name), model_name)
del spec['type']
robots.append(wrapper(robot_cls(), **spec))
backend = None
backend_args = dict()
if 'backend' in config:
module_name, model_name = config['backend']['type'].rsplit('.', maxsplit=1)
backend_cls = getattr(importlib.import_module(module_name), model_name)
backend = backend_cls()
backend_args = config['args'] if 'args' in config else dict()
readonly_backends = []
if 'readonly_backends' in config:
for spec in config['readonly_backends']:
module_name, model_name = spec['type'].rsplit('.', maxsplit=1)
backend_cls = getattr(importlib.import_module(module_name), model_name)
readonly_backends.append((backend_cls(), spec['args'] if 'args' in spec else dict()))
logging = config['logging'] if 'logging' in config else {}
publish_transforms = config['publish_transforms'] if 'publish_transforms' in config else False
return Armer(
robots=robots,
backend=backend,
backend_args=backend_args,
readonly_backends=readonly_backends,
publish_transforms=publish_transforms,
logging=logging
)
def run(self) -> None:
    """
    Runs the driver. This is a blocking call.
    """
    self.last_tick = timeit.default_timer()

    while not rospy.is_shutdown():
        with Timer('ROS', self.log_frequency):
            now = timeit.default_timer()
            elapsed = now - self.last_tick

            # Advance every robot, then the (read/write) backend,
            # then each read-only backend, by the same wall-clock delta.
            for robot in self.robots:
                robot.step(dt=elapsed)

            self.backend.step(dt=elapsed)

            for ro_backend, _args in self.readonly_backends:
                ro_backend.step(dt=elapsed)

            self.publish_transforms()
            self.rate.sleep()
            self.last_tick = now
if __name__ == '__main__':
rospy.init_node('manipulator')
manipulator = Armer(publish_transforms=False)
manipulator.run()
| StarcoderdataPython |
1778281 | import numpy as np
#######################################
# Paths, trained-model locations and camera constants for the
# DenseFusion / YCB-Affordance experiments. All values are machine-local.
#######################################
ROOT_PATH = '/home/akeaveny/git/DenseFusion/'
# /home/akeaveny/git/DenseFusion/datasets/ycb_aff/
CLASSES_FILE = ROOT_PATH + 'datasets/ycb_aff/dataset_config/classes.txt'
CLASS_IDS_FILE = ROOT_PATH + 'datasets/ycb_aff/dataset_config/class_ids.txt'
OBJ_PART_CLASSES_FILE = ROOT_PATH + 'datasets/ycb_aff/dataset_config/obj_part_classes.txt'
OBJ_PART_CLASS_IDS_FILE = ROOT_PATH + 'datasets/ycb_aff/dataset_config/obj_part_classes_ids.txt'
OBJ_PART_CLASSES_FILE_TRAIN = ROOT_PATH + 'datasets/ycb_aff/dataset_config/obj_part_classes_train.txt'
OBJ_PART_CLASS_IDS_FILE_TRAIN = ROOT_PATH + 'datasets/ycb_aff/dataset_config/obj_part_classes_ids_train.txt'
TRAIN_FILE = ROOT_PATH + 'datasets/ycb_aff/dataset_config/train_data_list.txt'
TEST_FILE = ROOT_PATH + 'datasets/ycb_aff/dataset_config/test_data_list.txt'
# Trained models (checkpoint .pth files)
DF_GITHUB_TRAINED_MODEL = '/data/Akeaveny/weights/DenseFusion/ycb/densefusion/pose_model_26_0.012863246640872631.pth'
DF_GITHUB_TRAINED_REFINE_MODEL = '/data/Akeaveny/weights/DenseFusion/ycb/densefusion/pose_refine_model_69_0.009449292959118935.pth'
TRAINED_MODEL = '/data/Akeaveny/weights/DenseFusion/ycb/real_and_syn/pose_model_27_0.012961520093793814.pth'
TRAINED_REFINE_MODEL = '/data/Akeaveny/weights/DenseFusion/ycb/real_and_syn/pose_refine_model_93_0.009422253060541326.pth'
TRAINED_AFF_MODEL = '/data/Akeaveny/weights/DenseFusion/ycb_aff/real_and_syn/pose_model_20_0.012745570227629784.pth'
TRAINED_AFF_REFINE_MODEL = '/data/Akeaveny/weights/DenseFusion/ycb_aff/real_and_syn/pose_refine_model_57_0.009359799028407731.pth'
# MATLAB evaluation output folders
OBJ_EVAL_FOLDER_GT = ROOT_PATH + 'affpose/YCB_Aff/matlab/obj/results/gt'
OBJ_EVAL_FOLDER_DF_WO_REFINE = ROOT_PATH + 'affpose/YCB_Aff/matlab/obj/results/df_wo_refine'
OBJ_EVAL_FOLDER_DF_ITERATIVE = ROOT_PATH + 'affpose/YCB_Aff/matlab/obj/results/df_iterative'
AFF_EVAL_FOLDER_GT = ROOT_PATH + 'affpose/YCB_Aff/matlab/aff/results/gt'
AFF_EVAL_FOLDER_DF_WO_REFINE = ROOT_PATH + 'affpose/YCB_Aff/matlab/aff/results/df_wo_refine'
AFF_EVAL_FOLDER_DF_ITERATIVE = ROOT_PATH + 'affpose/YCB_Aff/matlab/aff/results/df_iterative'
#######################################
### YCB
#######################################
DATASET_ROOT_PATH = '/data/Akeaveny/Datasets/YCB_Video_Dataset/'
AFF_DATASET_ROOT_PATH = '/data/Akeaveny/Datasets/YCB_Affordance_Dataset/'
YCB_TOOLBOX_CONFIG = ROOT_PATH + 'YCB_Video_toolbox/results_PoseCNN_RSS2018/'
# Per-frame file suffixes used by the YCB-Video layout.
RGB_EXT = '-color.png'
DEPTH_EXT = '-depth.png'
LABEL_EXT = '-label.png'
META_EXT = '-meta.mat'
BOX_EXT = '-box.txt'
OBJ_PART_LABEL_EXT = '-obj_part_label.png'
AFF_LABEL_EXT = '-aff_label.png'
POSECNN_EXT = '.mat'
#######################################
### YCB AFF
#######################################
NUM_OBJECTS = 21
NUM_OBJECTS_PARTS = 31
# ImageNet normalization statistics.
IMG_MEAN = [0.485, 0.456, 0.406]
IMG_STD = [0.229, 0.224, 0.225]
#######################################
# CAMERA CONFIGS (two intrinsics sets: principal point cx/cy, focal fx/fy)
#######################################
CAM_CX_1 = 312.9869
CAM_CY_1 = 241.3109
CAM_FX_1 = 1066.778
CAM_FY_1 = 1067.487
CAM_CX_2 = 323.7872
CAM_CY_2 = 279.6921
CAM_FX_2 = 1077.836
CAM_FY_2 = 1078.189
CAM_SCALE = 10000.0
# NOTE(review): 640x480 frames normally mean WIDTH=640, HEIGHT=480; these
# names look swapped but XMAP/YMAP below are built consistently with them --
# confirm against the dataloader before changing.
HEIGHT, WIDTH = 640, 480
ORIGINAL_SIZE = (HEIGHT, WIDTH)
RESIZE = (int(HEIGHT/1), int(WIDTH/1))
_step = 40
BORDER_LIST = np.arange(start=0, stop=np.max([WIDTH, HEIGHT])+_step, step=_step)
# Pixel-coordinate lookup grids used for depth-to-point-cloud conversion.
XMAP = np.array([[j for i in range(HEIGHT)] for j in range(WIDTH)])
YMAP = np.array([[i for i in range(HEIGHT)] for j in range(WIDTH)])
#######################################
# Point-cloud sampling / inference hyper-parameters.
#######################################
NUM_PT = 1000
NUM_PT_MESH_SMALL = 500
NUM_PT_MESH_LARGE = 2600
FRONT_NUM = 2
REFINE_ITERATIONS = 2
BATCH_SIZE = 1
PRED_C_THRESHOLD = 0.0
| StarcoderdataPython |
59459 | __import__('eigenpy').switchToNumpyArray()
| StarcoderdataPython |
11281688 | from virtualbox import library
from virtualbox.library_ext.progress import IProgress
class IHost(library.IHost):
    __doc__ = library.IHost.__doc__

    def create_host_only_network_interface(self):
        # Work around an upstream bug: the raw call returns its two results
        # (host_interface, progress) in the wrong order, so unpack swapped
        # and wrap each in its proper interface class before returning.
        raw_progress, raw_interface = self._call("createHostOnlyNetworkInterface")
        return (
            library.IHostNetworkInterface(raw_interface),
            IProgress(raw_progress),
        )

    create_host_only_network_interface.__doc__ = (
        library.IHost.create_host_only_network_interface.__doc__
    )
| StarcoderdataPython |
11307566 | import os
import api_tmdb
import api_jellyfin
'''
tmdb -> jellyfin legend:
TV
id ProviderIds.Tmdb
name Name
release_date ProductionYear
media_type Type
Movies
id ProviderIds.Tmdb
title Name
first_air_date ProductionYear
media_type Type # collections ommit media_type
Shared Dict:
{
title: 'Star Wars',
year: '1977', # 0 if value is not known
media_type: 'movie', # currently unused
tmdb_id: '1212', # 0 if not known
jellyfin_id: 'ba3a5d90b37244baa339816bdcbc9ca8'
}
'''
# be careful with this. only first episodes of series isnt working yet so it will add every episode of a series
settings = {
    'server_info': {
        'server_url': os.environ['JELLYFIN_URL'],
        'server_username': os.environ['JELLYFIN_USERNAME'],
        # NOTE(review): '<PASSWORD>' looks like a scrubbed placeholder for the
        # real env-var name (e.g. JELLYFIN_PASSWORD) -- confirm before running.
        'server_password': os.environ['<PASSWORD>'],
    },
    'only_series_first_episodes': True
}

# wip, this will be read from a file and/or command line
# currently playlist updating isnt working so every time this script runs it will create new playlists
inputs = {
    'The Entire Marvel Cinematic Universe': {
        'title': 'The Entire Marvel Cinematic Universe',
        'id': 12179,
        'type': 'list'
    },
    'Harry Potter Collection': {
        'title': 'Harry Potter Collection',
        'id': 1241,
        'type': 'collection'
    },
}

clientManager = api_jellyfin.clientManager
client = clientManager.login(settings['server_info']['server_url'], settings['server_info']['server_username'], settings['server_info']['server_password'])

# For each configured TMDB list/collection: fetch it, intersect with the
# Jellyfin library, and create a Jellyfin playlist from the intersection.
for item in inputs.items():
    tmdb_result = []
    print(item)
    # query tmdb with a list or collection id and return a list of media items
    if item[1]['type'] == "list":
        tmdb_result = api_tmdb.get_playlist_from_tmdb_list(item[1]['id'])
    elif item[1]['type'] == "collection":
        tmdb_result = api_tmdb.get_playlist_from_tmdb_collection(item[1]['id'])
    # check the list from tmdb against the jellyfin library and remove media that isnt in the jellyfin library
    list_only_matched_items = api_jellyfin.match_items_to_tmdb(client, tmdb_result)
    # create a new jellyfin playlist using the new list
    api_jellyfin.sync_list_with_jellyfin_playlist(client, item[1]['title'], list_only_matched_items)

clientManager.stop()
9633441 | from .settings import *
# Database: local PostgreSQL instance for development.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "food_image",
        "USER": "food_image",
        # NOTE(review): '<PASSWORD>' is a scrubbed placeholder -- supply the
        # real credential via environment/secret management, not source.
        "PASSWORD": "<PASSWORD>",
        "HOST": "127.0.0.1",
        "PORT": "5432",
    }
}

# settings
# WARNING: everything below disables HTTPS/security hardening and opens
# ALLOWED_HOSTS to '*' -- development-only; never deploy with these values.
DEBUG = True
ALLOWED_HOSTS = ["*"]
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
SECURE_SSL_REDIRECT = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
# NOTE(review): 'SRF_COOKIE_SECURE' looks like a typo of CSRF_COOKIE_SECURE
# (already set above); Django ignores unknown names, so this line is inert.
SRF_COOKIE_SECURE = False

# Media files are stored in a (test) Google Cloud Storage bucket.
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_BUCKET_NAME = "food-vision-images_test"

# logging: warnings and above go to a local debug.log file.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "file": {
            "level": "WARNING",
            "class": "logging.FileHandler",
            "filename": "debug.log",
        },
    },
    "loggers": {
        "django": {
            "handlers": ["file"],
            "level": "WARNING",
            "propagate": True,
        },
    },
    "root": {
        "handlers": ["file"],
        "level": "WARNING",
    },
}
| StarcoderdataPython |
363856 | <gh_stars>10-100
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
from IPython.display import HTML
def data_point_creator(x_bound, y_bound, func, prec=.1):
    """Sample ``func`` over a symmetric 2D grid and return meshgrid data.

    BUG FIX: the grid bounds were hard-coded to +/-5.12, silently ignoring
    the ``x_bound``/``y_bound`` parameters; they are now honoured.
    Bounds are assumed to be positive scalars giving the symmetric range
    [-bound, bound) -- confirm against callers.

    :param x_bound: half-width of the x range
    :param y_bound: half-width of the y range
    :param func: callable f(x, y); applied row-wise over the meshgrid
    :param prec: grid step size
    :return: meshgrid arrays x, y and z = f(x, y)
    """
    x = np.arange(-x_bound, x_bound, prec)
    y = np.arange(-y_bound, y_bound, prec)
    x, y = np.meshgrid(x, y)
    # map() iterates the meshgrid rows, so func is evaluated one row at a time.
    z = np.array(list(map(func, x, y)))
    return x, y, z
def three_d_plot(x, y, z, p_type='surface', genetic_points=None, with_countour=False, elev=45):
    """Render z = f(x, y) as a 3D surface or wireframe.

    If genetic_points is given (assumed shape (n_generations, pop_size, 3)
    -- TODO confirm), an animation scattering each generation's individuals
    over the surface is built and returned as IPython HTML; otherwise the
    static plot is drawn and None is returned.
    NOTE(review): original indentation was lost in extraction; the
    axis-label/limit block after the contour is assumed unconditional --
    confirm against the upstream source.
    """
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    plot_dict = {
        'surface': ax.plot_surface,
        'wireframe': ax.plot_wireframe,
    }
    assert p_type in plot_dict.keys()

    def animate(i):
        # One animation frame: clear axes, scatter generation i, redraw surface.
        x_gen = genetic_points[i, :, 0]
        y_gen = genetic_points[i, :, 1]
        z_gen = genetic_points[i, :, 2]
        ax.clear()
        ax.scatter(x_gen, y_gen, z_gen, c='black', s=30)
        plot_dict[p_type](x, y, z)
        ax.contour(x, y, z, zdir='z', offset=-2, cmap=cm.coolwarm)
        ax.set_title('Generation {}'.format(i))
        ax.set_xlabel('X')
        ax.set_xlim(-10, 10)
        ax.set_ylabel('Y')
        ax.set_ylim(-10, 10)
        ax.set_zlabel('Z')
        ax.set_zlim(-2.2, 100)
        ax.view_init(elev=elev)
        return ax

    plot_dict[p_type](x, y, z)
    if with_countour:
        # Only the y-direction contour is active; x/z variants kept disabled.
        # cset = ax.contour(x, y, z, zdir='z', offset=-25, cmap=cm.coolwarm)
        # cset = ax.contour(x, y, z, zdir='x', offset=-10, cmap=cm.coolwarm)
        cset = ax.contour(x, y, z, zdir='y', offset=10, cmap=cm.coolwarm)
    ax.set_xlabel('X')
    ax.set_xlim(-10, 10)
    ax.set_ylabel('Y')
    ax.set_ylim(-10, 10)
    ax.set_zlabel('Z')
    ax.set_zlim(-25, 25)
    if not(genetic_points is None):
        anim = animation.FuncAnimation(fig, animate, frames=genetic_points.shape[0], interval=200)
        plt.close()
        # call our new function to display the animation
        return HTML(anim.to_jshtml())
def two_d_plot(x, y, z, genetic_points=None, with_countour=False, elev=45):
    """2D counterpart of three_d_plot: contour view with optional GA animation.

    NOTE(review): `elev` is accepted but never used here -- kept for
    signature parity with three_d_plot; confirm before removing.
    NOTE(review): original indentation was lost in extraction; the
    label/limit block is assumed to belong to the with_countour branch,
    mirroring the line order of the source -- confirm upstream.
    """
    fig = plt.figure()
    ax = fig.gca()

    def animate(i):
        # One frame: scatter generation i over a refreshed contour plot.
        x_gen = genetic_points[i, :, 0]
        y_gen = genetic_points[i, :, 1]
        ax.clear()
        ax.scatter(x_gen, y_gen, c='black', s=30)
        ax.contour(x, y, z, zdir='z', offset=-2, cmap=cm.coolwarm)
        ax.set_title('Generation {}'.format(i))
        ax.set_xlabel('X')
        ax.set_xlim(-10, 10)
        ax.set_ylabel('Y')
        ax.set_ylim(-10, 10)
        return ax

    if with_countour:
        cset = ax.contour(x, y, z, zdir='y', offset=10, cmap=cm.coolwarm)
        ax.set_xlabel('X')
        ax.set_xlim(-10, 10)
        ax.set_ylabel('Y')
        ax.set_ylim(-10, 10)
    if not(genetic_points is None):
        anim = animation.FuncAnimation(fig, animate, frames=genetic_points.shape[0], interval=200)
        plt.close()
        # call our new function to display the animation
        return HTML(anim.to_jshtml())
# Classic continuous-optimization benchmark functions (2D unless noted).
de_jong_func = lambda x, y: x**2 + y**2  # De Jong sphere
a_p_hyper_ellipsoid_func = lambda x, y: x**2 + 2*y**2  # axis-parallel hyper-ellipsoid
ros_valley_func = lambda x, y: 100*(y - x**2)**2 + (1 -x)**2  # Rosenbrock valley
# NOTE(review): np.floor around the cosine terms deviates from the standard
# Rastrigin definition -- presumably intentional (integer plateaus); confirm.
rastrigin_func = lambda x, y: 20 + np.floor(x**2 + 10*np.cos(2*np.pi*x)) + np.floor(y**2 + 10*np.cos(2*np.pi*y))
multi_rastrigin_func = lambda x: 10*len(x) + sum([np.floor(i**2 + 10*np.cos(2*np.pi*i)) for i in x])  # n-dimensional variant
357493 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from datawarehouse import load_higgs
def load_data():
    """Load the full HIGGS dataset, fill missing values, and trim/cast columns.

    Returns a pandas.DataFrame ready for training.
    """
    data = load_higgs()  # Load the full dataset and return it as a pandas.DataFrame
    data = handle_missing_values(data)
    data = clean_columns(data)
    return data
def handle_missing_values(data, missing_value=-999.0, dummy_variables=False):
    """
    Replace sentinel missing values (default -999.0) with 0.

    When dummy_variables is True, a boolean '<feature>_is_missing' column is
    appended for every feature that contained at least one missing entry
    (1 where the value was missing, 0 otherwise).

    Args
    ----
    data : (pandas.DataFrame) the data with missing values.
    missing_value : (default=-999.0) the value considered missing.
    dummy_variables : (bool, default=False) add missing-indicator columns.

    Returns
    -------
    filled_data : (pandas.DataFrame) the data with missing values handled.
    """
    missing_mask = (data == missing_value)
    # Turn sentinel entries into NaN, then zero-fill them.
    filled_data = data.mask(missing_mask).fillna(0.)

    if dummy_variables:
        flagged_cols = [col for col in missing_mask.columns if missing_mask[col].any()]
        indicator_df = missing_mask[flagged_cols].rename(
            columns={col: col + "_is_missing" for col in flagged_cols})
        filled_data = filled_data.join(indicator_df)

    return filled_data
def clean_columns(data):
    """
    Drop non-feature columns (DER_mass_MMC, EventId, KaggleSet, KaggleWeight)
    and cast the string labels to float.

    Returns a new DataFrame; the label cast happens in place on the copy.
    """
    trimmed = data.drop(columns=["DER_mass_MMC", "EventId", "KaggleSet", "KaggleWeight"])
    label_to_float(trimmed)  # mutates the copy in place
    return trimmed
def label_to_float(data):
    """
    Transform string labels to float values, in place:
        s -> 1.0
        b -> 0.0
    Does nothing when the 'Label' column is already numeric.

    Args
    ----
    data: pandas.DataFrame-like; modified in place.
    """
    if data['Label'].dtype != object:
        return  # already numeric
    data["Label"] = (data["Label"] == 's').astype("float")
def normalize_weight(data, background_luminosity=410999.84732187376,
                     signal_luminosity=691.9886077135781):
    """Rescale data['Weight'] in place so per-class luminosities match nominal."""
    data['Weight'] = compute_normalized_weight(
        data['Weight'].values, data['Label'].values,
        background_luminosity=background_luminosity,
        signal_luminosity=signal_luminosity)


def compute_normalized_weight(w, y,
                              background_luminosity=410999.84732187376,
                              signal_luminosity=691.9886077135781):
    """Return a rescaled copy of `w` so each class sums to its nominal luminosity.

    y == 0 marks background, y == 1 signal.
    """
    background_sum = w[y == 0].sum()
    signal_sum = w[y == 1].sum()

    rescaled = w.copy()
    rescaled[y == 0] = w[y == 0] * (background_luminosity / background_sum)
    rescaled[y == 1] = w[y == 1] * (signal_luminosity / signal_sum)
    return rescaled
def split_data_label_weights(data):
    """Split a dataset frame into (features X, labels y, weights W)."""
    labels = data["Label"]
    weights = data["Weight"]
    features = data.drop(columns=["Weight", "Label"])
    # Optional bookkeeping columns may or may not be present.
    features = features.drop(columns=["origWeight", "detailLabel"], errors="ignore")
    return features, labels, weights
def split_train_test(data, idx_train, idx_test):
    """Split `data` positionally into (train_data, test_data) frames.

    Raises ValueError when more train indices are supplied than rows exist.
    """
    n_samples = data.shape[0]
    n_train = idx_train.shape[0]
    if n_samples - n_train < 0:
        raise ValueError('The number of train samples ({}) exceed the total number of samples ({})'.format(n_train, n_samples))
    return data.iloc[idx_train], data.iloc[idx_test]
# def skew(data, z=1.0, missing_value=0., remove_mass_MMC=True):
# data_skewed = data.copy()
# if not "DER_mass_MMC" in data_skewed.columns:
# data_skewed["DER_mass_MMC"] = np.zeros(data.shape[0]) # Add dummy column
# tau_energy_scale(data_skewed, z, missing_value=missing_value) # Modify data inplace
# data_skewed = data_skewed.drop(["ORIG_mass_MMC", "ORIG_sum_pt"], axis=1)
# if remove_mass_MMC and "DER_mass_MMC" in data_skewed.columns:
# data_skewed = data_skewed.drop( ["DER_mass_MMC"], axis=1 )
# return data_skewed
| StarcoderdataPython |
20255 | import re
import pathlib
from clldutils.text import strip_chars
from cldfbench import Dataset as BaseDataset
from cldfbench import CLDFSpec
QUOTES = '“”'
class Dataset(BaseDataset):
    """cldfbench dataset builder for LaPolla's Qiang interlinear texts."""
    dir = pathlib.Path(__file__).parent
    id = "lapollaqiang"

    def cldf_specs(self):  # A dataset must declare all CLDF sets it creates.
        return CLDFSpec(dir=self.cldf_dir, module='Generic', metadata_fname='cldf-metadata.json')

    def cmd_download(self, args):
        # Nothing to download: the raw text ships with the repository.
        pass

    def cmd_makecldf(self, args):
        """Convert raw/Qiang-2.txt into CLDF Language/Example/Text tables."""
        args.writer.cldf.add_component('LanguageTable')
        args.writer.cldf.add_component(
            'ExampleTable',
            'Text_ID',
            {'name': 'Sentence_Number', 'datatype': 'integer'},
            {'name': 'Phrase_Number', 'datatype': 'integer'},
        )
        args.writer.cldf.add_table('texts.csv', 'ID', 'Title')
        args.writer.cldf.add_foreign_key('ExampleTable', 'Text_ID', 'texts.csv', 'ID')
        args.writer.objects['LanguageTable'].append({'ID': 'qiang', 'Name':
            'Qiang', 'Glottocode': 'west2876'})
        example_number = 0
        for text_id, title, lines in iter_texts(self.raw_dir.read('Qiang-2.txt').split('\n')):
            args.writer.objects['texts.csv'].append({'ID': text_id, 'Title': title})
            # Flatten all IGT lines of the text into parallel word/gloss streams,
            # then re-segment into sentences and phrases for the ExampleTable.
            text, gloss = [], []
            for igt in iter_igts(lines):
                text.extend(igt[1])
                gloss.extend(igt[2])
            for sid, sentence in enumerate(iter_sentences(zip(text, gloss)), start=1):
                for pid, phrase in enumerate(iter_phrases(sentence), start=1):
                    example_number += 1
                    args.writer.objects['ExampleTable'].append({
                        'ID': example_number,
                        'Primary_Text': ' '.join(p[0] for p in phrase),
                        'Analyzed_Word': [p[0] for p in phrase],
                        'Gloss': [p[1] for p in phrase],
                        'Text_ID': text_id,
                        'Language_ID': 'qiang',
                        'Sentence_Number': sid,
                        'Phrase_Number': pid,
                    })
def iter_phrases(chunks):
    """Group (word, gloss) pairs into phrases, splitting after ',' or ';'.

    The trailing chunk (with no terminator) is yielded as a final phrase
    and must be non-empty.
    """
    terminators = ',;'
    current = []
    for word, gloss in chunks:
        current.append((word, gloss))
        # Quotes are stripped so a terminator inside closing quotes still counts.
        if strip_chars(QUOTES, word)[-1] in terminators:
            yield current
            current = []
    assert current
    yield current
def iter_sentences(chunks):
    """Group (word, gloss) pairs into sentences, splitting after '.', '!' or '?'.

    Unlike iter_phrases, every chunk must belong to a terminated sentence.
    """
    terminators = '.!?'
    current = []
    for word, gloss in chunks:
        current.append((word, gloss))
        if strip_chars(QUOTES, word)[-1] in terminators:
            yield current
            current = []
    assert not current
def iter_igts(lines):
    """Yield (sentence_id, words, glosses) from aligned triples of lines.

    Input must be groups of three lines: numbered text, gloss, blank separator.
    """
    assert len(lines) % 3 == 0
    number_re = re.compile(r'(?P<number>[0-9]+)\s+')
    for start in range(0, len(lines), 3):
        text_line, gloss_line, blank = lines[start:start + 3]
        assert not blank
        match = number_re.match(text_line)
        assert match
        words = text_line[match.end():].split()
        glosses = gloss_line.split()
        # Words and glosses must stay in one-to-one alignment.
        assert len(words) == len(glosses)
        yield match.group('number'), words, glosses
def iter_texts(all_lines):
    """Yield (text_id, title, body_lines) for each 'Text N: Title' section."""
    header_re = re.compile(r'Text\s+(?P<number>[0-9]+)\s*:\s+(?P<title>.+)')
    current_id, current_title, body = None, None, []
    for raw in all_lines:
        stripped = raw.strip()
        header = header_re.match(stripped)
        if header:
            # Flush the previous section before starting the next one.
            if current_id:
                yield current_id, current_title, body
                body = []
            current_id = header.group('number')
            current_title = header.group('title')
        else:
            body.append(stripped)
    if body:
        yield current_id, current_title, body
| StarcoderdataPython |
11395553 | <filename>cooperhewitt/api/multiclient/__init__.py
# https://pythonhosted.org/setuptools/setuptools.html#namespace-packages
__import__('pkg_resources').declare_namespace(__name__)
import cooperhewitt.api.client
import grequests
class OAuth2(cooperhewitt.api.client.OAuth2):

    def execute_methods(self, multi, size=10):
        """Fire a batch of API calls concurrently and yield parsed responses.

        NOTE(review): `size` is accepted but unused here -- presumably meant
        as a grequests pool size; confirm before relying on it.
        """
        pending = []
        for details in multi:
            url, kwargs = self.prepare_request(*details)
            pending.append(grequests.post(url, **kwargs))

        for response in grequests.map(pending):
            yield self.parse_response(response)
| StarcoderdataPython |
5023331 | <gh_stars>1-10
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class ArrayLengthNode(Node, ArmLogicTreeNode):
    '''Array length node'''
    bl_idname = 'LNArrayLengthNode'
    bl_label = 'Array Length'
    bl_icon = 'GAME'

    def init(self, context):
        # One array input socket; one integer output carrying its length.
        self.inputs.new('ArmNodeSocketArray', 'Array')
        self.outputs.new('NodeSocketInt', 'Length')

# Register the node under the 'Array' category of the logic-node editor.
add_node(ArrayLengthNode, category='Array')
| StarcoderdataPython |
1736488 | import gym
from gym import spaces
from gym.utils import seeding
def cmp(a, b):
    """Three-way comparison: 1 if a > b, -1 if a < b, 0 if equal."""
    return (a > b) - (a < b)
# 1 = Ace, 2-10 = number cards, Jack/Queen/King all count as 10.
deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]


def draw_card(np_random):
    """Draw one card uniformly at random (deck is effectively infinite)."""
    return np_random.choice(deck)


def draw_hand(np_random):
    """Draw a starting hand of two cards."""
    return [draw_card(np_random) for _ in range(2)]
def usable_ace(hand):
    """True if the hand holds an ace that can count as 11 without busting."""
    return 1 in hand and sum(hand) + 10 <= 21


def sum_hand(hand):
    """Best total for the hand, counting one usable ace as 11."""
    total = sum(hand)
    return total + 10 if usable_ace(hand) else total


def is_bust(hand):
    """True if the hand's best total exceeds 21."""
    return sum_hand(hand) > 21


def score(hand):
    """Hand value for comparison: 0 when bust, otherwise the best total."""
    return 0 if is_bust(hand) else sum_hand(hand)


def is_natural(hand):
    """True for a natural blackjack: an ace plus a ten-value card."""
    return sorted(hand) == [1, 10]
class BlackjackEnv(gym.Env):
    """Simple blackjack environment.

    (Docstring translated from Chinese.)
    Blackjack is a card game where the goal is to get a hand as close to 21
    as possible without going over. The player competes against a dealer who
    follows a fixed policy.
    Face cards (Jack, Queen, King) count as 10. An ace counts as either 11
    or 1; when it can count as 11 without busting it is called "usable".
    The deck is treated as infinite, so each card is drawn with a fixed
    probability.
    At the start both the player and the dealer get two cards, with one of
    the dealer's cards face up. The player may hit (draw) or stick (stop).
    If the player goes over 21 the dealer wins. If the player sticks, the
    dealer draws by a fixed policy -- hit below 17, stick at 17 or above.
    If the dealer busts the player wins; otherwise the higher total wins and
    equal totals draw. Reward is +1 for a win, -1 for a loss, 0 for a draw.
    """
    def __init__(self, natural=False):
        self.action_space = spaces.Discrete(2)
        # Observation tuple:
        #   1) 1-31: player's current sum. The player never hits at 21, so
        #      even after busting the maximum is 20 + 11 = 31; sums below 12
        #      are unnecessary in principle but harmless to include.
        #   2) 1-10: the dealer's face-up card.
        #   3) 0/1: whether the player holds a usable ace.
        self.observation_space = spaces.Tuple((
            spaces.Discrete(32),
            spaces.Discrete(11),
            spaces.Discrete(2)))
        self.seed()

        # When True, a player win with a natural pays 1.5x reward.
        self.natural = natural
        # Start the game.
        self.reset()
        self.nA = 2

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        assert self.action_space.contains(action)
        if action:  # hit: draw another card
            self.player.append(draw_card(self.np_random))
            if is_bust(self.player):
                done = True
                reward = -1
            else:
                done = False
                reward = 0
        else:  # stick: simulate the dealer's fixed policy until the game ends
            done = True
            while sum_hand(self.dealer) < 17:
                self.dealer.append(draw_card(self.np_random))
            reward = cmp(score(self.player), score(self.dealer))
            # With self.natural, a natural-blackjack win pays 1.5x.
            if self.natural and is_natural(self.player) and reward == 1:
                reward = 1.5
        return self._get_obs(), reward, done, {}

    def _get_obs(self):
        return (sum_hand(self.player), self.dealer[0], usable_ace(self.player))

    def reset(self):
        # Deal two cards each.
        self.dealer = draw_hand(self.np_random)
        self.player = draw_hand(self.np_random)

        # Auto-hit for the player while below 12 (hitting can never bust).
        while sum_hand(self.player) < 12:
            self.player.append(draw_card(self.np_random))

        return self._get_obs()
3228255 | <filename>ping-command.py
@bot.command() #Ping command
async def ping(ctx):
await ctx.send(f"Pong 🏓")
| StarcoderdataPython |
3436665 | import argparse
import logging
import structlog
from dcicutils.es_utils import create_es_client
from dcicutils.ff_utils import get_health_page
log = structlog.getLogger(__name__)
EPILOG = __doc__
def main():
    """
    Simple command to adjust settings on the Kibana index in ES, so that
    searches against all indices do not create issues due to default config.

    Requires exactly one of --es-endpoint or --env.
    """
    logging.basicConfig()
    # Loading app will have configured from config file. Reconfigure here:
    logging.getLogger('encoded').setLevel(logging.INFO)

    parser = argparse.ArgumentParser(  # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
        description="Configure Kibana Index", epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--es-endpoint', help='Elasticsearch endpoint, including port')
    parser.add_argument('--env', help='Use the Elasticsearch associated with this EB environment')
    args = parser.parse_args()

    # require either --es-endpoint or --env (not both).
    # BUG FIX: the original condition `not args.es_endpoint or args.env`
    # parsed as `(not args.es_endpoint) or args.env`, so the "gave neither"
    # branch fired whenever --env was supplied (alone or with --es-endpoint).
    if not args.es_endpoint and not args.env:
        log.error('configure_kibana_index: must provide either --es-endpoint '
                  'or --env to this command! You gave neither.')
        return
    if args.es_endpoint and args.env:
        log.error('configure_kibana_index: must provide either --es-endpoint '
                  'or --env to this command! You gave both.')
        return
    if args.es_endpoint:
        use_es = args.es_endpoint
    else:
        use_es = get_health_page(ff_env=args.env)['elasticsearch']

    # create client and ensure kibana index exists
    client = create_es_client(use_es, use_aws_auth=True)
    if not client.indices.exists(index='.kibana'):
        log.error('configure_kibana_index: .kibana index does not exist for'
                  'endpoints %s' % use_es)
        return
    kibana_settings = {'max_result_window': 100000}
    client.indices.put_settings(index='.kibana', body=kibana_settings)
    log.info('configure_kibana_index: successfully changed settings %s'
             % list(kibana_settings.keys()))
| StarcoderdataPython |
4947991 | # -*- coding: utf-8 -*-
from typing import Optional, Union
import pandas as pd
import typic
from standard_precip.spi import SPI
from tstoolbox import tsutils
def _nlarge_nsmall(pe_data, nlargest, nsmallest, groupby):
    """Select the N largest and/or smallest values of `pe_data` per `groupby` period.

    Returns `pe_data` unchanged when neither bound is given. Otherwise
    returns the per-period extremes re-indexed onto a daily date range
    (non-selected days become NaN). With both bounds set, the smallest and
    largest selections are concatenated column-wise.
    """
    if nlargest is None and nsmallest is None:
        return pe_data
    nlarge = pd.Series()
    nsmall = pd.Series()
    if nlargest is not None:
        # Per resample period, keep the nlargest rows by the first column.
        nlarge = pe_data.resample(groupby).apply(
            lambda x: x.nlargest(int(nlargest), x.columns[0])
        )
        nlarge = nlarge.droplevel(0)  # drop the resample-period index level
        nlarge.sort_index(inplace=True)
        nlarge = nlarge.reindex(
            pd.date_range(start=nlarge.index[0], end=nlarge.index[-1], freq="D")
        )
    if nsmallest is not None:
        nsmall = pe_data.resample(groupby).apply(
            lambda x: x.nsmallest(int(nsmallest), x.columns[0])
        )
        nsmall = nsmall.droplevel(0)
        nsmall.sort_index(inplace=True)
        nsmall = nsmall.reindex(
            pd.date_range(start=nsmall.index[0], end=nsmall.index[-1], freq="D")
        )
    if nsmallest is not None and nlargest is None:
        return nsmall
    if nsmallest is None and nlargest is not None:
        return nlarge
    return pd.concat([nsmall, nlarge], axis="columns")
@tsutils.transform_args(source_units=tsutils.make_list)
@typic.al
def spei(
    rainfall: Optional[Union[tsutils.IntGreaterEqualToOne, str]],
    pet: Optional[Union[tsutils.IntGreaterEqualToOne, str]],
    source_units,
    nsmallest=None,
    nlargest=None,
    groupby="M",
    fit_type="lmom",
    dist_type="gam",
    scale=1,
    input_ts="-",
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    skiprows=None,
    index_type="datetime",
    names=None,
    print_input=False,
):
    """Compute the SPEI (standardized precipitation-evapotranspiration index).

    Reads rainfall and PET series (converted to mm), forms their difference,
    fits the chosen distribution via standard_precip.SPI, and optionally
    filters to per-`groupby` extremes via _nlarge_nsmall.

    NOTE(review): input_ts/start_date/end_date/dropna/clean/round_index/
    skiprows/index_type/names/print_input are accepted but not referenced in
    this body -- presumably kept for CLI-signature parity with tstoolbox;
    confirm before removing.
    """
    from tstoolbox.tstoolbox import read

    tsd = read(
        rainfall,
        pet,
        names=["rainfall", "pet"],
        source_units=source_units,
        target_units=["mm", "mm"],
    )
    # Water balance: precipitation minus potential evapotranspiration.
    tsd["pe"] = tsd["rainfall:mm"] - tsd["pet:mm"]
    tsd["date"] = tsd.index
    spi = SPI()
    # def calculate(self, df: pd.DataFrame, date_col: str, precip_cols: list, freq: str="M",
    #               scale: int=1, freq_col: str=None, fit_type: str='lmom', dist_type: str='gam',
    #               **dist_kwargs) -> pd.DataFrame:
    tsd = tsutils.asbestfreq(tsd)
    ndf = spi.calculate(
        tsd,
        "date",
        "pe",
        freq=tsd.index.freqstr,
        scale=scale,
        fit_type=fit_type,
        dist_type=dist_type,
    )
    return _nlarge_nsmall(ndf, nlargest, nsmallest, groupby)
@tsutils.transform_args(source_units=tsutils.make_list)
@typic.al
def pe(
    rainfall: Optional[Union[tsutils.IntGreaterEqualToOne, str]],
    pet: Optional[Union[tsutils.IntGreaterEqualToOne, str]],
    source_units,
    nsmallest=None,
    nlargest=None,
    groupby="M",
    window=30,
    min_periods=None,
    center=None,
    win_type=None,
    closed=None,
    input_ts="-",
    start_date=None,
    end_date=None,
    dropna="no",
    clean=False,
    round_index=None,
    names=None,
    target_units="mm",
    print_input=False,
):
    """Rolling-sum precipitation-minus-PET (P-E) series.

    Reads rainfall and PET (converted to mm), takes their difference,
    converts to `target_units`, applies a rolling sum over `window`, and
    optionally filters to per-`groupby` extremes via _nlarge_nsmall.

    NOTE(review): input_ts/start_date/end_date/dropna/clean/round_index/
    names/print_input are accepted but unused in this body -- presumably for
    CLI parity; confirm. Also note the call into the private
    tsutils._normalize_units helper.
    """
    from tstoolbox.tstoolbox import read

    tsd = read(
        rainfall,
        pet,
        names=["rainfall", "pet"],
        source_units=source_units,
        target_units=["mm", "mm"],
    )
    pe_data = tsd["rainfall:mm"] - tsd["pet:mm"]
    pe_data = tsutils._normalize_units(pe_data, "mm", target_units)
    pe_data = (
        pe_data.astype(float)
        .rolling(
            window,
            min_periods=min_periods,
            center=center,
            win_type=win_type,
            closed=closed,
        )
        .sum()
    )
    return _nlarge_nsmall(pe_data, nlargest, nsmallest, groupby)
| StarcoderdataPython |
181906 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 17:38:58 2019
"""
from PyQt5.QtWidgets import (QPushButton, QDialog, QDialogButtonBox,
QLineEdit, QFormLayout, QMessageBox,
QFileDialog, QLabel)
import os
class FileBrowser(QDialog):
    """Modal dialog for picking an image source (file or folder) and a mask file."""

    def __init__(self, *args, **kwargs):
        super(FileBrowser, self).__init__(*args, **kwargs)
        self.setWindowTitle("Open Files")
        self.setGeometry(100, 100, 800, 200)

        # Buttons for the three browse actions.
        self.button_opennd2 = QPushButton('Open image file')
        self.button_opennd2.setEnabled(True)
        self.button_opennd2.clicked.connect(self.getnd2path)
        self.button_opennd2.setToolTip("Browse for an image file")
        self.button_opennd2.setMaximumWidth(150)

        self.button_openfolder = QPushButton('Open image folder')
        self.button_openfolder.setEnabled(True)
        self.button_openfolder.clicked.connect(self.getfolder)
        self.button_openfolder.setToolTip("Browse for folder with images")
        self.button_openfolder.setMaximumWidth(150)

        self.button_openhdf = QPushButton('Open mask file')
        self.button_openhdf.setEnabled(True)
        self.button_openhdf.clicked.connect(self.gethdfpath)
        self.button_openhdf.setToolTip("Browse for a mask file")
        self.button_openhdf.setMaximumWidth(150)

        # Name to use when creating a fresh mask (.h5) file.
        self.newhdfentry = QLineEdit()
        self.newhdfentry.setText("newmaskfile")

        # Selected paths; empty string means "nothing chosen yet".
        self.nd2name = ''
        self.hdfname = ''

        flo = QFormLayout()
        QBtn = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
        self.buttonBox = QDialogButtonBox(QBtn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)

        self.labelnd2 = QLabel()
        self.labelnd2.setText('No image file (.nd2, .tif, .tiff) selected')
        self.labelhdf = QLabel()
        self.labelhdf.setText('No mask file (.h5, .tif, .tiff) selected')
        self.labelfolder = QLabel()
        self.labelfolder.setText('No folder selected')

        flo.addRow(self.labelnd2, self.button_opennd2)
        flo.addRow(self.labelfolder, self.button_openfolder)
        flo.addRow(self.labelhdf, self.button_openhdf)
        flo.addRow('If no hdf file already exists, give a name to create a new file', self.newhdfentry)
        flo.addWidget(self.buttonBox)
        self.setLayout(flo)

    def getnd2path(self):
        """Let the user pick a single image file; clears any folder selection."""
        self.nd2name, _ = QFileDialog.getOpenFileName(self, 'Open image file', '',
                          'Image files (*.nd2 *.tif *.tiff *.jpg *.jpeg *.png *.bmp '
                          '*.pbm *.pgm *.ppm *.pxm *.pnm *.jp2 '
                          '*.TIF *.TIFF *.JPG *.JPEG *.PNG *.BMP '
                          '*.PBM *.PGM *.PPM *.PXM *.PNM *.JP2)')
        if self.nd2name != '':
            self.labelnd2.setText(self.nd2name)
            self.labelfolder.setText('')

    def gethdfpath(self):
        """Let the user pick an existing mask file; clears the new-file name."""
        self.hdfname, _ = QFileDialog.getOpenFileName(self, 'Open mask file', '', 'Mask files (*.h5 *.tif *.tiff)')
        if self.hdfname != '':
            self.check_hdfpath()
            self.labelhdf.setText(self.hdfname)
            self.newhdfentry.setText("")

    def getfolder(self):
        """Let the user pick an image folder; clears any single-file selection."""
        self.nd2name = QFileDialog.getExistingDirectory(self, ("Select Image Folder"))
        if self.nd2name != '':
            self.labelfolder.setText(self.nd2name)
            self.labelnd2.setText('')

    def check_hdfpath(self):
        """Checks if hdf path already exists when loading tiff, to avoid
        data loss"""
        path, ext = os.path.splitext(self.hdfname)
        if ext == '.tiff' or ext == '.tif' or ext == '.TIFF' or ext == '.TIF':
            if os.path.isfile(path + '.h5'):
                QMessageBox.critical(self, 'Warning',
                                     'A .h5 file with the same name as the loaded '
                                     'tif exists already and will be overwritten.'
                                     ' Rename either the tif or the h5 file to '
                                     'avoid data loss.')
8182920 | # ActivitySim
# See full license in LICENSE.txt.
from builtins import range
from builtins import object
import logging
import numpy as np
import pandas as pd
from activitysim.core.util import quick_loc_series
logger = logging.getLogger(__name__)
NOT_IN_SKIM_ZONE_ID = -1
NOT_IN_SKIM_NAN = np.nan

ROW_MAJOR_LAYOUT = True


class OffsetMapper(object):
    """
    Utility to map skim zone ids to ordinal offsets (e.g. numpy array indices).

    Two internal representations, exactly one active at a time:
    * offset_int -- a constant added to each zone id (e.g. -1 maps 1-based
      zone ids onto a 0-based skim array).
    * offset_series -- a pandas Series indexed by zone id whose values are
      skim array offsets; duplicate values let many zone ids share one skim
      index (e.g. maz -> taz skim offsets). Slower but more flexible.
    """

    def __init__(self, offset_int=None, offset_list=None, offset_series=None):
        self.offset_int = self.offset_series = None

        # At most one representation may be supplied.
        supplied = (offset_int is not None) + (offset_list is not None) + (offset_series is not None)
        assert supplied <= 1

        if offset_int is not None:
            self.set_offset_int(offset_int)
        elif offset_list is not None:
            self.set_offset_list(offset_list)
        elif offset_series is not None:
            self.set_offset_series(offset_series)

    def print_offset(self, message=''):
        """Debug helper: dump whichever representation is active."""
        assert (self.offset_int is not None) or (self.offset_series is not None)

        if self.offset_int is not None:
            print(f"{message} offset_int: {self.offset_int}")
        elif self.offset_series is not None:
            print(f"{message} offset_series:\n {self.offset_series}")
        else:
            print(f"{message} offset: None")

    def set_offset_series(self, offset_series):
        """
        Activate the series representation.

        Parameters
        ----------
        offset_series : pandas.Series
            zone_id index -> skim array offset (many-to-one allowed).
        """
        assert isinstance(offset_series, pd.Series)
        self.offset_series = offset_series
        self.offset_int = None

    def set_offset_list(self, offset_list):
        """
        Convenience setter from an integer list the size of the skim dimension
        with implicit index mapping (e.g. an omx_file.mapentries mapping).

        Parameters
        ----------
        offset_list : list of int
        """
        assert isinstance(offset_list, list) or isinstance(offset_list, np.ndarray)

        if isinstance(offset_list, np.ndarray):
            offset_list = list(offset_list)

        # Fast path: a contiguous run collapses to a simple int offset.
        first = offset_list[0]
        if offset_list == list(range(first, first + len(offset_list))):
            self.set_offset_int(-1 * first)
        else:
            mapping = pd.Series(data=list(range(len(offset_list))), index=offset_list)
            self.set_offset_series(mapping)

    def set_offset_int(self, offset_int):
        """
        Activate the int representation: offset added to each zone id.

        Parameters
        ----------
        offset_int : int
        """
        # accept any duck-typed integer (e.g. numpy.int64)
        assert int(offset_int) == offset_int
        self.offset_int = int(offset_int)
        self.offset_series = None

    def map(self, zone_ids):
        """
        Map zone_ids to skim array indexes.

        Parameters
        ----------
        zone_ids : list-like (numpy.ndarray, pandas.Int64Index, or pandas.Series)

        Returns
        -------
        offsets : numpy array of int
        """
        if self.offset_series is not None:
            assert self.offset_int is None
            assert isinstance(self.offset_series, pd.Series)
            # FIXME - faster to use series.map if zone_ids is a series?
            offsets = quick_loc_series(zone_ids, self.offset_series).fillna(NOT_IN_SKIM_ZONE_ID).astype(int)
        elif self.offset_int:
            assert self.offset_series is None
            # apply the integer offset, but keep NOT_IN_SKIM_ZONE_ID mapped to itself
            offsets = np.where(zone_ids == NOT_IN_SKIM_ZONE_ID, NOT_IN_SKIM_ZONE_ID, zone_ids + self.offset_int)
        else:
            offsets = zone_ids

        return offsets
class SkimDict(object):
    """
    A SkimDict object is a wrapper around a dict of multiple skim objects,
    where each object is identified by a key.

    All skims for the group are stored in a single contiguous 3D numpy array
    (skim_data); skim_info.block_offsets maps each key to that skim's index
    along the stacking axis, which permits fast 'fancy indexing' lookups.

    Note that keys are either strings or tuples of two strings (to support stacking of skims.)
    """

    def __init__(self, skim_tag, skim_info, skim_data):
        """
        Parameters
        ----------
        skim_tag : str
            tag identifying this skim group (e.g. 'taz') - used in log messages
        skim_info
            skim metadata object providing omx_shape, dtype_name, block_offsets,
            and offset_map attributes
        skim_data : numpy.ndarray
            3D array holding all stacked skims for this group
        """
        logger.info(f"SkimDict init {skim_tag}")

        self.skim_tag = skim_tag
        self.skim_info = skim_info
        self.usage = set()  # track keys of skims looked up (reported via get_skim_usage)

        self.offset_mapper = self._offset_mapper()  # (in function so subclass can override)

        self.omx_shape = skim_info.omx_shape
        self.skim_data = skim_data
        self.dtype = np.dtype(skim_info.dtype_name)  # so we can coerce if we have missing values

        # - skim_dim3 dict maps key1 to dict of key2 absolute offsets into block
        # DRV_COM_WLK_BOARDS: {'MD': 4, 'AM': 3, 'PM': 5}, ...
        self.skim_dim3 = {}
        for skim_key, offset in skim_info.block_offsets.items():
            if isinstance(skim_key, tuple):
                key1, key2 = skim_key
                self.skim_dim3.setdefault(key1, {})[key2] = offset
        logger.info(f"SkimDict.build_3d_skim_block_offset_table registered {len(self.skim_dim3)} 3d keys")

    def _offset_mapper(self):
        """
        Return an OffsetMapper to set self.offset_mapper for use with skims

        This allows subclasses (e.g. MazSkimDict) to 'tweak' the parent offset mapper.

        Returns
        -------
        OffsetMapper
        """
        offset_mapper = OffsetMapper()
        if self.skim_info.offset_map is not None:
            # explicit zone id list (e.g. from the omx file mapping)
            offset_mapper.set_offset_list(offset_list=self.skim_info.offset_map)
        else:
            # assume this is a one-based skim map
            offset_mapper.set_offset_int(-1)
        return offset_mapper

    @property
    def zone_ids(self):
        """
        Return list of zone_ids we grok in skim index order

        Returns
        -------
        ndarray of int domain zone_ids
        """
        if self.offset_mapper.offset_series is not None:
            # explicit mapping: the series index is the zone id domain
            ids = self.offset_mapper.offset_series.index.values
        else:
            # int offset mapping: reconstruct zone ids by un-applying the offset
            ids = np.array(range(self.omx_shape[0])) - self.offset_mapper.offset_int
        return ids

    def get_skim_usage(self):
        """
        return set of keys of skims looked up. e.g. {'DIST', 'SOV'}

        Returns
        -------
        set:
        """
        return self.usage

    def _lookup(self, orig, dest, block_offsets):
        """
        Return list of skim values of skims(s) at orig/dest for the skim(s) at block_offset in skim_data

        Supplying a single int block_offset makes the lookup 2-D
        Supplying a list of block_offsets (same length as orig and dest lists) allows 3D lookup

        Parameters
        ----------
        orig: list of orig zone_ids
        dest: list of dest zone_ids
        block_offsets: int or list of dim3 blockoffsets for the od pairs

        Returns
        -------
        Numpy.ndarray: list of skim values for od pairs
        """
        # fixme - remove?
        assert not (np.isnan(orig) | np.isnan(dest)).any()

        # only working with numpy in here
        orig = np.asanyarray(orig).astype(int)
        dest = np.asanyarray(dest).astype(int)

        mapped_orig = self.offset_mapper.map(orig)
        mapped_dest = self.offset_mapper.map(dest)
        if ROW_MAJOR_LAYOUT:
            result = self.skim_data[block_offsets, mapped_orig, mapped_dest]
        else:
            result = self.skim_data[mapped_orig, mapped_dest, block_offsets]

        # FIXME - should return nan if not in skim (negative indices wrap around)
        # NOTE: the fancy indexing above silently wraps negative offsets, so we
        # detect out-of-range od pairs afterwards and overwrite their values below
        in_skim = (mapped_orig >= 0) & (mapped_orig < self.omx_shape[0]) & \
                  (mapped_dest >= 0) & (mapped_dest < self.omx_shape[1])

        # check for bad indexes (other than NOT_IN_SKIM_ZONE_ID)
        assert (in_skim | (orig == NOT_IN_SKIM_ZONE_ID) | (dest == NOT_IN_SKIM_ZONE_ID)).all(), \
            f"{(~in_skim).sum()} od pairs not in skim"

        if not in_skim.all():
            # NOT_IN_SKIM_NAN is coerced to the skim dtype (so may not literally be nan)
            result = np.where(in_skim, result, NOT_IN_SKIM_NAN).astype(self.dtype)

        return result

    def lookup(self, orig, dest, key):
        """
        Return list of skim values of skims(s) at orig/dest in skim with the specified key (e.g. 'DIST')

        Parameters
        ----------
        orig: list of orig zone_ids
        dest: list of dest zone_ids
        key: str

        Returns
        -------
        Numpy.ndarray: list of skim values for od pairs
        """
        self.usage.add(key)

        block_offset = self.skim_info.block_offsets.get(key)
        assert block_offset is not None, f"SkimDict lookup key '{key}' not in skims"
        try:
            result = self._lookup(orig, dest, block_offset)
        except Exception as err:
            # log enough context (key and zone id ranges) to diagnose bad lookups
            logger.error("SkimDict lookup error: %s: %s", type(err).__name__, str(err))
            logger.error(f"key {key}")
            logger.error(f"orig max {orig.max()} min {orig.min()}")
            logger.error(f"dest max {dest.max()} min {dest.min()}")
            raise err

        return result

    def lookup_3d(self, orig, dest, dim3, key):
        """
        3D lookup of skim values of skims(s) at orig/dest for stacked skims indexed by dim3 selector

        The idea is that skims may be stacked in groups with a base key and a dim3 key (usually a time of day key)

        On import (from omx) skims stacks are represented by base and dim3 keys seperated by a double_underscore

        e.g. DRV_COM_WLK_BOARDS__AM indicates base skim key DRV_COM_WLK_BOARDS with a time of day (dim3) of 'AM'

        Since all the skims are stored in a single contiguous 3D array, we can use the dim3 key as a third index
        and thus rapidly get skim values for a list of (orig, dest, tod) tuples using index arrays ('fancy indexing')

        Parameters
        ----------
        orig: list of orig zone_ids
        dest: list of dest zone_ids
        dim3: list with one dim3 key (e.g. time of day) for each orig/dest pair
        key: str
            base key of the stacked skim group

        Returns
        -------
        Numpy.ndarray: list of skim values
        """
        self.usage.add(key)  # should we keep usage stats by (key, dim3)?

        assert key in self.skim_dim3, f"3d skim key {key} not in skims."

        # map dim3 to block_offsets
        skim_keys_to_indexes = self.skim_dim3[key]

        # skim_indexes = dim3.map(skim_keys_to_indexes).astype('int')
        try:
            block_offsets = np.vectorize(skim_keys_to_indexes.get)(dim3)  # this should be faster than map
            result = self._lookup(orig, dest, block_offsets)
        except Exception as err:
            # NOTE(review): if np.vectorize itself raised, block_offsets is unbound
            # and the last logger.error below would raise NameError - confirm intent
            logger.error("SkimDict lookup_3d error: %s: %s", type(err).__name__, str(err))
            logger.error(f"key {key}")
            logger.error(f"orig max {orig.max()} min {orig.min()}")
            logger.error(f"dest max {dest.max()} min {dest.min()}")
            logger.error(f"skim_keys_to_indexes: {skim_keys_to_indexes}")
            logger.error(f"dim3 {np.unique(dim3)}")
            logger.error(f"dim3 block_offsets {np.unique(block_offsets)}")
            raise err

        return result

    def wrap(self, orig_key, dest_key):
        """
        return a SkimWrapper for self
        """
        return SkimWrapper(self, orig_key, dest_key)

    def wrap_3d(self, orig_key, dest_key, dim3_key):
        """
        return a Skim3dWrapper for self
        """
        return Skim3dWrapper(self, orig_key, dest_key, dim3_key)
class SkimWrapper(object):
    """
    Dataframe-bound accessor over a SkimDict.

    The wrapper remembers which dataframe columns hold the origin and destination
    zone ids (orig_key / dest_key, fixed at construction), so expression files can
    simply write skims[key] and the O-D lookup happens implicitly against the
    currently bound dataframe. The dataframe itself is attached later via set_df
    because it may not exist yet (e.g. due to chunking) when the wrapper is built.

    Note that keys are either strings or tuples of two strings (to support stacking of skims.)
    """

    def __init__(self, skim_dict, orig_key, dest_key):
        """
        Parameters
        ----------
        skim_dict: SkimDict
        orig_key: str
            name of column in dataframe to use as implicit orig for lookups
        dest_key: str
            name of column in dataframe to use as implicit dest for lookups
        """
        self.skim_dict = skim_dict
        self.orig_key = orig_key
        self.dest_key = dest_key
        self.df = None

    def set_df(self, df):
        """
        Bind the dataframe that supplies the origin and destination ids.

        Parameters
        ----------
        df : DataFrame
            must contain both the orig_key and dest_key columns

        Returns
        -------
        self (to facilitiate chaining)
        """
        assert self.orig_key in df, f"orig_key '{self.orig_key}' not in df columns: {list(df.columns)}"
        assert self.dest_key in df, f"dest_key '{self.dest_key}' not in df columns: {list(df.columns)}"
        self.df = df
        return self

    def lookup(self, key, reverse=False):
        """
        Look up skim values for every row of the bound dataframe.
        Generally not called by the user - use __getitem__ instead.

        Parameters
        ----------
        key : hashable
            The key (identifier) for this skim object
        reverse : bool (optional)
            False (default) means standard origin-destination lookup;
            True means destination-origin lookup

        Returns
        -------
        impedances: pd.Series
            skim values, indexed like the bound dataframe
        """
        assert self.df is not None, "Call set_df first"

        o_col, d_col = self.orig_key, self.dest_key
        if reverse:
            o_col, d_col = d_col, o_col

        values = self.skim_dict.lookup(self.df[o_col], self.df[d_col], key)

        return pd.Series(values, index=self.df.index)

    def reverse(self, key):
        """
        Skim value in the reverse (d-o) direction.
        """
        return self.lookup(key, reverse=True)

    def max(self, key):
        """
        Elementwise max of the o-d and d-o skim values.
        """
        assert self.df is not None, "Call set_df first"

        forward = self.skim_dict.lookup(self.df[self.orig_key], self.df[self.dest_key], key)
        backward = self.skim_dict.lookup(self.df[self.dest_key], self.df[self.orig_key], key)

        return pd.Series(np.maximum(forward, backward), index=self.df.index)

    def __getitem__(self, key):
        """
        Standard o-d lookup for an available skim (df and column names implicit).

        Parameters
        ----------
        key : hashable
            The key (identifier) for the skim object

        Returns
        -------
        impedances: pd.Series with the same index as df
        """
        return self.lookup(key)
class Skim3dWrapper(object):
    """
    Dataframe-bound accessor for 3D (orig, dest, dim3) skim lookups.

    Works like SkimWrapper, except a third column (dim3_key, typically a
    time-of-day key such as 'AM', 'MD', 'PM') selects which stacked skim
    slice to read for each row.
    """

    def __init__(self, skim_dict, orig_key, dest_key, dim3_key):
        """
        Parameters
        ----------
        skim_dict: SkimDict
        orig_key: str
            name of column of zone_ids in dataframe to use as implicit orig for lookups
        dest_key: str
            name of column of zone_ids in dataframe to use as implicit dest for lookups
        dim3_key: str
            name of column of dim3 keys in dataframe to use as implicit third key for 3D lookups
            e.g. string column with time_of_day keys (such as 'AM', 'MD', 'PM', etc.)
        """
        self.skim_dict = skim_dict
        self.orig_key = orig_key
        self.dest_key = dest_key
        self.dim3_key = dim3_key
        self.df = None

    def set_df(self, df):
        """
        Bind the dataframe that supplies the orig, dest, and dim3 values.

        Parameters
        ----------
        df : DataFrame
            must contain the orig_key, dest_key, and dim3_key columns

        Returns
        -------
        self (to facilitiate chaining)
        """
        for label, column in (('orig_key', self.orig_key),
                              ('dest_key', self.dest_key),
                              ('dim3_key', self.dim3_key)):
            assert column in df, f"{label} '{column}' not in df columns: {list(df.columns)}"
        self.df = df
        return self

    def __getitem__(self, key):
        """
        Look up skim values for every row of the bound dataframe
        (df and orig/dest/dim3 column names implicit).

        Parameters
        ----------
        key : hashable
            base key of the stacked skim group

        Returns
        -------
        impedances: pd.Series with the same index as df
        """
        assert self.df is not None, "Call set_df first"

        o_zones = self.df[self.orig_key].astype('int')
        d_zones = self.df[self.dest_key].astype('int')
        tod_keys = self.df[self.dim3_key]

        values = self.skim_dict.lookup_3d(o_zones, d_zones, tod_keys, key)

        return pd.Series(values, self.df.index)
class MazSkimDict(SkimDict):
    """
    MazSkimDict provides a facade that allows skim-like lookup by maz orig,dest zone_id
    when there are often too many maz zones to create maz skims.

    Dependencies: network_los.load_data must have already loaded: taz skim_dict, maz_to_maz_df, and maz_taz_df

    It performs lookups from a sparse list of maz-maz od pairs on selected attributes (e.g. WALKDIST)
    where accuracy for nearby od pairs is critical. And is backed by a fallback taz skim dict
    to return values of for more distant pairs (or for skims that are not attributes in the maz-maz table.)
    """

    def __init__(self, skim_tag, network_los, taz_skim_dict):
        """
        we need network_los because we have dependencies on network_los.load_data (e.g. maz_to_maz_df, maz_taz_df,
        and the fallback taz skim_dict)

        We require taz_skim_dict as an explicit parameter to emphasize that we are piggybacking on taz_skim_dict's
        preexisting skim_data and skim_info, rather than instantiating duplicate copies thereof.

        Note, however, that we override _offset_mapper (called by super.__init__) to create our own
        custom self.offset_mapper that maps directly from MAZ zone_ids to TAZ skim array indexes

        Parameters
        ----------
        skim_tag: str
        network_los: Network_LOS
        taz_skim_dict: SkimDict
        """
        # must be set before super().__init__, which calls our _offset_mapper override
        self.network_los = network_los

        super().__init__(skim_tag, taz_skim_dict.skim_info, taz_skim_dict.skim_data)
        assert self.offset_mapper is not None  # should have been set with _init_offset_mapper

        self.dtype = np.dtype(self.skim_info.dtype_name)
        self.base_keys = taz_skim_dict.skim_info.base_keys

        # sparse skims are the attribute columns of the maz-to-maz od pair table
        self.sparse_keys = list(set(network_los.maz_to_maz_df.columns) - {'OMAZ', 'DMAZ'})
        self.sparse_key_usage = set()

    def _offset_mapper(self):
        """
        return an OffsetMapper to map maz zone_ids to taz skim indexes
        Specifically, an offset_series with MAZ zone_id index and TAZ skim array offset values

        This is called by super().__init__ AFTER self.network_los has been set.

        Returns
        -------
        OffsetMapper
        """
        # start with a series with MAZ zone_id index and TAZ zone id values
        maz_to_taz = self.network_los.maz_taz_df[['MAZ', 'TAZ']].set_index('MAZ').sort_values(by='TAZ').TAZ

        # use taz offset_mapper to create series mapping directly from MAZ to TAZ skim index
        taz_offset_mapper = super()._offset_mapper()
        maz_to_skim_offset = taz_offset_mapper.map(maz_to_taz)

        if isinstance(maz_to_skim_offset, np.ndarray):
            # int-offset taz mappers return a bare ndarray, so restore the MAZ index
            # NOTE(review): original comment flagged this line as a "bug"; the conversion
            # also makes the ndarray branch below unreachable - confirm intended behavior
            maz_to_skim_offset = pd.Series(maz_to_skim_offset, maz_to_taz.index)

        # MAZ
        # 19062    330  <- The TAZ would be, say, 331, and the offset is 330
        # 8429     330
        # 9859     331

        assert isinstance(maz_to_skim_offset, np.ndarray) or isinstance(maz_to_skim_offset, pd.Series)
        if isinstance(maz_to_skim_offset, pd.Series):
            offset_mapper = OffsetMapper(offset_series=maz_to_skim_offset)
        elif isinstance(maz_to_skim_offset, np.ndarray):
            # unreachable after the ndarray -> Series conversion above
            offset_mapper = OffsetMapper(offset_list=maz_to_skim_offset)

        return offset_mapper

    def get_skim_usage(self):
        # report both sparse (maz-maz) and backstop (taz) keys that were looked up
        return self.sparse_key_usage.union(self.usage)

    def sparse_lookup(self, orig, dest, key):
        """
        Get impedence values for a set of origin, destination pairs.

        Values come from the sparse maz-to-maz table where available; missing pairs
        fall back to the taz skims, optionally distance-blended with the maz values.

        Parameters
        ----------
        orig : 1D array
        dest : 1D array
        key : str
            skim key

        Returns
        -------
        values : numpy 1D array
        """
        self.sparse_key_usage.add(key)

        # 0 means no blending configured for this key
        max_blend_distance = self.network_los.max_blend_distance.get(key, 0)

        if max_blend_distance == 0:
            blend_distance_skim_name = None
        else:
            blend_distance_skim_name = self.network_los.blend_distance_skim_name

        # fixme - remove?
        assert not (np.isnan(orig) | np.isnan(dest)).any()

        # we want values from mazpairs, where we have them
        values = self.network_los.get_mazpairs(orig, dest, key)

        is_nan = np.isnan(values)  # od pairs missing from the sparse maz-maz table

        if max_blend_distance > 0:
            backstop_values = super().lookup(orig, dest, key)

            # get distance skim if a different key was specified by blend_distance_skim_name
            if (blend_distance_skim_name != key):
                distance = self.network_los.get_mazpairs(orig, dest, blend_distance_skim_name)
            else:
                distance = values

            # for distances less than max_blend_distance, we blend maz-maz and skim backstop values
            # shorter distances have less fractional backstop, and more maz-maz
            # beyond max_blend_distance, just use the skim values
            backstop_fractions = np.minimum(distance / max_blend_distance, 1)

            values = np.where(is_nan,
                              backstop_values,
                              backstop_fractions * backstop_values + (1 - backstop_fractions) * values)

        elif is_nan.any():

            if key in self.base_keys:
                # replace nan values using simple backstop without blending
                backstop_values = super().lookup(orig, dest, key)
                values = np.where(is_nan, backstop_values, values)
            else:
                # FIXME - if no backstop skim, then return 0 (which conventionally means "not available")
                values = np.where(is_nan, 0, values)

        # want to return same type as backstop skim
        values = values.astype(self.dtype)

        return values

    def lookup(self, orig, dest, key):
        """
        Return list of skim values of skims(s) at orig/dest in skim with the specified key (e.g. 'DIST')

        Look up in sparse table (backed by taz skims) if key is a sparse_key, otherwise look up in taz skims
        For taz skim lookups, the offset_mapper will convert maz zone_ids directly to taz skim indexes.

        Parameters
        ----------
        orig: list of orig zone_ids
        dest: list of dest zone_ids
        key: str

        Returns
        -------
        Numpy.ndarray: list of skim values for od pairs
        """
        if key in self.sparse_keys:
            # logger.debug(f"MazSkimDict using SparseSkimDict for key '{key}'")
            values = self.sparse_lookup(orig, dest, key)
        else:
            values = super().lookup(orig, dest, key)

        return values
class DataFrameMatrix(object):
    """
    Utility class to allow a pandas dataframe to be treated like a 2-D array,
    indexed by rowid, colname

    For use in vectorized expressions where the desired values depend on both a row column selector
    e.g. size_terms.get(df.dest_taz, df.purpose)

    ::

        df = pd.DataFrame({'a': [1,2,3,4,5], 'b': [10,20,30,40,50]}, index=[100,101,102,103,104])

        dfm = DataFrameMatrix(df)

        dfm.get(row_ids=[100,100,103], col_ids=['a', 'b', 'a'])

        returns [1, 10, 4]
    """

    def __init__(self, df):
        """
        Parameters
        ----------
        df - pandas dataframe of uniform type
        """
        self.df = df
        self.data = df.values

        # map df index values to positional row offsets
        self.offset_mapper = OffsetMapper()
        self.offset_mapper.set_offset_list(list(df.index))

        # map column names to positional column offsets
        self.cols_to_indexes = {k: v for v, k in enumerate(df.columns)}

    def get(self, row_ids, col_ids):
        """
        Look up one value per row_id, taken from the column named by the corresponding col_id.

        Parameters
        ----------
        row_ids - list of row_ids (df index values)
        col_ids - list of column names, one per row_id,
            specifying column from which the value for that row should be retrieved

        Returns
        -------
        series with one row per row_id, with the value from the column specified in col_ids
        (a plain ndarray if row_ids is not a pandas.Series)

        Raises
        ------
        RuntimeError
            if any row_id is not present in the dataframe's index
        """
        # this should be faster than map
        col_indexes = np.vectorize(self.cols_to_indexes.get)(col_ids)

        row_indexes = self.offset_mapper.map(np.asanyarray(row_ids))

        not_in_skim = (row_indexes == NOT_IN_SKIM_ZONE_ID)
        if not_in_skim.any():
            # row_indexes may be a pandas.Series (offset_series mapping) or a plain
            # ndarray (offset_int mapping); normalize the mask (and the id arrays) so
            # the boolean indexing below works either way - the previous `.values`
            # access crashed with AttributeError on the ndarray path, masking the
            # intended RuntimeError diagnostics
            not_in_skim = np.asanyarray(not_in_skim)
            logger.warning(f"DataFrameMatrix: {not_in_skim.sum()} row_ids of {len(row_ids)} not in skim.")
            logger.warning(f"row_ids: {np.asanyarray(row_ids)[not_in_skim]}")
            logger.warning(f"col_ids: {np.asanyarray(col_ids)[not_in_skim]}")
            raise RuntimeError(f"DataFrameMatrix: {not_in_skim.sum()} row_ids of {len(row_ids)} not in skim.")

        assert (row_indexes >= 0).all(), f"{row_indexes}"

        result = self.data[row_indexes, col_indexes]

        # FIXME - if ids (or col_ids?) is a series, return series with same index?
        if isinstance(row_ids, pd.Series):
            result = pd.Series(result, index=row_ids.index)

        return result