| id | text | dataset_id |
|---|---|---|
5164617 | from . import test_event_sale
from . import test_event_sale_ui
| StarcoderdataPython |
275748 | <filename>beekeeper/hive.py
"""
Provides the Hive class to work with JSON hive files, both remotely
retrieved and opened from a local file
"""
from __future__ import division
try:
from urllib2 import URLError
except ImportError:
from urllib.error import URLError
import json
import os
from beekeeper.comms import download_as_json, ResponseException
from beekeeper.exceptions import HiveLoadedOverHTTP, MissingHive, VersionNotInHive
class Hive(dict):
    """
    The Hive class is invisible to the developer; it wraps the parsed JSON and
    provides methods for working with the information in it. Right now, most
    methods have to do with getting the JSON and parsing version information.
    """

    def __init__(self, **kwargs):
        # A Hive is simply a dict holding the parsed hive JSON.
        dict.__init__(self, **kwargs)

    @classmethod
    def from_file(cls, fname, version=None, require_https=True):
        """
        Create a Hive object based on JSON located in a local file.

        :param fname: path to the local hive JSON file.
        :param version: optional version to resolve to; if it differs from the
            file's own version, the matching hive is fetched from the URL
            listed in the file (see from_version).
        :param require_https: forwarded to from_version for any follow-up
            remote fetch.
        :raises MissingHive: if the file does not exist.
        """
        if os.path.exists(fname):
            with open(fname) as hive_file:
                return cls(**json.load(hive_file)).from_version(version, require_https=require_https)
        else:
            raise MissingHive(fname)

    @classmethod
    def from_url(cls, url, version=None, require_https=False):
        """
        Create a Hive object based on JSON located at a remote URL.

        :raises HiveLoadedOverHTTP: if the URL is plain HTTP while HTTPS is
            required; the loaded hive is attached to the exception so the
            caller can still use it if it explicitly chooses to.
        :raises MissingHive: if the download fails.
        """
        if "https://" in url:
            # An HTTPS URL always upgrades the requirement.
            require_https = True
        if "http://" in url and require_https:
            # Load the hive anyway by recursing with the requirement dropped,
            # then raise with the result attached so callers can opt in.
            try:
                hive = cls.from_url(url, version=version, require_https=False)
            except HiveLoadedOverHTTP as err:
                # The recursive call raised; reuse the hive it carried.
                hive = err.hive
            raise HiveLoadedOverHTTP(url, hive)
        else:
            try:
                return cls(**download_as_json(url)).from_version(version, require_https)
            except (ResponseException, URLError):
                raise MissingHive(url)

    @classmethod
    def from_domain(cls, domain, version=None, require_https=True):
        """
        Try to find a hive for the given domain; raise an error if we have to
        failover to HTTP and haven't explicitly suppressed it in the call.
        """
        url = "https://" + domain + "/api/hive.json"
        try:
            return cls.from_url(url, version=version, require_https=require_https)
        except MissingHive:
            # HTTPS fetch failed outright; fall back to plain HTTP (from_url
            # will still raise HiveLoadedOverHTTP unless suppressed).
            url = "http://" + domain + "/api/hive.json"
            return cls.from_url(url, version=version, require_https=require_https)

    def from_version(self, version, require_https=False):
        """
        Create a Hive object based on the information in the object
        and the version passed into the method.

        Returns self unchanged when no version is requested or this hive
        already matches; otherwise follows the requested version's URL.
        """
        if version is None or self.version() == version:
            return self
        else:
            return Hive.from_url(self.get_version_url(version), require_https=require_https)

    def get_version_url(self, version):
        """
        Retrieve the URL for the designated version of the hive.

        :raises VersionNotInHive: if no listed version matches (or the
            matching entry has no "location" key).
        """
        for each_version in self.other_versions():
            if version == each_version["version"] and "location" in each_version:
                return each_version.get("location")
        raise VersionNotInHive(version)

    def version(self):
        """
        Retrieve the current hive's version, if present.
        """
        return self.get("versioning", {}).get("version", None)

    def other_versions(self):
        """
        Generate a list of other versions in the hive.
        """
        return self.get("versioning", {}).get("other_versions", [])
| StarcoderdataPython |
370074 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
# **********************************************************************************#
# File: PMS gateway file
# Author: Myron
# **********************************************************************************#
from utils.dict import (
DefaultDict,
CompositeDict
)
from . base_gateway import BasePMSGateway
from .. configs import logger
from .. trade import (
OrderState,
OrderStateMessage
)
class PMSGateway(BasePMSGateway):
    """
    Portfolio management (PMS) gateway.

    * Manages account position information
    * Manages account order placement information
    * Manages account trade confirmation information
    """

    def __init__(self, clock=None, accounts=None, data_portal=None,
                 position_info=None, initial_value_info=None,
                 order_info=None, trade_info=None, benchmark_info=None,
                 total_commission_info=None, event_engine=None,
                 ctp_gateway=None):
        """
        Portfolio management configuration.

        Args:
            clock(clock): clock
            accounts(dict): account manager
            data_portal(data_portal): data module
            position_info(dict): account positions |-> dict(account: dict(date: dict))
            initial_value_info(dict): initial equity info |-> dict(account: dict)
            order_info(dict): order placements |-> dict(account: dict(order_id: order))
            trade_info(dict): trade records |-> dict(account: dict(date: list))
            benchmark_info(dict): benchmark equity curve per user |-> dict(account: string)
            total_commission_info(dict): commission records |-> dict(account: dict(date: float))
            event_engine(obj): event engine
            ctp_gateway(obj): subscriber gateway.
        """
        super(PMSGateway, self).__init__()
        self.clock = clock
        self.accounts = accounts
        self.data_portal = data_portal
        self.position_info = position_info or DefaultDict(DefaultDict(dict))
        self.initial_value_info = initial_value_info or DefaultDict(dict)
        self.order_info = order_info or DefaultDict(dict)
        self.trade_info = trade_info or DefaultDict(DefaultDict(list))
        self.benchmark_info = benchmark_info or dict()
        self.total_commission_info = total_commission_info or DefaultDict(DefaultDict(0))
        self.event_engine = event_engine
        self.ctp_gateway = ctp_gateway
        # NOTE(review): dict.iteritems() is Python 2 only; this also assumes
        # `accounts` is a mapping — raises if the default None is kept.
        self._account_name_id_map = {account: config.account_id for account, config in self.accounts.iteritems()}
        self._account_id_name_map = {config.account_id: account for account, config in self.accounts.iteritems()}

    @classmethod
    def from_config(cls, clock, sim_params, data_portal, accounts=None, event_engine=None, ctp_gateway=None):
        """
        Build a PMS Gateway from configuration.
        """
        position_info = DefaultDict(DefaultDict(dict))
        initial_value_info = DefaultDict(dict)
        total_commission_info = DefaultDict(DefaultDict(0))
        benchmark_info = dict()
        accounts = accounts or sim_params.accounts
        # Seed every account's benchmark with the simulation's major benchmark.
        for account, config in accounts.iteritems():
            account_id = config.account_id
            benchmark_info[account_id] = sim_params.major_benchmark
        return cls(clock=clock, accounts=accounts, data_portal=data_portal,
                   position_info=position_info, initial_value_info=initial_value_info,
                   benchmark_info=benchmark_info, total_commission_info=total_commission_info,
                   event_engine=event_engine, ctp_gateway=ctp_gateway)

    def send_order(self, order, account_id=None):
        """
        Send order event.

        Marks the order as submitted, records it, and forwards it to the
        trader gateway.

        Args:
            order(obj): order object
            account_id(string): account id
        """
        logger.info('[PMS Gateway] [Send order] account_id: {}, order_id: {}, '
                    'order submitted.'.format(account_id, order.order_id))
        order.state = OrderState.ORDER_SUBMITTED
        order.state_message = OrderStateMessage.OPEN
        self.order_info[account_id][order.order_id] = order
        logger.info('[PMS Gateway] [Send order] account_id: {}, order_id: {}, '
                    'subscribe trade response of current order.'.format(account_id, order.order_id))
        self.ctp_gateway.trader_gateway.send_order(order)

    def cancel_order(self, order_id, account_id=None):
        """
        Cancel order event.

        Only flips the local order state; no cancel request is forwarded to
        the trader gateway here.

        Args:
            order_id(string): order id
            account_id(string): account id
        """
        target_order = self.order_info[account_id].get(order_id)
        if target_order is None:
            logger.warn('[PMS Gateway] [Cancel order] account_id: {}, order_id: {}, '
                        'can not find order.'.format(account_id, order_id))
            return
        target_order.state = OrderState.CANCELED
        target_order.state_message = OrderStateMessage.CANCELED
        logger.info('[PMS Gateway] [Cancel order] account_id: {}, order_id: {}, '
                    'order cancelled.'.format(account_id, order_id))

    def deal_with_trade(self, trade=None, **kwargs):
        """
        Deal with trade event: append the trade to the per-order trade list.
        """
        logger.info('[PMS Gateway] [deal with trade] trade_id: {}, publish on_trade.'.format(trade.exchange_trade_id))
        account_id, order_id = trade.account_id, trade.order_id
        self.trade_info[account_id][order_id].append(trade)

    def deal_with_order(self, order_data, **kwargs):
        """
        Deal with order: update a recorded order from a subscription message.

        Args:
            order_data(dict): order data item
        """
        account_id = order_data['accountId']
        order_id = order_data['extOrdId']
        order = self.order_info[account_id].get(order_id)
        if order:
            order.update_from_subscribe(item=order_data)
        else:
            logger.warn('[PMS Gateway] [deal with order] account_id: {}, order_id: {}, '
                        'no relevant order in record.'.format(account_id, order_id))

    def get_positions(self, account_id):
        """
        Get positions (queried live from the CTP gateway).

        Args:
            account_id(string): account id
        """
        return self.ctp_gateway.query_position_detail(account_id)

    def get_orders(self, account_id):
        """
        Get orders recorded for an account.

        Args:
            account_id(string): account id
        """
        return self.order_info[account_id]

    def get_trades(self, account_id):
        """
        Get trades recorded for an account.

        Args:
            account_id(string): account id
        """
        return self.trade_info[account_id]

    def get_portfolio_info(self, account=None, info_date=None):
        """
        Get portfolio equity information at the given moment.

        Args:
            account(string): account name
            info_date(datetime.datetime): Optional, trading date

        Returns:
            CompositeDict: per-account snapshot of orders, positions, trades,
            commissions and initial value.
        """
        zipped_data = CompositeDict()
        accounts = [account] if account else self.accounts.keys()
        for account in accounts:
            # NOTE(review): order_info is documented as keyed by order_id, but
            # here it is read by info_date; likewise the other *_info dicts are
            # keyed by account name here vs. account_id elsewhere — confirm
            # which key convention callers rely on.
            orders = self.order_info[account].get(info_date, dict())
            positions = self.position_info[account].get(info_date, dict())
            trades = self.trade_info[account].get(info_date, list())
            total_commission = self.total_commission_info[account].get(info_date)
            previous_date = self.data_portal.calendar_service.get_direct_trading_day(info_date, 1, forward=False)
            previous_positions = \
                self.position_info[account].get(previous_date, self.initial_value_info[account]['positions'])
            zipped_data[account]['previous_positions'] = previous_positions
            zipped_data[account]['initial_value'] = self.initial_value_info[account]
            zipped_data[account]['orders'] = orders
            zipped_data[account]['positions'] = positions
            zipped_data[account]['trades'] = trades
            zipped_data[account]['total_commission'] = total_commission
        return zipped_data

    def to_dict(self):
        """
        Returns:
            dict: aggregated PMS information
        """
        return {
            'accounts': self.accounts,
            'orders': self.order_info,
            'positions': self.position_info,
            'initial_value': self.initial_value_info,
            'benchmark': self.benchmark_info,
            'total_commission': self.total_commission_info
        }
| StarcoderdataPython |
4936028 | # Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""L2 Utilities Library."""
import binascii
from textwrap import wrap
from enum import IntEnum
from resources.libraries.python.Constants import Constants
from resources.libraries.python.PapiExecutor import PapiExecutor
from resources.libraries.python.topology import Topology
from resources.libraries.python.ssh import exec_cmd_no_error
class L2VtrOp(IntEnum):
    """VLAN tag rewrite operation.

    Integer values match the VPP l2_interface_vlan_tag_rewrite API's
    vtr_op field; member names mirror the CLI rewrite-method names.
    """
    L2_VTR_DISABLED = 0
    L2_VTR_PUSH_1 = 1
    L2_VTR_PUSH_2 = 2
    L2_VTR_POP_1 = 3
    L2_VTR_POP_2 = 4
    L2_VTR_TRANSLATE_1_1 = 5
    L2_VTR_TRANSLATE_1_2 = 6
    L2_VTR_TRANSLATE_2_1 = 7
    L2_VTR_TRANSLATE_2_2 = 8
class L2Util(object):
    """Utilities for l2 configuration.

    NOTE(review): this module targets Python 2 (uses ``basestring``); the
    MAC helpers also assume Python 2 ``str``/``bytes`` semantics.
    """

    @staticmethod
    def mac_to_int(mac_str):
        """Convert MAC address from string format (e.g. 01:02:03:04:05:06) to
        integer representation (1108152157446).

        :param mac_str: MAC address in string representation.
        :type mac_str: str
        :returns: Integer representation of MAC address.
        :rtype: int
        """
        return int(mac_str.replace(':', ''), 16)

    @staticmethod
    def int_to_mac(mac_int):
        """Convert MAC address from integer representation (e.g. 1108152157446)
        to string format (01:02:03:04:05:06).

        :param mac_int: MAC address in integer representation.
        :type mac_int: int
        :returns: String representation of MAC address.
        :rtype: str
        """
        return ':'.join(wrap("{:012x}".format(mac_int), width=2))

    @staticmethod
    def mac_to_bin(mac_str):
        """Convert MAC address from string format (e.g. 01:02:03:04:05:06) to
        binary representation (\x01\x02\x03\x04\x05\x06).

        :param mac_str: MAC address in string representation.
        :type mac_str: str
        :returns: Binary representation of MAC address.
        :rtype: binary
        """
        return binascii.unhexlify(mac_str.replace(':', ''))

    @staticmethod
    def bin_to_mac(mac_bin):
        """Convert MAC address from binary representation
        (\x01\x02\x03\x04\x05\x06) to string format (e.g. 01:02:03:04:05:06).

        :param mac_bin: MAC address in binary representation.
        :type mac_bin: binary
        :returns: String representation of MAC address.
        :rtype: str
        """
        # hexlify yields 12 hex chars; regroup them in pairs with ':'.
        mac_str = ':'.join(binascii.hexlify(mac_bin)[i:i + 2]
                           for i in range(0, 12, 2))
        return str(mac_str.decode('ascii'))

    @staticmethod
    def vpp_add_l2fib_entry(node, mac, interface, bd_id, static_mac=1,
                            filter_mac=0, bvi_mac=0):
        """Create a static L2FIB entry on a VPP node.

        :param node: Node to add L2FIB entry on.
        :param mac: Destination mac address in string format 01:02:03:04:05:06.
        :param interface: Interface name or sw_if_index.
        :param bd_id: Bridge domain index.
        :param static_mac: Set to 1 to create static MAC entry.
            (Default value = 1)
        :param filter_mac: Set to 1 to drop packet that's source or destination
            MAC address contains defined MAC address. (Default value = 0)
        :param bvi_mac: Set to 1 to create entry that points to BVI interface.
            (Default value = 0)
        :type node: dict
        :type mac: str
        :type interface: str or int
        :type bd_id: int or str
        :type static_mac: int or str
        :type filter_mac: int or str
        :type bvi_mac: int or str
        """
        # Accept either an interface name or an already-resolved sw_if_index.
        if isinstance(interface, basestring):
            sw_if_index = Topology.get_interface_sw_index(node, interface)
        else:
            sw_if_index = interface
        cmd = 'l2fib_add_del'
        err_msg = 'Failed to add L2FIB entry on host {host}'.format(
            host=node['host'])
        args = dict(mac=L2Util.mac_to_bin(mac),
                    bd_id=int(bd_id),
                    sw_if_index=sw_if_index,
                    is_add=1,
                    static_mac=int(static_mac),
                    filter_mac=int(filter_mac),
                    bvi_mac=int(bvi_mac))
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

    @staticmethod
    def create_l2_bd(node, bd_id, flood=1, uu_flood=1, forward=1, learn=1,
                     arp_term=0):
        """Create an L2 bridge domain on a VPP node.

        :param node: Node where we wish to crate the L2 bridge domain.
        :param bd_id: Bridge domain index.
        :param flood: Enable/disable bcast/mcast flooding in the BD.
            (Default value = 1)
        :param uu_flood: Enable/disable unknown unicast flood in the BD.
            (Default value = 1)
        :param forward: Enable/disable forwarding on all interfaces in
            the BD. (Default value = 1)
        :param learn: Enable/disable MAC learning on all interfaces in the BD.
            (Default value = 1)
        :param arp_term: Enable/disable arp termination in the BD.
            (Default value = 0)
        :type node: dict
        :type bd_id: int or str
        :type flood: int or str
        :type uu_flood: int or str
        :type forward: int or str
        :type learn: int or str
        :type arp_term: int or str
        """
        cmd = 'bridge_domain_add_del'
        err_msg = 'Failed to create L2 bridge domain on host {host}'.format(
            host=node['host'])
        args = dict(bd_id=int(bd_id),
                    flood=int(flood),
                    uu_flood=int(uu_flood),
                    forward=int(forward),
                    learn=int(learn),
                    arp_term=int(arp_term),
                    is_add=1)
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

    @staticmethod
    def add_interface_to_l2_bd(node, interface, bd_id, shg=0, port_type=0):
        """Add an interface to the L2 bridge domain.

        Get SW IF ID and add it to the bridge domain.

        :param node: Node where we want to execute the command that does this.
        :param interface: Interface name.
        :param bd_id: Bridge domain index.
        :param shg: Split-horizon group index. (Default value = 0)
        :param port_type: Port mode: 0 - normal, 1 - BVI, 2 - UU_FWD.
            (Default value = 0)
        :type node: dict
        :type interface: str
        :type bd_id: int or str
        :type shg: int or str
        :type port_type: int or str
        """
        sw_if_index = Topology.get_interface_sw_index(node, interface)
        cmd = 'sw_interface_set_l2_bridge'
        err_msg = 'Failed to add interface {ifc} to L2 bridge domain on host ' \
                  '{host}'.format(ifc=interface, host=node['host'])
        args = dict(rx_sw_if_index=sw_if_index,
                    bd_id=int(bd_id),
                    shg=int(shg),
                    port_type=int(port_type),
                    enable=1)
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

    @staticmethod
    def vpp_add_l2_bridge_domain(node, bd_id, port_1, port_2, learn=True):
        """Add L2 bridge domain with 2 interfaces to the VPP node.

        :param node: Node to add L2BD on.
        :param bd_id: Bridge domain ID.
        :param port_1: First interface name added to L2BD.
        :param port_2: Second interface name added to L2BD.
        :param learn: Enable/disable MAC learn.
        :type node: dict
        :type bd_id: int
        :type port_1: str
        :type port_2: str
        :type learn: bool
        """
        sw_if_index1 = Topology.get_interface_sw_index(node, port_1)
        sw_if_index2 = Topology.get_interface_sw_index(node, port_2)
        learn_int = 1 if learn else 0
        # Three API calls batched in one executor: create the BD, then bridge
        # both interfaces into it.
        cmd1 = 'bridge_domain_add_del'
        args1 = dict(bd_id=int(bd_id),
                     flood=1,
                     uu_flood=1,
                     forward=1,
                     learn=learn_int,
                     arp_term=0,
                     is_add=1)
        cmd2 = 'sw_interface_set_l2_bridge'
        args2 = dict(rx_sw_if_index=sw_if_index1,
                     bd_id=int(bd_id),
                     shg=0,
                     port_type=0,
                     enable=1)
        args3 = dict(rx_sw_if_index=sw_if_index2,
                     bd_id=int(bd_id),
                     shg=0,
                     port_type=0,
                     enable=1)
        err_msg = 'Failed to add L2 bridge domain with 2 interfaces on host' \
                  ' {host}'.format(host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd1, **args1).add(cmd2, **args2).add(cmd2, **args3).\
                get_replies(err_msg).verify_replies(err_msg=err_msg)

    @staticmethod
    def vpp_setup_bidirectional_cross_connect(node, interface1, interface2):
        """Create bidirectional cross-connect between 2 interfaces on vpp node.

        :param node: Node to add bidirectional cross-connect.
        :param interface1: First interface name or sw_if_index.
        :param interface2: Second interface name or sw_if_index.
        :type node: dict
        :type interface1: str or int
        :type interface2: str or int
        """
        if isinstance(interface1, basestring):
            sw_iface1 = Topology().get_interface_sw_index(node, interface1)
        else:
            sw_iface1 = interface1
        if isinstance(interface2, basestring):
            sw_iface2 = Topology().get_interface_sw_index(node, interface2)
        else:
            sw_iface2 = interface2
        # Two xconnect calls, one per direction.
        cmd = 'sw_interface_set_l2_xconnect'
        args1 = dict(rx_sw_if_index=sw_iface1,
                     tx_sw_if_index=sw_iface2,
                     enable=1)
        args2 = dict(rx_sw_if_index=sw_iface2,
                     tx_sw_if_index=sw_iface1,
                     enable=1)
        err_msg = 'Failed to add L2 cross-connect between two interfaces on' \
                  ' host {host}'.format(host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg).\
                verify_replies(err_msg=err_msg)

    @staticmethod
    def vpp_setup_bidirectional_l2_patch(node, interface1, interface2):
        """Create bidirectional l2 patch between 2 interfaces on vpp node.

        :param node: Node to add bidirectional l2 patch.
        :param interface1: First interface name or sw_if_index.
        :param interface2: Second interface name or sw_if_index.
        :type node: dict
        :type interface1: str or int
        :type interface2: str or int
        """
        if isinstance(interface1, basestring):
            sw_iface1 = Topology().get_interface_sw_index(node, interface1)
        else:
            sw_iface1 = interface1
        if isinstance(interface2, basestring):
            sw_iface2 = Topology().get_interface_sw_index(node, interface2)
        else:
            sw_iface2 = interface2
        # Two patch calls, one per direction.
        cmd = 'l2_patch_add_del'
        args1 = dict(rx_sw_if_index=sw_iface1,
                     tx_sw_if_index=sw_iface2,
                     is_add=1)
        args2 = dict(rx_sw_if_index=sw_iface2,
                     tx_sw_if_index=sw_iface1,
                     is_add=1)
        err_msg = 'Failed to add L2 patch between two interfaces on' \
                  ' host {host}'.format(host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg).\
                verify_replies(err_msg=err_msg)

    @staticmethod
    def linux_add_bridge(node, br_name, if_1, if_2, set_up=True):
        """Bridge two interfaces on linux node.

        :param node: Node to add bridge on.
        :param br_name: Bridge name.
        :param if_1: First interface to be added to the bridge.
        :param if_2: Second interface to be added to the bridge.
        :param set_up: Change bridge interface state to up after create bridge.
            Optional. Default: True.
        :type node: dict
        :type br_name: str
        :type if_1: str
        :type if_2: str
        :type set_up: bool
        """
        cmd = 'brctl addbr {0}'.format(br_name)
        exec_cmd_no_error(node, cmd, sudo=True)
        cmd = 'brctl addif {0} {1}'.format(br_name, if_1)
        exec_cmd_no_error(node, cmd, sudo=True)
        cmd = 'brctl addif {0} {1}'.format(br_name, if_2)
        exec_cmd_no_error(node, cmd, sudo=True)
        if set_up:
            cmd = 'ip link set dev {0} up'.format(br_name)
            exec_cmd_no_error(node, cmd, sudo=True)

    @staticmethod
    def linux_del_bridge(node, br_name, set_down=True):
        """Delete bridge from linux node.

        ..note:: The network interface corresponding to the bridge must be
            down before it can be deleted!

        :param node: Node to delete bridge from.
        :param br_name: Bridge name.
        :param set_down: Change bridge interface state to down before delbr
            command. Optional. Default: True.
        :type node: dict
        :type br_name: str
        :type set_down: bool
        """
        if set_down:
            cmd = 'ip link set dev {0} down'.format(br_name)
            exec_cmd_no_error(node, cmd, sudo=True)
        cmd = 'brctl delbr {0}'.format(br_name)
        exec_cmd_no_error(node, cmd, sudo=True)

    @staticmethod
    def vpp_get_bridge_domain_data(node, bd_id=0xffffffff):
        """Get all bridge domain data from a VPP node. If a domain ID number is
        provided, return only data for the matching bridge domain.

        :param node: VPP node to get bridge domain data from.
        :param bd_id: Numeric ID of a specific bridge domain.
        :type node: dict
        :type bd_id: int
        :returns: List of dictionaries containing data for each bridge domain,
            or a single dictionary for the specified bridge domain.
        :rtype: list or dict
        """
        cmd = 'bridge_domain_dump'
        cmd_reply = 'bridge_domain_details'
        args = dict(bd_id=int(bd_id))
        err_msg = 'Failed to get L2FIB dump on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
        data = papi_resp.reply[0]['api_reply']
        # NOTE(review): the "dump all" sentinel is compared against
        # Constants.BITWISE_NON_ZERO — presumably equal to the 0xffffffff
        # default; confirm, otherwise the default call returns a dict.
        bd_data = list() if bd_id == Constants.BITWISE_NON_ZERO else dict()
        for bridge_domain in data:
            if bd_id == Constants.BITWISE_NON_ZERO:
                bd_data.append(bridge_domain[cmd_reply])
            else:
                if bridge_domain[cmd_reply]['bd_id'] == bd_id:
                    return bridge_domain[cmd_reply]
        return bd_data

    @staticmethod
    def l2_vlan_tag_rewrite(node, interface, tag_rewrite_method,
                            push_dot1q=True, tag1_id=None, tag2_id=None):
        """Rewrite tags in ethernet frame.

        :param node: Node to rewrite tags.
        :param interface: Interface on which rewrite tags.
        :param tag_rewrite_method: Method of tag rewrite (e.g. "pop-1",
            "translate-1-2"; mapped onto L2VtrOp members).
        :param push_dot1q: Optional parameter to disable to push dot1q tag
            instead of dot1ad.
        :param tag1_id: Optional tag1 ID for VLAN.
        :param tag2_id: Optional tag2 ID for VLAN.
        :type node: dict
        :type interface: str or int
        :type tag_rewrite_method: str
        :type push_dot1q: bool
        :type tag1_id: int
        :type tag2_id: int
        """
        tag1_id = int(tag1_id) if tag1_id else 0
        tag2_id = int(tag2_id) if tag2_id else 0
        # "pop-1" -> L2VtrOp.L2_VTR_POP_1, etc.
        vtr_oper = getattr(L2VtrOp, 'L2_VTR_{}'.format(
            tag_rewrite_method.replace('-', '_').upper()))
        if isinstance(interface, basestring):
            iface_key = Topology.get_interface_by_name(node, interface)
            sw_if_index = Topology.get_interface_sw_index(node, iface_key)
        else:
            sw_if_index = interface
        cmd = 'l2_interface_vlan_tag_rewrite'
        args = dict(sw_if_index=sw_if_index,
                    vtr_op=int(vtr_oper),
                    push_dot1q=int(push_dot1q),
                    tag1=tag1_id,
                    tag2=tag2_id)
        err_msg = 'Failed to set VLAN TAG rewrite on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_exec.add(cmd, **args).get_replies(err_msg).\
                verify_reply(err_msg=err_msg)

    @staticmethod
    def get_l2_fib_table(node, bd_id):
        """Retrieves the L2 FIB table.

        :param node: VPP node.
        :param bd_id: Index of the bridge domain.
        :type node: dict
        :type bd_id: int
        :returns: L2 FIB table.
        :rtype: list
        """
        cmd = 'l2_fib_table_dump'
        cmd_reply = 'l2_fib_table_details'
        args = dict(bd_id=int(bd_id))
        err_msg = 'Failed to get L2FIB dump on host {host}'.format(
            host=node['host'])
        with PapiExecutor(node) as papi_exec:
            papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
        data = papi_resp.reply[0]['api_reply']
        fib_data = list()
        for fib in data:
            fib_item = fib[cmd_reply]
            # Convert the binary MAC into human-readable form in place.
            fib_item['mac'] = L2Util.bin_to_mac(fib_item['mac'])
            fib_data.append(fib_item)
        return fib_data

    @staticmethod
    def get_l2_fib_entry_by_mac(node, bd_index, mac):
        """Retrieves the L2 FIB entry specified by MAC address using PAPI.

        :param node: VPP node.
        :param bd_index: Index of the bridge domain (1-based position in the
            bridge domain dump, not the bd_id itself).
        :param mac: MAC address used as the key in L2 FIB data structure.
        :type node: dict
        :type bd_index: int
        :type mac: str
        :returns: L2 FIB entry, or an empty dict if not found.
        :rtype: dict
        """
        bd_data = L2Util.vpp_get_bridge_domain_data(node)
        bd_id = bd_data[bd_index-1]['bd_id']
        table = L2Util.get_l2_fib_table(node, bd_id)
        for entry in table:
            if entry['mac'] == mac:
                return entry
        return {}
| StarcoderdataPython |
1883243 | <filename>tests/conftest.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import pytest
from rasa_nlu import data_router, config
from rasa_nlu.components import ComponentBuilder
logging.basicConfig(level="DEBUG")

# Paths shared by the test suite.
CONFIG_DEFAULTS_PATH = "sample_configs/config_defaults.yml"
DEFAULT_DATA_PATH = "data/examples/rasa/demo-rasa.json"

# see `rasa_nlu.data_router` for details. avoids deadlock in
# `deferred_from_future` function during tests
data_router.DEFERRED_RUN_IN_REACTOR_THREAD = False
@pytest.fixture(scope="session")
def component_builder():
    """Session-scoped ComponentBuilder so component caches are shared."""
    return ComponentBuilder()
@pytest.fixture(scope="session")
def spacy_nlp(component_builder, default_config):
    """Loaded spaCy language object from the nlp_spacy component."""
    return component_builder.create_component("nlp_spacy", default_config).nlp
@pytest.fixture(scope="session")
def ner_crf_pos_feature_config():
    """CRF feature configuration (including POS features) for NER tests."""
    # Feature windows: previous token, current token, next token.
    previous_token = ["low", "title", "upper", "pos", "pos2"]
    current_token = ["bias", "low", "suffix3", "suffix2", "upper",
                     "title", "digit", "pos", "pos2", "pattern"]
    next_token = ["low", "title", "upper", "pos", "pos2"]
    return {"features": [previous_token, current_token, next_token]}
@pytest.fixture(scope="session")
def mitie_feature_extractor(component_builder, default_config):
    """MITIE total_word_feature_extractor from the nlp_mitie component."""
    return component_builder.create_component("nlp_mitie", default_config).extractor
@pytest.fixture(scope="session")
def default_config():
    """Rasa NLU config loaded from the checked-in defaults file."""
    return config.load(CONFIG_DEFAULTS_PATH)
| StarcoderdataPython |
11303029 | """Python Protocol API v3 type definitions and value classes."""
from opentrons_shared_data.labware.dev_types import LabwareParameters
from opentrons.types import (
DeckSlotName,
Location,
MountType as Mount,
Mount as DeprecatedMount,
Point,
)
from opentrons.protocol_engine import DeckSlotLocation, PipetteName
# Explicit public API of this re-export module.
__all__ = [
    # re-exports from opentrons_shared_data.labware.dev_types
    "LabwareParameters",
    # re-exports from opentrons.types
    "DeckSlotName",
    "Location",
    "Mount",
    "DeprecatedMount",
    "Point",
    # re-exports from opentrons.protocol_engine
    "DeckSlotLocation",
    "PipetteName",
]
| StarcoderdataPython |
6543321 | <filename>model_compiler/tests/model_compiler/models/targets/test_tensorrt_model.py
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import numpy
import pytest
import tensorflow as tf
import tensorrt
from tensorrt import Builder, DataType, ElementWiseOperation, Logger, Weights
import model_compiler.compilers.repository as compiler_repository
from model_compiler.models.irs.tf_model import Input as TfInput, TensorFlowModel
from model_compiler.models.targets.tensorrt_model import TensorRTModel
from model_compiler.protos.generated.model_config_pb2 import ModelInput, ModelOutput
def _make_tensorrt_model() -> TensorRTModel:
    """Build a tiny TF1 graph (z = x + y + weight) and compile it to TensorRT."""
    with tf.Graph().as_default(), tf.compat.v1.Session().as_default() as session:
        input_x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 4], name='x')
        input_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 4], name='y')
        weight = tf.Variable(initial_value=[2.0, 3.0, 4.0, 5.0], dtype=tf.float32)
        output_z = tf.add(input_x + input_y, weight, name='z')
        # Initialize the variable so the compiler can freeze its value.
        session.run(weight.initializer)
        compiler, config_type = compiler_repository.REPOSITORY.get(TensorFlowModel, TensorRTModel)
        return compiler(source=TensorFlowModel(inputs=[TfInput(tensor=input_x), TfInput(tensor=input_y)],
                                               outputs=[output_z],
                                               session=session),
                        config=config_type.from_json({'max_batch_size': 4}))
def _make_implicit_batch_size_tensorrt_model() -> TensorRTModel:
    """Build the same z = x + y + weight network directly with the TensorRT
    network API, using an implicit batch dimension (shapes omit the batch)."""
    with Logger() as logger, Builder(logger) as builder, builder.create_network() as network:
        input_x = network.add_input(name='x', dtype=DataType.FLOAT, shape=[4])
        input_y = network.add_input(name='y', dtype=DataType.FLOAT, shape=[4])
        weight = network.add_constant(
            shape=[4],
            weights=Weights(a=numpy.array([2.0, 3.0, 4.0, 5.0], dtype=numpy.float32))
        ).get_output(0)
        # (x + y) + weight, built from two elementwise SUM layers.
        output_z = network.add_elementwise(input1=network.add_elementwise(input1=input_x,
                                                                          input2=input_y,
                                                                          op=ElementWiseOperation.SUM).get_output(0),
                                           input2=weight,
                                           op=ElementWiseOperation.SUM).get_output(0)
        output_z.name = 'z'
        network.mark_output(tensor=output_z)
        return TensorRTModel(cuda_engine=builder.build_cuda_engine(network), input_data_formats=[None, None])
@pytest.mark.gpu_test
class KerasModelFileTestCase(TestCase):
    # NOTE(review): name says "Keras" but these tests exercise TensorRTModel;
    # consider renaming in a follow-up (kept here to avoid breaking test
    # selection by name).

    def test_get_inputs(self):
        saved_model = _make_tensorrt_model()
        self.assertEqual(saved_model.get_inputs(),
                         [ModelInput(name='x:0', data_type=tf.float32.as_datatype_enum, format=None, dims=[4]),
                          ModelInput(name='y:0', data_type=tf.float32.as_datatype_enum, format=None, dims=[4])])

    def test_get_outputs(self):
        saved_model = _make_tensorrt_model()
        self.assertEqual(saved_model.get_outputs(),
                         [ModelOutput(name='z:0', data_type=tf.float32.as_datatype_enum, dims=[4])])

    def test_get_inputs_implicit_batch_size(self):
        # Implicit-batch engines report tensor names without the ":0" suffix.
        saved_model = _make_implicit_batch_size_tensorrt_model()
        self.assertEqual(saved_model.get_inputs(),
                         [ModelInput(name='x', data_type=tf.float32.as_datatype_enum, format=None, dims=[4]),
                          ModelInput(name='y', data_type=tf.float32.as_datatype_enum, format=None, dims=[4])])

    def test_get_outputs_implicit_batch_size(self):
        saved_model = _make_implicit_batch_size_tensorrt_model()
        self.assertEqual(saved_model.get_outputs(),
                         [ModelOutput(name='z', data_type=tf.float32.as_datatype_enum, dims=[4])])

    def test_save(self):
        saved_model = _make_tensorrt_model()
        with TemporaryDirectory() as save_path:
            saved_model.save(save_path)
        self.assertEqual(os.listdir(save_path), ['model.plan'])

    def test_get_platform(self):
        self.assertEqual(TensorRTModel.get_platform(), ('tensorrt_plan', tensorrt.__version__))
| StarcoderdataPython |
9673541 | <gh_stars>10-100
"""
Django settings for ponyconf project.
"""
from django.utils.translation import ugettext_lazy as _
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# the post_migrate creating the first site should be called at first
'django.contrib.sites',
# our apps
'ponyconf',
'accounts',
'cfp',
'mailing',
# third-party apps
'bootstrap3',
'django_select2',
'crispy_forms',
# build-in apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cfp.middleware.ConferenceMiddleware',
]
ROOT_URLCONF = 'ponyconf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
#'ponyconf.context_processors.site',
'cfp.context_processors.conference',
],
},
},
]
WSGI_APPLICATION = 'ponyconf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGES = [
('en', _('English')),
('fr', _('French')),
]
LANGUAGE_CODE = 'en-us'
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'locale'),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
('jquery', os.path.join(BASE_DIR, 'node_modules/jquery/dist/')),
]
LOGIN_REDIRECT_URL = 'home'
SITE_ID = 1
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'ponyconf.backends.EmailBackend',
]
LOGOUT_REDIRECT_URL = 'home'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'select2': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'select2',
},
}
SELECT2_CACHE_BACKEND = 'select2'
SERVER_EMAIL = '<EMAIL>'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
| StarcoderdataPython |
1687984 | from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Optional
import typer
from pydantic import DirectoryPath, FilePath, HttpUrl
from typing_extensions import Literal
from judge.tools.config import BaseJudgeConfig
@dataclass(frozen=True)
class Sample:
    """One sample file of a testcase: its kind ("in"/"out"), path and raw bytes."""
    ext: Literal["in", "out"]
    path: Path
    data: bytes
@dataclass
class TestCasePath:
    """Paths of one testcase's input/output files; either side may be missing."""
    name: str
    in_path: Optional[Path] = None
    out_path: Optional[Path] = None
class CompareMode(Enum):
    """How program output is compared against the expected output."""
    EXACT_MATCH = "exact-match"
    CRLF_INSENSITIVE_EXACT_MATCH = "crlf-insensitive-exact-match"
    IGNORE_SPACES = "ignore-spaces"
    IGNORE_SPACES_AND_NEWLINES = "ignore-spaces-and-newlines"
class TimerMode(Enum):
    """Which external timer measures the run (empty string = no timing)."""
    NO_TIME = ""
    GNU_TIME = "gnu-time"
class BaseJudgeStatus:
    """Base for judge verdicts (AC/WA/...): a short label plus a display color.

    Both helpers are classmethods so they can be called on the verdict class
    itself -- the subclasses are used as enum values and never instantiated.
    """
    name = ""   # short verdict label, e.g. "AC"
    color = ""  # typer color name used when rendering the label

    @classmethod
    def style(cls) -> str:
        """Return the verdict name wrapped in typer color codes."""
        # Renamed the first argument from ``self`` to ``cls``: these are
        # classmethods, so the first argument is the class (PEP 8 naming).
        return typer.style(cls.__str__(), fg=cls.color)

    @classmethod
    def __str__(cls) -> str:
        """Return the plain verdict name."""
        return cls.name
class AC_(BaseJudgeStatus):
    """Accepted: output matched the expected answer."""
    name = "AC"
    color = typer.colors.GREEN
class WA_(BaseJudgeStatus):
    """Wrong Answer: output did not match the expected answer."""
    name = "WA"
    color = typer.colors.RED
class RE_(BaseJudgeStatus):
    """Runtime Error: the program exited abnormally."""
    name = "RE"
    color = typer.colors.RED
class MLE_(BaseJudgeStatus):
    """Memory Limit Exceeded."""
    name = "MLE"
    color = typer.colors.YELLOW
class TLE_(BaseJudgeStatus):
    """Time Limit Exceeded."""
    name = "TLE"
    color = typer.colors.YELLOW
class JudgeStatus(Enum):
    """Enumeration of verdicts; each member's value is its status class."""
    AC = AC_
    WA = WA_
    RE = RE_
    TLE = TLE_
    MLE = MLE_
class VerboseStr(str, Enum):
    """Verbosity levels accepted on the command line (str-valued for typer)."""
    error = "error"
    error_detail = "error_detail"
    all = "all"
    detail = "detail"
    dd = "dd"
@dataclass
class History:
    """Outcome of executing the solution against a single testcase."""
    status: JudgeStatus             # verdict for this run (AC/WA/RE/TLE/MLE)
    testcase: TestCasePath          # which testcase was executed
    output: bytes                   # captured program output
    exitcode: int                   # process exit status
    elapsed: float                  # elapsed time -- presumably ms (cf. tle default of 2000); confirm
    memory: Optional[float] = None  # peak memory when measured (e.g. gnu-time), else None
class JudgeConfig(BaseJudgeConfig):
    """Pydantic-validated settings for one judge invocation."""
    workdir: DirectoryPath                   # directory the solution is run from
    URL: Optional[HttpUrl] = None            # problem URL, when fetched online
    file: Optional[FilePath] = None          # solution source file
    contest: Optional[str] = None
    problem: Optional[str] = None
    testdir: Optional[DirectoryPath] = None  # local directory holding testcases
    py: bool = True                          # run with CPython
    pypy: bool = False                       # run with PyPy
    cython: bool = False                     # run with Cython
    mle: Optional[float] = 1024              # memory limit -- presumably MiB; confirm
    tle: Optional[float] = 2000              # time limit -- presumably ms; confirm
    mode: CompareMode = CompareMode.EXACT_MATCH
    tolerance: Optional[float] = None        # numeric tolerance for output comparison
    jobs: Optional[int] = None               # parallel judge workers (None = default)
    verbose: VerboseStr = VerboseStr.error_detail
| StarcoderdataPython |
124235 | <reponame>bcgov/CIT<gh_stars>1-10
from django.test import TestCase
from django.contrib.gis.geos import Point
from ...models import Opportunity, ApprovalStatus
class OpportunityModelTest(TestCase):
    """Unit tests for basic field assignment on the Opportunity model.

    Fix: the original file had dataset-export junk (``| StarcoderdataPython |``)
    fused onto the final line, which made the module a syntax error.
    """

    def test_fields(self):
        """Constructing an Opportunity stores id, address, point and status as-is."""
        approval_status = ApprovalStatus(status_name="Test status",
                                         status_description="This is a test status",
                                         status_code="TEST",
                                         active_status=True)
        opportunity = Opportunity(1, "555 Testing Rd.", Point(110, -80, srid=4326), approval_status=approval_status)
        self.assertEqual(opportunity.id, 1)
        self.assertEqual(opportunity.address, "555 Testing Rd.")
        self.assertEqual(opportunity.point, Point(110, -80, srid=4326))
        self.assertEqual(opportunity.approval_status, approval_status)
3543176 | <gh_stars>1-10
/usr/lib64/python3.5/bisect.py | StarcoderdataPython |
3245374 | import sqlite3
def create_db(db_name):
    """Create the task-tracker schema (Tasks, Projects) in an SQLite database.

    Parameters
    ----------
    db_name : str
        Path of the database file (created if it does not exist).
    """
    # Fix: ``sqlite3.connect`` used as a context manager only wraps a
    # transaction (commit/rollback); it does NOT close the connection, so the
    # original leaked an open handle. Close it explicitly.
    db = sqlite3.connect(db_name)
    try:
        with db:  # commits on success, rolls back on error
            cursor = db.cursor()
            # Tasks references Projects; SQLite resolves FK targets lazily,
            # so creating the child table first is fine.
            cursor.execute("""CREATE TABLE Tasks(
            TaskID integer,
            Description text,
            ProjectID integer,
            PRIMARY KEY(TaskID),
            FOREIGN KEY(ProjectID) REFERENCES Projects(ProjectID)
            );""")
            cursor.execute("""CREATE TABLE Projects(
            ProjectID integer,
            Title TEXT NOT NULL,
            Created timestamp,
            PRIMARY KEY(ProjectID)
            );""")
    finally:
        db.close()
| StarcoderdataPython |
6575236 | <reponame>james94/driverlessai-recipes
"""Extract LIEF features from PE files"""
from h2oaicore.transformer_utils import CustomTransformer
import datatable as dt
import numpy as np
class PEImportsFeatures(CustomTransformer):
    """Extract hashed import-table features from Windows PE files with LIEF.

    Each input cell is expected to hold a path to a PE file; the transformer
    parses its import table and emits 256 hashed library-name features plus
    1024 hashed library:symbol features per row.
    """
    _modules_needed_by_name = ['lief==0.9.0']
    _regression = True
    _binary = True
    _multiclass = True
    _is_reproducible = True
    _parallel_task = True  # if enabled, params_base['n_jobs'] will be >= 1 (adaptive to system), otherwise 1
    _can_use_gpu = True  # if enabled, will use special job scheduler for GPUs
    _can_use_multi_gpu = True  # if enabled, can get access to multiple GPUs for single transformer (experimental)
    _numeric_output = True

    @staticmethod
    def get_default_properties():
        """Operate on exactly one text column (the PE file path)."""
        return dict(col_type="text", min_cols=1, max_cols=1, relative_importance=1)

    @staticmethod
    def do_acceptance_test():
        return False

    def fit_transform(self, X: dt.Frame, y: np.array = None):
        # Stateless transformer: fitting is identical to transforming.
        return self.transform(X)

    def load_pe(self, file_path):
        """Read a file from disk and return its raw bytes as a bytearray."""
        with open(file_path, 'rb') as f:
            bytez = bytearray(f.read())
        return (bytez)

    def imports_features(self, lief_binary):
        """Hash the binary's import table into a fixed-width feature dict."""
        from sklearn.feature_extraction import FeatureHasher
        imports = lief_binary.imports
        features = {}
        # Collect, per imported library, the list of imported symbol names
        # (ordinal imports are encoded as "ordinal<N>").
        for lib in imports:
            if lib.name not in features:
                features[lib.name] = []
            for entry in lib.entries:
                if entry.is_ordinal:
                    features[lib.name].append("ordinal" + str(entry.ordinal))
                else:
                    features[lib.name].append(entry.name[:10000])
        features_hashed = {}
        # Sorted, lower-cased inputs keep the hashing reproducible.
        libraries = sorted(list(set([l.lower() for l in features.keys()])))
        for i, x in enumerate(FeatureHasher(256, input_type='string').transform([libraries]).toarray()[0]):
            features_hashed.update({f'Imports_libraries_hash_{i}': x})
        entries = sorted([lib.lower() + ':' + e for lib, elist in features.items() for e in elist])
        for i, x in enumerate(FeatureHasher(1024, input_type='string').transform([entries]).toarray()[0]):
            features_hashed.update({f'Imports_entries_hash_{i}': x})
        return features_hashed

    def get_imports_features(self, file_path):
        """Parse one PE file; fall back to all-zero features on any parse error."""
        import lief
        try:
            pe_bytez = self.load_pe(file_path)
            lief_binary = lief.PE.parse(list(pe_bytez))
            X = self.imports_features(lief_binary)
            return X
        except Exception:
            # Fix: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit. The zero-vector fallback for
            # unparsable files is intentional best-effort behavior.
            X = {f'Imports_libraries_hash_{i}': 0 for i in range(256)}
            X.update({f'Imports_entries_hash_{i}': 0 for i in range(1024)})
            return X

    def transform(self, X: dt.Frame):
        """Return a pandas DataFrame of 1280 hashed import features per row."""
        import pandas as pd
        ret_df = pd.DataFrame(
            [
                self.get_imports_features(x)
                for x in X.to_pandas().values[:, 0]
            ]
        )
        self._output_feature_names = ret_df.columns.to_list()
        self._feature_desc = self._output_feature_names
        return ret_df
| StarcoderdataPython |
11283309 | <reponame>mlcommons/peoples-speech
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
# Copyright (C) 2017 Intellisist, Inc. (Author: <NAME>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# this script trains a vanilla RNNLM with TensorFlow.
# to call the script, do
# python steps/tfrnnlm/lstm_fast.py --data_path=$datadir \
# --save_path=$savepath --vocab_path=$rnn.wordlist [--hidden-size=$size]
#
# One example recipe is at egs/ami/s5/local/tfrnnlm/run_vanilla_rnnlm.sh
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import absl
import absl.flags as flags
import tensorflow as tf
from tensorflow.python.keras.losses import LossFunctionWrapper
import reader
from lstm import RNNLMModel, RNNLMModelTrainer
# flags.DEFINE_integer("hidden_size", 200, "hidden dim of RNN")
#
# flags.DEFINE_string("data_path", None,
# "Where the training/test data is stored.")
# flags.DEFINE_string("vocab_path", None,
# "Where the wordlist file is stored.")
# flags.DEFINE_string("save_path", "export",
# "Model output directory.")
# flags.DEFINE_bool("use_fp16", False,
# "Train using 16-bit floats instead of 32bit floats")
FLAGS = flags.FLAGS  # module-level handle to absl's global flag values (parsed by absl.app.run)
class Config(object):
    """Small config."""
    init_scale = 0.1      # presumably the uniform init range for weights -- confirm in RNNLMModel
    learning_rate = 1     # initial SGD learning rate
    max_grad_norm = 5     # presumably the gradient-clipping threshold -- confirm in the trainer
    num_layers = 2        # stacked RNN layers
    num_steps = 20        # BPTT unroll length
    hidden_size = 200     # RNN hidden dimension (overridden from FLAGS.hidden_size in main)
    max_epoch = 4         # epochs trained at the full learning rate before decay kicks in
    max_max_epoch = 13    # total number of training epochs
    keep_prob = 1.0       # dropout keep probability (1.0 = no dropout)
    lr_decay = 0.8        # per-epoch geometric LR decay applied after max_epoch
    batch_size = 64
def data_type():
    """Return the floating-point dtype for the model (fp16 iff --use_fp16)."""
    if FLAGS.use_fp16:
        return tf.float16
    return tf.float32
# this new "softmax" function we show can train a "self-normalized" RNNLM where
# the sum of the output is automatically (close to) 1.0
# which saves a lot of computation for lattice-rescoring
def new_softmax(labels, logits):
    """Self-normalizing softmax loss: -logit(label) + sum(exp(logits)) - 1.

    Training with this objective pushes each row's sum of exponentiated
    logits toward 1, so at inference the raw logits can be used directly
    as a (self-normalized) distribution -- a large saving for lattice
    rescoring (see the module-level comment above).

    Args:
        labels: integer tensor of target word ids (any shape).
        logits: float tensor, reshapeable to [n_samples, vocab_size].

    Returns:
        A [n_samples] float tensor of per-sample losses.
    """
    flatten_labels = tf.reshape(labels, [-1])
    n_samples = tf.shape(flatten_labels)[0]
    flatten_logits = tf.reshape(logits, shape=[n_samples, -1])
    f_logits = tf.exp(flatten_logits)
    row_sums = tf.reduce_sum(f_logits, -1)  # this is the negative part of the objf
    label_col = tf.expand_dims(flatten_labels, 1)
    # Fix: this local was named ``range``, shadowing the Python builtin.
    sample_idx = tf.expand_dims(tf.range(n_samples), 1)
    # (row, label) index pairs used to gather each sample's target logit.
    ind = tf.concat([sample_idx, label_col], 1)
    res = tf.gather_nd(flatten_logits, ind)
    return -res + row_sums - 1
class MyFastLossFunction(LossFunctionWrapper):
    """Keras loss wrapper around the self-normalizing ``new_softmax`` objective."""
    def __init__(self):
        super().__init__(new_softmax)
class FastRNNLMModel(RNNLMModel):
    """RNNLM trained with the self-normalizing ``new_softmax`` loss."""
    def __init__(self, config):
        # Output-bias init of -9: presumably keeps initial exp(logits) tiny so
        # row sums start near the self-normalization target -- TODO confirm.
        super().__init__(config, tf.constant_initializer(-9))
    def get_loss(self, word_ids, labels, is_training=False):
        """Return the self-normalized loss for a batch of word ids / labels."""
        logits = self.get_logits(word_ids, is_training)
        loss_obj = MyFastLossFunction()
        return loss_obj(labels, logits)
    def get_score(self, logits):
        """Logits double as scores: the loss trains them to be self-normalized."""
        # In this implementation, logits can be used as dist output
        return logits
def get_config():
    """Build and return the training hyper-parameter configuration."""
    config = Config()
    return config
def main(_):
    """Train the fast (self-normalizing) RNNLM and export it as a SavedModel."""
    # Turn this on to try the model code with this source file itself!
    __TESTING = False
    if __TESTING:
        (train_data, valid_data), word_map = reader.rnnlm_gen_data(
            __file__, reader.__file__
        )
    else:
        if not FLAGS.data_path:
            raise ValueError("Must set --data_path to RNNLM data directory")
        raw_data = reader.rnnlm_raw_data(FLAGS.data_path, FLAGS.vocab_path)
        train_data, valid_data, _, word_map = raw_data
    config = get_config()
    config.hidden_size = FLAGS.hidden_size
    config.vocab_size = len(word_map)
    if __TESTING:
        # use a much smaller scale on our tiny test data
        config.num_steps = 8
        config.batch_size = 4
    model = FastRNNLMModel(config)
    train_producer = reader.RNNLMProducer(
        train_data, config.batch_size, config.num_steps
    )
    trainer = RNNLMModelTrainer(model, config)
    valid_producer = reader.RNNLMProducer(
        valid_data, config.batch_size, config.num_steps
    )
    # Save variables to disk if you want to prevent crash...
    # Data producer can also be saved to preverse feeding progress.
    checkpoint = tf.train.Checkpoint(trainer=trainer, data_feeder=train_producer)
    manager = tf.train.CheckpointManager(checkpoint, "checkpoints/", 5)
    for i in range(config.max_max_epoch):
        # LR schedule: hold for max_epoch epochs, then decay geometrically.
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        lr = config.learning_rate * lr_decay
        trainer.train_one_epoch(train_producer, lr)
        manager.save()
        eval_loss = trainer.evaluate(valid_producer)
        print("validating: loss={}".format(eval_loss))
    # Export
    print("Saving model to %s." % FLAGS.save_path)
    # Concrete signatures exported for consumers (e.g. lattice rescoring):
    # one-step inference plus an initial-state factory.
    spec = [
        tf.TensorSpec(
            shape=[config.num_layers, 2, 1, config.hidden_size],
            dtype=data_type(),
            name="context",
        ),
        tf.TensorSpec(shape=[1, 1], dtype=tf.int32, name="word_id"),
    ]
    cfunc = model.single_step.get_concrete_function(*spec)
    cfunc2 = model.get_initial_state.get_concrete_function()
    tf.saved_model.save(
        model,
        FLAGS.save_path,
        signatures={"single_step": cfunc, "get_initial_state": cfunc2},
    )
if __name__ == "__main__":
absl.app.run(main)
| StarcoderdataPython |
12840372 | """
@Author : xiaotao
@Email : <EMAIL>
@Lost modifid : 2020/4/24 10:18
@Filename : LanguagePack.py
@Description :
@Software : PyCharm
"""
class RET:
    """
    Status-code constants for API responses (values are numeric strings).
    """
    OK = "200"
    DBERR = "501"
    NODATA = "462"
    DATAEXIST = "433"
    DATAERR = "499"
    REQERR = "521"
    IPERR = "422"
    THIRDERR = "431"
    IOERR = "502"
    SERVERERR = "500"
    UNKNOWERR = "451"
    USER_STATUS = "465"
# Each value is a tuple of translations: index 0 is Simplified Chinese,
# index 1 is English, index 2 is Traditional Chinese.
# (Only the Chinese entries are filled in so far.)
language_pack = {
    RET.OK: ("成功",),
    RET.DBERR: ("数据库查询错误",),
    RET.NODATA: ("数据不存在",),
    RET.DATAEXIST: ("数据已存在",),
    RET.DATAERR: ("数据格式错误",),
    RET.REQERR: ("非法请求或请求次数受限",),
    RET.IPERR: ("IP受限",),
    RET.THIRDERR: ("第三方系统错误",),
    RET.IOERR: ("文件读写错误",),
    RET.SERVERERR: ("内部错误",),
    RET.UNKNOWERR: ("未知错误",),
    RET.USER_STATUS: ("账号已被禁用,如有疑义请联系平台客服",),
}
class Language(object):
    """Resolves a status code to its message in the configured language."""

    _lang = 'zh_cn'

    # Tuple index of each supported language inside a language_pack entry.
    _LANG_INDEX = {'zh_cn': 0, 'en_US': 1, 'zh_F': 2}

    @classmethod
    def init(cls, lang):
        """Select the active language code ('zh_cn', 'en_US' or 'zh_F')."""
        cls._lang = lang

    @classmethod
    def get(cls, value):
        """Return the message for status code *value*, or None if unavailable."""
        entry = language_pack.get(value)
        if not entry:
            return None
        index = cls._LANG_INDEX.get(cls._lang)
        if index is not None and len(entry) > index:
            return entry[index]
        return None
| StarcoderdataPython |
6689083 | import os
import sys
import json
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from torch.optim import lr_scheduler
from pathlib import Path
from opts import parse_opts
from model import generate_model
from mean import get_mean, get_std
from spatial_transforms import *
from temporal_transforms import *
from target_transforms import ClassLabel, VideoID
from target_transforms import Compose as TargetCompose
from dataset import get_training_set, get_validation_set, get_test_set
from utils.utils import *
from train import train_epoch
from validation import val_epoch
import test
if __name__ == '__main__':
    opt = parse_opts()
    # Resolve all configured paths relative to --root_path.
    if opt.root_path != '':
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if not os.path.exists(opt.result_path):
            os.makedirs(opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
    # Geometric progression of crop scales for multi-scale augmentation.
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}'.format(opt.model)
    opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
    opt.std = get_std(opt.norm_value)
    opt.store_name = '_'.join([opt.dataset, opt.model, str(opt.width_mult) + 'x',
                               opt.modality, str(opt.sample_duration)])
    print(opt)
    # create results folder
    result_path = Path(opt.result_path)
    if not result_path.exists():
        result_path.mkdir(parents=True, exist_ok=True)
        print("Created the results directory: " + str(result_path))
    with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
        json.dump(vars(opt), opt_file)
    torch.manual_seed(opt.manual_seed)
    model, parameters = generate_model(opt)
    print(model)
    criterion = nn.CrossEntropyLoss()
    if not opt.no_cuda:
        criterion = criterion.cuda()
    # Build the input normalization: optionally skip mean and/or std.
    if opt.no_mean_norm and not opt.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)
    logger = TensorboardLogger(opt.result_path)
    if not opt.no_train:
        assert opt.train_crop in ['random', 'corner', 'center']
        if opt.train_crop == 'random':
            crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'corner':
            crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
        elif opt.train_crop == 'center':
            crop_method = MultiScaleCornerCrop(
                opt.scales, opt.sample_size, crop_positions=['c'])
        else:
            raise ValueError("Train crop not provided")
        if opt.no_hflip or opt.dataset.lower() == "jester":
            # in the jester dataset there a lot of gestures that depend on the direction they are performed in
            spatial_transform = Compose([
                # RandomRotate(),
                # RandomResize(),
                crop_method,
                # MultiplyValues(),
                # Dropout(),
                # SaltImage(),
                # Gaussian_blur(),
                # SpatialElasticDisplacement(),
                ToTensor(opt.norm_value), norm_method
            ])
        else:
            spatial_transform = Compose([
                RandomHorizontalFlip(),
                # RandomRotate(),
                # RandomResize(),
                crop_method,
                # MultiplyValues(),
                # Dropout(),
                # SaltImage(),
                # Gaussian_blur(),
                # SpatialElasticDisplacement(),
                ToTensor(opt.norm_value), norm_method
            ])
        temporal_transform = TemporalRandomCrop(opt.sample_duration, opt.downsample)
        target_transform = ClassLabel()
        training_data = get_training_set(opt, spatial_transform,
                                         temporal_transform, target_transform)
        train_loader = torch.utils.data.DataLoader(
            training_data,
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=opt.n_threads,
            pin_memory=True)
        train_logger = Logger(
            os.path.join(opt.result_path, 'train.log'),
            ['epoch', 'loss', 'prec1', 'prec5', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'prec1', 'prec5', 'lr'])
        if opt.nesterov:
            # Nesterov momentum in torch.optim.SGD requires zero dampening.
            dampening = 0
        else:
            dampening = opt.dampening
        optimizer = optim.SGD(
            parameters,
            lr=opt.learning_rate,
            momentum=opt.momentum,
            dampening=dampening,
            weight_decay=opt.weight_decay,
            nesterov=opt.nesterov)
        scheduler = Scheduler(optimizer, opt)
    if not opt.no_val:
        # Validation uses deterministic center crops (no augmentation).
        spatial_transform = Compose([
            Scale(opt.sample_size),
            CenterCrop(opt.sample_size),
            ToTensor(opt.norm_value), norm_method
        ])
        temporal_transform = TemporalCenterCrop(opt.sample_duration, opt.downsample)
        target_transform = ClassLabel()
        validation_data = get_validation_set(
            opt, spatial_transform, temporal_transform, target_transform)
        val_loader = torch.utils.data.DataLoader(
            validation_data,
            batch_size=16,
            shuffle=False,
            num_workers=opt.n_threads,
            pin_memory=True)
        val_logger = Logger(
            os.path.join(opt.result_path, 'val.log'), ['epoch', 'loss', 'prec1', 'prec5'])
    best_prec1 = 0
    if opt.resume_path:
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']
        best_prec1 = checkpoint['best_prec1']
        opt.begin_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if "scheduler" in checkpoint:
            # NOTE(review): this passes the whole checkpoint dict rather than
            # checkpoint['scheduler'] -- confirm Scheduler.load_state_dict
            # expects the full dict.
            scheduler.load_state_dict(checkpoint)
        else:
            print("Scheduler state dict not found in the checkpoint file")
    print('run')
    # Main epoch loop: optional training pass, then optional validation pass.
    for i in range(opt.begin_epoch, opt.n_epochs + 1):
        if not opt.no_train:
            scheduler.adjust_learning_rate(i)
            train_epoch(i, train_loader, model, criterion, optimizer, opt, logger)
            state = {
                'epoch': i,
                'arch': opt.arch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_prec1': best_prec1,
                'scheduler': scheduler.state_dict()
            }
            save_checkpoint(state, False, opt)
        if not opt.no_val:
            validation_loss, prec1 = val_epoch(i, val_loader, model, criterion, opt, logger)
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            state = {
                'epoch': i,
                'arch': opt.arch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_prec1': best_prec1,
                'scheduler': scheduler.state_dict()
            }
            save_checkpoint(state, is_best, opt)
            scheduler.adjust_learning_rate_validation(i, validation_loss)
    if opt.test:
        spatial_transform = Compose([
            Scale(int(opt.sample_size / opt.scale_in_test)),
            CornerCrop(opt.sample_size, opt.crop_position_in_test),
            ToTensor(opt.norm_value), norm_method
        ])
        # temporal_transform = LoopPadding(opt.sample_duration, opt.downsample)
        temporal_transform = TemporalRandomCrop(opt.sample_duration, opt.downsample)
        target_transform = VideoID()
        test_data = get_test_set(opt, spatial_transform, temporal_transform,
                                 target_transform)
        test_loader = torch.utils.data.DataLoader(
            test_data,
            batch_size=1,
            shuffle=False,
            num_workers=0,
            pin_memory=True)
        test.test(test_loader, model, opt, test_data.class_names)
| StarcoderdataPython |
5085457 | #!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy as np
import os
import sys
from numpy.distutils.system_info import get_info
# try to find LAPACK and BLAS
blas_info = get_info('blas_opt')
if sys.platform == 'darwin':
    # OS X
    try:
        # numpy reports the Accelerate headers as '-I<path>' inside
        # extra_compile_args; strip the leading '-I'.
        # NOTE(review): only KeyError is caught -- an IndexError from a
        # short extra_compile_args list would propagate; confirm intended.
        blas_include = blas_info['extra_compile_args'][1][2:]
    except KeyError:
        blas_include = None
    if not blas_include or not os.path.exists(blas_include):
        # for yosemite
        blas_include = '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/Headers'
    if not os.path.exists(blas_include):
        raise RuntimeError("Could not locate blas libraries")
else:
    # Linux
    blas_include = "/usr/include/atlas/"
includes = [blas_include, np.get_include()]
# Cython extension modules; linalg_c is the only one linked against LAPACK/BLAS.
extensions = [
    Extension(
        "bayesian_quadrature.linalg_c", ["bayesian_quadrature/linalg_c.pyx"],
        include_dirs=includes,
        libraries=["m", "lapack", "blas"]
    ),
    Extension(
        "bayesian_quadrature.gauss_c", ["bayesian_quadrature/gauss_c.pyx"],
        include_dirs=includes,
        libraries=["m"]
    ),
    Extension(
        "bayesian_quadrature.bq_c", ["bayesian_quadrature/bq_c.pyx"],
        include_dirs=includes,
        libraries=["m"]
    ),
    Extension(
        "bayesian_quadrature.util_c", ["bayesian_quadrature/util_c.pyx"],
        include_dirs=includes,
        libraries=["m"]
    )
]
setup(
    name='bayesian_quadrature',
    version=open('VERSION.txt').read().strip(),
    description='Python library for performing Bayesian Quadrature',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/jhamrick/bayesian-quadrature',
    packages=['bayesian_quadrature', 'bayesian_quadrature.tests'],
    ext_modules=cythonize(extensions),
    keywords='bayesian quadrature statistics',
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
        "Topic :: Scientific/Engineering :: Mathematics"
    ],
    install_requires=[
        'numpy',
        'scipy',
        'Cython',
        'gaussian_processes'
    ]
)
| StarcoderdataPython |
1600978 | from dist_zero.types import Type
from dist_zero import errors, primitive
class Expression(object):
  '''
  Abstract base class for the core expression objects. These form the starting point for the DistZero compiler.
  '''

  def __init__(self):
    # Keys under which this expression's value should be observable ("spied").
    self.spy_keys = set()

  def Spy(self, key):
    '''Mark this expression to be spied under ``key``; returns self for chaining.'''
    self.spy_keys.add(key)
    return self

  def __call__(self, other):
    '''Application syntax: ``f(x)`` builds an `Apply` node.'''
    return Apply(f=self, arg=other)

  def __getitem__(self, key):
    '''``expr[key]`` is shorthand for ``expr.Project(key)``.'''
    return self.Project(key)

  def Project(self, key):
    '''Apply the ``Project(key)`` primitive to this expression.'''
    return PrimitiveExpression(primitive.Project(key))(self)

  def Inject(self, key):
    '''Apply the ``Inject(key)`` primitive to this expression.'''
    return PrimitiveExpression(primitive.Inject(key))(self)
class Apply(Expression):
  '''
  Expression class for a function to be applied to an argument.
  Built by `Expression.__call__`.
  '''

  def __init__(self, arg: Expression, f: Expression):
    '''
    :param Expression arg: An expression object.
    :param Expression f: An expression of a function type with a source matching the type of ``arg``.
    '''
    self.arg = arg
    self.f = f
    super(Apply, self).__init__()
class Lambda(Expression):
  '''
  Create a DistZero function expression from a python function.
  The host-language function is only invoked during normalization.
  '''

  def __init__(self, srcType: Type, tgtType: Type, f):
    '''
    :param Type srcType: The source type of the function
    :param Type tgtType: The target type of the function
    :param function f: A python function to represent this underlying `Lambda` instance.
      It will be called once each time this lambda is applied during normalization.
    '''
    self.srcType = srcType
    self.tgtType = tgtType
    self.f = f
    super(Lambda, self).__init__()
class Record(Expression):
  '''
  A record expression creating universal compound data from components. Like a tuple or record type in other languages.
  Component order follows ``items``.
  '''

  def __init__(self, items):
    '''
    :param items: The named components of the record.
    :type items: list[tuple[str, `Expression`]]
    '''
    self.items = items
    super(Record, self).__init__()
class Case(Expression):
  '''
  A case expression, building a function on a `Sum` type out of functions on the components.
  '''

  def __init__(self, items):
    '''
    :param items: A list of pairs (key, expr) where expr is an `Expression` of a function type.
      Each ``expr`` should define the behavior of the case expression when its
      source in the state identified by ``key``.
    :type items: list[tuple[str, `Expression`]]
    '''
    self.items = items
    # Lazily-built dict view of ``items``, cached after the first use.
    self._d = None
    super(Case, self).__init__()

  def dict(self):
    '''Return ``items`` as a dict, building and caching it on first call.'''
    if self._d is None:
      self._d = dict(self.items)
    return self._d
class Constant(Expression):
  '''A constant value expression.'''

  def __init__(self, value):
    '''
    :param value: The constant's value, stored as-is.
    '''
    self.value = value
    super(Constant, self).__init__()
class ListOp(Expression):
  '''
  A pointwise operation on lists. Examples include map, filter and sort.
  Since these variants are all treated so similarly, they are represented in a common class and distinguished by
  ``opVariant``
  '''

  def __init__(self, opVariant, f: Expression):
    '''
    :param str opVariant: Identifies which kind of pointwise operation this is.
    :param Expression f: A function expression involved in this `ListOp`. How it is used depends on ``opVariant``
    '''
    self.opVariant = opVariant
    self.f = f
    super(ListOp, self).__init__()
class PrimitiveExpression(Expression):
  '''Each instance of `PrimitiveOp` can be treated as an expression by passing it to `PrimitiveExpression`'''

  def __init__(self, primitive):
    '''
    :param PrimitiveOp primitive: A primitive operation
    '''
    self.primitive = primitive
    super(PrimitiveExpression, self).__init__()
class RecordedUser(Expression):
  '''
  Expression wrapping a concrete recorded user.

  NOTE(review): semantics are not visible here; presumably replays
  recorded user inputs -- confirm against the runtime.
  '''
  def __init__(self, concrete_recorded_user):
    self.concrete_recorded_user = concrete_recorded_user
    super(RecordedUser, self).__init__()
# Fundamental input types
class WebInput(Expression):
  '''
  A fundamental input corresponding to a web endpoint identified by a domain name.
  '''

  def __init__(self, domain_name: str):
    '''
    :param str domain_name: A domain name identifying the web endpoint.
    '''
    self.domain_name = domain_name
    super(WebInput, self).__init__()
| StarcoderdataPython |
3274895 | <reponame>jpenrici/Extensions_Inkscape
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Extensão para o Inkscape randomizar as cores RGB, o preenchimento e/ou
# contorno de objetos selecionados.
import random
import inkex
import simplestyle
class RandomRGB(inkex.Effect):
    """Inkscape effect that randomizes the RGB fill and/or stroke of the
    selected objects.

    Dialog options select which channels (R, G, B) are randomized, whether
    the fill and/or the stroke is affected, and whether untouched channels
    keep their current values (keepColors) or are reset to 00.
    """

    def __init__(self):
        inkex.Effect.__init__(self)
        # One inkbool option per dialog checkbox; defaults mirror the .inx file.
        self.OptionParser.add_option("", "--randomizeRed",
                                     action="store",
                                     type="inkbool",
                                     dest="randomizeRed",
                                     default=True,
                                     help="")
        self.OptionParser.add_option("", "--randomizeGreen",
                                     action="store",
                                     type="inkbool",
                                     dest="randomizeGreen",
                                     default=True,
                                     help="")
        self.OptionParser.add_option("", "--randomizeBlue",
                                     action="store",
                                     type="inkbool",
                                     dest="randomizeBlue",
                                     default=True,
                                     help="")
        self.OptionParser.add_option("", "--randomizeFill",
                                     action="store",
                                     type="inkbool",
                                     dest="randomizeFill",
                                     default=True,
                                     help="")
        self.OptionParser.add_option("", "--randomizeStroke",
                                     action="store",
                                     type="inkbool",
                                     dest="randomizeStroke",
                                     default=False,
                                     help="")
        self.OptionParser.add_option("", "--keepColors",
                                     action="store",
                                     type="inkbool",
                                     dest="keepColors",
                                     default=False,
                                     help="")

    def effect(self):
        """Rewrite the style attribute of each selected node with random colors."""
        for id, node in self.selected.iteritems():
            try:
                style = simplestyle.parseStyle(node.get('style'))
            except Exception:  # was a bare except; keep the per-node skip behavior
                inkex.errormsg(_("No style attribute found for id: %s") % id)
                continue
            # Nothing to randomize: leave all nodes untouched.
            if not self.options.randomizeFill and not self.options.randomizeStroke:
                break
            if self.options.keepColors:
                # Preserve current channels; style colors look like "#rrggbb".
                fill_red = style['fill'][1:3]
                fill_green = style['fill'][3:5]
                fill_blue = style['fill'][5:7]
                stroke_red = style['stroke'][1:3]
                stroke_green = style['stroke'][3:5]
                stroke_blue = style['stroke'][5:7]
            else:
                fill_red = "00"
                fill_green = "00"
                fill_blue = "00"
                stroke_red = "00"
                stroke_green = "00"
                stroke_blue = "00"
            if self.options.randomizeFill:
                if self.options.randomizeRed:
                    fill_red = "%02x" % random.randint(0, 0xFF)
                if self.options.randomizeGreen:
                    fill_green = "%02x" % random.randint(0, 0xFF)
                if self.options.randomizeBlue:
                    fill_blue = "%02x" % random.randint(0, 0xFF)
                fill = "#%s%s%s" % (fill_red, fill_green, fill_blue)
                style['fill'] = fill
                node.set('style', simplestyle.formatStyle(style))
            if self.options.randomizeStroke:
                if self.options.randomizeRed:
                    stroke_red = "%02x" % random.randint(0, 0xFF)
                if self.options.randomizeGreen:
                    stroke_green = "%02x" % random.randint(0, 0xFF)
                if self.options.randomizeBlue:
                    stroke_blue = "%02x" % random.randint(0, 0xFF)
                # BUG FIX: the stroke color was formatted as "#rrggbb)" with a
                # stray closing parenthesis, producing an invalid SVG color.
                stroke = "#%s%s%s" % (stroke_red, stroke_green, stroke_blue)
                style['stroke'] = stroke
                node.set('style', simplestyle.formatStyle(style))
# Entry point when Inkscape runs this file as an extension script.
if __name__ == '__main__':
    e = RandomRGB()
    e.affect()
| StarcoderdataPython |
4860367 | #!/usr/bin/env python3
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import object, str
from typing import Dict, List, Optional
import zmq
from openr.KvStore import ttypes as kv_store_types
from openr.utils import consts, serializer, socket
class KvStoreClient(object):
    """Blocking ZMQ REQ client for Open/R KvStore commands.

    Every method sends one thrift ``Request`` over the command socket and
    synchronously waits for the reply.
    """
    def __init__(
        self,
        zmq_ctx,
        kv_store_cmd_url,
        timeout=consts.Consts.TIMEOUT_MS,
        proto_factory=consts.Consts.PROTO_FACTORY,
    ):
        # REQ socket: strict send/recv alternation, one outstanding request.
        self._kv_store_cmd_socket = socket.Socket(
            zmq_ctx, zmq.REQ, timeout, proto_factory
        )
        self._kv_store_cmd_socket.connect(kv_store_cmd_url)
    def get_keys(self, keys):
        """ Get values corresponding to keys from KvStore.
            It gets from local snapshot KeyVals of the kvstore.
            Returns a ``Publication`` thrift object.
        """
        req_msg = kv_store_types.Request(kv_store_types.Command.KEY_GET)
        req_msg.keyGetParams = kv_store_types.KeyGetParams(keys)
        self._kv_store_cmd_socket.send_thrift_obj(req_msg)
        return self._kv_store_cmd_socket.recv_thrift_obj(kv_store_types.Publication)
    def set_key(self, keyVals):
        """ Write the given key/value pairs into KvStore.
            NOTE(review): unlike the other calls this returns the raw reply
            bytes (no thrift deserialization) -- confirm callers only treat
            it as an ack.
        """
        req_msg = kv_store_types.Request(kv_store_types.Command.KEY_SET)
        req_msg.keySetParams = kv_store_types.KeySetParams(keyVals)
        self._kv_store_cmd_socket.send_thrift_obj(req_msg)
        return self._kv_store_cmd_socket.recv()
    def dump_all_with_filter(
        self,
        prefix: str = "",
        originator_ids: Optional[List[str]] = None,
        keyval_hash: Optional[Dict[str, kv_store_types.Value]] = None,
    ):
        """ dump the entries of kvstore whose key matches the given prefix
            if prefix is an empty string, the full KV store is dumped;
            optionally filter by originator ids and/or known key/val hashes.
        """
        req_msg = kv_store_types.Request(kv_store_types.Command.KEY_DUMP)
        req_msg.keyDumpParams = kv_store_types.KeyDumpParams(prefix)
        req_msg.keyDumpParams.originatorIds = []
        req_msg.keyDumpParams.keyValHashes = None
        if originator_ids:
            req_msg.keyDumpParams.originatorIds = originator_ids
        if keyval_hash:
            req_msg.keyDumpParams.keyValHashes = keyval_hash
        self._kv_store_cmd_socket.send_thrift_obj(req_msg)
        return self._kv_store_cmd_socket.recv_thrift_obj(kv_store_types.Publication)
    def dump_key_with_prefix(self, prefix=""):
        """ dump the hashes of kvstore whose key matches the given prefix
            if prefix is an empty string, the full KV hash is dumped
        """
        req_msg = kv_store_types.Request(kv_store_types.Command.HASH_DUMP)
        req_msg.keyDumpParams = kv_store_types.KeyDumpParams(prefix)
        self._kv_store_cmd_socket.send_thrift_obj(req_msg)
        resp = self._kv_store_cmd_socket.recv()
        # Explicit recv + deserialize here; appears equivalent to
        # recv_thrift_obj -- confirm before unifying.
        return serializer.deserialize_thrift_object(
            resp, kv_store_types.Publication, self._kv_store_cmd_socket.proto_factory
        )
    def dump_peers(self):
        """ Dump the KvStore's current peer set as a ``PeerCmdReply``. """
        req_msg = kv_store_types.Request(kv_store_types.Command.PEER_DUMP)
        self._kv_store_cmd_socket.send_thrift_obj(req_msg)
        return self._kv_store_cmd_socket.recv_thrift_obj(kv_store_types.PeerCmdReply)
| StarcoderdataPython |
11255229 | from wepay.tests import CallBaseTestCase
class SubscriptionPlanTestCase(CallBaseTestCase):
    """Exercises the ``/subscription_plan`` API call family.

    Each test hands the endpoint path, the expected positional args, and
    the expected keyword args directly to ``_test_call``, which performs
    the shared call/verify logic.
    """

    def test_subscription_plan(self):
        self._test_call(
            '/subscription_plan',
            [('subscription_plan_id', 12345)],
            {},
        )

    def test_subscription_plan_find(self):
        self._test_call(
            '/subscription_plan/find',
            [],
            {
                'account_id': 54321,
                'start': 10,
                'limit': 17,
                'state': 'expired',
                'reference_id': 'ref_subscription_plan_123',
            },
        )

    def test_subscription_plan_create(self):
        self._test_call(
            '/subscription_plan/create',
            [
                ('account_id', 54321),
                ('name', "Dummy Subscription Plan"),
                ('short_description', "Dummy Subscription_Plan Description"),
                ('amount', 57.90),
                ('period', 'yearly'),
            ],
            {
                'currency': 'USD',
                'app_fee': 4.54,
                'callback_uri': 'https://example.com/callback',
                'trial_length': 3,
                'setup_fee': 2.43,
                'reference_id': 'subscription_plan_ref_321',
            },
        )

    def test_subscription_plan_delete(self):
        self._test_call(
            '/subscription_plan/delete',
            [('subscription_plan_id', 1234)],
            {'reason': "Dummy Subscription_Plan Delete Reason."},
        )

    def test_subscription_plan_get_button(self):
        self._test_call(
            '/subscription_plan/get_button',
            [
                ('account_id', 12345),
                ('button_type', 'subscription_all_plans'),
            ],
            {
                'subscription_plan_id': 1234,
                'button_text': "Dummy Subscription",
                'button_options': {
                    'show_plan_price': True,
                    'show_plans': True,
                    'reference_id': 'ref_button_subscription_plan_123456',
                },
            },
        )

    def test_subscription_plan_modify(self):
        self._test_call(
            '/subscription_plan/modify',
            [('subscription_plan_id', 54321)],
            {
                'name': "<NAME>",
                'short_description': "Dummy Subscription_Plan Description",
                'amount': 27.90,
                'app_fee': 4.54,
                'callback_uri': 'https://example.com/callback',
                'trial_length': 3,
                'setup_fee': 2.43,
                'update_subscriptions': 'paying_lower',
                'transition_expire_days': 5,
                'reference_id': 'subscription_plan_ref_321',
            },
        )
| StarcoderdataPython |
354105 | """
Base classes for collections of samples.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import inspect
import logging
import os
import random
import string
import eta.core.serial as etas
import eta.core.utils as etau
from fiftyone.core.aggregations import Aggregation
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.media as fom
from fiftyone.core.odm.frame import DatasetFrameSampleDocument
from fiftyone.core.odm.sample import (
DatasetSampleDocument,
default_sample_fields,
)
import fiftyone.core.stages as fos
import fiftyone.core.utils as fou
foua = fou.lazy_import("fiftyone.utils.annotations")
foud = fou.lazy_import("fiftyone.utils.data")
logger = logging.getLogger(__name__)
def _make_registrar():
"""Makes a decorator that keeps a registry of all functions decorated by
it.
Usage::
my_decorator = _make_registrar()
my_decorator.all # dictionary mapping names to functions
"""
registry = {}
def registrar(func):
registry[func.__name__] = func
# Normally a decorator returns a wrapped function, but here we return
# `func` unmodified, after registering it
return func
registrar.all = registry
return registrar
# Registry decorator: every method tagged with @view_stage below is recorded
# in view_stage.all, which list_view_stages() uses to enumerate the stages
view_stage = _make_registrar()
class SampleCollection(object):
"""Abstract class representing an ordered collection of
:class:`fiftyone.core.sample.Sample` instances in a
:class:`fiftyone.core.dataset.Dataset`.
"""
def __str__(self):
    """Returns the string representation (same as ``repr``)."""
    return repr(self)
def __repr__(self):
    """Returns the collection's summary string (see :meth:`summary`)."""
    return self.summary()
def __bool__(self):
    """Returns True if the collection contains at least one sample."""
    return len(self) > 0
def __len__(self):
    """Returns the number of samples in the collection.

    Raises:
        NotImplementedError: subclasses must implement this method
    """
    raise NotImplementedError("Subclass must implement __len__()")
def __contains__(self, sample_id):
    """Returns True if a sample with the given ID is in the collection."""
    try:
        self[sample_id]  # lookup raises KeyError when the ID is absent
    except KeyError:
        return False
    else:
        return True
def __getitem__(self, sample_id_or_slice):
    """Returns the sample with the given ID, or the samples for the given
    slice.

    Raises:
        NotImplementedError: subclasses must implement this method
    """
    raise NotImplementedError("Subclass must implement __getitem__()")
def __iter__(self):
    """Returns an iterator over the samples (see :meth:`iter_samples`)."""
    return self.iter_samples()
@property
def name(self):
    """The name of the collection.

    Subclasses must implement this read-only property.
    """
    raise NotImplementedError("Subclass must implement name")
@property
def media_type(self):
    """The media type of the collection.

    Subclasses must implement this read-only property.
    """
    raise NotImplementedError("Subclass must implement media_type")
@property
def info(self):
    """The :meth:`fiftyone.core.dataset.Dataset.info` dict of the dataset
    underlying the collection.

    Subclasses must implement this read-only property.
    """
    raise NotImplementedError("Subclass must implement info")
def _build_aggregation(self, aggregations):
    """Normalizes ``aggregations`` and builds the MongoDB facet pipeline.

    Args:
        aggregations: an :class:`fiftyone.core.aggregations.Aggregation`
            or iterable of them

    Returns:
        a ``(scalar_result, aggregations, pipeline)`` tuple, where
        ``scalar_result`` indicates whether a single aggregation (rather
        than a list) was provided, ``aggregations`` is the normalized
        list, and ``pipeline`` is a one-stage ``$facet`` pipeline (or
        ``None`` if there is nothing to aggregate)

    Raises:
        TypeError: if any element is not an ``Aggregation``
    """
    scalar_result = isinstance(aggregations, Aggregation)
    if scalar_result:
        aggregations = [aggregations]
    elif not aggregations:
        return False, [], None

    # pylint: disable=no-member
    schema = self.get_field_schema()
    if self.media_type == fom.VIDEO:
        frame_schema = self.get_frame_field_schema()
    else:
        frame_schema = None

    # One facet sub-pipeline per aggregation, keyed by its output field
    pipelines = {}
    for agg in aggregations:
        if not isinstance(agg, Aggregation):
            # Fixed error-message typo ("a an" -> "an")
            raise TypeError("'%s' is not an Aggregation" % agg.__class__)

        field = agg._get_output_field(self)
        pipelines[field] = agg._to_mongo(
            self._dataset, schema, frame_schema
        )

    return scalar_result, aggregations, [{"$facet": pipelines}]
def _process_aggregations(self, aggregations, result, scalar_result):
    """Extracts per-aggregation results from a raw ``$facet`` result dict.

    Aggregations whose facet produced no output, or whose result cannot
    be parsed, fall back to their default result.

    Args:
        aggregations: the list of ``Aggregation`` instances
        result: the raw facet result document (dict)
        scalar_result: whether to unwrap and return a single result

    Returns:
        a single result if ``scalar_result``, else a list of results
    """
    results = []
    for agg in aggregations:
        try:
            results.append(
                agg._get_result(result[agg._get_output_field(self)][0])
            )
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Missing key/empty facet
            # output yields the aggregation's default result instead
            results.append(agg._get_default_result())

    return results[0] if scalar_result else results
def aggregate(self, aggregations, _attach_frames=True):
    """Aggregates one or more
    :class:`fiftyone.core.aggregations.Aggregation` instances.

    Note that it is best practice to group aggregations into a single call
    to :meth:`aggregate() <aggregate>`, as this will be more efficient than
    performing multiple aggregations in series.

    Args:
        aggregations: an :class:`fiftyone.core.aggregations.Aggregation` or
            iterable of :class:`<fiftyone.core.aggregations.Aggregation>`
            instances

    Returns:
        an :class:`fiftyone.core.aggregations.AggregationResult` or list of
        :class:`fiftyone.core.aggregations.AggregationResult` instances
        corresponding to the input aggregations
    """
    scalar_result, aggregations, facets = self._build_aggregation(
        aggregations
    )
    if len(aggregations) == 0:
        return []

    # pylint: disable=no-member
    pipeline = self._pipeline(
        pipeline=facets, attach_frames=_attach_frames
    )

    # Default to an empty result so that an empty cursor yields each
    # aggregation's default value; previously `result` was left unbound
    # here, causing a NameError below
    result = {}
    try:
        # pylint: disable=no-member
        result = next(self._dataset._sample_collection.aggregate(pipeline))
    except StopIteration:
        pass

    return self._process_aggregations(aggregations, result, scalar_result)
async def _async_aggregate(self, coll, aggregations):
    """Async variant of :meth:`aggregate` that runs the pipeline on the
    given async (motor-style) collection.

    Args:
        coll: an async collection exposing ``aggregate(...).to_list(n)``
        aggregations: an ``Aggregation`` or iterable of ``Aggregation``
            instances

    Returns:
        a single result or list of results (see :meth:`aggregate`)
    """
    scalar_result, aggregations, facets = self._build_aggregation(
        aggregations
    )
    if not aggregations:
        return []

    # pylint: disable=no-member
    pipeline = self._pipeline(pipeline=facets)

    # Default to an empty result so that an empty cursor yields each
    # aggregation's default value; previously `result` could be left
    # unbound (NameError) and an empty `to_list` raised IndexError
    result = {}
    try:
        # pylint: disable=no-member
        docs = await coll.aggregate(pipeline).to_list(1)
        if docs:
            result = docs[0]
    except StopIteration:
        pass

    return self._process_aggregations(aggregations, result, scalar_result)
def summary(self):
    """Returns a string summary of the collection.

    Returns:
        a string summary

    Raises:
        NotImplementedError: subclasses must implement this method
    """
    raise NotImplementedError("Subclass must implement summary()")
def first(self):
    """Returns the first sample in the collection.

    Returns:
        a :class:`fiftyone.core.sample.Sample` or
        :class:`fiftyone.core.sample.SampleView`

    Raises:
        ValueError: if the collection is empty
    """
    for sample in self:
        return sample

    raise ValueError("%s is empty" % self.__class__.__name__)
def last(self):
    """Returns the last sample in the collection.

    Returns:
        a :class:`fiftyone.core.sample.Sample` or
        :class:`fiftyone.core.sample.SampleView`

    Raises:
        ValueError: if the collection is empty
    """
    # Slice off the final element; `first()` raises if the slice is empty
    return self[-1:].first()
def head(self, num_samples=3):
    """Returns a list of the first few samples in the collection.

    If fewer than ``num_samples`` samples are in the collection, only
    the available samples are returned.

    Args:
        num_samples (3): the number of samples

    Returns:
        a list of :class:`fiftyone.core.sample.Sample` objects
    """
    return list(self[:num_samples])
def tail(self, num_samples=3):
    """Returns a list of the last few samples in the collection.

    If fewer than ``num_samples`` samples are in the collection, only
    the available samples are returned.

    Args:
        num_samples (3): the number of samples

    Returns:
        a list of :class:`fiftyone.core.sample.Sample` objects
    """
    return list(self[-num_samples:])
def iter_samples(self):
    """Returns an iterator over the samples in the collection.

    Returns:
        an iterator over :class:`fiftyone.core.sample.Sample` or
        :class:`fiftyone.core.sample.SampleView` instances

    Raises:
        NotImplementedError: subclasses must implement this method
    """
    raise NotImplementedError("Subclass must implement iter_samples()")
def get_field_schema(
    self, ftype=None, embedded_doc_type=None, include_private=False
):
    """Returns a schema dictionary describing the fields of the samples in
    the collection.

    Args:
        ftype (None): an optional field type to which to restrict the
            returned schema. Must be a subclass of
            :class:`fiftyone.core.fields.Field`
        embedded_doc_type (None): an optional embedded document type to
            which to restrict the returned schema. Must be a subclass of
            :class:`fiftyone.core.odm.BaseEmbeddedDocument`
        include_private (False): whether to include fields that start with
            `_` in the returned schema

    Returns:
        a dictionary mapping field names to field types

    Raises:
        NotImplementedError: subclasses must implement this method
    """
    raise NotImplementedError("Subclass must implement get_field_schema()")
def get_frame_field_schema(
    self, ftype=None, embedded_doc_type=None, include_private=False
):
    """Returns a schema dictionary describing the fields of the frames of
    the samples in the collection.

    Only applicable for video collections.

    Args:
        ftype (None): an optional field type to which to restrict the
            returned schema. Must be a subclass of
            :class:`fiftyone.core.fields.Field`
        embedded_doc_type (None): an optional embedded document type to
            which to restrict the returned schema. Must be a subclass of
            :class:`fiftyone.core.odm.BaseEmbeddedDocument`
        include_private (False): whether to include fields that start with
            `_` in the returned schema

    Returns:
        a dictionary mapping field names to field types, or ``None`` if
        the collection is not a video collection

    Raises:
        NotImplementedError: subclasses must implement this method
    """
    raise NotImplementedError(
        "Subclass must implement get_frame_field_schema()"
    )
def make_unique_field_name(self, root=""):
    """Generates a field name that is unique within this collection's
    schema.

    Args:
        root (""): an optional root for the output field name; a random
            6-character root is generated when omitted

    Returns:
        the field name
    """
    existing = self.get_field_schema()

    candidate = root if root else _get_random_characters(6)
    if candidate in existing:
        candidate += "_" + _get_random_characters(6)

    # Extremely unlikely, but keep appending until the name is unique
    while candidate in existing:
        candidate += _get_random_characters(1)

    return candidate
def validate_fields_exist(self, field_or_fields):
    """Validates that the collection has fields with the given names.

    If ``field_or_fields`` contains an embedded field name such as
    ``field_name.document.field``, only the root ``field_name`` is checked
    for existence.

    Args:
        field_or_fields: a field name or iterable of field names

    Raises:
        ValueError: if one or more of the fields do not exist
    """
    if etau.is_str(field_or_fields):
        field_or_fields = [field_or_fields]

    if self.media_type == fom.VIDEO:
        # Split the requested names into frame-level and sample-level
        frame_fields = [
            name for name in field_or_fields if name.startswith("frames.")
        ]
        field_or_fields = [
            name
            for name in field_or_fields
            if not name.startswith("frames.")
        ]
    else:
        frame_fields = []

    schema = self.get_field_schema(include_private=True)
    default_fields = set(
        default_sample_fields(
            DatasetSampleDocument, include_private=True, include_id=True
        )
    )
    for field in field_or_fields:
        # Only the root of an embedded field path is validated
        root = field.split(".", 1)[0]
        if root not in schema and root not in default_fields:
            raise ValueError("Field '%s' does not exist" % root)

    if self.media_type != fom.VIDEO:
        return

    frame_schema = self.get_frame_field_schema(include_private=True)
    default_frame_fields = set(
        default_sample_fields(
            DatasetFrameSampleDocument,
            include_private=True,
            include_id=True,
        )
    )
    for field in frame_fields:
        # Skip the "frames." prefix and validate only the root field
        root = field.split(".", 2)[1]
        if root not in frame_schema and root not in default_frame_fields:
            raise ValueError("Field '%s' does not exist" % root)
def validate_field_type(
    self, field_name, ftype, embedded_doc_type=None, subfield=None
):
    """Validates that the collection has a field of the given type.

    Args:
        field_name: the field name. Frame-level fields of video
            collections may be specified via the ``"frames."`` prefix
        ftype: the expected field type. Must be a subclass of
            :class:`fiftyone.core.fields.Field`
        embedded_doc_type (None): the
            :class:`fiftyone.core.odm.BaseEmbeddedDocument` type of the
            field. Used only when ``ftype`` is an embedded
            :class:`fiftyone.core.fields.EmbeddedDocumentField`
        subfield (None): the type of the contained field. Used only when
            ``ftype`` is a :class:`fiftyone.core.fields.ListField` or
            :class:`fiftyone.core.fields.DictField`

    Raises:
        ValueError: if the field does not exist or does not have the
            expected type
    """
    frames = self.media_type == fom.VIDEO and field_name.startswith(
        "frames."
    )
    if frames:
        field_name = field_name[len("frames.") :]
        schema = self.get_frame_field_schema()
    else:
        schema = self.get_field_schema()

    # Bug fix: previously a missing sample-level field raised an
    # undocumented KeyError rather than the documented ValueError
    if field_name not in schema:
        raise ValueError("Field '%s' does not exist" % field_name)

    field = schema[field_name]

    if embedded_doc_type is not None:
        if not isinstance(field, fof.EmbeddedDocumentField) or (
            field.document_type is not embedded_doc_type
        ):
            raise ValueError(
                "Field '%s' must be an instance of %s; found %s"
                % (field_name, ftype(embedded_doc_type), field)
            )
    elif subfield is not None:
        if not isinstance(field, (fof.ListField, fof.DictField)):
            raise ValueError(
                "Field type %s must be an instance of %s when a subfield "
                "is provided" % (ftype, (fof.ListField, fof.DictField))
            )

        if not isinstance(field, ftype) or not isinstance(
            field.field, subfield
        ):
            raise ValueError(
                "Field '%s' must be an instance of %s; found %s"
                % (field_name, ftype(field=subfield()), field)
            )
    else:
        if not isinstance(field, ftype):
            raise ValueError(
                "Field '%s' must be an instance of %s; found %s"
                % (field_name, ftype, field)
            )
def get_tags(self):
    """Returns the list of unique tags of samples in the collection.

    Returns:
        a list of tags

    Raises:
        NotImplementedError: subclasses must implement this method
    """
    raise NotImplementedError("Subclass must implement get_tags()")
def compute_metadata(self, overwrite=False):
    """Populates the ``metadata`` field of all samples in the collection.

    Samples that already have metadata are left untouched unless
    ``overwrite`` is True.

    Args:
        overwrite (False): whether to overwrite existing metadata
    """
    with fou.ProgressBar() as progress:
        for sample in progress(self):
            if overwrite or sample.metadata is None:
                sample.compute_metadata()
@classmethod
def list_view_stages(cls):
    """Returns a list of all available methods on this collection that
    apply :class:`fiftyone.core.stages.ViewStage` operations that return
    :class:`fiftyone.core.view.DatasetView` instances.

    Returns:
        a list of :class:`SampleCollection` method names
    """
    # Names were recorded by the @view_stage registrar decorator
    return list(view_stage.all)
def add_stage(self, stage):
    """Applies the given :class:`fiftyone.core.stages.ViewStage` to the
    collection.

    Args:
        stage: a :class:`fiftyone.core.stages.ViewStage`

    Returns:
        a :class:`fiftyone.core.view.DatasetView`

    Raises:
        :class:`fiftyone.core.stages.ViewStageError`: if the stage was not
            a valid stage for this collection
    """
    return self._add_view_stage(stage)
@view_stage
def exclude(self, sample_ids):
    """Excludes the samples with the given IDs from the collection.

    Examples::

        import fiftyone as fo

        dataset = fo.load_dataset(...)

        # Exclude a single sample from a dataset
        view = dataset.exclude("5f3c298768fd4d3baf422d2f")

        # Exclude a list of samples from a dataset
        view = dataset.exclude(
            ["5f3c298768fd4d3baf422d2f", "5f3c298768fd4d3baf422d30"]
        )

    Args:
        sample_ids: a sample ID or iterable of sample IDs

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.Exclude(sample_ids)
    return self._add_view_stage(stage)
@view_stage
def exclude_fields(self, field_names):
    """Excludes the fields with the given names from the returned
    :class:`fiftyone.core.sample.SampleView` instances.

    Note that default fields cannot be excluded.

    Examples::

        import fiftyone as fo

        dataset = fo.load_dataset(...)

        # Exclude a single field from all samples in a dataset
        view = dataset.exclude_fields("predictions")

        # Exclude a list of fields from all samples in a dataset
        view = dataset.exclude_fields(["ground_truth", "predictions"])

    Args:
        field_names: a field name or iterable of field names to exclude

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.ExcludeFields(field_names)
    return self._add_view_stage(stage)
@view_stage
def exclude_objects(self, objects):
    """Excludes the specified objects from the view.

    The returned view will omit the objects specified in the provided
    ``objects`` argument, which is a list of dicts of the form::

        {
            "sample_id": "5f8d254a27ad06815ab89df4",
            "field": "ground_truth",
            "object_id": "5f8d254a27ad06815ab89df3",
        }

    Examples::

        import fiftyone as fo

        dataset = fo.load_dataset(...)

        # Exclude the objects currently selected in the App
        session = fo.launch_app(dataset)

        # Select some objects in the App...

        view = dataset.exclude_objects(session.selected_objects)

    Args:
        objects: a list of dicts specifying the objects to exclude

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.ExcludeObjects(objects)
    return self._add_view_stage(stage)
@view_stage
def exists(self, field, bool=True):
    """Returns a view containing the samples that have (or do not have) a
    non-``None`` value for the given field.

    Examples::

        import fiftyone as fo

        dataset = fo.load_dataset(...)

        # Only include samples that have a value in their `predictions`
        # field
        view = dataset.exists("predictions")

        # Only include samples that do NOT have a value in their
        # `predictions` field
        view = dataset.exists("predictions", False)

    Args:
        field: the field
        bool (True): whether to check if the field exists (True) or does
            not exist (False)

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    # NOTE: the `bool` parameter name shadows the builtin, but it is part
    # of the public interface and is therefore preserved
    stage = fos.Exists(field, bool=bool)
    return self._add_view_stage(stage)
@view_stage
def filter_field(self, field, filter, only_matches=False):
    """Filters the values of a given sample (or embedded document) field.

    Values of ``field`` for which ``filter`` returns ``False`` are
    replaced with ``None``.

    Examples::

        import fiftyone as fo
        from fiftyone import ViewField as F

        dataset = fo.load_dataset(...)

        # Only include classifications in the `predictions` field whose
        # `label` is "cat"
        view = dataset.filter_field("predictions", F("label") == "cat")

        # Only include classifications in the `predictions` field whose
        # `confidence` is greater than 0.8
        view = dataset.filter_field("predictions", F("confidence") > 0.8)

    Args:
        field: the field to filter
        filter: a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            that returns a boolean describing the filter to apply
        only_matches (False): whether to only include samples that match
            the filter

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.FilterField(field, filter, only_matches=only_matches)
    return self._add_view_stage(stage)
@view_stage
def filter_labels(self, field, filter, only_matches=False):
    """Filters the :class:`fiftyone.core.labels.Label` elements in a labels
    list field of each sample.

    The specified ``field`` must be one of the following types:

    -   :class:`fiftyone.core.labels.Classifications`
    -   :class:`fiftyone.core.labels.Detections`
    -   :class:`fiftyone.core.labels.Polylines`
    -   :class:`fiftyone.core.labels.Keypoints`

    Examples::

        import fiftyone as fo
        from fiftyone import ViewField as F

        dataset = fo.load_dataset(...)

        # Only include labels in the `predictions` field whose
        # `confidence` is greater than 0.8
        view = dataset.filter_labels("predictions", F("confidence") > 0.8)

        # Only include labels whose `label` is "cat" or "dog", and only
        # show samples with at least one label after filtering
        view = dataset.filter_labels(
            "predictions",
            F("label").is_in(["cat", "dog"]),
            only_matches=True,
        )

        # Detections: only include detections whose bounding box area is
        # smaller than 0.2
        # bbox is in [top-left-x, top-left-y, width, height] format
        bbox_area = F("bounding_box")[2] * F("bounding_box")[3]
        view = dataset.filter_labels("predictions", bbox_area < 0.2)

        # Polylines: only include filled polylines
        view = dataset.filter_labels("predictions", F("filled"))

        # Polylines: only include polylines with at least 10 vertices
        num_vertices = F("points").map(F().length()).sum()
        view = dataset.filter_labels("predictions", num_vertices >= 10)

        # Keypoints: only include keypoints with at least 10 points
        view = dataset.filter_labels(
            "predictions", F("points").length() >= 10
        )

    Args:
        field: the labels list field to filter
        filter: a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            that returns a boolean describing the filter to apply
        only_matches (False): whether to only include samples with at least
            one label after filtering

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.FilterLabels(field, filter, only_matches=only_matches)
    return self._add_view_stage(stage)
@view_stage
def filter_classifications(self, field, filter, only_matches=False):
    """Filters the classifications of the given
    :class:`fiftyone.core.labels.Classifications` field.

    Elements of ``<field>.classifications`` for which ``filter`` returns
    ``False`` are omitted from the field.

    Examples::

        import fiftyone as fo
        from fiftyone import ViewField as F

        dataset = fo.load_dataset(...)

        # Only include classifications in the `predictions` field whose
        # `confidence` is greater than 0.8
        view = dataset.filter_classifications(
            "predictions", F("confidence") > 0.8
        )

        # Only include classifications whose `label` is "cat" or "dog",
        # and only show samples with at least one classification after
        # filtering
        view = dataset.filter_classifications(
            "predictions", F("label").is_in(["cat", "dog"]), only_matches=True
        )

    Args:
        field: the :class:`fiftyone.core.labels.Classifications` field
        filter: a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            that returns a boolean describing the filter to apply
        only_matches (False): whether to only include samples with at least
            one classification after filtering

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.FilterClassifications(
        field, filter, only_matches=only_matches
    )
    return self._add_view_stage(stage)
@view_stage
def filter_detections(self, field, filter, only_matches=False):
    """Filters the detections of the given
    :class:`fiftyone.core.labels.Detections` field.

    Elements of ``<field>.detections`` for which ``filter`` returns
    ``False`` are omitted from the field.

    Examples::

        import fiftyone as fo
        from fiftyone import ViewField as F

        dataset = fo.load_dataset(...)

        # Only include detections in the `predictions` field whose
        # `confidence` is greater than 0.8
        view = dataset.filter_detections(
            "predictions", F("confidence") > 0.8
        )

        # Only include detections whose `label` is "cat" or "dog", and
        # only show samples with at least one detection after filtering
        view = dataset.filter_detections(
            "predictions", F("label").is_in(["cat", "dog"]), only_matches=True
        )

        # Only include detections whose bounding box area is smaller
        # than 0.2
        # bbox is in [top-left-x, top-left-y, width, height] format
        bbox_area = F("bounding_box")[2] * F("bounding_box")[3]
        view = dataset.filter_detections("predictions", bbox_area < 0.2)

    Args:
        field: the :class:`fiftyone.core.labels.Detections` field
        filter: a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            that returns a boolean describing the filter to apply
        only_matches (False): whether to only include samples with at least
            one detection after filtering

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.FilterDetections(field, filter, only_matches=only_matches)
    return self._add_view_stage(stage)
@view_stage
def filter_polylines(self, field, filter, only_matches=False):
    """Filters the polylines of the given
    :class:`fiftyone.core.labels.Polylines` field.

    Elements of ``<field>.polylines`` for which ``filter`` returns
    ``False`` are omitted from the field.

    Examples::

        import fiftyone as fo
        from fiftyone import ViewField as F

        dataset = fo.load_dataset(...)

        # Only include polylines in the `predictions` field that are
        # filled
        view = dataset.filter_polylines("predictions", F("filled"))

        # Only include polylines whose `label` is "lane", and only show
        # samples with at least one polyline after filtering
        view = dataset.filter_polylines(
            "predictions", F("label") == "lane", only_matches=True
        )

        # Only include polylines with at least 10 vertices
        num_vertices = F("points").map(F().length()).sum()
        view = dataset.filter_polylines("predictions", num_vertices >= 10)

    Args:
        field: the :class:`fiftyone.core.labels.Polylines` field
        filter: a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            that returns a boolean describing the filter to apply
        only_matches (False): whether to only include samples with at least
            one polyline after filtering

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.FilterPolylines(field, filter, only_matches=only_matches)
    return self._add_view_stage(stage)
@view_stage
def filter_keypoints(self, field, filter, only_matches=False):
    """Filters the keypoints of the given
    :class:`fiftyone.core.labels.Keypoints` field.

    Elements of ``<field>.keypoints`` for which ``filter`` returns
    ``False`` are omitted from the field.

    Examples::

        import fiftyone as fo
        from fiftyone import ViewField as F

        dataset = fo.load_dataset(...)

        # Only include keypoints in the `predictions` field whose `label`
        # is "face", and only show samples with at least one keypoint
        # after filtering
        view = dataset.filter_keypoints(
            "predictions", F("label") == "face", only_matches=True
        )

        # Only include keypoints with at least 10 points
        view = dataset.filter_keypoints(
            "predictions", F("points").length() >= 10
        )

    Args:
        field: the :class:`fiftyone.core.labels.Keypoints` field
        filter: a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            that returns a boolean describing the filter to apply
        only_matches (False): whether to only include samples with at least
            one keypoint after filtering

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.FilterKeypoints(field, filter, only_matches=only_matches)
    return self._add_view_stage(stage)
@view_stage
def limit(self, limit):
    """Returns a view with at most the given number of samples.

    Examples::

        import fiftyone as fo

        dataset = fo.load_dataset(...)

        # Only include the first 10 samples in the view
        view = dataset.limit(10)

    Args:
        limit: the maximum number of samples to return. If a non-positive
            number is provided, an empty view is returned

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.Limit(limit)
    return self._add_view_stage(stage)
@view_stage
def limit_labels(self, field, limit):
    """Limits the number of :class:`fiftyone.core.labels.Label` instances
    in the specified labels list field of each sample.

    The specified ``field`` must be one of the following types:

    -   :class:`fiftyone.core.labels.Classifications`
    -   :class:`fiftyone.core.labels.Detections`
    -   :class:`fiftyone.core.labels.Polylines`
    -   :class:`fiftyone.core.labels.Keypoints`

    Examples::

        import fiftyone as fo

        dataset = fo.load_dataset(...)

        # Only include the first 5 detections in the `ground_truth` field
        # of the view
        view = dataset.limit_labels("ground_truth", 5)

    Args:
        field: the labels list field to filter
        limit: the maximum number of labels to include in each labels list.
            If a non-positive number is provided, all lists will be empty

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.LimitLabels(field, limit)
    return self._add_view_stage(stage)
@view_stage
def match(self, filter):
    """Filters the samples in the collection by the given filter.

    Samples for which ``filter`` returns ``False`` are omitted.

    Examples::

        import fiftyone as fo
        from fiftyone import ViewField as F

        dataset = fo.load_dataset(...)

        # Only include samples whose `filepath` ends with ".jpg"
        view = dataset.match(F("filepath").ends_with(".jpg"))

        # Only include samples whose `predictions` field (assume it is a
        # `Classification` field) has `label` of "cat"
        view = dataset.match(F("predictions").label == "cat")

        # Only include samples whose `predictions` field (assume it is a
        # `Detections` field) has at least 5 detections
        view = dataset.match(F("predictions").detections.length() >= 5)

        # Only include samples whose `predictions` field has at least one
        # detection with area smaller than 0.2
        # bbox is in [top-left-x, top-left-y, width, height] format
        pred_bbox = F("predictions.detections.bounding_box")
        pred_bbox_area = pred_bbox[2] * pred_bbox[3]
        view = dataset.match((pred_bbox_area < 0.2).length() > 0)

    Args:
        filter: a :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            that returns a boolean describing the filter to apply

    Returns:
        a :class:`fiftyone.core.view.DatasetView`
    """
    stage = fos.Match(filter)
    return self._add_view_stage(stage)
@view_stage
def match_tag(self, tag):
"""Returns a view containing the samples that have the given tag.
Examples::
import fiftyone as fo
dataset = fo.load_dataset(...)
#
# Only include samples that have the "test" tag
#
view = dataset.match_tag("test")
Args:
tag: a tag
Returns:
a :class:`fiftyone.core.view.DatasetView`
"""
return self._add_view_stage(fos.MatchTag(tag))
@view_stage
def match_tags(self, tags):
"""Returns a view containing the samples that have any of the given
tags.
To match samples that must contain multiple tags, chain multiple
:meth:`match_tag` or :meth:`match_tags` calls together.
Examples::
import fiftyone as fo
dataset = fo.load_dataset(...)
#
# Only include samples that have either the "test" or "validation"
# tag
#
view = dataset.match_tags(["test", "validation"])
Args:
tags: an iterable of tags
Returns:
a :class:`fiftyone.core.view.DatasetView`
"""
return self._add_view_stage(fos.MatchTags(tags))
@view_stage
def mongo(self, pipeline):
"""Adds a view stage defined by a raw MongoDB aggregation pipeline.
See `MongoDB aggregation pipelines <https://docs.mongodb.com/manual/core/aggregation-pipeline/>`_
for more details.
Examples::
import fiftyone as fo
dataset = fo.load_dataset(...)
#
# Extract a view containing the 6th through 15th samples in the
# dataset
#
view = dataset.mongo([{"$skip": 5}, {"$limit": 10}])
#
# Sort by the number of detections in the `precictions` field of
# the samples (assume it is a `Detections` field)
#
view = dataset.mongo([
{
"$addFields": {
"_sort_field": {
"$size": {
"$ifNull": ["$predictions.detections", []]
}
}
}
},
{"$sort": {"_sort_field": -1}},
{"$unset": "_sort_field"}
])
Args:
pipeline: a MongoDB aggregation pipeline (list of dicts)
Returns:
a :class:`fiftyone.core.view.DatasetView`
"""
return self._add_view_stage(fos.Mongo(pipeline))
@view_stage
def select(self, sample_ids):
"""Returns a view containing only the samples with the given IDs.
Examples::
import fiftyone as fo
dataset = fo.load_dataset(...)
#
# Select the samples with the given IDs from the dataset
#
view = dataset.select([
"5f3c298768fd4d3baf422d34",
"5f3c298768fd4d3baf422d35",
"5f3c298768fd4d3baf422d36",
])
#
# Create a view containing the currently selected samples in the
# App
#
session = fo.launch_app(dataset=dataset)
# Select samples in the App...
view = dataset.select(session.selected)
Args:
sample_ids: a sample ID or iterable of sample IDs
Returns:
a :class:`fiftyone.core.view.DatasetView`
"""
return self._add_view_stage(fos.Select(sample_ids))
@view_stage
def select_fields(self, field_names=None):
"""Selects the fields with the given names as the *only* fields
present in the returned :class:`fiftyone.core.sample.SampleView`
instances. All other fields are excluded.
Note that default sample fields are always selected and will be added
if not included in ``field_names``.
Examples::
import fiftyone as fo
dataset = fo.load_dataset(...)
#
# Include only the default fields on each sample
#
view = dataset.select_fields()
#
# Include only the `ground_truth` field (and the default fields) on
# each sample
#
view = dataset.select_fields("ground_truth")
Args:
field_names (None): a field name or iterable of field names to
select. If not specified, just the default fields will be
selected
Returns:
a :class:`DatasetView`
"""
return self._add_view_stage(fos.SelectFields(field_names))
@view_stage
def select_objects(self, objects):
"""Selects only the specified objects from the view.
The returned view will omit samples, sample fields, and individual
objects that do not appear in the provided ``objects`` argument, which
should have the following format::
[
{
"sample_id": "5f8d254a27ad06815ab89df4",
"field": "ground_truth",
"object_id": "5f8d254a27ad06815ab89df3",
},
{
"sample_id": "5f8d255e27ad06815ab93bf8",
"field": "ground_truth",
"object_id": "5f8d255e27ad06815ab93bf6",
},
...
]
Examples::
import fiftyone as fo
dataset = fo.load_dataset(...)
#
# Only include the objects currently selected in the App
#
session = fo.launch_app(dataset)
# Select some objects in the App...
view = dataset.select_objects(session.selected_objects)
Args:
objects: a list of dicts specifying the objects to select
"""
return self._add_view_stage(fos.SelectObjects(objects))
@view_stage
def shuffle(self, seed=None):
"""Randomly shuffles the samples in the collection.
Examples::
import fiftyone as fo
dataset = fo.load_dataset(...)
#
# Return a view that contains a randomly shuffled version of the
# samples in the dataset
#
view = dataset.shuffle()
#
# Shuffle the samples with a set random seed
#
view = dataset.shuffle(seed=51)
Args:
seed (None): an optional random seed to use when shuffling the
samples
Returns:
a :class:`fiftyone.core.view.DatasetView`
"""
return self._add_view_stage(fos.Shuffle(seed=seed))
@view_stage
def skip(self, skip):
"""Omits the given number of samples from the head of the collection.
Examples::
import fiftyone as fo
dataset = fo.load_dataset(...)
#
# Omit the first 10 samples from the dataset
#
view = dataset.skip(10)
Args:
skip: the number of samples to skip. If a non-positive number is
provided, no samples are omitted
Returns:
a :class:`fiftyone.core.view.DatasetView`
"""
return self._add_view_stage(fos.Skip(skip))
@view_stage
def sort_by(self, field_or_expr, reverse=False):
"""Sorts the samples in the collection by the given field or
expression.
When sorting by an expression, ``field_or_expr`` can either be a
:class:`fiftyone.core.expressions.ViewExpression` or a
`MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
that defines the quantity to sort by.
Examples::
import fiftyone as fo
from fiftyone import ViewField as F
dataset = fo.load_dataset(...)
#
# Sorts the samples in descending order by the `confidence` of
# their `predictions` field (assume it is a `Classification` field)
#
view = dataset.sort_by("predictions.confidence", reverse=True)
#
# Sorts the samples in ascending order by the number of detections
# in their `predictions` field (assume it is a `Detections` field)
# whose bounding box area is at most 0.2
#
# bbox is in [top-left-x, top-left-y, width, height] format
pred_bbox = F("predictions.detections.bounding_box")
pred_bbox_area = pred_bbox[2] * pred_bbox[3]
view = dataset.sort_by((pred_bbox_area < 0.2).length())
Args:
field_or_expr: the field or expression to sort by
reverse (False): whether to return the results in descending order
Returns:
a :class:`fiftyone.core.view.DatasetView`
"""
return self._add_view_stage(fos.SortBy(field_or_expr, reverse=reverse))
@view_stage
def take(self, size, seed=None):
"""Randomly samples the given number of samples from the collection.
Examples::
import fiftyone as fo
dataset = fo.load_dataset(...)
#
# Take 10 random samples from the dataset
#
view = dataset.take(10)
#
# Take 10 random samples from the dataset with a set seed
#
view = dataset.take(10, seed=51)
Args:
size: the number of samples to return. If a non-positive number is
provided, an empty view is returned
seed (None): an optional random seed to use when selecting the
samples
Returns:
a :class:`fiftyone.core.view.DatasetView`
"""
return self._add_view_stage(fos.Take(size, seed=seed))
def draw_labels(
self,
anno_dir,
label_fields=None,
overwrite=False,
annotation_config=None,
):
"""Renders annotated versions of the samples in the collection with
label field(s) overlaid to the given directory.
The filenames of the sample data are maintained, unless a name conflict
would occur in ``anno_dir``, in which case an index of the form
``"-%d" % count`` is appended to the base filename.
Images are written in format ``fo.config.default_image_ext``.
Args:
anno_dir: the directory to write the annotated files
label_fields (None): a list of :class:`fiftyone.core.labels.Label`
fields to render. By default, all
:class:`fiftyone.core.labels.Label` fields are drawn
overwrite (False): whether to delete ``anno_dir`` if it exists
before rendering the labels
annotation_config (None): an
:class:`fiftyone.utils.annotations.AnnotationConfig` specifying
how to render the annotations
Returns:
the list of paths to the labeled images
"""
if os.path.isdir(anno_dir):
if overwrite:
etau.delete_dir(anno_dir)
else:
logger.warning(
"Directory '%s' already exists; outputs will be merged "
"with existing files",
anno_dir,
)
if self.media_type == fom.VIDEO:
if label_fields is None:
label_fields = _get_frame_label_fields(self)
return foua.draw_labeled_videos(
self,
anno_dir,
label_fields=label_fields,
annotation_config=annotation_config,
)
if label_fields is None:
label_fields = _get_image_label_fields(self)
return foua.draw_labeled_images(
self,
anno_dir,
label_fields=label_fields,
annotation_config=annotation_config,
)
    def export(
        self,
        export_dir=None,
        dataset_type=None,
        dataset_exporter=None,
        label_field=None,
        label_prefix=None,
        labels_dict=None,
        frame_labels_field=None,
        frame_labels_prefix=None,
        frame_labels_dict=None,
        overwrite=False,
        **kwargs
    ):
        """Exports the samples in the collection to disk.

        Provide either ``export_dir`` and ``dataset_type`` or
        ``dataset_exporter`` to perform an export.

        See :ref:`this guide <custom-dataset-exporter>` for more details about
        exporting datasets in custom formats by defining your own
        :class:`DatasetExporter <fiftyone.utils.data.exporters.DatasetExporter>`.

        Args:
            export_dir (None): the directory to which to export the samples in
                format ``dataset_type``
            dataset_type (None): the
                :class:`fiftyone.types.dataset_types.Dataset` type to write. If
                not specified, the default type for ``label_field`` is used
            dataset_exporter (None): a
                :class:`fiftyone.utils.data.exporters.DatasetExporter` to use
                to export the samples
            label_field (None): the name of the label field to export. Only
                applicable to labeled image datasets or labeled video datasets
                with sample-level labels. If none of ``label_field``,
                ``label_prefix``, and ``labels_dict`` are specified and the
                requested output type is a labeled image dataset or labeled
                video dataset with sample-level labels, the first field of
                compatible type for the output format is used
            label_prefix (None): a label field prefix; all fields whose name
                starts with the given prefix will be exported (with the prefix
                removed when constructing the label dicts). Only applicable to
                labeled image datasets or labeled video datasets with
                sample-level labels. This parameter can only be used when the
                exporter can handle dictionaries of labels
            labels_dict (None): a dictionary mapping label field names to keys
                to use when constructing the label dict to pass to the
                exporter. Only applicable to labeled image datasets or labeled
                video datasets with sample-level labels. This parameter can
                only be used when the exporter can handle dictionaries of
                labels
            frame_labels_field (None): the name of the frame labels field to
                export. Only applicable for labeled video datasets. If none of
                ``frame_labels_field``, ``frame_labels_prefix``, and
                ``frame_labels_dict`` are specified and the requested output
                type is a labeled video dataset with frame-level labels, the
                first frame-level field of compatible type for the output
                format is used
            frame_labels_prefix (None): a frame labels field prefix; all
                frame-level fields whose name starts with the given prefix will
                be exported (with the prefix removed when constructing the
                frame label dicts). Only applicable for labeled video datasets.
                This parameter can only be used when the exporter can handle
                dictionaries of frame-level labels
            frame_labels_dict (None): a dictionary mapping frame-level label
                field names to keys to use when constructing the frame labels
                dicts to pass to the exporter. Only applicable for labeled
                video datasets. This parameter can only be used when the
                exporter can handle dictionaries of frame-level labels
            overwrite (False): when an ``export_dir`` is provided, whether to
                delete the existing directory before performing the export
            **kwargs: optional keyword arguments to pass to
                ``dataset_type.get_dataset_exporter_cls(export_dir, **kwargs)``
        """
        if dataset_type is None and dataset_exporter is None:
            raise ValueError(
                "Either `dataset_type` or `dataset_exporter` must be provided"
            )
        # Allow callers to pass the type class itself rather than an instance
        if dataset_type is not None and inspect.isclass(dataset_type):
            dataset_type = dataset_type()
        # If no dataset exporter was provided, construct one based on the
        # dataset type
        # NOTE(review): this path assumes `export_dir` was provided when no
        # exporter is given — verify against callers
        if dataset_exporter is None:
            if os.path.isdir(export_dir):
                if overwrite:
                    etau.delete_dir(export_dir)
                else:
                    logger.warning(
                        "Directory '%s' already exists; export will be merged "
                        "with existing files",
                        export_dir,
                    )
            dataset_exporter_cls = dataset_type.get_dataset_exporter_cls()
            try:
                dataset_exporter = dataset_exporter_cls(export_dir, **kwargs)
            except Exception as e:
                # Surface a friendlier error naming the exporter class that
                # could not be constructed
                exporter_name = dataset_exporter_cls.__name__
                raise ValueError(
                    "Failed to construct exporter using syntax "
                    "%s(export_dir, **kwargs); you may need to supply "
                    "mandatory arguments to the constructor via `kwargs`. "
                    "Please consult the documentation of `%s` to learn more"
                    % (
                        exporter_name,
                        etau.get_class_name(dataset_exporter_cls),
                    )
                ) from e
        # Get label field(s) to export, depending on what the exporter handles
        if isinstance(dataset_exporter, foud.LabeledImageDatasetExporter):
            # Labeled images: sample-level labels are required
            label_field_or_dict = get_label_fields(
                self,
                label_field=label_field,
                label_prefix=label_prefix,
                labels_dict=labels_dict,
                dataset_exporter=dataset_exporter,
                required=True,
            )
            frame_labels_field_or_dict = None
        elif isinstance(dataset_exporter, foud.LabeledVideoDatasetExporter):
            # Labeled videos: either sample-level or frame-level labels (or
            # both) may be present, but at least one must be found
            label_field_or_dict = get_label_fields(
                self,
                label_field=label_field,
                label_prefix=label_prefix,
                labels_dict=labels_dict,
                dataset_exporter=dataset_exporter,
                required=False,
            )
            frame_labels_field_or_dict = get_frame_labels_fields(
                self,
                frame_labels_field=frame_labels_field,
                frame_labels_prefix=frame_labels_prefix,
                frame_labels_dict=frame_labels_dict,
                dataset_exporter=dataset_exporter,
                required=False,
            )
            if (
                label_field_or_dict is None
                and frame_labels_field_or_dict is None
            ):
                raise ValueError(
                    "Unable to locate compatible sample or frame-level "
                    "field(s) to export"
                )
        else:
            # Other (unlabeled, entire samples, etc)
            label_field_or_dict = None
            frame_labels_field_or_dict = None
        # Export the dataset
        foud.export_samples(
            self,
            dataset_exporter=dataset_exporter,
            label_field_or_dict=label_field_or_dict,
            frame_labels_field_or_dict=frame_labels_field_or_dict,
        )
def create_index(self, field):
"""Creates a database index on the given field, enabling efficient
sorting on that field.
Args:
field: the name of the field to index
"""
raise NotImplementedError("Subclass must implement make_index()")
    def to_dict(self, rel_dir=None, frame_labels_dir=None, pretty_print=False):
        """Returns a JSON dictionary representation of the collection.

        Args:
            rel_dir (None): a relative directory to remove from the
                ``filepath`` of each sample, if possible. The path is converted
                to an absolute path (if necessary) via
                ``os.path.abspath(os.path.expanduser(rel_dir))``. The typical
                use case for this argument is that your source data lives in
                a single directory and you wish to serialize relative, rather
                than absolute, paths to the data within that directory
            frame_labels_dir (None): a directory in which to write per-sample
                JSON files containing the frame labels for video samples. If
                omitted, frame labels will be included directly in the returned
                JSON dict (which can be quite large for video datasets
                containing many frames). Only applicable to video datasets
            pretty_print (False): whether to render frame labels JSON in human
                readable format with newlines and indentations. Only applicable
                to video datasets when a ``frame_labels_dir`` is provided

        Returns:
            a JSON dict
        """
        if rel_dir is not None:
            # Normalize and append the path separator so the prefix strip
            # below also removes the trailing separator
            rel_dir = (
                os.path.abspath(os.path.expanduser(rel_dir)) + os.path.sep
            )
            len_rel_dir = len(rel_dir)
        is_video = self.media_type == fom.VIDEO
        # Frame labels are written to sidecar files only for video datasets
        # when a destination directory was provided
        write_frame_labels = is_video and frame_labels_dir is not None
        d = {
            "name": self.name,
            "media_type": self.media_type,
            "num_samples": len(self),
            "sample_fields": self._serialize_field_schema(),
        }
        if is_video:
            d["frame_fields"] = self._serialize_frame_field_schema()
        d["info"] = self.info
        # Serialize samples
        samples = []
        with fou.ProgressBar() as pb:
            for sample in pb(self):
                sd = sample.to_dict(include_frames=True)
                if write_frame_labels:
                    # Move this sample's frames into a sidecar JSON file and
                    # store only the filename in the serialized sample
                    frames = {"frames": sd.pop("frames", {})}
                    filename = sample.id + ".json"
                    sd["frames"] = filename
                    frames_path = os.path.join(frame_labels_dir, filename)
                    etas.write_json(
                        frames, frames_path, pretty_print=pretty_print
                    )
                # Strip the relative directory prefix, when applicable
                if rel_dir and sd["filepath"].startswith(rel_dir):
                    sd["filepath"] = sd["filepath"][len_rel_dir:]
                samples.append(sd)
        d["samples"] = samples
        return d
def to_json(self, rel_dir=None, frame_labels_dir=None, pretty_print=False):
"""Returns a JSON string representation of the collection.
The samples will be written as a list in a top-level ``samples`` field
of the returned dictionary.
Args:
rel_dir (None): a relative directory to remove from the
``filepath`` of each sample, if possible. The path is converted
to an absolute path (if necessary) via
``os.path.abspath(os.path.expanduser(rel_dir))``. The typical
use case for this argument is that your source data lives in
a single directory and you wish to serialize relative, rather
than absolute, paths to the data within that directory
frame_labels_dir (None): a directory in which to write per-sample
JSON files containing the frame labels for video samples. If
omitted, frame labels will be included directly in the returned
JSON dict (which can be quite quite large for video datasets
containing many frames). Only applicable to video datasets
pretty_print (False): whether to render the JSON in human readable
format with newlines and indentations
Returns:
a JSON string
"""
d = self.to_dict(
rel_dir=rel_dir,
frame_labels_dir=frame_labels_dir,
pretty_print=pretty_print,
)
return etas.json_to_str(d, pretty_print=pretty_print)
def write_json(
self,
json_path,
rel_dir=None,
frame_labels_dir=None,
pretty_print=False,
):
"""Writes the colllection to disk in JSON format.
Args:
json_path: the path to write the JSON
rel_dir (None): a relative directory to remove from the
``filepath`` of each sample, if possible. The path is converted
to an absolute path (if necessary) via
``os.path.abspath(os.path.expanduser(rel_dir))``. The typical
use case for this argument is that your source data lives in
a single directory and you wish to serialize relative, rather
than absolute, paths to the data within that directory
frame_labels_dir (None): a directory in which to write per-sample
JSON files containing the frame labels for video samples. If
omitted, frame labels will be included directly in the returned
JSON dict (which can be quite quite large for video datasets
containing many frames). Only applicable to video datasets
pretty_print (False): whether to render the JSON in human readable
format with newlines and indentations
"""
d = self.to_dict(
rel_dir=rel_dir,
frame_labels_dir=frame_labels_dir,
pretty_print=pretty_print,
)
etas.write_json(d, json_path, pretty_print=pretty_print)
def _add_view_stage(self, stage):
"""Returns a :class:`fiftyone.core.view.DatasetView` containing the
contents of the collection with the given
:class:fiftyone.core.stages.ViewStage` appended to its aggregation
pipeline.
Subclasses are responsible for performing any validation on the view
stage to ensure that it is a valid stage to add to this collection.
Args:
stage: a :class:fiftyone.core.stages.ViewStage`
Returns:
a :class:`fiftyone.core.view.DatasetView`
Raises:
:class:`fiftyone.core.stages.ViewStageError`: if the stage was not
a valid stage for this collection
"""
raise NotImplementedError("Subclass must implement _add_view_stage()")
def _aggregate(
self, pipeline=None, hide_frames=False, squash_frames=False
):
"""Runs the MongoDB aggregation pipeline on the collection and returns
the result.
Args:
pipeline (None): a MongoDB aggregation pipeline (list of dicts)
hide_frames (False): whether to hide frames in the result
squash_frames (False): whether to squash frames in the result
Returns:
the aggregation result dict
"""
raise NotImplementedError("Subclass must implement _aggregate()")
def _attach_frames(self, hide_frames=False):
key = "_frames" if hide_frames else "frames"
# pylint: disable=no-member
return [
{
"$lookup": {
"from": self._frame_collection_name,
"localField": "_id",
"foreignField": "_sample_id",
"as": key,
}
}
]
    def _serialize(self):
        """Returns an extended-JSON dict representation of the backing
        dataset document."""
        # pylint: disable=no-member
        return self._doc.to_dict(extended=True)
    def _serialize_field_schema(self):
        """Serializes the sample field schema into a name -> string dict."""
        return self._serialize_schema(self.get_field_schema())
    def _serialize_frame_field_schema(self):
        """Serializes the frame field schema into a name -> string dict."""
        return self._serialize_schema(self.get_frame_field_schema())
def _serialize_schema(self, schema):
return {field_name: str(field) for field_name, field in schema.items()}
def _get_random_characters(n):
return "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(n)
)
def get_label_fields(
    sample_collection,
    label_field=None,
    label_prefix=None,
    labels_dict=None,
    dataset_exporter=None,
    required=False,
    force_dict=False,
):
    """Gets the label field(s) of the sample collection matching the specified
    arguments.

    Provide one of ``label_field``, ``label_prefix``, ``labels_dict``, or
    ``dataset_exporter``.

    Args:
        sample_collection: a :class:`SampleCollection`
        label_field (None): the name of the label field to export
        label_prefix (None): a label field prefix; the returned labels dict
            will contain all fields whose name starts with the given prefix
        labels_dict (None): a dictionary mapping label field names to keys
        dataset_exporter (None): a
            :class:`fiftyone.utils.data.exporters.DatasetExporter` to use to
            choose appropriate label field(s)
        required (False): whether at least one matching field must be found
        force_dict (False): whether to always return a labels dict rather than
            an individual label field

    Returns:
        a label field or dict mapping label fields to keys
    """
    # Precedence: an explicit prefix expands into a labels dict, which takes
    # priority over a single `label_field`
    if label_prefix is not None:
        labels_dict = _get_labels_dict_for_prefix(
            sample_collection, label_prefix
        )
    if labels_dict is not None:
        return labels_dict
    # Fall back to the exporter's preferred field(s), if an exporter was given
    if label_field is None and dataset_exporter is not None:
        label_field = _get_default_label_fields_for_exporter(
            sample_collection, dataset_exporter, required=required
        )
    if label_field is None and required:
        raise ValueError(
            "Unable to find any label fields matching the provided arguments"
        )
    # Normalize a single field into {field: field} form when requested
    if (
        force_dict
        and label_field is not None
        and not isinstance(label_field, dict)
    ):
        return {label_field: label_field}
    return label_field
def get_frame_labels_fields(
    sample_collection,
    frame_labels_field=None,
    frame_labels_prefix=None,
    frame_labels_dict=None,
    dataset_exporter=None,
    required=False,
    force_dict=False,
):
    """Gets the frame label field(s) of the sample collection matching the
    specified arguments.

    Provide one of ``frame_labels_field``, ``frame_labels_prefix``,
    ``frame_labels_dict``, or ``dataset_exporter``.

    Args:
        sample_collection: a :class:`SampleCollection`
        frame_labels_field (None): the name of the frame labels field to
            export
        frame_labels_prefix (None): a frame labels field prefix; the returned
            labels dict will contain all frame-level fields whose name starts
            with the given prefix
        frame_labels_dict (None): a dictionary mapping frame-level label field
            names to keys
        dataset_exporter (None): a
            :class:`fiftyone.utils.data.exporters.DatasetExporter` to use to
            choose appropriate frame label field(s)
        required (False): whether at least one matching frame field must be
            found
        force_dict (False): whether to always return a labels dict rather than
            an individual label field

    Returns:
        a frame label field or dict mapping frame label fields to keys
    """
    # Precedence: an explicit prefix expands into a labels dict, which takes
    # priority over a single `frame_labels_field`
    if frame_labels_prefix is not None:
        frame_labels_dict = _get_frame_labels_dict_for_prefix(
            sample_collection, frame_labels_prefix
        )
    if frame_labels_dict is not None:
        return frame_labels_dict
    # Fall back to the exporter's preferred frame field(s), if one was given
    if frame_labels_field is None and dataset_exporter is not None:
        frame_labels_field = _get_default_frame_label_fields_for_exporter(
            sample_collection, dataset_exporter, required=required
        )
    if frame_labels_field is None and required:
        raise ValueError(
            "Unable to find any frame label fields matching the provided "
            "arguments"
        )
    # Normalize a single field into {field: field} form when requested
    if (
        force_dict
        and frame_labels_field is not None
        and not isinstance(frame_labels_field, dict)
    ):
        return {frame_labels_field: frame_labels_field}
    return frame_labels_field
def _get_image_label_fields(sample_collection):
    """Returns the names of all sample-level ``ImageLabel`` fields."""
    schema = sample_collection.get_field_schema(
        ftype=fof.EmbeddedDocumentField, embedded_doc_type=fol.ImageLabel
    )
    return list(schema)
def _get_frame_label_fields(sample_collection):
    """Returns the names of all frame-level ``ImageLabel`` fields."""
    schema = sample_collection.get_frame_field_schema(
        ftype=fof.EmbeddedDocumentField, embedded_doc_type=fol.ImageLabel
    )
    return list(schema)
def _get_labels_dict_for_prefix(sample_collection, label_prefix):
    """Builds a labels dict covering every sample-level ``Label`` field whose
    name starts with ``label_prefix``."""
    schema = sample_collection.get_field_schema(
        ftype=fof.EmbeddedDocumentField, embedded_doc_type=fol.Label
    )
    return _make_labels_dict_for_prefix(schema, label_prefix)
def _get_frame_labels_dict_for_prefix(sample_collection, frame_labels_prefix):
    """Builds a labels dict covering every frame-level ``Label`` field whose
    name starts with ``frame_labels_prefix``."""
    schema = sample_collection.get_frame_field_schema(
        ftype=fof.EmbeddedDocumentField, embedded_doc_type=fol.Label
    )
    return _make_labels_dict_for_prefix(schema, frame_labels_prefix)
def _make_labels_dict_for_prefix(label_fields, label_prefix):
labels_dict = {}
for field_name in label_fields:
if field_name.startswith(label_prefix):
labels_dict[field_name] = field_name[len(label_prefix) :]
return labels_dict
def _get_default_label_fields_for_exporter(
    sample_collection, dataset_exporter, required=True
):
    """Chooses default sample-level label field(s) compatible with the
    exporter's declared ``label_cls``; raises or returns ``None`` per
    ``required`` when no compatible field exists."""
    label_cls = dataset_exporter.label_cls
    if label_cls is None:
        # Exporter declares no label type, so no default can be chosen
        if required:
            raise ValueError(
                "Cannot select a default field when exporter does not provide "
                "a `label_cls`"
            )
        return None
    label_fields = sample_collection.get_field_schema(
        ftype=fof.EmbeddedDocumentField, embedded_doc_type=fol.Label
    )
    label_field_or_dict = _get_fields_with_types(label_fields, label_cls)
    if label_field_or_dict is not None:
        return label_field_or_dict
    #
    # SPECIAL CASE
    #
    # The export routine can convert `Classification` labels to Detections`
    # format just-in-time, if necessary. So, allow a `Classification` field
    # to be returned here
    #
    if label_cls is fol.Detections:
        for field, field_type in label_fields.items():
            if issubclass(field_type.document_type, fol.Classification):
                return field
    if required:
        raise ValueError("No compatible field(s) of type %s found" % label_cls)
    return None
def _get_default_frame_label_fields_for_exporter(
    sample_collection, dataset_exporter, required=True
):
    """Chooses default frame-level label field(s) compatible with the
    exporter's declared ``frame_labels_cls``; raises or returns ``None`` per
    ``required`` when no compatible field exists."""
    frame_labels_cls = dataset_exporter.frame_labels_cls
    if frame_labels_cls is None:
        # Exporter declares no frame label type, so no default can be chosen
        if required:
            raise ValueError(
                "Cannot select a default frame field when exporter does not "
                "provide a `frame_labels_cls`"
            )
        return None
    frame_labels_fields = sample_collection.get_frame_field_schema(
        ftype=fof.EmbeddedDocumentField, embedded_doc_type=fol.Label
    )
    frame_labels_field_or_dict = _get_fields_with_types(
        frame_labels_fields, frame_labels_cls
    )
    if frame_labels_field_or_dict is not None:
        return frame_labels_field_or_dict
    if required:
        raise ValueError(
            "No compatible frame field(s) of type %s found" % frame_labels_cls
        )
    return None
def _get_fields_with_types(label_fields, label_cls):
    """Resolves field(s) matching ``label_cls``, which may be a single class
    or a dict mapping output keys to classes."""
    if not isinstance(label_cls, dict):
        # Single class: return the first matching field, if any
        return _get_field_with_type(label_fields, label_cls)

    # Dict of classes: map the first matching field for each entry to its
    # requested key
    labels_dict = {}
    for name, cls in label_cls.items():
        field = _get_field_with_type(label_fields, cls)
        if field is not None:
            labels_dict[field] = name

    return labels_dict or None
def _get_field_with_type(label_fields, label_cls):
for field, field_type in label_fields.items():
if issubclass(field_type.document_type, label_cls):
return field
return None
| StarcoderdataPython |
11314505 | from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
import time
from urllib.parse import unquote
from baike import url_manager, html_downloader, html_parser, html_outputer
class SpiderMain(object):
    """Simple multi-threaded crawler for Baidu Baike pages.

    Wires together a URL manager, HTML downloader, HTML parser, and HTML
    outputer from the ``baike`` package.
    """

    def __init__(self):
        # Collaborators managing the crawl frontier, fetching, parsing, and
        # result collection, respectively
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownLoader()
        self.praser = html_parser.HtmlPraser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        """Crawls pages starting from ``root_url`` and writes the collected
        data via the outputer.

        Args:
            root_url: the URL to seed the crawl with
        """
        # Seed the frontier with the root URL
        self.urls.add_new_url(root_url)
        # Fetch the first page synchronously so the frontier contains links
        # before the worker threads start
        self.task(0)
        time.sleep(1)
        time1 = datetime.now()
        # NOTE(review): the URL manager and outputer are shared across worker
        # threads; assumes those objects are thread-safe — verify
        with ThreadPoolExecutor(max_workers=4) as executor:
            for i in range(999):
                executor.submit(self.task, i)
        # Write out all collected data
        self.outputer.output_html()
        time2 = datetime.now()
        print("耗时 %s 秒" % (time2 - time1).seconds)

    def task(self, count):
        """Processes one URL: fetch, parse, enqueue new links, collect data.

        Args:
            count: ordinal of this task, used only for progress logging
        """
        try:
            # Pop one URL from the frontier
            new_url = self.urls.get_new_url()
            print("%d : %s" % (count, unquote(new_url)))
            # Download the page
            html_cont = self.downloader.download(new_url)
            # Parse the page into newly discovered URLs and extracted data
            new_urls, new_data = self.praser.prase(new_url, html_cont)
            # Feed the new URLs back into the frontier
            self.urls.add_new_urls(new_urls)
            # Collect the extracted data
            self.outputer.collect_data(new_data)
        except(Exception) as e:
            print("craw fail:%s" % (e))
if __name__ == "__main__":
root_url = "https://baike.baidu.com/item/Python/407313"
obj_spider = SpiderMain()
obj_spider.craw(root_url)
| StarcoderdataPython |
8078023 | <gh_stars>0
from kaggle.api.kaggle_api_extended import KaggleApi
import zipfile
import os
from shutil import copyfile
from random import random
from random import seed
# Kaggle slug of the Animals-10 image dataset to download
DATASET = "alessiocorrado99/animals10"
# Requires Kaggle API credentials at ~/.kaggle/kaggle.json
def fetch_kaggle():
    """Downloads the Animals-10 dataset from Kaggle and splits its images
    into ``train``/``validation`` directories (~80/20 per class).
    """
    api = KaggleApi()
    api.authenticate()
    api.dataset_download_files(DATASET)
    # Context manager guarantees the zip handle is closed even if
    # extraction fails (the previous explicit close() was skipped on error)
    with zipfile.ZipFile("animals10.zip") as zf:
        zf.extractall("raw-img")
    classes = os.listdir("raw-img/raw-img")
    print(classes)
    for subdir in ["train", "validation"]:
        for label in classes:
            new = os.path.join(subdir, label)
            print(new)
            os.makedirs(new, exist_ok=True)
    # Seed so the train/validation split is reproducible across runs
    seed(1)
    for label in classes:
        head = os.path.join("raw-img/raw-img", label)
        for file in os.listdir(head):
            src = os.path.join(head, file)
            # Default to train; ~20% of files go to validation instead
            dst = os.path.join("train", label, file)
            if random() < 0.2:
                dst = os.path.join("validation", label, file)
            print(f"Travelling from {src} to {dst}!")
            copyfile(src, dst)
def conjoin_furry_dir():
    """Extracts ``furry.zip`` and merges its images into the existing
    ``train``/``validation`` split as an extra ``furry`` class (~80/20).
    """
    label = "furry"
    # Context manager guarantees the zip handle is closed even if
    # extraction fails (the previous explicit close() was skipped on error)
    with zipfile.ZipFile("furry.zip") as zf:
        zf.extractall(label)
    for subdir in ["train", "validation"]:
        new = os.path.join(subdir, label)
        os.makedirs(new, exist_ok=True)
    # Seed so the split is reproducible across runs
    seed(1)
    head = os.path.join(label, "furry")
    for file in os.listdir(head):
        src = os.path.join(head, file)
        # Default to train; ~20% of files go to validation instead
        dst = os.path.join("train", label, file)
        if random() < 0.2:
            dst = os.path.join("validation", label, file)
        print(f"Travelling from {src} to {dst}!")
        copyfile(src, dst)
def main():
    """Builds the dataset unless it already exists on disk."""
    if "raw-img" in os.listdir():
        # Bug fix: previously this printed the message but re-downloaded
        # everything anyway; now we stop early
        print("Dataset already exists!")
        return
    fetch_kaggle()
    conjoin_furry_dir()
if __name__ == "__main__":
main()
| StarcoderdataPython |
6540006 | import shapely
from shapely.geometry import shape, Polygon,box, JOIN_STYLE
from shapely.ops import cascaded_union
import pdb
import math
from operator import itemgetter
# Calculation
def building(xmin, ymin, xmax, ymax, polys_design):
    """Aggregate built floor area per functional mix over a regular grid.

    The bounding box is tiled with 10x10 cells.  For each cell, the design
    polygons intersecting it are classified by function (commercial /
    residential / office) and by vertical segment: a height of 15 is read as
    one segment (s1), 45 as two (s1+s2) and 90 as three (s1+s2+s3).  The
    floor-area increment of every segment is attributed to the single-,
    two- or three-function mix present in that segment.

    Parameters
    ----------
    xmin, ymin, xmax, ymax : float
        Bounding box of the site.
    polys_design : list of dict
        Each dict holds a shapely 'Polygon', a 'function' string
        ('commercial' / 'residential' / 'office') and a numeric 'height'.

    Returns
    -------
    tuple
        (volume3, volume2com_res, volume2com_off, volume2res_off,
         volume1com, volume1off, volume1res, roof_surface, res_deck)
    """
    gsize = 10
    x_num = math.ceil((xmax - xmin) / gsize)
    y_num = math.ceil((ymax - ymin) / gsize)
    grids = []
    for i in range(x_num):
        for j in range(y_num):
            x0 = xmin + i * gsize
            x1 = xmin + (i + 1) * gsize
            y0 = ymin + j * gsize
            y1 = ymin + (j + 1) * gsize
            grids.append(box(x0, y0, x1, y1))
    # Segment heights (bottom / middle / top).  A storey is assumed to be
    # 3 units tall, hence the "/ 3" when converting height to floor count.
    s = [15, 30, 45]
    volume = 0
    gfa = 0
    res_sqm = 0
    com_sqm = 0
    off_sqm = 0
    fs = {'commercial': 0, 'residential': 0, 'office': 0}
    volume3 = 0
    volume2com_res = 0
    volume2com_off = 0
    volume2res_off = 0
    volume1com = 0
    volume1res = 0
    volume1off = 0
    roof_surface = 0
    res_deck = 0

    def split_segment(n_com, n_res, n_off, inc):
        # Attribute the floor-area increment `inc` of one grid segment to the
        # function mix present in it.  In the original per-layer branches the
        # added amount always reduced algebraically to `inc`
        # (e.g. (garea*h/3*(n_com+n_res))/count == inc when n_off == 0),
        # so one helper replaces the three near-identical layer blocks.
        nonlocal volume1com, volume1res, volume1off
        nonlocal volume2com_res, volume2com_off, volume2res_off, volume3
        nonlocal com_sqm, res_sqm, off_sqm
        total = n_com + n_res + n_off
        if total == 0:
            return
        if n_com and not n_res and not n_off:
            volume1com += inc
        elif n_res and not n_com and not n_off:
            volume1res += inc
        elif n_off and not n_com and not n_res:
            volume1off += inc
        else:
            # Mixed-use segment: also split the increment per function.
            # BUG FIX: the residential/office branches previously credited
            # `com_sqm` instead of `off_sqm`, and all sqm tallies multiplied
            # the *cumulative* volume totals instead of this increment.
            if n_com and n_res and not n_off:
                volume2com_res += inc
            elif n_com and n_off and not n_res:
                volume2com_off += inc
            elif n_res and n_off and not n_com:
                volume2res_off += inc
            else:
                volume3 += inc
            com_sqm += inc * n_com / total
            res_sqm += inc * n_res / total
            off_sqm += inc * n_off / total

    for grid in grids:
        h_max = 0.1
        h_id = 0
        i_area = []
        segments = {"com": [], "res": [], "off": []}
        for poly in polys_design:
            polygon = poly['Polygon']
            if grid.intersects(polygon):
                i_area.append(grid.intersection(polygon).area)
                if poly["function"] == "commercial":
                    segments["com"].append(poly["height"])
                elif poly["function"] == "residential":
                    segments["res"].append(poly["height"])
                elif poly["function"] == 'office':
                    segments["off"].append(poly["height"])
                # Per-function floor-space tally.  NOTE(review): this uses the
                # full cell area, not the intersection area -- kept as in the
                # original; confirm whether that is intended.
                if poly["function"] in fs:
                    fs[poly["function"]] += gsize * gsize * poly["height"] / 3
                if poly["height"] > h_max:
                    h_max = poly["height"]
                    # BUG FIX: track the index of the *tallest* intersecting
                    # polygon.  Previously the index counters were reset on
                    # every polygon iteration, so index 0 (the first
                    # intersection) was always used for the cell area.
                    h_id = len(i_area) - 1
        if h_max != 0.1:
            # Area used for this cell: intersection of the tallest polygon.
            garea = i_area[h_id]
            gfa += garea * h_max / 3
            volume += garea * h_max
            # Convert heights to stacked segments:
            # 15 -> [s1], 45 -> [s1, s2], 90 -> [s1, s2, s3].
            for key, heights in segments.items():
                seg = []
                for val in heights:
                    if val == 15:
                        seg.extend(["s1"])
                    elif val == 45:
                        seg.extend(["s1", "s2"])
                    elif val == 90:
                        seg.extend(["s1", "s2", "s3"])
                segments[key] = seg
            for level, tag in enumerate(("s1", "s2", "s3")):
                n_com = segments["com"].count(tag)
                n_res = segments["res"].count(tag)
                n_off = segments["off"].count(tag)
                split_segment(n_com, n_res, n_off, garea * s[level] / 3)
                # Roof surface and residential decks are counted once per
                # cell, on the bottom segment only.
                if tag == "s1" and (n_com + n_res + n_off) > 0:
                    roof_surface += garea
                    if n_res and not n_com and not n_off:
                        res_deck += garea
    print("volume3", volume3)
    print("mixed used,(comres,comoff,resoff)", volume2com_res, volume2com_off, volume2res_off)
    print("single used:(com, off, res)", volume1com, volume1off, volume1res)
    return volume3, volume2com_res, volume2com_off, volume2res_off, volume1com, volume1off, volume1res, roof_surface, res_deck
4829198 | <gh_stars>1-10
"""
setup.py - Setup file to distribute the library
See Also:
https://github.com/pypa/sampleproject
https://packaging.python.org/en/latest/distributing.html
https://pythonhosted.org/an_example_pypi_project/setuptools.html
"""
import os
import glob
from setuptools import setup, Extension, find_packages
import numpy
def read(fname):
    """Return the text contents of *fname*, resolved relative to this file."""
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, 'r') as handle:
        return handle.read()
def get_meta(filename):
    """Execute *filename* and return the resulting module namespace as a dict.

    Used to read package metadata (name, version, ...) without importing the
    package itself.  Note: this executes the file's code -- only use it on
    trusted, in-repo files.
    """
    namespace = {}
    with open(filename, 'r') as handle:
        source = handle.read()
    exec(compile(source, filename, 'exec'), namespace)
    return namespace
# ========== Optional C extension ==========
# The extension build is optional: failures are converted into BuildFailed
# below so the package can fall back to a pure-Python install.
import logging
from setuptools.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
logging.basicConfig()
# NOTE(review): logging.getLogger(__file__) keys the logger by file path;
# logging.getLogger(__name__) is the usual convention -- confirm before changing.
log = logging.getLogger(__file__)
# Error types that indicate the C extension cannot be built on this platform.
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError)
class BuildFailed(Exception):
    """Raised when the optional C extension fails to build, so setup() can
    retry a pure-Python installation."""
    pass
def construct_build_ext(build_ext):
    """Return a subclass of *build_ext* whose build failures raise BuildFailed.

    Wrapping the command class lets the caller catch BuildFailed and fall
    back to installing without the C extension.
    """
    class WrappedBuildExt(build_ext):
        # Translate platform/compiler failures into BuildFailed.
        def run(self):
            try:
                super().run()
            except DistutilsPlatformError as exc:
                raise BuildFailed(exc)

        def build_extension(self, ext):
            try:
                super().build_extension(ext)
            except ext_errors as exc:
                raise BuildFailed(exc)

    return WrappedBuildExt
if __name__ == "__main__":
    # Package metadata is read from a single module without importing the
    # package, so setup() can run before dependencies are installed.
    meta = get_meta('np_rw_buffer/__meta__.py')
    name = meta['name']
    version = meta['version']
    description = meta['description']
    url = meta['url']
    author = meta['author']
    author_email = meta['author_email']
    keywords = 'read write ring circular buffer'
    packages = find_packages(exclude=('tests', 'bin'))
    # Optional C extension; installation falls back to pure Python if it
    # cannot be compiled (see the BuildFailed handling below).
    extensions = [
        Extension('np_rw_buffer._circular_indexes',
                  # define_macros=[('MAJOR_VERSION', '1')],
                  # extra_compile_args=['-std=c99'],
                  sources=['src/circular_indexes.c'],
                  include_dirs=['src', numpy.get_include()]),
        ]
    setup_kwargs = {'name': name,
                    'version': version,
                    'description': description,
                    'long_description': read('README.rst'),
                    'keywords': keywords,
                    'url': url,
                    'download_url': ''.join((url, '/archive/v', version, '.tar.gz')),
                    'author': author,
                    'author_email': author_email,
                    'license': 'Proprietary',
                    # BUG FIX: the setup() keyword is 'platforms' (plural);
                    # 'platform' is an unknown option and was being ignored.
                    'platforms': 'any',
                    'classifiers': ['Programming Language :: Python',
                                    'Programming Language :: Python :: 3',
                                    'Operating System :: OS Independent'],
                    'scripts': [file for file in glob.iglob('bin/*.py')],  # Run with python -m Scripts.module args
                    # 'ext_modules': extensions,
                    'packages': packages,
                    'include_package_data': True,
                    'package_data': {pkg: ['*', '*/*', '*/*/*', '*/*/*/*', '*/*/*/*/*']
                                     for pkg in packages if '/' not in pkg and '\\' not in pkg},
                    'install_requires': [
                        ],
                    'extras_require': {
                        },
                    }
    # Comment this out if you want the simple setup
    cmd_classes = setup_kwargs.setdefault('cmdclass', {})
    setup_kwargs['cmdclass']['build_ext'] = construct_build_ext(build_ext)
    try:
        # Run the setup with the c code
        setup(ext_modules=extensions, **setup_kwargs)
    except BuildFailed as err:
        log.warning(err)
        log.warning("The C extension could not be compiled")
        # Remove any previously defined build_ext command class.
        # (cmd_classes aliases setup_kwargs['cmdclass'], so the second check
        # is defensive; the first delete normally suffices.)
        if 'build_ext' in setup_kwargs['cmdclass']:
            del setup_kwargs['cmdclass']['build_ext']
        if 'build_ext' in cmd_classes:
            del cmd_classes['build_ext']
        # Run the setup without the c code
        setup(**setup_kwargs)
        log.info("Plain-Python installation succeeded.")
| StarcoderdataPython |
6552255 | import logging
from numpy.random import uniform, randint
from problems.test_case import TestCase, TestCaseTypeEnum
from problems.solutions.compound_interest import compound_interest
logger = logging.getLogger(__name__)
FUNCTION_NAME = "compound_interest"
INPUT_VARS = ["amount", "rate", "years"]
OUTPUT_VARS = ["new_amount"]
STATIC_RESOURCES = []
PHYSICAL_CONSTANTS = {}
ATOL = {}
RTOL = {
"new_amount": 1e-6
}
class TestCaseType(TestCaseTypeEnum):
    """Scenarios for the compound-interest problem."""
    # Each member is (human-readable label, weight); the weight presumably
    # controls relative generation frequency -- confirm in TestCaseTypeEnum.
    NO_INTEREST = ("No interest", 1)
    SAVINGS = ("High-rate savings account", 1)
    SP_500 = ("S&P 500 average annual return", 1)
    CREDIT_CARD = ("Credit card debt", 1)
    RANDOM = ("Random", 2)
class ProblemTestCase(TestCase):
    """Test case with (amount, rate, years) inputs and one output value."""

    def input_tuple(self):
        # Order matches INPUT_VARS: amount, rate, years.
        return tuple(self.input[key] for key in ("amount", "rate", "years"))

    def output_tuple(self):
        return (self.output["new_amount"],)
def generate_test_case(test_type):
    """Build a ProblemTestCase with inputs drawn for the given scenario."""
    test_case = ProblemTestCase(test_type)
    # Draw (amount, rate, years) for the scenario.  Each tuple evaluates
    # left-to-right, preserving the original RNG call order per branch.
    if test_type is TestCaseType.NO_INTEREST:
        params = (uniform(10, 1000), 0.0, randint(1, 10))
    elif test_type is TestCaseType.SAVINGS:
        params = (uniform(100, 25000), 0.005, randint(10, 25))
    elif test_type is TestCaseType.SP_500:
        params = (uniform(10000, 500000), 0.1, randint(7, 30))
    elif test_type is TestCaseType.CREDIT_CARD:
        # NOTE(review): rate/years duplicate the SP_500 scenario; a credit
        # card rate would usually be higher -- confirm intended values.
        params = (uniform(250, 10000), 0.1, randint(7, 30))
    elif test_type is TestCaseType.RANDOM:
        params = (uniform(0, 100000), uniform(0, 0.25), randint(0, 30))
    else:
        raise ValueError(f"Unrecognized test case: {test_type}")
    amount, rate, years = params
    test_case.input["amount"] = amount
    test_case.input["rate"] = rate
    test_case.input["years"] = years
    # Reference solution provides the expected output.
    test_case.output["new_amount"] = compound_interest(amount, rate, years)
    return test_case
| StarcoderdataPython |
3383987 | """Views for social interactions."""
from django.views.generic import TemplateView
from django.core.urlresolvers import reverse
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from django.shortcuts import redirect
from django.contrib import messages
from braces.views import LoginRequiredMixin
from allauth.socialaccount.models import SocialAccount, SocialToken, SocialApp
from geokey.core.decorators import handle_exceptions_for_admin
from geokey.projects.models import Project
from geokey.projects.views import ProjectContext
from .models import SocialInteractionPost, SocialInteractionPull
from .base import STATUS, FREQUENCY
class SocialInteractionList(LoginRequiredMixin, ProjectContext, TemplateView):
    """
    Displays the list of social interactions in the project.
    """
    # Render-only view: the template reads the interactions off the project
    # supplied by ProjectContext, so no extra context logic is needed here.
    template_name = 'socialinteractions/socialinteraction_list.html'
class SocialInteractionPostCreate(LoginRequiredMixin, ProjectContext, TemplateView):
    """
    Provides the form to create a new social interaction.
    """
    template_name = 'socialinteractions/socialinteraction_post_create.html'
    def get_context_data(self, *args, **kwargs):
        """Add the user's Twitter/Facebook accounts to the template context."""
        context = super(SocialInteractionPostCreate, self).get_context_data(
            *args,
            **kwargs
        )
        auth_users = SocialAccount.objects.filter(
            user=self.request.user,
            provider__in=['twitter', 'facebook'])
        context["auth_users"] = auth_users
        return context
    def post(self, request, project_id):
        """
        Creates the social interaction based on the data entered by the user.
        Parameters
        ----------
        request : django.http.HttpRequest
            Object representing the request
        project_id : int
            Identifies the project in the database
        Returns
        -------
        django.http.HttpResponseRedirect
            Redirects to social interaction create if social interaction is
            created, social interaction list if project is locked or it does
            not have any categories
        django.http.HttpResponse
            Rendered template, if project does not exist
        """
        data = request.POST
        context = self.get_context_data(project_id)
        project = context.get('project')
        if project:
            cannot_create = 'New social interactions cannot be created.'
            # Locked projects must not be modified.
            if project.islocked:
                messages.error(
                    self.request,
                    'The project is locked. %s' % cannot_create
                )
                return redirect(
                    'admin:socialinteraction_post_create',
                    project_id=project_id
                )
            try:
                socialaccount = SocialAccount.objects.get(
                    pk=data.get('socialaccount'))
                text_to_post = data.get('text_post')
                link = data.get('text_link')
            except SocialAccount.DoesNotExist:
                messages.error(
                    self.request,
                    'The social account is not found. %s' % cannot_create
                )
                return redirect(
                    'admin:socialinteraction_post_create',
                    project_id=project_id
                )
            socialinteraction = SocialInteractionPost.objects.create(
                creator=request.user,
                project=project,
                socialaccount=socialaccount,
                text_to_post=text_to_post,
                link=link,
            )
            add_another_url = reverse(
                'admin:socialinteraction_post_create',
                kwargs={
                    'project_id': project_id
                }
            )
            # mark_safe: the success message embeds a pre-built admin URL.
            messages.success(
                self.request,
                mark_safe(
                    'The social interaction has been created.<a href="%s"> Add another social interaction.</a>' % add_another_url)
            )
            return redirect(
                'admin:socialinteraction_list',
                project_id=project_id,
            )
        else:
            # No project in context (e.g. not found): re-render the form.
            return self.render_to_response(context)
class SocialInteractionPostContext(object):
    """
    Provides the context to render templates. The context contains
    a social interaction instance based on project_id and socialinteraction_id.
    """
    @handle_exceptions_for_admin
    def get_context_data(self, project_id, socialinteraction_id, *args, **kwargs):
        """
        Returns the context containing the project and social interaction
        instances.
        Parameters
        ----------
        project_id : int
            Identifies the project in the database
        socialinteraction_id : int
            Identifies the social interaction in the database
        Returns
        -------
        dict
            Context
        """
        project = Project.objects.as_admin(self.request.user, project_id)
        try:
            socialinteraction = project.socialinteractions_post.get(
                id=socialinteraction_id)
        # BUG FIX: was a bare 'except:', which also swallowed programming
        # errors and system-exiting exceptions; only the missing-object
        # case is expected here.
        except SocialInteractionPost.DoesNotExist:
            messages.error(
                self.request, 'The social interaction was not found.'
            )
            # NOTE(review): redirect() is given a template path rather than a
            # URL name here -- looks like a bug, kept as-is; confirm intent.
            return redirect(
                'socialinteractions/socialinteraction_post_settings.html',
                project_id=project_id,
                socialinteraction_id=socialinteraction_id,
            )
        try:
            socialaccount = SocialAccount.objects.get(id=socialinteraction.socialaccount_id)
        # BUG FIX: narrowed from a bare 'except:' (see above).
        except SocialAccount.DoesNotExist:
            messages.error(
                self.request, 'The social account was not found'
            )
            return redirect(
                'socialinteractions/socialinteraction_post_settings.html',
                project_id=project_id,
                socialinteraction_id=socialinteraction_id,
            )
        if socialinteraction and socialaccount:
            return super(SocialInteractionPostContext, self).get_context_data(
                project=project,
                socialinteraction=socialinteraction,
                socialaccount=socialaccount,
            )
class SocialInteractionPostDelete(LoginRequiredMixin, SocialInteractionPostContext,
                                  TemplateView):
    """
    Deletes the social interactions.
    """
    template_name = 'base.html'
    def get(self, request, project_id, socialinteraction_id):
        """
        Deletes the social interaction.
        Parameter
        ---------
        request : django.http.HttpRequest
            Object representing the request
        project_id : int
            Identifies the project in the database
        socialinteraction_id : int
            Identifies the social interaction in the database
        Returns
        -------
        django.http.HttpResponseRedirect
            Redirects to social interaction list if social interaction is
            deleted, social interaction settings if project is locked, if social
            interaction does not exists redirect to base.html and show error
        django.http.HttpResponse
            Rendered template, if project or social interaction does not exist
        """
        try:
            context = self.get_context_data(project_id, socialinteraction_id)
            socialinteraction = context.get('socialinteraction')
        # BUG FIX: narrowed from a bare 'except:' so KeyboardInterrupt /
        # SystemExit are no longer silently swallowed.
        except Exception:
            messages.error(
                self.request, 'The social account is not found.'
            )
            return redirect(
                'base.html',
                project_id=project_id,
                socialinteraction_id=socialinteraction_id
            )
        if socialinteraction:
            # Locked projects must not be modified.
            if socialinteraction.project.islocked:
                messages.error(
                    self.request,
                    'The project is locked. Social interaction cannot be deleted.'
                )
                return redirect(
                    'admin:socialinteraction_post_settings',
                    project_id=project_id,
                    socialinteraction_id=socialinteraction_id
                )
            else:
                socialinteraction.delete()
                messages.success(self.request, 'The social interaction has been'
                                               ' deleted.')
                return redirect('admin:socialinteraction_list',
                                project_id=project_id)
        return self.render_to_response(context)
class SocialInteractionPostSettings(LoginRequiredMixin, SocialInteractionPostContext,
                                    TemplateView):
    """
    Provides the form to update the social interaction settings.
    """
    template_name = 'socialinteractions/socialinteraction_post_settings.html'
    def get_context_data(self, project_id, *args, **kwargs):
        """
        Return the context to render the view.
        Add Twitter and Facebook social accounts of a user to the context.
        Parameters
        ----------
        project_id : int
            Identifies the project in the database.
        Returns
        -------
        dict
            Context.
        """
        context = super(SocialInteractionPostSettings, self).get_context_data(
            project_id,
            *args,
            **kwargs
        )
        auth_users = SocialAccount.objects.filter(
            user=self.request.user,
            provider__in=['twitter', 'facebook'])
        context["auth_users"] = auth_users
        context['status_types'] = {value: key for key, value in STATUS}.keys()
        return context
    def post(self, request, project_id, socialinteraction_id):
        """
        Updates the social interaction based on the data entered by the user.
        Parameter
        ---------
        request : django.http.HttpRequest
            Object representing the request
        project_id : int
            Identifies the project in the database
        socialinteraction_id : int
            Identifies the social interaction in the database
        Returns
        -------
        django.http.HttpResponse
            Rendered template when social interactions updated
        django.http.HttpResponse
            Rendered template, if project or social interaction does not exist
        """
        data = request.POST
        try:
            context = self.get_context_data(project_id, socialinteraction_id)
            socialinteraction = context.get('socialinteraction')
        # BUG FIX: narrowed from a bare 'except:' so system-exiting
        # exceptions are no longer silently swallowed.
        except Exception:
            messages.error(
                self.request, 'The social account is not found.'
            )
            return redirect(
                'socialinteractions/socialinteraction_post_settings.html',
                project_id=project_id,
                socialinteraction_id=socialinteraction_id
            )
        if socialinteraction:
            if socialinteraction.project.islocked:
                # BUG FIX: message said "deleted" (copy-paste from the delete
                # view); this is the update view.
                messages.error(
                    self.request,
                    'The project is locked. Social interaction cannot be updated.'
                )
                return redirect(
                    'admin:socialinteraction_post_settings',
                    project_id=project_id,
                    socialinteraction_id=socialinteraction_id
                )
            else:
                socialinteraction.text_to_post = data.get('text_post')
                socialinteraction.link = data.get('text_link')
                socialinteraction.socialaccount = SocialAccount.objects.get(
                    id=data.get('socialaccount'))
                socialinteraction.status = data.get('status_type')
                socialinteraction.save()
                messages.success(self.request, 'The social interaction has been updated.')
        return self.render_to_response(context)
# class SocialInteractionPost(LoginRequiredMixin, SocialInteractionPostContext,
# TemplateView):
# """Provide the form to update the social interaction settings."""
#
# template_name = 'socialinteractions/socialinteraction_post.html'
#
# def get_context_data(self, project_id, *args, **kwargs):
# """
# Return the context to render the view.
#
# Add Twitter and Facebook social accounts of a user to the context.
#
# Parameters
# ----------
# project_id : int
# Identifies the project in the database.
#
# Returns
# -------
# dict
# Context.
# """
#
# return super(SocialInteractionPost, self).get_context_data(
# project_id,
# *args,
# **kwargs
# )
#
# def post(self, request, project_id, socialinteraction_id):
# """
# Creates social post base on the data entered by the user.
#
# Parameters
# ---------
# request : django.http.HttpRequest
# Object representing the request.
# project_id : intyes
# Identifies the project in the database.
# socialinteraction_id : int
# Identifies the social interaction in the database.
#
# Returns
# -------
# django.http.HttpResponse
# Rendered template when social interactions updated.
# django.http.HttpResponse
# Rendered template, if project or social interaction does not exist.
# """
# data = request.POST
# context = self.get_context_data(project_id, socialinteraction_id)
# socialinteraction = context.get('socialinteraction')
# socialinteraction.text_to_post = data.get('text_post')
# socialinteraction.link = data.get('text_link')
# socialinteraction.save()
#
# return self.render_to_response(context)
class SocialInteractionPullCreate(LoginRequiredMixin, ProjectContext,
                                  TemplateView):
    """Provide the form to create a pull-from-social-media task."""
    template_name = 'socialinteractions/socialinteraction_pull_create.html'
    def get_context_data(self, *args, **kwargs):
        """Add the user's social accounts and frequency choices to the context."""
        context = super(SocialInteractionPullCreate, self).get_context_data(
            *args,
            **kwargs
        )
        auth_users = SocialAccount.objects.filter(
            user=self.request.user,
            provider__in=['twitter', 'facebook'])
        context["auth_users"] = auth_users
        context["frequencies"] = [x for x, _ in FREQUENCY]
        return context
    def post(self, request, project_id):
        """
        Creates a pull-from-social-media task based on the submitted data.
        Parameters
        ----------
        request : django.http.HttpRequest
            Object representing the request.
        project_id : int
            Identifies the project in the database.
        Returns
        -------
        django.http.HttpResponseRedirect
            Redirects to the social interaction list when created, or back
            to the create form on error.
        django.http.HttpResponse
            Rendered template, if project does not exist.
        """
        data = request.POST
        context = self.get_context_data(project_id)
        project = context.get('project')
        if project:
            cannot_create = 'New social interactions cannot be created.'
            # Locked projects must not be modified.
            if project.islocked:
                messages.error(
                    self.request,
                    'The project is locked. %s' % cannot_create
                )
                # NOTE(review): redirects to the *post* create view, not the
                # pull create view -- looks like a copy-paste slip; confirm.
                return redirect(
                    'admin:socialinteraction_post_create',
                    project_id=project_id
                )
            try:
                socialaccount = SocialAccount.objects.get(
                    pk=data.get('socialaccount'))
            except SocialAccount.DoesNotExist:
                messages.error(
                    self.request,
                    'The social account is not found. %s' % cannot_create
                )
                return redirect(
                    'admin:socialinteraction_post_create',
                    project_id=project_id
                )
            SocialInteractionPull.objects.create(
                text_to_pull=strip_tags(data.get('text_pull')),
                creator=request.user,
                project=project,
                socialaccount=socialaccount,
                frequency=strip_tags(data.get('frequency')),
            )
            add_another_url = reverse(
                'admin:socialinteraction_pull_create',
                kwargs={
                    'project_id': project_id
                }
            )
            # mark_safe: the success message embeds a pre-built admin URL.
            messages.success(
                self.request,
                mark_safe(
                    'The social interaction has been created.<a href="%s"> Add pull from social media task.</a>' % add_another_url)
            )
            return redirect(
                'admin:socialinteraction_list',
                project_id=project_id,
            )
        else:
            # No project in context (e.g. not found): re-render the form.
            return self.render_to_response(context)
class SocialInteractionPullContext(object):
    """
    Provides the context to render templates. The context contains
    a social interaction pull task based on project_id and
    socialinteractionpull_id.
    """
    @handle_exceptions_for_admin
    def get_context_data(self, project_id, socialinteractionpull_id, *args, **kwargs):
        """
        Returns the context containing the project and social interaction
        pull instances.
        Parameters
        ----------
        project_id : int
            Identifies the project in the database
        socialinteractionpull_id : int
            Identifies the social interaction pull task in the database
        Returns
        -------
        dict
            Context
        """
        project = Project.objects.as_admin(self.request.user, project_id)
        try:
            socialinteraction_pull = project.socialinteractions_pull.get(
                pk=socialinteractionpull_id)
        # BUG FIX: was a bare 'except:', which also swallowed programming
        # errors and system-exiting exceptions; only the missing-object
        # case is expected here.
        except SocialInteractionPull.DoesNotExist:
            messages.error(
                self.request, 'The social interaction pull is not found.'
            )
            # NOTE(review): redirect() is given a template path rather than a
            # URL name -- looks like a bug, kept as-is; confirm intent.
            return redirect(
                'socialinteractions/socialinteraction_list.html',
                project_id=project_id
            )
        if socialinteraction_pull:
            return super(SocialInteractionPullContext, self).get_context_data(
                project=project,
                socialinteraction_pull=socialinteraction_pull,
            )
class SocialInteractionPullSettings(LoginRequiredMixin, SocialInteractionPullContext,
                                    TemplateView):
    """Provide the form to update the pull-task settings."""
    template_name = 'socialinteractions/socialinteraction_pull.html'
    def get_context_data(self, project_id, *args, **kwargs):
        """Add the user's social accounts, status and frequency choices."""
        context = super(SocialInteractionPullSettings, self).get_context_data(
            project_id,
            *args,
            **kwargs
        )
        auth_users = SocialAccount.objects.filter(
            user=self.request.user,
            provider__in=['twitter', 'facebook'])
        context["auth_users"] = auth_users
        context['status_types'] = {value: key for key, value in STATUS}.keys()
        context["freq"] = [x for x, _ in FREQUENCY]
        return context
    def post(self, request, project_id, socialinteractionpull_id):
        """
        Updates the pull task based on the data entered by the user.
        Parameters
        ---------
        request : django.http.HttpRequest
            Object representing the request.
        project_id : int
            Identifies the project in the database.
        socialinteractionpull_id : int
            Identifies the pull task in the database.
        Returns
        -------
        django.http.HttpResponse
            Rendered template when social interactions updated.
        django.http.HttpResponse
            Rendered template, if project or social interaction does not exist.
        """
        data = request.POST
        try:
            context = self.get_context_data(
                project_id,
                socialinteractionpull_id
            )
            si_pull = context['socialinteraction_pull']
        # BUG FIX: narrowed from a bare 'except:' so system-exiting
        # exceptions are no longer silently swallowed.
        except Exception:
            messages.error(
                self.request, 'The social account is not found.'
            )
            return redirect(
                'socialinteractions/socialinteraction_pull.html',
                project_id=project_id,
                socialinteractionpull_id=socialinteractionpull_id
            )
        if si_pull:
            if si_pull.project.islocked:
                messages.error(
                    self.request,
                    'The project is locked. Social interaction cannot be edited.'
                )
                return redirect(
                    'admin:socialinteraction_pull',
                    project_id=project_id,
                    socialinteractionpull_id=socialinteractionpull_id
                )
            else:
                text_pull = data.get("text_pull")
                frequency = data.get('frequency')
                socialaccount_id = data.get('socialaccount')
                socialaccount = SocialAccount.objects.get(id=socialaccount_id)
                status = data.get('status_type')
                # Only touch fields that actually changed.
                if text_pull != si_pull.text_to_pull:
                    si_pull.text_to_pull = text_pull
                if si_pull.frequency != frequency:
                    si_pull.frequency = frequency
                if si_pull.socialaccount != socialaccount:
                    si_pull.socialaccount = socialaccount
                if si_pull.status != status:
                    si_pull.status = status
                si_pull.save()
                messages.success(self.request, 'The social interaction has been updated.')
        return self.render_to_response(context)
class SocialInteractionPullDelete(LoginRequiredMixin, SocialInteractionPullContext,
                                  TemplateView):
    """
    Deletes the social interactions.
    """
    template_name = 'base.html'
    def get(self, request, project_id, socialinteractionpull_id):
        """
        Deletes the social interaction pull task.
        Parameters
        ----------
        request : django.http.HttpRequest
            Object representing the request
        project_id : int
            Identifies the project in the database
        socialinteractionpull_id : int
            Identifies the pull task in the database
        Returns
        -------
        django.http.HttpResponseRedirect
            Redirects to the social interaction list on success, or to the
            pull settings if the project is locked.
        django.http.HttpResponse
            Rendered template, if project or pull task does not exist
        """
        try:
            context = self.get_context_data(project_id, socialinteractionpull_id)
            socialinteraction_pull = context.get('socialinteraction_pull')
        # BUG FIX: narrowed from a bare 'except:'.
        except Exception:
            messages.error(
                self.request, 'The social account is not found.'
            )
            return redirect(
                'base.html',
                project_id=project_id,
                # BUG FIX: previously referenced socialinteraction_pull.id,
                # which is unbound when this handler fires (NameError).
                socialinteractionpull_id=socialinteractionpull_id
            )
        if socialinteraction_pull:
            # Locked projects must not be modified.
            if socialinteraction_pull.project.islocked:
                messages.error(
                    self.request,
                    'The project is locked. Social pull cannot be deleted.'
                )
                return redirect(
                    'admin:socialinteraction_pull_settings',
                    project_id=project_id,
                    socialinteractionpull_id=socialinteraction_pull.id
                )
            else:
                socialinteraction_pull.delete()
                messages.success(self.request, 'The social interaction has been'
                                               ' deleted.')
                return redirect('admin:socialinteraction_list',
                                project_id=project_id)
        return self.render_to_response(context)
| StarcoderdataPython |
8051717 | from ..db import ZipDetail, Base
from .base import session
def validate(pincode: int) -> bool:
    """
    Description
    -----------
    Verify whether a pincode exists in the database.
    Parameters
    ----------
    pincode : int
        The pincode of district
    Returns
    -------
    bool
        True if pincode is correct else False
    """
    # Reject values that are not numeric at all before hitting the database.
    try:
        int(pincode)
    # BUG FIX: was a bare 'except:' around the whole body, which silently
    # turned database errors into "invalid pincode"; only coercion failures
    # are treated as invalid input now, so real DB errors surface.
    except (TypeError, ValueError):
        return False
    match = session.query(ZipDetail).filter(ZipDetail.pincode == pincode).first()
    return match is not None
| StarcoderdataPython |
1901442 | <reponame>minsukkahng/pokr.kr<gh_stars>10-100
# -*- coding: utf-8 -*-
import redis
class RedisQueue(object):
    """Simple FIFO queue backed by a Redis list."""
    def __init__(self, name, namespace='queue', **redis_kwargs):
        """The default connection parameters are: host='localhost', port=6379, db=0"""
        self.db = redis.Redis(**redis_kwargs)
        # Redis key for the backing list, e.g. "queue:jobs".
        self.key = '%s:%s' % (namespace, name)
    def qsize(self):
        """Return the approximate size of the queue."""
        return self.db.llen(self.key)
    def empty(self):
        """Return True if the queue is empty, False otherwise."""
        return self.qsize() == 0
    def put(self, item):
        """Put item into the queue."""
        self.db.rpush(self.key, item)
    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.
        If optional args block is true and timeout is None (the default), block
        if necessary until an item is available."""
        if block:
            item = self.db.blpop(self.key, timeout=timeout)
            if item:
                # blpop returns a (key, value) pair; unwrap the value.
                item = item[1]
        else:
            item = self.db.lpop(self.key)
        return item
    def get_nowait(self):
        """Equivalent to get(False)."""
        return self.get(False)
    def __iter__(self):
        return self
    def next(self):
        """Return the next queued item, or raise StopIteration when empty."""
        item = self.get(False)
        if item is None:
            raise StopIteration
        return item
    # BUG FIX: only the Python 2 iterator protocol ('next') was implemented,
    # so iteration raised TypeError on Python 3.  Alias keeps both working.
    __next__ = next
| StarcoderdataPython |
9652603 | <filename>gsmodutils/test/instances.py<gh_stars>10-100
from abc import ABCMeta, abstractmethod
from six import exec_, add_metaclass
import sys
import os
import traceback
from gsmodutils.test.utils import stdout_ctx, ModelLoader, ResultRecord
import jsonschema
from cobra.exceptions import Infeasible
import cobra
from cobra.core import get_solution
import json
@add_metaclass(ABCMeta)
class TestInstance:
    """Abstract base class for runnable test instances arranged in a tree."""
    def __init__(self, project, log, **kwargs):
        """
        Abstract base class for test instances
        """
        # Optional in-memory model used instead of loading from the project.
        self._override_model = None
        # Child TestInstance objects (populated by subclasses).
        self.children = []
        self.log = log
        self.project = project
        # NOTE(review): due to name mangling this sets '_TestInstance__test'
        # on the log object -- confirm that is what consumers expect.
        self.log.__test = self
    @abstractmethod
    def run(self):
        """ Iterable (i.e should yield not return) """
        pass
    @property
    def id(self):
        # Identifier is delegated to the associated log record.
        return self.log.id
    def get_child_ids(self):
        # Flat list of ids for all descendants (depth-first).
        ids = []
        for child in self.children:
            ids += [child.id] + child.get_child_ids()
        return ids
    def get_id_tree(self):
        # Nested dict of ids mirroring the child hierarchy.
        tree = {}
        for child in self.children:
            tree[child.id] = child.get_id_tree()
        return tree
    def get_children(self, flatten=False):
        """
        Returns heirarchy of tests
        :bool flatten: return a flat list (id):Test_Obj instead of nested tree
        :return:
        """
        tree = {}
        if flatten:
            for child in self.children:
                tree[child.id] = child
                for k, v in child.get_children(flatten=True).items():
                    tree[k] = v
        else:
            for child in self.children:
                tree[child.id] = {
                    "tester": child,
                    "children": child.get_children()
                }
        return tree
    def set_override_model(self, model):
        """
        Sets an in memory model to be loaded on execution instead of loaded from the project
        :param model:
        :return:
        """
        self._override_model = model
    @abstractmethod
    def applies_to_model(self, model_id, design_id=None):
        # Subclasses decide whether this test applies to a model/design pair.
        pass
class PyTestFileInstance(TestInstance):
    """Loads a python test file and creates a child instance per test_ function."""
    def __init__(self, project, log, pyfile_path, **kwargs):
        """
        Loads a python test file and all associated python test instances
        :param pyfile_path:
        """
        super(PyTestFileInstance, self).__init__(project, log, **kwargs)
        self.compiled_py = dict()  # NOTE(review): appears unused here -- confirm.
        # Holds the SyntaxError if the user's file fails to compile.
        self.syntax_errors = None
        self.file_path = pyfile_path
        self.name = os.path.basename(pyfile_path)
        # Namespace the user code executes in; __name__ marks it as a
        # gsmodutils-run module rather than __main__.
        self.global_namespace = dict(
            __name__='__gsmodutils_test__',
        )
        with open(pyfile_path) as codestr:
            try:
                self.compiled_code = compile(codestr.read(), '', 'exec')
            except SyntaxError as ex:
                # syntax error for user written code
                # ex.lineno, ex.msg, ex.filename, ex.text, ex.offset
                self.syntax_errors = ex
                return
        # Execute the module once, capturing stdout, so test functions are
        # defined in global_namespace.
        with stdout_ctx() as stdout:
            try:
                exec_(self.compiled_code, self.global_namespace)
            except Exception as ex:
                # the whole module has an error somewhere, no functions will run
                log.add_error("Error with code file {} error - {}".format(pyfile_path, str(ex)),
                              ".compile_error")
        fout = stdout.getvalue()
        if fout.strip() != '':
            log.std_out += fout
        # One child test instance per top-level function named test_*.
        for func in filter(lambda f: f[:5] == "test_", self.compiled_code.co_names):
            clog = log.create_child("{}::{}".format(self.name, func))
            self.children.append(PyTestInstance(self.project, clog, func, self, self))
    def run(self):
        """
        Runs all children
        :return:
        """
        for child in self.children:
            child.run()
        return self.log
    def applies_to_model(self, model_id, design_id=None):
        # File-level instances never apply directly; their children do.
        return False
class PyTestInstance(TestInstance):
    """A single python test function, optionally expanded ("master" mode)
    into one child per model/condition/design combination declared by its
    decorator."""

    def __init__(self, project, log, func_name, parent, pyfile, model_loader=None, **kwargs):
        """
        Python test encapsulation object
        :param project: gsmodutils project the test belongs to
        :param log: result record for this test
        :param func_name: name of the function inside the compiled test file
        :param parent: parent test instance (file wrapper or master function)
        :param pyfile: PyTestFileInstance holding the executed namespace
        :param model_loader: when set, this instance is one concrete case
        :param kwargs:
        """
        super(PyTestInstance, self).__init__(project, log, **kwargs)
        self.func_name = func_name
        self.tb_info = None  # traceback info from the last execution error
        self.parent = parent
        self.pyfile = pyfile
        # Resolve the function object from the executed file's namespace.
        self._function = self.pyfile.global_namespace[func_name]
        self.model_loader = model_loader
        self._is_master = False

        # A decorated "selector" function with no loader yet is a master:
        # it fans out into one child per selected combination.
        if model_loader is None and hasattr(self._function, '_is_test_selector'):
            self._is_master = True
            _func_id = "{}::{}".format(self.pyfile.name, self.func_name)
            # This is not an individual test case
            if self._function.models == "*":
                # NOTE(review): assigns project.list_models without calling it
                # -- presumably a property; confirm it is not a bound method.
                self._function.models = self.project.list_models

            if self._function.designs == "*":
                self._function.designs = self.project.list_designs + [None]

            if self._function.conditions == "*":
                self._function.conditions = self.project.list_conditions + [None]

            # Empty selectors degrade to a single "no selection" entry.
            if not len(self._function.models):
                self._function.models = [None]

            if not len(self._function.conditions):
                self._function.conditions = [None]

            if not len(self._function.designs):
                self._function.designs = [None]

            for mn in self._function.models:
                if mn is None:
                    mn = self.project.config.default_model
                for cid in self._function.conditions:
                    for did in self._function.designs:
                        # correctly setting the log id so user can easily read
                        tid = mn
                        if cid is not None and did is not None:
                            tid = (mn, cid, did)
                        elif cid is not None:
                            tid = (mn, cid)
                        elif did is not None:
                            tid = (mn, did)

                        if type(tid) is tuple:
                            tid = "::".join(tid)

                        model_loader = ModelLoader(self.project, mn, cid, did)
                        task_id = "{}::{}".format(_func_id, tid)
                        nlog = log.create_child(task_id, param_child=True)
                        self.children.append(PyTestInstance(self.project, nlog, func_name, self, self.pyfile,
                                                            model_loader))

    def run(self):
        # Masters delegate to their concrete children; leaves execute directly.
        if self._is_master:
            for child in self.children:
                child.run()
        else:
            self._fexec()
        return self.log

    def _fexec(self, model=None):
        """
        Execute the python test with encapsulation
        Passing a model allows this test to be run on any predefined cobra model, rather than one loaded from a project.
        :param model: gsmodutils model instance or none.
        :return: the result log
        """
        with stdout_ctx() as stdout:
            # Resolve the model: explicit argument > loader > project default.
            if model is None and self.model_loader is None:
                model = self.project.load_model()
            elif self.model_loader is not None:
                try:
                    model = self.model_loader.load(self.log)
                except Exception as ex:
                    self.log.add_error("Error loading model {}".format(ex))
                    return self.log
            elif not isinstance(model, cobra.Model):
                raise TypeError("Expected gsmodutils or cobra model")

            try:
                # Call the function
                # Uses standardised prototypes
                self._function(model, self.project, self.log)
            except Exception as ex:
                _, _, tb = sys.exc_info()
                self.tb_info = traceback.extract_tb(tb)[-1]  # Store the traceback information
                # the specific test case has an error
                self.log.add_error("Error executing function {} in file {} error - {}".format(self.func_name,
                                                                                              self.pyfile.file_path,
                                                                                              str(ex)),
                                   ".execution_error")

        # Capture anything the test printed while running.
        fout = stdout.getvalue()
        if fout.strip() != '':
            self.log.std_out = fout
        return self.log

    def applies_to_model(self, model_id, design_id=None):
        # NOTE(review): operator precedence makes the first condition read as
        # len(children) or (design_id is None and loader.design_id is not None);
        # confirm this grouping is intended.
        if len(self.children) or design_id is None and self.model_loader.design_id is not None:
            return False
        elif self.model_loader.model_id == model_id and design_id is None:
            return True
        elif self.model_loader.model_id == model_id and design_id == self.model_loader.design_id:
            return True
        return False
class JsonTestInstance(TestInstance):
    """Wraps a json test file: creates one DictTestInstance per entry,
    logging (rather than raising) schema and parse errors."""

    def __init__(self, project, file_path, **kwargs):
        """
        Create sub tests from a json file
        :param project: gsmodutils project
        :param file_path: path to the json test file
        :param kwargs:
        """
        id_key = os.path.basename(file_path)
        log = ResultRecord(id_key)
        super(JsonTestInstance, self).__init__(project, log, **kwargs)
        self.load_errors = None  # (id, exception) when the file fails to parse
        # NOTE(review): only the *last* invalid entry is retained here.
        self.invalid_tests = None
        with open(file_path) as test_file:
            try:
                entries = json.load(test_file)
                for entry_key, entry in entries.items():
                    clog = self.log.create_child("{}::{}".format(self.id, entry_key))
                    # Test to see if individual test entries are valid or not
                    try:
                        dt = DictTestInstance(self.project, clog, entry)
                        self.children.append(dt)
                    except jsonschema.ValidationError as exp:
                        self.log.add_error(entry_key, exp)
                        self.invalid_tests = (id_key, entry_key, exp)
                        continue
            except (ValueError, AttributeError) as e:
                # Test json is invalid format
                self.load_errors = (self.id, e)

    def run(self):
        # Run every valid entry; invalid ones were already logged.
        for child in self.children:
            child.run()
        return self.log

    def applies_to_model(self, model_id, design_id=None):
        # File-level container; only individual entries apply.
        return False
class DictTestInstance(TestInstance):
    """A single json test entry.

    Validates the entry against ``schema`` and (in master mode) fans out
    into one leaf per model/condition/design combination; each leaf checks
    required reactions and reaction flux ranges at a steady state solution.
    """

    # jsonschema definition every json test entry must satisfy.
    schema = {
        "type": "object",
        "properties": {
            'models': {
                "type": "array",
                "items": {
                    "type": "string"
                }
            },
            'conditions': {
                "type": "array",
                "items": {
                    "minItems": 0,
                    "type": "string"
                }
            },
            'designs': {
                "type": "array",
                "items": {
                    "type": "string"
                }
            },
            'reaction_fluxes': {
                "type": "object",
                "patternProperties": {
                    "^.*$": {
                        "type": "array",
                        "minItems": 2,
                        "maxItems": 2,
                        "items": {"type": "number"}
                    }
                }
            },
            'required_reactions': {
                "type": "array",
                "items": {
                    "type": "string"
                }
            },
            "description": {"type": "string"},
            "id": {"type": "string"}
        },
        "required": ["description", "reaction_fluxes", "conditions", "models", "designs"],
    }

    def __init__(self, project, log, entry, master=True, model_loader=None, **kwargs):
        super(DictTestInstance, self).__init__(project, log, **kwargs)
        # Test to see if individual test entries are valid or not
        # Exception should be handled when test is loaded
        jsonschema.validate(entry, DictTestInstance.schema)
        self.entry = entry.copy()
        self._master = master  # master nodes fan out; leaves execute
        self._model_loader = model_loader

        if self._master:
            # Empty selectors fall back to "no conditions/designs" and
            # to all project models respectively.
            if not len(self.entry['conditions']):
                self.entry['conditions'] = [None]
            if not len(entry['designs']):
                self.entry['designs'] = [None]
            if not len(entry['models']):
                self.entry['models'] = self.project.config.models

            # Create children
            for mn in self.entry["models"]:
                for cid in self.entry["conditions"]:
                    for did in self.entry["designs"]:
                        # Build a readable id from the non-None parts.
                        if cid is None and did is None:
                            test_id = mn
                        elif did is None:
                            test_id = (mn, cid)
                        elif cid is None:
                            test_id = (mn, did)
                        else:
                            test_id = (mn, cid, did)

                        if type(test_id) is tuple:
                            test_id = "::".join(test_id)

                        tid = "{}::{}".format(self.id, test_id)
                        clog = self.log.create_child(tid)
                        ml = ModelLoader(self.project, mn, cid, did)
                        cinst = DictTestInstance(project, clog, entry, False, ml)
                        self.children.append(cinst)

    def run(self):
        """ Run the test (iterable) """
        if not self._master:
            self._fexec()
        else:
            for child in self.children:
                child.run()
        return self.log

    def _fexec(self, model=None):
        """
        broken up code for testing individual entries
        """
        # Resolve the model: override > explicit argument > loader > default.
        if self._override_model is not None:
            model = self._override_model
        elif model is None and self._model_loader is None:
            model = self.project.load_model()
        elif self._model_loader is not None:
            try:
                model = self._model_loader.load(self.log)
            except Exception as ex:
                self.log.add_error("Error loading model {}".format(ex))
                return self.log
        elif not isinstance(model, cobra.Model):
            raise TypeError("Expected gsmodutils or cobra model")

        try:
            status = model.solver.optimize()
            if status == 'infeasible':
                raise Infeasible('Cannot find solution')

            # Test entries that require non-zero fluxes
            for rid in self.entry['required_reactions']:
                try:
                    reac = model.reactions.get_by_id(rid)
                    # NOTE(review): the condition and messages look inverted
                    # for "required" reactions (success when flux is exactly
                    # 0); confirm log.assertion's argument semantics before
                    # relying on these results.
                    self.log.assertion(
                        reac.flux == 0,
                        success_msg='required reaction {} not active'.format(rid),
                        error_msg='required reaction {} present at steady state'.format(rid),
                        desc='.required_reaction'
                    )
                except KeyError:
                    self.log.assertion(
                        False,
                        success_msg='',
                        error_msg="required reaction {} not found in model".format(rid),
                        desc='.required_reaction .reaction_not_found'
                    )
                    continue
            # tests for specific reaction flux ranges
            for rid, (lb, ub) in self.entry['reaction_fluxes'].items():
                try:
                    reac = model.reactions.get_by_id(rid)
                    if reac.flux < lb or reac.flux > ub:
                        err = 'reaction {} outside of flux bounds {}, {}'.format(rid, lb, ub)
                        self.log.error.append((err, '.reaction_flux'))
                    else:
                        msg = 'reaction {} inside flux bounds {}, {}'.format(rid, lb, ub)
                        self.log.success.append((msg, '.reaction_flux'))
                except KeyError:
                    # Error log of reaction not found
                    self.log.assertion(
                        False,
                        success_msg='',
                        error_msg="required reaction {} not found in model".format(rid),
                        desc='.reaction_flux .reaction_not_found'
                    )
                    continue
        except Infeasible:
            # This is a full test failure (i.e. the model does not work)
            # not a conditional assertion
            self.log.add_error("No solution found with model configuration", '.no_solution')

        return self.log

    def applies_to_model(self, model_id, design_id=None):
        # NOTE(review): falls through and implicitly returns None (falsy)
        # for leaf nodes -- looks unfinished; compare with
        # PyTestInstance.applies_to_model.
        if len(self.children):
            return False
class DefaultTestInstance(TestInstance):
    """Auto-generated sanity tests for every model, growth-condition set and
    design in the project (no user-written test code required)."""

    def __init__(self, project, log_id="default_tests", **kwargs):
        """
        :param project: gsmodutils project to generate default tests for
        :param log_id: id of the root result record
        :param kwargs:
        """
        log = ResultRecord(log_id)
        super(DefaultTestInstance, self).__init__(project, log, **kwargs)

        for model_path in self.project.config.models:
            # Checking model functions without design
            tf_name = 'model::{}'.format(model_path)
            clog = self.log.create_child(tf_name)
            ti = ModelTestInstance(self.project, clog, model_path)
            self.children.append(ti)

        for ckey, cdf in self.project.conditions['growth_conditions'].items():
            # Load model that conditions applies to (default is all models in project)
            cmodels = self.project.config.models
            if 'models' in cdf and len(cdf['models']):
                cmodels = cdf['models']
            for model_path in cmodels:
                tf_name = 'model::{}::conditions::{}'.format(model_path, ckey)
                clog = self.log.create_child(tf_name)
                ti = ModelTestInstance(self.project, clog, model_path, conditions_id=ckey)
                self.children.append(ti)

        for design in self.project.list_designs:
            # Load model design with design applied
            tf_name = 'design::{}'.format(design)
            clog = self.log.create_child(tf_name)
            ti = DesignTestInstance(self.project, clog, design)
            self.children.append(ti)

    def run(self):
        for child in self.children:
            child.run()
        return self.log

    def applies_to_model(self, model_id, design_id=None):
        # Container node; the generated children decide applicability.
        return False
class ModelTestInstance(TestInstance):
    """Default test for a project model.

    Checks whether the model reaches a steady state flux solution and
    compares the outcome with the expectation attached to the growth
    conditions (grows / does not grow).
    """

    def __init__(self, project, log, model_id, conditions_id=None, **kwargs):
        """
        :param project: gsmodutils project container
        :param log: result record for this test
        :param model_id: path/id of the model within the project
        :param conditions_id: optional growth conditions to apply
        :param kwargs:
        """
        super(ModelTestInstance, self).__init__(project, log, **kwargs)
        self.model_path = model_id
        self.conditions = conditions_id

    def run(self):
        self._fexec()
        return self.log

    def applies_to_model(self, model_id, design_id=None):
        # Applies only to the bare model (no design applied).
        if self.model_path == model_id and design_id is None:
            return True
        return False

    @staticmethod
    def _model_check(model):
        """
        Check a model produces a steady state flux solution
        :return: bool -- True when the solver is feasible and the objective
            value is non-zero.
        """
        status = model.solver.optimize()
        if status == 'infeasible':
            return False
        solution = get_solution(model)
        # NOTE: exact float comparison against 0; any non-zero objective
        # (however small) counts as growth.
        if solution.objective_value != 0:
            return True
        return False

    def load_model(self):
        """Load the model (plus optional conditions); log and return None on failure."""
        try:
            model = self.project.load_model(self.model_path)
            if self.conditions is not None:
                self.project.load_conditions(self.conditions, model=model)
        except Exception as ex:
            self.log.error.append(('Model failure loading model {}'.format(ex), '.default'))
            return None
        return model

    def _fexec(self):
        if self._override_model is None:
            model = self.load_model()
        else:
            model = self._override_model

        if model is None:
            # Loading failed; the error is already on the log.
            return self.log

        growth_expected = True
        if self.conditions is not None:
            growth_expected = self.project.growth_condition(self.conditions)

        # Fix: run the (expensive) solver check exactly once instead of
        # re-optimising the model for every branch of the comparison below
        # (the original called _model_check up to three times per test).
        grows = self._model_check(model)
        if grows and growth_expected:
            self.log.success.append(('Model grows', '.default'))
        elif grows and not growth_expected:
            self.log.error.append(('Model grows when it should not', '.default'))
        elif not grows and not growth_expected:
            self.log.success.append(('Model does not grow', '.default'))
        else:
            self.log.error.append(('Model does not grow', '.default'))
        return self.log
class DesignTestInstance(ModelTestInstance):
    """Default test for a project design: load it and check it solves."""

    def __init__(self, project, log, design_id, model_id=None, conditions_id=None, **kwargs):
        """
        :param project: gsmodutils project container
        :param log: result record for this test
        :param design_id: id of the design to load and check
        :param model_id: optional explicit model id (passed to the base class)
        :param conditions_id: optional growth conditions to apply
        :param kwargs:
        """
        super(DesignTestInstance, self).__init__(project, log, model_id, conditions_id, **kwargs)
        self.design = design_id

    def load_model(self):
        # Load the design (with optional growth conditions applied); a
        # failure is logged and reported as None rather than raised.
        try:
            loaded = self.project.load_design(self.design)
            if self.conditions is not None:
                self.project.load_conditions(self.conditions, model=loaded)
        except Exception as ex:
            self.log.error.append(('Design failure loading design {}'.format(ex), '.default'))
            return None
        return loaded

    def _fexec(self):
        model = self.load_model() if self._override_model is None else self._override_model
        if model is None:
            # Loading already logged the failure.
            return self.log
        if self._model_check(model):
            self.log.success.append(('Design appears to function correctly', '.default'))
        else:
            self.log.error.append(('Design fails to pass check', '.default'))
        return self.log

    def applies_to_model(self, model_id, design_id=None):
        # Only the matching design id applies, regardless of model id.
        return design_id == self.design
| StarcoderdataPython |
4896540 | from http import HTTPStatus
import pytest
import requests
from rotkehlchen.tests.utils.api import api_url_for, assert_error_response, assert_proper_response
from rotkehlchen.tests.utils.constants import A_RDN
from rotkehlchen.tests.utils.factories import UNIT_BTC_ADDRESS1, UNIT_BTC_ADDRESS2
from rotkehlchen.tests.utils.rotkehlchen import setup_balances
@pytest.mark.parametrize('number_of_eth_accounts', [2])
@pytest.mark.parametrize('btc_accounts', [[UNIT_BTC_ADDRESS1, UNIT_BTC_ADDRESS2]])
@pytest.mark.parametrize('owned_eth_tokens', [[A_RDN]])
@pytest.mark.parametrize('added_exchanges', [('binance', 'poloniex')])
def test_query_owned_assets(
        rotkehlchen_api_server_with_exchanges,
        ethereum_accounts,
        btc_accounts,
        number_of_eth_accounts,
):
    """Test that using the query all owned assets endpoint works.

    First populates the DB via the all-balances endpoint (with mocked
    exchange/blockchain responses), then checks the owned-assets endpoint
    reports every asset seen.
    """
    # Disable caching of query results
    rotki = rotkehlchen_api_server_with_exchanges.rest_api.rotkehlchen
    rotki.blockchain.cache_ttl_secs = 0
    setup = setup_balances(rotki, ethereum_accounts, btc_accounts)

    # Get all our mocked balances and save them in the DB
    with setup.poloniex_patch, setup.binance_patch, setup.blockchain_patch:
        response = requests.get(
            api_url_for(
                rotkehlchen_api_server_with_exchanges,
                "allbalancesresource",
            ), json={'save_data': True},
        )
    assert_proper_response(response)

    # And now check that the query owned assets endpoint works
    with setup.poloniex_patch, setup.binance_patch, setup.blockchain_patch:
        response = requests.get(
            api_url_for(
                rotkehlchen_api_server_with_exchanges,
                "ownedassetsresource",
            ),
        )
    assert_proper_response(response)
    data = response.json()
    assert data['message'] == ''
    # Owned assets must cover exchanges (EUR, RDN) and on-chain balances.
    assert set(data['result']) == {'ETH', 'BTC', 'EUR', 'RDN'}
def test_ignored_assets_modification(rotkehlchen_api_server_with_exchanges):
    """Test that using the ignored assets endpoint to modify the ignored assets list works fine.

    Exercises PUT (add), GET (query) and DELETE (remove), checking both the
    API responses and the underlying DB state after each call.
    """
    rotki = rotkehlchen_api_server_with_exchanges.rest_api.rotkehlchen

    # add three assets to ignored assets
    ignored_assets = ['GNO', 'RDN', 'XMR']
    response = requests.put(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ), json={'assets': ignored_assets},
    )
    assert_proper_response(response)
    data = response.json()
    assert data['message'] == ''
    assert data['result'] == ignored_assets
    # check they are there
    assert rotki.data.db.get_ignored_assets() == ignored_assets
    # Query for ignored assets and check that the response returns them
    response = requests.get(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ),
    )
    assert_proper_response(response)
    data = response.json()
    assert data['message'] == ''
    assert data['result'] == ignored_assets

    # remove two assets from ignored assets
    response = requests.delete(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ), json={'assets': ['GNO', 'XMR']},
    )
    assets_after_deletion = ['RDN']
    assert_proper_response(response)
    data = response.json()
    assert data['message'] == ''
    assert data['result'] == assets_after_deletion
    # check that the changes are reflected
    assert rotki.data.db.get_ignored_assets() == assets_after_deletion
    # Query for ignored assets and check that the response returns them
    response = requests.get(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ),
    )
    assert_proper_response(response)
    data = response.json()
    assert data['message'] == ''
    assert data['result'] == assets_after_deletion
@pytest.mark.parametrize('method', ['put', 'delete'])
def test_ignored_assets_endpoint_errors(rotkehlchen_api_server_with_exchanges, method):
    """Test errors are handled properly at the ignored assets endpoint.

    Runs once for PUT and once for DELETE; after every invalid request the
    DB is checked to confirm the ignored list was left untouched.
    """
    rotki = rotkehlchen_api_server_with_exchanges.rest_api.rotkehlchen

    # add three assets to ignored assets
    ignored_assets = ['GNO', 'RDN', 'XMR']
    response = requests.put(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ), json={'assets': ignored_assets},
    )
    assert_proper_response(response)

    # Test that omitting the assets argument is an error
    response = getattr(requests, method)(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ),
    )
    assert_error_response(
        response=response,
        contained_in_msg="'assets': ['Missing data for required field",
        status_code=HTTPStatus.BAD_REQUEST,
    )
    # Test that invalid type for assets list is an error
    response = getattr(requests, method)(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ), json={'assets': 'foo'},
    )
    assert_error_response(
        response=response,
        contained_in_msg='Unknown asset foo provided',
        status_code=HTTPStatus.BAD_REQUEST,
    )
    # Test that list with invalid asset is an error
    response = getattr(requests, method)(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ), json={'assets': ['notanasset']},
    )
    assert_error_response(
        response=response,
        contained_in_msg='Unknown asset notanasset provided',
        status_code=HTTPStatus.BAD_REQUEST,
    )
    # Test that list with one valid and one invalid is rejected and not even the
    # valid one is processed
    if method == 'put':
        asset = 'ETH'
    else:
        asset = 'XMR'
    response = getattr(requests, method)(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ), json={'assets': [asset, 'notanasset']},
    )
    assert_error_response(
        response=response,
        contained_in_msg='Unknown asset notanasset provided',
        status_code=HTTPStatus.BAD_REQUEST,
    )
    # Check that assets did not get modified
    assert rotki.data.db.get_ignored_assets() == ignored_assets

    # Test the adding an already existing asset or removing a non-existing asset is an error
    if method == 'put':
        asset = 'RDN'
        expected_msg = 'RDN is already in ignored assets'
    else:
        asset = 'ETH'
        expected_msg = 'ETH is not in ignored assets'
    response = getattr(requests, method)(
        api_url_for(
            rotkehlchen_api_server_with_exchanges,
            "ignoredassetsresource",
        ), json={'assets': [asset]},
    )
    assert_error_response(
        response=response,
        contained_in_msg=expected_msg,
        status_code=HTTPStatus.CONFLICT,
    )
    # Check that assets did not get modified
    assert rotki.data.db.get_ignored_assets() == ignored_assets
| StarcoderdataPython |
5141574 | <reponame>vtsuperdarn/deep_leaning_on_GSP_TEC<gh_stars>1-10
'''
This file gets the predicted tec maps by first loading the saved model and then running on the test input.
'''
from st_resnet import STResNetShared, STResNetIndep
import tensorflow as tf
from params import Params as param
import pandas as pd
import numpy as np
import sqlite3
from tqdm import tqdm
import datetime as dt
import os
import numpy
import time
from omn_utils import OmnData
from batch_utils import BatchDateUtils, TECUtils
# Parameters for tensor flow
# Select the model variant: channels either share weights or are independent.
if(param.independent_channels == True):
    g = STResNetIndep()
    print ("Computation graph for ST-ResNet with independent channels loaded\n")
else:
    g = STResNetShared()
    print ("Computation graph for ST-ResNet with shared channels loaded\n")

file_dir="../../data/tec_map/filled/"

#closeness is sampled 12 times every 5 mins, lookback = (12*5min = 1 hour)
#freq 1 is 5mins
#size corresponds to the sample size
closeness_size = param.closeness_sequence_length

#period is sampled 24 times every 1 hour (every 12th index), lookback = (24*12*5min = 1440min = 1day)
period_size = param.period_sequence_length

#trend is sampled 24 times every 3 hours (every 36th index), lookback = (8*36*5min = 1440min = 1day)
trend_size = param.trend_sequence_length

# get date ranges for getting the prediction
start_date = param.start_date
end_date = param.end_date

# We need OMNI data for training
# setting appropriate vars and reading
omn_dbdir = param.omn_dbdir
omn_db_name = param.omn_db_name
omn_table_name = param.omn_table_name
omn_train=True
# Pad the OMNI window on both sides so lookback samples near the edges exist.
start_date_omni = start_date - dt.timedelta(days=param.load_window)
end_date_omni = end_date + dt.timedelta(days=param.load_window)

# Directory where per-datetime predictions and the loss file are written.
path = param.saved_model_path+"_values"

#getting the omni object
omnObj = OmnData(start_date_omni, end_date_omni, omn_dbdir, omn_db_name, omn_table_name, omn_train, param.imf_normalize, path)

# get all corresponding dates for batches
batchObj = BatchDateUtils(start_date, end_date, param.batch_size, param.tec_resolution, param.data_point_freq,\
                          param.closeness_freq, closeness_size, param.period_freq, period_size,\
                          param.trend_freq, trend_size, param.num_of_output_tec_maps, param.output_freq,\
                          param.closeness_channel, param.period_channel, param.trend_channel)
#getting all the datetime from which prediction has to be made
date_arr_test = np.array( list(batchObj.batch_dict.keys()) )

# Bulk load TEC data
tecObj = TECUtils(start_date, end_date, file_dir, param.tec_resolution, param.load_window,\
                  param.closeness_channel, param.period_channel, param.trend_channel)

# Spatial weighting applied to the loss, replicated across batch and outputs.
weight_matrix = np.load(param.loss_weight_matrix)
#converting by repeating the weight_matrix into a desired shape of (B, O, H, W)
weight_matrix_expanded = np.expand_dims(weight_matrix, 0)
weight_matrix_tiled = np.tile(weight_matrix_expanded, [param.batch_size*param.num_of_output_tec_maps, 1, 1])
loss_weight_matrix = np.reshape(weight_matrix_tiled, [param.batch_size, param.num_of_output_tec_maps, param.map_height, param.map_width])
#converting the dimension from (B, O, H, W) -> (B, H, W, O)
loss_weight_matrix = np.transpose(loss_weight_matrix, [0, 2, 3, 1])
with tf.Session(graph=g.graph) as sess:
    #loading the trained model whose path is given in the params file
    g.saver.restore(sess, param.saved_model_path+param.saved_model)
    b = 1
    loss_values = []
    for te_ind, current_datetime in tqdm(enumerate(date_arr_test)):
        #print("Testing date-->" + current_datetime.strftime("%Y%m%d-%H%M"))
        #get the batch of data points
        curr_batch_time_dict = batchObj.batch_dict[current_datetime]
        #creating directory inside the model_path_values folder for those datetime variables for which prediction is made
        for dtm in curr_batch_time_dict.keys():
            os.makedirs(path+'/'+dtm.strftime("%Y%m%d_%H_%M"))
        # NOTE(review): create_batch is called again inside every branch
        # below, so this initial call appears redundant -- confirm it has no
        # side effects (e.g. RNG state) before removing it.
        data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)
        #if we need to use the exogenous module
        if (param.add_exogenous == True):
            imf_batch = omnObj.get_omn_batch(current_datetime, param.batch_size, param.trend_freq, trend_size )
            # One branch per enabled channel combination; each fetches the
            # matching exogenous-fused graph outputs.
            if(param.closeness_channel == True and param.period_channel == True and param.trend_channel == True):
                data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)
                loss_v, pred, truth, closeness, period, trend = sess.run([g.loss, g.x_res, g.output_tec, g.exo_close, g.exo_period, g.exo_trend],
                                                                         feed_dict={g.c_tec: data_close,
                                                                                    g.p_tec: data_period,
                                                                                    g.t_tec: data_trend,
                                                                                    g.output_tec: data_out,
                                                                                    g.exogenous: imf_batch,
                                                                                    g.loss_weight_matrix: loss_weight_matrix})
            elif(param.closeness_channel == True and param.period_channel == True and param.trend_channel == False):
                #here the data_trend will be empty
                data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)
                loss_v, pred, truth, closeness, period = sess.run([g.loss, g.x_res, g.output_tec, g.exo_close, g.exo_period],
                                                                  feed_dict={g.c_tec: data_close,
                                                                             g.p_tec: data_period,
                                                                             g.output_tec: data_out,
                                                                             g.exogenous: imf_batch,
                                                                             g.loss_weight_matrix: loss_weight_matrix})
            elif(param.closeness_channel == True and param.period_channel == False and param.trend_channel == True):
                #here the data_period will be empty
                data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)
                loss_v, pred, truth, closeness, trend = sess.run([g.loss, g.x_res, g.output_tec, g.exo_close, g.exo_trend],
                                                                 feed_dict={g.c_tec: data_close,
                                                                            g.t_tec: data_trend,
                                                                            g.output_tec: data_out,
                                                                            g.exogenous: imf_batch,
                                                                            g.loss_weight_matrix: loss_weight_matrix})
            elif(param.closeness_channel == True and param.period_channel == False and param.trend_channel == False):
                #here the data_period, data_trend will be empty
                data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)
                loss_v, pred, truth, closeness = sess.run([g.loss, g.x_res, g.output_tec, g.exo_close],
                                                          feed_dict={g.c_tec: data_close,
                                                                     g.output_tec: data_out,
                                                                     g.exogenous: imf_batch,
                                                                     g.loss_weight_matrix: loss_weight_matrix})
        #if we dont want to use the exogenous module
        else:
            # Same four channel combinations, without the exogenous inputs.
            if(param.closeness_channel == True and param.period_channel == True and param.trend_channel == True):
                data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)
                loss_v, pred, truth, closeness, period, trend = sess.run([g.loss, g.x_res, g.output_tec, g.closeness_output, g.period_output, g.trend_output],
                                                                         feed_dict={g.c_tec: data_close,
                                                                                    g.p_tec: data_period,
                                                                                    g.t_tec: data_trend,
                                                                                    g.output_tec: data_out,
                                                                                    g.loss_weight_matrix: loss_weight_matrix})
            elif(param.closeness_channel == True and param.period_channel == True and param.trend_channel == False):
                #here the data_trend will be empty
                data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)
                loss_v, pred, truth, closeness, period = sess.run([g.loss, g.x_res, g.output_tec, g.closeness_output, g.period_output],
                                                                  feed_dict={g.c_tec: data_close,
                                                                             g.p_tec: data_period,
                                                                             g.output_tec: data_out,
                                                                             g.loss_weight_matrix: loss_weight_matrix})
            elif(param.closeness_channel == True and param.period_channel == False and param.trend_channel == True):
                #here the data_period will be empty
                data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)
                loss_v, pred, truth, closeness, trend = sess.run([g.loss, g.x_res, g.output_tec, g.closeness_output, g.trend_output],
                                                                 feed_dict={g.c_tec: data_close,
                                                                            g.t_tec: data_trend,
                                                                            g.output_tec: data_out,
                                                                            g.loss_weight_matrix: loss_weight_matrix})
            elif(param.closeness_channel == True and param.period_channel == False and param.trend_channel == False):
                #here the data_period,data_trend will be empty
                data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)
                loss_v, pred, truth, closeness = sess.run([g.loss, g.x_res, g.output_tec, g.closeness_output],
                                                          feed_dict={g.c_tec: data_close,
                                                                     g.output_tec: data_out,
                                                                     g.loss_weight_matrix: loss_weight_matrix})
        loss_values.append(loss_v)
        print("val_loss: {:.3f}".format(loss_v))
        #saving the predictions into seperate directories that are already created
        j = 0
        for dtm in curr_batch_time_dict.keys():
            folder_name = path+'/'+dtm.strftime("%Y%m%d_%H_%M")
            np.save(folder_name+'/pred.npy', pred[j])
            np.save(folder_name+'/y.npy', truth[j])
            if(param.closeness_channel == True):
                np.save(folder_name+'/close.npy', closeness[j])
            if(param.period_channel == True):
                np.save(folder_name+'/period.npy', period[j])
            if(param.trend_channel == True):
                np.save(folder_name+'/trend.npy', trend[j])
            j += 1
        print ('Saving {} batch with {:.1f}'.format(b, loss_v.item()))
        b += 1
    loss_values = np.array(loss_values)
    print ('Saving loss values in the .npy file ...')
    np.save(path+'/prediction_loss.npy', loss_values)
| StarcoderdataPython |
55979 | from .utils import *
from .enums import * | StarcoderdataPython |
3543309 | <reponame>Alicegif/covid19-severity-prediction
#! /usr/bin/python3
import pandas as pd
import os
from os.path import join as oj
from os.path import dirname
if __name__ == '__main__':
import sys
sys.path.append(oj(os.path.dirname(__file__), '../../raw/usafacts_infections/'))
from load import load_usafacts_infections
else:
from ...raw.usafacts_infections.load import load_usafacts_infections
def clean_usafacts_infections(data_dir='../../raw/usafacts_infections/',
                              out_dir='.'):
    ''' Clean usafacts data

    Parameters
    ----------
    data_dir : str; path to the data directory to find raw csv
    out_dir : str; path to the data directory to write cleaned csv

    Returns
    -------
    writes out cleaned csv file and returns clean data frame
    '''
    # Load the raw infection counts, persist the cleaned table to disk,
    # then hand the frame back to the caller.
    cleaned = load_usafacts_infections(data_dir=data_dir)
    out_path = oj(out_dir, "usafacts_infections.csv")
    cleaned.to_csv(out_path, index=False)
    return cleaned
if __name__ == '__main__':
    # Allow running this module as a script to regenerate the cleaned csv.
    df = clean_usafacts_infections()
    print("cleaned usafacts infections successfully.")
| StarcoderdataPython |
9643390 | import pygame
import pygame.font
class Button:
    """A fixed-size button rendered in the centre of the game screen.

    The label is rendered once (set_button_text) and cached as a surface;
    draw_button just blits the cached image each frame.
    """

    def __init__(self, game, txt):
        """Build the button rect, pick colours/font and render *txt*."""
        self.screen = game.screen
        self.screen_rect = self.screen.get_rect()
        # Fixed geometry and palette shared by every button.
        self.width, self.height = 200, 50
        self.button_color = (0, 153, 0)  # RGB
        self.txt_color = (160, 160, 160)
        self.font = pygame.font.SysFont(None, 48)
        # Centre the button rect on the screen.
        self.rect = pygame.Rect(0, 0, self.width, self.height)
        self.rect.center = self.screen_rect.center
        self.set_button_text(txt)

    def set_button_text(self, txt):
        """Render *txt* onto a surface and centre it within the button."""
        self.txt = self.font.render(txt, True, self.txt_color, self.button_color)
        self.txt_rect = self.txt.get_rect()
        self.txt_rect.center = self.rect.center

    def draw_button(self):
        """Paint the button background, then blit the label on top."""
        self.screen.fill(self.button_color, self.rect)
        self.screen.blit(self.txt, self.txt_rect)
37195 | # if语句
# Demo script: Python if-statement basics (comments translated to English;
# printed strings intentionally left in the original Chinese).
games = ['CS GO', 'wow', 'deathStranding']
for game in games:
    if game == 'wow':  # equality is tested with '=='
        print(game.upper())

# Checking for equality
sport = 'football'
if sport == 'FOOTBALL':
    print('yes')
else:
    print('No')  # prints "No": comparison is case-sensitive, so different case
                 # is not the same string; lowercase both sides to compare

for game in games:
    if game.lower() == 'cs go':
        print('是cs go了')

# Checking for inequality
for game in games:
    if game != 'wow':
        print('该游戏不是wow,该游戏是' + game)
    else:
        print('该游戏是wow')

# Comparing numbers
ages = [15, 31, 22, 18]
for age in ages:
    if age >= 18:
        print('已成年')
    else:
        print('未成年')

# Using 'and' to check multiple conditions
i = 17
j = 21
if i > 18 and j > 18:
    print('\n两者都已成年')
else:
    print('\n有未成年混入其中')

# 'or' also checks multiple conditions -- it passes when either one holds,
# and only returns False when both fail
i = 17
j = 12
if i > 18 or j > 18:
    print('\n两者里有成年人')
else:
    print('\n都是未成年')

# Use 'in' to check whether a specific value is present
if 'wow' in games:
    print('\nwow已经在游戏库中')
else:
    games.append('OverWatch')
    print('\nwow不在库中,现已经添加进去')
if 'OverWatch' in games:
    print('\n守望先锋已经在游戏库中')
else:
    games.append('OverWatch')
    print('\nOverWatch不在库中,现已经添加进去')

# if-elif-else structure
age = 4
if age <= 4:
    price = 0
elif 4 < age <= 18:
    price = 25
else:
    price = 50
print('您的票价为:' + str(price) + '元,谢谢参观!')

# Multiple elif blocks
age = 31
if age <= 4:
    price = 0
elif 4 < age <= 18:
    price = 5
elif 18 < age < 65:
    price = 10
elif 65 <= age:
    price = 5
print('\n购买票价为:' + str(price) + '元 谢谢参观!')

ob = ['雕哥', '宝哥', '龙神', '胖头', '大狗', '大Mu', '核桃', '谢彬', '马甲', '566']
for player in ob:
    if player == '谢彬':
        print('谢彬是谁???')
    elif player == '胖头':
        print('法国士兵!!!')
    else:
        print(player + 'nb!!!')

# Checking whether a list is empty
games = []
if games:
    for game in games:
        # NOTE(review): prints the header once per item and never the item
        # itself -- presumably intended to print the header once, then each
        # game; unreachable here since games is empty.
        print('\n圣诞大折扣所要购买的游戏有:')
else:
    print('\n购物车中没有任何游戏、快去添加吧!')

# Using multiple lists
my_games = ['Dota2', 'CS GO', 'WOW', 'Over Watch', 'Death Stranding', 'Cyberpunk2077', 'Dark Dungeon']
fri_games = ['Dota2', 'CS GO', 'WOW', 'lol']
for fri_game in fri_games:
    if fri_game in my_games:
        print('我们共同喜欢的游戏有:' + fri_game.upper())
    else:
        print('我不喜欢玩' + fri_game.upper() + '但是她好像蛮喜欢的')
1947395 | <filename>cifar10_data.py
"""
cifar-10 dataset, with support for random labels
"""
import numpy as np
import torch
import torchvision.datasets as datasets
class CIFAR10RandomLabels(datasets.CIFAR10):
    """CIFAR10 dataset, with support for randomly corrupt labels.

    Params
    ------
    corrupt_prob: float
      Default 0.0. The probability of a label being replaced with
      random label.
    num_classes: int
      Default 10. The number of classes in the dataset.
    """

    def __init__(self, corrupt_prob=0.0, num_classes=10, **kwargs):
        super(CIFAR10RandomLabels, self).__init__(**kwargs)
        self.n_classes = num_classes
        # Only touch the labels when corruption is actually requested.
        if corrupt_prob > 0:
            self.corrupt_labels(corrupt_prob)

    def corrupt_labels(self, corrupt_prob):
        """Replace each label independently with probability corrupt_prob by a
        uniformly random class id (the replacement may coincide with the true label)."""
        # NOTE(review): train_labels/test_labels are pre-0.2.x torchvision
        # attribute names — confirm the installed torchvision still exposes them.
        labels = np.array(self.train_labels if self.train else self.test_labels)
        np.random.seed(12345)  # fixed seed so corruption is reproducible across runs
        mask = np.random.rand(len(labels)) <= corrupt_prob
        rnd_labels = np.random.choice(self.n_classes, mask.sum())
        labels[mask] = rnd_labels
        # we need to explicitly cast the labels from npy.int64 to
        # builtin int type, otherwise pytorch will fail...
        labels = [int(x) for x in labels]
        if self.train:
            self.train_labels = labels
        else:
            self.test_labels = labels
class CIFAR10RandomPixels(datasets.CIFAR10):
    """CIFAR10 variant where individual pixel values are replaced by uniform noise.

    Each scalar pixel/channel value is independently replaced, with probability
    corrupt_prob, by a uniformly random value in [0, 255].
    """

    def __init__(self, corrupt_prob=0.0, num_classes=10, **kwargs):
        # num_classes is unused; kept for signature compatibility with the
        # other corruption variants in this module.
        super(CIFAR10RandomPixels, self).__init__(**kwargs)
        if corrupt_prob > 0:
            self.corrupt_pixels(corrupt_prob)

    def corrupt_pixels(self, corrupt_prob):
        """Replace each pixel value independently with prob corrupt_prob."""
        np.random.seed(12345)  # fixed seed for reproducible corruption
        # NOTE(review): train_data/test_data are pre-0.2.x torchvision attribute
        # names — confirm the installed torchvision still exposes them.
        data = np.array(self.train_data if self.train else self.test_data)
        # Per-element boolean mask: True where a value should be replaced.
        mask = np.random.rand(data.shape[0], data.shape[1], data.shape[2], data.shape[3]) <= corrupt_prob
        rnd_data = np.random.randint(0, 256, data.shape)
        # Blend: keep original where mask is 0, take noise where mask is 1.
        data = (1 - mask) * data + mask * rnd_data
        data = data.astype(np.uint8)
        if self.train:
            self.train_data = data
        else:
            self.test_data = data
class CIFAR10ShufflePixels(datasets.CIFAR10):
    """CIFAR10 variant that randomly shuffles the pixels within an image.

    With probability ``corrupt_prob`` an image has all of its pixel values
    shuffled in place, destroying spatial structure while preserving the
    per-image pixel histogram.

    Params
    ------
    corrupt_prob: float
        Default 0.0. Probability that a given image has its pixels shuffled.
    num_classes: int
        Default 10. Unused; kept for signature compatibility with the other
        CIFAR10 corruption variants in this module.
    """

    def __init__(self, corrupt_prob=0.0, num_classes=10, **kwargs):
        super(CIFAR10ShufflePixels, self).__init__(**kwargs)
        # Guard like the other variants so corrupt_prob == 0 is a true no-op.
        # The original called corrupt_pixels unconditionally, which reseeded
        # the global numpy RNG and could even shuffle an image on the
        # pathological rand() == 0.0 draw.
        if corrupt_prob > 0:
            self.corrupt_pixels(corrupt_prob)

    def corrupt_pixels(self, corrupt_prob):
        """Shuffle the pixels of each image independently with prob corrupt_prob."""
        np.random.seed(12345)  # fixed seed for reproducible corruption
        data = np.array(self.train_data if self.train else self.test_data)
        # Flatten each image so a single shuffle mixes all pixels/channels.
        data = np.reshape(data, (data.shape[0], -1))
        for i in range(data.shape[0]):
            if np.random.rand() <= corrupt_prob:
                np.random.shuffle(data[i, :])
        # assumes 32x32 RGB images — TODO confirm for any non-CIFAR reuse
        data = np.reshape(data, (data.shape[0], 32, 32, 3))
        if self.train:
            self.train_data = data
        else:
            self.test_data = data
class CIFAR10GaussianPixels(datasets.CIFAR10):
    """CIFAR10 variant that replaces whole images with Gaussian noise.

    With probability ``corrupt_prob`` an image is overwritten by per-channel
    Gaussian noise whose means/stds match the CIFAR10 channel statistics.

    Params
    ------
    corrupt_prob: float
        Default 0.0. Probability that a given image is replaced by noise.
    num_classes: int
        Default 10. Unused; kept for signature compatibility with the other
        CIFAR10 corruption variants in this module.
    """

    def __init__(self, corrupt_prob=0.0, num_classes=10, **kwargs):
        super(CIFAR10GaussianPixels, self).__init__(**kwargs)
        # Guard like the other variants so corrupt_prob == 0 is a true no-op
        # (the original called corrupt_pixels unconditionally).
        if corrupt_prob > 0:
            self.corrupt_pixels(corrupt_prob)

    def corrupt_pixels(self, corrupt_prob):
        """Replace each image independently with prob corrupt_prob by Gaussian noise."""
        np.random.seed(12345)  # fixed seed for reproducible corruption
        data = np.array(self.train_data if self.train else self.test_data)
        for i in range(data.shape[0]):
            if np.random.rand() <= corrupt_prob:
                # Per-channel CIFAR10 means and stds (RGB, 0-255 scale).
                data[i, :, :, :] = np.random.normal((125.3, 123.0, 113.9), (63.0, 62.1, 66.7), data[i, :, :, :].shape)
        data = data.astype(np.uint8)
        # (Two leftover debug print statements that dumped raw pixel values and
        # channel means on every construction were removed here.)
        if self.train:
            self.train_data = data
        else:
            self.test_data = data
| StarcoderdataPython |
11275046 | <reponame>Hidberg/Landmark2019-1st-and-3rd-Place-Solution
import itertools
import random
import math
import albumentations.augmentations.functional as F
import cv2
from PIL import Image
import numpy as np
import torch
from albumentations import ImageOnlyTransform
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data.sampler import BatchSampler
from torch.utils.data.sampler import Sampler
from torch._six import int_classes as _int_classes
from src.autoaugment import ImageNetPolicy
class PiecewiseCyclicalLinearLR(_LRScheduler):
    r"""Set the learning rate of each parameter group using a piecewise
    cyclical linear schedule.

    When last_epoch=-1, sets initial lr as lr.

    _Loss Surfaces, Mode Connectivity, and Fast Ensembling of DNNs
    https://arxiv.org/pdf/1802.10026

    Exploring loss function topology with cyclical learning rates
    https://arxiv.org/abs/1702.04283
    """

    def __init__(self, optimizer, c, alpha1=1e-2, alpha2=5e-4, last_epoch=-1):
        """
        :param c: cycle length
        :param alpha1: lr upper bound of cycle
        :param alpha2: lr lower bound of cycle
        """
        self.c = c
        self.alpha1 = alpha1
        self.alpha2 = alpha2
        super(PiecewiseCyclicalLinearLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return one lr per parameter group; all groups share the same cyclical value."""
        lrs = []
        for _ in range(len(self.base_lrs)):
            # Normalized position within the current cycle, always in (0, 1].
            ti = ((self.last_epoch - 1) % self.c + 1) / self.c
            if 0 <= ti <= 0.5:
                # First half of the cycle: descend linearly from alpha1 to alpha2.
                lr = (1 - 2 * ti) * self.alpha1 + 2 * ti * self.alpha2
            elif 0.5 < ti <= 1.0:
                # Second half: ascend linearly back from alpha2 to alpha1.
                lr = (2 - 2 * ti) * self.alpha2 + (2 * ti - 1) * self.alpha1
            else:
                # Defensive; ti cannot leave (0, 1] given the modulo above.
                raise ValueError('t(i) is out of range [0,1].')
            lrs.append(lr)
        return lrs
class PolyLR(_LRScheduler):
    """Polynomial learning-rate decay: lr = base_lr * (1 - epoch / max_epoch) ** power."""

    def __init__(self, optimizer, power=0.9, max_epoch=4e4, last_epoch=-1):
        """The argument name "epoch" also can be thought as "iter"."""
        self.power = power
        self.max_epoch = max_epoch
        # NOTE(review): super().__init__ also sets last_epoch; this assignment
        # looks redundant — confirm before removing.
        self.last_epoch = last_epoch
        super(PolyLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return one polynomially-decayed lr per parameter group."""
        lrs = []
        for base_lr in self.base_lrs:
            lr = base_lr * (1.0 - (self.last_epoch / self.max_epoch)) ** self.power
            lrs.append(lr)
        return lrs
class WarmupCosineAnnealingLR(torch.optim.lr_scheduler._LRScheduler):
    """cosine annealing scheduler with warmup.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        T_max (int): Maximum number of iterations.
        eta_min (float): Minimum learning rate. Default: 0.
        last_epoch (int): The index of last epoch. Default: -1.

    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983
    """

    def __init__(
            self,
            optimizer,
            T_max,
            eta_min,
            warmup_factor=1.0 / 3,
            warmup_iters=500,
            warmup_method="linear",
            last_epoch=-1,
    ):
        if warmup_method not in ("constant", "linear"):
            # NOTE(review): message is missing a separator between "accepted"
            # and "got" — runtime string left unchanged.
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted"
                "got {}".format(warmup_method)
            )
        self.T_max = T_max
        self.eta_min = eta_min
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Warmup phase first, then standard cosine annealing for the remainder.
        if self.last_epoch < self.warmup_iters:
            return self.get_lr_warmup()
        else:
            return self.get_lr_cos_annealing()

    def get_lr_warmup(self):
        """Ramp the lr up to base_lr over warmup_iters (linearly or as a constant fraction)."""
        if self.warmup_method == "constant":
            warmup_factor = self.warmup_factor
        elif self.warmup_method == "linear":
            # alpha goes 0 -> 1, interpolating warmup_factor -> 1.
            alpha = self.last_epoch / self.warmup_iters
            warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        return [
            base_lr * warmup_factor
            for base_lr in self.base_lrs
        ]

    def get_lr_cos_annealing(self):
        """Cosine-anneal from base_lr down to eta_min over the post-warmup iterations."""
        last_epoch = self.last_epoch - self.warmup_iters
        T_max = self.T_max - self.warmup_iters
        return [self.eta_min + (base_lr - self.eta_min) *
                (1 + math.cos(math.pi * last_epoch / T_max)) / 2
                for base_lr in self.base_lrs]
def _pad_const(x, target_height, target_width, value=255, center=True, pad_loc_seed=None):
    """Pad image `x` with a constant-valued border up to (target_height, target_width).

    With center=True the padding is split evenly around the image; otherwise the
    image is placed at a random offset (reproducible through pad_loc_seed, which
    seeds the global `random` module). Dimensions already at or above the target
    are left unpadded.
    """
    random.seed(pad_loc_seed)
    height, width = x.shape[:2]

    pad_top = pad_bottom = 0
    if height < target_height:
        extra_h = target_height - height
        pad_top = int(extra_h / 2.0) if center else random.randint(a=0, b=extra_h)
        pad_bottom = extra_h - pad_top

    pad_left = pad_right = 0
    if width < target_width:
        extra_w = target_width - width
        pad_left = int(extra_w / 2.0) if center else random.randint(a=0, b=extra_w)
        pad_right = extra_w - pad_left

    return cv2.copyMakeBorder(x, pad_top, pad_bottom, pad_left, pad_right,
                              cv2.BORDER_CONSTANT, value=value)
class RandomCropThenScaleToOriginalSize(ImageOnlyTransform):
    """Crop a random part of the input and rescale it to some size.

    Args:
        limit (float): maximum factor range for cropping region size.
        interpolation (OpenCV flag): flag that is used to specify the interpolation algorithm. Should be one of:
            cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4.
            Default: cv2.INTER_LINEAR.
        pad_value (int): pixel value for padding.
        p (float): probability of applying the transform. Default: 1.
    """

    def __init__(self, limit=0.1, interpolation=cv2.INTER_LINEAR, pad_value=0, p=1.0):
        super(RandomCropThenScaleToOriginalSize, self).__init__(p)
        self.limit = limit
        self.interpolation = interpolation
        self.pad_value = pad_value

    def apply(self, img, height_scale=1.0, width_scale=1.0, h_start=0, w_start=0, interpolation=cv2.INTER_LINEAR,
              pad_value=0, pad_loc_seed=None, **params):
        """Crop a (height_scale, width_scale) fraction of img, then resize the
        crop back to the original image size."""
        img_height, img_width = img.shape[:2]
        crop_height, crop_width = int(img_height * height_scale), int(img_width * width_scale)
        crop = self.random_crop(img, crop_height, crop_width, h_start, w_start, pad_value, pad_loc_seed)
        return F.resize(crop, img_height, img_width, interpolation)

    def get_params(self):
        # Scales are sampled in [1 - limit, 1 + limit]; values > 1 mean the
        # crop is larger than the image and the overflow gets padded.
        height_scale = 1.0 + random.uniform(-self.limit, self.limit)
        width_scale = 1.0 + random.uniform(-self.limit, self.limit)
        return {'h_start': random.random(),
                'w_start': random.random(),
                'height_scale': height_scale,
                'width_scale': width_scale,
                'pad_loc_seed': random.random()}

    def update_params(self, params, **kwargs):
        # Forward the configured interpolation/pad value into the per-call params.
        if hasattr(self, 'interpolation'):
            params['interpolation'] = self.interpolation
        if hasattr(self, 'pad_value'):
            params['pad_value'] = self.pad_value
        params.update({'cols': kwargs['image'].shape[1], 'rows': kwargs['image'].shape[0]})
        return params

    @staticmethod
    def random_crop(img, crop_height, crop_width, h_start, w_start, pad_value=0, pad_loc_seed=None):
        """Crop a (crop_height, crop_width) window at fractional offsets
        (h_start, w_start), padding first if the image is smaller than the crop."""
        height, width = img.shape[:2]
        if height < crop_height or width < crop_width:
            img = _pad_const(img, crop_height, crop_width, value=pad_value, center=False, pad_loc_seed=pad_loc_seed)
        y1 = max(int((height - crop_height) * h_start), 0)
        y2 = y1 + crop_height
        x1 = max(int((width - crop_width) * w_start), 0)
        x2 = x1 + crop_width
        img = img[y1:y2, x1:x2]
        return img
class AutoAugmentWrapper(ImageOnlyTransform):
    """Albumentations wrapper around the PIL-based ImageNet AutoAugment policy."""

    def __init__(self, p=1.0):
        super(AutoAugmentWrapper, self).__init__(p)
        self.autoaugment = ImageNetPolicy()

    def apply(self, img, **params):
        # AutoAugment operates on PIL images, so round-trip through PIL.
        img = Image.fromarray(img)
        img = self.autoaugment(img)
        img = np.asarray(img)
        return img
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
class GroupedBatchSampler(BatchSampler):
    """
    Wraps another sampler to yield a mini-batch of indices.
    It enforces that elements from the same group should appear in groups of batch_size.
    It also tries to provide mini-batches which follows an ordering which is
    as close as possible to the ordering from the original sampler.

    Arguments:
        sampler (Sampler): Base sampler.
        group_ids (sequence[int]): one group id per dataset element.
        batch_size (int): Size of mini-batch.
        drop_uneven (bool): If ``True``, the sampler will drop the batches whose
            size is less than ``batch_size``
    """

    def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven
        # Sorted list of distinct group ids present in the dataset.
        self.groups = torch.unique(self.group_ids).sort(0)[0]
        self._can_reuse_batches = False

    def _prepare_batches(self):
        """Materialize the full epoch's batches, grouped by group id but ordered
        to track the base sampler's ordering as closely as possible."""
        dataset_size = len(self.group_ids)
        # get the sampled indices from the sampler
        sampled_ids = torch.as_tensor(list(self.sampler))
        # potentially not all elements of the dataset were sampled
        # by the sampler (e.g., DistributedSampler).
        # construct a tensor which contains -1 if the element was
        # not sampled, and a non-negative number indicating the
        # order where the element was sampled.
        # for example. if sampled_ids = [3, 1] and dataset_size = 5,
        # the order is [-1, 1, -1, 0, -1]
        order = torch.full((dataset_size,), -1, dtype=torch.int64)
        order[sampled_ids] = torch.arange(len(sampled_ids))
        # get a mask with the elements that were sampled
        mask = order >= 0
        # find the elements that belong to each individual cluster
        clusters = [(self.group_ids == i) & mask for i in self.groups]
        # get relative order of the elements inside each cluster
        # that follows the order from the sampler
        relative_order = [order[cluster] for cluster in clusters]
        # with the relative order, find the absolute order in the
        # sampled space
        permutation_ids = [s[s.sort()[1]] for s in relative_order]
        # permute each cluster so that they follow the order from
        # the sampler
        permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
        # splits each cluster in batch_size, and merge as a list of tensors
        splits = [c.split(self.batch_size) for c in permuted_clusters]
        merged = tuple(itertools.chain.from_iterable(splits))
        # now each batch internally has the right order, but
        # they are grouped by clusters. Find the permutation between
        # different batches that brings them as close as possible to
        # the order that we have in the sampler. For that, we will consider the
        # ordering as coming from the first element of each batch, and sort
        # correspondingly
        first_element_of_batch = [t[0].item() for t in merged]
        # get and inverse mapping from sampled indices and the position where
        # they occur (as returned by the sampler)
        inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())}
        # from the first element in each batch, get a relative ordering
        first_index_of_batch = torch.as_tensor(
            [inv_sampled_ids_map[s] for s in first_element_of_batch]
        )
        # permute the batches so that they approximately follow the order
        # from the sampler
        permutation_order = first_index_of_batch.sort(0)[1].tolist()
        # finally, permute the batches
        batches = [merged[i].tolist() for i in permutation_order]
        if self.drop_uneven:
            # Keep only full-sized batches.
            kept = []
            for batch in batches:
                if len(batch) == self.batch_size:
                    kept.append(batch)
            batches = kept
        return batches

    def __iter__(self):
        # Reuse batches prepared eagerly by __len__ (exactly once), otherwise
        # compute a fresh epoch.
        if self._can_reuse_batches:
            batches = self._batches
            self._can_reuse_batches = False
        else:
            batches = self._prepare_batches()
        self._batches = batches
        return iter(batches)

    def __len__(self):
        # Computing the length requires materializing an epoch; cache it so the
        # following __iter__ does not redo the work.
        if not hasattr(self, "_batches"):
            self._batches = self._prepare_batches()
            self._can_reuse_batches = True
        return len(self._batches)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
class IterationBasedBatchSampler(BatchSampler):
    """
    Wraps a BatchSampler, resampling from it until
    a specified number of iterations have been sampled
    """

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        # Iteration to resume from (useful when restarting training).
        self.start_iter = start_iter

    def __iter__(self):
        iteration = self.start_iter
        while iteration <= self.num_iterations:
            # if the underlying sampler has a set_epoch method, like
            # DistributedSampler, used for making each process see
            # a different split of the dataset, then set it
            if hasattr(self.batch_sampler.sampler, "set_epoch"):
                self.batch_sampler.sampler.set_epoch(iteration)
            for batch in self.batch_sampler:
                iteration += 1
                if iteration > self.num_iterations:
                    break
                yield batch

    def __len__(self):
        return self.num_iterations
class WeightedRandomSampler(Sampler):
    r"""Samples elements from [0,..,len(weights)-1] with given probabilities (weights).

    Note: Official WeightedRandomSampler implementation of PyTorch is very slow
    due to bad performance of torch.multinomial().
    Instead, this implementation uses numpy.random.choice().

    Arguments:
        weights (sequence) : a sequence of weights, not necessary summing up to one
        num_samples (int): number of samples to draw
        replacement (bool): if ``True``, samples are drawn with replacement.
            If not, they are drawn without replacement, which means that when a
            sample index is drawn for a row, it cannot be drawn again for that row.
    """

    def __init__(self, weights, num_samples, replacement=True):
        if not isinstance(num_samples, _int_classes) or isinstance(num_samples, bool) or \
                num_samples <= 0:
            raise ValueError("num_samples should be a positive integeral "
                             "value, but got num_samples={}".format(num_samples))
        if not isinstance(replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(replacement))
        # Normalize so np.random.choice receives a proper probability vector.
        # NOTE(review): assumes `weights` is a numpy array (elementwise `/` and
        # `.sum()`) — a plain Python list would fail here; confirm callers.
        self.weights = weights / weights.sum()
        self.num_samples = num_samples
        self.replacement = replacement

    def __iter__(self):
        return iter(np.random.choice(np.arange(len(self.weights)),
                                     size=self.num_samples,
                                     replace=self.replacement,
                                     p=self.weights))

    def __len__(self):
        return self.num_samples
class PseudoTripletSampler(Sampler):
    """Sampler that yields dataset indices in shuffled groups of ``k``, where each
    group holds samples that are adjacent in class-id-sorted order (giving
    triplet-style batches when the batch size is a multiple of ``k``).

    Arguments:
        class_ids: per-sample class ids (array-like of ints).
        k (int): group size; members of a group stay adjacent in the output.
    """

    def __init__(self, class_ids, k=4):
        self.class_ids = class_ids
        self.k = k

    def __iter__(self):
        # Drop the tail so the number of emitted samples is a multiple of k.
        # BUG FIX: the original referenced a bare name `k` throughout, which
        # raised NameError at iteration time; it must be self.k.
        upper = len(self.class_ids) - len(self.class_ids) % self.k
        # Shuffle the order of the groups while keeping group members adjacent.
        first_indexes = np.random.permutation(np.arange(0, upper, self.k))
        rand_indexes = np.concatenate(np.array([first_indexes + i for i in range(self.k)]).T)
        # Map positions in class-sorted order back to dataset indices.
        return iter(np.argsort(self.class_ids)[rand_indexes])

    def __len__(self):
        return len(self.class_ids)
| StarcoderdataPython |
9600721 | <reponame>qiskit-community/repo-monitor<filename>tests/test_utils.py
"""Tests for utils."""
import unittest
from typing import Optional, Union
from monitor.utils import UrlsHelper, GitHubUrlsHelper
class MockUrlsHelper(UrlsHelper):
    """Mock urls helper for testing purposes (returns fixed localhost URLs)."""

    def get_comments_url(self, account: str, repo: str,
                         number: Union[str, int]) -> str:
        """Returns mock comments url."""
        return "http://localhost/comments"

    def get_issues_url(self, account: str, repo: str,
                       page: Optional[Union[str, int]] = None) -> str:
        """Returns mock issues url."""
        return "http://localhost/issues"
class TestUrlHelper(unittest.TestCase):
    """Tests url helpers."""

    def test_github_url_heler(self):
        """Tests github url helper URL construction against expected API endpoints."""
        helper = GitHubUrlsHelper()
        issues_api_url = "https://api.github.com/repos/Qiskit/qiskit-terra/" \
                         "issues?page=10&state=open&per_page=100"
        comments_api_url = "https://api.github.com/repos/Qiskit/qiskit-terra/" \
                           "issues/1234/comments?per_page=100"
        self.assertEqual(helper.get_issues_url("Qiskit", "qiskit-terra", 10), issues_api_url)
        self.assertEqual(helper.get_comments_url("Qiskit", "qiskit-terra", 1234), comments_api_url)
| StarcoderdataPython |
6645249 | <reponame>pomarec/django
from django.conf.urls import patterns, url
from .views import empty_view
# URL fixtures for Django's reverse()/resolve() tests: two named URLs and one
# pattern with alternative named groups (one|two).
urlpatterns = patterns('',
    url(r'^$', empty_view, name="named-url5"),
    url(r'^extra/(?P<extra>\w+)/$', empty_view, name="named-url6"),
    url(r'^(?P<one>\d+)|(?P<two>\d+)/$', empty_view),
)
| StarcoderdataPython |
6483169 | <filename>research/cv/centernet_resnet50_v1/postprocess.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import json
import numpy as np
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
from src.model_utils.config import config, dataset_config, eval_config
from src import convert_eval_format, post_process, merge_outputs
def cal_acc(result_path, label_path, meta_path, save_path):
    """calculate inference accuracy

    Loads the raw per-image 310-inference outputs from result_path, converts
    them to COCO-format predictions, writes them under save_path, and prints
    the standard COCO bbox evaluation summary against label_path.
    """
    # Image ids and preprocessing metadata saved by the export/preprocess step.
    name_list = np.load(os.path.join(meta_path, "name_list.npy"), allow_pickle=True)
    meta_list = np.load(os.path.join(meta_path, "meta_list.npy"), allow_pickle=True)
    label_infor = coco.COCO(label_path)
    pred_annos = {"images": [], "annotations": []}
    for num, image_id in enumerate(name_list):
        meta = meta_list[num]
        # One binary file per image holding the raw detections, reshaped to
        # (1, 100, 6) — presumably (batch, top-k, bbox+score+class); confirm
        # against the export script.
        pre_image = np.fromfile(os.path.join(result_path) + "/eval2017_image_" + str(image_id) + "_0.bin",
                                dtype=np.float32).reshape((1, 100, 6))
        detections = []
        for scale in eval_config.multi_scales:
            dets = post_process(pre_image, meta, scale, dataset_config.num_classes)
            detections.append(dets)
        # Merge the multi-scale detections (optionally with soft-NMS).
        detections = merge_outputs(detections, dataset_config.num_classes, eval_config.SOFT_NMS)
        pred_json = convert_eval_format(detections, image_id, eval_config.valid_ids)
        label_infor.loadImgs([image_id])
        for image_info in pred_json["images"]:
            pred_annos["images"].append(image_info)
        for image_anno in pred_json["annotations"]:
            pred_annos["annotations"].append(image_anno)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Persist both the full prediction dict and the annotations-only file that
    # pycocotools' loadRes expects.
    pred_anno_file = os.path.join(save_path, '{}_pred_result.json').format(config.run_mode)
    json.dump(pred_annos, open(pred_anno_file, 'w'))
    pred_res_file = os.path.join(save_path, '{}_pred_eval.json').format(config.run_mode)
    json.dump(pred_annos["annotations"], open(pred_res_file, 'w'))
    coco_anno = coco.COCO(label_path)
    coco_dets = coco_anno.loadRes(pred_res_file)
    coco_eval = COCOeval(coco_anno, coco_dets, "bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
if __name__ == '__main__':
    # All paths come from the shared model_utils config object.
    cal_acc(config.result_path, config.label_path, config.meta_path, config.save_path)
| StarcoderdataPython |
1932671 | '''
scikit-learn 패키지에 포함된 위스콘신 대학 암 데이터를 로딩해서
Naive Bayes 모델로 예측 결과를 분석.
'''
import pandas as pd
from sklearn import datasets
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
# Load the Wisconsin breast-cancer dataset bundled with scikit-learn and
# inspect its basic structure.
cancer = datasets.load_breast_cancer()
print(cancer)
print(cancer.data.shape)
print(cancer.target_names)
print(cancer.feature_names)
# Use the already-loaded bunch instead of loading the dataset a second time
# with return_X_y=True (the original also left a dead `X = cancer.data`
# assignment that was immediately overwritten).
X, y = cancer.data, cancer.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Standardize features; fit on the training split only to avoid leakage.
scaler = StandardScaler()
scaler.fit(X_train)
X_train_transformed = scaler.transform(X_train)
X_test_transformed = scaler.transform(X_test)
# Train a Gaussian Naive Bayes classifier and report its test performance.
gnb = GaussianNB()
gnb.fit(X_train_transformed, y_train)
y_pred = gnb.predict(X_test_transformed)
print(y_pred)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
| StarcoderdataPython |
201470 | <gh_stars>1-10
import collections
import datetime
from django.utils.translation import ugettext_lazy as _
from .base import * # noqa
# Override static and media URL for prefix in WSGI server.
# https://code.djangoproject.com/ticket/25598
STATIC_URL = '/2016/static/'
MEDIA_URL = '/2016/media/'
# Default conference slug used when none is given explicitly.
CONFERENCE_DEFAULT_SLUG = 'pycontw-2016'
# Duration options offered on the talk-proposal form (code, display label).
TALK_PROPOSAL_DURATION_CHOICES = (
    ('NOPREF', _('No preference')),
    ('PREF25', _('Prefer 25min')),
    ('PREF45', _('Prefer 45min')),
)
# Display names for each conference day, in chronological order.
EVENTS_DAY_NAMES = collections.OrderedDict([
    (datetime.date(2016, 6, 3), _('Day 1')),
    (datetime.date(2016, 6, 4), _('Day 2')),
    (datetime.date(2016, 6, 5), _('Day 3')),
])
| StarcoderdataPython |
24235 | <reponame>dangthanhan507/odcl
import cv2
import os
import argparse
if __name__ == '__main__':
    # CLI: --o names the folder the captured frame is written into.
    arg = argparse.ArgumentParser()
    arg.add_argument('--o', required=True, help='output_folder')
    opt = arg.parse_args()
    # Grab a single frame from the default camera.
    cap = cv2.VideoCapture(0)
    ret, img = cap.read()
    cap.release()  # free the camera device as soon as the frame is captured
    if not ret:
        # Without this check a failed capture leaves img as None and imwrite
        # crashes later with an unhelpful error.
        raise RuntimeError('Failed to capture a frame from camera 0')
    print('Writing to chessboard file')
    # Pick the first unused img<index>.jpg name in the output folder.
    files = os.listdir(opt.o)
    index = 0
    while (f'img{index}.jpg' in files):
        index += 1
    cv2.imwrite(f'{opt.o}/img{index}.jpg', img)
| StarcoderdataPython |
6653780 | import subprocess
import os
import json
import argparse
from draco.spec import Query, Task
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
# Field types assigned in order to benchmark fields a, b, c, ...
DATA_FIELD_TYPES = ['string', 'string', 'string', 'number', 'string',
                    'number', 'string', 'number', 'number', 'number',
                    'number', 'number', 'number', 'string', 'number',
                    'number', 'number', 'number', 'string', 'number',
                    'string', 'number', 'datetime', 'number', 'string']
# Number of timed trials per (nfields, nencodings) configuration.
NUM_TRIALS = 20
# NOTE(review): CLINGO_PREFIX/CLINGO_OPTIONS appear unused — run() builds its
# own clingo command line below; confirm before removing.
CLINGO_PREFIX = 'clingo asp/_all.lp '
CLINGO_OPTIONS = '--quiet=1 --warn=no-atom-undefined -c max_extra_encs=0'
# Draco's ASP sources, concatenated (in this order) in front of each query.
DRACO_LP_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../asp'))
DRACO_LP = ['define.lp', 'generate.lp', 'hard.lp', 'soft.lp', 'weights.lp', 'assign_weights.lp', 'optimize.lp', 'output.lp']
def main(args):
    """Benchmark draco/clingo query time for args.nfields fields and
    args.nencodings encodings; write the per-trial timings to a JSON file."""
    nfields = int(args.nfields)
    nencodings = int(args.nencodings)
    # warmup (single untimed-reported trial to amortize cold-start effects)
    run_set(1, nfields, nencodings, True)
    # actual
    results = run_set(NUM_TRIALS, nfields, nencodings, False)
    with open(f'draco_runtimes_{nfields}_{nencodings}.json', 'w') as out_file:
        json.dump(results, out_file, indent=2)
def run_set(numTrials, nfields, nencodings, dry):
    """Run `numTrials` clingo queries for the given field/encoding counts.

    Returns a list of per-trial result dicts; unless `dry`, also logs the
    average query time.
    """
    results = []
    total_time = 0
    for _ in range(numTrials):
        asp_query = generate_asp_query(nfields, nencodings)
        delta = run(asp_query)
        total_time += delta
        results.append({
            'fields': nfields,
            'encodings': nencodings,
            'runtime': delta,
            'system': 'draco'
        })
    # BUG FIX: average over the trials actually run; the original divided by
    # the global NUM_TRIALS even for the single-trial warmup call. (A dead
    # `query = generate_asp_query(...)` before the loop was also removed.)
    avg_time = total_time / numTrials
    if not dry:
        logger.info('DRACO:: fields={0} encodings={1} avg_query_time: {2}'.format(nfields, nencodings, avg_time))
    return results
def run(asp_query):
    """Run clingo on the full draco program plus `asp_query`; return the total
    solve time in seconds as reported by clingo."""
    # default args
    options = ['--outf=2', '--quiet=3', '--warn=no-atom-undefined', '-c', 'max_extra_encs=0']
    cmd = ['clingo'] + options
    proc = subprocess.Popen(
        args=cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    file_names = [os.path.join(DRACO_LP_DIR, f) for f in DRACO_LP]
    # Concatenate every draco .lp source with the generated query and feed the
    # whole program to clingo on stdin.
    asp_program = b'\n'.join(map(load_file, file_names)) + asp_query.encode('utf8')
    stdout, stderr = proc.communicate(asp_program)
    # --outf=2 makes clingo emit a JSON document on stdout.
    out = json.loads(stdout)
    return out['Time']['Total']
def load_file(path):
    """Read the text file at `path` and return its contents as UTF-8 bytes."""
    with open(path) as fp:
        return fp.read().encode('utf8')
def generate_asp_query(nfields, nencodings):
    """Build the ASP facts for a benchmark query: `nfields` fields named
    a, b, c, ... (typed in order from DATA_FIELD_TYPES) and `nencodings`
    encodings e0, e1, ..., each followed by the task-fixing constraint."""
    parts = []
    for idx in range(nfields):
        field_name = chr(ord('a') + idx)
        parts.append('fieldtype({0},{1}).\n'.format(field_name, DATA_FIELD_TYPES[idx]))
    for idx in range(nencodings):
        parts.append('encoding(e{0}).\n'.format(idx))
        # fix a task, as compass does not support
        parts.append(':- not task(value).\n')
    return ''.join(parts)
if __name__ == '__main__':
    # CLI: python <script>.py <nfields> <nencodings>
    parser = argparse.ArgumentParser()
    parser.add_argument('nfields')
    parser.add_argument('nencodings')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
9648011 | import sys
import numpy as np
import math
import pickle
# Command-line arguments: the unique-entity list file (one entity per line),
# the .npy file holding the concatenated entity embeddings, and a dataset
# name used to prefix the output files.
unique_entity_fp = sys.argv[1]
concatenated_embedding_fp = sys.argv[2]
ds_name = sys.argv[3]
def sigmoid(x):
    """Logistic sigmoid: map a real number into the open interval (0, 1)."""
    return 1.0 / (1.0 + math.exp(-x))
import json  # BUG FIX: json.dump is used below but json was never imported (NameError)

# prepare unique entity list (one entity per line in the input file)
unique_entity_list = []
with open(unique_entity_fp) as fp:
    for line in fp:
        unique_entity_list.append(line.strip())

# read concatenated entity embedding
concatenated_embedding = np.load(concatenated_embedding_fp)

# For every unordered pair (i, j), store sigmoid(dot(e_i, e_j)).
sigmoid_tuple_list = []
for i in range(len(unique_entity_list)):
    for j in range(i + 1, len(unique_entity_list)):
        sigmoid_tuple_list.append((i, j, sigmoid(np.dot(concatenated_embedding[i], concatenated_embedding[j]))))
with open(ds_name + '_sigmoid_dot_product_tuple_list.pkl', 'wb') as fp:
    pickle.dump(sigmoid_tuple_list, fp)

# Build the symmetric adjacency list and sort each node's neighbors by
# descending similarity: {node1: [(node2, sigmoid_val), ...]}
adjacency_sigmoid_list = {}
for tuple_element in sigmoid_tuple_list:
    node1 = int(tuple_element[0])
    node2 = int(tuple_element[1])
    sigmoid_val = tuple_element[2]
    if node1 not in adjacency_sigmoid_list:
        adjacency_sigmoid_list[node1] = []
    if node2 not in adjacency_sigmoid_list:
        adjacency_sigmoid_list[node2] = []
    adjacency_sigmoid_list[node1].append((node2, sigmoid_val))
    adjacency_sigmoid_list[node2].append((node1, sigmoid_val))
for i in range(len(unique_entity_list)):
    adjacency_sigmoid_list[i].sort(reverse=True, key=lambda x: x[1])
with open(ds_name + '_adjacency_sigmoid_list.json', 'w') as fp:
    json.dump(adjacency_sigmoid_list, fp)

# Strip the similarity values, keeping only the sorted neighbor ids per node.
all_node_sorted_node_list_wrt_sigmoid_dot = []
for i in range(len(unique_entity_list)):
    sorted_node_list_wrt_sigmoid_dot = []
    for tuple_item in adjacency_sigmoid_list[i]:
        sorted_node_list_wrt_sigmoid_dot.append(tuple_item[0])
    all_node_sorted_node_list_wrt_sigmoid_dot.append(sorted_node_list_wrt_sigmoid_dot)
# NOTE: the '_soreted_' typo in the output filename is preserved so any
# downstream consumers keep finding the file.
with open(ds_name + '_soreted_node_list_wrt_sigmoid_dot.pkl', 'wb') as fp:
    pickle.dump(all_node_sorted_node_list_wrt_sigmoid_dot, fp)
| StarcoderdataPython |
71224 | <reponame>josalinas/ppiclF
import unittest
import inspect
import os
from functools import wraps
###############################################################################
# DECORATORS
###############################################################################
def Parallel(method):
    """Decorator for test methods that require MPI.

    Sets self.mpi_procs from self.parallel_procs; skips the test when MPI is
    disabled, otherwise tags the log suffix and runs the wrapped method.
    """
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        self.mpi_procs = self.parallel_procs
        if not self.ifmpi:
            self.skipTest("Skipping \"{0}\"; MPI is not enabled.".format(self.id()))
        else:
            # BUG FIX: self.ifmpi is known truthy in this branch, so the
            # original inner `if self.ifmpi / else` was dead code whose
            # '.serial' arm could never run; the suffix is always '.parallel'.
            self.log_suffix = '.general.parallel'
            method(self, *args, **kwargs)
    return wrapper
###############################################################################
# BASE TEST CASE
###############################################################################
class ppiclfTestCase(unittest.TestCase):
""" Base class for ppiclf unittests
This defines a setUpClass method to:
(a) get the relevant environment variables for compilers, directories
All subclassed TestCases will need to do these things.
Class attributes:
f77 (str): The Fortran 77 compiler to use [default: 'gfortran']
cc (str): The C compiler to use [default: 'gcc']
ifmpi (bool): Perform compilation/tests with MPI [default: False]
source_root (str): Path to Nek source directory;overridden by $NEK_SOURCE_ROOT env variable
[default: '$HOME/nek5_svn/trunk/nek']
tools_root (str): Path to Nek tools directory; overridden by $TOOLS_ROOT env variable
[default: '$HOME/nek5_svn/trunk/tools']
examples_root (str): Path to Nek examples directory; overridden by $EXAMPLES_ROOT env variable
[default: '$HOME/nek5_svn/examples']
makenek (str): Path to makenek [default: source_root/makenek]
tools_bin (str): Directory to place compiled tools [default: tools_root/bin]
Subclass attributes:
These aren't meaningful in the base class. They're intended for a subclass that represents
a particular example problem.
example_subdir (str): The subdirectory for the subclass' example. Assumed that it's in example_root
rea_file (str): The .rea file for the subclass' example, minus the '.rea' extension. Assumed
that it's in example_root/example_dir
size_file (str): The SIZE file for the subclass' example. Assuemed that it's in
example_root/example_subdir
"""
# Defined in subclasses only; declared here to make syntax checker happy
example_subdir = ""
case_name = ""
def __init__(self, *args, **kwargs):
    """Set build/test defaults, then let get_opts() override them from the environment."""
    # These can be overridden by self.get_opts
    self.f77 = "mpif77"
    self.cc = "mpicc"
    self.pplist = ""
    self.usr_lflags = ""
    self.ifmpi = True
    # Source root defaults to the parent of the directory holding the test
    # module; examples root to the directory holding the test module itself.
    self.source_root = os.path.dirname(os.path.dirname(inspect.getabsfile(self.__class__)))
    self.examples_root = os.path.dirname(inspect.getabsfile(self.__class__))
    self.make = os.path.join(self.source_root, 'Makefile')
    self.log_root = ""
    self.verbose = True
    self.serial_procs = 1
    self.parallel_procs = 2
    self.size_params = {}
    # These are overridden by method decorators (Parallel, ..)
    self.log_suffix = ""
    self.mpi_procs = None
    # Empty list of delayed fails; the assert*Delayed helpers append here and
    # assertDelayedFailures reports them at the end of a test.
    self._delayed_failures = []
    self.get_opts()
    unittest.TestCase.__init__(self, *args, **kwargs)
def assertAlmostEqualDelayed(self, test_val, target_val, delta, label):
    """Check test_val == target_val within +/- delta, recording (not raising)
    any failure; failures are reported later by assertDelayedFailures."""
    if abs(test_val-target_val) <= delta:
        msg = ' SUCCESS: {0}: Test value {1} equals target value {2} +/- {3}'.format(label, test_val, target_val, delta)
    else:
        msg = ' FAILURE: {0}: Test value {1} exceeds target value {2} +/- {3}'.format(label, test_val, target_val, delta)
        # BUG FIX: record only genuine failures. The original appended the
        # message unconditionally, so assertDelayedFailures failed the test
        # even when every check passed (assertIsNullDelayed already follows
        # the failure-only convention).
        self._delayed_failures.append(msg)
    print(msg)
def assertIsNotNullDelayed(self, test_val, label):
    """Record a delayed failure if *label* was NOT found (test_val falsy);
    failures are reported later by assertDelayedFailures."""
    if test_val:
        msg = 'SUCCESS: Found phrase "{0}" in logfile.'.format(label)
    else:
        msg = 'FAILURE: Unexpectedly did not find phrase "{0}" in logfile'.format(label)
        # BUG FIX: record only genuine failures; the original appended
        # unconditionally, polluting _delayed_failures with success messages
        # (assertIsNullDelayed shows the intended failure-only convention).
        self._delayed_failures.append(msg)
    print(msg)
def assertIsNullDelayed(self, test_val, label):
    """Record a delayed failure if *label* WAS found (test_val truthy); the
    phrase is expected to be absent from the logfile."""
    if test_val:
        msg = 'FAILURE: Found phrase "{0}" in logfile.'.format(label)
        self._delayed_failures.append(msg)
    else:
        msg = 'SUCCESS: Did not find phrase "{0}" in logfile'.format(label)
    print(msg)
def assertDelayedFailures(self):
    """Fail the test once, with a combined report, if any delayed assertion failed."""
    if self._delayed_failures:
        report = [
            '\n\nFailed assertions:{0}\n'.format(len(self._delayed_failures))
        ]
        for i,failure in enumerate(self._delayed_failures, start=1):
            report.append('{0}: {1}'.format(i, failure))
        self.fail('\n'.join(report))
def get_opts(self):
print("Getting setup options...")
# Get compiler options from env
self.f77 = os.environ.get('FC', self.f77)
self.cc = os.environ.get('CC', self.cc)
self.pplist = os.environ.get('PPLIST', self.pplist)
self.usr_lflags = os.environ.get('USR_LFLAGS', self.usr_lflags)
self.ifmpi = os.environ.get('MPI', self.ifmpi)
# Get paths from env
try:
self.source_root = os.path.abspath(os.environ['SOURCE_ROOT'])
except KeyError:
pass
else:
self.make = os.path.join(self.source_root, 'Makefile')
self.examples_root = os.path.abspath(os.environ.get('EXAMPLES_ROOT', self.examples_root))
try:
self.log_root = os.path.abspath(os.environ['LOG_ROOT'])
except KeyError:
pass
self.verbose = str(os.environ.get('VERBOSE_TESTS', self.verbose)).lower() == 'true'
self.parallel_procs = int(os.environ.get('PARALLEL_PROCS', self.parallel_procs))
# Print everything out
for varname, varval in (
('FC', self.f77),
('CC', self.cc),
('PPLIST', self.pplist),
('USR_LFLAGS', self.usr_lflags),
('IFMPI', self.ifmpi),
('SOURCE_ROOT', self.source_root),
('EXAMPLES_ROOT', self.examples_root),
('LOG_ROOT', self.log_root),
('VERBOSE_TESTS', self.verbose),
('PARALLEL_PROCS', self.parallel_procs)
):
if varval:
print(' Using {0:14} = "{1}"'.format(varname, varval))
# Verify that pathnames are valid
for varname, varval in (
('SOURCE_ROOT', self.source_root),
('EXAMPLES_ROOT', self.examples_root),
('LOG_ROOT', self.log_root),
):
if varval and not os.path.isdir(varval):
raise OSError('The {0} directory "{1}" does not exist. Please the env variable ${0} to a valid directory.'.format(varname, varval))
print("Finished getting setup options!")
def build_ppiclf(self, opts=None):
from lib.ppiclfBinBuild import build_ppiclf
cls = self.__class__
all_opts = dict(
FC = self.f77,
CC = self.cc,
PPLIST = self.pplist,
USR_LFLAGS = self.usr_lflags,
MPI = int(self.ifmpi),
)
if opts:
all_opts.update(opts)
build_ppiclf(
source_root = self.source_root,
cwd = os.path.join(self.examples_root, cls.example_subdir),
opts = all_opts,
verbose = self.verbose,
)
def run_ppiclf(self, rea_file=None):
from lib.ppiclfBinRun import run_ppiclf
cls = self.__class__
run_ppiclf(
cwd = os.path.join(self.examples_root, cls.example_subdir),
rea_file = cls.case_name if not rea_file else rea_file,
ifmpi = self.ifmpi,
log_suffix = self.log_suffix,
n_procs = self.mpi_procs,
verbose = self.verbose
)
def get_value_from_log(self, label, column, row=0, logfile=None):
cls = self.__class__
if not logfile:
logfile = os.path.join(
self.examples_root,
cls.example_subdir,
'{0}.log.{1}{2}'.format(cls.case_name, self.mpi_procs, self.log_suffix)
)
# Get all lines with label
with open(logfile, 'r') as f:
line_list = [l for l in f if label in l]
if not line_list:
raise ValueError("Could not find label \"{0}\" in logfile \"{1}\". The run may have failed.".format(label, logfile))
try:
value = float(line_list[row].split()[column])
except ValueError:
raise ValueError("Attempted to parse non-numerical value in logfile, \"{0}\". Logfile may be malformatted or run may have failed".format(logfile))
except IndexError:
raise IndexError("Fewer rows and/or columns than expected in logfile, \"{0}\". Logfile may be malformmated or run may have failed.".format(logfile))
else:
return value
def get_phrase_from_log(self, label, logfile=None, row=0):
cls = self.__class__
if not logfile:
logfile = os.path.join(
self.examples_root,
cls.example_subdir,
'{0}.log.{1}{2}'.format(cls.case_name, self.mpi_procs, self.log_suffix)
)
with open(logfile, 'r') as f:
line_list = [l for l in f if label in l]
try:
line = line_list[row]
except IndexError:
return None
else:
return line
| StarcoderdataPython |
11375247 | from rest_framework.fields import get_attribute
from rest_framework import relations
from rest_framework.reverse import reverse
class GenericHyperlinkedRelatedField(relations.PrimaryKeyRelatedField):
    """Related field rendered as an (absolute, when possible) hyperlink.

    The detail view name is derived from the related model's lowercased
    object name as "<modelname>s-detail".
    """

    def get_attribute(self, instance):
        # Resolve the related object through the serializer's source path.
        return get_attribute(instance, self.source_attrs)

    def to_representation(self, value):
        # Reverse the conventional "<modelname>s-detail" route for this object.
        view_name = "{}s-detail".format(value._meta.object_name.lower())
        relative_url = reverse(view_name, kwargs={'pk': value.pk})
        request = self.context.get('request', None)
        if request is None:
            # No request in context: fall back to the site-relative URL.
            return relative_url
        return request.build_absolute_uri(relative_url)
| StarcoderdataPython |
8015340 | from table2json.bin.main import execute_from_command_line
# Public API of the package: only the CLI entry point is exported.
__all__ = [
    "execute_from_command_line",
]
| StarcoderdataPython |
40385 | import sys
import random
from test_base import *
class TestBlockLD(TestBase):
    """Trace generator producing a random mix of block loads and block stores."""

    def generate(self):
        """Emit 50000 random block-sized load/store transactions, then finish."""
        self.clear_tag()
        for n in range(50000):
            # Coin flip per transaction: 1 -> block store, 0 -> block load.
            store_not_load = random.randint(0,1)
            tag = random.randint(0, 15)
            index = random.randint(0,self.sets_p-1)
            taddr = self.get_addr(tag,index)
            if store_not_load:
                self.send_block_st(taddr)
            else:
                self.send_block_ld(taddr)
        self.tg.done()

    def send_block_st(self, addr):
        """Store a whole block as one word-store per word, from the block base."""
        # Align the address down to the start of its containing block
        # (block_size_in_words_p words of 4 bytes each).
        base_addr = addr - (addr % (self.block_size_in_words_p*4))
        for i in range(self.block_size_in_words_p):
            self.send_sw(base_addr+(i*4))
# main()
if __name__ == "__main__":
    # Generate the random load/store trace when invoked as a script.
    t = TestBlockLD()
    t.generate()
| StarcoderdataPython |
1785313 | <reponame>pld/bamboo<gh_stars>10-100
from functools import partial
import simplejson as json
import os
import tempfile
from celery.exceptions import RetryTaskError
from celery.task import task
import pandas as pd
from bamboo.lib.async import call_async
from bamboo.lib.datetools import recognize_dates
from bamboo.lib.schema_builder import filter_schema
@task(ignore_result=True)
def import_dataset(dataset, file_reader, delete=False):
    """For reading a URL and saving the corresponding dataset.

    Import a DataFrame using the provided `file_reader` function. All
    exceptions are caught and on exception the dataset is marked as failed and
    set for deletion after 24 hours.

    :param dataset: The dataset to import into.
    :param file_reader: Function for reading the dataset.
    :param delete: Delete filepath_or_buffer after import, default False.
    """
    try:
        dframe = file_reader()
        dataset.save_observations(dframe)
    except RetryTaskError:
        # Celery's retry signalling must propagate unchanged; a bare `raise`
        # preserves the original traceback (unlike re-raising the instance).
        raise
    except Exception as e:
        # Mark the dataset as failed and schedule it for deletion in 24 hours.
        dataset.failed(str(e))
        dataset.delete(countdown=86400)
def csv_file_reader(name, na_values=[], delete=False):
    """Read a CSV into a DataFrame with dates recognized.

    :param name: Path or URL of the CSV file.
    :param na_values: Additional strings to treat as NaN.
    :param delete: When True, unlink the file afterwards (even on error).
    """
    try:
        dframe = pd.read_csv(name, encoding='utf-8', na_values=na_values)
        return recognize_dates(dframe)
    finally:
        if delete:
            os.unlink(name)
def json_file_reader(content):
    """Parse a JSON string into a DataFrame with dates recognized."""
    parsed = json.loads(content)
    return recognize_dates(pd.DataFrame(parsed))
class ImportableDataset(object):
    """Mixin providing dataset import helpers (URL, CSV, JSON, SDF schema).

    Heavy reads are dispatched asynchronously via ``call_async(import_dataset,
    ...)``; the concrete class is expected to provide ``set_schema`` and
    ``ready`` (used by ``import_schema``).
    """

    def import_from_url(self, url, na_values=[], allow_local_file=False):
        """Load a URL, read from a CSV, add data to dataset.

        :param url: URL to load file from.
        :param allow_local_file: Allow URL to refer to a local file.

        :raises: `IOError` for an unreadable file or a bad URL.

        :returns: The created dataset.
        """
        # NOTE(review): `basestring` exists only on Python 2; this module
        # predates Python 3 support.
        if not allow_local_file and isinstance(url, basestring)\
                and url[0:4] == 'file':
            raise IOError

        call_async(
            import_dataset, self, partial(
                csv_file_reader, url, na_values=na_values))

        return self

    def import_from_csv(self, csv_file, na_values=[]):
        """Import data from a CSV file.

        .. note::

            Write to a named tempfile in order to get a handle for pandas'
            `read_csv` function.

        :param csv_file: The CSV File to create a dataset from.

        :returns: The created dataset.
        """
        # Uploaded files may arrive wrapped (e.g. cherrypy Part objects).
        if 'file' in dir(csv_file):
            csv_file = csv_file.file

        tmpfile = tempfile.NamedTemporaryFile(delete=False)
        tmpfile.write(csv_file.read())

        # pandas needs a closed file for *read_csv*
        tmpfile.close()

        # delete=True: the async reader removes the tempfile when done.
        call_async(import_dataset, self, partial(
            csv_file_reader, tmpfile.name, na_values=na_values, delete=True))

        return self

    def import_from_json(self, json_file):
        """Import data from a JSON file.

        :param json_file: JSON file to import.
        """
        content = json_file.file.read()

        call_async(import_dataset, self, partial(json_file_reader, content))

        return self

    def import_schema(self, schema):
        """Create a dataset from a SDF schema file (JSON).

        :param schema: The SDF (JSON) file to create a dataset from.

        :returns: The created dataset.
        """
        # Accept either an uploaded file object or a raw JSON string.
        try:
            schema = json.loads(schema.file.read())
        except AttributeError:
            schema = json.loads(schema)

        self.set_schema(filter_schema(schema))
        self.ready()

        return self
| StarcoderdataPython |
9658638 | <filename>src/fedAVG/server.py
from copy import deepcopy
import random
import numpy
import torch
import torch.nn as nn
import torch.optim as optim
from .client import Client
from ..models import *
from ..utils import get_class_priors, load_cifar, run_accuracy, generate_clients_sizes
from ..splits import indexes_split_IID, indexes_split_NON_IID
class Server:
    """FedAVG server: owns the global model and orchestrates simulated clients.

    Splits a CIFAR dataset across clients (IID or Dirichlet non-IID), runs
    federated rounds with optional FedIR / FedVC variants, and evaluates the
    aggregated global model.
    """

    def __init__(self, device, data_config, model_config, optim_config, fed_config, logger=None):
        self.device = device
        self.clients = []

        # DATASET CONFIGURATION
        self.trainset = load_cifar(name=data_config["dataset_name"], train=True)
        self.testset = load_cifar(name=data_config["dataset_name"], train=False)
        self.trainset_size = len(self.trainset)
        self.testset_size = len(self.testset)
        self.num_classes = len(self.trainset.classes)
        self.class_priors = get_class_priors(self.num_classes, self.trainset.targets)
        self.global_batch_size = data_config["global_batch_size"]
        self.std_client_samples = data_config["std_client_samples"]
        self.IID = data_config["IID"]
        self.logger = logger
        # The Dirichlet concentration parameter only applies to non-IID splits.
        if not self.IID:
            self.alpha = data_config["alpha"]

        # MODEL CONFIGURATION
        self.model_config = model_config
        self.optim_config = optim_config
        # NOTE(review): the net class name comes from config and is
        # instantiated via eval() -- safe only for trusted config files.
        self.global_net = eval(model_config["net"])(self.num_classes)

        # FEDERATED CONFIGURATION
        self.num_clients = fed_config["num_clients"]
        self.avg_clients_rounds = fed_config["avg_clients_rounds"]
        self.std_clients_rounds = fed_config["std_clients_rounds"]
        self.num_rounds = fed_config["num_rounds"]
        self.client_batch_size = fed_config["client_batch_size"]
        self.local_epochs = fed_config["local_epochs"]
        self.fed_IR = fed_config["fed_IR"]
        self.fed_VC = fed_config["fed_VC"]
        if self.fed_VC:
            # FedVC: each virtual client contributes the same number of samples.
            self.virtual_client_size = self.trainset_size // self.num_clients
        else:
            self.virtual_client_size = None
        self.clients_weights = None

    def init_clients(self):
        """Create the clients and hand each a subset of the training set."""
        # Define each client training size using gaussian distribution
        clients_sizes = generate_clients_sizes(self.trainset_size, self.num_clients, self.std_client_samples)
        self.logger.log(f"Client samples sizes: {clients_sizes}, total: {numpy.sum(clients_sizes)}")

        if self.IID:
            indexes = indexes_split_IID(self.num_clients, self.num_classes, self.trainset, clients_sizes)
        else:
            indexes = indexes_split_NON_IID(self.num_clients, self.num_classes, self.alpha, self.trainset, clients_sizes)

        for i in range(self.num_clients):
            trainset_i = torch.utils.data.Subset(self.trainset, indexes[i])
            client = Client(i, self.device, self.local_epochs, self.client_batch_size, trainset_i,
                            model_config=self.model_config, optim_config=self.optim_config,
                            server_class_priors=self.class_priors, virtual_client_size=self.virtual_client_size,
                            logger=self.logger)
            self.clients.append(client)

        # Only for FedVC -> calculate clients weights based on how many samples they have
        if self.fed_VC:
            self.clients_weights = numpy.zeros((len(self.clients)))
            for i in range(len(self.clients)):
                self.clients_weights[i] = self.clients[i].trainset_size
            self.clients_weights = self.clients_weights / numpy.sum(self.clients_weights)

    def run_training(self, state_dict=None, round_num=0, print_acc=True):
        """Run ``num_rounds`` federated rounds, optionally from a saved state."""
        if len(self.clients) == 0:
            self.init_clients()

        self.global_net.to(self.device)
        if state_dict is not None:
            self.global_net.load_state_dict(state_dict)
        self.global_net.train()

        for _ in range(self.num_rounds):
            round_num += 1
            self.logger.log(f"ROUND {round_num}")

            # Save state at current round
            state_t = deepcopy(self.global_net.state_dict())

            # Get the selected clients for this round: gaussian around
            # avg_clients_rounds * num_clients, clamped to [1, num_clients].
            num_selected_clients = int(max(min(self.num_clients,
                                               random.gauss(self.avg_clients_rounds * self.num_clients,
                                                            self.std_clients_rounds * self.num_clients)), 1))

            # clients_weights is None unless FedVC, in which case selection is
            # weighted by each client's dataset size.
            selected_clients = numpy.random.choice(self.clients, num_selected_clients, replace=False, p=self.clients_weights).tolist()
            selected_clients.sort(key=lambda x: x.id)
            num_samples = sum(c.trainset_size for c in selected_clients)  # effective number of samples at current round

            if self.std_clients_rounds != 0:
                self.logger.log(f"{num_selected_clients} clients selected")

            # Run update on each client
            for client in selected_clients:
                client.client_update(state_t, fed_IR=self.fed_IR, fed_VC=self.fed_VC, print_acc=print_acc)

            # Calculate weighted accuracy of all clients (after clients updating, BEFORE averaging)
            if print_acc:
                self.logger.log("[BEFORE AVG]")
                self.run_weighted_clients_accuracy()

            # AVERAGING: apply each client's weighted parameter delta to the
            # global model.
            old_state = deepcopy(self.global_net.state_dict())
            for client in selected_clients:
                if self.fed_VC:
                    # for Fed_VC we use every time the same total amount of sample per client
                    weight = 1 / len(selected_clients)
                else:
                    weight = client.trainset_size / num_samples
                for key in self.global_net.state_dict().keys():
                    old_tensor = old_state[key]
                    new_tensor = client.net.state_dict()[key]
                    delta = new_tensor - old_tensor
                    self.global_net.state_dict()[key] += (weight * delta).type(old_tensor.type())

            # Calculate weighted accuracy of all clients (after clients updating, AFTER averaging)
            if print_acc:
                self.logger.log("[AFTER AVG]")
                self.run_testing(train=True)

    def run_weighted_clients_accuracy(self, state_dict=None):
        """Log train accuracy/loss over all clients, weighted by dataset size."""
        accuracy = 0
        loss = 0
        for client in self.clients:
            client_accuracy, client_loss = client.train_accuracy(state_dict=state_dict)
            weight = client.trainset_size / self.trainset_size
            accuracy += weight * client_accuracy
            loss += weight * client_loss

        self.logger.log(f'Weighted Clients -> Train: Loss {loss:.3f} | Accuracy = {accuracy:.3f}')

    def run_testing(self, train=False):
        """Evaluate the global model on the train or test set.

        :returns: (accuracy, loss) tuple.
        """
        if train:
            dataset = self.trainset
        else:
            dataset = self.testset

        # NOTE(review): criterion class name also comes from config via eval().
        criterion = eval(self.model_config["criterion"])()
        accuracy, loss = run_accuracy(device=self.device, dataset=dataset,
                                      batch_size=self.global_batch_size, net=self.global_net,
                                      criterion=criterion)
        if train:
            self.logger.log(f'Server -> Train: Loss {loss:.3f} | Accuracy = {accuracy:.3f}')
        else:
            self.logger.log(f'Server -> Test: Loss {loss:.3f} | Accuracy = {accuracy:.3f}')
        return accuracy, loss
| StarcoderdataPython |
1769790 | <reponame>basarane/model-based-rl<filename>src/nets/loss.py
import numpy as np
import tensorflow.keras.backend as K
if K.backend() == 'tensorflow':
import tensorflow as tf
elif K.backend() == 'theano':
from theano import tensor as T
# adapted from keras-rl: https://github.com/keras-rl/keras-rl/blob/master/rl/util.py
def huber_loss(y_true, y_pred):
    """Huber loss with clipping threshold 1, summed over elements.

    NOTE(review): the tensorflow branch returns a summed scalar while the
    theano branch returns the elementwise switch -- confirm which behavior
    callers expect before unifying the backends.
    """
    clip_value = 1
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.
    coef = 1  # scale factor applied to the reduced loss
    x = y_true - y_pred
    if np.isinf(clip_value):
        # Spacial case for infinity since Tensorflow does have problems
        # if we compare `K.abs(x) < np.inf`.
        return coef * K.sum(.5 * K.square(x))

    condition = K.abs(x) < clip_value
    # Quadratic inside the clip region, linear outside.
    squared_loss = 0.5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    if K.backend() == 'tensorflow':
        # tf.select was renamed to tf.where in newer TensorFlow versions.
        if hasattr(tf, 'select'):
            return coef * K.sum(tf.select(condition, squared_loss, linear_loss))  # condition, true, false
        else:
            return coef * K.sum(tf.where(condition, squared_loss, linear_loss))  # condition, true, false
    elif K.backend() == 'theano':
        return T.switch(condition, squared_loss, linear_loss)
    else:
        raise RuntimeError('Unknown backend "{}".'.format(K.backend()))
def huber_loss_mse(y_true, y_pred):
    """Huber loss with clipping threshold 1, averaged over elements.

    Identical to :func:`huber_loss` except K.mean replaces K.sum in the
    tensorflow branch.  NOTE(review): the theano branch again returns the
    elementwise switch, not the mean -- confirm before relying on it.
    """
    clip_value = 1
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.
    coef = 1  # scale factor applied to the reduced loss
    x = y_true - y_pred
    if np.isinf(clip_value):
        # Spacial case for infinity since Tensorflow does have problems
        # if we compare `K.abs(x) < np.inf`.
        return coef * K.mean(.5 * K.square(x))

    condition = K.abs(x) < clip_value
    # Quadratic inside the clip region, linear outside.
    squared_loss = 0.5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    if K.backend() == 'tensorflow':
        # tf.select was renamed to tf.where in newer TensorFlow versions.
        if hasattr(tf, 'select'):
            return coef * K.mean(tf.select(condition, squared_loss, linear_loss))  # condition, true, false
        else:
            return coef * K.mean(tf.where(condition, squared_loss, linear_loss))  # condition, true, false
    elif K.backend() == 'theano':
        return T.switch(condition, squared_loss, linear_loss)
    else:
        raise RuntimeError('Unknown backend "{}".'.format(K.backend()))
1886921 | from asserts import assert_equal
from dectest import TestCase, test
from werkzeug import Request
from rouver.util import absolute_url
class AbsoluteURLTest(TestCase):
    """Tests for rouver.util.absolute_url covering relative and absolute paths."""

    @staticmethod
    def _create_request(*, path_info: str = "/path") -> Request:
        # Minimal WSGI environ for an HTTPS request mounted under /base/.
        return Request(
            {
                "wsgi.url_scheme": "https",
                "SERVER_NAME": "example.com",
                "SERVER_PORT": "443",
                "SCRIPT_NAME": "/base/",
                "PATH_INFO": path_info,
            }
        )

    @test
    def path_is_not_ascii(self) -> None:
        # Non-ASCII characters are expected to be percent-encoded as UTF-8.
        request = self._create_request()
        url = absolute_url(request, "/~föo")
        assert_equal("https://example.com/~f%C3%B6o", url)

    @test
    def path_is_absolute(self) -> None:
        # A fully-qualified URL is returned unchanged.
        request = self._create_request()
        url = absolute_url(request, "https://example.org/foo")
        assert_equal("https://example.org/foo", url)

    @test
    def path_is_root_relative(self) -> None:
        # A root-relative path is resolved against the host, not SCRIPT_NAME.
        request = self._create_request()
        url = absolute_url(request, "/foo")
        assert_equal("https://example.com/foo", url)

    @test
    def path_is_relative__base_with_slash(self) -> None:
        # Trailing slash on PATH_INFO keeps the last segment as a directory.
        request = self._create_request(path_info="/path/")
        url = absolute_url(request, "foo")
        assert_equal("https://example.com/base/path/foo", url)

    @test
    def path_is_relative__base_without_slash(self) -> None:
        # Without a trailing slash the last path segment is replaced.
        request = self._create_request(path_info="/path")
        url = absolute_url(request, "foo")
        assert_equal("https://example.com/base/foo", url)

    @test
    def do_not_encode_special_characters(self) -> None:
        # Reserved URL characters in the query must pass through unescaped.
        request = self._create_request()
        url = absolute_url(request, "/foo?bar=baz&abc=%6A;+,@:$")
        assert_equal("https://example.com/foo?bar=baz&abc=%6A;+,@:$", url)
| StarcoderdataPython |
264778 | # Copyright 2019-2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
from django.contrib import admin
# Register your models here.
| StarcoderdataPython |
# Package name constant (kept module-level for external consumers).
name = "muDIC"
from .solver import DICInput
from muDIC.post.viz import Fields, Visualizer
from muDIC.solver.correlate import DICInput, DICOutput
from . import IO
from . import elements
from . import filtering
from . import mesh
from . import mesh
from . import post
from . import vlab
from . import utils
from .IO import image_stack_from_list, image_stack_from_folder, ImageStack
from .mesh import Mesher
from .solver import *
# Set the default logging level
# NOTE: configuring logging at import time is a module side effect; importing
# muDIC installs this format/level on the root logger.
import logging
logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s', level=logging.INFO)
| StarcoderdataPython |
3378007 | <gh_stars>0
from numba.core import dispatcher, compiler
from numba.core.registry import cpu_target, dispatcher_registry
import numba.dppl_config as dppl_config
class DpplOffloadDispatcher(dispatcher.Dispatcher):
    """Numba dispatcher that compiles through the DPPL pipeline when present.

    Falls back to the supplied pipeline_class (with a console warning) when
    the dppl backend is unavailable.
    """
    targetdescr = cpu_target

    def __init__(self, py_func, locals={}, targetoptions={}, impl_kind='direct', pipeline_class=compiler.Compiler):
        if dppl_config.dppl_present:
            from numba.dppl.compiler import DPPLCompiler
            # The DPPL compiler requires the parallel target option.
            targetoptions['parallel'] = True
            dispatcher.Dispatcher.__init__(self, py_func, locals=locals,
                targetoptions=targetoptions, impl_kind=impl_kind, pipeline_class=DPPLCompiler)
        else:
            print("---------------------------------------------------------------------")
            print("WARNING : DPPL pipeline ignored. Ensure OpenCL drivers are installed.")
            print("---------------------------------------------------------------------")
            dispatcher.Dispatcher.__init__(self, py_func, locals=locals,
                targetoptions=targetoptions, impl_kind=impl_kind, pipeline_class=pipeline_class)
# Route both custom offload targets to the DPPL-aware dispatcher.
dispatcher_registry['__dppl_offload_gpu__'] = DpplOffloadDispatcher
dispatcher_registry['__dppl_offload_cpu__'] = DpplOffloadDispatcher
| StarcoderdataPython |
8193077 | <gh_stars>0
from wpcv import plp as plp
import os,glob
def resnet(data_dir=None,name='resnet18',pretrained=True,num_classes='auto',input_size=(224,224),batch_size=8,num_epoch=200,patience=20,shuffle=True):
    """Build a configured ClassifierTrainer for an ImageFolder dataset.

    NOTE(review): with the default ``num_classes='auto'``, ``data_dir`` must
    be a valid path -- the default ``data_dir=None`` would crash on
    ``os.listdir``; confirm intended usage.
    """
    trainer = plp.ClassifierTrainer()
    if num_classes=='auto':
        # One class per sub-directory of <data_dir>/train.
        num_classes=len(os.listdir(data_dir+'/train'))
    model=plp.resnet_model(name,pretrained=pretrained,num_classes=num_classes)
    data_transforms=plp.simple_transforms_for_classify_model(input_size=input_size)
    dataloaders=plp.simple_dataloaders_for_imagefolder(
        data_dir=data_dir,
        data_transforms=data_transforms,
        batch_size=batch_size,
        shuffle=shuffle,
    )
    # Wire model and data into the trainer, attach checkpointing and early
    # stopping, then finalize setup.
    trainer.setParams(dict(
        model=model,
        dataloaders=dataloaders,
    )).setSettings(dict(
        num_epoch=num_epoch,
    )).bind_callback([
        plp.SaveCallback(),plp.EarlyStopping(patience=patience),
    ]).setup()
    return trainer
def classifier(data_dir,name='resnet18',pretrained=True,num_classes='auto',input_size=(224,224),batch_size=8,num_epoch=200,patience=20,shuffle=True):
    # TODO: unimplemented stub; mirrors the resnet() signature above.
    pass
3202613 | <gh_stars>1-10
from itertools import permutations
if __name__ == '__main__':
    # Read three integers and a target sum from stdin.
    x = int(input())
    y = int(input())
    z = int(input())
    n = int(input())

    # Collect every permutation of (x, y, z) whose element sum equals n.
    take_it = []
    count = 0
    for perm in permutations((x, y, z)):
        for value in perm:
            count += value
        # Compare values with ==; `is` only works by accident for small
        # interned ints and silently fails once the sum leaves [-5, 256].
        if count == n:
            take_it.append(perm)
        count = 0
    print(take_it)
| StarcoderdataPython |
119880 | import asyncio
import base64
import binascii
import json
import logging
import os
import sys
from urllib.parse import urlparse
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from runners.agent_container import ( # noqa:E402
arg_parser,
create_agent_with_args,
AriesAgent,
)
from runners.support.utils import ( # noqa:E402
check_requires,
log_msg,
log_status,
log_timer,
prompt,
prompt_loop,
)
logging.basicConfig(level=logging.WARNING)
LOGGER = logging.getLogger(__name__)
class BEEDSUserAgent(AriesAgent):
    """Aries agent for the BEEDS user demo.

    Tracks a single connection; ``_connection_ready`` is a future created by
    ``input_invitation`` and awaited via ``detect_connection``.
    """

    def __init__(
        self,
        ident: str,
        http_port: int,
        admin_port: int,
        no_auto: bool = False,
        aip: int = 20,
        endorser_role: str = None,
        **kwargs,
    ):
        super().__init__(
            ident,
            http_port,
            admin_port,
            prefix="BEEDSUser",
            no_auto=no_auto,
            seed=None,
            aip=aip,
            endorser_role=endorser_role,
            **kwargs,
        )
        self.connection_id = None
        # Future created in input_invitation(); resolves once connected.
        self._connection_ready = None
        self.cred_state = {}

    async def detect_connection(self):
        """Block until the pending connection future resolves, then clear it."""
        await self._connection_ready
        self._connection_ready = None

    @property
    def connection_ready(self):
        """True once the connection future has resolved with a truthy result."""
        return self._connection_ready.done() and self._connection_ready.result()
async def input_invitation(agent_container):
    """Prompt for an invitation and establish the connection.

    Accepts a full invitation URL carrying a ``c_i=`` or ``oob=`` query
    parameter, a bare base64 payload, or raw invitation JSON; loops until
    valid JSON is obtained, then connects and waits for completion.
    """
    agent_container.agent._connection_ready = asyncio.Future()
    async for details in prompt_loop("Invite details: "):
        b64_invite = None
        try:
            url = urlparse(details)
            query = url.query
            if query and "c_i=" in query:
                pos = query.index("c_i=") + 4
                b64_invite = query[pos:]
            elif query and "oob=" in query:
                pos = query.index("oob=") + 4
                b64_invite = query[pos:]
            else:
                # Not a recognizable URL: treat the whole input as base64.
                b64_invite = details
        except ValueError:
            b64_invite = details

        if b64_invite:
            try:
                # Restore stripped base64 padding before decoding.
                padlen = 4 - len(b64_invite) % 4
                if padlen <= 2:
                    b64_invite += "=" * padlen
                invite_json = base64.urlsafe_b64decode(b64_invite)
                details = invite_json.decode("utf-8")
            except binascii.Error:
                # Not base64 -- assume the input is already JSON text.
                pass
            except UnicodeDecodeError:
                pass

        if details:
            try:
                details = json.loads(details)
                break
            except json.JSONDecodeError as e:
                log_msg("Invalid invitation:", str(e))

    with log_timer("Connect duration:"):
        connection = await agent_container.input_invitation(details, wait=True)
async def main(args):
    """Provision the BEEDS user agent, connect via an invitation, then serve an
    interactive menu (send message / new invitation / wallet and endorser
    options) until the user exits."""
    beeds_user_agent = await create_agent_with_args(args, ident="beeds_user")

    try:
        log_status(
            "#7 Provision an agent and wallet, get back configuration details"
            + (
                f" (Wallet type: {beeds_user_agent.wallet_type})"
                if beeds_user_agent.wallet_type
                else ""
            )
        )
        agent = BEEDSUserAgent(
            "beeds_user.agent",
            beeds_user_agent.start_port,
            beeds_user_agent.start_port + 1,
            genesis_data=beeds_user_agent.genesis_txns,
            no_auto=beeds_user_agent.no_auto,
            tails_server_base_url=beeds_user_agent.tails_server_base_url,
            timing=beeds_user_agent.show_timing,
            multitenant=beeds_user_agent.multitenant,
            mediation=beeds_user_agent.mediation,
            wallet_type=beeds_user_agent.wallet_type,
            aip=beeds_user_agent.aip,
            endorser_role=beeds_user_agent.endorser_role,
        )

        await beeds_user_agent.initialize(the_agent=agent)

        log_status("#9 Input boe.py invitation details")
        await input_invitation(beeds_user_agent)

        # Build the menu; extra options appear depending on configuration.
        options = " (3) Send Message\n" " (4) Input New Invitation\n"
        if beeds_user_agent.endorser_role and beeds_user_agent.endorser_role == "author":
            options += " (D) Set Endorser's DID\n"
        if beeds_user_agent.multitenant:
            options += " (W) Create and/or Enable Wallet\n"
        options += " (X) Exit?\n[3/4/{}X] ".format(
            "W/" if beeds_user_agent.multitenant else "",
        )
        async for option in prompt_loop(options):
            if option is not None:
                option = option.strip()

            if option is None or option in "xX":
                break

            elif option in "dD" and beeds_user_agent.endorser_role:
                endorser_did = await prompt("Enter Endorser's DID: ")
                await beeds_user_agent.agent.admin_POST(
                    f"/transactions/{beeds_user_agent.agent.connection_id}/set-endorser-info",
                    params={"endorser_did": endorser_did, "endorser_name": "endorser"},
                )

            elif option in "wW" and beeds_user_agent.multitenant:
                target_wallet_name = await prompt("Enter wallet name: ")
                include_subwallet_webhook = await prompt(
                    "(Y/N) Create sub-wallet webhook target: "
                )
                if include_subwallet_webhook.lower() == "y":
                    await beeds_user_agent.agent.register_or_switch_wallet(
                        target_wallet_name,
                        webhook_port=beeds_user_agent.agent.get_new_webhook_port(),
                        mediator_agent=beeds_user_agent.mediator_agent,
                    )
                else:
                    await beeds_user_agent.agent.register_or_switch_wallet(
                        target_wallet_name,
                        mediator_agent=beeds_user_agent.mediator_agent,
                    )

            elif option == "3":
                msg = await prompt("Enter message: ")
                if msg:
                    await beeds_user_agent.agent.admin_POST(
                        f"/connections/{beeds_user_agent.agent.connection_id}/send-message",
                        {"content": msg},
                    )

            elif option == "4":
                # handle new invitation
                log_status("Input new invitation details")
                await input_invitation(beeds_user_agent)

        if beeds_user_agent.show_timing:
            timing = await beeds_user_agent.agent.fetch_timing()
            if timing:
                for line in beeds_user_agent.agent.format_timing(timing):
                    log_msg(line)

    finally:
        terminated = await beeds_user_agent.terminate()

    await asyncio.sleep(0.1)

    if not terminated:
        os._exit(1)
if __name__ == "__main__":
    parser = arg_parser(ident="beeds_user", port=8030)
    args = parser.parse_args()

    # Optional remote-debugger hooks, toggled through environment variables.
    ENABLE_PTVSD = os.getenv("ENABLE_PTVSD_FABER", "").lower()
    ENABLE_PTVSD = ENABLE_PTVSD and ENABLE_PTVSD not in ("false", "0")

    ENABLE_PYDEVD_PYCHARM = os.getenv("ENABLE_PYDEVD_PYCHARM", "").lower()
    ENABLE_PYDEVD_PYCHARM = ENABLE_PYDEVD_PYCHARM and ENABLE_PYDEVD_PYCHARM not in (
        "false",
        "0",
    )
    PYDEVD_PYCHARM_HOST = os.getenv("PYDEVD_PYCHARM_HOST", "localhost")
    PYDEVD_PYCHARM_CONTROLLER_PORT = int(
        os.getenv("PYDEVD_PYCHARM_CONTROLLER_PORT", 5001)
    )

    # --debug to use microsoft's visual studio remote debugger
    if ENABLE_PTVSD or "--debug" in args:
        try:
            import ptvsd

            ptvsd.enable_attach(address = ('0.0.0.0', 5677))
            print("ptvsd is running")
            print("=== Waiting for debugger to attach ===")
            # To pause execution until the debugger is attached:
            ptvsd.wait_for_attach()
        except ImportError:
            print("ptvsd library was not found")

    if ENABLE_PYDEVD_PYCHARM:
        try:
            import pydevd_pycharm

            print(
                "beeds_user remote debugging to "
                f"{PYDEVD_PYCHARM_HOST}:{PYDEVD_PYCHARM_CONTROLLER_PORT}"
            )
            pydevd_pycharm.settrace(
                host=PYDEVD_PYCHARM_HOST,
                port=PYDEVD_PYCHARM_CONTROLLER_PORT,
                stdoutToServer=True,
                stderrToServer=True,
                suspend=False,
            )
        except ImportError:
            print("pydevd_pycharm library was not found")

    check_requires(args)

    try:
        asyncio.get_event_loop().run_until_complete(main(args))
    except KeyboardInterrupt:
        os._exit(1)
| StarcoderdataPython |
54717 | import operator
from itertools import chain
from Logic.ProperLogic.helper_classes.reducer import MaxReducer
from Logic.ProperLogic.misc_helpers import log_error
class ClusterDict(dict):
    """Mapping of cluster_id -> cluster with a cached maximum id.

    ``max_id`` is kept in sync (via a MaxReducer) as clusters are added,
    removed or renumbered, so callers can mint fresh ids cheaply.
    """

    # TODO: Make sure constructor is only called when needed / doesn't produce more work than necessary!
    def __init__(self, clusters=None):
        super().__init__()
        self.max_id = None
        self.max_id_reducer = MaxReducer()
        if not clusters:
            return
        for cluster in clusters:
            cluster_id = cluster.cluster_id
            self[cluster_id] = cluster
            self.max_id_reducer(cluster_id)
        self.max_id = self.max_id_reducer.get_state()

    def get_clusters(self, with_ids=False):
        """Return the clusters, optionally as (id, cluster) pairs."""
        if with_ids:
            return self.items()
        return self.values()

    def get_cluster_by_id(self, cluster_id):
        """Return the cluster with the given id, or None (logged) if absent."""
        try:
            return self[cluster_id]
        except KeyError:
            log_error(f"no cluster with id '{cluster_id}' found")
            return None

    def get_clusters_by_ids(self, cluster_ids):
        """Lazily map ids to clusters (None entries for unknown ids)."""
        return map(self.get_cluster_by_id, cluster_ids)

    def get_cluster_ids(self):
        return self.keys()

    def get_cluster_labels(self, with_ids=False, unique=True):
        """
        If with_ids is provided, unique is ignored.
        :param with_ids:
        :param unique:
        :return:
        """
        attrs = ['cluster_id'] if with_ids else []
        attrs.append('label')
        cluster_labels = self.get_cluster_attrs(*attrs)
        if unique and not with_ids:
            return list(set(cluster_labels))
        return list(cluster_labels)

    def get_cluster_attrs(self, *attrs):
        """Lazily extract the given attribute(s) from every cluster."""
        clusters = self.get_clusters()
        attrs_getter = operator.attrgetter(*attrs)
        return map(attrs_getter, clusters)

    def reset_ids(self, start_id=1):
        """Renumber all clusters consecutively from start_id.

        :returns: (old_ids, new_ids) lists in matching order.
        """
        clusters_with_ids = list(self.get_clusters(with_ids=True))
        self.clear()
        old_ids = []
        for new_cluster_id, (old_cluster_id, cluster) in enumerate(clusters_with_ids, start=start_id):
            old_ids.append(old_cluster_id)
            cluster.set_cluster_id(new_cluster_id)
            self[new_cluster_id] = cluster
        max_id = start_id + len(clusters_with_ids) - 1
        self.max_id = max_id
        new_ids = list(range(start_id, max_id + 1))
        return old_ids, new_ids

    def set_ids(self, old_ids, new_ids):
        """Apply an old-id -> new-id mapping to every cluster and refresh max_id."""
        clusters = self.get_clusters()
        old_to_new_ids_dict = dict(zip(old_ids, new_ids))
        self.max_id_reducer.reset()
        for cluster in clusters:
            new_id = old_to_new_ids_dict[cluster.cluster_id]
            cluster.set_cluster_id(new_id)
            self.max_id_reducer(new_id)
        self.max_id = self.max_id_reducer.get_state()

    def any_cluster_with_emb(self, emb):
        """Return True if any cluster contains the given embedding."""
        clusters = self.get_clusters()
        return any(filter(lambda cluster: cluster.contains_embedding(emb), clusters))

    def add_clusters(self, clusters):
        # NOTE(review): the reducer is reset here, so max_id after this call
        # reflects only the newly added clusters' ids.
        self.max_id_reducer.reset()
        for cluster in clusters:
            cluster_id = cluster.cluster_id
            self[cluster_id] = cluster
            self.max_id_reducer(cluster_id)
        self.max_id = self.max_id_reducer.get_state()

    def add_cluster(self, cluster):
        self.add_clusters([cluster])

    def remove_clusters(self, clusters):
        """Remove the given clusters, recomputing max_id if it was removed."""
        reset_max_id = False
        for cluster in clusters:
            cluster_id = cluster.cluster_id
            self.pop(cluster_id)
            if cluster_id == self.max_id:
                reset_max_id = True
        if reset_max_id:
            self.reset_max_id()

    def reset_max_id(self):
        """Recompute max_id from the remaining keys (0 when empty)."""
        cluster_ids = self.get_cluster_ids()
        self.max_id = max(cluster_ids) if cluster_ids else 0

    def remove_cluster(self, cluster):
        self.remove_clusters([cluster])

    def get_max_id(self):
        if self.max_id is None:
            return self.max_id_reducer.default
        return self.max_id

    def get_embeddings(self):
        """Chain together every embedding of every cluster (lazy)."""
        return chain(*map(lambda cluster: cluster.get_embeddings(),
                          self.get_clusters(with_ids=False)))
| StarcoderdataPython |
333341 | import logging
import random
import numpy as N
class Cpu:
    def __init__(self):
        """Set up Chip-8 machine state: memory, registers, timers, display, fonts."""
        # memory 4kbs
        self.memory = [0] * 4096
        # data registers (8-bit)
        self.V = [0] * 16
        # address register (16-bit)
        self.I = 0
        # timers (8-bit)
        self.delay = 0
        self.sound = 0
        # program counter, 0x000 to 0x1FF is reserved for internal use (16-bit)
        self.pc = 0x200
        # stack pointer (8-bit) and stack (16-bit)
        self.stack_pointer = 0
        self.stack = [0] * 16
        # Initialize graphics array
        # NOTE(review): the Chip-8 display is 64x32 pixels; (31, 63) looks like
        # an off-by-one -- confirm against the draw opcode before changing,
        # since the CLS opcode resets to the same shape.
        self.graphics = N.zeros((31, 63))
        # initialize some sprites in memory
        self.sprite_pointer = 0x50
        # Built-in hex font: 5 bytes per glyph for digits 0-F.
        self.sprites = [
            0xF0, 0x90, 0x90, 0x90, 0xF0, # Zero
            0x20, 0x60, 0x20, 0x20, 0x70, # One
            0xF0, 0x10, 0xF0, 0x80, 0xF0, # Two
            0xF0, 0x10, 0xF0, 0x10, 0xF0, # Three
            0x90, 0x90, 0xF0, 0x10, 0x10, # Four
            0xF0, 0x80, 0xF0, 0x10, 0xF0, # Five
            0xF0, 0x80, 0xF0, 0x90, 0xF0, # Six
            0xF0, 0x10, 0x20, 0x40, 0x40, # Seven
            0xF0, 0x90, 0xF0, 0x90, 0xF0, # Eight
            0xF0, 0x90, 0xF0, 0x10, 0xF0, # Nine
            0xF0, 0x90, 0xF0, 0x90, 0x90, # A
            0xE0, 0x90, 0xE0, 0x90, 0xE0, # B
            0xF0, 0x80, 0x80, 0x80, 0xF0, # C
            0xE0, 0x90, 0x90, 0x90, 0xE0, # D
            0xF0, 0x80, 0xF0, 0x80, 0xF0, # E
            0xF0, 0x80, 0xF0, 0x80, 0x80  # F
        ]
        # Sprites are saved in memory starting at 0x50
        for i in range(len(self.sprites)):
            self.memory[self.sprite_pointer+i] = self.sprites[i]

        self.keys = list(range(16))
        logging.basicConfig(level=logging.DEBUG)
# fetch an opcode from two bytes
def fetch_opcode(self, hexvalue):
byte1 = self.memory[hexvalue]
byte2 = self.memory[hexvalue+1]
opcode = byte1 << 8
opcode = opcode | byte2
return opcode
def execute_operation(self, opcode, key):
opcode_identifier = opcode & 0xF000
nnn = opcode & 0xFFF
kk = opcode & 0x00FF
x = (opcode & 0x0F00) >> 8
y = (opcode & 0x00F0) >> 4
# Debug logging
logging.debug("Register I: " + hex(self.I))
logging.debug("Program Counter: " + hex(self.pc))
for i in range(0x10):
logging.debug("Register V[" + hex(i) + "]: " + hex(self.V[i]))
logging.debug('\n' + '\n'.join([''.join(['{:2}'.format(int(item)) for item in row])
for row in self.graphics]))
# 0XXX - Multiple opcodes
if opcode_identifier == 0x0000:
# 00E0 - CLS - Clear the display.
if opcode == 0x00E0:
logging.debug(hex(opcode) + " == 00E0 - CLS - Clear the display")
self.graphics = N.zeros((31, 63))
self.pc += 2
# 00EE - RET - Return from a subroutine.
elif opcode == 0x00EE:
logging.debug(hex(opcode) + " == 00EE - RET - Return from a subroutine")
self.pc = self.stack[self.stack_pointer] + 2
self.stack_pointer -= 1
else:
raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
# 1nnn - JP addr - Jump to location nnn.
elif opcode_identifier == 0x1000:
logging.debug(hex(opcode) + " == 1nnn - JP addr - Jump to location nnn")
self.pc = nnn
# 2nnn - CALL addr - Call subroutine at nnn.
elif opcode_identifier == 0x2000:
logging.debug(hex(opcode) + " == 2nnn - CALL addr - Call subroutine at nnn")
self.stack_pointer += 1
self.stack[self.stack_pointer] = self.pc
self.pc = nnn
# 3xkk - SE Vx, byte - Skip next instruction if Vx = kk.
elif opcode_identifier == 0x3000:
logging.debug(hex(opcode) + " == 3xkk - SE Vx, byte - Skip next instruction if Vx = kk")
if self.V[x] == kk:
self.pc += 4
else:
self.pc += 2
# 4xkk - SNE Vx, byte - Skip next instruction if Vx != kk.
elif opcode_identifier == 0x4000:
logging.debug(hex(opcode) + " == 4xkk - SNE Vx, byte - Skip next instruction if Vx != kk")
if self.V[x] != kk:
self.pc += 4
else:
self.pc += 2
# 5xy0 - SE Vx, Vy - Skip next instruction if Vx = Vy.
elif (opcode_identifier == 0x5000) and (opcode & 0xF == 0x0):
logging.debug(hex(opcode) + " == 5xy0 - SE Vx, Vy - Skip next instruction if Vx = Vy")
if self.V[x] == self.V[y]:
self.pc += 4
else:
self.pc += 2
self.pc += 2
# 6xkk - LD Vx, byte - Set Vx = kk.
elif opcode_identifier == 0x6000:
logging.debug(hex(opcode) + " == 6xkk - LD Vx, byte - Set Vx = kk")
self.V[x] = kk
self.pc += 2
# 7xkk - ADD Vx, byte - Set Vx = Vx + kk.
elif opcode_identifier == 0x7000:
logging.debug(hex(opcode) + " == 7xkk - ADD Vx, byte - Set Vx = Vx + kk")
self.V[x] = self.V[x] + kk
self.pc += 2
# 8XXX - Multiple opcodes
elif opcode_identifier == 0x8000:
# 8xy0 - LD Vx, Vy - Set Vx = Vy.
if opcode & 0xF == 0x0:
logging.debug(hex(opcode) + " == 8xy0 - LD Vx, Vy - Set Vx = Vy")
self.V[x] = self.V[y]
self.pc += 2
# 8xy1 - OR Vx, Vy - Set Vx = Vx OR Vy.
elif opcode & 0xF == 0x1:
logging.debug(hex(opcode) + " == 8xy1 - OR Vx, Vy - Set Vx = Vx OR Vy")
self.V[x] = self.V[x] | self.V[y]
self.pc += 2
# 8xy2 - AND Vx, Vy - Set Vx = Vx AND Vy.
elif opcode & 0xF == 0x2:
logging.debug(hex(opcode) + " == 8xy2 - AND Vx, Vy - Set Vx = Vx AND Vy")
self.V[x] = self.V[x] & self.V[y]
self.pc += 2
# 8xy3 - XOR Vx, Vy - Set Vx = Vx XOR Vy.
elif opcode & 0xF == 0x3:
logging.debug(hex(opcode) + " == 8xy3 - XOR Vx, Vy - Set Vx = Vx XOR Vy")
self.V[x] = self.V[x] ^ self.V[y]
self.pc += 2
# 8xy4 - ADD Vx, Vy - Set Vx = Vx + Vy, set VF = carry.
elif opcode & 0xF == 0x4:
logging.debug(hex(opcode) + " == 8xy4 - ADD Vx, Vy - Set Vx = Vx + Vy, set VF = carry")
if self.V[x] + self.V[y] > 0xFF:
self.V[0xF] = 1
self.V[x] = (self.V[x] + self.V[y]) - 256
else:
self.V[0xF] = 0
self.V[x] = self.V[x] + self.V[y]
self.pc += 2
# 8xy5 - SUB Vx, Vy - Set Vx = Vx - Vy, set VF = NOT borrow.
elif opcode & 0xF == 0x5:
logging.debug(hex(opcode) + " == 8xy5 - SUB Vx, Vy - Set Vx = Vx - Vy, set VF = NOT borrow")
if self.V[x] > self.V[y]:
self.V[0xF] = 1
else:
self.V[0xF] = 0
self.V[x] = self.V[x] - self.V[y]
self.pc += 2
# 8xy6 - SHR Vx {, Vy} - Set Vx = Vx SHR 1.
elif opcode & 0xF == 0x6:
logging.debug(hex(opcode) + " == 8xy6 - SHR Vx {, Vy} - Set Vx = Vx SHR 1")
if self.V[x] & 0x1 == 1:
self.V[0xF] = 1
else:
self.V[0xF] = 0
self.V[x] = self.V[x] / 2
self.pc += 2
# 8xy7 - SUBN Vx, Vy - Set Vx = Vy - Vx, set VF = NOT borrow.
elif opcode & 0xF == 0x7:
logging.debug(hex(opcode) + " == 8xy7 - SUBN Vx, Vy - Set Vx = Vy - Vx, set VF = NOT borrow")
if self.V[y] > self.V[x]:
self.V[0xF] = 1
else:
self.V[0xF] = 0
self.V[x] = self.V[y] - self.V[x]
self.pc += 2
# 8xyE - SHL Vx {, Vy} - Set Vx = Vx SHL 1.
elif opcode & 0xF == 0xE:
logging.debug(hex(opcode) + " == 8xyE - SHL Vx {, Vy} - Set Vx = Vx SHL 1")
if self.V[x] & 0x80 >> 7 == 1:
self.V[0xF] = 1
else:
self.V[0xF] = 0
self.V[x] = self.V[x] * 2
self.pc += 2
else:
raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
# 9xy0 - SNE Vx, Vy - Skip next instruction if Vx != Vy.
elif (opcode_identifier == 0x9000) and (opcode & 0xF == 0x0):
logging.debug(hex(opcode) + " == 9xy0 - SNE Vx, Vy - Skip next instruction if Vx != Vy")
if self.V[x] != self.V[y]:
self.pc += 4
else:
self.pc += 2
# Annn - LD I, addr - Set I = nnn.
elif opcode_identifier == 0xA000:
logging.debug(hex(opcode) + " == Annn - LD I, addr - Set I = nnn")
self.I = nnn
self.pc += 2
# Bnnn - JP V0, addr - Jump to location nnn + V0.
elif opcode_identifier == 0xB000:
logging.debug(hex(opcode) + " == Bnnn - JP V0, addr - Jump to location nnn + V0")
self.pc = nnn + self.V[0]
self.pc += 2
# Cxkk - RND Vx, byte - Set Vx = random byte AND kk.
elif opcode_identifier == 0xC000:
logging.debug(hex(opcode) + " == Cxkk - RND Vx, byte - Set Vx = random byte AND kk")
random_byte = random.randint(0, 255)
self.V[x] = random_byte & kk
self.pc += 2
# Dxyn - DRW Vx, Vy, nibble
# Display n-byte sprite starting at memory location I at (Vx, Vy), set VF = collision.
elif opcode_identifier == 0xD000:
logging.debug(hex(opcode) + " == Dxyn - DRW Vx, Vy, nibble - Display sprite and set collision")
height = opcode & 0x000F
width = 8
x_initial = self.V[y]
y_initial = self.V[x]
for x_pos in range(height):
binary_string = bin(self.memory[self.I+x_pos])
binary_string = binary_string[2:].zfill(width)
for y_pos in range(width):
self.graphics[(x_initial+x_pos) % 31][(y_initial+y_pos) % 63] = binary_string[y_pos]
self.pc += 2
#EXXX - Multiple opcodes
elif opcode_identifier == 0xE000:
# Ex9E - SKP Vx - Skip next instruction if key with the value of Vx is pressed.
if (opcode & 0xF0FF) == 0xE091:
logging.debug(hex(opcode) + " == Ex9E - SKP Vx - Skip next instruction if key with the value of Vx is pressed")
if key == self.V[x]:
self.pc += 4
else:
self.pc += 2
# ExA1 - SKNP Vx - Skip next instruction if key with the value of Vx is not pressed.
elif opcode & 0xF0FF == 0xE0A1:
logging.debug(hex(opcode) + " == ExA1 - SKNP Vx - Skip next instruction if key with the value of Vx is not pressed")
if key != self.V[x]:
self.pc += 4
else:
self.pc += 2
else:
raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
# FXXX - Multiple opcodes
elif opcode_identifier == 0xF000:
# Fx07 - LD Vx, DT - Set Vx = delay timer value.
if opcode & 0xF0FF == 0xF007:
logging.debug(hex(opcode) + " == Fx07 - LD Vx, DT - Set Vx = delay timer value")
self.V[x] = self.delay
self.pc += 2
# Fx0A - LD Vx, K - Wait for a key press, store the value of the key in Vx.
# Program counter does not progress if no keys are pressed, so no loop necessary
elif opcode & 0xF0FF == 0xF00A:
logging.debug(hex(opcode) + " == Fx0A - LD Vx, K - Wait for a key press, store the value of the key in Vx")
if key:
self.V[x] = key
self.pc += 2
# Fx15 - LD DT, Vx - Set delay timer = Vx.
elif opcode & 0xF0FF == 0xF015:
logging.debug(hex(opcode) + " == Fx15 - LD DT, Vx - Set delay timer = Vx")
self.delay = self.V[x]
self.pc += 2
# Fx18 - LD ST, Vx - Set sound timer = Vx.
elif opcode & 0xF0FF == 0xF018:
logging.debug(hex(opcode) + " == Fx18 - LD ST, Vx - Set sound timer = Vx")
self.sound = self.V[x]
self.pc += 2
# Fx1E - ADD I, Vx - Set I = I + Vx. #
elif opcode & 0xF0FF == 0xF01E:
logging.debug(hex(opcode) + " == Fx1E - ADD I, Vx - Set I = I + Vx")
self.I = self.I + self.V[x]
self.pc += 2
# Fx29 - LD F, Vx - Set I = location of sprite for digit Vx.
elif opcode & 0xF0FF == 0xF029:
logging.debug(hex(opcode) + " == Fx29 - LD F, Vx - Set I = location of sprite for digit Vx")
self.I = (self.V[x] * 5) + self.sprite_pointer
self.pc += 2
# Fx33 - LD B, Vx - Store BCD representation of Vx in memory locations I, I+1, and I+2.
elif opcode & 0xF0FF == 0xF033:
logging.debug(hex(opcode) + " == Fx33 - LD B, Vx - Store BCD representation of Vx in memory locations I, I+1, and I+2")
self.memory[self.I] = (self.V[x >> 8] / 100);
self.memory[self.I + 1] = ((self.V[x >> 8] / 10) % 10);
self.memory[self.I + 2] = ((self.V[x >> 8] % 100) % 10);
self.pc += 2
# Fx55 - LD [I], Vx - Store registers V0 through Vx in memory starting at location I.
elif opcode & 0xF0FF == 0xF055:
logging.debug(hex(opcode) + " == Fx55 - LD [I], Vx - Store registers V0 through Vx in memory starting at location I")
for i in range(0x10):
self.memory[self.I + i] = self.V[i]
self.pc += 2
# Fx65 - LD Vx, [I] - Read registers V0 through Vx from memory starting at location I.
elif opcode & 0xF0FF == 0xF065:
logging.debug(hex(opcode) + " == Fx65 - LD Vx, [I] - Read registers V0 through Vx from memory starting at location I")
for i in range(0x10):
self.V[i] = self.memory[self.I + i]
self.pc += 2
else:
raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
else:
raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
| StarcoderdataPython |
# Flat tuple holding (name, price) pairs alternately: even slots are product
# names, odd slots are prices.
lista = ('Lapis', 1.70,
         'Borracha', 2,
         'Caderno', 10.40,
         'Mochila', 80.76,
         'Caneta', 1.50)

print(f'{"Lista de Preços":^28}')
# Pair up names (even indices) with prices (odd indices) and print one
# dotted-leader line per product.
for produto, preco in zip(lista[::2], lista[1::2]):
    print(f'{produto:.<20}{preco:>7.2f}')
# Tuples come in handy whenever an immutable record like this is needed.
| StarcoderdataPython |
6673314 | <filename>solutions/500.keyboard-row.241401967.ac.py
class Solution(object):
    def findWords(self, words):
        """Return the words that can be typed using one keyboard row only.

        :type words: List[str]
        :rtype: List[str]

        BUGFIX: the previous implementation returned the lazy ``filter``
        object on Python 3 instead of the list promised by the docstring,
        and its triple set-intersection test also accepted words containing
        characters found on no row.
        """
        rows = (
            set("QWERTYUIOPqwertyuiop"),
            set("ASDFGHJKLasdfghjkl"),
            set("ZXCVBNMzxcvbnm"),
        )
        # A non-empty word qualifies iff its character set is a subset of
        # exactly one row's character set.
        return [word for word in words
                if word and any(set(word) <= row for row in rows)]
| StarcoderdataPython |
8077564 | import pytest
import torch
from ding.rl_utils.upgo import upgo_loss, upgo_returns, tb_cross_entropy
@pytest.mark.unittest
def test_upgo():
    """Check tb_cross_entropy shapes, upgo_returns shape and upgo_loss gradients."""
    seq_len, batch, dim1, dim2 = 4, 8, 5, 7
    # tb_cross_entropy accepts a 4D logit tensor.
    logit = torch.randn(seq_len, batch, dim1, dim2).softmax(-1).requires_grad_(True)
    action = logit.argmax(-1).detach()
    ce = tb_cross_entropy(logit, action)
    assert ce.shape == (seq_len, batch)
    # A 5D logit tensor must be rejected.
    logit = torch.randn(seq_len, batch, dim1, dim2, 2).softmax(-1).requires_grad_(True)
    action = logit.argmax(-1).detach()
    with pytest.raises(AssertionError):
        tb_cross_entropy(logit, action)
    # tb_cross_entropy accepts a plain 3D logit tensor as well.
    logit = torch.randn(seq_len, batch, dim1).softmax(-1).requires_grad_(True)
    action = logit.argmax(-1).detach()
    ce = tb_cross_entropy(logit, action)
    assert ce.shape == (seq_len, batch)
    # upgo_returns keeps the (T, B) shape.
    rewards = torch.randn(seq_len, batch)
    bootstrap_values = torch.randn(seq_len + 1, batch).requires_grad_(True)
    returns = upgo_returns(rewards, bootstrap_values)
    assert returns.shape == (seq_len, batch)
    # upgo_loss backpropagates into the logits.
    rhos = torch.randn(seq_len, batch)
    loss = upgo_loss(logit, rhos, action, rewards, bootstrap_values)
    assert logit.requires_grad
    assert bootstrap_values.requires_grad
    assert logit.grad is None
    assert bootstrap_values.grad is None
    loss.backward()
    assert isinstance(logit.grad, torch.Tensor)
| StarcoderdataPython |
11250139 | <gh_stars>1-10
#! /usr/bin python
####################################################################
# sendSMS.py
# Send the SMS
# <NAME>
# April 22, 2016
# Contact: <EMAIL>
# Description: Keeps listening to new SMSes and whenever an SMS is received it
# prints it to console.
########################################################################
import serial
from time import sleep
ser = serial.Serial('/dev/ttyO1', 9600, timeout = 1)
def sendSMS(phoneNum, msg) :
    """Send `msg` as an SMS to `phoneNum` through the GSM modem on `ser`.

    Drives the modem with AT commands in text mode (AT+CMGF=1) and blocks
    between writes to give the modem time to process each command.
    NOTE(review): uses Python 2 print-statement syntax; will not run on
    Python 3 without changes.
    """
    ser.flushOutput()
    print("Sending SMS to "),
    print phoneNum
    ser.write("AT+CMGF=1\r\n") #Select SMS text mode; '\r\n' terminates each AT command.
    sleep(0.5) #On shorter delays it does not work
    ser.write("AT+CMGS=\"")
    sleep(0.5)
    ser.write(phoneNum)
    sleep(0.5)
    ser.write("\"\r\n")
    sleep(0.2)
    ser.write(msg)
    sleep(0.2)
    ser.write(chr(26)) #the ASCII code of the ctrl+z is 26; it terminates the message body
    sleep(0.2) #maybe this line may be removed
    ser.write("\r\n")
    sleep(5) # give the modem time to actually transmit before flushing
    ser.flushOutput()
# Example usage; runs immediately on import/execution (no __main__ guard).
sendSMS("+919816923467", "Hey there!")
6404744 | from rest_framework import serializers
from geodata.models import GeodataModelRu, GeodataModelRuAlternate
class GeodataModelRuSerializer(serializers.ModelSerializer):
    """Serialize every field of GeodataModelRu."""

    class Meta:
        model = GeodataModelRu
        # NOTE: ('__all__') is just the string '__all__' (no trailing comma),
        # which is the DRF shortcut for "include all model fields".
        fields = (
            '__all__'
        )
class GeodataCitiesSerializer(serializers.ModelSerializer):
    """Serialize a compact subset of GeodataModelRu fields for city listings."""

    class Meta:
        model = GeodataModelRu
        fields = (
            "geonameid", "name", "latitude", "longitude", "admin1code"
        )
class GeodataCitiesSerializer2(serializers.ModelSerializer):
    """Serialize alternate city names, nesting the related city record."""

    # Nested representation of the related GeodataModelRu row.
    geonameid = GeodataCitiesSerializer()

    class Meta:
        model = GeodataModelRuAlternate
        fields = (
            "geonameid", "alternate_name", "isolanguage"
        )
class GeodataCitiesSerializer3(serializers.ModelSerializer):
    """Serialize every field of GeodataModelRu.

    NOTE(review): functionally identical to GeodataModelRuSerializer;
    possibly a candidate for consolidation.
    """

    class Meta:
        model = GeodataModelRu
        # "__all__" in parentheses is still just the string shortcut for
        # "include all model fields".
        fields = (
            "__all__"
        )
| StarcoderdataPython |
4893149 | <reponame>fullbat/scilpy
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Apply bias field correction to DWI. This script doesn't compute the bias
field itself. It ONLY applies an existing bias field. Use the ANTs
N4BiasFieldCorrection executable to compute the bias field
"""
from __future__ import division
from past.utils import old_div
import argparse
import nibabel as nib
import numpy as np
from scilpy.io.utils import (
add_overwrite_arg, assert_inputs_exist, assert_outputs_exist)
def _build_arg_parser():
    """Build the CLI parser for the bias-field application script."""
    p = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)

    # Positional inputs/outputs.
    p.add_argument('dwi', help='DWI Nifti image')
    p.add_argument('bias_field', help='Bias field Nifti image')
    p.add_argument('output', help='Corrected DWI Nifti image')

    # Optional restriction of the correction region.
    p.add_argument('--mask',
                   help='Apply bias field correction only in the region '
                        'defined by the mask')

    add_overwrite_arg(p)
    return p
def _rescale_intensity(val, slope, in_max, bc_max):
return in_max - slope * (bc_max - val)
# https://github.com/stnava/ANTs/blob/master/Examples/N4BiasFieldCorrection.cxx
def _rescale_dwi(in_data, bc_data, mask_data=None):
nz_in_data = in_data
nz_bc_data = bc_data
nz_mask_data = None
if mask_data is not None:
nz_mask_data = np.nonzero(mask_data)
nz_in_data = in_data[nz_mask_data]
nz_bc_data = bc_data[nz_mask_data]
in_min = np.amin(nz_in_data)
in_max = np.amax(nz_in_data)
bc_min = np.amin(nz_bc_data)
bc_max = np.amax(nz_bc_data)
slope = old_div((in_max - in_min), (bc_max - bc_min))
rescale_func = np.vectorize(_rescale_intensity, otypes=[np.float])
rescaled_data = rescale_func(nz_bc_data, slope, in_max, bc_max)
if mask_data is not None:
bc_data[nz_mask_data] = rescaled_data
else:
bc_data = rescaled_data
return bc_data
def main():
    """Entry point: load images, divide out the bias field, rescale and save."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, [args.dwi, args.bias_field], args.mask)
    assert_outputs_exist(parser, args, args.output)

    dwi_img = nib.load(args.dwi)
    bias_field_img = nib.load(args.bias_field)
    dwi_data = dwi_img.get_data()
    bias_field_data = bias_field_img.get_data()
    if args.mask:
        mask_data = nib.load(args.mask).get_data()
    else:
        mask_data = None

    # Divide the 4D DWI by the 3D bias field (broadcast over the last axis),
    # then map the result back to the original intensity range.
    nuc_dwi_data = np.divide(dwi_data, bias_field_data[..., np.newaxis])
    rescaled_nuc_data = _rescale_dwi(dwi_data, nuc_dwi_data, mask_data)

    corrected_img = nib.Nifti1Image(rescaled_nuc_data, dwi_img.affine,
                                    dwi_img.header)
    nib.save(corrected_img, args.output)
main()
| StarcoderdataPython |
3377021 | <filename>mirumon/api/asgi.py<gh_stars>10-100
from fastapi import FastAPI
from mirumon.api import routers
from mirumon.infra.components.server_events import (
create_shutdown_events_handler,
create_startup_events_handler,
)
from mirumon.settings.environments.app import AppSettings
def create_app(settings: AppSettings) -> FastAPI:
    """Create FastAPI instance with registered events."""
    application = FastAPI(**settings.fastapi_kwargs)
    application.include_router(router=routers.router)

    # Register application lifecycle hooks.
    for event_name, handler in (
            ("startup", create_startup_events_handler(application, settings)),
            ("shutdown", create_shutdown_events_handler(application)),
    ):
        application.add_event_handler(event_name, handler)

    return application
| StarcoderdataPython |
6447246 | <filename>spreadsheet_coder/model.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build model essentials."""
import numpy as np
from tensor2tensor.utils import beam_search
import tensorflow.compat.v1 as tf
import tensorflow.contrib.rnn as contrib_rnn
import tf_slim as slim
from spreadsheet_coder import bert_modeling
from spreadsheet_coder import constants
from spreadsheet_coder import mobilebert_modeling
from spreadsheet_coder import model_utils
def create_model(
num_encoder_layers, num_decoder_layers, embedding_size, hidden_size,
dropout_rate, is_training, formula, row_cell_context, row_context_mask,
row_context_segment_ids, row_cell_indices, row_context_mask_per_cell,
row_context_segment_ids_per_cell, col_cell_context, col_context_mask,
col_context_segment_ids, col_cell_indices, col_context_mask_per_cell,
col_context_segment_ids_per_cell, exclude_headers, max_cell_context_length,
num_rows, record_index, column_index, layer_norm, cell_position_encoding,
cell_context_encoding, use_bert, use_mobilebert, per_row_encoding,
max_pooling, use_cnn, use_pointer_network, two_stage_decoding, conv_type,
grid_type, skip_connection, bert_config, unused_tensors_to_print,
formula_length, formula_prefix_length, vocab_size, beam_size, use_tpu,
use_one_hot_embeddings):
"""Creates a program generator."""
use_dropout = is_training
input_shape = bert_modeling.get_shape_list(
formula, expected_rank=2)
batch_size = input_shape[0]
height = 22
row_width = 21
col_width = 22
if exclude_headers:
row_context_mask *= tf.cast(row_context_segment_ids, dtype=tf.float32)
row_context_mask_per_cell *= tf.cast(row_context_segment_ids_per_cell,
dtype=tf.float32)
col_context_mask *= tf.cast(col_context_segment_ids, dtype=tf.float32)
col_context_mask_per_cell *= tf.cast(col_context_segment_ids_per_cell,
dtype=tf.float32)
row_cell_context *= row_context_segment_ids
if num_rows < 21:
cell_data_mask = ([1] * max_cell_context_length +
[0] * max_cell_context_length * (10 - num_rows) +
[1] * max_cell_context_length * (num_rows + 1) +
[0] * max_cell_context_length * 10)
cell_data_mask = tf.convert_to_tensor(np.array(cell_data_mask),
dtype=tf.float32)
cell_data_mask = tf.expand_dims(cell_data_mask, dim=0)
cell_data_mask_per_cell = ([1] * 21 + [0] * 21 * (10 - num_rows) +
[1] * 21 * (num_rows + 1) + [0] * 21 * 10)
cell_data_mask_per_cell = tf.convert_to_tensor(
np.array(cell_data_mask_per_cell), dtype=tf.float32)
cell_data_mask_per_cell = tf.expand_dims(cell_data_mask_per_cell, dim=0)
row_cell_context *= tf.cast(cell_data_mask, dtype=tf.int32)
row_context_mask *= cell_data_mask
row_context_mask_per_cell *= cell_data_mask_per_cell
if cell_context_encoding:
if grid_type != "col":
reshape_row_cell_context = tf.reshape(
row_cell_context, [batch_size, height, max_cell_context_length])
reshape_row_context_mask = tf.reshape(
row_context_mask, [batch_size, height, max_cell_context_length])
reshape_row_context_segment_ids = tf.reshape(row_context_segment_ids,
[batch_size, height,
max_cell_context_length])
split_row_cell_context = tf.split(reshape_row_cell_context, height,
axis=1)
split_row_context_mask = tf.split(reshape_row_context_mask, height,
axis=1)
split_row_context_segment_ids = tf.split(reshape_row_context_segment_ids,
height, axis=1)
if not use_bert and not use_mobilebert:
if max_pooling:
split_row_cell_context = ([split_row_cell_context[0]] +
split_row_cell_context[2:12])
split_row_context_mask = ([split_row_context_mask[0]] +
split_row_context_mask[2:12])
split_row_context_segment_ids = ([split_row_context_segment_ids[0]] +
split_row_context_segment_ids[2:12])
height = 11
else:
split_row_cell_context = split_row_cell_context[:12]
split_row_context_mask = split_row_context_mask[:12]
split_row_context_segment_ids = split_row_context_segment_ids[:12]
height = 12
header = tf.squeeze(split_row_cell_context[0], axis=1)
header_mask = tf.squeeze(split_row_context_mask[0], axis=1)
header_segment_ids = tf.squeeze(split_row_context_segment_ids[0], axis=1)
row_context_grid = []
header_encoding = []
if grid_type != "row":
reshape_col_cell_context = tf.reshape(
col_cell_context, [batch_size, height, max_cell_context_length])
reshape_col_context_mask = tf.reshape(
col_context_mask, [batch_size, height, max_cell_context_length])
reshape_col_context_segment_ids = tf.reshape(col_context_segment_ids,
[batch_size, height,
max_cell_context_length])
split_col_cell_context = tf.split(reshape_col_cell_context, height,
axis=1)
split_col_context_mask = tf.split(reshape_col_context_mask, height,
axis=1)
split_col_context_segment_ids = tf.split(reshape_col_context_segment_ids,
height, axis=1)
if not use_bert and not use_mobilebert:
split_col_cell_context = split_col_cell_context[:12]
split_col_context_mask = split_col_context_mask[:12]
split_col_context_segment_ids = split_col_context_segment_ids[:12]
height = 12
cur_col = tf.squeeze(split_col_cell_context[0], axis=1)
cur_col_mask = tf.squeeze(split_col_context_mask[0], axis=1)
cur_col_segment_ids = tf.squeeze(split_col_context_segment_ids[0], axis=1)
col_context_grid = []
if grid_type != "col":
if per_row_encoding:
chunk_size = 1
st_idx = 0
else:
chunk_size = 512 // max_cell_context_length - 1
st_idx = 1
if use_bert or use_mobilebert:
if grid_type == "both":
bert_scope = "row/bert"
else:
bert_scope = "bert"
else:
bert_vocab_size = bert_config.vocab_size
with tf.variable_scope("row", reuse=tf.AUTO_REUSE):
row_context_embedding = tf.get_variable(
name="cell_context_embedding",
shape=[bert_vocab_size, embedding_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
row_context_encoder_cells = [model_utils.build_lstm(hidden_size)
for _ in range(num_encoder_layers)]
row_context_encoder_cells = contrib_rnn.MultiRNNCell(
row_context_encoder_cells, state_is_tuple=True)
for i in range(st_idx, height, chunk_size):
for j in range(chunk_size):
split_row_cell_context[i + j] = tf.squeeze(
split_row_cell_context[i + j], axis=1)
split_row_context_mask[i + j] = tf.squeeze(
split_row_context_mask[i + j], axis=1)
split_row_context_segment_ids[i + j] = tf.squeeze(
split_row_context_segment_ids[i + j], axis=1)
if per_row_encoding:
concat_row_cell_context = split_row_cell_context[i]
concat_row_mask = split_row_context_mask[i]
concat_row_segment_ids = split_row_context_segment_ids[i]
else:
concat_row_cell_context = tf.concat(
[header] + split_row_cell_context[i: i + chunk_size], axis=-1)
concat_row_mask = tf.concat(
[header_mask] + split_row_context_mask[i: i + chunk_size],
axis=-1)
concat_row_segment_ids = tf.concat(
[header_segment_ids] +
split_row_context_segment_ids[i:i + chunk_size],
axis=-1)
if use_mobilebert:
row_bert_context_model = mobilebert_modeling.BertModel(
config=bert_config, is_training=is_training,
input_ids=concat_row_cell_context, input_mask=concat_row_mask,
token_type_ids=concat_row_segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
scope=bert_scope)
elif use_bert:
row_bert_context_model = bert_modeling.BertModel(
config=bert_config, is_training=is_training,
input_ids=concat_row_cell_context, input_mask=concat_row_mask,
token_type_ids=concat_row_segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
scope=bert_scope)
else:
cell_context_embeddings = tf.nn.embedding_lookup(
row_context_embedding, concat_row_cell_context)
row_context_sequence_output, _ = tf.nn.dynamic_rnn(
row_context_encoder_cells,
cell_context_embeddings,
initial_state=row_context_encoder_cells.get_initial_state(
batch_size=batch_size, dtype=tf.float32),
dtype=tf.float32)
if use_bert or use_mobilebert:
row_context_sequence_output = (
row_bert_context_model.get_sequence_output())
row_context_sequence_output = tf.reshape(
row_context_sequence_output,
[batch_size, chunk_size + st_idx, max_cell_context_length, -1])
row_context_sequence_output = tf.split(
row_context_sequence_output, chunk_size + st_idx, axis=1)
if not per_row_encoding:
header_encoding.append(row_context_sequence_output[0])
for j in range(st_idx, chunk_size + st_idx):
row_context_sequence_output[j] = tf.squeeze(
row_context_sequence_output[j], axis=1)
row_context_grid.append(row_context_sequence_output[j])
if not per_row_encoding:
header_encoding = tf.concat(header_encoding, axis=1)
header_encoding = tf.reduce_mean(header_encoding, axis=1)
row_context_grid = [header_encoding] + row_context_grid
if grid_type != "row":
if per_row_encoding:
chunk_size = 1
st_idx = 0
else:
chunk_size = 512 // max_cell_context_length - 1
st_idx = 1
if use_bert or use_mobilebert:
if grid_type == "both":
bert_scope = "col/bert"
else:
bert_scope = "bert"
else:
bert_vocab_size = bert_config.vocab_size
with tf.variable_scope("col", reuse=tf.AUTO_REUSE):
col_context_embedding = tf.get_variable(
name="cell_context_embedding",
shape=[bert_vocab_size, embedding_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
col_context_encoder_cells = [model_utils.build_lstm(hidden_size)
for _ in range(num_encoder_layers)]
col_context_encoder_cells = contrib_rnn.MultiRNNCell(
col_context_encoder_cells, state_is_tuple=True)
for i in range(1, height, chunk_size):
for j in range(chunk_size):
split_col_cell_context[i + j] = tf.squeeze(
split_col_cell_context[i + j], axis=1)
split_col_context_mask[i + j] = tf.squeeze(
split_col_context_mask[i + j], axis=1)
split_col_context_segment_ids[i + j] = tf.squeeze(
split_col_context_segment_ids[i + j], axis=1)
if per_row_encoding:
concat_col_cell_context = split_col_cell_context[i]
concat_col_mask = split_col_context_mask[i]
concat_col_segment_ids = split_col_context_segment_ids[i]
else:
concat_col_cell_context = tf.concat(
[cur_col] + split_col_cell_context[i: i + chunk_size], axis=-1)
concat_col_mask = tf.concat(
[cur_col_mask] + split_col_context_mask[i: i + chunk_size],
axis=-1)
concat_col_segment_ids = tf.concat(
[cur_col_segment_ids] +
split_col_context_segment_ids[i: i + chunk_size],
axis=-1)
if use_mobilebert:
col_bert_context_model = mobilebert_modeling.BertModel(
config=bert_config, is_training=is_training,
input_ids=concat_col_cell_context, input_mask=concat_col_mask,
token_type_ids=concat_col_segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
scope=bert_scope)
elif use_bert:
col_bert_context_model = bert_modeling.BertModel(
config=bert_config, is_training=is_training,
input_ids=concat_col_cell_context, input_mask=concat_col_mask,
token_type_ids=concat_col_segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
scope=bert_scope)
else:
cell_context_embeddings = tf.nn.embedding_lookup(
col_context_embedding, concat_col_cell_context)
col_context_sequence_output, _ = tf.nn.dynamic_rnn(
col_context_encoder_cells,
cell_context_embeddings,
initial_state=col_context_encoder_cells.get_initial_state(
batch_size=batch_size, dtype=tf.float32),
dtype=tf.float32)
if use_bert or use_mobilebert:
col_context_sequence_output = (
col_bert_context_model.get_sequence_output())
col_context_sequence_output = tf.reshape(
col_context_sequence_output,
[batch_size, chunk_size + st_idx, max_cell_context_length, -1])
col_context_sequence_output = tf.split(
col_context_sequence_output, chunk_size + st_idx, axis=1)
for j in range(st_idx, chunk_size + st_idx):
col_context_sequence_output[j] = tf.squeeze(
col_context_sequence_output[j], axis=1)
col_context_grid.append(col_context_sequence_output[j])
if cell_context_encoding:
with tf.variable_scope("encode", reuse=tf.AUTO_REUSE):
if grid_type != "col":
with tf.variable_scope("row", reuse=tf.AUTO_REUSE):
row_context_grid = tf.stack(row_context_grid, axis=1)
_, row_height, width, _ = bert_modeling.get_shape_list(
row_context_grid, expected_rank=4)
if use_cnn:
if conv_type == "grid":
# Adds a conv layer with hidden_size filters of size [hxw],
# followed by the default (implicit) ReLU activation.
conv1 = slim.conv2d(
row_context_grid, hidden_size,
[row_height, row_width],
padding="SAME", scope="grid_conv")
else:
col_conv = slim.conv2d(row_context_grid, hidden_size,
[row_height, 1],
padding="SAME", scope="col_conv")
if conv_type == "cross":
row_conv = slim.conv2d(row_context_grid, hidden_size,
[1, width],
padding="SAME", scope="row_conv")
conv1 = col_conv + row_conv
else:
conv1 = col_conv
# Reshapes the hidden units such that instead of 2D maps,
# they are 1D vectors:
row_context_sequence_output = tf.reshape(
conv1, [batch_size, row_height * width, hidden_size])
if skip_connection:
row_context_grid = tf.reshape(
row_context_grid,
[batch_size, row_height * width, -1])
row_context_sequence_output = tf.concat(
[row_context_sequence_output, row_context_grid], axis=-1)
else:
row_context_sequence_output = tf.reshape(
row_context_grid,
[batch_size, row_height * max_cell_context_length, -1])
if use_pointer_network:
batch_row_indices = tf.range(tf.to_int32(batch_size))
batch_row_indices = tf.expand_dims(batch_row_indices, dim=-1)
batch_row_indices = tf.repeat(
batch_row_indices, repeats=row_height * row_width, axis=1)
batch_row_indices = tf.reshape(
batch_row_indices, [batch_size * row_height * row_width])
row_indices = tf.range(tf.to_int32(row_height))
row_indices = tf.expand_dims(row_indices, dim=-1)
row_indices = tf.repeat(row_indices, repeats=row_width, axis=1)
row_indices = tf.repeat(row_indices, repeats=batch_size, axis=0)
row_indices = tf.reshape(
row_indices, [batch_size * row_height * row_width])
row_cell_indices = tf.reshape(
row_cell_indices, [batch_size * row_height * row_width])
row_cell_indices = tf.stack([batch_row_indices, row_indices,
row_cell_indices], axis=1)
row_pooled_output = tf.reshape(
row_context_sequence_output,
[batch_size, row_height, max_cell_context_length, -1])
row_pooled_output = tf.gather_nd(
row_pooled_output, row_cell_indices)
row_pooled_output = tf.reshape(
row_pooled_output,
[batch_size, row_height, row_width, -1])
if use_bert or use_mobilebert:
pooled_linear_name = "bert_pooled_output_linear"
else:
pooled_linear_name = "pooled_output_linear"
row_pooled_output = tf.layers.dense(
row_pooled_output,
hidden_size,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(
stddev=0.02),
name=pooled_linear_name)
row_row_pooled_output = tf.reduce_mean(
row_pooled_output, axis=2)
row_col_pooled_output = tf.reduce_mean(
row_pooled_output, axis=1)
row_row_pooled_output = tf.split(
row_row_pooled_output, [1] * 11 + [11], axis=1)
row_row_pooled_output = tf.concat(
list(reversed(row_row_pooled_output[1:])), axis=1)
row_col_pooled_output = tf.split(row_col_pooled_output,
[1] * 10 + [11], axis=1)
row_col_pooled_output = tf.concat(
list(reversed(row_col_pooled_output)), axis=1)
output_token_embeddings = tf.get_variable(
name="formula_token_embedding",
shape=[vocab_size - 42, embedding_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_token_embeddings = tf.expand_dims(output_token_embeddings,
dim=0)
output_token_embeddings = tf.tile(output_token_embeddings,
[batch_size, 1, 1])
output_token_embeddings = tf.split(
output_token_embeddings,
[constants.ROW_ID, vocab_size - 42 - constants.ROW_ID],
axis=1)
output_token_embeddings = tf.concat(
[output_token_embeddings[0],
row_row_pooled_output, row_col_pooled_output,
output_token_embeddings[1]], axis=1)
row_context_mask = tf.expand_dims(row_context_mask, axis=-1)
row_cell_data_encoder_output = tf.layers.dense(
row_context_sequence_output,
hidden_size,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name="cell_data_encoder_output")
row_header_encoder_output = tf.layers.dense(
row_context_sequence_output,
hidden_size,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name="header_encoder_output")
if layer_norm:
row_cell_data_encoder_output = slim.layer_norm(
bert_modeling.gelu(row_cell_data_encoder_output))
row_header_encoder_output = slim.layer_norm(
bert_modeling.gelu(row_header_encoder_output))
if not use_bert and not use_mobilebert:
if max_pooling:
split_row_context_segment_ids = tf.split(
row_context_segment_ids,
[max_cell_context_length, max_cell_context_length,
10 * max_cell_context_length, 10 * max_cell_context_length],
axis=1)
split_row_context_mask = tf.split(
row_context_mask,
[max_cell_context_length, max_cell_context_length,
10 * max_cell_context_length, 10 * max_cell_context_length],
axis=1)
row_context_segment_ids = tf.concat(
[split_row_context_segment_ids[0],
split_row_context_segment_ids[2]], axis=1)
row_context_mask = tf.concat(
[split_row_context_mask[0], split_row_context_mask[2]],
axis=1)
else:
split_row_context_segment_ids = tf.split(
row_context_segment_ids,
[12 * max_cell_context_length, 10 * max_cell_context_length],
axis=1)
split_row_context_mask = tf.split(
row_context_mask,
[12 * max_cell_context_length, 10 * max_cell_context_length],
axis=1)
row_context_segment_ids = split_row_context_segment_ids[0]
row_context_mask = split_row_context_mask[0]
row_cell_data_mask = tf.cast(
tf.expand_dims(row_context_segment_ids, dim=-1),
dtype=tf.float32)
row_header_mask = 1.0 - row_cell_data_mask
row_cell_data_mask *= row_context_mask
row_header_mask *= row_context_mask
row_context_encoder_output = (
row_header_encoder_output * row_header_mask
+ row_cell_data_encoder_output * row_cell_data_mask)
if grid_type != "row":
with tf.variable_scope("col", reuse=tf.AUTO_REUSE):
col_context_grid = tf.stack(col_context_grid, axis=1)
_, col_height, width, _ = bert_modeling.get_shape_list(
col_context_grid, expected_rank=4)
if use_cnn:
if conv_type == "grid":
# Adds a conv layer with hidden_size filters of size [hxw],
# followed by the default (implicit) ReLU activation.
conv1 = slim.conv2d(col_context_grid, hidden_size,
[col_height, col_width],
padding="SAME", scope="grid_conv")
else:
col_conv = slim.conv2d(col_context_grid, hidden_size,
[col_height, 1],
padding="SAME", scope="col_conv")
if conv_type == "cross":
row_conv = slim.conv2d(col_context_grid, hidden_size,
[1, width],
padding="SAME", scope="row_conv")
conv1 = col_conv + row_conv
else:
conv1 = col_conv
# Reshapes the hidden units such that instead of 2D maps,
# they are 1D vectors:
col_context_sequence_output = tf.reshape(
conv1, [batch_size, col_height * width, hidden_size])
if skip_connection:
col_context_grid = tf.reshape(
col_context_grid,
[batch_size, col_height * width, -1])
col_context_sequence_output = tf.concat(
[col_context_sequence_output, col_context_grid], axis=-1)
else:
col_context_sequence_output = tf.reshape(
col_context_grid,
[batch_size, col_height * max_cell_context_length, -1])
if use_pointer_network:
batch_col_indices = tf.range(tf.to_int32(batch_size))
batch_col_indices = tf.expand_dims(batch_col_indices, dim=-1)
batch_col_indices = tf.repeat(
batch_col_indices, repeats=col_height * col_width, axis=1)
batch_col_indices = tf.reshape(
batch_col_indices, [batch_size * col_height * col_width])
col_indices = tf.range(tf.to_int32(col_height))
col_indices = tf.expand_dims(col_indices, dim=-1)
col_indices = tf.repeat(col_indices, repeats=col_width, axis=1)
col_indices = tf.repeat(col_indices, repeats=batch_size, axis=0)
col_indices = tf.reshape(
col_indices, [batch_size * col_height * col_width])
col_cell_indices = tf.split(col_cell_indices,
[col_width, col_height * col_width],
axis=1)
col_cell_indices = col_cell_indices[1]
col_cell_indices = tf.reshape(
col_cell_indices, [batch_size * col_height * col_width])
col_cell_indices = tf.stack([batch_col_indices, col_indices,
col_cell_indices], axis=1)
col_pooled_output = tf.reshape(
col_context_sequence_output,
[batch_size, col_height, max_cell_context_length, -1])
col_pooled_output = tf.gather_nd(
col_pooled_output, col_cell_indices)
col_pooled_output = tf.reshape(
col_pooled_output,
[batch_size, col_height, col_width, -1])
if use_bert or use_mobilebert:
pooled_linear_name = "bert_pooled_output_linear"
else:
pooled_linear_name = "pooled_output_linear"
col_pooled_output = tf.layers.dense(
col_pooled_output,
hidden_size,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=pooled_linear_name)
col_row_pooled_output = tf.reduce_mean(
col_pooled_output, axis=1)
col_col_pooled_output = tf.reduce_mean(
col_pooled_output, axis=2)
col_row_pooled_output = tf.split(
col_row_pooled_output, [1] * 11 + [11], axis=1)
col_row_pooled_output = tf.concat(
list(reversed(col_row_pooled_output[1:])), axis=1)
col_col_pooled_output = tf.split(
col_col_pooled_output, [1] * 10 + [11], axis=1)
col_col_pooled_output = tf.concat(
list(reversed(col_col_pooled_output)), axis=1)
if grid_type == "col":
output_token_embeddings = tf.get_variable(
name="formula_token_embedding",
shape=[vocab_size - 42, embedding_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_token_embeddings = tf.expand_dims(
output_token_embeddings, dim=0)
output_token_embeddings = tf.tile(output_token_embeddings,
[batch_size, 1, 1])
output_token_embeddings = tf.split(
output_token_embeddings,
[constants.ROW_ID, vocab_size - 42 - constants.ROW_ID],
axis=1)
output_token_embeddings = tf.concat(
[output_token_embeddings[0],
col_row_pooled_output, col_col_pooled_output,
output_token_embeddings[1]], axis=1)
else:
output_token_embeddings = tf.split(
output_token_embeddings,
[constants.ROW_ID, 42, vocab_size - 42 - constants.ROW_ID],
axis=1)
concat_range_embeddings = tf.concat(
[col_row_pooled_output, col_col_pooled_output], axis=1)
output_token_embeddings = tf.concat(
[output_token_embeddings[0],
output_token_embeddings[1] + concat_range_embeddings,
output_token_embeddings[2]], axis=1)
col_context_mask = tf.expand_dims(col_context_mask, axis=-1)
col_cell_data_encoder_output = tf.layers.dense(
col_context_sequence_output,
hidden_size,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name="cell_data_encoder_output")
col_header_encoder_output = tf.layers.dense(
col_context_sequence_output,
hidden_size,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name="header_encoder_output")
if layer_norm:
col_cell_data_encoder_output = slim.layer_norm(
bert_modeling.gelu(col_cell_data_encoder_output))
col_header_encoder_output = slim.layer_norm(
bert_modeling.gelu(col_header_encoder_output))
col_context_segment_ids = tf.split(
col_context_segment_ids,
[max_cell_context_length, 21 * max_cell_context_length],
axis=1)
col_context_segment_ids = col_context_segment_ids[1]
col_context_mask = tf.split(
col_context_mask,
[max_cell_context_length, 21 * max_cell_context_length],
axis=1)
col_context_mask = col_context_mask[1]
if not use_bert and not use_mobilebert:
split_col_context_segment_ids = tf.split(
col_context_segment_ids,
[11 * max_cell_context_length, 10 * max_cell_context_length],
axis=1)
col_context_segment_ids = split_col_context_segment_ids[0]
split_col_context_mask = tf.split(
col_context_mask,
[11 * max_cell_context_length, 10 * max_cell_context_length],
axis=1)
col_context_mask = split_col_context_mask[0]
col_cell_data_mask = tf.cast(
tf.expand_dims(col_context_segment_ids, dim=-1),
dtype=tf.float32)
col_header_mask = 1.0 - col_cell_data_mask
col_cell_data_mask = col_cell_data_mask * col_context_mask
col_header_mask = col_header_mask * col_context_mask
col_context_encoder_output = (
col_header_encoder_output * col_header_mask +
col_cell_data_encoder_output * col_cell_data_mask)
if grid_type == "row":
context_encoder_output = row_context_encoder_output
cell_data_mask = row_cell_data_mask
header_mask = row_header_mask
elif grid_type == "col":
context_encoder_output = col_context_encoder_output
cell_data_mask = col_cell_data_mask
header_mask = col_header_mask
else:
context_encoder_output = tf.concat([row_context_encoder_output,
col_context_encoder_output],
axis=1)
cell_data_mask = tf.concat([row_cell_data_mask, col_cell_data_mask],
axis=1)
header_mask = tf.concat([row_header_mask, col_header_mask], axis=1)
context_encoder_output = tf.layers.dropout(context_encoder_output,
dropout_rate,
training=use_dropout,
name="context_encoder_dropout")
with tf.variable_scope("cell_index_encoder", reuse=tf.AUTO_REUSE):
index_embedding = tf.get_variable(
name="cell_index_embedding",
shape=[1000, embedding_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
cell_indices = tf.concat([column_index, record_index], axis=1)
cell_indices_embeddings = tf.nn.embedding_lookup(index_embedding,
cell_indices)
if cell_position_encoding:
encoder_cells = [model_utils.build_lstm(hidden_size)
for _ in range(num_encoder_layers)]
encoder_cells = contrib_rnn.MultiRNNCell(encoder_cells,
state_is_tuple=True)
encoder_state = encoder_cells.get_initial_state(
batch_size=batch_size, dtype=tf.float32)
_, encoder_state = tf.nn.dynamic_rnn(
encoder_cells,
cell_indices_embeddings,
initial_state=encoder_state,
dtype=tf.float32)
sketch_mask = ([0] * constants.SPECIAL_TOKEN_SIZE +
[1] * (vocab_size - constants.SPECIAL_TOKEN_SIZE))
sketch_mask[constants.END_FORMULA_SKETCH_ID] = 1
sketch_mask = tf.convert_to_tensor(np.array(sketch_mask), dtype=tf.float32)
sketch_mask = tf.expand_dims(sketch_mask, 0)
sketch_mask = tf.tile(sketch_mask, [batch_size, 1])
range_mask = [0] * vocab_size
range_mask[constants.RANGE_TOKEN_ID] = 1
range_mask[constants.RANGE_SPLIT_ID] = 1
range_mask[constants.END_RANGE_ID] = 1
range_mask[constants.EOF_ID] = 1
for i in range(constants.ROW_ID, constants.COL_ID + 21):
range_mask[i] = 1
range_mask = tf.convert_to_tensor(np.array(range_mask), dtype=tf.float32)
range_mask = tf.expand_dims(range_mask, 0)
range_mask = tf.tile(range_mask, [batch_size, 1])
range_bool_mask = [0]
range_bool_mask = tf.convert_to_tensor(np.array(range_bool_mask),
dtype=tf.int32)
range_bool_mask = tf.expand_dims(range_bool_mask, 0)
range_bool_mask = tf.tile(range_bool_mask, [batch_size, 1])
with tf.variable_scope("decode", reuse=tf.AUTO_REUSE):
token_embedding = tf.get_variable(
name="formula_token_embedding",
shape=[vocab_size, embedding_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
cells = [model_utils.build_lstm(hidden_size)
for _ in range(num_decoder_layers)]
cells = contrib_rnn.MultiRNNCell(cells, state_is_tuple=True)
    def symbols_to_logits(partial_seqs, cur_step, beam_search_state):
      """Runs one decode step: partial sequences -> next-token logits.

      Serves as the per-step function for the teacher-forced / greedy decode
      loops below and as ``symbols_to_logits_fn`` for
      ``beam_search.beam_search``.

      NOTE(review): reads many closure variables from the enclosing model
      function (batch_size, beam_size, cells, token_embedding, range_mask,
      range_bool_mask, and the feature flags) — confirm against the full
      function when editing.

      Args:
        partial_seqs: <int32>[batch_size * beam_size, >= cur_step + 1]
          tokens decoded so far.
        cur_step: int, index of the token column to feed at this step.
        beam_search_state: dict holding "decoder_state", "formula_mask",
          "range_bool_mask" and, when the corresponding flags are set, the
          encoder outputs, masks and pointer embeddings.

      Returns:
        A (pred_logits, beam_search_state) tuple; the dict is updated with
        the advanced decoder state and vocabulary masks for the next step.
      """
      decoder_state = beam_search_state.get("decoder_state")
      # Current input token: column `cur_step` of the partial sequences.
      input_tokens = tf.slice(
          partial_seqs, [0, cur_step,], [batch_size * beam_size, 1])
      cur_formula_mask = beam_search_state.get("formula_mask")
      cur_range_bool_mask = beam_search_state.get("range_bool_mask")
      sketch_idx = tf.constant(constants.END_FORMULA_SKETCH_ID, dtype=tf.int32)
      # Once a beam emits END_FORMULA_SKETCH, swap its vocabulary mask to the
      # range mask and flip its "now decoding ranges" flag.
      cur_formula_mask = tf.where(
          tf.equal(tf.reshape(input_tokens, [-1]), sketch_idx),
          tf.tile(range_mask, [beam_size, 1]), cur_formula_mask)
      cur_range_bool_mask = tf.where(
          tf.equal(tf.reshape(input_tokens, [-1]), sketch_idx),
          tf.tile(1 - range_bool_mask, [beam_size, 1]), cur_range_bool_mask)
      input_embeddings = tf.nn.embedding_lookup(
          token_embedding,
          input_tokens)
      # Advance the decoder LSTM stack by a single step.
      decoder_output, decoder_state = tf.nn.dynamic_rnn(
          cells,
          input_embeddings,
          initial_state=decoder_state,
          dtype=tf.float32)
      if cell_context_encoding:
        context_encoder_output = beam_search_state.get("context_encoder_output")
        cell_data_mask = beam_search_state.get("cell_data_mask")
        header_mask = beam_search_state.get("header_mask")
        if use_pointer_network:
          output_token_embeddings = beam_search_state.get(
              "output_token_embeddings")
        if max_pooling:
          # Restore the (row, token) structure so attention can be computed
          # per row; the decoder output is broadcast across the rows.
          context_shape = bert_modeling.get_shape_list(
              context_encoder_output, expected_rank=3)
          height = context_shape[1] // max_cell_context_length
          context_encoder_output = tf.reshape(
              context_encoder_output,
              [batch_size * beam_size, height, max_cell_context_length, -1])
          cell_data_mask = tf.reshape(
              cell_data_mask,
              [batch_size * beam_size, height, max_cell_context_length, -1])
          header_mask = tf.reshape(
              header_mask,
              [batch_size * beam_size, height, max_cell_context_length, -1]
          )
          decoder_output = tf.expand_dims(decoder_output, axis=1)
          decoder_output = tf.repeat(
              decoder_output, repeats=height, axis=1)
        # Masked attention of the decoder state over cell-data positions.
        cell_data_attn_vec = tf.layers.dense(
            decoder_output,
            hidden_size,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
            name="cell_data_encoder_attention_layer")
        cell_data_encoder_attn_w = tf.matmul(context_encoder_output,
                                             cell_data_attn_vec,
                                             transpose_b=True)
        cell_data_encoder_attn_w = tf.layers.dropout(
            cell_data_encoder_attn_w, dropout_rate, training=use_dropout,
            name="cell_data_attn_dropout")
        # Large negative bias zeroes out masked positions after the softmax.
        cell_data_encoder_attn_w -= 1e6 * (1 - cell_data_mask)
        cell_data_encoder_attn_w = tf.nn.softmax(cell_data_encoder_attn_w,
                                                 axis=-2)
        cell_data_encoder_embeddings = tf.matmul(cell_data_encoder_attn_w,
                                                 context_encoder_output,
                                                 transpose_a=True)
        cell_data_encoder_vec = tf.layers.dense(
            cell_data_encoder_embeddings,
            hidden_size,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
            name="cell_data_encoder_linear")
        # Same attention scheme over header positions.
        header_attn_vec = tf.layers.dense(
            decoder_output,
            hidden_size,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
            name="header_encoder_attention_layer")
        header_encoder_attn_w = tf.matmul(context_encoder_output,
                                          header_attn_vec,
                                          transpose_b=True)
        header_encoder_attn_w = tf.layers.dropout(
            header_encoder_attn_w, dropout_rate, training=use_dropout,
            name="header_attn_dropout")
        header_encoder_attn_w -= 1e6 * (1 - header_mask)
        header_encoder_attn_w = tf.nn.softmax(header_encoder_attn_w,
                                              axis=-2)
        header_encoder_embeddings = tf.matmul(header_encoder_attn_w,
                                              context_encoder_output,
                                              transpose_a=True)
        header_encoder_vec = tf.layers.dense(
            header_encoder_embeddings,
            hidden_size,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
            name="header_encoder_linear")
        decoder_vec = tf.layers.dense(
            decoder_output,
            hidden_size,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
            name="decoder_linear")
        # Decoder representation becomes [cell-data ctx; header ctx; state].
        decoder_output = tf.concat(
            [cell_data_encoder_vec, header_encoder_vec, decoder_vec], axis=-1)
      # Logits over the formula-sketch vocabulary.
      sketch_logits = tf.layers.dense(
          decoder_output,
          vocab_size,
          activation=None,
          kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
          name="formula_sketch_logit")
      if use_pointer_network:
        # Range logits via pointer attention over the output token embeddings.
        range_attn_vec = tf.layers.dense(
            decoder_output,
            hidden_size,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
            name="range_attention_layer")
        range_logits = tf.matmul(output_token_embeddings,
                                 range_attn_vec,
                                 transpose_b=True)
        range_logits = tf.squeeze(range_logits, axis=-1)
      else:
        # Range logits via a plain projection over the full vocabulary.
        range_logits = tf.layers.dense(
            decoder_output,
            vocab_size,
            activation=None,
            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
            name="range_logit")
        range_logits = tf.squeeze(range_logits, axis=-2)
      if two_stage_decoding:
        # Per example: sketch logits while range_bool_mask == 0, range logits
        # afterwards (after END_FORMULA_SKETCH was produced).
        pred_logits = tf.where(
            tf.equal(tf.reshape(cur_range_bool_mask, [-1]), 0),
            tf.squeeze(sketch_logits, axis=-2),
            range_logits)
      elif use_pointer_network:
        pred_logits = range_logits
      else:
        pred_logits = tf.squeeze(sketch_logits, axis=-2)
      if cell_context_encoding and max_pooling:
        # Collapse the per-row logits into a single distribution.
        pred_logits = tf.reduce_max(pred_logits, axis=1)
      if two_stage_decoding:
        # Disallow tokens outside the currently active vocabulary subset.
        pred_logits -= 1e6 * (1 - cur_formula_mask)
      beam_search_state.update({
          "decoder_state": decoder_state,
          "formula_mask": cur_formula_mask,
          "range_bool_mask": cur_range_bool_mask
      })
      return pred_logits, beam_search_state
logits = []
if cell_context_encoding:
decoder_state = cells.get_initial_state(
batch_size=batch_size, dtype=tf.float32)
_, decoder_state = tf.nn.dynamic_rnn(
cells,
cell_indices_embeddings,
initial_state=decoder_state,
dtype=tf.float32)
elif cell_position_encoding:
decoder_state = encoder_state
else:
decoder_state = cells.get_initial_state(
batch_size=batch_size, dtype=tf.float32)
beam_search_state = {
"decoder_state": decoder_state,
"formula_mask": sketch_mask,
"range_bool_mask": range_bool_mask,
}
if cell_context_encoding:
beam_search_state.update({
"context_encoder_output": context_encoder_output,
"cell_data_mask": cell_data_mask,
"header_mask": header_mask
})
if use_pointer_network:
beam_search_state.update({
"output_token_embeddings": output_token_embeddings
})
initial_input_tokens = tf.constant(
constants.GO_ID, dtype=tf.int32, shape=(1,), name="GO_ids")
initial_input_tokens = tf.tile(initial_input_tokens, [batch_size])
if is_training:
initial_input_tokens = tf.expand_dims(initial_input_tokens, axis=1)
full_formula = tf.concat([initial_input_tokens, formula], axis=1)
for cur_step in range(formula_length):
partial_seqs = tf.slice(
full_formula, [0, 0], [batch_size, cur_step + 1])
pred_logits, beam_search_state = symbols_to_logits(
partial_seqs, cur_step, beam_search_state)
logits.append(pred_logits)
logits = tf.stack(logits, axis=1)
return logits
elif beam_size <= 1:
input_tokens = initial_input_tokens
for cur_step in range(formula_length):
pred_logits, beam_search_state = symbols_to_logits(
input_tokens, cur_step, beam_search_state)
pred_logits = tf.squeeze(pred_logits, axis=1)
logits.append(pred_logits)
input_tokens = tf.argmax(pred_logits, axis=-1,
output_type=tf.int32)
logits = tf.stack(logits, axis=1)
return logits
else:
initial_input_tokens = tf.expand_dims(initial_input_tokens, axis=1)
full_formula = tf.concat([initial_input_tokens, formula], axis=1)
for cur_step in range(formula_prefix_length):
cur_tokens = tf.slice(
full_formula, [0, cur_step], [batch_size, 1])
cur_embeddings = tf.nn.embedding_lookup(
token_embedding,
cur_tokens)
_, decoder_state = tf.nn.dynamic_rnn(
cells,
cur_embeddings,
initial_state=decoder_state,
dtype=tf.float32)
beam_search_state.update({"decoder_state": decoder_state})
initial_input_tokens = tf.slice(
full_formula, [0, formula_prefix_length], [batch_size, 1])
initial_input_tokens = tf.squeeze(initial_input_tokens, axis=1)
beam_seqs, beam_probs, _ = beam_search.beam_search(
symbols_to_logits_fn=symbols_to_logits,
initial_ids=initial_input_tokens,
beam_size=beam_size,
decode_length=formula_length - formula_prefix_length,
vocab_size=vocab_size,
alpha=1.0,
states=beam_search_state,
eos_id=constants.EOF_ID,
stop_early=False,
use_tpu=use_tpu)
if formula_prefix_length > 0:
initial_partial_seqs = tf.slice(
full_formula, [0, 0], [batch_size, formula_prefix_length])
initial_partial_seqs = tf.expand_dims(initial_partial_seqs, axis=1)
initial_partial_seqs = tf.tile(initial_partial_seqs, [1, beam_size, 1])
beam_seqs = tf.concat([initial_partial_seqs, beam_seqs], axis=2)
return beam_seqs, beam_probs
| StarcoderdataPython |
__author__ = 's7a'
# The Sanitizer class
class Sanitizer:
    """Reduces words to their lower-case ASCII-letter content."""

    def __init__(self):
        """No per-instance state; instances exist only for API symmetry."""
        pass

    @staticmethod
    def sanitize_word(word):
        """Return *word* lower-cased with everything but a-z removed."""
        lowered = word.lower()
        kept = [ch for ch in lowered if 'a' <= ch <= 'z']
        return ''.join(kept)
import sys
# TODO: add option or in other way allow developer to enable debug logging.
# Debug logging is deliberately compiled out: the `if False:` guard keeps the
# block from ever running while preserving the code for a developer to flip
# on by hand (see the TODO above about exposing this as a real option).
if False:
    import logging
    logging.basicConfig(level=logging.DEBUG)
def pytest_cmdline_preparse(args):
    """Pytest hook: rewrite the command-line args in place before parsing.

    On Python 3.5 the pylint plugin is broken (astroid issue below), so
    every pylint-related argument is stripped and the plugin is disabled
    explicitly via ``-p no:pylint``:
    <https://bitbucket.org/logilab/astroid/issues/187/call-object-has-no-attribute-starargs>
    """
    if sys.version_info[:2] != (3, 5):
        return
    kept = [arg for arg in args if "pylint" not in arg]
    args[:] = ["-p", "no:pylint"] + kept
| StarcoderdataPython |
# Build a bytearray from a list of ints. bytearray() requires every value to
# be in range(0, 256); the original literal used 258, which raises
# ValueError, so each value is reduced modulo 256 first (258 -> 2).
a3 = [v % 256 for v in (258, 3)]
print(bytearray(a3))
| StarcoderdataPython |
# Repository: gregdavill/rpc-dram-playground -- File: gsd_orangecrab.py
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.lattice import LatticePlatform
from litex.build.dfu import DFUProg
# IOs ----------------------------------------------------------------------------------------------
# I/O resources for OrangeCrab board revision 0.1. Each entry is a LiteX
# resource tuple: (name, index, Pins/Subsignal..., IOStandard/Misc
# constraints). Pin names are ECP5 ball locations.
_io_r0_1 = [
    # Clk / Rst
    ("clk48", 0, Pins("A9"), IOStandard("LVCMOS33")),
    ("rst_n", 0, Pins("R16"), IOStandard("LVCMOS33")),
    # Leds (single-LED aliases of the RGB package pins below)
    ("user_led", 0, Pins("V17"), IOStandard("LVCMOS33")), # rgb_led.r
    ("user_led", 1, Pins("T17"), IOStandard("LVCMOS33")), # rgb_led.g
    ("user_led", 2, Pins("J3"), IOStandard("LVCMOS33")), # rgb_led.b
    ("rgb_led", 0,
        Subsignal("r", Pins("V17"), IOStandard("LVCMOS33")),
        Subsignal("g", Pins("T17"), IOStandard("LVCMOS33")),
        Subsignal("b", Pins("J3"), IOStandard("LVCMOS33")),
    ),
    # DDR3 SDRAM
    ("ddram", 0,
        Subsignal("a", Pins(
            "A4 D2 C3 C7 D3 D4 D1 B2",
            "C1 A2 A7 C2 C4"),
            IOStandard("SSTL135_I")),
        Subsignal("ba", Pins("B6 B7 A6"), IOStandard("SSTL135_I")),
        Subsignal("ras_n", Pins("C12"), IOStandard("SSTL135_I")),
        Subsignal("cas_n", Pins("D13"), IOStandard("SSTL135_I")),
        Subsignal("we_n", Pins("B12"), IOStandard("SSTL135_I")),
        Subsignal("cs_n", Pins("A12"), IOStandard("SSTL135_I")),
        Subsignal("dm", Pins("D16 G16"), IOStandard("SSTL135_I")),
        Subsignal("dq", Pins(
            "C17 D15 B17 C16 A15 B13 A17 A13",
            "F17 F16 G15 F15 J16 C18 H16 F18"),
            IOStandard("SSTL135_I"),
            Misc("TERMINATION=75")),
        Subsignal("dqs_p", Pins("B15 G18"), IOStandard("SSTL135D_I"),
            Misc("TERMINATION=OFF DIFFRESISTOR=100")),
        Subsignal("clk_p", Pins("J18"), IOStandard("SSTL135D_I")),
        Subsignal("cke", Pins("D6"), IOStandard("SSTL135_I")),
        Subsignal("odt", Pins("C13"), IOStandard("SSTL135_I")),
        Subsignal("reset_n", Pins("B1"), IOStandard("SSTL135_I")),
        Misc("SLEWRATE=FAST")
    ),
    # USB (full-speed, soft PHY with external pullup control)
    ("usb", 0,
        Subsignal("d_p", Pins("N1")),
        Subsignal("d_n", Pins("M2")),
        Subsignal("pullup", Pins("N2")),
        IOStandard("LVCMOS33")
    ),
    # SPIFlash (quad mode; CLK is driven through the ECP5 USRMCLK primitive,
    # hence no "clk" Subsignal here)
    ("spiflash4x", 0,
        Subsignal("cs_n", Pins("U17")),
        #Subsignal("clk", Pins("U16")),
        Subsignal("dq", Pins("U18 T18 R18 N18")),
        IOStandard("LVCMOS33")
    ),
    # SPI
    ("spi-internal", 0,
        Subsignal("cs_n", Pins("B11"), Misc("PULLMODE=UP")),
        Subsignal("clk", Pins("C11")),
        Subsignal("miso", Pins("A11"), Misc("PULLMODE=UP")),
        Subsignal("mosi", Pins("A10"), Misc("PULLMODE=UP")),
        Misc("SLEWRATE=SLOW"),
        IOStandard("LVCMOS33"),
    ),
    # SDCard (SPI mode)
    ("spisdcard", 0,
        Subsignal("clk", Pins("K1")),
        Subsignal("mosi", Pins("K2"), Misc("PULLMODE=UP")),
        Subsignal("cs_n", Pins("M1"), Misc("PULLMODE=UP")),
        Subsignal("miso", Pins("J1"), Misc("PULLMODE=UP")),
        Misc("SLEWRATE=FAST"),
        IOStandard("LVCMOS33"),
    ),
]
# I/O resources for OrangeCrab board revision 0.2. Same structure as
# _io_r0_1 but with the revised pinout, a user button, extra flash/SD
# resources, and an RPC DRAM resource.
_io_r0_2 = [
    # Clk / Rst
    ("clk48", 0, Pins("A9"), IOStandard("LVCMOS33")),
    ("rst_n", 0, Pins("V17"), IOStandard("LVCMOS33")),
    # Buttons
    ("usr_btn", 0, Pins("J17"), IOStandard("SSTL135_I")),
    # Leds (single-LED aliases of the RGB package pins below)
    ("user_led", 0, Pins("K4"), IOStandard("LVCMOS33")), # rgb_led.r
    ("user_led", 1, Pins("M3"), IOStandard("LVCMOS33")), # rgb_led.g
    ("user_led", 2, Pins("J3"), IOStandard("LVCMOS33")), # rgb_led.b
    ("rgb_led", 0,
        Subsignal("r", Pins("K4"), IOStandard("LVCMOS33")),
        Subsignal("g", Pins("M3"), IOStandard("LVCMOS33")),
        Subsignal("b", Pins("J3"), IOStandard("LVCMOS33")),
    ),
    # DDR3 SDRAM
    ("ddram", 0,
        Subsignal("a", Pins(
            "C4 D2 D3 A3 A4 D4 C3 B2",
            "B1 D1 A7 C2 B6 C1 A2 C7"),
            IOStandard("SSTL135_I")),
        Subsignal("ba", Pins("D6 B7 A6"), IOStandard("SSTL135_I"),),
        Subsignal("ras_n", Pins("C12"), IOStandard("SSTL135_I")),
        Subsignal("cas_n", Pins("D13"), IOStandard("SSTL135_I")),
        Subsignal("we_n", Pins("B12"), IOStandard("SSTL135_I")),
        Subsignal("cs_n", Pins("A12"), IOStandard("SSTL135_I")),
        Subsignal("dm", Pins("D16 G16"), IOStandard("SSTL135_I")),
        Subsignal("dq", Pins(
            "C17 D15 B17 C16 A15 B13 A17 A13",
            "F17 F16 G15 F15 J16 C18 H16 F18"),
            IOStandard("SSTL135_I"),
            Misc("TERMINATION=OFF")), # Misc("TERMINATION=75") Disabled to reduce heat
        Subsignal("dqs_p", Pins("B15 G18"), IOStandard("SSTL135D_I"),
            Misc("TERMINATION=OFF"),
            Misc("DIFFRESISTOR=100")),
        Subsignal("clk_p", Pins("J18"), IOStandard("SSTL135D_I")),
        Subsignal("cke", Pins("D18"), IOStandard("SSTL135_I")),
        Subsignal("odt", Pins("C13"), IOStandard("SSTL135_I")),
        Subsignal("reset_n", Pins("L18"), IOStandard("SSTL135_I")),
        Subsignal("vccio", Pins("K16 D17 K15 K17 B18 C6"), IOStandard("SSTL135_II")),
        Subsignal("gnd", Pins("L15 L16"), IOStandard("SSTL135_II")),
        Misc("SLEWRATE=FAST")
    ),
    # RPC DRAM
    # NOTE(review): reuses the DDR3 pad group above (cs_n/stb sit on the DM
    # balls, db on the dq balls, clk_p on the same J18) — presumably the two
    # resources are mutually exclusive; confirm against the board schematic.
    ("rpc_dram", 0,
        Subsignal("cs_n", Pins("D16"), IOStandard("SSTL135_I")), #LDM
        Subsignal("stb", Pins("G16"), IOStandard("SSTL135_I")), #UDM
        Subsignal("db", Pins(
            "C17 D15 B17 C16 A15 B13 A17 A13",
            "F17 F16 G15 F15 J16 C18 H16 F18"),
            IOStandard("SSTL135_I"),
            Misc("TERMINATION=OFF")), # Misc("TERMINATION=75") Disabled to reduce heat
        Subsignal("dqs_p", Pins("G18"), IOStandard("SSTL135D_I"), # DQS0
            Misc("TERMINATION=OFF"),
            Misc("DIFFRESISTOR=100")),
        Subsignal("clk_p", Pins("J18"), IOStandard("SSTL135D_I")),
        Subsignal("vccio", Pins("K16 D17 K15 K17 B18 C6"), IOStandard("SSTL135_II")),
        Subsignal("gnd", Pins("L15 L16"), IOStandard("SSTL135_II")),
        Misc("SLEWRATE=FAST")
    ),
    # USB (full-speed, soft PHY with external pullup control)
    ("usb", 0,
        Subsignal("d_p", Pins("N1")),
        Subsignal("d_n", Pins("M2")),
        Subsignal("pullup", Pins("N2")),
        IOStandard("LVCMOS33")
    ),
    # SPIFlash (quad and single-lane views of the same device)
    ("spiflash4x", 0,
        Subsignal("cs_n", Pins("U17"), IOStandard("LVCMOS33")),
        #Subsignal("clk", Pins("U16"), IOStandard("LVCMOS33")),
        Subsignal("dq", Pins("U18 T18 R18 N18"), IOStandard("LVCMOS33")),
    ),
    ("spiflash", 0,
        Subsignal("cs_n", Pins("U17"), IOStandard("LVCMOS33")),
        #Subsignal("clk", Pins("U16"), IOStandard("LVCMOS33")), # Note: CLK is bound using USRMCLK block
        Subsignal("miso", Pins("T18"), IOStandard("LVCMOS33")),
        Subsignal("mosi", Pins("U18"), IOStandard("LVCMOS33")),
        Subsignal("wp", Pins("R18"), IOStandard("LVCMOS33")),
        Subsignal("hold", Pins("N18"), IOStandard("LVCMOS33")),
    ),
    # SDCard (SPI-mode and native 4-bit views of the same slot)
    ("spisdcard", 0,
        Subsignal("clk", Pins("K1")),
        Subsignal("mosi", Pins("K2"), Misc("PULLMODE=UP")),
        Subsignal("cs_n", Pins("M1"), Misc("PULLMODE=UP")),
        Subsignal("miso", Pins("J1"), Misc("PULLMODE=UP")),
        Misc("SLEWRATE=FAST"),
        IOStandard("LVCMOS33"),
    ),
    ("sdcard", 0,
        Subsignal("clk", Pins("K1")),
        Subsignal("cmd", Pins("K2"), Misc("PULLMODE=UP")),
        Subsignal("data", Pins("J1 K3 L3 M1"), Misc("PULLMODE=UP")),
        IOStandard("LVCMOS33"), Misc("SLEWRATE=FAST")
    ),
]
# Connectors ---------------------------------------------------------------------------------------
# Rev 0.1 connector map: the feather_* resources below reference positions
# in this space-separated pin string as "GPIO:<n>"; "-" marks positions
# without an FPGA pin.
_connectors_r0_1 = [
    # Feather 0.1" Header Pin Numbers,
    # Note: Pin numbering is not continuous.
    ("GPIO", "N17 M18 C10 C9 - B10 B9 - - C8 B8 A8 H2 J2 N15 R17 N16 - - - - - - - -"),
]
# Rev 0.2 connector map: same scheme as rev 0.1, with additional pins routed
# at the tail of the header.
_connectors_r0_2 = [
    # Feather 0.1" Header Pin Numbers,
    # Note: Pin numbering is not continuous.
    ("GPIO", "N17 M18 C10 C9 - B10 B9 - - C8 B8 A8 H2 J2 N15 R17 N16 - L4 N3 N4 H4 G4 T17"),
]
# Standard Feather Pins
# Standard Feather UART: TX/RX on GPIO header positions 1/0.
feather_serial = [
    ("serial", 0,
        Subsignal("tx", Pins("GPIO:1"), IOStandard("LVCMOS33")),
        Subsignal("rx", Pins("GPIO:0"), IOStandard("LVCMOS33"))
    )
]
# Standard Feather I2C: SDA/SCL on GPIO header positions 2/3.
feather_i2c = [
    ("i2c", 0,
        Subsignal("sda", Pins("GPIO:2"), IOStandard("LVCMOS33")),
        Subsignal("scl", Pins("GPIO:3"), IOStandard("LVCMOS33"))
    )
]
# Standard Feather SPI: MISO/CLK/MOSI on GPIO header positions 14/15/16.
feather_spi = [
    ("spi", 0,
        Subsignal("miso", Pins("GPIO:14"), IOStandard("LVCMOS33")),
        Subsignal("mosi", Pins("GPIO:16"), IOStandard("LVCMOS33")),
        Subsignal("clk", Pins("GPIO:15"), IOStandard("LVCMOS33"))
    )
]
# Platform -----------------------------------------------------------------------------------------
class Platform(LatticePlatform):
    """LiteX platform for the OrangeCrab (Lattice ECP5) board.

    Supports hardware revisions 0.1 and 0.2, which differ in pinout and in
    the extra resources available (see the _io_r0_* tables above).
    """
    default_clk_name = "clk48"
    default_clk_period = 1e9/48e6

    def __init__(self, revision="0.2", device="25F", toolchain="trellis", **kwargs):
        assert revision in ["0.1", "0.2"]
        self.revision = revision
        # Select the pinout/connector tables for the requested revision.
        variants = {
            "0.1": (_io_r0_1, _connectors_r0_1),
            "0.2": (_io_r0_2, _connectors_r0_2),
        }
        io, connectors = variants[revision]
        LatticePlatform.__init__(self, f"LFE5U-{device}-8MG285C", io, connectors, toolchain=toolchain, **kwargs)

    def create_programmer(self):
        """Return a DFU programmer targeting the OrangeCrab bootloader."""
        return DFUProg(vid="1209", pid="5af0", alt=0)

    def do_finalize(self, fragment):
        """Finalize the design and constrain clk48 to its 48 MHz period."""
        LatticePlatform.do_finalize(self, fragment)
        self.add_period_constraint(self.lookup_request("clk48", loose=True), 1e9/48e6)
| StarcoderdataPython |
'''
May 2020 by <NAME>
<EMAIL>
https://www.github.com/sebbarb/
'''
import feather
import pandas as pd
import numpy as np
from hyperparameters import Hyperparameters
from pdb import set_trace as bp
def _print_sex_pct(label, n_males, n_females, num_males, num_females):
    """Print a count and its percentage of each sex, for men then women.

    Output format matches 'LABEL Men: N (P%)' / 'LABEL Women: N (P%)'.
    """
    print('{} Men: {} ({:.1f}%)'.format(label, n_males, 100*n_males/num_males))
    print('{} Women: {} ({:.1f}%)'.format(label, n_females, 100*n_females/num_females))


def main():
    """Preprocess the VARIANZ 2012 cohort for the risk models.

    Loads the raw feather file, applies exclusions, derives the
    antiplatelet/anticoagulant flag and the TIME/EVENT outcome columns,
    prints descriptive statistics by sex, centers age and deprivation,
    builds interaction terms, and writes one preprocessed feather file per
    sex plus the centering constants (as .npz) for later reuse.
    """
    hp = Hyperparameters()

    # Load data
    #df = feather.read_dataframe(hp.data_dir + 'Py_VARIANZ_2012_v3-1.feather')
    df = pd.read_feather(hp.data_dir + 'Py_VARIANZ_2012_v3-1.feather')

    # Exclude: people dispensed loop diuretics or antianginals, and people
    # with no follow-up end date.
    df = df[~df['ph_loopdiuretics_prior_5yrs_3evts'].astype(bool)]
    df = df[~df['ph_antianginals_prior_5yrs_3evts' ].astype(bool)]
    df.dropna(subset=['end_fu_date'], inplace=True)

    # Adjust data types
    df['nhi_age'] = df['nhi_age'].astype(int)
    df['gender_code'] = df['gender_code'].astype(bool)
    df['en_prtsd_eth'] = df['en_prtsd_eth'].astype(int)
    df['en_nzdep_q'] = df['en_nzdep_q'].astype(int)
    df['hx_vdr_diabetes'] = df['hx_vdr_diabetes'].astype(bool)
    df['hx_af'] = df['hx_af'].astype(bool)
    df['ph_bp_lowering_prior_6mths'] = df['ph_bp_lowering_prior_6mths'].astype(bool)
    df['ph_lipid_lowering_prior_6mths'] = df['ph_lipid_lowering_prior_6mths'].astype(bool)
    df['ph_anticoagulants_prior_6mths'] = df['ph_anticoagulants_prior_6mths'].astype(bool)
    df['ph_antiplatelets_prior_6mths'] = df['ph_antiplatelets_prior_6mths'].astype(bool)
    df['out_broad_cvd_adm_date'] = pd.to_datetime(df['out_broad_cvd_adm_date'], format='%Y-%m-%d', errors='coerce')
    df['end_fu_date'] = pd.to_datetime(df['end_fu_date'], format='%Y-%m-%d', errors='coerce')

    # Map Other Asian (4), Chinese (42), MELAA (5) to 'other' (9)
    df['en_prtsd_eth'].replace({4:9, 42:9, 5:9}, inplace=True)

    # Create combined antiplatelet/anticoagulant column
    df['ph_antiplat_anticoag_prior_6mths'] = df['ph_antiplatelets_prior_6mths'] | df['ph_anticoagulants_prior_6mths']

    # Time to event (days since study start, 31 Dec 2012) and binary event column
    df['EVENT_DATE'] = df[['out_broad_cvd_adm_date', 'end_fu_date']].min(axis=1)
    beginning = pd.to_datetime({'year':[2012], 'month':[12], 'day':[31]})[0]
    df['TIME'] = (df['EVENT_DATE'] - beginning).dt.days.astype(int)
    df['EVENT'] = df['out_broad_cvd'] | df['imp_fatal_cvd']

    # Descriptive statistics ----------------------------------------------
    # Rows where gender_code is truthy are reported as "Men" below.
    male, female = df['gender_code'], ~df['gender_code']
    num_participants = len(df.index)
    print('Total participants: {}'.format(num_participants))
    num_males = len(df.loc[male].index)
    num_females = len(df.loc[female].index)
    # Sex counts are percentages of the whole cohort (not of each sex).
    print('Men: {} ({:.1f}%)'.format(num_males, 100*num_males/num_participants))
    print('Women: {} ({:.1f}%)'.format(num_females, 100*num_females/num_participants))
    print('Age Men: {:.1f} ({:.1f})'.format(df.loc[male, 'nhi_age'].mean(), df.loc[male, 'nhi_age'].std()))
    print('Age Women: {:.1f} ({:.1f})'.format(df.loc[female, 'nhi_age'].mean(), df.loc[female, 'nhi_age'].std()))

    # Ethnicity breakdown (percentages within each sex).
    for label, code in [('NZE', 1), ('Maori', 2), ('Pacific', 3), ('Indian', 43), ('Other', 9)]:
        _print_sex_pct(label,
                       (df.loc[male, 'en_prtsd_eth'] == code).sum(),
                       (df.loc[female, 'en_prtsd_eth'] == code).sum(),
                       num_males, num_females)

    # Deprivation quintiles 1-5.
    for q in range(1, 6):
        _print_sex_pct('dp{}'.format(q),
                       (df.loc[male, 'en_nzdep_q'] == q).sum(),
                       (df.loc[female, 'en_nzdep_q'] == q).sum(),
                       num_males, num_females)

    # Medical history / pharmacotherapy flags.
    for label, col in [('Diabetes', 'hx_vdr_diabetes'),
                       ('AF', 'hx_af'),
                       ('BP', 'ph_bp_lowering_prior_6mths'),
                       ('LL', 'ph_lipid_lowering_prior_6mths'),
                       ('APAC', 'ph_antiplat_anticoag_prior_6mths')]:
        _print_sex_pct(label,
                       df.loc[male, col].sum(),
                       df.loc[female, col].sum(),
                       num_males, num_females)

    # Follow-up in person-years: total (mean per person).
    print('Follow up Men: {:.0f} ({:.1f})'.format(df.loc[male, 'TIME'].sum()/365, df.loc[male, 'TIME'].mean()/365))
    print('Follow up Women: {:.0f} ({:.1f})'.format(df.loc[female, 'TIME'].sum()/365, df.loc[female, 'TIME'].mean()/365))

    _print_sex_pct('CVD death',
                   df.loc[male, 'imp_fatal_cvd'].sum(), df.loc[female, 'imp_fatal_cvd'].sum(),
                   num_males, num_females)
    _print_sex_pct('CVD event',
                   df.loc[male, 'EVENT'].sum(), df.loc[female, 'EVENT'].sum(),
                   num_males, num_females)

    # Time to CVD event in years, among those with an event: median (Q1, Q3).
    tmp_males = df.loc[male & df['EVENT'], 'TIME']/365
    tmp_females = df.loc[female & df['EVENT'], 'TIME']/365
    print('Time to CVD Men: {:.1f} ({:.1f}, {:.1f})'.format(tmp_males.median(), tmp_males.quantile(0.25), tmp_males.quantile(0.75)))
    print('Time to CVD Women: {:.1f} ({:.1f}, {:.1f})'.format(tmp_females.median(), tmp_females.quantile(0.25), tmp_females.quantile(0.75)))

    # Administratively censored: event-free at maximum follow-up (1826 days = 5 years).
    _print_sex_pct('Censored at 5 years',
                   (1-df.loc[male & (df['TIME'] == 1826), 'EVENT']).sum(),
                   (1-df.loc[female & (df['TIME'] == 1826), 'EVENT']).sum(),
                   num_males, num_females)

    # Center age and deprivation index, separately for males and females
    mean_age_males = df.loc[male, 'nhi_age'].mean()
    mean_age_females = df.loc[female, 'nhi_age'].mean()
    df.loc[male, 'nhi_age'] = df.loc[male, 'nhi_age'] - mean_age_males
    df.loc[female, 'nhi_age'] = df.loc[female, 'nhi_age'] - mean_age_females
    # Deprivation is centered on the middle quintile (3), not the sample mean.
    mean_nzdep_males = 3
    mean_nzdep_females = 3
    df.loc[male, 'en_nzdep_q'] = df.loc[male, 'en_nzdep_q'] - mean_nzdep_males
    df.loc[female, 'en_nzdep_q'] = df.loc[female, 'en_nzdep_q'] - mean_nzdep_females

    # Create interaction columns (using the centered age)
    df['age_X_bp'] = df['nhi_age'] * df['ph_bp_lowering_prior_6mths']
    df['age_X_diabetes'] = df['nhi_age'] * df['hx_vdr_diabetes']
    df['age_X_af'] = df['nhi_age'] * df['hx_af']
    df['bp_X_diabetes'] = df['ph_bp_lowering_prior_6mths'] & df['hx_vdr_diabetes']
    df['antiplat_anticoag_X_diabetes'] = df['ph_antiplat_anticoag_prior_6mths'] & df['hx_vdr_diabetes']
    df['bp_X_lipid'] = df['ph_bp_lowering_prior_6mths'] & df['ph_lipid_lowering_prior_6mths']

    # Keep all VARIANZ risk equations columns
    keep_cols = ['VSIMPLE_INDEX_MASTER', 'nhi_age', 'gender_code', 'en_prtsd_eth', 'en_nzdep_q',
                 'hx_vdr_diabetes', 'hx_af', 'ph_bp_lowering_prior_6mths', 'ph_lipid_lowering_prior_6mths',
                 'ph_antiplat_anticoag_prior_6mths', 'age_X_bp', 'age_X_diabetes', 'age_X_af',
                 'bp_X_diabetes', 'antiplat_anticoag_X_diabetes', 'bp_X_lipid', 'TIME', 'EVENT']
    df = df[keep_cols]

    # Save one preprocessed feather file per sex, plus the centering
    # constants needed to transform new data the same way.
    df_males = df[male]
    df_males.reset_index(drop=True, inplace=True)
    df_males.to_feather(hp.data_pp_dir + 'Py_VARIANZ_2012_v3-1_pp_males.feather')
    np.savez(hp.data_pp_dir + 'means_males.npz', mean_age=mean_age_males, mean_nzdep=mean_nzdep_males)
    df_females = df[female]
    df_females.reset_index(drop=True, inplace=True)
    df_females.to_feather(hp.data_pp_dir + 'Py_VARIANZ_2012_v3-1_pp_females.feather')
    np.savez(hp.data_pp_dir + 'means_females.npz', mean_age=mean_age_females, mean_nzdep=mean_nzdep_females)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
11340097 | <filename>src/segmentation/SegmentationTransformer.py<gh_stars>1-10
import time
from sklearn.pipeline import Pipeline
from src.segmentation.VCReaderTransformer import VCReaderTransformer
from src.segmentation.AutographerImageExtractorTransformer import AutographerImageExtractorTransformer
from src.segmentation.DaysExtractionTransformer import DaysExtractionTransformer
from src.segmentation.DaysProcessorTransformer import DaysProcessorTransformer
from src.segmentation.CachableTransformerBase import CachableTransformerBase
from src.common.helper import clear_cache
from src.segmentation.SegmentationTransformer_opts import *
class SegmentationTransformer(CachableTransformerBase):
    """Run the full image segmentation pipeline for one user.

    Input: user id (taken from the options dict).
    Output: per day, a list of segments, each segment being a list of image ids.
    """

    def __init__(self, opts: dict):
        self.opts = opts
        super().__init__(usr=self.opts[opt_usr], is_dirty=True)

    def before_transform(self, _):
        # Remember the start time so after_transform() can report the duration.
        self.start = time.time()
        print("Perform segmentation...")
        if self.opts[opt_clear_cache]:
            print("Cleaning cache...")
            clear_cache(self.usr)

    def transform_core(self, _):
        # Chain the individual segmentation stages into one pipeline.
        stages = [
            ("read_vc", VCReaderTransformer(usr=self.usr)),
            ("extract_auto_imgs", AutographerImageExtractorTransformer(usr=self.usr)),
            ("extract_days", DaysExtractionTransformer(usr=self.usr)),
            ("segment_days", DaysProcessorTransformer(settings=self.opts)),
        ]
        return Pipeline(stages).transform([])

    def get_file_count(self, days):
        # Total number of images across all segments of all days.
        return sum(len(segment) for segments in days for segment in segments)

    def after_transform(self, days):
        original_files_count = len(AutographerImageExtractorTransformer(usr=self.usr).from_cache())
        segment_count = sum(len(segments) for segments in days)
        image_count = self.get_file_count(days)
        print("Original image count:", str(original_files_count))
        print("Final image count:", str(image_count))
        print("Reduzed image count by:", str(round(100-image_count/original_files_count*100,0)) + "%")
        print("Final segment count:", str(segment_count))
        mins = int(round((time.time() - self.start) / 60.0, 0))
        print("Total duration of segmentation process:", str(mins), "minutes")
| StarcoderdataPython |
397701 | <filename>utils/cryptoFunc.py
# coding=utf-8
import hashlib
from Crypto.Cipher import DES, AES
import base64
# Module-wide DES cipher for the legacy base64 payloads.  The second argument
# is the cipher mode constant (1, i.e. ECB mode in PyCrypto).
# NOTE(review): ECB with a hard-coded key is cryptographically weak — confirm
# this is only used for obfuscation, not real secrecy.
mdes = DES.new(b'!z*EaY0e', 1)
def decode_base64(text, user_agent=""):
    """Decrypt a base64-encoded payload, picking the cipher by client type.

    Clients whose user agent contains 'iOS' or 'CFNetwork' use the AES
    helper; all other clients use the module-level DES cipher.  Falsy input
    is returned unchanged.
    """
    if not text:
        return text
    is_ios_client = 'iOS' in user_agent or "CFNetwork" in user_agent
    if is_ios_client:
        return aes_func.decrypt(text)
    raw = mdes.decrypt(base64.decodebytes(text.encode("utf-8")))
    pad = raw[-1]  # last byte holds the padding length (PKCS#7-style)
    return raw[:-pad].decode("utf-8")
def encrypt_base64(text):
    """DES-encrypt *text* and return it base64-encoded on a single line.

    The input is padded to a multiple of the 8-byte DES block size; each pad
    byte holds the pad length (PKCS#7-style), so a full block of padding is
    added when the length is already a multiple of 8.
    """
    pad = 8 - len(text) % 8
    padded = (text + chr(pad) * pad).encode("utf-8")
    encoded = base64.encodebytes(mdes.encrypt(padded))
    return encoded.decode("utf-8").replace('\n', '')
class PrpCrypt(object):
    """AES-CBC encryption/decryption with key material derived via SHA-384.

    The first 32 bytes of the SHA-384 digest of the key become the AES key
    and the following 16 bytes the IV.  Ciphertexts are exchanged as
    single-line base64 strings.
    """

    def __init__(self, key):
        self.key = key
        self.mode = AES.MODE_CBC
        self.cipher_text = None

    def get_cryptor(self):
        # A fresh cipher object per operation: CBC-mode objects are stateful.
        digest = hashlib.sha384(self.key.encode('utf-8')).digest()
        return AES.new(digest[0:32], self.mode, digest[32:48])

    def decrypt(self, text):
        """Decrypt a base64 string produced by encrypt().

        Falsy input is returned unchanged; padding is stripped using the
        last byte as the pad length.
        """
        if not text:
            return text
        decrypted = self.get_cryptor().decrypt(base64.decodebytes(text.encode("utf-8")))
        pad = decrypted[-1]
        return decrypted[:-pad].decode('utf-8')

    def encrypt(self, text):
        """Encrypt *text* and return single-line base64 ciphertext.

        The input is padded to the 16-byte AES block size, each pad byte
        holding the pad length (PKCS#7-style).
        """
        pad = 16 - len(text) % 16
        padded = (text + chr(pad) * pad).encode('utf-8')
        encoded = base64.encodebytes(self.get_cryptor().encrypt(padded))
        return encoded.decode('utf-8').replace('\n', '')
# Shared AES helper; note it reuses the same hard-coded key as the DES cipher above.
aes_func = PrpCrypt('!z*EaY0e')

if __name__ == '__main__':
    # Ad-hoc smoke test: decrypt a sample DES/base64 payload and print it.
    enc_t = decode_base64("JANz27XK5SlLaI0YCr67dY3tvQ73clwHxx0NJu80NdcKTn6oH1IYCA==")
    print(enc_t)
| StarcoderdataPython |
1900153 | import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
# Python 2/3 shim: on Python 3 the builtin ``unicode`` does not exist, so
# fall back to ``str``; on Python 2 the probe succeeds and the builtin is kept.
try:
    UNICODE_EXISTS = bool(type(unicode))
except NameError:
    unicode = lambda s: str(s)

from collections import namedtuple

import click

# Application name used for the per-user configuration directory.
APP_NAME = "zotcli"

# Lightweight record for a bibliographic item handled by zotcli.
Item = namedtuple("Item", ("key", "creator", "title", "abstract", "tags", "date", "citekey"))
def _get_config_path():
    """Return the path of ``config.ini`` inside the app's config directory."""
    app_dir = click.get_app_dir(APP_NAME)
    return os.path.join(app_dir, 'config.ini')
def load_config():
    """Read the application's INI configuration file and flatten it.

    :returns: mapping of ``"section.option"`` to value
    :rtype: (flat) dict
    :raises ValueError: if no configuration file exists yet
    """
    cfg_path = _get_config_path()
    if not os.path.exists(cfg_path):
        raise ValueError("Could not find configuration file. Please run "
                         "`zotcli configure` to perform the first-time "
                         "setup.")
    parser = configparser.RawConfigParser()
    parser.read([cfg_path])
    # Flatten sections into "section.key" entries.
    return {'%s.%s' % (section, key): value
            for section in parser.sections()
            for key, value in parser.items(section)}
def save_config(cfgdata):
    """ Save configuration to application directory.

    All entries are written into a single ``[zotcli]`` section; the config
    directory is created on first use.

    :param cfgdata: Configuration
    :type cfgdata: (flat) dict
    """
    cfg_path = _get_config_path()
    cfg_dir = os.path.dirname(cfg_path)
    if not os.path.exists(cfg_dir):
        os.makedirs(cfg_dir)
    # RawConfigParser matches load_config() and, unlike the deprecated
    # SafeConfigParser (removed in Python 3.12), performs no '%'-interpolation,
    # so values containing '%' round-trip unchanged.
    cfg = configparser.RawConfigParser()
    cfg.add_section("zotcli")
    for key, value in cfgdata.items():
        cfg.set("zotcli", key, unicode(value))
    with open(cfg_path, "w") as fp:
        cfg.write(fp)
| StarcoderdataPython |
3422685 | import os
from io import StringIO
from pathlib import Path
from quom import Quom
from quom.__main__ import main
# Header fixture: a declaration whose definition lives in a separate .cpp file.
FILE_MAIN_HPP = """
int foo = 3;
int foo();
"""

# Source fixture providing the definition of foo().
FILE_MAIN_CPP = """
int foo() { return 42; }
"""

# Expected single-file output after quom merges header and source.
RESULT = """
int foo = 3;
int foo();
int foo() { return 42; }
"""
def test_source_directory(fs):
    """Matching sources are found via relative, resolved, and absolute dirs."""
    os.makedirs('project/')
    os.chdir('project/')
    os.makedirs('include/')
    os.makedirs('src/')

    Path('include/main.hpp').write_text(FILE_MAIN_HPP)
    Path('src/main.cpp').write_text(FILE_MAIN_CPP)

    # Without a source directory the .cpp is never merged in.
    buffer = StringIO()
    Quom(Path('include/main.hpp'), buffer)
    assert buffer.getvalue() != RESULT

    # Relative source directory.
    buffer = StringIO()
    Quom(Path('include/main.hpp'), buffer, relative_source_directories=[Path('../src')])
    assert buffer.getvalue() == RESULT

    # Resolved (absolute) source directory.
    buffer = StringIO()
    Quom(Path('include/main.hpp'), buffer, source_directories=[Path('src').resolve()])
    assert buffer.getvalue() == RESULT

    # Absolute path given directly.
    buffer = StringIO()
    Quom(Path('include/main.hpp'), buffer, source_directories=[Path('/project/src')])
    assert buffer.getvalue() == RESULT

    # The same three variants through the command-line interface.
    main(['include/main.hpp', 'result.hpp', '-S', './../src'])
    assert Path('result.hpp').read_text() == RESULT

    main(['include/main.hpp', 'result.hpp', '-S', 'src'])
    assert Path('result.hpp').read_text() == RESULT

    main(['include/main.hpp', 'result.hpp', '-S', '/project/src'])
    assert Path('result.hpp').read_text() == RESULT
| StarcoderdataPython |
6508037 | import os
import requests
import json
# --- Connection settings ---------------------------------------------------
# OpsCenter host exposing the Lifecycle Manager (LCM) API.
server_ip = "172.16.17.32"
# Display name for the machine credential to be created.
creds_name = "<NAME>"
# SSH login user on the managed nodes.
username = "ubuntu"
# NOTE(review): placeholder — replace with real key material before running.
privateKey = "paste your SSH private key here"
base_url = 'http://%s:8888/api/v2/lcm/' % server_ip
# Session token is read from the environment; empty string if not logged in.
opscenter_session = os.environ.get('opscenter_session', '')
def do_post(url, post_data):
result = requests.post(base_url + url,
data=json.dumps(post_data),
headers={'Content-Type': 'application/json', 'opscenter-session': opscenter_session})
print repr(result.text)
result_data = json.loads(result.text)
return result_data
# Register the SSH machine credential in LCM and report the id it was assigned.
machine_credential_response = do_post("machine_credentials/",
                                      {"name": creds_name,
                                       "login-user": username,
                                       "become-mode": "sudo",
                                       "ssh-private-key": privateKey,
                                       "use-ssh-keys": True
                                       }
                                      )
machine_credential_id = machine_credential_response['id']
print "\nmachine_credential_id: " + machine_credential_id
| StarcoderdataPython |
12853918 | from django.urls import path, re_path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework.routers import SimpleRouter, DefaultRouter
from rest_framework_simplejwt import views as jwt_views
from api.views import *
# The router generates the URL patterns for the view set, so they don't have
# to be written out by hand.
router = SimpleRouter()
router.register("baskets", BasketViewSet, "baskets")

# OpenAPI schema used by the Swagger / ReDoc documentation views below.
schema_view = get_schema_view(
    openapi.Info(
        title="Snippets API",
        default_version="v1",
        description="Test description",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="<EMAIL>"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
)

urlpatterns = [
    path("check/", check_api_view, name="check-api"),
    # JWT authentication endpoints.
    path("token/", jwt_views.TokenObtainPairView.as_view(), name="token-obtain-pair"),
    path("token/refresh/", jwt_views.TokenRefreshView.as_view(), name="token-refresh"),
    *router.urls,
    # API schema (raw JSON/YAML) and interactive documentation.
    re_path(r"swagger(?P<format>\.json|\.yaml)$", schema_view.without_ui(cache_timeout=0), name="schema-json"),
    path("swagger/", schema_view.with_ui("swagger", cache_timeout=0), name="schema-swagger-ui"),
    path("redoc/", schema_view.with_ui("redoc", cache_timeout=0), name="schema-redoc"),
]
| StarcoderdataPython |
6511037 | <filename>rules/ordinal_mappings/advapi32.py
mapping = {
1001:'I_ScGetCurrentGroupStateW',
1005:'AbortSystemShutdownA',
1006:'AbortSystemShutdownW',
1007:'AccessCheck',
1008:'AccessCheckAndAuditAlarmA',
1009:'AccessCheckAndAuditAlarmW',
1010:'AccessCheckByType',
1011:'AccessCheckByTypeAndAuditAlarmA',
1012:'AccessCheckByTypeAndAuditAlarmW',
1013:'AccessCheckByTypeResultList',
1014:'AccessCheckByTypeResultListAndAuditAlarmA',
1015:'AccessCheckByTypeResultListAndAuditAlarmByHandleA',
1016:'AccessCheckByTypeResultListAndAuditAlarmByHandleW',
1017:'AccessCheckByTypeResultListAndAuditAlarmW',
1018:'AddAccessAllowedAce',
1019:'AddAccessAllowedAceEx',
1020:'AddAccessAllowedObjectAce',
1021:'AddAccessDeniedAce',
1022:'AddAccessDeniedAceEx',
1023:'AddAccessDeniedObjectAce',
1024:'AddAce',
1025:'AddAuditAccessAce',
1026:'AddAuditAccessAceEx',
1027:'AddAuditAccessObjectAce',
1028:'AddConditionalAce',
1030:'AddUsersToEncryptedFile',
1031:'AddUsersToEncryptedFileEx',
1032:'AdjustTokenGroups',
1033:'AdjustTokenPrivileges',
1034:'AllocateAndInitializeSid',
1035:'AllocateLocallyUniqueId',
1036:'AreAllAccessesGranted',
1037:'AreAnyAccessesGranted',
1038:'AuditComputeEffectivePolicyBySid',
1039:'AuditComputeEffectivePolicyByToken',
1040:'AuditEnumerateCategories',
1041:'AuditEnumeratePerUserPolicy',
1042:'AuditEnumerateSubCategories',
1043:'AuditFree',
1044:'AuditLookupCategoryGuidFromCategoryId',
1045:'AuditLookupCategoryIdFromCategoryGuid',
1046:'AuditLookupCategoryNameA',
1047:'AuditLookupCategoryNameW',
1048:'AuditLookupSubCategoryNameA',
1049:'AuditLookupSubCategoryNameW',
1050:'AuditQueryGlobalSaclA',
1051:'AuditQueryGlobalSaclW',
1052:'AuditQueryPerUserPolicy',
1053:'AuditQuerySecurity',
1054:'AuditQuerySystemPolicy',
1055:'AuditSetGlobalSaclA',
1056:'AuditSetGlobalSaclW',
1057:'AuditSetPerUserPolicy',
1058:'AuditSetSecurity',
1059:'AuditSetSystemPolicy',
1060:'BackupEventLogA',
1061:'BackupEventLogW',
1062:'BuildExplicitAccessWithNameA',
1063:'BuildExplicitAccessWithNameW',
1064:'BuildImpersonateExplicitAccessWithNameA',
1065:'BuildImpersonateExplicitAccessWithNameW',
1066:'BuildImpersonateTrusteeA',
1067:'BuildImpersonateTrusteeW',
1068:'BuildSecurityDescriptorA',
1069:'BuildSecurityDescriptorW',
1070:'BuildTrusteeWithNameA',
1071:'BuildTrusteeWithNameW',
1072:'BuildTrusteeWithObjectsAndNameA',
1073:'BuildTrusteeWithObjectsAndNameW',
1074:'BuildTrusteeWithObjectsAndSidA',
1075:'BuildTrusteeWithObjectsAndSidW',
1076:'BuildTrusteeWithSidA',
1077:'BuildTrusteeWithSidW',
1078:'CancelOverlappedAccess',
1079:'ChangeServiceConfig2A',
1080:'ChangeServiceConfig2W',
1081:'ChangeServiceConfigA',
1082:'ChangeServiceConfigW',
1083:'CheckTokenMembership',
1084:'ClearEventLogA',
1085:'ClearEventLogW',
1086:'CloseCodeAuthzLevel',
1087:'CloseEncryptedFileRaw',
1088:'CloseEventLog',
1089:'CloseServiceHandle',
1090:'CloseThreadWaitChainSession',
1091:'CloseTrace',
1092:'CommandLineFromMsiDescriptor',
1093:'ComputeAccessTokenFromCodeAuthzLevel',
1094:'ControlService',
1095:'ControlServiceExA',
1096:'ControlServiceExW',
1097:'ControlTraceA',
1098:'ControlTraceW',
1099:'ConvertAccessToSecurityDescriptorA',
1100:'ConvertAccessToSecurityDescriptorW',
1101:'ConvertSDToStringSDRootDomainA',
1102:'ConvertSDToStringSDRootDomainW',
1103:'ConvertSecurityDescriptorToAccessA',
1104:'ConvertSecurityDescriptorToAccessNamedA',
1105:'ConvertSecurityDescriptorToAccessNamedW',
1106:'ConvertSecurityDescriptorToAccessW',
1107:'ConvertSecurityDescriptorToStringSecurityDescriptorA',
1108:'ConvertSecurityDescriptorToStringSecurityDescriptorW',
1109:'ConvertSidToStringSidA',
1110:'ConvertSidToStringSidW',
1111:'ConvertStringSDToSDDomainA',
1112:'ConvertStringSDToSDDomainW',
1113:'ConvertStringSDToSDRootDomainA',
1114:'ConvertStringSDToSDRootDomainW',
1115:'ConvertStringSecurityDescriptorToSecurityDescriptorA',
1116:'ConvertStringSecurityDescriptorToSecurityDescriptorW',
1117:'ConvertStringSidToSidA',
1118:'ConvertStringSidToSidW',
1119:'ConvertToAutoInheritPrivateObjectSecurity',
1120:'CopySid',
1121:'CreateCodeAuthzLevel',
1122:'CreatePrivateObjectSecurity',
1123:'CreatePrivateObjectSecurityEx',
1124:'CreatePrivateObjectSecurityWithMultipleInheritance',
1125:'CreateProcessAsUserA',
1126:'CreateProcessAsUserW',
1127:'CreateProcessWithLogonW',
1128:'CreateProcessWithTokenW',
1129:'CreateRestrictedToken',
1130:'CreateServiceA',
1131:'CreateServiceW',
1133:'CreateWellKnownSid',
1134:'CredBackupCredentials',
1135:'CredDeleteA',
1136:'CredDeleteW',
1137:'CredEncryptAndMarshalBinaryBlob',
1138:'CredEnumerateA',
1139:'CredEnumerateW',
1140:'CredFindBestCredentialA',
1141:'CredFindBestCredentialW',
1142:'CredFree',
1143:'CredGetSessionTypes',
1144:'CredGetTargetInfoA',
1145:'CredGetTargetInfoW',
1146:'CredIsMarshaledCredentialA',
1147:'CredIsMarshaledCredentialW',
1148:'CredIsProtectedA',
1149:'CredIsProtectedW',
1150:'CredMarshalCredentialA',
1151:'CredMarshalCredentialW',
1152:'CredProfileLoaded',
1153:'CredProfileUnloaded',
1154:'CredProtectA',
1155:'CredProtectW',
1156:'CredReadA',
1157:'CredReadByTokenHandle',
1158:'CredReadDomainCredentialsA',
1159:'CredReadDomainCredentialsW',
1160:'CredReadW',
1161:'CredRenameA',
1162:'CredRenameW',
1163:'CredRestoreCredentials',
1164:'CredUnmarshalCredentialA',
1165:'CredUnmarshalCredentialW',
1166:'CredUnprotectA',
1167:'CredUnprotectW',
1168:'CredWriteA',
1169:'CredWriteDomainCredentialsA',
1170:'CredWriteDomainCredentialsW',
1171:'CredWriteW',
1172:'CredpConvertCredential',
1173:'CredpConvertOneCredentialSize',
1174:'CredpConvertTargetInfo',
1175:'CredpDecodeCredential',
1176:'CredpEncodeCredential',
1177:'CredpEncodeSecret',
1178:'CryptAcquireContextA',
1179:'CryptAcquireContextW',
1180:'CryptContextAddRef',
1181:'CryptCreateHash',
1182:'CryptDecrypt',
1183:'CryptDeriveKey',
1184:'CryptDestroyHash',
1185:'CryptDestroyKey',
1186:'CryptDuplicateHash',
1187:'CryptDuplicateKey',
1188:'CryptEncrypt',
1189:'CryptEnumProviderTypesA',
1190:'CryptEnumProviderTypesW',
1191:'CryptEnumProvidersA',
1192:'CryptEnumProvidersW',
1193:'CryptExportKey',
1194:'CryptGenKey',
1195:'CryptGenRandom',
1196:'CryptGetDefaultProviderA',
1197:'CryptGetDefaultProviderW',
1198:'CryptGetHashParam',
1199:'CryptGetKeyParam',
1200:'CryptGetProvParam',
1201:'CryptGetUserKey',
1202:'CryptHashData',
1203:'CryptHashSessionKey',
1204:'CryptImportKey',
1205:'CryptReleaseContext',
1206:'CryptSetHashParam',
1207:'CryptSetKeyParam',
1208:'CryptSetProvParam',
1209:'CryptSetProviderA',
1210:'CryptSetProviderExA',
1211:'CryptSetProviderExW',
1212:'CryptSetProviderW',
1213:'CryptSignHashA',
1214:'CryptSignHashW',
1215:'CryptVerifySignatureA',
1216:'CryptVerifySignatureW',
1217:'DecryptFileA',
1218:'DecryptFileW',
1219:'DeleteAce',
1220:'DeleteService',
1221:'DeregisterEventSource',
1222:'DestroyPrivateObjectSecurity',
1223:'DuplicateEncryptionInfoFile',
1224:'DuplicateToken',
1225:'DuplicateTokenEx',
1226:'ElfBackupEventLogFileA',
1227:'ElfBackupEventLogFileW',
1228:'ElfChangeNotify',
1229:'ElfClearEventLogFileA',
1230:'ElfClearEventLogFileW',
1231:'ElfCloseEventLog',
1232:'ElfDeregisterEventSource',
1233:'ElfFlushEventLog',
1234:'ElfNumberOfRecords',
1235:'ElfOldestRecord',
1236:'ElfOpenBackupEventLogA',
1237:'ElfOpenBackupEventLogW',
1238:'ElfOpenEventLogA',
1239:'ElfOpenEventLogW',
1240:'ElfReadEventLogA',
1241:'ElfReadEventLogW',
1242:'ElfRegisterEventSourceA',
1243:'ElfRegisterEventSourceW',
1244:'ElfReportEventA',
1245:'ElfReportEventAndSourceW',
1246:'ElfReportEventW',
1247:'EnableTrace',
1248:'EnableTraceEx',
1249:'EnableTraceEx2',
1250:'EncryptFileA',
1251:'EncryptFileW',
1252:'EncryptedFileKeyInfo',
1253:'EncryptionDisable',
1254:'EnumDependentServicesA',
1255:'EnumDependentServicesW',
1256:'EnumServiceGroupW',
1257:'EnumServicesStatusA',
1258:'EnumServicesStatusExA',
1259:'EnumServicesStatusExW',
1260:'EnumServicesStatusW',
1261:'EnumerateTraceGuids',
1262:'EnumerateTraceGuidsEx',
1263:'EqualDomainSid',
1264:'EqualPrefixSid',
1265:'EqualSid',
1266:'EventAccessControl',
1267:'EventAccessQuery',
1268:'EventAccessRemove',
1276:'EventWriteEx',
1280:'FileEncryptionStatusA',
1281:'FileEncryptionStatusW',
1282:'FindFirstFreeAce',
1283:'FlushEfsCache',
1284:'FlushTraceA',
1285:'FlushTraceW',
1286:'FreeEncryptedFileKeyInfo',
1287:'FreeEncryptedFileMetadata',
1288:'FreeEncryptionCertificateHashList',
1289:'FreeInheritedFromArray',
1290:'FreeSid',
1291:'GetAccessPermissionsForObjectA',
1292:'GetAccessPermissionsForObjectW',
1293:'GetAce',
1294:'GetAclInformation',
1295:'GetAuditedPermissionsFromAclA',
1296:'GetAuditedPermissionsFromAclW',
1297:'GetCurrentHwProfileA',
1298:'GetCurrentHwProfileW',
1299:'GetEffectiveRightsFromAclA',
1300:'GetEffectiveRightsFromAclW',
1301:'GetEncryptedFileMetadata',
1302:'GetEventLogInformation',
1303:'GetExplicitEntriesFromAclA',
1304:'GetExplicitEntriesFromAclW',
1305:'GetFileSecurityA',
1306:'GetFileSecurityW',
1307:'GetInformationCodeAuthzLevelW',
1308:'GetInformationCodeAuthzPolicyW',
1309:'GetInheritanceSourceA',
1310:'GetInheritanceSourceW',
1311:'GetKernelObjectSecurity',
1312:'GetLengthSid',
1313:'GetLocalManagedApplicationData',
1314:'GetLocalManagedApplications',
1315:'GetManagedApplicationCategories',
1316:'GetManagedApplications',
1317:'GetMultipleTrusteeA',
1318:'GetMultipleTrusteeOperationA',
1319:'GetMultipleTrusteeOperationW',
1320:'GetMultipleTrusteeW',
1321:'GetNamedSecurityInfoA',
1322:'GetNamedSecurityInfoExA',
1323:'GetNamedSecurityInfoExW',
1324:'GetNamedSecurityInfoW',
1325:'GetNumberOfEventLogRecords',
1326:'GetOldestEventLogRecord',
1327:'GetOverlappedAccessResults',
1328:'GetPrivateObjectSecurity',
1329:'GetSecurityDescriptorControl',
1330:'GetSecurityDescriptorDacl',
1331:'GetSecurityDescriptorGroup',
1332:'GetSecurityDescriptorLength',
1333:'GetSecurityDescriptorOwner',
1334:'GetSecurityDescriptorRMControl',
1335:'GetSecurityDescriptorSacl',
1336:'GetSecurityInfo',
1337:'GetSecurityInfoExA',
1338:'GetSecurityInfoExW',
1339:'GetServiceDisplayNameA',
1340:'GetServiceDisplayNameW',
1341:'GetServiceKeyNameA',
1342:'GetServiceKeyNameW',
1343:'GetSidIdentifierAuthority',
1344:'GetSidLengthRequired',
1345:'GetSidSubAuthority',
1346:'GetSidSubAuthorityCount',
1347:'GetThreadWaitChain',
1348:'GetTokenInformation',
1352:'GetTrusteeFormA',
1353:'GetTrusteeFormW',
1354:'GetTrusteeNameA',
1355:'GetTrusteeNameW',
1356:'GetTrusteeTypeA',
1357:'GetTrusteeTypeW',
1358:'GetUserNameA',
1359:'GetUserNameW',
1360:'GetWindowsAccountDomainSid',
1367:'I_ScSetServiceBitsA',
1368:'I_ScSetServiceBitsW',
1370:'IdentifyCodeAuthzLevelW',
1371:'ImpersonateAnonymousToken',
1372:'ImpersonateLoggedOnUser',
1373:'ImpersonateNamedPipeClient',
1374:'ImpersonateSelf',
1375:'InitializeAcl',
1376:'InitializeSecurityDescriptor',
1377:'InitializeSid',
1378:'InitiateShutdownA',
1379:'InitiateShutdownW',
1380:'InitiateSystemShutdownA',
1381:'InitiateSystemShutdownExA',
1382:'InitiateSystemShutdownExW',
1383:'InitiateSystemShutdownW',
1384:'InstallApplication',
1385:'IsTextUnicode',
1386:'IsTokenRestricted',
1387:'IsTokenUntrusted',
1388:'IsValidAcl',
1390:'IsValidSecurityDescriptor',
1391:'IsValidSid',
1392:'IsWellKnownSid',
1393:'LockServiceDatabase',
1394:'LogonUserA',
1395:'LogonUserExA',
1396:'LogonUserExExW',
1397:'LogonUserExW',
1398:'LogonUserW',
1399:'LookupAccountNameA',
1400:'LookupAccountNameW',
1401:'LookupAccountSidA',
1402:'LookupAccountSidW',
1403:'LookupPrivilegeDisplayNameA',
1404:'LookupPrivilegeDisplayNameW',
1405:'LookupPrivilegeNameA',
1406:'LookupPrivilegeNameW',
1407:'LookupPrivilegeValueA',
1408:'LookupPrivilegeValueW',
1409:'LookupSecurityDescriptorPartsA',
1410:'LookupSecurityDescriptorPartsW',
1411:'LsaAddAccountRights',
1412:'LsaAddPrivilegesToAccount',
1413:'LsaClearAuditLog',
1414:'LsaClose',
1415:'LsaCreateAccount',
1416:'LsaCreateSecret',
1417:'LsaCreateTrustedDomain',
1418:'LsaCreateTrustedDomainEx',
1419:'LsaDelete',
1420:'LsaDeleteTrustedDomain',
1421:'LsaEnumerateAccountRights',
1422:'LsaEnumerateAccounts',
1423:'LsaEnumerateAccountsWithUserRight',
1424:'LsaEnumeratePrivileges',
1425:'LsaEnumeratePrivilegesOfAccount',
1426:'LsaEnumerateTrustedDomains',
1427:'LsaEnumerateTrustedDomainsEx',
1428:'LsaFreeMemory',
1429:'LsaGetQuotasForAccount',
1430:'LsaGetRemoteUserName',
1431:'LsaGetSystemAccessAccount',
1432:'LsaGetUserName',
1433:'LsaICLookupNames',
1434:'LsaICLookupNamesWithCreds',
1435:'LsaICLookupSids',
1436:'LsaICLookupSidsWithCreds',
1437:'LsaLookupNames',
1438:'LsaLookupNames2',
1439:'LsaLookupPrivilegeDisplayName',
1440:'LsaLookupPrivilegeName',
1441:'LsaLookupPrivilegeValue',
1442:'LsaLookupSids',
1443:'LsaManageSidNameMapping',
1444:'LsaNtStatusToWinError',
1445:'LsaOpenAccount',
1446:'LsaOpenPolicy',
1447:'LsaOpenPolicySce',
1448:'LsaOpenSecret',
1449:'LsaOpenTrustedDomain',
1450:'LsaOpenTrustedDomainByName',
1451:'LsaQueryDomainInformationPolicy',
1452:'LsaQueryForestTrustInformation',
1453:'LsaQueryInfoTrustedDomain',
1454:'LsaQueryInformationPolicy',
1455:'LsaQuerySecret',
1456:'LsaQuerySecurityObject',
1457:'LsaQueryTrustedDomainInfo',
1458:'LsaQueryTrustedDomainInfoByName',
1459:'LsaRemoveAccountRights',
1460:'LsaRemovePrivilegesFromAccount',
1461:'LsaRetrievePrivateData',
1462:'LsaSetDomainInformationPolicy',
1463:'LsaSetForestTrustInformation',
1464:'LsaSetInformationPolicy',
1465:'LsaSetInformationTrustedDomain',
1466:'LsaSetQuotasForAccount',
1467:'LsaSetSecret',
1468:'LsaSetSecurityObject',
1469:'LsaSetSystemAccessAccount',
1470:'LsaSetTrustedDomainInfoByName',
1471:'LsaSetTrustedDomainInformation',
1472:'LsaStorePrivateData',
1479:'MSChapSrvChangePassword',
1480:'MSChapSrvChangePassword2',
1481:'MakeAbsoluteSD',
1482:'MakeAbsoluteSD2',
1483:'MakeSelfRelativeSD',
1484:'MapGenericMask',
1485:'NotifyBootConfigStatus',
1486:'NotifyChangeEventLog',
1487:'NotifyServiceStatusChange',
1488:'NotifyServiceStatusChangeA',
1489:'NotifyServiceStatusChangeW',
1490:'ObjectCloseAuditAlarmA',
1491:'ObjectCloseAuditAlarmW',
1492:'ObjectDeleteAuditAlarmA',
1493:'ObjectDeleteAuditAlarmW',
1494:'ObjectOpenAuditAlarmA',
1495:'ObjectOpenAuditAlarmW',
1496:'ObjectPrivilegeAuditAlarmA',
1497:'ObjectPrivilegeAuditAlarmW',
1498:'OpenBackupEventLogA',
1499:'OpenBackupEventLogW',
1500:'OpenEncryptedFileRawA',
1501:'OpenEncryptedFileRawW',
1502:'OpenEventLogA',
1503:'OpenEventLogW',
1504:'OpenProcessToken',
1505:'OpenSCManagerA',
1506:'OpenSCManagerW',
1507:'OpenServiceA',
1508:'OpenServiceW',
1509:'OpenThreadToken',
1510:'OpenThreadWaitChainSession',
1511:'OpenTraceA',
1512:'OpenTraceW',
1513:'PerfAddCounters',
1514:'PerfCloseQueryHandle',
1518:'PerfDeleteCounters',
1520:'PerfEnumerateCounterSet',
1521:'PerfEnumerateCounterSetInstances',
1524:'PerfOpenQueryHandle',
1525:'PerfQueryCounterData',
1526:'PerfQueryCounterInfo',
1527:'PerfQueryCounterSetRegistrationInfo',
1536:'PrivilegeCheck',
1537:'PrivilegedServiceAuditAlarmA',
1538:'PrivilegedServiceAuditAlarmW',
1539:'ProcessIdleTasks',
1540:'ProcessIdleTasksW',
1541:'ProcessTrace',
1542:'QueryAllTracesA',
1543:'QueryAllTracesW',
1544:'QueryRecoveryAgentsOnEncryptedFile',
1545:'QuerySecurityAccessMask',
1546:'QueryServiceConfig2A',
1547:'QueryServiceConfig2W',
1548:'QueryServiceConfigA',
1549:'QueryServiceConfigW',
1550:'QueryServiceLockStatusA',
1551:'QueryServiceLockStatusW',
1552:'QueryServiceObjectSecurity',
1553:'QueryServiceStatus',
1554:'QueryServiceStatusEx',
1555:'QueryTraceA',
1556:'QueryTraceW',
1557:'QueryUsersOnEncryptedFile',
1558:'ReadEncryptedFileRaw',
1559:'ReadEventLogA',
1560:'ReadEventLogW',
1561:'RegCloseKey',
1562:'RegConnectRegistryA',
1563:'RegConnectRegistryExA',
1564:'RegConnectRegistryExW',
1565:'RegConnectRegistryW',
1566:'RegCopyTreeA',
1567:'RegCopyTreeW',
1568:'RegCreateKeyA',
1569:'RegCreateKeyExA',
1570:'RegCreateKeyExW',
1571:'RegCreateKeyTransactedA',
1572:'RegCreateKeyTransactedW',
1573:'RegCreateKeyW',
1574:'RegDeleteKeyA',
1575:'RegDeleteKeyExA',
1576:'RegDeleteKeyExW',
1577:'RegDeleteKeyTransactedA',
1578:'RegDeleteKeyTransactedW',
1579:'RegDeleteKeyValueA',
1580:'RegDeleteKeyValueW',
1581:'RegDeleteKeyW',
1582:'RegDeleteTreeA',
1583:'RegDeleteTreeW',
1584:'RegDeleteValueA',
1585:'RegDeleteValueW',
1586:'RegDisablePredefinedCache',
1587:'RegDisablePredefinedCacheEx',
1588:'RegDisableReflectionKey',
1589:'RegEnableReflectionKey',
1590:'RegEnumKeyA',
1591:'RegEnumKeyExA',
1592:'RegEnumKeyExW',
1593:'RegEnumKeyW',
1594:'RegEnumValueA',
1595:'RegEnumValueW',
1596:'RegFlushKey',
1597:'RegGetKeySecurity',
1598:'RegGetValueA',
1599:'RegGetValueW',
1600:'RegLoadAppKeyA',
1601:'RegLoadAppKeyW',
1602:'RegLoadKeyA',
1603:'RegLoadKeyW',
1604:'RegLoadMUIStringA',
1605:'RegLoadMUIStringW',
1606:'RegNotifyChangeKeyValue',
1607:'RegOpenCurrentUser',
1608:'RegOpenKeyA',
1609:'RegOpenKeyExA',
1610:'RegOpenKeyExW',
1611:'RegOpenKeyTransactedA',
1612:'RegOpenKeyTransactedW',
1613:'RegOpenKeyW',
1614:'RegOpenUserClassesRoot',
1615:'RegOverridePredefKey',
1616:'RegQueryInfoKeyA',
1617:'RegQueryInfoKeyW',
1618:'RegQueryMultipleValuesA',
1619:'RegQueryMultipleValuesW',
1620:'RegQueryReflectionKey',
1621:'RegQueryValueA',
1622:'RegQueryValueExA',
1623:'RegQueryValueExW',
1624:'RegQueryValueW',
1625:'RegRenameKey',
1626:'RegReplaceKeyA',
1627:'RegReplaceKeyW',
1628:'RegRestoreKeyA',
1629:'RegRestoreKeyW',
1630:'RegSaveKeyA',
1631:'RegSaveKeyExA',
1632:'RegSaveKeyExW',
1633:'RegSaveKeyW',
1634:'RegSetKeySecurity',
1635:'RegSetKeyValueA',
1636:'RegSetKeyValueW',
1637:'RegSetValueA',
1638:'RegSetValueExA',
1639:'RegSetValueExW',
1640:'RegSetValueW',
1641:'RegUnLoadKeyA',
1642:'RegUnLoadKeyW',
1643:'RegisterEventSourceA',
1644:'RegisterEventSourceW',
1645:'RegisterIdleTask',
1646:'RegisterServiceCtrlHandlerA',
1647:'RegisterServiceCtrlHandlerExA',
1648:'RegisterServiceCtrlHandlerExW',
1649:'RegisterServiceCtrlHandlerW',
1652:'RegisterWaitChainCOMCallback',
1653:'RemoveTraceCallback',
1654:'RemoveUsersFromEncryptedFile',
1655:'ReportEventA',
1656:'ReportEventW',
1657:'RevertToSelf',
1658:'SaferCloseLevel',
1659:'SaferComputeTokenFromLevel',
1660:'SaferCreateLevel',
1661:'SaferGetLevelInformation',
1662:'SaferGetPolicyInformation',
1663:'SaferIdentifyLevel',
1664:'SaferRecordEventLogEntry',
1665:'SaferSetLevelInformation',
1666:'SaferSetPolicyInformation',
1667:'SaferiChangeRegistryScope',
1668:'SaferiCompareTokenLevels',
1669:'SaferiIsDllAllowed',
1670:'SaferiIsExecutableFileType',
1671:'SaferiPopulateDefaultsInRegistry',
1672:'SaferiRecordEventLogEntry',
1673:'SaferiSearchMatchingHashRules',
1674:'SetAclInformation',
1675:'SetEncryptedFileMetadata',
1676:'SetEntriesInAccessListA',
1677:'SetEntriesInAccessListW',
1678:'SetEntriesInAclA',
1679:'SetEntriesInAclW',
1680:'SetEntriesInAuditListA',
1681:'SetEntriesInAuditListW',
1682:'SetFileSecurityA',
1683:'SetFileSecurityW',
1684:'SetInformationCodeAuthzLevelW',
1685:'SetInformationCodeAuthzPolicyW',
1686:'SetKernelObjectSecurity',
1687:'SetNamedSecurityInfoA',
1688:'SetNamedSecurityInfoExA',
1689:'SetNamedSecurityInfoExW',
1690:'SetNamedSecurityInfoW',
1691:'SetPrivateObjectSecurity',
1692:'SetPrivateObjectSecurityEx',
1693:'SetSecurityAccessMask',
1694:'SetSecurityDescriptorControl',
1695:'SetSecurityDescriptorDacl',
1696:'SetSecurityDescriptorGroup',
1697:'SetSecurityDescriptorOwner',
1698:'SetSecurityDescriptorRMControl',
1699:'SetSecurityDescriptorSacl',
1700:'SetSecurityInfo',
1701:'SetSecurityInfoExA',
1702:'SetSecurityInfoExW',
1703:'SetServiceBits',
1704:'SetServiceObjectSecurity',
1705:'SetServiceStatus',
1706:'SetThreadToken',
1707:'SetTokenInformation',
1708:'SetTraceCallback',
1709:'SetUserFileEncryptionKey',
1710:'SetUserFileEncryptionKeyEx',
1711:'StartServiceA',
1712:'StartServiceCtrlDispatcherA',
1713:'StartServiceCtrlDispatcherW',
1714:'StartServiceW',
1715:'StartTraceA',
1716:'StartTraceW',
1717:'StopTraceA',
1718:'StopTraceW',
1719:'SystemFunction001',
1720:'SystemFunction002',
1721:'SystemFunction003',
1722:'SystemFunction004',
1723:'SystemFunction005',
1724:'SystemFunction006',
1725:'SystemFunction007',
1726:'SystemFunction008',
1727:'SystemFunction009',
1728:'SystemFunction010',
1729:'SystemFunction011',
1730:'SystemFunction012',
1731:'SystemFunction013',
1732:'SystemFunction014',
1733:'SystemFunction015',
1734:'SystemFunction016',
1735:'SystemFunction017',
1736:'SystemFunction018',
1737:'SystemFunction019',
1738:'SystemFunction020',
1739:'SystemFunction021',
1740:'SystemFunction022',
1741:'SystemFunction023',
1742:'SystemFunction024',
1743:'SystemFunction025',
1744:'SystemFunction026',
1745:'SystemFunction027',
1746:'SystemFunction028',
1747:'SystemFunction029',
1748:'SystemFunction030',
1749:'SystemFunction031',
1750:'SystemFunction032',
1751:'SystemFunction033',
1752:'SystemFunction034',
1754:'SystemFunction036',
1755:'SystemFunction040',
1756:'SystemFunction041',
1761:'TraceSetInformation',
1762:'TreeResetNamedSecurityInfoA',
1763:'TreeResetNamedSecurityInfoW',
1764:'TreeSetNamedSecurityInfoA',
1765:'TreeSetNamedSecurityInfoW',
1766:'TrusteeAccessToObjectA',
1767:'TrusteeAccessToObjectW',
1768:'UninstallApplication',
1769:'UnlockServiceDatabase',
1770:'UnregisterIdleTask',
1772:'UpdateTraceA',
1773:'UpdateTraceW',
1774:'UsePinForEncryptedFilesA',
1775:'UsePinForEncryptedFilesW',
1776:'WmiCloseBlock',
1777:'WmiDevInstToInstanceNameA',
1778:'WmiDevInstToInstanceNameW',
1779:'WmiEnumerateGuids',
1780:'WmiExecuteMethodA',
1781:'WmiExecuteMethodW',
1782:'WmiFileHandleToInstanceNameA',
1783:'WmiFileHandleToInstanceNameW',
1784:'WmiFreeBuffer',
1785:'WmiMofEnumerateResourcesA',
1786:'WmiMofEnumerateResourcesW',
1787:'WmiNotificationRegistrationA',
1788:'WmiNotificationRegistrationW',
1789:'WmiOpenBlock',
1790:'WmiQueryAllDataA',
1791:'WmiQueryAllDataMultipleA',
1792:'WmiQueryAllDataMultipleW',
1793:'WmiQueryAllDataW',
1794:'WmiQueryGuidInformation',
1795:'WmiQuerySingleInstanceA',
1796:'WmiQuerySingleInstanceMultipleA',
1797:'WmiQuerySingleInstanceMultipleW',
1798:'WmiQuerySingleInstanceW',
1799:'WmiReceiveNotificationsA',
1800:'WmiReceiveNotificationsW',
1801:'WmiSetSingleInstanceA',
1802:'WmiSetSingleInstanceW',
1803:'WmiSetSingleItemA',
1804:'WmiSetSingleItemW',
1805:'WriteEncryptedFileRaw',
} | StarcoderdataPython |
3531007 | from compas.datastructures import Mesh
import meshcat
import meshcat.geometry as mcg
import uuid
import numpy as np
from meshcat import Visualizer
from meshcat.animation import Animation
import pymesh
import os
def compas_mesh_to_obj_str(mesh):
    """Serialize a COMPAS mesh to a Wavefront OBJ string.

    :param mesh: a COMPAS ``Mesh`` (anything with ``to_vertices_and_faces()``).
    :return: OBJ file contents with one object group, ``v`` records for every
        vertex and ``f`` records for triangle and quad faces.

    OBJ vertex indices are 1-based, so the 0-based face indices returned by
    the mesh must be offset by +1 for every face record.
    """
    lines = ["g object_1"]
    vertices, faces = mesh.to_vertices_and_faces()
    for point in vertices:
        lines.append("v {} {} {}".format(*point))
    for face in faces:
        if len(face) == 3:
            a, b, c = face
            # Bug fix: triangles previously emitted the raw 0-based indices,
            # producing invalid OBJ; the quad branch already applied +1.
            lines.append("f {} {} {}".format(a + 1, b + 1, c + 1))
        elif len(face) == 4:
            a, b, c, d = face
            lines.append("f {} {} {} {}".format(a + 1, b + 1, c + 1, d + 1))
    return "\n".join(lines)
def mesh2mcg(mesh):
    """Convert a COMPAS mesh into a meshcat ``MeshGeometry`` via its OBJ serialization."""
    return mcg.MeshGeometry(compas_mesh_to_obj_str(mesh), 'obj')
def viewer_draw_box(viewer, box, color=None, id=None):
    """Draw a COMPAS box in the meshcat viewer by converting it to a mesh first."""
    box_mesh = Mesh.from_vertices_and_faces(box.vertices, box.faces)
    viewer_draw_mesh(viewer, box_mesh, color, id)
def viewer_draw_mesh(viewer, mesh, color=None, id=None):
    """Draw a COMPAS mesh in the meshcat viewer.

    :param viewer: meshcat ``Visualizer`` (or sub-tree of one).
    :param mesh: COMPAS mesh to display.
    :param color: hex color int, defaults to grey.
    :param id: scene-path name; a fresh uuid is generated when omitted.
    :return: the scene-path id used (for consistency with the other
        viewer_draw_* helpers, which return their ids).
    """
    if color is None:
        color = 0x777777
    if id is None:
        id = str(uuid.uuid1())
    geo = mesh2mcg(mesh)
    mat = mcg.MeshLambertMaterial(color=color)
    viewer[id].set_object(mcg.Mesh(geo, mat))
    return id
def viewer_draw_lines(viewer, lines, color=None, id=None):
    """Draw line segments in the meshcat viewer.

    :param lines: iterable of ``{'start': xyz, 'end': xyz}`` dicts.
    :param color: hex color int, defaults to grey.
    :param id: base scene-path name; each segment gets ``"<id>_<i>"``.
    :return: list of per-segment scene-path ids.
    """
    if color is None:
        color = 0x777777
    if id is None:
        id = str(uuid.uuid1())
    segment_ids = []
    # Build the id list while drawing: the original enumerated `lines` a
    # second time just to rebuild the ids, which also broke for generators.
    for i, line in enumerate(lines):
        vertices = np.array([list(line['start']), list(line['end'])]).T
        segment_id = "%s_%d" % (id, i)
        viewer[segment_id].set_object(
            mcg.Line(mcg.PointsGeometry(vertices), mcg.MeshBasicMaterial(color=color)))
        segment_ids.append(segment_id)
    return segment_ids
def viewer_draw_sphere(viewer, sphere, color=None, id=None):
    """Draw a COMPAS sphere in the meshcat viewer at its center point.

    :param color: hex color int, defaults to grey.
    :return: the scene-path id used.
    """
    import meshcat.transformations as tf
    if color is None:
        color = 0x777777
    if id is None:
        id = str(uuid.uuid1())
    geometry = mcg.Sphere(sphere.radius)
    # Bug fix: the material used to be created *outside* the set_object()
    # call ("set_object(s), mcg.MeshLambertMaterial(...)" — a tuple no-op),
    # so the requested color was never applied.
    viewer[id].set_object(geometry, mcg.MeshLambertMaterial(color=color))
    viewer[id].set_transform(tf.translation_matrix(list(sphere.point)))
    return id
def viewer_draw_mesh_edges(viewer, mesh, color=None, id=None):
    """Draw every edge of a COMPAS mesh as a line segment.

    :return: list of per-segment scene-path ids (previously dropped).
    """
    lines = [
        {
            'start': mesh.vertex_coordinates(u),
            'end': mesh.vertex_coordinates(v),
        }
        for u, v in mesh.edges()
    ]
    # Propagate the segment ids so callers can address the edges later,
    # matching viewer_draw_lines' own contract.
    return viewer_draw_lines(viewer, lines, color, id)
def viewer_draw_frame(viewer, frame, id=None):
    """Draw a COMPAS frame as three colored axis lines (x red, y green, z blue).

    Returns the three scene-path ids in x, y, z order.
    """
    if id == None:
        id = str(uuid.uuid1())
    axes = (
        ("xaxis", frame.xaxis, 0xff0000),
        ("yaxis", frame.yaxis, 0x00ff00),
        ("zaxis", frame.zaxis, 0x0000ff),
    )
    names = []
    for label, axis, axis_color in axes:
        endpoints = np.array([list(frame.point), list(frame.point + axis)]).T
        name = '%s_%s' % (id, label)
        viewer[name].set_object(
            mcg.Line(mcg.PointsGeometry(endpoints), mcg.MeshBasicMaterial(color=axis_color)))
        names.append(name)
    return names
class MeshCatViewer(Visualizer):
    """Object-oriented facade over the module-level viewer_draw_* helpers."""

    def draw_mesh(self, mesh, color=None, id=None):
        return viewer_draw_mesh(self, mesh, color, id)

    def draw_mesh_edges(self, mesh, color=None, id=None):
        return viewer_draw_mesh_edges(self, mesh, color, id)

    def draw_box(self, box, color=None, id=None):
        return viewer_draw_box(self, box, color, id)

    def draw_frame(self, frame, id=None):
        return viewer_draw_frame(self, frame, id)

    def draw_sphere(self, sphere, color=None, id=None):
        # Bug fix: previously called viewer_draw_sphere(self, sphere, id),
        # passing `id` as the `color` positional and dropping the color.
        return viewer_draw_sphere(self, sphere, color, id)
| StarcoderdataPython |
11321241 | <filename>simuvex/simuvex/engines/vex/statements/mbe.py
# Compatibility shim: re-export the VEX MBE statement handlers from angr
# under the legacy simuvex module path.
# Parenthesized print is valid on both Python 2 and 3; the bare print
# statement was a SyntaxError under Python 3.
print('... Importing simuvex/engines/vex/statements/mbe.py ...')
from angr.engines.vex.statements.mbe import *
import os

# One-off maintenance script: rename every *.tiff sprite in the explosion
# image directory to *.png.
# NOTE(review): this only renames the files; it does not convert the image
# data — confirm the .tiff files are actually PNG-encoded.
dir_path = "/Users/wonder/Documents/GitHub/WonderGame/Aircraft War/img/Explosion"
file_names = os.listdir(dir_path)
print(file_names)
for file in file_names:
    print(file)
    # os.path.splitext is robust for names containing extra dots, unlike
    # the previous naive split('.') which mis-handled them.
    stem, ext = os.path.splitext(file)
    print(stem)
    if ext == '.tiff':
        new_name = stem + '.png'
        print('new_name', new_name)
        # Rename via absolute paths instead of chdir()+rename(), which was
        # fragile when the script is run from another working directory.
        os.rename(os.path.join(dir_path, file), os.path.join(dir_path, new_name))
| StarcoderdataPython |
9783811 | <gh_stars>10-100
import judy

# Demo script for the judy voice-output library on a Raspberry Pi:
# plays two beeps and speaks a greeting through the first ALSA device.
vout = judy.VoiceOut(device='plughw:0,0',
                     resources='/home/pi/judy/resources/audio')
# beep(1) / beep(0) — presumably distinct start/stop tones; confirm
# against the judy.VoiceOut API.
vout.beep(1)
vout.beep(0)
vout.say('How are you today?')
| StarcoderdataPython |
3463168 | #%%
H_d, H_c = get_hamiltonians(backend, subsystem_list, ['wq0'])
n_ctrls = len(H_c)
U_0 = identity(3)
U_targ = get_hadamard()
| StarcoderdataPython |
11244850 | <reponame>lorena112233/pythonDay1
# Caesar cipher: shift each letter of the user's input forward by a fixed
# number of positions in the alphabet, wrapping around after 'z'.
# Characters that are not lowercase letters (spaces, commas, ...) pass
# through unchanged.
desplazamiento = 6
alfabeto = "ABCDEFGHIJKLMNOPQRSTUVWXYZ".lower()


def build_cipher_map(alphabet, shift):
    """Return a dict mapping each letter to the letter `shift` positions later, wrapping."""
    return {letter: alphabet[(index + shift) % len(alphabet)]
            for index, letter in enumerate(alphabet)}


def encrypt(text, mapping):
    """Encrypt `text` with `mapping`; characters missing from the mapping are kept as-is."""
    return "".join(mapping.get(ch, ch) for ch in text)


abecedario = build_cipher_map(alfabeto, desplazamiento)

if __name__ == "__main__":
    # Interactive entry point kept under a main guard so importing this
    # module (e.g. for testing) does not block on input().
    entradaUsuario = input("introduce un texto: ").lower()
    textoCifrado = encrypt(entradaUsuario, abecedario)
    print(textoCifrado)
| StarcoderdataPython |
1956672 | <reponame>aisthesis/opttrack<filename>opttrack/lib/utils.py
"""
Copyright (c) 2016 <NAME>
license http://opensource.org/licenses/MIT
lib/utils.py
General utility functions
"""
from __future__ import division
def safe_divide(numerator, denominator, default_answer=0.):
    """Divide `numerator` by `denominator`.

    Returns `default_answer` instead of raising when the denominator is zero.
    """
    try:
        result = numerator / denominator
    except ZeroDivisionError:
        result = default_answer
    return result
| StarcoderdataPython |
198387 | <filename>CONUS/Winds/percentile_wind_vs_latitude_and_altitude.py
import iris.coord_categorisation
import matplotlib.style as style
from cf_units import Unit
import iris.quickplot as qplt
import iris.plot as iplt
import matplotlib.pyplot as plt
import iris.analysis as ia
import numpy as np
# For each requested percentile, collapse the ERA wind fields over time and
# longitude, then plot wind speed as a function of latitude and altitude.
percentiles = [50, 75, 90, 95, 99]
for percentile in percentiles:
    style.use("seaborn")
    # percentile = 95
    ##### region Parse
    # Pull in data from file (iris cubes from the NetCDF inputs).
    eastward_wind = iris.load('u_component_of_wind.nc')[0]
    northward_wind = iris.load('v_component_of_wind.nc')[0]
    # Find the wind speed: work with the squared magnitude first so the
    # percentile reduction happens before the sqrt.
    wind_speed_squared = eastward_wind ** 2 + northward_wind ** 2
    wind_speed_squared.standard_name = "wind_speed"
    wind_speed_squared.long_name = "Wind Speed Squared"
    wind_speed_squared.var_name = "V"
    # Find the altitude from geopotential (precomputed and cached on disk).
    altitudes = np.load("altitudes.npy")
    # Collapse over time and longitude, keeping the requested percentile;
    # what remains varies over (pressure level / latitude).
    wind_speed_squared = wind_speed_squared.collapsed(["time","longitude"], ia.PERCENTILE, percent=percentile)
    wind_speed = wind_speed_squared ** 0.5
    # Display info
    print("\nOriginal Data:")
    print(wind_speed.summary())
    # NOTE(review): assumes dim 1 of the collapsed cube is latitude —
    # confirm against the cube summary printed above.
    latitudes = wind_speed.dim_coords[1].points
    speeds = np.array(wind_speed.data)
    np.save("wind_%.0f_vs_latitudes_altitudes" % percentile, speeds)
    # Supersample: smooth the grid 5x in each axis for nicer contours.
    from scipy import ndimage
    ss_factor = 5
    latitudes = ndimage.zoom(latitudes, ss_factor)
    altitudes = ndimage.zoom(altitudes, ss_factor)
    speeds = ndimage.zoom(speeds, ss_factor)
    plt.figure()
    plt.ion()
    X, Y = np.meshgrid(
        latitudes,
        altitudes,
    )
    Z = speeds
    fig = plt.contourf(
        X,
        Y,
        Z,
        cmap="viridis"
    )
    plt.xlabel("Latitude [deg]")
    plt.ylabel("Altitude [m]")
    plt.title("%.0f%% Wind Speeds, CONUS July-Aug, 1972-present" % percentile)
    plt.colorbar(label="Wind Speed [m/s]")
    plt.tight_layout()
    # Save the figure before showing it so the SVG is written even if the
    # interactive window is closed.
    plt.savefig("wind_speeds_%.0f.svg" % percentile)
    plt.show()
| StarcoderdataPython |
4964282 | #!/usr/bin/env python
"""
This module is designed to used with _livereload to
make it a little easier to write Sphinx documentation.
Simply run the command::
python sphinx_server.py
and browse to http://localhost:5500
livereload_: https://pypi.python.org/pypi/livereload
"""
import os
from livereload import Server, shell
# Shell command that regenerates the HTML docs, and the directory the
# livereload server should serve from.
rebuild_cmd = shell('make html', cwd='.')
rebuild_root = "_build/html"

watch_dirs = [
    '.',
    'release_notes',
]
watch_globs = [
    '*.rst', '*.ipynb'
]
watch_source_dir = "../smqtk_dataprovider"

server = Server()

# Rebuild whenever the Sphinx configuration itself changes.
server.watch('conf.py', rebuild_cmd)

# Watch every configured glob in every configured documentation directory.
for pattern in (os.path.join(d, g) for d in watch_dirs for g in watch_globs):
    server.watch(pattern, rebuild_cmd)

# Watch the package's Python sources so autodoc-generated pages rebuild too.
for dirpath, _dirnames, _filenames in os.walk(watch_source_dir):
    server.watch(os.path.join(dirpath, '*.py'), rebuild_cmd)

# Optionally change to host="0.0.0.0" to make available outside localhost.
server.serve(root=rebuild_root)
| StarcoderdataPython |
1742776 | import os
import sys
import cv2
import time
import logging
import json
import tensorflow as tf
import numpy as np
import glob
import tqdm
# This module builds its graph with the TF1-style API, so eager execution
# must be disabled up front.
tf.compat.v1.disable_eager_execution()
# Make the project root importable so `src` resolves when this file is run
# directly from its own directory.
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import src
from src.data_manager import EmojifierDataManager
from src.__init__ import *
# Session config: grow GPU memory on demand instead of reserving it all.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
logger = logging.getLogger('emojifier.model')
def weight_variable(shape):
    """Create a trainable weight tensor of `shape`, drawn from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.compat.v1.truncated_normal(shape=shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor of `shape`, initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of `x` with kernel `W`: stride 1 in every dimension, SAME padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves the spatial dimensions)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_layer(input, shape):
    """Convolution + batch normalization + ReLU.

    `shape` is [kernel_h, kernel_w, in_channels, out_channels]; the bias has
    one entry per output channel.
    """
    kernel = weight_variable(shape)
    bias = bias_variable([shape[3]])
    normalized = tf.compat.v1.layers.batch_normalization(conv2d(input, kernel) + bias)
    return tf.nn.relu(normalized)
def full_layer(input, size):
    """Fully connected layer mapping (batch, n) input to (batch, `size`), no activation."""
    n_in = int(input.get_shape()[1])
    weights = weight_variable([n_in, size])
    bias = bias_variable([size])
    return tf.matmul(input, weights) + bias
def model(x, keep_prob):
    """Build the emotion-classification CNN.

    x: (batch, 48, 48, 1) grayscale faces; keep_prob: dropout keep rate.
    Returns unnormalized class logits of shape (batch, len(EMOTION_MAP)).
    """
    # Channel widths of the three conv layers and the dense layer.
    C1, C2, C3 = 30, 50, 80
    F1 = 512
    conv1_1 = conv_layer(x, shape=[3, 3, 1, C1])
    conv1_1_pool = max_pool_2x2(conv1_1)
    conv1_2 = conv_layer(conv1_1_pool, shape=[3, 3, C1, C2])
    conv1_2_pool = max_pool_2x2(conv1_2)
    conv1_drop = tf.compat.v1.nn.dropout(conv1_2_pool, keep_prob=keep_prob)
    conv2_1 = conv_layer(conv1_drop, shape=[3, 3, C2, C3])
    conv2_1_pool = max_pool_2x2(conv2_1)
    # Three 2x2 pools take 48x48 down to 6x6, hence the flattened size.
    conv2_flat = tf.reshape(conv2_1_pool, [-1, 6*6*C3])
    conv2_drop = tf.compat.v1.nn.dropout(conv2_flat, keep_prob=keep_prob)
    full1 = tf.nn.relu(full_layer(conv2_drop, F1))
    full1_drop = tf.compat.v1.nn.dropout(full1, keep_prob=keep_prob)
    # Raw logits; softmax is applied by the loss in the training block.
    y_conv = full_layer(full1_drop, len(EMOTION_MAP))
    return y_conv
def test(emoji_data, sess):
    """Evaluate classification accuracy over the whole test split in batches of 30.

    Relies on the module-level graph tensors `accuracy`, `X`, `Y` and
    `keep_prob` created in the __main__ block below.
    """
    logger.info('CALCULATING TESTSET ACCURACY ...')
    L = len(emoji_data.test.labels)
    x = emoji_data.test.images
    y = emoji_data.test.labels
    accs = []
    for i in tqdm.tqdm(range(0, L, 30)):
        # Reshape a full batch of 30, or whatever remains at the tail.
        if i+30 <= L:
            x_i = x[i:i+30].reshape(30, 48, 48, 1)
            y_i = y[i:i+30].reshape(30, len(EMOTION_MAP))
        else:
            x_i = x[i:].reshape(L-i, 48, 48, 1)
            y_i = y[i:].reshape(L-i, len(EMOTION_MAP))
        # keep_prob=1.0 disables dropout at evaluation time.
        accs.append(sess.run(accuracy, feed_dict={X:x_i, Y:y_i, keep_prob:1.0}))
    # NOTE(review): the unweighted mean gives the (smaller) final batch the
    # same weight as full batches — a slight bias; confirm acceptable.
    acc = np.mean(accs)
    logger.critical('test-accuracy: {:.4}%'.format(acc*100))
if __name__ == '__main__':
    # Directory where trained weights are checkpointed.
    CHECKPOINT_SAVE_PATH = os.path.join(os.path.dirname(__file__), os.pardir, 'model_checkpoints')
    if not os.path.exists(CHECKPOINT_SAVE_PATH):
        os.makedirs(CHECKPOINT_SAVE_PATH)
    # Hyper-parameters come from the shared project config file.
    BATCH_SIZE = config_parser.getint('MODEL_HYPER_PARAMETERS', 'batch_size')
    STEPS = config_parser.getint('MODEL_HYPER_PARAMETERS', 'train_steps')
    LEARNING_RATE = config_parser.getfloat('MODEL_HYPER_PARAMETERS', 'learning_rate')
    KEEP_PROB = config_parser.getfloat('MODEL_HYPER_PARAMETERS', 'dropout_keep_prob')
    # Graph inputs: 48x48 grayscale faces and one-hot emotion labels.
    X = tf.compat.v1.placeholder(
        tf.float32, shape=[None, 48, 48, 1]
    )
    Y = tf.compat.v1.placeholder(tf.float32, shape=[None, len(EMOTION_MAP)])
    keep_prob = tf.compat.v1.placeholder(tf.float32)
    emoset = EmojifierDataManager()
    logger.info("Number of train images: {}".format(
        len(emoset.train.images)
    ))
    logger.info("Number of train labels: {}".format(
        len(emoset.train.labels)
    ))
    logger.info("Number of test images: {}".format(
        len(emoset.test.images)
    ))
    logger.info("Number of test labels: {}".format(
        len(emoset.test.labels)
    ))
    # Loss: softmax cross-entropy over the logits produced by model().
    y_conv = model(X, keep_prob)
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            labels=Y,
            logits=y_conv
        )
    )
    train = tf.compat.v1.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy)
    # Accuracy: fraction of argmax predictions that match the label.
    correct_predictions = tf.equal(
        tf.argmax(y_conv, 1), tf.argmax(Y, 1)
    )
    accuracy = tf.reduce_mean(
        tf.cast(correct_predictions, tf.float32)
    )
    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session(config=config) as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        for i in tqdm.tqdm(range(STEPS)):
            x_data, y_data = emoset.train.next_batch(BATCH_SIZE)
            acc, loss, _ = sess.run(
                [accuracy, cross_entropy, train],
                feed_dict={X:x_data, Y:y_data, keep_prob: KEEP_PROB}
            )
            # Log training accuracy/loss every 20 steps.
            if i % 20 == 0:
                logger.info('accuracy: {:.4}%, loss: {:.4}'.format(
                    acc*100, loss
                ))
        # Final held-out evaluation, then persist the trained weights.
        test(emoset, sess)
        save_path = saver.save(sess, os.path.join(CHECKPOINT_SAVE_PATH, 'model.ckpt'))
        logger.info("Model saved in path: {}".format(save_path))
| StarcoderdataPython |
12833819 | <gh_stars>1-10
import json
import re
import xml.etree.ElementTree as ETree
from averell.utils import TEI_NAMESPACE as NS
from averell.utils import XML_NS
ECEP_NS = "{http://www.eighteenthcenturypoetry.org/ns}"
def get_poem_info(xml_file, lines_info, authors):
    """Poem parser for 'ECPA corpus'.

    We read the data and find elements like title, author, year, etc. Then
    we iterate over the poem text and we look for each stanza, line, word
    and syllable data.

    :param xml_file: Path for the poem xml file
    :param lines_info: Dict of per-line metrical metadata keyed by line xml:id
    :param authors: Pair of dicts (authors-by-key, works-by-poem-id) loaded
        from authwork_mdp.json
    :return: Dict with the data obtained from the poem
    :rtype: dict
    """
    poem = {}
    # The corpus name is a fixed ancestor directory in the ECPA layout.
    corpus_name = xml_file.parts[-6]
    tree = ETree.parse(str(xml_file))
    root = tree.getroot()
    manually_checked = False
    metadata = root.attrib
    title = root.find(f".//{NS}head[@type='main']")
    poem_id = metadata.get(f"{XML_NS}id")
    # Prefer the curated title from the works index; fall back to the
    # tokenized <head> words in the TEI itself.
    poem_info = authors[1].get(poem_id)
    if poem_info:
        title_text = poem_info.get("title")
    else:
        title_text = " ".join(word.text for word in title.findall(f"{NS}w"))
    # The author link target looks like "file#authorKey"; take the fragment.
    author = root.find(f"{NS}link[@type='author']").get("target").split("#")[1]
    try:
        author_name = next(aut.get("name") for aut in authors[0].values() if
                           aut.get("author") == author)
    except StopIteration:
        # Unknown author key: keep the raw key as the display name.
        author_name = author
    poem.update({
        "poem_title": title_text,
        "author": author_name,
    })
    alt_title = root.find(f".//{NS}head[@type='sub']")
    if alt_title:
        # Collapse whitespace/newlines in the subtitle text.
        alt_title_text = re.sub(r"[\n ]+", " ",
                                "".join(alt_title.itertext())).strip()
        poem.update({"poem_alt_title": alt_title_text})
    # Keep only leaf <lg> elements (no nested line groups) that either carry
    # a "met" attribute or carry neither "type" nor "met".
    line_group_list = root.findall(f".//{NS}lg")
    line_group_list2 = []
    for lg_number, lg in enumerate(line_group_list):
        if not lg.find(f".//{NS}lg"):
            if not lg.get("type") and not lg.get("met"):
                line_group_list2.append(lg)
            if lg.get("met"):
                line_group_list2.append(lg)
    stanza_list = []
    line_number = 0  # global line counter across all stanzas
    for stanza_number, line_group in enumerate(line_group_list2):
        stanza_type = None
        stanza_text = []
        line_list = []
        for n, line in enumerate(line_group.findall(f"{NS}l")):
            line_dict = {}
            line_id = line.attrib.get(f"{XML_NS}id")
            line_length = None
            met = None
            foot = None
            metre = None
            line_info = lines_info.get(line_id)
            if line_info is not None:
                # Stanza type is taken from the first line's annotation.
                if n == 0:
                    stanza_type = line_info.get("stanzas").get("id")
                syllab = line_info.get("syllab")
                line_length = int(syllab) if syllab else None
                met = line_info.get("met").strip("/") or None
                foot = line_info.get("foot").get("id")
                metre = line_info.get("footnum").get("id")
                # "real*" fields are human-corrected scansions; when present
                # they override the automatic annotation.
                real = line_info.get("real")
                if real:
                    manually_checked = True
                    met = real.strip("/")
                    foot = line_info.get("realfoot").get("id")
                    metre = line_info.get("realfootnum").get("id")
            line_dict.update({
                "metrical_pattern": met,
                "line_length": line_length,
                "foot": foot,
                "metre": metre,
            })
            # Rebuild the line text from word (<w>), character (<c>) and
            # punctuation (<pc>) tokens, collecting the words separately.
            word_list = []
            token_list = []
            for token in line:
                tag = token.tag
                if tag == f"{NS}w":
                    word_list.append({"word_text": token.text})
                if tag in [f"{NS}w", f"{NS}c", f"{NS}pc"]:
                    token_list.append(token.text or "")
            line_text = "".join(token_list).strip()
            line_dict.update({
                "line_number": line_number + 1,
                "line_text": "".join(line_text).strip(),
                "words": word_list,
            })
            line_list.append(line_dict)
            stanza_text.append(line_text)
            line_number += 1
        st = "\n".join(stanza_text)
        stanza_list.append({
            "stanza_number": stanza_number + 1,
            "stanza_type": stanza_type,
            "lines": line_list,
            "stanza_text": st,
        })
    poem.update({
        "manually_checked": manually_checked,
        "stanzas": stanza_list,
        "corpus": corpus_name,
    })
    return poem
def get_features(path):
    """Locate every poem XML file in the ECPA corpus and parse it.

    :param path: Corpus root Path
    :return: List of poem dicts
    :rtype: list
    """
    authors_path = (
        path / "ECPA-master" / "web" / "resources"
        / "models" / "authwork_mdp.json"
    )
    authors = json.loads(authors_path.read_text())
    works_dir = path / "ECPA-master" / "web" / "works"
    features = []
    for poem_file in works_dir.rglob("*/*.xml"):
        # Each poem directory carries a "<dirname>_l.json" sidecar with
        # per-line metrical annotations.
        lines_path = poem_file.parent / f"{poem_file.parts[-2]}_l.json"
        lines_info = json.loads(lines_path.read_text())
        features.append(get_poem_info(poem_file, lines_info, authors))
    return features
| StarcoderdataPython |
6591179 | <gh_stars>0
import gym
import torch
from .base_classes import MuZeroConfigBase
from environment import PalleteWorld
import env_config as env_cfg
import numpy as np
class MuZeroConfig(MuZeroConfigBase):
    """MuZero hyper-parameters for the PalleteWorld bin-packing environment.

    Only assigns configuration attributes on top of MuZeroConfigBase; the
    observation encoding must match Game._encode_observation below
    (grid planes plus one plane per candidate box).
    """
    def __init__(self):
        super(MuZeroConfig, self).__init__()
        self.seed = 0  # Seed for numpy, torch and the game


        # Game
        # Dimensions of the game observation
        self.observation_shape = (env_cfg.ENV.ROW_COUNT, env_cfg.ENV.COL_COUNT, 1 + env_cfg.ENV.BIN_MAX_COUNT)
        self.action_space = [i for i in range(env_cfg.ENV.BIN_MAX_COUNT)]  # Fixed list of all possible actions
        self.players = [i for i in range(1)]  # List of players (single-player game)


        # Self-Play
        # Number of simultaneous threads self-playing to feed the replay buffer
        self.num_actors = (10)
        self.max_moves = env_cfg.ENV.EPISODE_MAX_STEP  # Maximum number of moves if game is not finished before
        self.num_simulations = 80  # Number of future moves self-simulated
        self.discount = 0.997  # Chronological discount of the reward
        self.self_play_delay = 0  # Number of seconds to wait after each played game to adjust the self play / training ratio to avoid over/underfitting

        # Root prior exploration noise
        self.root_dirichlet_alpha = 0.25
        self.root_exploration_fraction = 0.25

        # UCB formula
        self.pb_c_base = 19652
        self.pb_c_init = 1.25

        # Network
        self.encoding_size = 200
        self.hidden_size = 128

        # Training
        self.results_path = "./pretrained"  # Path to store the model weights
        self.training_steps = 500000000  # Total number of training steps (ie weights update according to a batch)
        self.batch_size = (
            128  # Number of parts of games to train on at each training step
        )
        self.num_unroll_steps = (
            5  # Number of game moves to keep for every batch element
        )
        self.checkpoint_interval = (
            10  # Number of training steps before using the model for sef-playing
        )
        self.window_size = (
            1000  # Number of self-play games to keep in the replay buffer
        )
        self.td_steps = 1  # Number of steps in the future to take into account for calculating the target value
        self.training_delay = 0  # Number of seconds to wait after each training to adjust the self play / training ratio to avoid over/underfitting
        self.training_device = (
            "cuda" if torch.cuda.is_available() else "cpu"
        )  # Train on GPU if available
        self.weight_decay = 1e-4  # L2 weights regularization
        self.momentum = 0.9

        # Exponential learning rate schedule
        self.lr_init = 0.008  # Initial learning rate
        self.lr_decay_rate = 0.01
        self.lr_decay_steps = 10000

        ### Test
        self.test_episodes = 2  # Number of game played to evaluate the network

    def visit_softmax_temperature_fn(self, trained_steps):
        """
        Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.
        The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.

        Returns:
            Positive float.
        """
        # Anneal: 1.0 for the first half of training, 0.5 until 75%, then 0.25.
        if trained_steps < 0.5 * self.training_steps:
            return 1.0
        elif trained_steps < 0.75 * self.training_steps:
            return 0.5
        else:
            return 0.25
class Game:
    """
    Game wrapper around PalleteWorld exposing the interface MuZero expects
    (step/reset returning encoded observations, to_play, close, render).
    """

    def __init__(self, seed=None, id=0):
        self.id = id
        self.env = PalleteWorld(n_random_fixed=1, env_id=id)
        self.episode_step = 0
        if seed is not None:
            self.env.seed(seed)
            print('env {} is created with seed {}'.format(self.id, seed))

    def _encode_observation(self, o):
        """Encode a raw env observation `(box_list, grid)` for the network.

        Builds one 10x10 plane per candidate box marking its (x, y) footprint
        and concatenates them with the grid along the last axis. Extracted
        here because step() and reset() previously duplicated this logic.
        NOTE(review): assumes the grid o[1] is 10x10xC — confirm against
        PalleteWorld's observation spec.
        """
        boxes = torch.zeros(10, 10, env_cfg.ENV.BIN_MAX_COUNT)  # x, y, box count
        for i, box in enumerate(o[0]):  # box = (x, y)
            boxes[0:box[1], 0:box[0], i] = 1
        return np.concatenate((o[1], boxes), axis=-1)

    def step(self, action):
        """
        Apply action to the game.

        Args:
            action : action of the action_space to take.

        Returns:
            The new observation, the reward and a boolean if the game has ended.
        """
        self.episode_step = self.episode_step + 1
        a = env_cfg.Action(bin_index=action, priority=1, rotate=1)
        o, reward, done, _ = self.env.step(a)
        o = self._encode_observation(o)
        print('A agent is taking action {} in {} step in game {}.'.format(action, self.episode_step, self.id))
        return o, reward, done

    def to_play(self):
        """
        Return the current player.

        Returns:
            The current player, it should be an element of the players list in the config.
        """
        return 0

    def reset(self):
        """
        Reset the game for a new game.

        Returns:
            Initial observation of the game.
        """
        o = self.env.reset()
        print('game {} is reset.'.format(self.id))
        self.episode_step = 0
        return self._encode_observation(o)

    def close(self):
        """
        Properly close the game.
        """
        self.env.close()

    def render(self):
        """
        Display the game observation.
        """
        self.env.render()
        input("Press enter to take a step ")
| StarcoderdataPython |
6646852 | <reponame>zhangshen20/character-reverse<gh_stars>0
from .Character import Character | StarcoderdataPython |
14849 | """
This script is for testing/calling in several different ways
functions from QRColorChecker modules.
@author: <NAME>
@mail: <EMAIL>
"""
import unittest
import hashlib
import dateutil
from chalicelib.server import Server
import sys
import json
from datetime import datetime
sys.path.append('../chalicelib')
class AppTest(unittest.TestCase):
    """Unit tests for Server parsing, SNS publishing and DynamoDB
    persistence logic, using hand-rolled test doubles (TestSNS, TestLog,
    TestDynamoDB) instead of real AWS clients."""

    def setUp(self):
        # Fresh doubles for every test so call counters start at zero.
        self.sns_client = TestSNS()
        self.log = TestLog()
        self.dynamodb_device_data = TestDynamoDB()
        self.dynamodb_device = TestDynamoDB()
        # Example LoRa uplink message as delivered by the network provider.
        self.str_data = '{"DevEUI_uplink": {"Time": "2017-03-11T11:52:50.412+01:00","DevEUI": "0004A30B001C3306",' \
                        '"FPort": "7","FCntUp": "1","MType": "2","FCntDn": "2","payload_hex": "10bb17f18198100734",' \
                        '"mic_hex": "c00c1cfa","Lrcid": "00000127","LrrRSSI": "-64.000000","LrrSNR": "9.000000",' \
                        '"SpFact": "11","SubBand": "G1","Channel": "LC2","DevLrrCnt": "1","Lrrid": "08060412","Late":' \
                        ' "0","LrrLAT": "41.550377","LrrLON": "2.241691","Lrrs": {"Lrr": {"Lrrid": "08060412",' \
                        '"Chain": "0","LrrRSSI": "-64.000000","LrrSNR": "9.000000","LrrESP": "-64.514969"}},' \
                        '"CustomerID": "100001774",' \
                        '"CustomerData": {"alr":{"pro":"LORA/Generic","ver":"1"}},' \
                        '"ModelCfg": "0","DevAddr": "260113E2","AckRequested": "0",' \
                        '"rawMacCommands": "0703070307030703"}}'

    def test_parse_lora_json(self):
        """LoRa JSON parsing must extract time, payload, device id and a
        sha256 virtual transaction id."""
        jsonbody = json.loads(self.str_data)
        parsed_json = Server.parse_lora_json(self.str_data)
        time = jsonbody["DevEUI_uplink"]["Time"]
        payload = jsonbody["DevEUI_uplink"]["payload_hex"]
        device_id = jsonbody["DevEUI_uplink"]["DevAddr"]
        # virtual_tx uniquely identifies one transmission: hash of id + time.
        virtual_tx = device_id + "-" + time
        hash_object = hashlib.sha256(virtual_tx.encode())
        hex_dig = hash_object.hexdigest()
        # NOTE(review): strftime("%s") is platform-specific (not on Windows);
        # dt.timestamp() would be portable — confirm before changing.
        dt = dateutil.parser.parse(time)
        strftime = dt.strftime("%s")
        time_millis = int(strftime) * 1000
        self.assertEqual(parsed_json["time_json"], time)
        self.assertEqual(parsed_json["timeStamp"], int(time_millis))
        self.assertEqual(parsed_json["payload"], payload)
        self.assertEqual(parsed_json["DevEUI"], device_id)
        self.assertEqual(parsed_json["type"], "LORA")
        self.assertEqual(parsed_json["extra"], json.dumps(jsonbody))
        self.assertEqual(parsed_json["virtual_tx"], hex_dig)

    # http "https://d8dsx2bkn9.execute-api.eu-west-1.amazonaws.com/api/sigfox?time=1515360218&id=IDTest&data=02180AE4"
    def test_parse_sigfox(self):
        """Sigfox GET query params must be parsed into the common message dict."""
        data_dic = {
            "context": {
                "httpMethod": "GET",
                "identity": {
                    "sourceIp": "127.0.0.1"
                },
                "resourcePath": "/sigfox"
            },
            "headers": {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate",
                "connection": "keep-alive",
                "host": "localhost:8000",
                "user-agent": "HTTPie/0.9.8"
            },
            "method": "GET",
            "query_params": {
                "data": "10bb17f18198100734",
                "id": "260113E2",
                "time": "1515360218"
            },
            "stage_vars": {},
            "uri_params": {}
        }
        parsed_dic = Server.parse_sigfox_dic(data_dic)
        # Sigfox supplies an epoch timestamp; the ISO string is reconstructed.
        d = datetime.utcfromtimestamp(int("1515360218") * 1000 / 1e3)
        json_date = str(d.isoformat()) + "Z"
        virtual_tx = "260113E2" + "-" + json_date
        hash_object = hashlib.sha256(virtual_tx.encode())
        hex_dig = hash_object.hexdigest()
        self.assertEqual(parsed_dic["time_json"], json_date)
        self.assertEqual(parsed_dic["timeStamp"], int("1515360218"))
        self.assertEqual(parsed_dic["payload"], "10bb17f18198100734")
        self.assertEqual(parsed_dic["DevEUI"], "260113E2")
        self.assertEqual(parsed_dic["type"], "SIGFOX")
        self.assertEqual(parsed_dic["virtual_tx"], hex_dig)

    # http "https://d8dsx2bkn9.execute-api.eu-west-1.amazonaws.com/api/sigfox?time=1515360218&id=IDTest&data=02180AE4&test=test"
    def test_parse_sigfox_with_test_data(self):
        """An extra 'test' query param must be propagated into the parsed dict."""
        data_dic = {
            "method": "GET",
            "query_params": {
                "data": "10bb17f18198100734",
                "id": "260113E2",
                "time": "1515360218",
                "test": "test"
            },
            "stage_vars": {},
            "uri_params": {}
        }
        parsed_dic = Server.parse_sigfox_dic(data_dic)
        self.assertEqual(parsed_dic["timeStamp"], int("1515360218"))
        self.assertEqual(parsed_dic["payload"], "10bb17f18198100734")
        self.assertEqual(parsed_dic["DevEUI"], "260113E2")
        self.assertEqual(parsed_dic["type"], "SIGFOX")
        self.assertEqual(parsed_dic["test"], "test")

    def test_publishing_data_to_SNS(self):
        """Device data must be published once, JSON-encoded, to the store topic."""
        data_to_publish = {
            "DevEUI": "260113E3",
            "extra": {
                "DevEUI_uplink": {
                    "CustomerID": "100001774",
                    "DevAddr": "260113E3"
                }
            },
            "payload": "010000beef",
            "timeStamp": 1499366509000,
            "time_json": "2017-07-06T18:41:49.51+02:00",
            "type": "LORA",
            "virtual_tx": "2dd66154468fa5d433420f5bad5d3f580f3dab46fa33e127ef69c511f641ae2f"
        }
        server = Server(None, None, self.sns_client, self.log)
        expected_message = json.dumps(data_to_publish)
        server.publish_data_store_device(data_to_publish)
        self.assertEqual(1, self.sns_client.return_published_times())
        self.assertEqual(expected_message, self.sns_client.return_message())
        self.assertEqual("arn:aws:sns:eu-west-1:488643450383:StoreDeviceData", self.sns_client.return_topicarn())

    def test_persist_data_to_DynamoDB(self):
        """persist_data must forward the item unchanged to put_item exactly once."""
        server = Server(self.dynamodb_device_data, None, None, self.log)
        expected_item = {
            'title': "The Big New Movie",
            'year': 2015,
            'info': {
                'plot': "Nothing happens at all.",
                'rating': "0"
            }
        }
        server.persist_data(expected_item)
        self.assertEqual(1, self.dynamodb_device_data.return_persisted_times())
        self.assertEqual(expected_item, self.dynamodb_device_data.return_persisted_item())

    def test_parsing_none_known_payload(self):
        """A payload with an unknown packet id must yield no parsed result."""
        expected_item = {"virtual_tx": "A001", "time_json": "2017-01-21T12:12:12.001Z", "timeStamp": 1499366509000,
                         "payload": "A1bb17f18198100734",
                         "DevEUI": "260113E3", "type": "LORA", "extra": "{}"}
        geolocation = Server.parse_payload(expected_item)
        self.assertIsNone(geolocation)

    def test_parsing_geolocation_payload(self):
        """Packet id 0x10: lat/lng are decoded from 3-byte scaled integers."""
        expected_item = {"virtual_tx": "A001", "time_json": "2017-01-21T12:12:12.001Z", "timeStamp": 1499366509000,
                         "payload": "10bb17f18198100734",
                         "DevEUI": "260113E3", "type": "LORA", "extra": "{}"}
        geolocation = Server.parse_payload(expected_item)
        self.assertIsNotNone(geolocation)
        payload = expected_item["payload"]
        # 24-bit values mapped onto [-90, 90] and [-180, 180] ranges.
        lat_hex = payload[2:8]
        lat_str = int(lat_hex, 16)
        lat = (lat_str * 180 / 16777215) - 90
        lng_hex = payload[8:14]
        lng_str = int(lng_hex, 16)
        lng = (lng_str * 360 / 16777215) - 180
        self.assertEqual(1499366509000, geolocation["timeStamp"])
        self.assertIsNotNone(geolocation["GEO"])
        # AppTest.printGeoLocation(lat, lat_hex, lat_str, lng_hex, lng_str, payload, lng)
        self.assertEqual(str(lat), geolocation["GEO"]["lat"])
        self.assertEqual(str(lng), geolocation["GEO"]["lng"])

    # Example query:
    # http "https://d8dsx2bkn9.execute-api.eu-west-1.amazonaws.com/api/sigfox?time=1510098998&id=260113E3&data=02180AE4"
    def test_parsing_keep_alive_payload(self):
        """Packet id 0x02: keep-alive carries an interval byte and a mV voltage."""
        expected_item = {"virtual_tx": "A001", "time_json": "2017-01-21T12:12:12.001Z", "timeStamp": 1499366509000,
                         "payload": "02180AE4",
                         "DevEUI": "260113E3", "type": "LORA", "extra": "{}"}
        keep_alive = Server.parse_payload(expected_item)
        self.assertIsNotNone(keep_alive)
        payload = expected_item["payload"]
        interval = payload[2:4]
        interval_int = int(interval, 16)
        voltatge_hex = payload[4:8]
        # Voltage is transmitted in millivolts.
        voltatge_hex_dec = int(voltatge_hex, 16) / 1000
        self.assertEqual(1499366509000, keep_alive["timeStamp"])
        self.assertIsNotNone(keep_alive["KA"])
        self.assertEqual(str(interval_int), keep_alive["KA"]["interval"])
        self.assertEqual(str(voltatge_hex_dec), keep_alive["KA"]["voltage"])

    def test_dispatch_alarm_Keep_Alive_low_value(self):
        """A low battery voltage must trigger one alarm on the notify topic."""
        server = Server(None, None, self.sns_client, self.log)
        virtual_tx = "AE1234567"
        data = {"timeStamp": "1499366509000",
                "DevEUI": "260113E3",
                "KA":
                    {"interval": "24",
                     "voltage": "2.456"}}
        server.dispatch_alarm(virtual_tx, data)
        data.update({"virtual_tx": virtual_tx})
        expected_message = json.dumps(data)
        self.assertEqual(1, self.sns_client.return_published_times())
        self.assertEqual("arn:aws:sns:eu-west-1:488643450383:NotifySNS", self.sns_client.return_topicarn())
        self.assertEqual(expected_message, self.sns_client.return_message())
        self.assertEqual("Triggered Alarm 260113E3", self.sns_client.return_subject())

    def test_no_dispatch_alarm_for_Keep_Alive_high_value(self):
        """A healthy battery voltage must not publish any alarm."""
        server = Server(None, None, self.sns_client, self.log)
        data = {"timeStamp": "1499366509000",
                "DevEUI": "260113E3",
                "KA":
                    {"interval": "24",
                     "voltage": "2.856"}}
        server.dispatch_alarm("AE1234567", data)
        self.assertEqual(0, self.sns_client.return_published_times())

    def test_not_update_data_in_DynamoDB_if_None(self):
        """update_data(None) must be a no-op."""
        server = Server(self.dynamodb_device_data, None, None, self.log)
        expected_item = None
        server.update_data(expected_item)
        self.assertEqual(0, self.dynamodb_device_data.return_updated_times())

    def test_update_data_in_DynamoDB(self):
        """update_data must issue one update_item with key, expression and values."""
        server = Server(self.dynamodb_device_data, None, None, self.log)
        expected_item = {
            "timeStamp": 1499366509000,
            "DevEUI": "260113E3",
            "GEO": {"lat": "12.5", "lng": "1.4"}
        }
        server.update_data(expected_item)
        self.assertEqual(1, self.dynamodb_device_data.return_updated_times())
        self.assertEqual(
            {"timeStamp": 1499366509000, "DevEUI": "260113E3"},
            self.dynamodb_device_data.return_updated_item()["Key"])
        self.assertEqual(
            'SET geo = :val',
            self.dynamodb_device_data.return_updated_item()["UpdateExpression"])
        self.assertEqual(
            {':val': {"lat": "12.5", "lng": "1.4"}},
            self.dynamodb_device_data.return_updated_item()["ExpressionAttributeValues"])

    @staticmethod
    def printGeoLocation(lat, lat_hex, lat_str, lng_hex, lng_str, payload, lng):
        """Debug helper: dump the intermediate geolocation decoding values."""
        # Fixed: was a redundant chained assignment (str_packet_id = str_packet_id = ...).
        str_packet_id = payload[:2]
        print("payload:\t" + payload)
        print("packed_id:\t" + str_packet_id)
        print("lat_hex:\t" + lat_hex)
        print("lat_str\t" + str(lat_str))
        print("lat\t" + str(lat))
        print("lng_hex:\t" + lng_hex)
        print("lng_str:\t" + str(lng_str))
        print("lat: " + str(lat) + ", lng: " + str(lng))
class TestLog:
    """Minimal logger stand-in: records the last debug message and how
    many times debug() was called."""

    def __init__(self):
        self.message = ''
        self.logged = 0

    def debug(self, message):
        """Record *message*, bump the call counter and echo it back."""
        self.logged += 1
        self.message = message
        return message

    def return_message(self):
        """Last message passed to debug()."""
        return self.message

    def return_logging_times(self):
        """How many times debug() has been called."""
        return self.logged
class TestSNS:
    """SNS client test double: captures the arguments of the most recent
    publish() call and counts invocations."""

    def __init__(self):
        self.Message = ''
        self.TopicArn = ''
        self.Subject = ''
        self.published = 0

    def publish(self, TopicArn, Subject, Message):
        """Store the publish arguments and bump the call counter."""
        self.published += 1
        self.TopicArn = TopicArn
        self.Subject = Subject
        self.Message = Message

    def return_topicarn(self):
        return self.TopicArn

    def return_message(self):
        return self.Message

    def return_published_times(self):
        return self.published

    def return_subject(self):
        return self.Subject
class TestDynamoDB:
    """DynamoDB table test double: records the arguments of put_item()
    and update_item() calls and counts each."""

    def __init__(self):
        self.Item = ''
        self.persisted = 0
        self.updated = 0
        self.Key = ''
        self.UpdateExpression = ''
        self.ExpressionAttributeValues = ''
        self.ReturnValues = ''

    def put_item(self, Item):
        """Capture the persisted item and count the call."""
        self.persisted += 1
        self.Item = Item

    def update_item(self, Key, UpdateExpression, ExpressionAttributeValues, ReturnValues):
        """Capture all update arguments and count the call."""
        self.updated += 1
        self.Key = Key
        self.UpdateExpression = UpdateExpression
        self.ExpressionAttributeValues = ExpressionAttributeValues
        self.ReturnValues = ReturnValues

    def return_persisted_item(self):
        return self.Item

    def return_persisted_times(self):
        return self.persisted

    def return_updated_item(self):
        """Return the captured update_item() arguments as one dict."""
        return {
            "Key": self.Key,
            "UpdateExpression": self.UpdateExpression,
            "ExpressionAttributeValues": self.ExpressionAttributeValues,
            "ReturnValues": self.ReturnValues,
        }

    def return_updated_times(self):
        return self.updated
| StarcoderdataPython |
4850587 | <gh_stars>0
import copy
import datetime
import json
import logging
import os
import statistics
from cluster_vcf_records import vcf_file_read
from minos import dependencies, genotyper, utils
from minos import __version__ as minos_version
class Error (Exception): pass
def _build_json_file_is_good(json_build_report):
    '''Returns True iff the gramtools build_report.json at the given path
    exists and says that gramtools build ran successfully (i.e. the
    gramtools_cpp_build return value was zero).'''
    if not os.path.exists(json_build_report):
        return False

    with open(json_build_report) as f:
        build_report = json.load(f)

    try:
        returned_zero = build_report['gramtools_cpp_build']['return_value_is_0']
    except (KeyError, TypeError):
        # Report exists but does not have the expected nested structure.
        # (Was a bare `except:`, which also hid genuine bugs and
        # swallowed KeyboardInterrupt/SystemExit.)
        return False

    return returned_zero
def run_gramtools_build(outdir, vcf_file, ref_file, max_read_length, kmer_size=10):
    '''Runs gramtools build. Makes new directory called 'outdir' for
    the output.

    Raises Error if the build command fails (judged by its return code
    and by the build_report.json it writes), or if the expected
    perl-generated VCF file is not found afterwards.'''
    gramtools_exe = dependencies.find_binary('gramtools')
    build_command = ' '.join([
        gramtools_exe,
        'build',
        '--gram-directory', outdir,
        '--vcf', vcf_file,
        '--reference', ref_file,
        '--max-read-length', str(max_read_length),
        '--all-kmers',
        '--kmer-size', str(kmer_size),
    ])
    logging.info('Running gramtools build: ' + build_command)
    # allow_fail=True so we can inspect the return code and report file
    # ourselves instead of syscall raising.
    completed_process = utils.syscall(build_command, allow_fail=True)
    logging.info('Finished running gramtools build. Return code: ' + str(completed_process.returncode))
    build_report = os.path.join(outdir, 'build_report.json')
    # Success requires BOTH a zero return code and a good-looking report.
    ran_ok = _build_json_file_is_good(build_report) and completed_process.returncode == 0
    if not ran_ok:
        logging.info('Error running gramtools build. See build report file ' + build_report)
        raise Error('Error running gramtools build: ' + build_command)

    # Older gramtools called the perl generated VCF file perl_generated_vcf.
    # New gramtools calls it perl_generated.vcf.
    # Whichever one doesn't exist, symlink it to the one that does
    cwd = os.getcwd()
    os.chdir(outdir)
    vcf1 = 'perl_generated_vcf'
    vcf2 = 'perl_generated.vcf'
    if os.path.exists(vcf1):
        assert not os.path.exists(vcf2)
        os.symlink(vcf1, vcf2)
    elif os.path.exists(vcf2):
        assert not os.path.exists(vcf1)
        os.symlink(vcf2, vcf1)
    else:
        message = f'Could not find perl generated VCF file in directory {outdir}. Looked for {vcf1}, {vcf2}. Cannot continue'
        logging.error(message)
        raise Error(message)
    # Restore the working directory changed for the relative symlinks above.
    os.chdir(cwd)
    logging.info('Build report file looks good from gramtools build: ' + build_report)
def run_gramtools(build_dir, quasimap_dir, vcf_file, ref_file, reads, max_read_length, kmer_size=10, seed=42):
    '''If build_dir does not exist, runs gramtools build and quasimap.
    Otherwise, just runs quasimap. quasimap output is in new
    directory called quasimap_dir.
    "reads" can be one filename, or a list of filenames.
    Returns (build report dict, quasimap report dict) parsed from the
    JSON report files.
    Raises Error if either of the expected json coverage
    files made by quasimap are not found.'''
    gramtools_exe = dependencies.find_binary('gramtools')
    # Reuse an existing build directory; only build when it is absent.
    if not os.path.exists(build_dir):
        run_gramtools_build(build_dir, vcf_file, ref_file, max_read_length, kmer_size=kmer_size)

    # Normalise a single reads filename to a one-element list.
    if type(reads) is not list:
        assert type(reads) is str
        reads = [reads]

    quasimap_command = ' '.join([
        gramtools_exe,
        'quasimap',
        f'--seed {seed}',
        '--gram-directory', build_dir,
        '--output-directory', quasimap_dir,
        ' '.join(['--reads ' + x for x in reads]),
    ])
    logging.info('Running gramtools quasimap: ' + quasimap_command)
    utils.syscall(quasimap_command)
    logging.info('Finished running gramtools quasimap')

    # All four files below must exist for downstream genotyping to work.
    build_report = os.path.join(build_dir, 'build_report.json')
    quasimap_report = os.path.join(quasimap_dir, 'report.json')
    allele_base_counts_file = os.path.join(quasimap_dir, 'allele_base_coverage.json')
    grouped_allele_counts_file = os.path.join(quasimap_dir, 'grouped_allele_counts_coverage.json')
    files_ok = True
    for filename in build_report, quasimap_report, allele_base_counts_file, grouped_allele_counts_file:
        if not(os.path.exists(filename)):
            # Keep checking so every missing file gets logged before raising.
            files_ok = False
            logging.error('gramtools file not found: ' + filename)

    if not files_ok:
        error_message = 'Looks like something went wrong duing gramtools run. At least one output file not present. Cannot continue.'
        logging.error(error_message)
        raise Error(error_message)

    with open(build_report) as f:
        json_build_report = json.load(f)

    with open(quasimap_report) as f:
        json_quasimap_report = json.load(f)

    return json_build_report, json_quasimap_report
def load_gramtools_vcf_and_allele_coverage_files(vcf_file, quasimap_dir):
    '''Loads the perl_generated_vcf file and allele_coverage files.
    Sanity checks that they agree: 1) same number of records in the VCF
    (excluding header lines) and in the gramtools coverage output, and
    2) number of alleles agrees on each record.
    Raises Error at the first time something wrong is found.
    Returns a 6-tuple:
    (mean coverage, coverage variance, vcf header lines, vcf records,
    list of (allele combination coverage, allele per-base coverage) pairs,
    allele groups dict).'''
    allele_base_counts_file = os.path.join(quasimap_dir, 'allele_base_coverage.json')
    grouped_allele_counts_file = os.path.join(quasimap_dir, 'grouped_allele_counts_coverage.json')
    all_allele_coverage, allele_groups = load_allele_files(allele_base_counts_file, grouped_allele_counts_file)
    vcf_header, vcf_lines = vcf_file_read.vcf_file_to_list(vcf_file)
    coverages = []
    if len(all_allele_coverage) != len(vcf_lines):
        raise Error('Number of records in VCF (' + str(len(vcf_lines)) + ') does not match number output from gramtools.(' + str(len(all_allele_coverage)) + '). Cannot continue')

    for i, (allele_combi_coverage, allele_per_base_coverage) in enumerate(all_allele_coverage):
        # Per-base coverage has one entry for REF plus one per ALT allele.
        if len(allele_per_base_coverage) != 1 + len(vcf_lines[i].ALT):
            raise Error('Mismatch in number of alleles for this VCF record:\n' + str(vcf_lines[i]) + '\nLine number is ' + str(i+1))

        # Total read depth at this site = sum over all allele combinations.
        coverages.append(sum(allele_combi_coverage.values()))

    assert len(coverages) > 0
    # Unlikely to happen edge case on real data is when coverages has length 1.
    # It happens when running test_run in adjudicator_test, with a split VCf.
    # One of the splits only has 1 record.
    if len(coverages) == 1:
        variance = 1.000
    else:
        variance = round(statistics.variance(coverages), 3)
    return round(statistics.mean(coverages), 3), variance, vcf_header, vcf_lines, all_allele_coverage, allele_groups
def update_vcf_record_using_gramtools_allele_depths(vcf_record, allele_combination_cov, allele_per_base_cov, allele_groups_dict, mean_depth, read_error_rate, kmer_size):
    '''Genotypes one VCF record using gramtools coverage information.

    Side effect: vcf_record is modified in place -- its QUAL, FILTER,
    INFO and FORMAT columns (DP, GT, COV, GT_CONF) are overwritten with
    the genotyping results, keeping all original ALT alleles.

    Returns a deep copy of the updated record in which alleles with zero
    coverage (other than REF and any called allele) have been removed,
    with GT and COV renumbered accordingly.'''
    gtyper = genotyper.Genotyper(mean_depth, read_error_rate, allele_combination_cov, allele_per_base_cov, allele_groups_dict)
    gtyper.run()
    genotype_indexes = set()
    # gtyper.genotype contains '.' when no call could be made.
    if '.' in gtyper.genotype:
        genotype = './.'
    else:
        # Collect the 0-based allele indexes that were called (0 == REF,
        # i+1 == i-th ALT).
        if 0 in gtyper.genotype:
            genotype_indexes.add(0)
        for i in range(len(vcf_record.ALT)):
            if i + 1 in gtyper.genotype:
                genotype_indexes.add(i+1)
        if len(genotype_indexes) == 1:
            # Homozygous call: repeat the single allele index as 'n/n'.
            genotype_index = genotype_indexes.pop()
            genotype = str(genotype_index) + '/' + str(genotype_index)
            genotype_indexes.add(genotype_index)
        else:
            genotype = '/'.join([str(x) for x in sorted(list(genotype_indexes))])
    # Per-allele coverage values in REF-then-ALTs order; alleles with no
    # singleton coverage get 0.
    cov_values = [gtyper.singleton_alleles_cov.get(x, 0) for x in range(1 + len(vcf_record.ALT))]
    cov_string = ','.join([str(x) for x in cov_values])
    vcf_record.QUAL = None
    vcf_record.FILTER = '.'
    vcf_record.INFO = {'KMER': str(kmer_size)}
    vcf_record.format_keys = ['DP', 'GT', 'COV', 'GT_CONF']
    vcf_record.FORMAT = {
        'DP': str(sum(allele_combination_cov.values())),
        'GT': genotype,
        'COV': cov_string,
        'GT_CONF': str(gtyper.genotype_confidence)
    }

    # Make new record where all zero coverage alleles are removed
    filtered_record = copy.deepcopy(vcf_record)
    if genotype in ['./.', '0/0']:
        # Null call or hom-ref: nothing to filter out.
        return filtered_record

    # Keep REF (index 0), every allele with non-zero coverage, and every
    # called allele.
    indexes_to_keep = set([i for i in range(len(cov_values)) if i == 0 or cov_values[i] > 0])
    indexes_to_keep.update(genotype_indexes)
    indexes_to_keep = list(indexes_to_keep)
    indexes_to_keep.sort()
    filtered_record.FORMAT['COV'] = ','.join([str(cov_values[i]) for i in indexes_to_keep])
    assert indexes_to_keep[0] == 0
    # ALT list is 0-based, hence the i-1 shift; index 0 (REF) is skipped.
    filtered_record.ALT = [filtered_record.ALT[i-1] for i in indexes_to_keep[1:]]

    # The indexes of the genotype string 'n/m' are shifted because
    # we probably removed some alleles
    genotype_strings = {vcf_record.REF if i == 0 else vcf_record.ALT[i-1] for i in genotype_indexes}
    new_genotype_indexes = set()
    if 0 in genotype_indexes:
        new_genotype_indexes.add(0)
    for i, genotype_string in enumerate(filtered_record.ALT):
        if genotype_string in genotype_strings:
            new_genotype_indexes.add(i+1)
            # Stop once every called allele has been re-located.
            if len(genotype_strings) == len(new_genotype_indexes):
                break

    new_genotype_indexes = list(new_genotype_indexes)
    if len(new_genotype_indexes) == 1:
        # Homozygous: duplicate the index so GT is always 'n/m'.
        new_genotype_indexes.append(new_genotype_indexes[0])
    assert len(new_genotype_indexes) == 2
    filtered_record.FORMAT['GT'] = '/'.join([str(x) for x in new_genotype_indexes])
    return filtered_record
def write_vcf_annotated_using_coverage_from_gramtools(mean_depth, vcf_records, all_allele_coverage, allele_groups, read_error_rate, outfile, kmer_size, sample_name='SAMPLE', max_read_length=None, filtered_outfile=None):
    '''mean_depth, vcf_records, all_allele_coverage, allele_groups should be those
    returned by load_gramtools_vcf_and_allele_coverage_files().
    Writes a new VCF (outfile) that has allele counts for all the ALTs.
    If filtered_outfile is given, also writes a second VCF containing the
    same records with zero-coverage alleles removed.'''
    assert len(vcf_records) == len(all_allele_coverage)
    header_lines = [
        '##fileformat=VCFv4.2',
        '##source=minos, version ' + minos_version,
        '##fileDate=' + str(datetime.date.today()),
        '##FORMAT=<ID=COV,Number=R,Type=Integer,Description="Number of reads on ref and alt alleles">',
        '##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">',
        '##FORMAT=<ID=DP,Number=1,Type=Integer,Description="total kmer depth from gramtools",Source="minos">',
        '##FORMAT=<ID=GT_CONF,Number=1,Type=Float,Description="Genotype confidence. Difference in log likelihood of most likely and next most likely genotype">',
        '##INFO=<ID=KMER,Number=1,Type=Integer,Description="Kmer size at which variant was discovered (kmer-size used by gramtools build)">',
    ]
    if max_read_length is not None:
        header_lines.append('##minos_max_read_length=' + str(max_read_length))
    header_lines.append('\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', sample_name]))
    if filtered_outfile is not None:
        f_filter = open(filtered_outfile, 'w')
        print(*header_lines, sep='\n', file=f_filter)

    with open(outfile, 'w') as f:
        print(*header_lines, sep='\n', file=f)
        for i in range(len(vcf_records)):
            logging.debug('Genotyping: ' + str(vcf_records[i]))
            # update_vcf_record_using_gramtools_allele_depths mutates
            # vcf_records[i] in place (that annotated record goes to outfile)
            # and returns the filtered copy for filtered_outfile.
            filtered_record = update_vcf_record_using_gramtools_allele_depths(vcf_records[i], all_allele_coverage[i][0], all_allele_coverage[i][1], allele_groups, mean_depth, read_error_rate, kmer_size)
            print(vcf_records[i], file=f)
            if filtered_outfile is not None:
                print(filtered_record, file=f_filter)

    if filtered_outfile is not None:
        f_filter.close()
def load_allele_files(allele_base_counts_file, grouped_allele_counts_file):
    '''Loads the allele base counts and grouped allele counts files
    made by gramtools quasimap.

    Returns a tuple:
      - list of (site_counts entry, allele_base_counts entry) pairs,
        one per VCF site
      - dict of allele group id -> set of allele indexes

    Raises Error if either file is missing an expected key, or if the
    two files disagree on the number of sites.'''
    with open(allele_base_counts_file) as f:
        json_base_counts_data = json.load(f)

    with open(grouped_allele_counts_file) as f:
        json_allele_counts_data = json.load(f)

    # The bare `except:` clauses below were narrowed to (KeyError, TypeError):
    # those are the only errors a missing/misshapen key can raise here, and
    # a bare except also hid real bugs and swallowed KeyboardInterrupt.
    try:
        allele_base_counts = json_base_counts_data['allele_base_counts']
    except (KeyError, TypeError):
        raise Error('Error in json file ' + allele_base_counts_file + '. allele_base_counts not found.')

    try:
        site_counts = json_allele_counts_data['grouped_allele_counts']['site_counts']
    except (KeyError, TypeError):
        raise Error('Error in json file ' + grouped_allele_counts_file + '. site_counts not found.')

    try:
        allele_groups = json_allele_counts_data['grouped_allele_counts']['allele_groups']
    except (KeyError, TypeError):
        raise Error('Error in json file ' + grouped_allele_counts_file + '. allele_groups not found.')

    if len(allele_base_counts) != len(site_counts):
        raise Error('Mismatch between number of records in json files ' + allele_base_counts_file + ' and ' + grouped_allele_counts_file)

    # JSON has no set type, so the allele groups arrive as lists; convert
    # to sets for fast membership tests downstream.
    for key, value in allele_groups.items():
        allele_groups[key] = set(value)

    return list(zip(site_counts, allele_base_counts)), allele_groups
| StarcoderdataPython |
# Thin CLI wrapper: validate the two required file-path arguments, then
# delegate the actual work to multiples_of_x_and_y.run().
import os
import sys
from multiples_of_x_and_y import run

# sys.argv[0] is the script name, so exactly two user-supplied arguments
# (input path and output path) must be present.
assert len(sys.argv) == 3, "please provide input_file_path and output_file_path"
input_file_path = sys.argv[1]
output_file_path = sys.argv[2]
# The input must already exist; the output must not, to avoid clobbering.
assert os.path.isfile(input_file_path), "'{}' does not exist".format(input_file_path)
assert not os.path.isfile(output_file_path), "'{}' must not exist".format(output_file_path)
run(input_file_path, output_file_path)
3427958 | <reponame>sodadata/soda-core
from __future__ import annotations
import pytest
from soda.execution.data_type import DataType
from tests.helpers.common_test_tables import (
customers_dist_check_test_table,
customers_profiling,
customers_test_table,
orders_test_table,
)
from tests.helpers.data_source_fixture import DataSourceFixture
def test_discover_tables(data_source_fixture: DataSourceFixture):
    """Discovering a single dataset must report its full column schema
    with data-source-specific type names and column-name casing."""
    table_name = data_source_fixture.ensure_test_table(customers_profiling)

    scan = data_source_fixture.create_test_scan()
    mock_soda_cloud = scan.enable_mock_soda_cloud()
    scan.add_sodacl_yaml_str(
        f"""
        discover datasets:
          datasets:
            - include {table_name}
    """
    )
    scan.execute(allow_warnings_only=True)
    # remove the data source name because it's a pain to test
    discover_tables_result = mock_soda_cloud.pop_scan_result()
    assert discover_tables_result is not None
    actual_metadatas = discover_tables_result["metadata"]
    actual_metadata = actual_metadatas[0]
    actual_schema = actual_metadata["schema"]

    # Build the expected schema through the data source's own type/casing
    # mapping so the test works across all supported data sources.
    data_source = data_source_fixture.data_source
    to_ds_type = data_source.get_sql_type_for_schema_check
    to_ds_case = data_source.default_casify_column_name

    expected_schema = [
        {"columnName": to_ds_case("id"), "sourceDataType": to_ds_type(DataType.TEXT)},
        {"columnName": to_ds_case("size"), "sourceDataType": to_ds_type(DataType.DECIMAL)},
        {"columnName": to_ds_case("sizeTxt"), "sourceDataType": to_ds_type(DataType.TEXT)},
        {"columnName": to_ds_case("distance"), "sourceDataType": to_ds_type(DataType.INTEGER)},
        {"columnName": to_ds_case("pct"), "sourceDataType": to_ds_type(DataType.TEXT)},
        {"columnName": to_ds_case("cat"), "sourceDataType": to_ds_type(DataType.TEXT)},
        {"columnName": to_ds_case("country"), "sourceDataType": to_ds_type(DataType.TEXT)},
        {"columnName": to_ds_case("zip"), "sourceDataType": to_ds_type(DataType.TEXT)},
        {"columnName": to_ds_case("email"), "sourceDataType": to_ds_type(DataType.TEXT)},
        {"columnName": to_ds_case("date"), "sourceDataType": to_ds_type(DataType.DATE)},
        {"columnName": to_ds_case("ts"), "sourceDataType": to_ds_type(DataType.TIMESTAMP)},
        {"columnName": to_ds_case("ts_with_tz"), "sourceDataType": to_ds_type(DataType.TIMESTAMP_TZ)},
    ]
    assert actual_schema == expected_schema
def test_discover_tables_customer_wildcard(data_source_fixture: DataSourceFixture):
    """A %customers% wildcard include must discover exactly the three
    matching tables (and not the orders table)."""
    data_source_fixture.ensure_test_table(customers_test_table)
    data_source_fixture.ensure_test_table(orders_test_table)
    data_source_fixture.ensure_test_table(customers_profiling)
    data_source_fixture.ensure_test_table(customers_dist_check_test_table)

    scan = data_source_fixture.create_test_scan()
    mock_soda_cloud = scan.enable_mock_soda_cloud()
    scan.add_sodacl_yaml_str(
        f"""
        discover datasets:
          datasets:
            - include %customers%
    """
    )
    scan.execute(allow_warnings_only=True)

    discover_tables_result = mock_soda_cloud.pop_scan_result()
    assert discover_tables_result is not None
    # customers, customers_profiling and customers_dist match; orders does not.
    assert len(discover_tables_result["metadata"]) == 3
@pytest.mark.skip("Ask Milan, the test is identical with above")
def test_discover_tables_customer_wildcard_with_exclude(data_source_fixture: DataSourceFixture):
    """Wildcard include plus an exclude pattern: the excluded dataset
    must not be discovered.

    NOTE(review): renamed from test_discover_tables_customer_wildcard --
    the duplicate name shadowed the previous test at module level, so
    pytest never collected the un-skipped version. Also, the skip reason
    claims this test is identical to the one above, but it differs by the
    `exclude` clause; confirm with the author before unskipping.
    """
    data_source_fixture.ensure_test_table(customers_test_table)
    data_source_fixture.ensure_test_table(orders_test_table)
    data_source_fixture.ensure_test_table(customers_profiling)
    data_source_fixture.ensure_test_table(customers_dist_check_test_table)

    scan = data_source_fixture.create_test_scan()
    mock_soda_cloud = scan.enable_mock_soda_cloud()
    scan.add_sodacl_yaml_str(
        f"""
        discover datasets:
          datasets:
            - include sodatest_cust%
            - exclude sodatest_customersdist_%
    """
    )
    scan.execute(allow_warnings_only=True)

    discover_tables_result = mock_soda_cloud.pop_scan_result()
    assert discover_tables_result is not None
    # Two tables match the include and survive the exclude.
    assert len(discover_tables_result["metadata"]) == 2
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.