Dataset schema (28 columns):

| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 .. 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 .. 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 .. 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 .. 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 .. 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 .. 2023-09-06 06:22:19 |
| github_id | int64 | 426 .. 681M |
| star_events_count | int64 | 101 .. 243k |
| fork_events_count | int64 | 0 .. 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 .. 2023-09-14 21:59:16 (nullable) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 .. 2023-08-10 11:14:58 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 .. 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 .. 113 |
| content | string | length 6 .. 10.2M |

Row 1 metadata:

| field | value |
|---|---|
| blob_id | a5efc8fceda774f00ffd1f55d10a779d6bd33bbb |
| directory_id | 52439d16e492fc072a8fbec462f886a9327da0ba |
| path | /internalblue/hcicore.py |
| content_id | 97b8fd9dc76c176019b213940ed583d6bdcddd58 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | seemoo-lab/internalblue |
| snapshot_id | 73a90b874a1f2035201c9217bf8582568cfd1441 |
| revision_id | 9a7ddea09ef045ca1cd36e39be1c6775cfce40f4 |
| branch_name | refs/heads/master |
| visit_date | 2023-09-03T04:23:25.718196 |
| revision_date | 2022-11-25T15:03:43 |
| committer_date | 2022-11-25T15:03:43 |
| github_id | 147,357,670 |
| star_events_count | 618 |
| fork_events_count | 83 |
| gha_license_id | null |
| gha_event_created_at | 2022-11-25T15:03:45 |
| gha_created_at | 2018-09-04T14:17:46 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 16,542 |
| extension | py |
| filename | hcicore.py |

content:
#!/usr/bin/env python2
from __future__ import absolute_import
import datetime
import fcntl
import queue as queue2k
import socket
import struct
import threading
from builtins import range
from builtins import str
from builtins import zip
from ctypes import *
from typing import List, cast, TYPE_CHECKING
from future import standard_library
from . import hci
from .core import InternalBlue
from .utils.packing import p16, u16, p32, u32
if TYPE_CHECKING:
from internalblue import Device
standard_library.install_aliases()
class sockaddr_hci(Structure):
_fields_ = [
("sin_family", c_ushort),
("hci_dev", c_ushort),
("hci_channel", c_ushort),
]
# from /usr/include/bluetooth/hci.h:
# define HCIDEVUP _IOW('H', 201, int)
# define HCIGETDEVLIST _IOR('H', 210, int)
# define HCIGETDEVINFO _IOR('H', 211, int)
# ioctl numbers. see http://code.activestate.com/recipes/578225-linux-ioctl-numbers-in-python/
def _IOR(_type, nr, size):
return 2 << 30 | _type << 8 | nr << 0 | size << 16
def _IOW(_type, nr, size):
return 1 << 30 | _type << 8 | nr << 0 | size << 16
HCIDEVUP = _IOW(ord("H"), 201, 4)
HCIGETDEVLIST = _IOR(ord("H"), 210, 4)
HCIGETDEVINFO = _IOR(ord("H"), 211, 4)
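# Editorial sanity check: these helpers reproduce the kernel's well-known
# values, e.g. _IOW(ord("H"), 201, 4) == 0x400448C9 (HCIDEVUP) and
# _IOR(ord("H"), 210, 4) == 0x800448D2 (HCIGETDEVLIST). The direction bits
# sit in bits 30-31, the size in bits 16-29, the 'H' type in bits 8-15, and
# the command number in bits 0-7.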
class HCICore(InternalBlue):
def __init__(
self,
queue_size=1000,
btsnooplog_filename="btsnoop.log",
log_level="info",
data_directory=".",
replay=False,
user_channel=False,
):
super(HCICore, self).__init__(
queue_size,
btsnooplog_filename,
log_level,
data_directory,
replay,
)
self.btsnooplog_file_lock = threading.Lock()
self.serial = False
self.doublecheck = False
self.user_channel = user_channel
def getHciDeviceList(self):
# type: () -> List[Device]
"""
Get a list of available HCI devices. The list is obtained by executing
ioctl syscalls HCIGETDEVLIST and HCIGETDEVINFO. The returned list
contains dictionaries with the following fields:
dev_id : Internal ID of the device (e.g. 0)
dev_name : Name of the device (e.g. "hci0")
dev_bdaddr : MAC address (e.g. "00:11:22:33:44:55")
dev_flags : Device flags as decimal number
dev_flags_str : Device flags as String (e.g. "UP RUNNING" or "DOWN")
"""
# Open Bluetooth socket to execute ioctl's:
try:
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI)
# Ticket 6: does not run on Windows with Kali subsystem
except socket.error:
self.logger.warn(
"Opening a local Bluetooth socket failed. Not running on native Linux?"
)
return []
# Do ioctl(s,HCIGETDEVLIST,arg) to get the number of available devices:
# arg is struct hci_dev_list_req (/usr/include/bluetooth/hci.h)
arg = p32(16) # dl->dev_num = HCI_MAX_DEV which is 16 (little endian)
arg += b"\x00" * (8 * 16)
devices_raw = fcntl.ioctl(s.fileno(), HCIGETDEVLIST, arg)
num_devices = u16(devices_raw[:2])
self.logger.debug("Found %d HCI devices via ioctl(HCIGETDEVLIST)!" % num_devices)
device_list = []
for dev_nr in range(num_devices):
dev_struct_start = 4 + 8 * dev_nr
dev_id = u16(devices_raw[dev_struct_start: dev_struct_start + 2])
# arg is struct hci_dev_info (/usr/include/bluetooth/hci.h)
arg = p16(dev_id) # di->dev_id = <device_id>
arg += b"\x00" * 20 # Enough space for name, bdaddr and flags
dev_info_raw = bytearray(fcntl.ioctl(s.fileno(), HCIGETDEVINFO, arg))
dev_name = dev_info_raw[2:10].replace(b"\x00", b"").decode()
dev_bdaddr = ":".join(["%02X" % x for x in dev_info_raw[10:16][::-1]])
dev_flags = u32(dev_info_raw[16:20])
if dev_flags == 0:
dev_flags_str = "DOWN"
else:
dev_flags_str = " ".join(
[
name
for flag, name in zip(
bin(dev_flags)[2:][::-1],
[
"UP",
"INIT",
"RUNNING",
"PSCAN",
"ISCAN",
"AUTH",
"ENCRYPT",
"INQUIRY",
"RAW",
"RESET",
],
)
if flag == "1"
]
)
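# Worked example (editorial note): dev_flags == 0b101 sets bit 0 ("UP")
# and bit 2 ("RUNNING"), so dev_flags_str becomes "UP RUNNING".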
device_list.append(
{
"dev_id": dev_id,
"dev_name": dev_name,
"dev_bdaddr": dev_bdaddr,
"dev_flags": dev_flags,
"dev_flags_str": dev_flags_str,
}
)
s.close()
return cast("List[Device]", device_list)
def bringHciDeviceUp(self, dev_id):
"""
Uses HCIDEVUP ioctl to bring HCI device with id dev_id up.
Requires root privileges (CAP_NET_ADMIN).
"""
if dev_id < 0 or dev_id > 16:
self.logger.warn("bringHciDeviceUp: Invalid device id: %d." % dev_id)
return False
# Open bluetooth socket to execute ioctl's:
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI)
# Do ioctl(s, HCIDEVUP, dev_id) to bring device up:
try:
fcntl.ioctl(s.fileno(), HCIDEVUP, dev_id)
s.close()
self.logger.info("Device with id=%d was set up successfully!" % dev_id)
return True
except IOError as e:
s.close()
self.logger.warn("Error returned by ioctl: %s" % str(e))
return False
def device_list(self):
"""
Return a list of connected hci devices.
"""
if self.replay:
return [(self, "hci_replay", "hci: ReplaySocket")]
device_list = []
for dev in self.getHciDeviceList():
self.logger.info(
"HCI device: %s [%s] flags=%d<%s>"
% (
dev["dev_name"],
dev["dev_bdaddr"],
dev["dev_flags"],
dev["dev_flags_str"],
)
)
device_list.append(
(
self,
dev["dev_name"],
"hci: %s (%s) <%s>"
% (dev["dev_bdaddr"], dev["dev_name"], dev["dev_flags_str"]),
)
)
if len(device_list) == 0:
self.logger.info("No connected HCI device found")
return cast("List[Device]", device_list)
def local_connect(self):
"""
"""
if not self.interface:
self.logger.warn("No HCI identifier is set")
return False
if self.user_channel:
success = self._setupSocketsUserChannel()
else:
success = self._setupSockets()
if not success:
self.logger.critical("HCI socket could not be established!")
return False
return True
def _btsnoop_pack_time(self, time):
"""
Takes datetime object and returns microseconds since 2000-01-01.
see https://github.com/joekickass/python-btsnoop
Record time is a 64-bit signed integer representing the time of packet arrival,
in microseconds since midnight, January 1st, 0 AD nominal Gregorian.
In order to avoid leap-day ambiguity in calculations, note that an equivalent
epoch may be used of midnight, January 1st 2000 AD, which is represented in
this field as 0x00E03AB44A676000.
"""
time_betw_0_and_2000_ad = int("0x00E03AB44A676000", 16)
time_since_2000_epoch = time - datetime.datetime(2000, 1, 1)
packed_time = time_since_2000_epoch + datetime.timedelta(
microseconds=time_betw_0_and_2000_ad
)
return int(packed_time.total_seconds() * 1000 * 1000)
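# Worked example (editorial note): _btsnoop_pack_time(datetime.datetime(2000, 1, 1))
# yields exactly 0x00E03AB44A676000, the year-2000 epoch offset quoted in the
# docstring above, since the delta to the 2000-01-01 epoch is zero.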
def _recvThreadFunc(self):
"""
This is the run-function of the recvThread. It receives HCI events from the
s_snoop socket. The HCI packets are encapsulated in btsnoop records (see RFC 1761).
Received HCI packets are being put into the queues inside registeredHciRecvQueues and
passed to the callback functions inside registeredHciCallbacks.
The thread stops when exit_requested is set to True. It will do that on its own
if it encounters a fatal error or the stackDumpReceiver reports that the chip crashed.
"""
self.logger.debug("Receive Thread started.")
while not self.exit_requested:
# Read the record data
try:
record_data = self.s_snoop.recv(1024)
record_data = bytearray(record_data)
except socket.timeout:
continue # this is ok. just try again without error
except Exception as e:
self.logger.critical(
"Lost device interface with exception {}, terminating receive thread...".format(
e
)
)
self.exit_requested = True
continue
# btsnoop record header data:
btsnoop_orig_len = len(record_data)
btsnoop_inc_len = len(record_data)
btsnoop_flags = 0
btsnoop_drops = 0
btsnoop_time = datetime.datetime.now()
if btsnoop_orig_len == 0:
continue
# Put all relevant infos into a tuple. The HCI packet is parsed with the help of hci.py.
record = (
hci.parse_hci_packet(record_data),
btsnoop_orig_len,
btsnoop_inc_len,
btsnoop_flags,
btsnoop_drops,
btsnoop_time,
)
self.logger.debug(
"_recvThreadFunc Recv: [" + str(btsnoop_time) + "] " + str(record[0])
)
# Write to btsnoop file:
if self.write_btsnooplog:
btsnoop_record_hdr = struct.pack(
">IIIIq",
btsnoop_orig_len,
btsnoop_inc_len,
btsnoop_flags,
btsnoop_drops,
self._btsnoop_pack_time(btsnoop_time),
)
with self.btsnooplog_file_lock:
self.btsnooplog_file.write(btsnoop_record_hdr)
self.btsnooplog_file.write(record_data)
self.btsnooplog_file.flush()
# Put the record into all queues of registeredHciRecvQueues if their
# filter function matches.
for queue, filter_function in self.registeredHciRecvQueues:
if filter_function is None or filter_function(record):
try:
queue.put(record, block=False)
except queue2k.Full:
self.logger.warn(
"recvThreadFunc: A recv queue is full. dropping packets.."
)
# Call all callback functions inside registeredHciCallbacks and pass the
# record as argument.
for callback in self.registeredHciCallbacks:
callback(record)
# Check if the stackDumpReceiver has noticed that the chip crashed.
# if self.stackDumpReceiver.stack_dump_has_happened:
# A stack dump has happened!
# self.logger.warn("recvThreadFunc: The controller send a stack dump.")
# self.exit_requested = True
self.logger.debug("Receive Thread terminated.")
def _writeBTSnoopHeader(self):
# Write Header to btsnoop file (if file is still empty):
if self.write_btsnooplog and self.btsnooplog_file.tell() == 0:
# BT Snoop Header: btsnoop\x00, version: 1, data link type: 1002
btsnoop_hdr = (
b"btsnoop\x00" + p32(1, endian="big") + p32(1002, endian="big")
)
with self.btsnooplog_file_lock:
self.btsnooplog_file.write(btsnoop_hdr)
self.btsnooplog_file.flush()
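# Editorial note: the complete 16-byte header is
# b"btsnoop\x00" + b"\x00\x00\x00\x01" + b"\x00\x00\x03\xea",
# i.e. the magic string, version 1, and datalink type 1002 (HCI UART, H4),
# with both fields big-endian.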
def _setupSockets(self):
"""
Linux already allows opening HCI sockets to Bluetooth devices;
they include H4 information, so we simply use them.
"""
# Check if hci device is in state "UP". If not, set it to "UP" (requires root)
device = [
dev for dev in self.getHciDeviceList() if dev["dev_name"] == self.interface
]
if len(device) == 0:
self.logger.warn("Device not found: " + self.interface)
return False
device = device[0]
if device["dev_flags"] == 0:
self.logger.warn("Device %s is DOWN!" % self.interface)
self.logger.info("Trying to set %s to state 'UP' (requires root)" % self.interface)
if not self.bringHciDeviceUp(device["dev_id"]):
self.logger.warn("Failed to bring up %s." % self.interface)
return False
# TODO unload btusb module and check error messages here to give the user some output if sth fails
# Connect to HCI socket
self.s_snoop = socket.socket(
socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI
)
self.s_snoop.setsockopt(socket.SOL_HCI, socket.HCI_DATA_DIR, 1)
self.s_snoop.setsockopt(socket.SOL_HCI, socket.HCI_TIME_STAMP, 1)
"""
struct hci_filter {
uint32_t type_mask; -> 4
uint32_t event_mask[2]; -> 8
uint16_t opcode; -> 2
};
"""
# TODO still seems to only forward incoming events?!
self.s_snoop.setsockopt(
socket.SOL_HCI,
socket.HCI_FILTER,
b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00",
) # type mask, event mask, event mask, opcode
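# Editorial sketch: the 16-byte literal above is an "accept everything"
# filter and could equivalently be built from the struct layout documented
# above (14 bytes of fields plus 2 bytes of padding):
#   struct.pack("<IIIH", 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0) + b"\x00\x00"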
interface_num = device["dev_id"]
self.logger.debug("Socket interface number: %s" % interface_num)
self.s_snoop.bind((interface_num,))
self.s_snoop.settimeout(2)
self.logger.debug("_setupSockets: Bound socket.")
# same socket for input and output (this is different from adb here!)
self.s_inject = self.s_snoop
self._writeBTSnoopHeader()
return True
def _setupSocketsUserChannel(self):
"""
Python's socket API does not allow to set up an HCI User Channel
so we need to use ctypes here. Most parts of this are taken from
scapy's code (https://github.com/secdev/scapy/blob/master/scapy/layers/bluetooth.py#L1482)
"""
sockaddr_hcip = POINTER(sockaddr_hci)
cdll.LoadLibrary("libc.so.6")
libc = CDLL("libc.so.6")
socket_c = libc.socket
socket_c.argtypes = (c_int, c_int, c_int)
socket_c.restype = c_int
bind = libc.bind
bind.argtypes = (c_int, POINTER(sockaddr_hci), c_int)
bind.restype = c_int
s = socket_c(31, 3, 1) # (AF_BLUETOOTH, SOCK_RAW, HCI_CHANNEL_USER)
if s < 0:
self.logger.error("Unable to open PF_BLUETOOTH socket")
sa = sockaddr_hci()
sa.sin_family = 31 # AF_BLUETOOTH
sa.hci_dev = 0 # adapter index
sa.hci_channel = 1 # HCI_CHANNEL_USER
r = bind(s, sockaddr_hcip(sa), sizeof(sa))
if r != 0:
self.logger.error("Unable to bind")
self.s_snoop = socket.fromfd(s, 31, 3, 1)
# same socket for input and output (this is different from adb here!)
self.s_inject = self.s_snoop
self._writeBTSnoopHeader()
return True
def _teardownSockets(self):
"""
Close the s_snoop and s_inject socket (both refer to the same socket).
"""
if self.s_inject is not None:
self.s_inject.close()
self.s_inject = None
self.s_snoop = None
return True

Row 2 metadata:

| field | value |
|---|---|
| blob_id | 80e66e3d65f791312617d876c7271cdc34482c3f |
| directory_id | 2d18e7bdf1875240d77739b91e748eaeda6edc95 |
| path | /drain3/redis_persistence.py |
| content_id | b2cfd67809ee19405065ef09098269116ad0dec1 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | logpai/Drain3 |
| snapshot_id | dc3c74d3e3f9530794e3b2a0868f7de4e58f598f |
| revision_id | 76d12defdeec14da3794d451875b781669f62acf |
| branch_name | refs/heads/master |
| visit_date | 2023-09-01T21:02:04.078069 |
| revision_date | 2023-08-31T22:16:14 |
| committer_date | 2023-08-31T22:16:14 |
| github_id | 243,277,573 |
| star_events_count | 111 |
| fork_events_count | 31 |
| gha_license_id | NOASSERTION |
| gha_event_created_at | 2023-08-31T22:16:16 |
| gha_created_at | 2020-02-26T14:06:21 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 842 |
| extension | py |
| filename | redis_persistence.py |

content:
# SPDX-License-Identifier: MIT
import redis
from drain3.persistence_handler import PersistenceHandler
class RedisPersistence(PersistenceHandler):
def __init__(self, redis_host, redis_port, redis_db, redis_pass, is_ssl, redis_key):
self.redis_host = redis_host
self.redis_port = redis_port
self.redis_db = redis_db
self.redis_pass = redis_pass
self.is_ssl = is_ssl
self.redis_key = redis_key
self.r = redis.Redis(host=self.redis_host,
port=self.redis_port,
db=self.redis_db,
password=self.redis_pass,
ssl=self.is_ssl)
def save_state(self, state):
self.r.set(self.redis_key, state)
def load_state(self):
return self.r.get(self.redis_key)
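# Usage sketch (editorial; argument values are illustrative, Drain3 normally
# constructs this handler from its configuration):
#
#   handler = RedisPersistence(redis_host="localhost", redis_port=6379,
#                              redis_db=0, redis_pass="", is_ssl=False,
#                              redis_key="drain3:state")
#   handler.save_state(b"<serialized template-miner state>")
#   state = handler.load_state()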

Row 3 metadata:

| field | value |
|---|---|
| blob_id | 5256c613cd6f06fdbed5c056a46b4c3ef4e39440 |
| directory_id | 4570d4339e498fa8caaaad6db7296704562d0532 |
| path | /deeplearning/twitter_sentiment/twitter_glove.py |
| content_id | 772b701144eff348d814deb382e1eaa735758e42 |
| detected_licenses | ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer"] |
| license_type | permissive |
| repo_name | sassoftware/sas-viya-programming |
| snapshot_id | 81e024035a2fec55a17006672fd15069dcdfc8a5 |
| revision_id | 947f16955fc7e94b73b5aa5a59010e90abd11130 |
| branch_name | refs/heads/master |
| visit_date | 2023-05-24T22:21:07.696235 |
| revision_date | 2023-05-12T19:26:58 |
| committer_date | 2023-05-12T19:26:58 |
| github_id | 62,091,838 |
| star_events_count | 146 |
| fork_events_count | 154 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-03-25T01:24:58 |
| gha_created_at | 2016-06-27T22:11:06 |
| gha_language | Jupyter Notebook |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 3,920 |
| extension | py |
| filename | twitter_glove.py |

content:
# Python 3
# Reads stdin: python preprocess-twitter.py
#
# Script for preprocessing tweets by Romain Paulus
# with small modifications by Jeffrey Pennington
# Converted to python by Sam leeman-Munk
#"tokenize" has been renamed to "normalize" with a helper function that normalizes and calls split
# http://nlp.stanford.edu/projects/glove/preprocess-twitter.rb
import re
def process_hashtag(hashtag): # Split hashtags on uppercase letters
#TODO - recognize all caps words and don't split them
hashtag_body = hashtag.group(0)[1:]
if re.search('[A-Z]', hashtag_body) and hashtag_body.upper() == hashtag_body:
return "<hashtag> " + hashtag_body.lower() + " <allcaps>"
else:
return "<hashtag>" + re.sub(r"([A-Z])", r" \1", hashtag_body).lower()
def normalize(tweet):
pieces = dict(
eyes="[8:=;]",
nose=r"['`\-]?",
prechar=r"[^a-zA-Z0\d:-]" # the previous character - to avoid stuff like command: becoming comma <smile>
)
tweet = re.sub(r"https?:\/\/\S+\b|www\.(\w+\.)+\S*", "<url>", tweet)
tweet = re.sub(r"({prechar})({eyes}{nose}[\\/|l*]+|[\\/|l*]+{eyes}{nose})".format(**pieces), r"\1<neutralface>", tweet)
tweet = re.sub(r"/", " / ", tweet)
tweet = re.sub(r"@\w+", "<user>", tweet)
tweet = re.sub(r"({prechar})({eyes}{nose}[)dD]+|[(dD]+{nose}{eyes})".format(**pieces), r"\1<smile>", tweet)
tweet = re.sub(r"({prechar}){eyes}{nose}[pP]+".format(**pieces), " <lolface>", tweet)
tweet = re.sub(r"({prechar})({eyes}{nose}\(+|\)+{nose}{eyes})".format(**pieces), r"\1<sadface>", tweet)
tweet = re.sub(r"<3", "<heart>", tweet)
tweet = re.sub(r"#[a-zA-Z0-9_]+", process_hashtag,
tweet
)
tweet = re.sub(r"[-+]?[.\d]*[\d]+[:,.\d]*", "<number>", tweet)
# Mark punctuation repetitions (eg. "!!!" => "! <REPEAT>")
tweet = re.sub(r"([!?.]){2,}",
r"\1 <repeat>",
tweet)
# Mark elongated words (eg. "wayyyy" => "way <ELONG>")
# TODO: determine if the end letter should be repeated once or twice (use lexicon/dict)
tweet = re.sub(r"\b(\S*?)(.)\2{2,}\b",
r"\1\2 <elong>",
tweet)
tweet = re.sub(r"([A-Z]){2,}", lambda word: word.group(0).lower() + " <allcaps> ", tweet)
#Not sure why this was missing. Code to add spaces between tokens that normally are given together to make
#tokenization easier
tweet = re.sub("([$!?.,'\"\(\)&;])", r" \1 ", tweet)
tweet = re.sub("(>)", r"\1 ", tweet)
tweet = re.sub(r"\s+", " ", tweet)
return tweet
def tokenize(tweet):
return normalize(tweet).split(" ")
if __name__ == "__main__":
tweet = "http://rocko.com good/bad @milord :) :-) 8) (: " \
+ ":ppppp :/// ///: :( :| :l l; <3 664.3 " \
+ "13212 #ThisIsAHashtag !!! wahoooooo! YEEEhaw YEEEHAW"\
+ "#ALLCAPS"
print(normalize(tweet))
print(tokenize(tweet))
for token,expected in zip(tokenize(tweet),
['<url>', 'good', '/', 'bad', '<user>', '<smile>', '<smile>', '<smile>', '<smile>',
'<lolface>', '<neutralface>', '<neutralface>', '<sadface>', '<neutralface>',
'<neutralface>', '<neutralface>', '<heart>', '<number>', '<number>', '<hashtag>',
'this', 'is', 'a', 'hashtag', '!', '<repeat>', 'waho', '<elong>', '!', 'yeee',
'<allcaps>', 'haw', 'yeeehaw', '<allcaps>', '<hashtag>', 'allcaps', '<allcaps>', '']):
assert token==expected, token+"!="+expected
tweet = "!!! overhaul: 2001:death-6 #1hashtag #AHASHTAG. &:) #1201"
print(normalize(tweet))
#Known Bug
tweet = "#MACBaseball"
print("Known bugs")
print (tweet)
print (normalize(tweet))
print ("should be <hashtag> mac <allcaps> baseball")
print("Can't fix without breaking #ThisisAHashtag")

Row 4 metadata:

| field | value |
|---|---|
| blob_id | ba64dbfa864159fe074e2046b19e86f8a8f5de03 |
| directory_id | ddaa20f2ff0aaed4c6beeba888c4213405fdd586 |
| path | /package/make-deb.py |
| content_id | 790248e57ea50863c364ce7cb42db5c22f1e44f4 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | mosquito/pypi-server |
| snapshot_id | 689fb84dd0cc56a70c7bfa6157b8defa76d774d8 |
| revision_id | 825571aae6fd17616e404ad8a9b72ef791a4fc46 |
| branch_name | refs/heads/master |
| visit_date | 2023-08-17T14:17:50.177008 |
| revision_date | 2021-11-14T17:11:52 |
| committer_date | 2021-11-14T17:11:52 |
| github_id | 47,583,364 |
| star_events_count | 129 |
| fork_events_count | 58 |
| gha_license_id | MIT |
| gha_event_created_at | 2021-11-14T17:11:53 |
| gha_created_at | 2015-12-07T22:30:53 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 1,851 |
| extension | py |
| filename | make-deb.py |

content:
import os
from subprocess import check_output
import plumbum
from plumbum.cmd import grep, fpm, ln, sort, find, virtualenv
import logging
log = logging.getLogger()
logging.basicConfig(level=logging.INFO)
ENV_PATH = os.getenv("ENV_PATH", "/usr/share/python3/pypi-server")
SRC_PATH = os.getenv("SRC_PATH", "/mnt")
pip = plumbum.local[os.path.join(ENV_PATH, 'bin', 'pip3')]
log.info("Creating virtualenv %r", ENV_PATH)
virtualenv['-p', 'python3', ENV_PATH] & plumbum.FG
log.info("Installing package %r", SRC_PATH)
pip['install', '--no-binary=:all:', '-U', "{}[postgres]".format(SRC_PATH)] & plumbum.FG
pip['install', '--no-binary=:all:', "{}[proxy]".format(SRC_PATH)] & plumbum.FG
pip['install', '--no-binary=:all:', "{}[mysql]".format(SRC_PATH)] & plumbum.FG
ln['-snf', os.path.join(ENV_PATH, "bin", "pypi-server"), "/usr/bin/pypi-server"] & plumbum.BG
version = (pip['show', 'pypi-server'] | grep['^Version']) & plumbum.BG
version.wait()
version = version.stdout.strip().replace("Version:", '').strip()
args = (
'-s', 'dir',
'-f', '-t', 'deb',
'--iteration', os.getenv('ITERATION', '0'),
'-n', 'pypi-server',
'--config-files', '/etc/pypi-server.conf',
'--deb-systemd', '/mnt/contrib/pypi-server.service',
'-v', version,
'-p', "/mnt/dist",
'-d', 'python3',
'-d', 'python3-distutils',
)
# Discover binary dependencies: run ldd over every native extension (.so)
# in the virtualenv and map the resolved libraries back to the owning
# Debian packages with dpkg -S.
depends = check_output((
r'find %s -iname "*.so" -exec ldd {} \; | '
'''awk '{print $1}' | '''
'sort -u | '
'xargs dpkg -S | '
'''awk '{print $1}' | '''
'sort -u | '
'''cut -d ':' -f1 | sort -u'''
) % ENV_PATH, shell=True).decode('utf-8').splitlines()
for depend in depends:
args += ('-d', depend)
args += (
'{0}/={0}/'.format(ENV_PATH),
'/usr/bin/pypi-server=/usr/bin/pypi-server',
'/mnt/contrib/pypi-server.conf.example=/etc/pypi-server.conf',
)
fpm[args] & plumbum.FG

Row 5 metadata:

| field | value |
|---|---|
| blob_id | 78fe6eb0038a886009fbf51e67294d4ecab78058 |
| directory_id | 1095cfe2e29ddf4e4c5e12d713bd12f45c9b6f7d |
| path | /configs/common/Options.py |
| content_id | 8344d9fd44a44b4dede50a756e74f2b6f97dedb4 |
| detected_licenses | ["BSD-3-Clause", "LicenseRef-scancode-proprietary-license", "LGPL-2.0-or-later", "MIT"] |
| license_type | permissive |
| repo_name | gem5/gem5 |
| snapshot_id | 9ec715ae036c2e08807b5919f114e1d38d189bce |
| revision_id | 48a40cf2f5182a82de360b7efa497d82e06b1631 |
| branch_name | refs/heads/stable |
| visit_date | 2023-09-03T15:56:25.819189 |
| revision_date | 2023-08-31T05:53:03 |
| committer_date | 2023-08-31T05:53:03 |
| github_id | 27,425,638 |
| star_events_count | 1,185 |
| fork_events_count | 1,177 |
| gha_license_id | BSD-3-Clause |
| gha_event_created_at | 2023-09-14T08:29:31 |
| gha_created_at | 2014-12-02T09:46:00 |
| gha_language | C++ |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 27,682 |
| extension | py |
| filename | Options.py |

content:
# Copyright (c) 2013-2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from common.Benchmarks import *
from common import ObjectList
vio_9p_help = """\
Enable the Virtio 9P device and set the path to share. The default 9p path is
m5out/9p/share, and it can be changed by setting VirtIO9p.root with --param. A
sample guest mount command is: "mount -t 9p -o
trans=virtio,version=9p2000.L,aname=<host-full-path> gem5 /mnt/9p" where
"<host-full-path>" is the full path being shared on the host, and "gem5" is a
fixed mount tag. This option requires the diod 9P server to be installed in the
host PATH or selected with: VirtIO9PDiod.diod.
"""
class ListCpu(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ObjectList.cpu_list.print()
sys.exit(0)
class ListBp(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ObjectList.bp_list.print()
sys.exit(0)
class ListHWP(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ObjectList.hwp_list.print()
sys.exit(0)
class ListRP(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ObjectList.rp_list.print()
sys.exit(0)
class ListIndirectBP(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ObjectList.indirect_bp_list.print()
sys.exit(0)
class ListMem(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ObjectList.mem_list.print()
sys.exit(0)
class ListPlatform(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ObjectList.platform_list.print()
sys.exit(0)
# Add the very basic options that work also in the case of the no ISA
# being used, and consequently no CPUs, but rather various types of
# testers and traffic generators.
def addNoISAOptions(parser):
parser.add_argument("-n", "--num-cpus", type=int, default=1)
parser.add_argument(
"--sys-voltage",
action="store",
type=str,
default="1.0V",
help="""Top-level voltage for blocks running at system
power supply""",
)
parser.add_argument(
"--sys-clock",
action="store",
type=str,
default="1GHz",
help="""Top-level clock for blocks running at system
speed""",
)
# Memory Options
parser.add_argument(
"--list-mem-types",
action=ListMem,
nargs=0,
help="List available memory types",
)
parser.add_argument(
"--mem-type",
default="DDR3_1600_8x8",
choices=ObjectList.mem_list.get_names(),
help="type of memory to use",
)
parser.add_argument(
"--mem-channels", type=int, default=1, help="number of memory channels"
)
parser.add_argument(
"--mem-ranks",
type=int,
default=None,
help="number of memory ranks per channel",
)
parser.add_argument(
"--mem-size",
action="store",
type=str,
default="512MB",
help="Specify the physical memory size (single memory)",
)
parser.add_argument(
"--enable-dram-powerdown",
action="store_true",
help="Enable low-power states in DRAMInterface",
)
parser.add_argument(
"--mem-channels-intlv",
type=int,
default=0,
help="Memory channels interleave",
)
parser.add_argument("--memchecker", action="store_true")
# Cache Options
parser.add_argument(
"--external-memory-system",
type=str,
help="use external ports of this port_type for caches",
)
parser.add_argument(
"--tlm-memory",
type=str,
help="use external port for SystemC TLM cosimulation",
)
parser.add_argument("--caches", action="store_true")
parser.add_argument("--l2cache", action="store_true")
parser.add_argument("--num-dirs", type=int, default=1)
parser.add_argument("--num-l2caches", type=int, default=1)
parser.add_argument("--num-l3caches", type=int, default=1)
parser.add_argument("--l1d_size", type=str, default="64kB")
parser.add_argument("--l1i_size", type=str, default="32kB")
parser.add_argument("--l2_size", type=str, default="2MB")
parser.add_argument("--l3_size", type=str, default="16MB")
parser.add_argument("--l1d_assoc", type=int, default=2)
parser.add_argument("--l1i_assoc", type=int, default=2)
parser.add_argument("--l2_assoc", type=int, default=8)
parser.add_argument("--l3_assoc", type=int, default=16)
parser.add_argument("--cacheline_size", type=int, default=64)
# Enable Ruby
parser.add_argument("--ruby", action="store_true")
# Run duration options
parser.add_argument(
"-m",
"--abs-max-tick",
type=int,
default=m5.MaxTick,
metavar="TICKS",
help="Run to absolute simulated tick "
"specified including ticks from a restored checkpoint",
)
parser.add_argument(
"--rel-max-tick",
type=int,
default=None,
metavar="TICKS",
help="Simulate for specified number of"
" ticks relative to the simulation start tick (e.g. if "
"restoring a checkpoint)",
)
parser.add_argument(
"--maxtime",
type=float,
default=None,
help="Run to the specified absolute simulated time in seconds",
)
parser.add_argument(
"-P",
"--param",
action="append",
default=[],
help="Set a SimObject parameter relative to the root node. "
"An extended Python multi range slicing syntax can be used "
"for arrays. For example: "
"'system.cpu[0,1,3:8:2].max_insts_all_threads = 42' "
"sets max_insts_all_threads for cpus 0, 1, 3, 5 and 7 "
"Direct parameters of the root object are not accessible, "
"only parameters of its children.",
)
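# Usage sketch (editorial, not part of gem5 itself): these helpers populate
# an existing argparse parser, e.g.
#   parser = argparse.ArgumentParser()
#   addNoISAOptions(parser)
#   args = parser.parse_args(["--mem-size", "1GB", "--caches"])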
# Add common options that assume a non-NULL ISA.
def addCommonOptions(parser):
# start by adding the base options that do not assume an ISA
addNoISAOptions(parser)
# system options
parser.add_argument(
"--list-cpu-types",
action=ListCpu,
nargs=0,
help="List available CPU types",
)
parser.add_argument(
"--cpu-type",
default="AtomicSimpleCPU",
choices=ObjectList.cpu_list.get_names(),
help="type of cpu to run with",
)
parser.add_argument(
"--list-bp-types",
action=ListBp,
nargs=0,
help="List available branch predictor types",
)
parser.add_argument(
"--list-indirect-bp-types",
action=ListIndirectBP,
nargs=0,
help="List available indirect branch predictor types",
)
parser.add_argument(
"--bp-type",
default=None,
choices=ObjectList.bp_list.get_names(),
help="""
type of branch predictor to run with
(if not set, use the default branch predictor of
the selected CPU)""",
)
parser.add_argument(
"--indirect-bp-type",
default=None,
choices=ObjectList.indirect_bp_list.get_names(),
help="type of indirect branch predictor to run with",
)
parser.add_argument(
"--list-rp-types",
action=ListRP,
nargs=0,
help="List available replacement policy types",
)
parser.add_argument(
"--list-hwp-types",
action=ListHWP,
nargs=0,
help="List available hardware prefetcher types",
)
parser.add_argument(
"--l1i-hwp-type",
default=None,
choices=ObjectList.hwp_list.get_names(),
help="""
type of hardware prefetcher to use with the L1
instruction cache.
(if not set, use the default prefetcher of
the selected cache)""",
)
parser.add_argument(
"--l1d-hwp-type",
default=None,
choices=ObjectList.hwp_list.get_names(),
help="""
type of hardware prefetcher to use with the L1
data cache.
(if not set, use the default prefetcher of
the selected cache)""",
)
parser.add_argument(
"--l2-hwp-type",
default=None,
choices=ObjectList.hwp_list.get_names(),
help="""
type of hardware prefetcher to use with the L2 cache.
(if not set, use the default prefetcher of
the selected cache)""",
)
parser.add_argument("--checker", action="store_true")
parser.add_argument(
"--cpu-clock",
action="store",
type=str,
default="2GHz",
help="Clock for blocks running at CPU speed",
)
parser.add_argument(
"--smt",
action="store_true",
default=False,
help="""
Only used if multiple programs are specified. If true,
then the number of threads per cpu is same as the
number of programs.""",
)
parser.add_argument(
"--elastic-trace-en",
action="store_true",
help="""Enable capture of data dependency and instruction
fetch traces using elastic trace probe.""",
)
# Trace file paths input to trace probe in a capture simulation and input
# to Trace CPU in a replay simulation
parser.add_argument(
"--inst-trace-file",
action="store",
type=str,
help="""Instruction fetch trace file input to
Elastic Trace probe in a capture simulation and
Trace CPU in a replay simulation""",
default="",
)
parser.add_argument(
"--data-trace-file",
action="store",
type=str,
help="""Data dependency trace file input to
Elastic Trace probe in a capture simulation and
Trace CPU in a replay simulation""",
default="",
)
# dist-gem5 options
parser.add_argument(
"--dist",
action="store_true",
help="Parallel distributed gem5 simulation.",
)
parser.add_argument(
"--dist-sync-on-pseudo-op",
action="store_true",
help="Use a pseudo-op to start dist-gem5 synchronization.",
)
parser.add_argument(
"--is-switch",
action="store_true",
help="Select the network switch simulator process for a"
"distributed gem5 run",
)
parser.add_argument(
"--dist-rank",
default=0,
action="store",
type=int,
help="Rank of this system within the dist gem5 run.",
)
parser.add_argument(
"--dist-size",
default=0,
action="store",
type=int,
help="Number of gem5 processes within the dist gem5 run.",
)
parser.add_argument(
"--dist-server-name",
default="127.0.0.1",
action="store",
type=str,
help="Name of the message server host\nDEFAULT: localhost",
)
parser.add_argument(
"--dist-server-port",
default=2200,
action="store",
type=int,
help="Message server listen port\nDEFAULT: 2200",
)
parser.add_argument(
"--dist-sync-repeat",
default="0us",
action="store",
type=str,
help="Repeat interval for synchronisation barriers among "
"dist-gem5 processes\nDEFAULT: --ethernet-linkdelay",
)
parser.add_argument(
"--dist-sync-start",
default="5200000000000t",
action="store",
type=str,
help="Time to schedule the first dist synchronisation barrier\n"
"DEFAULT:5200000000000t",
)
parser.add_argument(
"--ethernet-linkspeed",
default="10Gbps",
action="store",
type=str,
help="Link speed in bps\nDEFAULT: 10Gbps",
)
parser.add_argument(
"--ethernet-linkdelay",
default="10us",
action="store",
type=str,
help="Link delay in seconds\nDEFAULT: 10us",
)
# Run duration options
parser.add_argument(
"-I",
"--maxinsts",
action="store",
type=int,
default=None,
help="""Total number of instructions to
simulate (default: run forever)""",
)
parser.add_argument(
"--work-item-id",
action="store",
type=int,
help="the specific work id for exit & checkpointing",
)
parser.add_argument(
"--num-work-ids",
action="store",
type=int,
help="Number of distinct work item types",
)
parser.add_argument(
"--work-begin-cpu-id-exit",
action="store",
type=int,
help="exit when work starts on the specified cpu",
)
parser.add_argument(
"--work-end-exit-count",
action="store",
type=int,
help="exit at specified work end count",
)
parser.add_argument(
"--work-begin-exit-count",
action="store",
type=int,
help="exit at specified work begin count",
)
parser.add_argument(
"--init-param",
action="store",
type=int,
default=0,
help="""Parameter available in simulation with m5
initparam""",
)
parser.add_argument(
"--initialize-only",
action="store_true",
default=False,
help="""Exit after initialization. Do not simulate time.
Useful when gem5 is run as a library.""",
)
# Simpoint options
parser.add_argument(
"--simpoint-profile",
action="store_true",
help="Enable basic block profiling for SimPoints",
)
parser.add_argument(
"--simpoint-interval",
type=int,
default=10000000,
help="SimPoint interval in num of instructions",
)
parser.add_argument(
"--take-simpoint-checkpoints",
action="store",
type=str,
help="<simpoint file,weight file,interval-length,warmup-length>",
)
parser.add_argument(
"--restore-simpoint-checkpoint",
action="store_true",
default=False,
help="restore from a simpoint checkpoint taken with "
+ "--take-simpoint-checkpoints",
)
# Checkpointing options
# Note that performing checkpointing via python script files will override
# checkpoint instructions built into binaries.
parser.add_argument(
"--take-checkpoints",
action="store",
type=str,
help="<M,N> take checkpoints at tick M and every N ticks thereafter",
)
parser.add_argument(
"--max-checkpoints",
action="store",
type=int,
help="the maximum number of checkpoints to drop",
default=5,
)
parser.add_argument(
"--checkpoint-dir",
action="store",
type=str,
help="Place all checkpoints in this absolute directory",
)
parser.add_argument(
"-r",
"--checkpoint-restore",
action="store",
type=int,
help="restore from checkpoint <N>",
)
parser.add_argument(
"--checkpoint-at-end",
action="store_true",
help="take a checkpoint at end of run",
)
parser.add_argument(
"--work-begin-checkpoint-count",
action="store",
type=int,
help="checkpoint at specified work begin count",
)
parser.add_argument(
"--work-end-checkpoint-count",
action="store",
type=int,
help="checkpoint at specified work end count",
)
parser.add_argument(
"--work-cpus-checkpoint-count",
action="store",
type=int,
help="checkpoint and exit when active cpu count is reached",
)
parser.add_argument(
"--restore-with-cpu",
action="store",
default="AtomicSimpleCPU",
choices=ObjectList.cpu_list.get_names(),
help="cpu type for restoring from a checkpoint",
)
# CPU Switching - default switch model goes from a checkpoint
# to a timing simple CPU with caches to warm up, then to detailed CPU for
# data measurement
parser.add_argument(
"--repeat-switch",
action="store",
type=int,
default=None,
help="switch back and forth between CPUs with period <N>",
)
parser.add_argument(
"-s",
"--standard-switch",
action="store",
type=int,
default=None,
help="switch from timing to Detailed CPU after warmup period of <N>",
)
parser.add_argument(
"-p", "--prog-interval", type=str, help="CPU Progress Interval"
)
# Fastforwarding and simpoint related materials
parser.add_argument(
"-W",
"--warmup-insts",
action="store",
type=int,
default=None,
help="Warmup period in total instructions (requires --standard-switch)",
)
parser.add_argument(
"--bench",
action="store",
type=str,
default=None,
help="base names for --take-checkpoint and --checkpoint-restore",
)
parser.add_argument(
"-F",
"--fast-forward",
action="store",
type=str,
default=None,
help="Number of instructions to fast forward before switching",
)
parser.add_argument(
"-S",
"--simpoint",
action="store_true",
default=False,
help="""Use workload simpoints as an instruction offset for
--checkpoint-restore or --take-checkpoint.""",
)
parser.add_argument(
"--at-instruction",
action="store_true",
default=False,
help="""Treat value of --checkpoint-restore or --take-checkpoint as a
number of instructions.""",
)
parser.add_argument(
"--spec-input",
default="ref",
choices=["ref", "test", "train", "smred", "mdred", "lgred"],
help="Input set size for SPEC CPU2000 benchmarks.",
)
parser.add_argument(
"--arm-iset",
default="arm",
choices=["arm", "thumb", "aarch64"],
help="ARM instruction set.",
)
parser.add_argument(
"--stats-root",
action="append",
default=[],
help="If given, dump only stats of objects under the given SimObject. "
"SimObjects are identified with Python notation as in: "
"system.cpu[0].mmu. All elements of an array can be selected at "
"once with: system.cpu[:].mmu. If given multiple times, dump stats "
"that are present under any of the roots. If not given, dump all "
"stats. ",
)
parser.add_argument(
"--override-vendor-string",
action="store",
type=str,
default=None,
help="Override vendor string returned by CPUID instruction in X86.",
)
def addSEOptions(parser):
# Benchmark options
parser.add_argument(
"-c",
"--cmd",
default="",
help="The binary to run in syscall emulation mode.",
)
parser.add_argument(
"-o",
"--options",
default="",
help="""The options to pass to the binary, use
around the entire string""",
)
parser.add_argument(
"-e",
"--env",
default="",
help="Initialize workload environment from text file.",
)
parser.add_argument(
"-i", "--input", default="", help="Read stdin from a file."
)
parser.add_argument(
"--output", default="", help="Redirect stdout to a file."
)
parser.add_argument(
"--errout", default="", help="Redirect stderr to a file."
)
parser.add_argument(
"--chroot",
action="store",
type=str,
default=None,
help="The chroot option allows a user to alter the "
"search path for processes running in SE mode. "
"Normally, the search path would begin at the "
"root of the filesystem (i.e. /). With chroot, "
"a user can force the process to begin looking at"
"some other location (i.e. /home/user/rand_dir)."
"The intended use is to trick sophisticated "
"software which queries the __HOST__ filesystem "
"for information or functionality. Instead of "
"finding files on the __HOST__ filesystem, the "
"process will find the user's replacment files.",
)
parser.add_argument(
"--interp-dir",
action="store",
type=str,
default=None,
help="The interp-dir option is used for "
"setting the interpreter's path. This will "
"allow to load the guest dynamic linker/loader "
"itself from the elf binary. The option points to "
"the parent folder of the guest /lib in the "
"host fs",
)
parser.add_argument(
"--redirects",
action="append",
type=str,
default=[],
help="A collection of one or more redirect paths "
"to be used in syscall emulation."
"Usage: gem5.opt [...] --redirects /dir1=/path/"
"to/host/dir1 --redirects /dir2=/path/to/host/dir2",
)
parser.add_argument(
"--wait-gdb",
default=False,
action="store_true",
help="Wait for remote GDB to connect.",
)
def addFSOptions(parser):
from common.FSConfig import os_types
# Simulation options
parser.add_argument(
"--timesync",
action="store_true",
help="Prevent simulated time from getting ahead of real time",
)
# System options
parser.add_argument("--kernel", action="store", type=str)
parser.add_argument(
"--os-type",
action="store",
choices=os_types,
default="linux",
help="Specifies type of OS to boot",
)
parser.add_argument("--script", action="store", type=str)
parser.add_argument(
"--frame-capture",
action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "
"files in the gem5 output directory",
)
if buildEnv["USE_ARM_ISA"]:
parser.add_argument(
"--bare-metal",
action="store_true",
help="Provide the raw system without the linux specific bits",
)
parser.add_argument(
"--list-machine-types",
action=ListPlatform,
nargs=0,
help="List available platform types",
)
parser.add_argument(
"--machine-type",
action="store",
choices=ObjectList.platform_list.get_names(),
default="VExpress_GEM5_V1",
)
parser.add_argument(
"--dtb-filename",
action="store",
type=str,
help="Specifies device tree blob file to use with device-tree-"
"enabled kernels",
)
parser.add_argument(
"--enable-context-switch-stats-dump",
action="store_true",
help="Enable stats dump at context "
"switches and dump tasks file (required for Streamline)",
)
parser.add_argument("--vio-9p", action="store_true", help=vio_9p_help)
parser.add_argument(
"--bootloader",
action="append",
help="executable file that runs before the --kernel",
)
# Benchmark options
parser.add_argument(
"--dual",
action="store_true",
help="Simulate two systems attached with an ethernet link",
)
parser.add_argument(
"-b",
"--benchmark",
action="store",
type=str,
dest="benchmark",
help=f"Specify the benchmark to run. Available benchmarks: {DefinedBenchmarks}",
)
# Metafile options
parser.add_argument(
"--etherdump",
action="store",
type=str,
dest="etherdump",
help="Specify the filename to dump a pcap capture of the"
"ethernet traffic",
)
# Disk Image Options
parser.add_argument(
"--disk-image",
action="append",
type=str,
default=[],
help="Path to the disk images to use.",
)
parser.add_argument(
"--root-device",
action="store",
type=str,
default=None,
help="OS device name for root partition",
)
# Command line options
parser.add_argument(
"--command-line",
action="store",
type=str,
default=None,
help="Template for the kernel command line.",
)
parser.add_argument(
"--command-line-file",
action="store",
default=None,
type=str,
help="File with a template for the kernel command line",
)
# Debug option
parser.add_argument(
"--wait-gdb",
default=False,
action="store_true",
help="Wait for remote GDB to connect.",
)

Row 6 metadata:

| field | value |
|---|---|
| blob_id | b05084838d1d69857e4c3b5b9b0c62b2cf830a1c |
| directory_id | 2ddbb1e257155ac583ac2caa08d4e0bbfdb35e2d |
| path | /models/GAN3D.py |
| content_id | 6a5feaf6c9bba14ca24e39040e0edf2e804627c9 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | black0017/3D-GAN-pytorch |
| snapshot_id | 703d0d3675ca8e206f2dd22a79bffd6ff0ac9f34 |
| revision_id | 47c338a9787b190c76001f8c6d4f154ddb84a831 |
| branch_name | refs/heads/master |
| visit_date | 2022-07-11T13:50:55.882918 |
| revision_date | 2022-07-04T09:34:41 |
| committer_date | 2022-07-04T09:34:41 |
| github_id | 252,830,682 |
| star_events_count | 128 |
| fork_events_count | 21 |
| gha_license_id | MIT |
| gha_event_created_at | 2022-07-04T09:34:42 |
| gha_created_at | 2020-04-03T20:10:49 |
| gha_language | Jupyter Notebook |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 5,379 |
| extension | py |
| filename | GAN3D.py |

content:
import torch
import torch.nn as nn
from torchsummary import summary
"""
Implementation based on original paper NeurIPS 2016
https://papers.nips.cc/paper/6096-learning-a-probabilistic-latent-space-of-object-shapes-via-3d-generative-adversarial-modeling.pdf
"""
class Discriminator(torch.nn.Module):
def __init__(self, in_channels=1, dim=64, out_conv_channels=512):
super(Discriminator, self).__init__()
conv1_channels = int(out_conv_channels / 8)
conv2_channels = int(out_conv_channels / 4)
conv3_channels = int(out_conv_channels / 2)
self.out_conv_channels = out_conv_channels
self.out_dim = int(dim / 16)
self.conv1 = nn.Sequential(
nn.Conv3d(
in_channels=in_channels, out_channels=conv1_channels, kernel_size=4,
stride=2, padding=1, bias=False
),
nn.BatchNorm3d(conv1_channels),
nn.LeakyReLU(0.2, inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv3d(
in_channels=conv1_channels, out_channels=conv2_channels, kernel_size=4,
stride=2, padding=1, bias=False
),
nn.BatchNorm3d(conv2_channels),
nn.LeakyReLU(0.2, inplace=True)
)
self.conv3 = nn.Sequential(
nn.Conv3d(
in_channels=conv2_channels, out_channels=conv3_channels, kernel_size=4,
stride=2, padding=1, bias=False
),
nn.BatchNorm3d(conv3_channels),
nn.LeakyReLU(0.2, inplace=True)
)
self.conv4 = nn.Sequential(
nn.Conv3d(
in_channels=conv3_channels, out_channels=out_conv_channels, kernel_size=4,
stride=2, padding=1, bias=False
),
nn.BatchNorm3d(out_conv_channels),
nn.LeakyReLU(0.2, inplace=True)
)
self.out = nn.Sequential(
nn.Linear(out_conv_channels * self.out_dim * self.out_dim * self.out_dim, 1),
nn.Sigmoid(),
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
# Flatten and apply linear + sigmoid
x = x.view(-1, self.out_conv_channels * self.out_dim * self.out_dim * self.out_dim)
x = self.out(x)
return x
class Generator(torch.nn.Module):
def __init__(self, in_channels=512, out_dim=64, out_channels=1, noise_dim=200, activation="sigmoid"):
super(Generator, self).__init__()
self.in_channels = in_channels
self.out_dim = out_dim
self.in_dim = int(out_dim / 16)
conv1_out_channels = int(self.in_channels / 2.0)
conv2_out_channels = int(conv1_out_channels / 2)
conv3_out_channels = int(conv2_out_channels / 2)
self.linear = torch.nn.Linear(noise_dim, in_channels * self.in_dim * self.in_dim * self.in_dim)
self.conv1 = nn.Sequential(
nn.ConvTranspose3d(
in_channels=in_channels, out_channels=conv1_out_channels, kernel_size=(4, 4, 4),
stride=2, padding=1, bias=False
),
nn.BatchNorm3d(conv1_out_channels),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.ConvTranspose3d(
in_channels=conv1_out_channels, out_channels=conv2_out_channels, kernel_size=(4, 4, 4),
stride=2, padding=1, bias=False
),
nn.BatchNorm3d(conv2_out_channels),
nn.ReLU(inplace=True)
)
self.conv3 = nn.Sequential(
nn.ConvTranspose3d(
in_channels=conv2_out_channels, out_channels=conv3_out_channels, kernel_size=(4, 4, 4),
stride=2, padding=1, bias=False
),
nn.BatchNorm3d(conv3_out_channels),
nn.ReLU(inplace=True)
)
self.conv4 = nn.Sequential(
nn.ConvTranspose3d(
in_channels=conv3_out_channels, out_channels=out_channels, kernel_size=(4, 4, 4),
stride=2, padding=1, bias=False
)
)
if activation == "sigmoid":
self.out = torch.nn.Sigmoid()
else:
self.out = torch.nn.Tanh()
def project(self, x):
"""
projects and reshapes latent vector to starting volume
:param x: latent vector
:return: starting volume
"""
return x.view(-1, self.in_channels, self.in_dim, self.in_dim, self.in_dim)
def forward(self, x):
x = self.linear(x)
x = self.project(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
return self.out(x)
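# Shape trace (editorial note), for the defaults noise_dim=200, out_dim=64:
# (N, 200) -> linear -> (N, 512*4*4*4) -> project -> (N, 512, 4, 4, 4);
# each stride-2 ConvTranspose3d then doubles the spatial size,
# 4 -> 8 -> 16 -> 32 -> 64, ending at (N, 1, 64, 64, 64).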
def test_gan3d():
noise_dim = 200
in_channels = 512
dim = 64 # cube volume
model_generator = Generator(in_channels=512, out_dim=dim, out_channels=1, noise_dim=noise_dim)
noise = torch.rand(1, noise_dim)
generated_volume = model_generator(noise)
print("Generator output shape", generated_volume.shape)
model_discriminator = Discriminator(in_channels=1, dim=dim, out_conv_channels=in_channels)
out = model_discriminator(generated_volume)
print("Discriminator output", out)
summary(model_generator, (1, noise_dim))
summary(model_discriminator, (1, 64, 64, 64))
if __name__ == "__main__":
    test_gan3d()

Row 7 metadata:

| field | value |
|---|---|
| blob_id | 6f836ab3686942afcbcdebc3df332634655fe24e |
| directory_id | d110546d747d7e3865ce5742d5fca09f404623c0 |
| path | /tests/pytests/functional/modules/test_aptpkg.py |
| content_id | 7d4933875d411249a69795af447e26c969383490 |
| detected_licenses | ["Apache-2.0", "MIT", "BSD-2-Clause"] |
| license_type | permissive |
| repo_name | saltstack/salt |
| snapshot_id | 354fc86a7be1f69514b3dd3b2edb9e6f66844c1d |
| revision_id | 1ef90cbdc7203f97775edb7666db86a41eb9fc15 |
| branch_name | refs/heads/master |
| visit_date | 2023-07-19T20:56:20.210556 |
| revision_date | 2023-06-29T23:12:28 |
| committer_date | 2023-07-19T11:47:47 |
| github_id | 1,390,248 |
| star_events_count | 11,026 |
| fork_events_count | 6,296 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-14T20:45:37 |
| gha_created_at | 2011-02-20T20:16:56 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 11,150 |
| extension | py |
| filename | test_aptpkg.py |

content:
import os
import pathlib
import shutil
import pytest
import salt.exceptions
import salt.modules.aptpkg as aptpkg
import salt.modules.cmdmod as cmd
import salt.modules.config as config
import salt.modules.cp as cp
import salt.modules.file as file
import salt.modules.gpg as gpg
import salt.utils.files
import salt.utils.stringutils
from tests.support.mock import Mock, patch
pytestmark = [
pytest.mark.skip_if_binaries_missing("apt-cache", "grep"),
pytest.mark.slow_test,
]
KEY_FILES = (
"salt-archive-keyring.gpg",
"SALTSTACK-GPG-KEY.pub",
)
class Key:
def __init__(self, aptkey=True):
self.aptkey = aptkey
self.keyname = "salt-archive-keyring.gpg"
def add_key(self):
keydir = pathlib.Path("/etc", "apt", "keyrings")
if not keydir.is_dir():
keydir.mkdir()
aptpkg.add_repo_key(f"salt://{self.keyname}", aptkey=self.aptkey)
def del_key(self):
aptpkg.del_repo_key(keyid="0E08A149DE57BFBE", aptkey=self.aptkey)
@pytest.fixture
def get_key_file(request, state_tree, functional_files_dir):
"""
Create the key file used for the repo by file name passed to the test
"""
keyname = request.param
shutil.copy(str(functional_files_dir / keyname), str(state_tree))
yield keyname
@pytest.fixture
def configure_loader_modules(minion_opts):
return {
aptpkg: {
"__salt__": {
"cmd.run_all": cmd.run_all,
"cmd.run": cmd.run,
"file.replace": file.replace,
"file.append": file.append,
"file.grep": file.grep,
"cp.cache_file": cp.cache_file,
"config.get": config.get,
},
"__opts__": minion_opts,
},
file: {
"__salt__": {"cmd.run_all": cmd.run_all},
"__utils__": {
"files.is_text": salt.utils.files.is_text,
"stringutils.get_diff": salt.utils.stringutils.get_diff,
},
"__opts__": minion_opts,
},
gpg: {},
cp: {
"__opts__": minion_opts,
},
config: {
"__opts__": minion_opts,
},
}
@pytest.fixture()
def revert_repo_file(tmp_path):
try:
repo_file = pathlib.Path("/etc") / "apt" / "sources.list"
backup = tmp_path / "repo_backup"
# make copy of repo file
shutil.copy(str(repo_file), str(backup))
yield
finally:
# revert repo file
shutil.copy(str(backup), str(repo_file))
aptpkg.refresh_db()
@pytest.fixture
def build_repo_file():
source_path = "/etc/apt/sources.list.d/source_test_list.list"
try:
test_repos = [
"deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.gpg arch=amd64] https://repo.saltproject.io/salt/py3/ubuntu/22.04/amd64/latest jammy main",
"deb http://dist.list stable/all/",
]
with salt.utils.files.fopen(source_path, "w+") as fp:
fp.write("\n".join(test_repos))
yield source_path
finally:
if os.path.exists(source_path):
os.remove(source_path)
def get_repos_from_file(source_path):
"""
Get list of repos from repo in source_path
"""
test_repos = []
try:
with salt.utils.files.fopen(source_path) as fp:
for line in fp:
test_repos.append(line.strip())
except FileNotFoundError as error:
pytest.skip(f"Missing {error.filename}")
if not test_repos:
pytest.skip("Did not detect an APT repo")
return test_repos
def get_current_repo(multiple_comps=False):
"""
Get a repo currently in sources.list
multiple_comps:
Search for a repo that contains multiple comps.
For example: main, restricted
"""
test_repo = None
try:
with salt.utils.files.fopen("/etc/apt/sources.list") as fp:
for line in fp:
if line.startswith("#"):
continue
if "ubuntu.com" in line or "debian.org" in line:
test_repo = line.strip()
comps = test_repo.split()[3:]
if multiple_comps:
if len(comps) > 1:
break
else:
break
except FileNotFoundError as error:
pytest.skip(f"Missing {error.filename}")
if not test_repo:
pytest.skip("Did not detect an APT repo")
return test_repo, comps
def test_list_repos():
"""
Test aptpkg.list_repos
"""
ret = aptpkg.list_repos()
repos = [x for x in ret if "http" in x]
for repo in repos:
check_repo = ret[repo][0]
for key in [
"comps",
"dist",
"uri",
"line",
"architectures",
"file",
"type",
]:
assert key in check_repo
assert pathlib.Path(check_repo["file"]).is_file()
assert check_repo["dist"] in check_repo["line"]
if isinstance(check_repo["comps"], list):
assert " ".join(check_repo["comps"]) in check_repo["line"]
else:
assert check_repo["comps"] in check_repo["line"]
def test_get_repos():
"""
Test aptpkg.get_repos
"""
test_repo, comps = get_current_repo()
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
assert ret["file"] == "/etc/apt/sources.list"
def test_get_repos_multiple_comps():
"""
Test aptpkg.get_repos when multiple comps
exist in repo.
"""
test_repo, comps = get_current_repo(multiple_comps=True)
exp_ret = test_repo.split()
ret = aptpkg.get_repo(repo=test_repo)
assert ret["type"] == exp_ret[0]
assert ret["uri"] == exp_ret[1]
assert ret["dist"] == exp_ret[2]
assert ret["comps"] == exp_ret[3:]
def test_get_repos_doesnot_exist():
"""
Test aptpkg.get_repos when passing a repo
that does not exist
"""
for test_repo in [
"doesnotexist",
"deb http://archive.ubuntu.com/ubuntu/ focal-backports compdoesnotexist",
]:
ret = aptpkg.get_repo(repo=test_repo)
assert not ret
@pytest.mark.destructive_test
@pytest.mark.skip_if_not_root
def test_del_repo(build_repo_file):
"""
Test aptpkg.del_repo when passing repo
that exists. And checking correct error
is returned when it no longer exists.
"""
test_repos = get_repos_from_file(build_repo_file)
for test_repo in test_repos:
ret = aptpkg.del_repo(repo=test_repo)
assert f"Repo '{test_repo}' has been removed"
with pytest.raises(salt.exceptions.CommandExecutionError) as exc:
ret = aptpkg.del_repo(repo=test_repo)
assert f"Repo {test_repo} doesn't exist" in exc.value.message
@pytest.mark.skipif(
not os.path.isfile("/etc/apt/sources.list"), reason="Missing /etc/apt/sources.list"
)
def test__expand_repo_def(grains):
"""
Test aptpkg._expand_repo_def when the repo exists.
"""
test_repo, comps = get_current_repo()
ret = aptpkg._expand_repo_def(
os_name=grains["os"],
os_codename=grains.get("oscodename"),
repo=test_repo,
)
for key in [
"comps",
"dist",
"uri",
"line",
"architectures",
"file",
"type",
]:
assert key in ret
assert pathlib.Path(ret["file"]).is_file()
assert ret["dist"] in ret["line"]
if isinstance(ret["comps"], list):
for comp in ret["comps"]:
assert comp in ret["line"]
else:
assert ret["comps"] in ret["line"]
@pytest.mark.destructive_test
@pytest.mark.skip_if_not_root
def test_mod_repo(revert_repo_file):
"""
Test aptpkg.mod_repo when the repo exists.
"""
test_repo, comps = get_current_repo()
msg = "This is a test"
with patch.dict(aptpkg.__salt__, {"config.option": Mock()}):
ret = aptpkg.mod_repo(repo=test_repo, comments=msg)
assert sorted(ret[list(ret.keys())[0]]["comps"]) == sorted(comps)
ret = file.grep("/etc/apt/sources.list", msg)
assert f"#{msg}" in ret["stdout"]
@pytest.mark.destructive_test
@pytest.mark.skip_if_not_root
def test_mod_repo_no_file(tmp_path, revert_repo_file):
"""
Test aptpkg.mod_repo when the file does not exist.
It should create the file.
"""
test_repo, comps = get_current_repo()
test_file = str(tmp_path / "test_repo")
with patch.dict(aptpkg.__salt__, {"config.option": Mock()}):
ret = aptpkg.mod_repo(repo=test_repo, file=test_file)
with salt.utils.files.fopen(test_file, "r") as fp:
ret = fp.read()
assert test_repo.split()[1] in ret.strip()
for comp in comps:
assert comp in ret
@pytest.fixture()
def add_key(request, get_key_file):
""" """
key = Key(request.param)
key.add_key()
yield request.param
key.del_key()
@pytest.mark.parametrize("get_key_file", KEY_FILES, indirect=True)
@pytest.mark.parametrize("add_key", [False, True], indirect=True)
@pytest.mark.destructive_test
@pytest.mark.skip_if_not_root
def test_get_repo_keys(get_key_file, add_key):
"""
Test aptpkg.get_repo_keys when aptkey is False and True
"""
ret = aptpkg.get_repo_keys(aptkey=add_key)
assert (
ret["0E08A149DE57BFBE"]["uid"]
== "SaltStack Packaging Team <packaging@saltstack.com>"
)
@pytest.mark.parametrize("key", [False, True])
@pytest.mark.destructive_test
@pytest.mark.skip_if_not_root
def test_get_repo_keys_keydir_not_exist(key):
"""
Test aptpkg.get_repo_keys when aptkey is False and True
and keydir does not exist
"""
ret = aptpkg.get_repo_keys(aptkey=key, keydir="/doesnotexist/")
if not key:
assert not ret
else:
assert ret
@pytest.mark.parametrize("get_key_file", KEY_FILES, indirect=True)
@pytest.mark.parametrize("aptkey", [False, True])
@pytest.mark.skip_if_not_root
def test_add_del_repo_key(get_key_file, aptkey):
"""
Test both add_repo_key and del_repo_key when
aptkey is both False and True
and using both binary and armored gpg keys
"""
    # Define keyfile before the try block so the finally clause below can
    # reference it even if add_repo_key fails.
    keyfile = pathlib.Path("/etc", "apt", "keyrings", get_key_file)
    try:
        assert aptpkg.add_repo_key(f"salt://{get_key_file}", aptkey=aptkey)
if not aptkey:
assert keyfile.is_file()
assert oct(keyfile.stat().st_mode)[-3:] == "644"
assert keyfile.read_bytes()
query_key = aptpkg.get_repo_keys(aptkey=aptkey)
assert (
query_key["0E08A149DE57BFBE"]["uid"]
== "SaltStack Packaging Team <packaging@saltstack.com>"
)
finally:
aptpkg.del_repo_key(keyid="0E08A149DE57BFBE", aptkey=aptkey)
if not aptkey:
assert not keyfile.is_file()
query_key = aptpkg.get_repo_keys(aptkey=aptkey)
assert "0E08A149DE57BFBE" not in query_key
|
06116dfe480007bb0c6223c3b91c5f05296b2820
|
fbebc09f50a6ac0749e68d5dcab20afd009de71f
|
/snippets/ch04/loader.py
|
96b5010a8996b2ab1e02effb1492873380abeb80
|
[
"Apache-2.0"
] |
permissive
|
foxbook/atap
|
7e9adbaa1f64ff4e7a2e58a5d9b1717150f063ba
|
43fd3317b641e0830905a734226afad3a0ea19f6
|
refs/heads/master
| 2023-08-17T06:37:34.571614
| 2022-12-01T14:41:35
| 2022-12-01T14:41:35
| 88,289,875
| 401
| 316
|
Apache-2.0
| 2020-08-06T12:54:54
| 2017-04-14T17:48:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,973
|
py
|
loader.py
|
# sklearn.cross_validation was removed in scikit-learn 0.20; use model_selection.
from sklearn.model_selection import KFold
class CorpusLoader(object):
    def __init__(self, corpus, folds=None, shuffle=True):
        self.n_docs = len(corpus.fileids())
        self.corpus = corpus
        self.folds = folds
        if folds is not None:
            # Generate the KFold cross validation for the loader. The splits
            # are materialized once so repeated calls see the same folds.
            kf = KFold(n_splits=folds, shuffle=shuffle)
            self.folds = list(kf.split(range(self.n_docs)))
    @property
    def n_folds(self):
        """
        Returns the number of folds if it exists; 0 otherwise.
        """
        if self.folds is None: return 0
        return len(self.folds)
    def fileids(self, fold=None, train=False, test=False):
        if fold is None:
            # If no fold is specified, return all the fileids.
            return self.corpus.fileids()
        # Otherwise, identify the fold specifically and get the train/test idx
        train_idx, test_idx = self.folds[fold]
# Now determine if we're in train or test mode.
if not (test or train) or (test and train):
raise ValueError(
"Please specify either train or test flag"
)
# Select only the indices to filter upon.
indices = train_idx if train else test_idx
return [
fileid for doc_idx, fileid in enumerate(self.corpus.fileids())
if doc_idx in indices
]
def documents(self, fold=None, train=False, test=False):
for fileid in self.fileids(fold, train, test):
yield list(self.corpus.docs(fileids=fileid))
def labels(self, fold=None, train=False, test=False):
return [
self.corpus.categories(fileids=fileid)[0]
for fileid in self.fileids(fold, train, test)
]
if __name__ == '__main__':
from reader import PickledCorpusReader
corpus = PickledCorpusReader('corpus')
loader = CorpusLoader(corpus, folds=12)
for fid in loader.fileids(0, test=True):
print(fid)
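# A sketch of pulling matched train/test splits per fold (my addition; it
# relies only on the documents()/labels() helpers defined above):
#
#   for fold in range(loader.n_folds):
#       X_train = list(loader.documents(fold, train=True))
#       y_train = loader.labels(fold, train=True)
#       X_test = list(loader.documents(fold, test=True))
#       y_test = loader.labels(fold, test=True)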
|
1186ab3eaff44668fe0cde4d9d08a833c150078c
|
6be59c81f3f6a17c14b812be0de3346a82eb33dd
|
/learn_torch/learn_nn/custom_module.py
|
7052b14b9faf052b5bd4e089f7cb43433a46456a
|
[] |
no_license
|
chunhuizhang/bilibili_vlogs
|
6851fdcd43f08fcf7195e345b0bc85d99c0b9128
|
0efd921b24f2af43f5972ea6909deb2fc069d305
|
refs/heads/master
| 2023-08-17T15:47:04.299072
| 2023-08-14T13:46:31
| 2023-08-14T13:46:31
| 220,612,967
| 170
| 70
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
custom_module.py
|
import torch
from torch import nn
class MySeq(torch.nn.Module):
    def __init__(self, *args):
        super().__init__()
        # _modules needs string keys; using the module object itself as the
        # key breaks state_dict() naming, so register children by index.
        for idx, block in enumerate(args):
            self._modules[str(idx)] = block
def forward(self, X):
for block in self._modules.values():
X = block(X)
return X
if __name__ == '__main__':
X = torch.rand(2, 20)
net = MySeq(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
net(X)
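# Registering children via self._modules with string keys (as above) is what
# lets PyTorch track their parameters; a quick sanity check of this sketch:
#
#   print(len(list(net.parameters())))  # 4: weight and bias of each Linear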
|
0c62422540420b43c29c4c9c60d89044e3f034aa
|
94724578994ab1438dcefb51b7ef4d8570da5d4c
|
/calibre/os重学操作系统.recipe
|
a47459dce17365863b71371048b1dbcb055a898d
|
[] |
no_license
|
PegasusWang/collection_python
|
6648d83203634abf44fd42c0b37b0bf7cc406d8f
|
9ef019a737a0817860d3184924c67a0833bd1252
|
refs/heads/master
| 2023-09-01T23:15:39.813635
| 2023-08-24T06:46:12
| 2023-08-24T06:46:12
| 43,693,872
| 130
| 90
| null | 2021-04-26T15:12:55
| 2015-10-05T15:28:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 14,299
|
recipe
|
os重学操作系统.recipe
|
#!/usr/bin/python
# encoding: utf-8
from calibre.web.feeds.recipes import BasicNewsRecipe  # import the Recipe base class
"""
Tutorials:
- https://bookfere.com/tools#calibre
- https://www.jianshu.com/p/0bcb92509309
- https://snowdreams1006.github.io/myGitbook/advance/export.html
Commands:
ebook-convert os重学操作系统.recipe os重学操作系统.mobi --output-profile=kindle
ebook-convert os重学操作系统.recipe os重学操作系统.pdf
ebook-convert os重学操作系统.recipe os重学操作系统.epub
"""
urls = [
{
"title": "00 开篇词 为什么大厂面试必考操作系统?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/00 开篇词 为什么大厂面试必考操作系统?.md"
},
{
"title": "00 课前必读 构建知识体系,可以这样做!.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/00 课前必读 构建知识体系,可以这样做!.md"
},
{
"title": "01 计算机是什么:“如何把程序写好”这个问题是可计算的吗?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/01 计算机是什么:“如何把程序写好”这个问题是可计算的吗?.md"
},
{
"title": "02 程序的执行:相比 32 位,64 位的优势是什么?(上).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/02 程序的执行:相比 32 位,64 位的优势是什么?(上).md"
},
{
"title": "03 程序的执行:相比 32 位,64 位的优势是什么?(下).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/03 程序的执行:相比 32 位,64 位的优势是什么?(下).md"
},
{
"title": "04 构造复杂的程序:将一个递归函数转成非递归函数的通用方法.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/04 构造复杂的程序:将一个递归函数转成非递归函数的通用方法.md"
},
{
"title": "05 存储器分级:L1 Cache 比内存和 SSD 快多少倍?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/05 存储器分级:L1 Cache 比内存和 SSD 快多少倍?.md"
},
{
"title": "05 (1) 加餐 练习题详解(一).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/05 (1) 加餐 练习题详解(一).md"
},
{
"title": "06 目录结构和文件管理指令:rm -rf 指令的作用是?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/06 目录结构和文件管理指令:rm -rf 指令的作用是?.md"
},
{
"title": "07 进程、重定向和管道指令:xargs 指令的作用是?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/07 进程、重定向和管道指令:xargs 指令的作用是?.md"
},
{
"title": "08 用户和权限管理指令: 请简述 Linux 权限划分的原则?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/08 用户和权限管理指令: 请简述 Linux 权限划分的原则?.md"
},
{
"title": "09 Linux 中的网络指令:如何查看一个域名有哪些 NS 记录?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/09 Linux 中的网络指令:如何查看一个域名有哪些 NS 记录?.md"
},
{
"title": "10 软件的安装: 编译安装和包管理器安装有什么优势和劣势?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/10 软件的安装: 编译安装和包管理器安装有什么优势和劣势?.md"
},
{
"title": "11 高级技巧之日志分析:利用 Linux 指令分析 Web 日志.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/11 高级技巧之日志分析:利用 Linux 指令分析 Web 日志.md"
},
{
"title": "12 高级技巧之集群部署:利用 Linux 指令同时在多台机器部署程序.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/12 高级技巧之集群部署:利用 Linux 指令同时在多台机器部署程序.md"
},
{
"title": "12 (1)加餐 练习题详解(二).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/12 (1)加餐 练习题详解(二).md"
},
{
"title": "13 操作系统内核:Linux 内核和 Windows 内核有什么区别?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/13 操作系统内核:Linux 内核和 Windows 内核有什么区别?.md"
},
{
"title": "14 用户态和内核态:用户态线程和内核态线程有什么区别?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/14 用户态和内核态:用户态线程和内核态线程有什么区别?.md"
},
{
"title": "15 中断和中断向量:Javajs 等语言为什么可以捕获到键盘输入?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/15 中断和中断向量:Javajs 等语言为什么可以捕获到键盘输入?.md"
},
{
"title": "16 WinMacUnixLinux 的区别和联系:为什么 Debian 漏洞排名第一还这么多人用?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/16 WinMacUnixLinux 的区别和联系:为什么 Debian 漏洞排名第一还这么多人用?.md"
},
{
"title": "16 (1)加餐 练习题详解(三).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/16 (1)加餐 练习题详解(三).md"
},
{
"title": "17 进程和线程:进程的开销比线程大在了哪里?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/17 进程和线程:进程的开销比线程大在了哪里?.md"
},
{
"title": "18 锁、信号量和分布式锁:如何控制同一时间只有 2 个线程运行?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/18 锁、信号量和分布式锁:如何控制同一时间只有 2 个线程运行?.md"
},
{
"title": "19 乐观锁、区块链:除了上锁还有哪些并发控制方法?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/19 乐观锁、区块链:除了上锁还有哪些并发控制方法?.md"
},
{
"title": "20 线程的调度:线程调度都有哪些方法?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/20 线程的调度:线程调度都有哪些方法?.md"
},
{
"title": "21 哲学家就餐问题:什么情况下会触发饥饿和死锁?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/21 哲学家就餐问题:什么情况下会触发饥饿和死锁?.md"
},
{
"title": "22 进程间通信: 进程间通信都有哪些方法?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/22 进程间通信: 进程间通信都有哪些方法?.md"
},
{
"title": "23 分析服务的特性:我的服务应该开多少个进程、多少个线程?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/23 分析服务的特性:我的服务应该开多少个进程、多少个线程?.md"
},
{
"title": "23 (1)加餐 练习题详解(四).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/23 (1)加餐 练习题详解(四).md"
},
{
"title": "24 虚拟内存 :一个程序最多能使用多少内存?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/24 虚拟内存 :一个程序最多能使用多少内存?.md"
},
{
"title": "25 内存管理单元: 什么情况下使用大内存分页?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/25 内存管理单元: 什么情况下使用大内存分页?.md"
},
{
"title": "26 缓存置换算法: LRU 用什么数据结构实现更合理?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/26 缓存置换算法: LRU 用什么数据结构实现更合理?.md"
},
{
"title": "27 内存回收上篇:如何解决内存的循环引用问题?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/27 内存回收上篇:如何解决内存的循环引用问题?.md"
},
{
"title": "28 内存回收下篇:三色标记-清除算法是怎么回事?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/28 内存回收下篇:三色标记-清除算法是怎么回事?.md"
},
{
"title": "28 (1)加餐 练习题详解(五).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/28 (1)加餐 练习题详解(五).md"
},
{
"title": "29 Linux 下的各个目录有什么作用?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/29 Linux 下的各个目录有什么作用?.md"
},
{
"title": "30 文件系统的底层实现:FAT、NTFS 和 Ext3 有什么区别?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/30 文件系统的底层实现:FAT、NTFS 和 Ext3 有什么区别?.md"
},
{
"title": "31 数据库文件系统实例:MySQL 中 B 树和 B+ 树有什么区别?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/31 数据库文件系统实例:MySQL 中 B 树和 B+ 树有什么区别?.md"
},
{
"title": "32 HDFS 介绍:分布式文件系统是怎么回事?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/32 HDFS 介绍:分布式文件系统是怎么回事?.md"
},
{
"title": "32 (1)加餐 练习题详解(六).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/32 (1)加餐 练习题详解(六).md"
},
{
"title": "33 互联网协议群(TCPIP):多路复用是怎么回事?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/33 互联网协议群(TCPIP):多路复用是怎么回事?.md"
},
{
"title": "34 UDP 协议:UDP 和 TCP 相比快在哪里?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/34 UDP 协议:UDP 和 TCP 相比快在哪里?.md"
},
{
"title": "35 Linux 的 IO 模式:selectpollepoll 有什么区别?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/35 Linux 的 IO 模式:selectpollepoll 有什么区别?.md"
},
{
"title": "36 公私钥体系和网络安全:什么是中间人攻击?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/36 公私钥体系和网络安全:什么是中间人攻击?.md"
},
{
"title": "36 (1)加餐 练习题详解(七).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/36 (1)加餐 练习题详解(七).md"
},
{
"title": "37 虚拟化技术介绍:VMware 和 Docker 的区别?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/37 虚拟化技术介绍:VMware 和 Docker 的区别?.md"
},
{
"title": "38 容器编排技术:如何利用 K8s 和 Docker Swarm 管理微服务?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/38 容器编排技术:如何利用 K8s 和 Docker Swarm 管理微服务?.md"
},
{
"title": "39 Linux 架构优秀在哪里.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/39 Linux 架构优秀在哪里.md"
},
{
"title": "40 商业操作系统:电商操作系统是不是一个噱头?.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/40 商业操作系统:电商操作系统是不是一个噱头?.md"
},
{
"title": "40 (1)加餐 练习题详解(八).md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/40 (1)加餐 练习题详解(八).md"
},
{
"title": "41 结束语 论程序员的发展——信仰、选择和博弈.md",
"url": "http://learn.lianglianglee.com/专栏/重学操作系统-完/41 结束语 论程序员的发展——信仰、选择和博弈.md"
}
]
class Blog(BasicNewsRecipe): # new class inheriting from the BasicNewsRecipe base
    # ///////////////////
    # E-book metadata
    # ///////////////////
    title = "os重学操作系统" # e-book title
    description = u"os重学操作系统" # e-book description
    # cover_url = '' # e-book cover
    # masthead_url = '' # masthead image
    __author__ = "web" # author
    language = "zh" # language
    encoding = "utf-8" # encoding
    # ///////////////////
    # Page scraping settings
    # ///////////////////
    # keep_only_tags = [{ 'class': 'example' }] # keep only content matched by the given selectors
    no_stylesheets = True # strip CSS stylesheets
    remove_javascript = True # strip JavaScript
    auto_cleanup = True # automatically clean up the HTML
    max_articles_per_feed = 300 # number of articles to fetch
    timeout = 120.0
    simultaneous_downloads = 2 # simultaneous downloads sometimes fail; lower this or download single-threaded
    # delay = 2 # seconds to wait between page fetches
    # Parse the page content
    def parse_index(self):
        ans = [(self.title, urls)]
        return ans # return a structure Calibre can convert
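        # Note: parse_index must return a list of (section_title, article_list)
        # tuples, where each article is a dict with at least "title" and "url"
        # keys -- exactly the structure of the `urls` list above.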
|
aeb9980f493078a2884c0f1e8a1fb49d55601771
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/surround/SurroundNewline_after.py
|
c07bad2792c86b2f47f320ac4bcae2ae2a91c07d
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
SurroundNewline_after.py
|
if True:
a = 1
a = 2
|
a95f21f3ff351d356f752430a13b77d48bbc2d13
|
e85453bcff9bb2a4b817d5c9157ac0894f98c1b1
|
/packages/aws-cdk/lib/init-templates/app/python/tests/unit/test_%name.PythonModule%_stack.template.py
|
2bf2309dca779162c05259ff607985153a099067
|
[
"CC0-1.0",
"CC-BY-SA-4.0",
"MIT",
"BSD-3-Clause",
"0BSD",
"ISC",
"Apache-2.0",
"BSD-2-Clause",
"MIT-0"
] |
permissive
|
aws/aws-cdk
|
8e96f775b25d4ee193e2dbcee79c81e6b9f1a403
|
408f1f663ee300a783e36a35b5f103a514d8c12e
|
refs/heads/main
| 2023-09-04T00:59:05.308636
| 2023-09-01T19:41:23
| 2023-09-01T19:41:23
| 105,808,767
| 8,448
| 3,869
|
Apache-2.0
| 2023-09-14T20:29:14
| 2017-10-04T19:22:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 560
|
py
|
test_%name.PythonModule%_stack.template.py
|
import aws_cdk as core
import aws_cdk.assertions as assertions
from %name.PythonModule%.%name.PythonModule%_stack import %name.PascalCased%Stack
# example tests. To run these tests, uncomment this file along with the example
# resource in %name.PythonModule%/%name.PythonModule%_stack.py
def test_sqs_queue_created():
app = core.App()
stack = %name.PascalCased%Stack(app, "%name.StackName%")
template = assertions.Template.from_stack(stack)
# template.has_resource_properties("AWS::SQS::Queue", {
# "VisibilityTimeout": 300
# })
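    # Once the example queue in the stack is uncommented, a count assertion is
    # another common check (resource_count_is is part of aws_cdk.assertions):
    #
    #   template.resource_count_is("AWS::SQS::Queue", 1)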
|
89bb86d91bd6065305c34315cbfb681511083da9
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/mindspore/lite/test/st/python/import_ms_and_mslite/test_predict_backend_lite_lenet.py
|
7c20679e8982544af07f76f6477670ad045fe03e
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 3,735
|
py
|
test_predict_backend_lite_lenet.py
|
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
######################## LiteInfer test ########################
Note:
To run this scripts, 'mindspore' and 'mindspore_lite' must be installed.
mindspore_lite must be cloud inference version.
"""
import numpy as np
import mindspore as ms
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train import Model
import mindspore.nn as nn
from mindspore.common.initializer import Normal
from lite_infer_predict_utils import predict_lenet, predict_mindir, predict_backend_lite, _get_max_index_from_res
CKPT_FILE_PATH = ''
class LeNet5(nn.Cell):
"""
Lenet network
"""
def __init__(self, num_class=10, num_channel=1, include_top=True):
super(LeNet5, self).__init__()
self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
self.include_top = include_top
if self.include_top:
self.flatten = nn.Flatten()
self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
def construct(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.max_pool2d(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool2d(x)
if not self.include_top:
return x
x = self.flatten(x)
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
def create_model():
"""
create model.
"""
network = LeNet5(10, num_channel=3)
if CKPT_FILE_PATH:
param_dict = load_checkpoint(CKPT_FILE_PATH)
load_param_into_net(network, param_dict)
ms_model = Model(network)
return ms_model
def test_predict_backend_lite_lenet():
"""
Feature: test LiteInfer predict.
Description: test LiteInfer predict.
Expectation: Success.
"""
context.set_context(mode=context.GRAPH_MODE)
    # create_model() below builds the network with num_channel=3, so the dummy
    # input must have three channels as well.
    fake_input = ms.Tensor(np.ones((1, 3, 32, 32)).astype(np.float32))
model = create_model()
res, avg_t = predict_lenet(model, fake_input)
print("Prediction res: ", _get_max_index_from_res(res))
print(f"Prediction avg time: {avg_t * 1000} ms")
model = create_model()
res_lite, avg_t_lite = predict_backend_lite(model, fake_input)
print("Predict using backend lite, res: ", _get_max_index_from_res(res_lite))
print(f"Predict using backend lite, avg time: {avg_t_lite * 1000} ms")
model = create_model()
res_mindir, avg_t_mindir = predict_mindir(model, fake_input)
print("Predict by mindir, res: ", _get_max_index_from_res(res_mindir))
print(f"Predict by mindir, avg time: {avg_t_mindir * 1000} ms")
assert _get_max_index_from_res(res)
assert _get_max_index_from_res(res_lite)
assert _get_max_index_from_res(res_mindir)
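# Quick shape sanity check for the network above (a sketch, my addition):
#
#   net = LeNet5(10, num_channel=3)
#   out = net(ms.Tensor(np.ones((1, 3, 32, 32)).astype(np.float32)))
#   print(out.shape)  # (1, 10): one logit per class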
|
ef8a7bfb8ba38a97f719294d477482818b233cb9
|
2a64017ddbf28d00109ed4e3105e1f7ccc78ccc1
|
/workflow101/config/settings.py
|
056f93f392b668640e94742fb8e670e786a8d050
|
[] |
no_license
|
viewflow/cookbook
|
25f9379510ced08dbdbae9d5cfb3df0c68f95a08
|
04189977cd894b5166d088949f30e07189889189
|
refs/heads/main
| 2023-08-17T09:22:26.226335
| 2023-08-11T15:12:23
| 2023-08-11T15:12:23
| 35,273,541
| 286
| 179
| null | 2023-01-19T10:44:18
| 2015-05-08T10:32:49
|
Python
|
UTF-8
|
Python
| false
| false
| 4,273
|
py
|
settings.py
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "b=b7z6c!2_k62o42k5s0145=&s7d*-1u!9&g%5bw@+!i3-80$b"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.staticfiles",
"rest_framework",
"django_filters",
"guardian",
"viewflow",
"viewflow.workflow",
"cookbook.workflow101.bloodtest",
"cookbook.workflow101.dynamic_split",
"cookbook.workflow101.helloworld",
"cookbook.workflow101.hellorest",
"cookbook.workflow101.shipment",
"cookbook.workflow101.subprocess",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"cookbook.workflow101.config.AutoLoginMiddleware",
]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"guardian.backends.ObjectPermissionBackend",
)
ROOT_URLCONF = "cookbook.workflow101.config.urls"
default_loaders = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
cached_loaders = [("django.template.loaders.cached.Loader", default_loaders)]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "config", "templates")],
# "APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"cookbook.workflow101.config.urls.users",
],
"loaders": default_loaders if DEBUG else cached_loaders,
},
},
]
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = "/static/"
# Celery
CELERY_WORKER_CONCURRENCY = 1
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_RESULT_SERIALIZER = "json"
CELERY_TASK_SERIALIZER = "json"
CELERY_BROKER_URL = "redis://localhost:6379/10"
CELERY_RESULT_BACKEND = "redis://localhost:6379/11"
# Rest Framework
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.CursorPagination",
"PAGE_SIZE": 100,
}
SPECTACULAR_SETTINGS = {
"ENUM_NAME_OVERRIDES": {
"ProcessStatusEnum": "viewflow.workflow.status.PROCESS",
"TaskStatusEnum": "viewflow.workflow.status.STATUS",
}
}
|
ff087e3a9ced7f44946bdf9c69175bfb3c3afb90
|
5be3a03ca2d74e0263338af3cc91d2cf1b7f75d9
|
/正则表达式/output.py
|
2dfcc83996f4d17e7bb6af90e39470c6c0879ead
|
[] |
no_license
|
secondtonone1/python-
|
d969a1bbc5c8c2b27a0bb75f91159da6d1ce83f7
|
19a66d70c20293875ad29a868f42a9b3a5826422
|
refs/heads/master
| 2022-11-05T00:31:53.300764
| 2022-09-26T03:10:11
| 2022-09-26T03:10:11
| 98,652,204
| 177
| 154
| null | 2022-10-30T08:55:38
| 2017-07-28T13:29:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,799
|
py
|
output.py
|
#-*-coding:utf-8-*-
from multiprocessing import Process
import os
import re
s = 'AB\\-001'
print(s)
s = r'AB\-001'
print(s)
result = re.match(r'^\d{3}\-\d{3,8}$','010-12345')
print(result)
result2 = re.match(r'^\d{3}\-\d{3,8}$','010 12345')
print(result2)
# Splitting strings
s1 = 'a b c'
print(s1.split(' ') )
s2 = re.split(r'\s+', s1)
print(s2)
s3 = re.split(r'[\s\:\,]+','a,b c::d e, f')
print(s3)
# Grouping
m = re.match(r'^(\d{3})-(\d{3,8})$','010-12345')
print(m)
print(m.group(0))
print(m.group(1))
print(m.group(2))
print(m.groups())
# Greedy matching
r1 = re.match(r'^(\d+)(0*)$','102300').groups()
print(r1)
r2 = re.match(r'^(\d+?)(0*)$','102300').groups()
print(r2)
# Compiling patterns
re_telephone = re.compile(r'^(\d{3})-(\d{3,8})$')
r3 = re_telephone.match('010-12345').groups()
print(r3)
r4 = re_telephone.match('043-12345').groups()
print(r4)
pattern = re.compile(r'hello')
result1 = re.match(pattern, 'hello')
result2 = re.match(pattern, 'helloo, aaa')
result3 = re.match(pattern, 'helo AAB')
result4 = re.match(pattern, 'helloww')
if result1:
print(result1 )
else:
print('failed!!!')
if result2:
print(result2.group() )
else:
print('failed!!!')
if result3:
print(result3.group() )
else:
print('failed!!!')
if result4:
print(result4.group() )
else:
print('failed!!!')
res = re.match(r'(hello)','hellooaaa')
if res:
print(res.groups())
else:
print('failed!!!')
'''
Match object attributes:
1. string: the text used in the match.
2. re: the Pattern object used in the match.
3. pos: index into the text at which the regex search starts; same as the parameter of the same name of Pattern.match() and Pattern.search().
4. endpos: index into the text at which the regex search ends; same as the parameter of the same name of Pattern.match() and Pattern.search().
5. lastindex: index of the last captured group; None if no group was captured.
6. lastgroup: alias of the last captured group; None if that group has no alias or if no group was captured.
Methods:
1. group([group1, ...]):
    Returns the substring(s) captured by one or more groups; with several arguments the result is a tuple. Groups may be given by number or alias; number 0 means the whole match;
    with no arguments this is group(0); a group that captured nothing returns None; a group that matched several times returns its last capture.
2. groups([default]):
    Returns the substrings captured by all groups as a tuple, equivalent to group(1, 2, ..., last). default replaces groups that captured nothing; it defaults to None.
3. groupdict([default]):
    Returns a dict keyed by the aliases of named groups with their captured substrings as values; unnamed groups are not included. default as above.
4. start([group]):
    Returns the start index in string of the substring captured by the group (index of its first character). group defaults to 0.
5. end([group]):
    Returns the end index in string of the substring captured by the group (index of its last character + 1). group defaults to 0.
6. span([group]):
    Returns (start(group), end(group)).
7. expand(template):
    Substitutes the captured groups into template and returns the result. Groups may be referenced in template as \1 or as \g<id> / \g<name>, but not as number 0. \1 and \g<1> are equivalent,
    but \10 is read as group 10; to express group 1 followed by a literal '0', write \g<1>0.
'''
# Match: word + space + word + anything else
m = re.match(r'(\w+) (\w+)(?P<sign>.*)','hello world!')
print('m.string is %s' %(m.string) )
print('m.re: %s' %(m.re) )
print('m.pos: %d' %(m.pos))
print('m.endpos: %d' %(m.endpos))
print('m.lastindex: %d' %(m.lastindex))
print('m.lastgroup: %s' %(m.lastgroup))
print('m.groups: ' , m.groups())
print('m.group: ' , m.group())
print('m.group(1,2): ' , m.group(1,2))
print('m.groupdict():', m.groupdict())
print('m.start(2):',m.start(2))
print('m.end(2):',m.end(2))
print('m.span(2):',m.span(2))
print("m.expand(r'\g \g\g'):", m.expand(r'\2 \1\3') )
pattern = re.compile(r'world')
sr = re.search(pattern, 'hello world!')
if sr:
print(sr.group())
pattern = re.compile(r'\d+')
splitrs = re.split(pattern, 'one1two2three3four45six797five')
if sr:
print(splitrs)
pattern = re.compile(r'\d+')
find = re.findall(pattern, 'one1two2three3four45six797five')
if find:
print(find)
pattern = re.compile(r'\d+')
finditer = re.finditer(pattern, 'one1two2three3four45six797five')
if finditer:
print(finditer)
for m in finditer:
print(m.group())
pattern = re.compile(r'(\w+) (\w+)')
s = 'i say, hello world'
print(re.sub(pattern,r'\2 \1', s))
def func(m):
return m.group(1).title() + ' '+ m.group(2).title()
sub = re.sub(pattern, func, s)
print(sub)
pattern = re.compile(r'(\w+) (\w+)')
s = 'i say, hello world'
print(re.subn(pattern,r'\2 \1', s))
def func(m):
return m.group(1).title() + ' '+ m.group(2).title()
sub = re.subn(pattern, func, s)
print(sub)
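# Named groups and \g<name> references (my addition, standard re API):
m = re.match(r'(?P<first>\w+) (?P<second>\w+)', 'hello world')
print(m.groupdict())                      # {'first': 'hello', 'second': 'world'}
print(m.expand(r'\g<second> \g<first>'))  # world hello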
|
e4e4b3a7fb9cb386f925b10d779e3127400d3ca3
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/battle_royale/scripts/client/battle_royale/gui/impl/lobby/tooltips/test_drive_info_tooltip_view.py
|
5cbff65242aa6465fe2606d211df4ff831084c68
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
test_drive_info_tooltip_view.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: battle_royale/scripts/client/battle_royale/gui/impl/lobby/tooltips/test_drive_info_tooltip_view.py
from battle_royale.gui.impl.gen.view_models.views.lobby.tooltips.test_drive_info_tooltip_view_model import TestDriveInfoTooltipViewModel
from frameworks.wulf import ViewSettings
from gui.impl import backport
from gui.impl.gen import R
from gui.impl.pub import ViewImpl
from gui.impl.wrappers.user_compound_price_model import PriceModelBuilder
from helpers import dependency
from skeletons.gui.game_control import IBattleRoyaleRentVehiclesController
class TestDriveInfoTooltipView(ViewImpl):
__rentVehiclesController = dependency.descriptor(IBattleRoyaleRentVehiclesController)
def __init__(self):
settings = ViewSettings(R.views.battle_royale.lobby.tooltips.TestDriveInfoTooltipView())
settings.model = TestDriveInfoTooltipViewModel()
super(TestDriveInfoTooltipView, self).__init__(settings)
@property
def viewModel(self):
return super(TestDriveInfoTooltipView, self).getViewModel()
def _onLoading(self, *args, **kwargs):
        # Forward the arguments properly; the decompiler dropped the * / **.
        super(TestDriveInfoTooltipView, self)._onLoading(*args, **kwargs)
with self.viewModel.transaction() as model:
self._fillModel(model)
def _fillModel(self, model):
model.setTestDriveDays(self.__rentVehiclesController.getPendingRentDays())
self.__fillPrice(model.price)
def __fillPrice(self, model):
testDriveDays = self.__rentVehiclesController.getNextTestDriveDaysTotal()
rentDays = self.__rentVehiclesController.getNextRentDaysTotal()
rentPrice = self.__rentVehiclesController.getRentPrice()
testDrivePrice = self.__rentVehiclesController.getTestDrivePrice()
model.setTestDriveLabel(backport.text(R.strings.battle_royale.tooltips.testDriveInfo.leftLabel()).format(days=int(testDriveDays)))
model.setRentLabel(backport.text(R.strings.battle_royale.tooltips.testDriveInfo.rightLabel()).format(days=rentDays))
PriceModelBuilder.fillPriceModel(model.testDrivePrice, testDrivePrice)
PriceModelBuilder.fillPriceModel(model.rentPrice, rentPrice)
|
a8245b01c18d3182bbe05cfc7f69691ff88d9ece
|
cdfa1fbbbbccc87771401125ad8460344118fe30
|
/hassio-google-drive-backup/backup/file/__init__.py
|
1f3f3f6dbe33eb65f89b7733325a06722c007db0
|
[
"MIT"
] |
permissive
|
sabeechen/hassio-google-drive-backup
|
f97409292f8fe02a8b7078e42b7da242b45fe8a5
|
f55390bf2e26e3b28d6a9ce4587e119d58db4b73
|
refs/heads/master
| 2023-09-04T20:39:11.846549
| 2023-07-16T17:47:47
| 2023-07-16T17:47:47
| 176,825,504
| 2,581
| 188
|
MIT
| 2023-09-13T06:56:18
| 2019-03-20T22:16:20
|
Python
|
UTF-8
|
Python
| false
| false
| 63
|
py
|
__init__.py
|
from .jsonfilesaver import JsonFileSaver
from .file import File
|
bfd35649383c8ea6c76d8b70a23bf505d0f18165
|
47ef6997d03f4d5c921c83cc09aef1dfc6828e2c
|
/zeus/networks/pytorch/customs/deepfm.py
|
85f83c1b61776bb1646a2c369a7b2c8fbd5c67a0
|
[
"MIT"
] |
permissive
|
huawei-noah/xingtian
|
620c9f245183d636e0a65659fd99a984397ecbd4
|
e4ef3a1c92d19d1d08c3ef0e2156b6fecefdbe04
|
refs/heads/master
| 2023-09-03T01:10:21.768245
| 2022-03-21T03:39:39
| 2022-03-21T03:39:39
| 287,759,621
| 308
| 91
|
MIT
| 2023-09-12T11:33:22
| 2020-08-15T14:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,255
|
py
|
deepfm.py
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The DeepFM model."""
import torch
import copy
from .fis.layers import LinearLayer, EmbeddingLayer, \
FactorizationMachineLayer, MultiLayerPerceptron
from zeus.common import ClassType, ClassFactory
@ClassFactory.register(ClassType.NETWORK)
class DeepFactorizationMachineModel(torch.nn.Module):
"""DeepFM: A Factorization-Machine based Neural Network for CTR Prediction. https://arxiv.org/abs/1703.04247.
:param input_dim: feature space of dataset
:type input_dim: int
:param input_dim4lookup: feature number in `feature_id`, usually equals to number of non-zero features
:type input_dim4lookup: int
:param embed_dim: length of each feature's latent vector(embedding vector)
:type embed_dim: int
:param hidden_dims: width of each hidden layer, from bottom to top
:type hidden_dims: list of int
:param dropout_prob: dropout probability of all hidden layer
:type dropout_prob: float
:param batch_norm: applies batch normalization before activation, defaults to False
:type batch_norm: bool, optional
:param layer_norm: applies layer normalization before activation, defaults to False
:type layer_norm: bool, optional
"""
def __init__(self, net_desc):
"""
Construct the DeepFactorizationMachineModel class.
:param net_desc: config of the structure
"""
super().__init__()
self.desc = copy.deepcopy(net_desc)
self.linear = LinearLayer(net_desc['input_dim'])
self.embedding = EmbeddingLayer(net_desc['input_dim'], net_desc['embed_dim'])
self.fm = FactorizationMachineLayer()
self.mlp_input_dim = net_desc['input_dim4lookup'] * net_desc['embed_dim']
self.mlp = MultiLayerPerceptron(
self.mlp_input_dim, net_desc['hidden_dims'], net_desc['dropout_prob'],
batch_norm=net_desc['batch_norm'], layer_norm=net_desc['layer_norm'])
self.l1_cover_params = []
self.l2_cover_params = []
def forward(self, feature_id):
"""Calculate logits of pctr for given batch of samples.
:param feature_id: a batch of feature id, tensor of size ``(batch_size, input_dim4lookup)``
:type feature_id: torch.int
:return: logits of pctr for given batch of samples
:rtype: tensor.float, size ``(batch_size, 1)``
"""
feature_val = None
linear_score = self.linear(feature_id, feature_val).squeeze(1)
embed_v = self.embedding(feature_id, feature_val)
fm_score = self.fm(embed_v).squeeze(1)
mlp_score = self.mlp(embed_v.view(-1, self.mlp_input_dim)).squeeze(1)
# print("linear_score:",linear_score.size())
# print("fm_score:", fm_score.size())
# print("mlp_score:", mlp_score.size())
return linear_score + fm_score + mlp_score
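# A minimal instantiation sketch (my addition; the dimensions are made up,
# the net_desc keys are exactly the ones read in __init__ above):
#
#   desc = dict(input_dim=10000, input_dim4lookup=39, embed_dim=16,
#               hidden_dims=[400, 400], dropout_prob=0.5,
#               batch_norm=False, layer_norm=False)
#   model = DeepFactorizationMachineModel(desc)
#   feature_id = torch.randint(0, 10000, (32, 39))
#   logits = model(feature_id)  # shape (32,): one pctr logit per sample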
|
5750121112f0b45a9fd94899970e7194350751da
|
77f85a550c28212071067cb122ebfd93eb705190
|
/pde/pdes/kuramoto_sivashinsky.py
|
024de1c2a29002cda340472a928726044dfe4036
|
[
"MIT"
] |
permissive
|
zwicker-group/py-pde
|
baf215a733508fe86093ea9e818228bbb3b34c58
|
d9c931a8361eaf27bc3766daba26edc11756b5f5
|
refs/heads/master
| 2023-08-31T06:36:34.514617
| 2023-08-30T18:15:44
| 2023-08-30T18:15:44
| 242,093,001
| 327
| 45
|
MIT
| 2023-08-31T13:16:24
| 2020-02-21T08:42:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,098
|
py
|
kuramoto_sivashinsky.py
|
"""
The Kuramoto–Sivashinsky equation describing the evolution of an unstable interface
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
from typing import Callable, Optional
import numba as nb
import numpy as np
from ..fields import ScalarField
from ..grids.boundaries.axes import BoundariesData
from ..tools.docstrings import fill_in_docstring
from ..tools.numba import jit
from .base import PDEBase, expr_prod
class KuramotoSivashinskyPDE(PDEBase):
r"""The Kuramoto-Sivashinsky equation
The mathematical definition is
.. math::
\partial_t u = -\nu \nabla^4 u - \nabla^2 u -
            \frac{1}{2} \left(\nabla u\right)^2 + \eta(\boldsymbol r, t)
where :math:`u` is the height of the interface in Monge parameterization. The
    dynamics are governed by the parameter :math:`\nu`, while :math:`\eta` is Gaussian
white noise, whose strength is controlled by the `noise` argument.
"""
explicit_time_dependence = False
@fill_in_docstring
def __init__(
self,
nu: float = 1,
*,
bc: BoundariesData = "auto_periodic_neumann",
bc_lap: Optional[BoundariesData] = None,
noise: float = 0,
rng: Optional[np.random.Generator] = None,
):
r"""
Args:
nu (float):
Parameter :math:`\nu` for the strength of the fourth-order term
bc:
The boundary conditions applied to the field.
{ARG_BOUNDARIES}
bc_lap:
The boundary conditions applied to the second derivative of the
scalar field :math:`c`. If `None`, the same boundary condition
as `bc` is chosen. Otherwise, this supports the same options as
`bc`.
noise (float):
Variance of the (additive) noise term
rng (:class:`~numpy.random.Generator`):
Random number generator (default: :func:`~numpy.random.default_rng()`)
used for stochastic simulations. Note that this random number generator
is only used for numpy function, while compiled numba code uses the
random number generator of numba. Moreover, in simulations using
multiprocessing, setting the same generator in all processes might yield
unintended correlations in the simulation results.
"""
super().__init__(noise=noise, rng=rng)
self.nu = nu
self.bc = bc
self.bc_lap = bc if bc_lap is None else bc_lap
@property
def expression(self) -> str:
"""str: the expression of the right hand side of this PDE"""
expr = f"c + {expr_prod(self.nu, '∇²c')}"
return f"-∇²({expr}) - 0.5 * |∇c|²"
def evolution_rate( # type: ignore
self,
state: ScalarField,
t: float = 0,
) -> ScalarField:
"""evaluate the right hand side of the PDE
Args:
state (:class:`~pde.fields.ScalarField`):
The scalar field describing the concentration distribution
t (float): The current time point
Returns:
:class:`~pde.fields.ScalarField`:
Scalar field describing the evolution rate of the PDE
"""
assert isinstance(state, ScalarField), "`state` must be ScalarField"
state_lap = state.laplace(bc=self.bc, args={"t": t})
result = (
-self.nu * state_lap.laplace(bc=self.bc_lap, args={"t": t})
- state_lap
- 0.5 * state.gradient_squared(bc=self.bc, args={"t": t})
)
result.label = "evolution rate"
return result # type: ignore
def _make_pde_rhs_numba( # type: ignore
self, state: ScalarField
) -> Callable[[np.ndarray, float], np.ndarray]:
"""create a compiled function evaluating the right hand side of the PDE
Args:
state (:class:`~pde.fields.ScalarField`):
An example for the state defining the grid and data types
Returns:
A function with signature `(state_data, t)`, which can be called
with an instance of :class:`~numpy.ndarray` of the state data and
            the time to obtain an instance of :class:`~numpy.ndarray` giving
the evolution rate.
"""
arr_type = nb.typeof(state.data)
signature = arr_type(arr_type, nb.double)
nu_value = self.nu
laplace = state.grid.make_operator("laplace", bc=self.bc)
laplace2 = state.grid.make_operator("laplace", bc=self.bc_lap)
gradient_sq = state.grid.make_operator("gradient_squared", bc=self.bc)
@jit(signature)
def pde_rhs(state_data: np.ndarray, t: float):
"""compiled helper function evaluating right hand side"""
result = -laplace(state_data, args={"t": t})
result += nu_value * laplace2(result, args={"t": t})
result -= 0.5 * gradient_sq(state_data, args={"t": t})
return result
return pde_rhs # type: ignore
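# A minimal usage sketch (my addition; assumes the standard py-pde entry
# points UnitGrid, ScalarField.random_uniform and PDEBase.solve):
#
#   from pde import UnitGrid, ScalarField
#   grid = UnitGrid([64, 64], periodic=True)
#   state = ScalarField.random_uniform(grid, -0.1, 0.1)
#   eq = KuramotoSivashinskyPDE(nu=1)
#   result = eq.solve(state, t_range=10, dt=1e-3)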
|
e4b43c7667ae829dddd6eac4d84f8e235a7a20fb
|
3dbefd3d97d6a6fe1c60b9639cb92ce070ff772f
|
/Source/StockProcessing/Filter_Stock_CHN_1.py
|
43e5b8ba233a55ed708fbe925135038c96471173
|
[
"MIT"
] |
permissive
|
doncat99/StockRecommendSystem
|
69b3bcf7edfe4c85894e09fd22bc173c0e349cda
|
306534fc0705ff0037f45115de3bffa835c42f71
|
refs/heads/master
| 2022-01-02T08:58:29.125564
| 2021-12-30T05:25:22
| 2021-12-30T05:25:22
| 93,833,946
| 148
| 68
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,986
|
py
|
Filter_Stock_CHN_1.py
|
import sys, os, time, datetime, warnings, configparser
import pandas as pd
import numpy as np
import talib
import concurrent.futures
import tushare as ts
import matplotlib.pyplot as plt
from stockstats import StockDataFrame as Sdf
from tqdm import tqdm
cur_path = os.path.dirname(os.path.abspath(__file__))
for _ in range(2):
root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_path))]
cur_path = root_path
sys.path.append(root_path + "/" + 'Source/FetchData/')
sys.path.append(root_path + "/" + 'Source/DataBase/')
from Fetch_Data_Stock_CHN_Daily import updateStockData_CHN_Daily
from DB_API import queryStock, queryStockList
def KDJ(df):
low_list = df['low'].rolling(center=False,window=9).min()
low_list.fillna(value=df['low'].expanding(min_periods=1).min(), inplace=True)
high_list = df['high'].rolling(center=False,window=9).max()
high_list.fillna(value=df['high'].expanding(min_periods=1).max(), inplace=True)
rsv = (df['close'] - low_list) / (high_list - low_list) * 100
df['kdj_k'] = rsv.ewm(min_periods=0,adjust=True,ignore_na=False,com=2).mean()
df['kdj_d'] = df['kdj_k'].ewm(min_periods=0,adjust=True,ignore_na=False,com=2).mean()
df['kdj_j'] = 3 * df['kdj_k'] - 2 * df['kdj_d']
return df
def RSI(df, n=14):
prices = df['close'].values.tolist()
deltas = np.diff(prices)
seed = deltas[:n+1]
up = seed[seed>=0].sum()/n
down = -seed[seed<0].sum()/n
rs = up/down
rsi = np.zeros_like(prices)
rsi[:n] = 100. - 100./(1.+rs)
for i in range(n, len(prices)):
delta = deltas[i-1] # cause the diff is 1 shorter
if delta>0:
upval = delta
downval = 0.
else:
upval = 0.
downval = -delta
up = (up*(n-1) + upval)/n
down = (down*(n-1) + downval)/n
rs = up/down
rsi[i] = 100. - 100./(1.+rs)
key = 'rsi_' + str(n)
df[key] = rsi
return df
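# The loop above is Wilder's smoothing: up/down are updated as
# (prev * (n-1) + new) / n, i.e. an EMA with alpha = 1/n. The same thing in
# pandas (a sketch, my addition; agrees with the loop up to warm-up effects):
#
#   delta = df['close'].diff()
#   up = delta.clip(lower=0).ewm(alpha=1/n, adjust=False).mean()
#   down = (-delta.clip(upper=0)).ewm(alpha=1/n, adjust=False).mean()
#   rsi = 100 - 100 / (1 + up / down)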
def MACD(df, short_win=12, long_win=26, macd_win=9):
    # Compute MACD with talib
prices = np.array(df['close'])
macd_tmp = talib.MACD(prices, fastperiod=short_win, slowperiod=long_win, signalperiod=macd_win)
df['macd_dif'] = macd_tmp[0]
df['macd_dea'] = macd_tmp[1]
df['macd'] = macd_tmp[2]
return df
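# For reference, MACD can also be computed without talib using pandas EWMs
# (a sketch, my addition; matches talib's definitions up to EWM warm-up):
#
#   ema_fast = df['close'].ewm(span=12, adjust=False).mean()
#   ema_slow = df['close'].ewm(span=26, adjust=False).mean()
#   dif = ema_fast - ema_slow           # macd_dif
#   dea = dif.ewm(span=9, adjust=False).mean()  # macd_dea
#   hist = dif - dea                    # macd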
def crossover(input_1, input_2):
    index = -1
    return (input_1[index] > input_2[index]) & (input_1[index-1] < input_2[index-1])
def ma_rule(df):
df['ma5'] = df['close'].rolling(window=5, center=False).mean()
df['ma10'] = df['close'].rolling(window=10, center=False).mean()
df['ma20'] = df['close'].rolling(window=20, center=False).mean()
df['ma30'] = df['close'].rolling(window=30, center=False).mean()
df['ma60'] = df['close'].rolling(window=60, center=False).mean()
df['ma120'] = df['close'].rolling(window=120, center=False).mean()
df['ma250'] = df['close'].rolling(window=250, center=False).mean()
index = -1
fit_count = 0
delta = 0.05 #ma5 / 60
ma5, ma10, ma20, ma30, ma60, ma120, ma250 = df['ma5'][index], df['ma10'][index], df['ma20'][index], df['ma30'][index], df['ma60'][index], df['ma120'][index], df['ma250'][index]
if abs(ma5 - ma10) < delta: fit_count += 1
if abs(ma5 - ma20) < delta: fit_count += 1
if abs(ma5 - ma30) < delta: fit_count += 1
if abs(ma5 - ma60) < delta: fit_count += 1
if abs(ma5 - ma120) < delta: fit_count += 1
if abs(ma5 - ma250) < delta: fit_count += 1
return fit_count > 4
def kdj_rule(df):
try: df = KDJ(df)
except: return False
if len(df) < 2: return False
index = -1
    return crossover(df['kdj_j'], df['kdj_d']) & (df['kdj_d'][index] > df['kdj_d'][index-1])
def kdj_rule_1(df):
try: df = KDJ(df)
except: return False
return df['kdj_d'][-1] < 20
def macd_rule(df):
try: df = MACD(df)
except: return False
input_1 = 0.2
input_2 = -0.8
input_3 = 22 * 3
index = -1
df['macd_dif_1'] = df['macd_dif'].shift(1)
df['macd_dea_1'] = df['macd_dea'].shift(1)
return (abs(df['macd_dea'][index]) < input_1) & \
(abs(df['macd_dif'][index]) < input_1) & \
(df['macd_dif'][-input_3:].min() < input_2) & \
(df['macd_dif'][index] > df['macd_dea'][index]) & \
((df['macd_dea_1'][index] > df['macd_dif_1'][index]) | (abs(df['macd_dea_1'][index] - df['macd_dif_1'][index]) < 0.007))
def macd_rule_1(df):
try: df = MACD(df)
except: return False
input_1 = 0
input_2 = -0.8
input_3 = 0.05
dif_len = len(df['macd_dif'])
if dif_len < 2: return False
if abs(df['macd_dif'][-1]) > input_3:
return False
for idx in range(dif_len-1, 1, -1):
if ((df['macd_dif'][idx] - df['macd_dif'][idx-1]) > input_1):
continue
if df['macd_dif'][idx] <= input_2:
return True
else: return False
def macd_rule_2(df, symbol):
try: df = MACD(df)
except: return False
input_1 = -3
input_2 = -0.2
index = -1
return (df['macd_dif'][index] > input_1) & \
(df['macd_dif'][index] < input_2) & \
(df['macd_dif'][index] > df['macd_dea'][index]) & \
((df['macd_dea'][index-1] > df['macd_dif'][index-1]) | (abs(df['macd_dea'][index-1] - df['macd_dif'][index-1]) < 0.007))
def rsi_rule(df):
try:
df = RSI(df, 6)
df = RSI(df, 12)
df = RSI(df, 24)
except: return False
index = -1
rsi_6, rsi_12, rsi_24 = df['rsi_6'][index], df['rsi_12'][index], df['rsi_24'][index]
return (rsi_6 < 20) & (rsi_12 < 20) & (rsi_24 < 30)
def judge_rule(symbol, dataset, window, selection, label):
#if kdj_rule(dataset) & macd_rule(dataset):
if kdj_rule_1(dataset):
selection.append(symbol)
def get_single_stock_data(root_path, symbol):
    '''
    Loads one symbol from the local CHN daily database. The feature set
    follows the quandl wiki layout: [Open High Low Close Volume Ex-Dividend
    Split Ratio Adj. Open Adj. High Adj. Low Adj. Close Adj. Volume]
    '''
# file_name = stock_folder + ticker + '.csv'
# COLUMNS = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume']
# RENAME_COLUMNS = ['date', 'open', 'high', 'low', 'close', 'volume']
# if os.path.exists(file_name) == False:
# print("get stock: " + ticker + " failed")
# return pd.DataFrame()
# df = pd.read_csv(
# file_name,
# #names=COLUMNS,
# skipinitialspace=True,
# engine='python',
# index_col=['Date'],
# #usecols=COLUMNS,
# parse_dates=['Date'],
# #skiprows=1,
# memory_map=True,
# #chunksize=300,
# ).sort_index()
df, lastUpdateTime = queryStock(root_path, "DB_STOCK", "SHEET_CHN", "_DAILY", symbol, "daily_update")
df.index = pd.to_datetime(df.index)
suspended_day = pd.Timestamp((datetime.datetime.now() - datetime.timedelta(days=3)).strftime("%Y-%m-%d"))
if df.empty:
#print("stock delisted", symbol)
return df
if df.index[-1] < suspended_day:
#print("stock suspended", symbol)
return pd.DataFrame()
if 'adj_close' in df:
        df = df.drop('close', axis=1)
df = df.rename(columns = {'adj_close':'close'})
return df
def inner_processing_stock_data(symbol, input_data, window, day_selection, week_selection, month_selection):
# start_date = pd.Timestamp(paras.start_date)
# end_date = pd.Timestamp(paras.end_date)
# input_data = input_data.loc[(input_data.index >= start_date) & (input_data.index <= end_date)]
day_data = input_data[input_data['volume'] > 0].copy()
#week_data = convert_week_based_data(day_data)
#month_data = convert_month_based_data(day_data)
judge_rule(symbol, day_data, window, day_selection, "day based")
#judge_rule(symbol, week_data, window, week_selection, "week based")
#judge_rule(symbol, month_data, window, month_selection, "month based")
def processing_stock_data(root_path, symbol, window, day_selection, week_selection, month_selection):
startTime = time.time()
data = get_single_stock_data(root_path, symbol)
if data.empty: return startTime
if len(data) < 60 + window: return startTime
inner_processing_stock_data(symbol, data, window, day_selection, week_selection, month_selection)
return startTime
def process_all_stocks_data(root_path, window = 1):
df = queryStockList(root_path, "DB_STOCK", "SHEET_CHN_DAILY")
df.index = df.index.astype(str).str.zfill(6)
symbols = df.index.values.tolist()
pbar = tqdm(total=len(symbols))
day_selection = []
week_selection = []
month_selection = []
# for index in range(0, window):
# day_window = []
# day_selection.append(day_window)
# week_window = []
# week_selection.append(week_window)
# month_window = []
# month_selection.append(month_window)
startTime = time.time()
for symbol in symbols:
startTime = processing_stock_data(root_path, symbol, window, day_selection, week_selection, month_selection)
outMessage = '%-*s processed in: %.4s seconds' % (6, symbol, (time.time() - startTime))
pbar.set_description(outMessage)
pbar.update(1)
print('total processing in: %.4s seconds' % ((time.time() - startTime)))
# with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
# # Start the load operations and mark each future with its URL
# future_to_stock = {executor.submit(processing_stock_data, root_path, symbol, window, day_selection, week_selection, month_selection): symbol for symbol in symbols}
# for future in concurrent.futures.as_completed(future_to_stock):
# stock = future_to_stock[future]
# try:
# startTime = future.result()
# except Exception as exc:
# startTime = time.time()
# print('%r generated an exception: %s' % (stock, exc))
# outMessage = '%-*s processed in: %.4s seconds' % (6, stock, (time.time() - startTime))
# pbar.set_description(outMessage)
# pbar.update(1)
# day_week_selection = []
# week_month_selection = []
# day_month_selection = []
# all_selection = []
#count = []
day_week_selection = list(set(day_selection) & set(week_selection ))
week_month_selection = list(set(week_selection) & set(month_selection ))
day_month_selection = list(set(day_selection) & set(month_selection ))
all_selection = list(set(day_week_selection) & set(week_month_selection))
#day_selection = list(set(day_selection) - set(all_selection))
#week_selection = list(set(week_selection) - set(all_selection))
#month_selection = list(set(month_selection) - set(all_selection))
# sumUp = len(day_week_selection[index]) + len(week_month_selection[index]) + len(day_month_selection[index]) + len(all_selection[index])
# count.insert(0,sumUp)
print("all_selection", len(all_selection), sorted(all_selection))
print("day_week_selection", len(day_week_selection), sorted(day_week_selection))
print("week_month_selection", len(week_month_selection), sorted(week_month_selection))
print("day_month_selection", len(day_month_selection), sorted(day_month_selection))
print("/n ------------------------ /n")
# plt.plot(range(0, len(count)), count)
# plt.title('A simple chirp')
# plt.show()
print("day_selection", len(day_selection), sorted(day_selection))
print("week_selection", len(week_selection), sorted(week_selection))
print("month_selection", len(month_selection), sorted(month_selection))
def calBasic():
pe = 40
    gpr = 30 # gross profit margin
    npr = 15 # net profit margin
    nav = 20
    roe = 40 # return on equity, over the last three years
df_base = ts.get_stock_basics()
baseData = pd.DataFrame(df_base)
baseData = baseData[(baseData.pe < pe) & (baseData.gpr > gpr) & (baseData.npr > npr)]
baseData = baseData.index.values.tolist()
years = [2016, 2015, 2014]
main_symbols = []
grow_symbols = []
if os.path.exists("year_2016.csv") == False:
df_main = ts.get_report_data(2016, 4)
mainData = pd.DataFrame(df_main)
mainData.to_csv("year_2016.csv")
if os.path.exists("grow_2016.csv") == False:
df_main = ts.get_growth_data(2016, 4)
mainData = pd.DataFrame(df_main)
mainData.to_csv("grow_2016.csv")
if os.path.exists("year_2015.csv") == False:
df_main = ts.get_report_data(2015, 4)
mainData = pd.DataFrame(df_main)
mainData.to_csv("year_2015.csv")
if os.path.exists("grow_2015.csv") == False:
df_main = ts.get_growth_data(2015, 4)
mainData = pd.DataFrame(df_main)
mainData.to_csv("grow_2015.csv")
if os.path.exists("year_2014.csv") == False:
df_main = ts.get_report_data(2014, 4)
mainData = pd.DataFrame(df_main)
mainData.to_csv("year_2014.csv")
if os.path.exists("grow_2014.csv") == False:
df_main = ts.get_growth_data(2014, 4)
mainData = pd.DataFrame(df_main)
mainData.to_csv("grow_2014.csv")
for year in years:
mainData = pd.read_csv("year_" + str(year) + ".csv")
mainData = mainData[mainData.roe > roe]
main_symbols.append(mainData.code.values.tolist())
for year in years:
mainData = pd.read_csv("grow_" + str(year) + ".csv")
mainData = mainData[mainData.nav > nav]
grow_symbols.append(mainData.code.values.tolist())
roe_list = list(set(main_symbols[0]) & set(main_symbols[1]) & set(main_symbols[2]))
roe_list = [str(item).zfill(6) for item in roe_list]
nav_list = list(set(grow_symbols[0]) & set(grow_symbols[1]) & set(grow_symbols[2]))
nav_list = [str(item).zfill(6) for item in nav_list]
output = list(set(roe_list) & set(nav_list) & set(baseData))
print(output)
if __name__ == "__main__":
pd.set_option('precision', 3)
pd.set_option('display.width',1000)
warnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)
now = datetime.datetime.now().strftime("%Y-%m-%d")
config = configparser.ConfigParser()
config.read(root_path + "/" + "config.ini")
storeType = int(config.get('Setting', 'StoreType'))
# if storeType == 1:
# from Start_DB_Server import StartServer, ShutdownServer
# # start database server (async)
# thread = StartServer(root_path)
# # wait for db start, the standard procedure should listen to
# # the completed event of function "StartServer"
# time.sleep(5)
print("updating data...")
#updateStockData_CHN_Daily(root_path, storeType)
print("Processing data...")
#calBasic()
process_all_stocks_data(root_path)
# if storeType == 1:
# # stop database server (sync)
# time.sleep(5)
# ShutdownServer()
|
109376ad6ad0af92719313f3c033d9c8fa8941e7
|
40dd8330e5f78c4348bbddc2c5acfd59d793dd51
|
/configs/swin/swin-large-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py
|
c93cdfeaaeb91a3aaefe8019afffd0d380b8f761
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmsegmentation
|
0d12092312e2c465ede1fd7dd9847b6f2b37049c
|
30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8
|
refs/heads/main
| 2023-09-04T10:54:52.299711
| 2023-07-24T07:28:21
| 2023-07-24T07:28:21
| 272,133,018
| 6,534
| 2,375
|
Apache-2.0
| 2023-09-14T01:22:32
| 2020-06-14T04:32:33
|
Python
|
UTF-8
|
Python
| false
| false
| 625
|
py
|
swin-large-patch4-window7-in22k-pre_upernet_8xb2-160k_ade20k-512x512.py
|
_base_ = [
'swin-tiny-patch4-window7-in1k-pre_upernet_8xb2-160k_'
'ade20k-512x512.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220412-aeecf2aa.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
pretrain_img_size=224,
embed_dims=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=7),
decode_head=dict(in_channels=[192, 384, 768, 1536], num_classes=150),
auxiliary_head=dict(in_channels=768, num_classes=150))
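# Note: the config framework merges this dict into the inherited _base_ file,
# so only the keys listed here (checkpoint, embed_dims, depths, num_heads,
# window_size, in_channels, num_classes) override the tiny-variant defaults;
# everything else is inherited unchanged.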
|
177b109e41870508aee009203a945236f8aff798
|
4f93f5a27b6c872903b9acf8d20fb736716a26df
|
/clair3/model.py
|
04193041d6ee2affdbd54827f998a8b02104ad5b
|
[
"BSD-3-Clause"
] |
permissive
|
HKU-BAL/Clair3
|
cf388ae3d0b0332eb8df125fb1e1a97120a90ed1
|
181f55d7a741855597d083baffc4551949d2837e
|
refs/heads/main
| 2023-07-28T05:34:32.917498
| 2023-07-20T02:58:19
| 2023-07-20T02:58:19
| 352,969,947
| 162
| 18
| null | 2023-06-01T14:15:31
| 2021-03-30T11:02:58
|
Python
|
UTF-8
|
Python
| false
| false
| 18,212
|
py
|
model.py
|
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
import tensorflow as tf
import logging
import numpy as np
logging.basicConfig(format='%(message)s', level=logging.INFO)
tf.get_logger().setLevel(logging.ERROR)
from clair3.task.main import GT21, GENOTYPE, VARIANT_LENGTH_1, VARIANT_LENGTH_2
import shared.param_f as param
params = dict(
float_type=tf.float32,
task_loss_weights=[
1, # gt21
1, # genotype
1, # variant/indel length 0
1, # variant/indel length 1
1 # l2 loss
],
output_shape=GT21.output_label_count + \
GENOTYPE.output_label_count + \
VARIANT_LENGTH_1.output_label_count + \
VARIANT_LENGTH_2.output_label_count,
output_gt21_shape=GT21.output_label_count,
output_genotype_shape=GENOTYPE.output_label_count,
output_indel_length_shape_1=VARIANT_LENGTH_1.output_label_count,
output_indel_length_shape_2=VARIANT_LENGTH_2.output_label_count,
output_gt21_entropy_weights=[1] * GT21.output_label_count,
output_genotype_entropy_weights=[1] * GENOTYPE.output_label_count,
output_indel_length_entropy_weights_1=[1] * VARIANT_LENGTH_1.output_label_count,
output_indel_length_entropy_weights_2=[1] * VARIANT_LENGTH_2.output_label_count,
L3_dropout_rate=0.2,
L4_num_units=256,
L4_pileup_num_units=128,
L4_dropout_rate=0.5,
L5_1_num_units=128,
L5_1_dropout_rate=0.2,
L5_2_num_units=128,
L5_2_dropout_rate=0.2,
L5_3_num_units=128,
L5_3_dropout_rate=0.2,
L5_4_num_units=128,
L5_4_dropout_rate=0.2,
LSTM1_num_units=128,
LSTM2_num_units=160,
LSTM1_dropout_rate=0,
LSTM2_dropout_rate=0.5,
l2_regularization_lambda=param.l2RegularizationLambda,
)
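# The four heads above are trained jointly; a combined logit vector of size
# params['output_shape'] can be split back per task like this (a sketch,
# my addition):
#
#   gt21, genotype, indel_len_1, indel_len_2 = tf.split(
#       logits,
#       [params['output_gt21_shape'], params['output_genotype_shape'],
#        params['output_indel_length_shape_1'],
#        params['output_indel_length_shape_2']],
#       axis=-1)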
add_l2_regulation = True
L2_regularizers = tf.keras.regularizers.l2(params['l2_regularization_lambda']) if add_l2_regulation else None
class Clair3_P(tf.keras.Model):
# Bi-lstm model for clair3 pileup input
def __init__(self, add_indel_length=False, predict=False):
super(Clair3_P, self).__init__()
# output
self.output_gt21_shape = params['output_gt21_shape']
self.output_genotype_shape = params['output_genotype_shape']
self.output_indel_length_shape_1 = params['output_indel_length_shape_1']
self.output_indel_length_shape_2 = params['output_indel_length_shape_2']
self.L3_dropout_rate = params['L3_dropout_rate']
self.L4_num_units = params['L4_num_units']
self.L4_pileup_num_units = params['L4_pileup_num_units']
self.L4_dropout_rate = params['L4_dropout_rate']
self.L5_1_num_units = params['L5_1_num_units']
self.L5_1_dropout_rate = params['L5_1_dropout_rate']
self.L5_2_num_units = params['L5_2_num_units']
self.L5_2_dropout_rate = params['L5_2_dropout_rate']
self.L5_3_num_units = params['L5_3_num_units']
self.L5_3_dropout_rate = params['L5_3_dropout_rate']
self.L5_4_num_units = params['L5_4_num_units']
self.L5_4_dropout_rate = params['L5_4_dropout_rate']
self.LSTM1_num_units = params['LSTM1_num_units']
self.LSTM2_num_units = params['LSTM2_num_units']
self.LSTM1_dropout_rate = params['LSTM1_dropout_rate']
self.LSTM2_dropout_rate = params['LSTM2_dropout_rate']
self.output_label_split = [
self.output_gt21_shape,
self.output_genotype_shape,
self.output_indel_length_shape_1,
self.output_indel_length_shape_2
]
self.add_indel_length = add_indel_length
self.predict = predict
self.LSTM1 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
units=self.LSTM1_num_units,
return_sequences=True,
kernel_regularizer=L2_regularizers
))
self.LSTM2 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
units=self.LSTM2_num_units,
return_sequences=True,
kernel_regularizer=L2_regularizers
))
self.L3_dropout = tf.keras.layers.Dropout(rate=self.L3_dropout_rate)
self.L3_dropout_flatten = tf.keras.layers.Flatten()
self.L4 = tf.keras.layers.Dense(units=self.L4_pileup_num_units, activation='selu',kernel_regularizer=L2_regularizers)
self.L4_dropout = tf.keras.layers.Dropout(rate=self.LSTM2_dropout_rate, seed=param.OPERATION_SEED)
self.L5_1 = tf.keras.layers.Dense(units=self.L5_1_num_units, activation='selu', kernel_regularizer=L2_regularizers)
self.L5_1_dropout = tf.keras.layers.Dropout(rate=self.L5_1_dropout_rate, seed=param.OPERATION_SEED)
self.L5_2 = tf.keras.layers.Dense(units=self.L5_2_num_units, activation='selu', kernel_regularizer=L2_regularizers)
self.L5_2_dropout = tf.keras.layers.Dropout(rate=self.L5_2_dropout_rate, seed=param.OPERATION_SEED)
self.Y_gt21_logits = tf.keras.layers.Dense(units=self.output_gt21_shape, activation='selu', kernel_regularizer=L2_regularizers)
self.Y_genotype_logits = tf.keras.layers.Dense(units=self.output_genotype_shape, activation='selu', kernel_regularizer=L2_regularizers)
if self.add_indel_length:
self.L5_3 = tf.keras.layers.Dense(units=self.L5_3_num_units, activation='selu', kernel_regularizer=L2_regularizers)
self.L5_3_dropout = tf.keras.layers.Dropout(rate=self.L5_3_dropout_rate, seed=param.OPERATION_SEED)
self.L5_4 = tf.keras.layers.Dense(units=self.L5_4_num_units, activation='selu', kernel_regularizer=L2_regularizers)
self.L5_4_dropout = tf.keras.layers.Dropout(rate=self.L5_4_dropout_rate, seed=param.OPERATION_SEED)
self.Y_indel_length_logits_1 = tf.keras.layers.Dense(units=self.output_indel_length_shape_1, activation='selu', kernel_regularizer=L2_regularizers)
self.Y_indel_length_logits_2 = tf.keras.layers.Dense(units=self.output_indel_length_shape_2, activation='selu', kernel_regularizer=L2_regularizers)
self.softmax = tf.keras.layers.Softmax()
    def call(self, x):
x = tf.cast(x, tf.float32)
x = self.LSTM1(x) # (batch_size, inp_seq_len, d_model)
x = self.LSTM2(x)
x = self.L3_dropout(x)
x = self.L3_dropout_flatten(x)
x = self.L4(x)
x = self.L4_dropout(x)
l5_1_dropout = self.L5_1_dropout(self.L5_1(x))
l5_2_dropout = self.L5_2_dropout(self.L5_2(x))
y_gt21_logits = self.softmax(self.Y_gt21_logits(l5_1_dropout))
y_genotype_logits = self.softmax(self.Y_genotype_logits(l5_2_dropout))
if self.add_indel_length:
l5_3_dropout = self.L5_3_dropout(self.L5_3(x))
l5_4_dropout = self.L5_4_dropout(self.L5_4(x))
y_indel_length_logits_1 = self.softmax(self.Y_indel_length_logits_1(l5_3_dropout))
y_indel_length_logits_2 = self.softmax(self.Y_indel_length_logits_2(l5_4_dropout))
if self.predict:
return tf.concat([y_gt21_logits, y_genotype_logits, y_indel_length_logits_1, y_indel_length_logits_2], axis=1)
return [y_gt21_logits, y_genotype_logits, y_indel_length_logits_1, y_indel_length_logits_2]
if self.predict:
return tf.concat([y_gt21_logits, y_genotype_logits],axis=1)
return [y_gt21_logits, y_genotype_logits]
class BasicConv2D(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, strides, padding, SeparableConv=False):
super(BasicConv2D, self).__init__()
conv = tf.keras.layers.SeparableConv2D if SeparableConv else tf.keras.layers.Conv2D
self.conv = conv(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
kernel_regularizer=L2_regularizers)
self.bn = tf.keras.layers.BatchNormalization()
self.relu = tf.keras.layers.ReLU()
def call(self, inputs):
output = self.conv(inputs)
output = self.bn(output)
output = self.relu(output)
return output
class BasicBlock(tf.keras.layers.Layer):
def __init__(self, filter_num, stride=1,SeparableConv=False):
super(BasicBlock, self).__init__()
conv = tf.keras.layers.SeparableConv2D if SeparableConv else tf.keras.layers.Conv2D
self.conv1 = conv(filters=filter_num,
kernel_size=(3, 3),
strides=stride,
padding="same",
kernel_regularizer=L2_regularizers)
self.bn1 = tf.keras.layers.BatchNormalization()
self.conv2 = conv(filters=filter_num,
kernel_size=(3, 3),
strides=1,
padding="same",
kernel_regularizer=L2_regularizers)
self.bn2 = tf.keras.layers.BatchNormalization()
if stride != 1:
self.downsample = tf.keras.Sequential()
self.downsample.add(tf.keras.layers.Conv2D(filters=filter_num,
kernel_size=(1, 1),
strides=stride,
kernel_regularizer=L2_regularizers))
self.downsample.add(tf.keras.layers.BatchNormalization())
else:
self.downsample = lambda x: x
def call(self, inputs):
residual = self.downsample(inputs)
x = self.conv1(inputs)
x = self.bn1(x, )
x = tf.nn.relu(x)
x = self.conv2(x)
x = self.bn2(x, )
output = tf.nn.relu(tf.keras.layers.add([residual, x]))
return output
def make_basic_block_layer(filter_num, blocks, stride=1, SeparableConv=False):
res_block = tf.keras.Sequential()
res_block.add(BasicBlock(filter_num, stride=stride, SeparableConv=SeparableConv))
for _ in range(1, blocks):
res_block.add(BasicBlock(filter_num, stride=1,SeparableConv=SeparableConv))
return res_block
class PyramidPolling(tf.keras.layers.Layer):
def __init__(self, spatial_pool_size=(3, 2, 1)):
super(PyramidPolling, self).__init__()
self.spatial_pool_size = spatial_pool_size
self.pool_len = len(self.spatial_pool_size)
self.window_h = np.empty(self.pool_len, dtype=int)
self.stride_h = np.empty(self.pool_len, dtype=int)
self.window_w = np.empty(self.pool_len, dtype=int)
self.stride_w = np.empty(self.pool_len, dtype=int)
self.flatten = tf.keras.layers.Flatten()
def build(self, input_shape):
height = int(input_shape[1])
width = int(input_shape[2])
for i in range(self.pool_len):
self.window_h[i] = self.stride_h[i] = int(np.ceil(height / self.spatial_pool_size[i]))
self.window_w[i] = self.stride_w[i] = int(np.ceil(width / self.spatial_pool_size[i]))
def call(self, x):
for i in range(self.pool_len):
max_pool = tf.nn.max_pool(x,
ksize=[1, self.window_h[i], self.window_w[i], 1],
strides=[1, self.stride_h[i], self.stride_w[i], 1],
padding='SAME')
if i == 0:
pp = self.flatten(max_pool)
else:
pp = tf.concat([pp, self.flatten(max_pool)], axis=-1)
return pp
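# Sizing sketch for PyramidPolling (editor's addition): each level i max-pools the
# feature map down to roughly spatial_pool_size[i] x spatial_pool_size[i] cells and
# flattens them, so with the default (3, 2, 1) the concatenated output has about
# C * (3*3 + 2*2 + 1*1) = 14 * C features for C input channels (exact counts depend
# on the ceil() rounding in build()).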
class Clair3_F(tf.keras.Model):
    # Residual CNN model for Clair3 full-alignment input
def __init__(self, add_indel_length=False, predict=False):
super(Clair3_F, self).__init__()
self.output_gt21_shape = params['output_gt21_shape']
self.output_genotype_shape = params['output_genotype_shape']
self.output_indel_length_shape_1 = params['output_indel_length_shape_1']
self.output_indel_length_shape_2 = params['output_indel_length_shape_2']
self.L3_dropout_rate = params['L3_dropout_rate']
self.L4_num_units = params['L4_num_units']
self.L4_dropout_rate = params['L4_dropout_rate']
self.L5_1_num_units = params['L5_1_num_units']
self.L5_1_dropout_rate = params['L5_1_dropout_rate']
self.L5_2_num_units = params['L5_2_num_units']
self.L5_2_dropout_rate = params['L5_2_dropout_rate']
self.L5_3_num_units = params['L5_3_num_units']
self.L5_3_dropout_rate = params['L5_3_dropout_rate']
self.L5_4_num_units = params['L5_4_num_units']
self.L5_4_dropout_rate = params['L5_4_dropout_rate']
self.output_label_split = [
self.output_gt21_shape,
self.output_genotype_shape,
self.output_indel_length_shape_1,
self.output_indel_length_shape_2
]
self.add_indel_length = add_indel_length
self.predict = predict
self.conv1 = BasicConv2D(filters=64,
kernel_size=(3, 3),
strides=2,
padding="same",)
self.res_block1 = make_basic_block_layer(filter_num=64,
blocks=1, stride=1, SeparableConv=False)
self.conv3 = BasicConv2D(filters=128,
kernel_size=(3, 3),
strides=2,
padding="same")
self.res_block2 = make_basic_block_layer(filter_num=128,
blocks=1, stride=1, SeparableConv=False)
self.conv5 = BasicConv2D(filters=256,
kernel_size=(3, 3),
strides=2,
padding="same")
self.res_block3 = make_basic_block_layer(filter_num=256,
blocks=1, stride=1)
self.pyramidpolling = PyramidPolling()
self.L3_dropout = tf.keras.layers.Dropout(rate=self.L3_dropout_rate)
self.flatten = tf.keras.layers.Flatten()
self.L4 = tf.keras.layers.Dense(units=self.L4_num_units, activation='selu',kernel_regularizer=L2_regularizers)
self.L4_dropout = tf.keras.layers.Dropout(rate=self.L4_dropout_rate, seed=param.OPERATION_SEED)
self.L5_1 = tf.keras.layers.Dense(units=self.L5_1_num_units, activation='selu', kernel_regularizer=L2_regularizers)
self.L5_1_dropout = tf.keras.layers.Dropout(rate=self.L5_1_dropout_rate, seed=param.OPERATION_SEED)
self.L5_2 = tf.keras.layers.Dense(units=self.L5_1_num_units, activation='selu', kernel_regularizer=L2_regularizers)
self.L5_2_dropout = tf.keras.layers.Dropout(rate=self.L5_2_dropout_rate, seed=param.OPERATION_SEED)
self.Y_gt21_logits = tf.keras.layers.Dense(units=self.output_gt21_shape, activation='selu', kernel_regularizer=L2_regularizers)
self.Y_genotype_logits = tf.keras.layers.Dense(units=self.output_genotype_shape, activation='selu', kernel_regularizer=L2_regularizers)
if self.add_indel_length:
self.L5_3 = tf.keras.layers.Dense(units=self.L5_3_num_units, activation='selu', kernel_regularizer=L2_regularizers)
self.L5_3_dropout = tf.keras.layers.Dropout(rate=self.L5_3_dropout_rate, seed=param.OPERATION_SEED)
self.L5_4 = tf.keras.layers.Dense(units=self.L5_4_num_units, activation='selu', kernel_regularizer=L2_regularizers)
self.L5_4_dropout = tf.keras.layers.Dropout(rate=self.L5_4_dropout_rate, seed=param.OPERATION_SEED)
self.Y_indel_length_logits_1 = tf.keras.layers.Dense(units=self.output_indel_length_shape_1, activation='selu',kernel_regularizer=L2_regularizers)
self.Y_indel_length_logits_2 = tf.keras.layers.Dense(units=self.output_indel_length_shape_2, activation='selu',kernel_regularizer=L2_regularizers)
self.softmax = tf.keras.layers.Softmax()
def call(self, inputs):
x = tf.cast(inputs, tf.float32) / param.NORMALIZE_NUM
x = self.conv1(x)
x = self.res_block1(x)
x = self.conv3(x)
x = self.res_block2(x)
x = self.conv5(x)
x = self.res_block3(x)
x = self.pyramidpolling(x)
x = self.flatten(self.L3_dropout(x))
x = self.L4(x)
x = self.L4_dropout(x)
l5_1_dropout = self.L5_1_dropout(self.L5_1(x))
l5_2_dropout = self.L5_2_dropout(self.L5_2(x))
y_gt21_logits = self.softmax(self.Y_gt21_logits(l5_1_dropout))
y_genotype_logits = self.softmax(self.Y_genotype_logits(l5_2_dropout))
if self.add_indel_length:
l5_3_dropout = self.L5_3_dropout(self.L5_3(x))
l5_4_dropout = self.L5_4_dropout(self.L5_4(x))
y_indel_length_logits_1 = self.softmax(self.Y_indel_length_logits_1(l5_3_dropout))
y_indel_length_logits_2 = self.softmax(self.Y_indel_length_logits_2(l5_4_dropout))
if self.predict:
return tf.concat([y_gt21_logits, y_genotype_logits, y_indel_length_logits_1, y_indel_length_logits_2], axis=1)
return [y_gt21_logits, y_genotype_logits, y_indel_length_logits_1, y_indel_length_logits_2]
if self.predict:
return tf.concat([y_gt21_logits, y_genotype_logits],axis=1)
return [y_gt21_logits, y_genotype_logits]
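# A minimal smoke-test sketch (editor's addition; the input shapes below are
# hypothetical placeholders -- the real tensor dimensions come from shared.param_f):
# if __name__ == "__main__":
#     pileup = Clair3_P(add_indel_length=True, predict=True)
#     full = Clair3_F(add_indel_length=True, predict=True)
#     print(pileup(tf.zeros([2, 33, 18])).shape)   # (batch, positions, features) -- assumed
#     print(full(tf.zeros([2, 89, 33, 8])).shape)  # (batch, height, width, channels) -- assumed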
|
05c20dbc680700f41b4341b19e4ed1c7124b2725
|
9d3ac4621657b395396a133c85b3161cb2f2c4ea
|
/examples/single_attack_pytorch_resnet18.py
|
77ca6fcc802f90ed0613183c27e0b8477bb69d78
|
[
"MIT"
] |
permissive
|
bethgelab/foolbox
|
e34ff931a855dab74c9bf99a60a9b469524c5de8
|
12abe74e2f1ec79edb759454458ad8dd9ce84939
|
refs/heads/master
| 2023-09-06T06:25:42.730283
| 2022-05-25T09:55:55
| 2022-05-25T09:55:55
| 94,331,757
| 2,684
| 528
|
MIT
| 2023-03-25T01:35:14
| 2017-06-14T13:05:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
single_attack_pytorch_resnet18.py
|
#!/usr/bin/env python3
"""
A simple example that demonstrates how to run a single attack against
a PyTorch ResNet-18 model for different epsilons and how to then report
the robust accuracy.
"""
import torchvision.models as models
import eagerpy as ep
from foolbox import PyTorchModel, accuracy, samples
from foolbox.attacks import LinfPGD
def main() -> None:
# instantiate a model (could also be a TensorFlow or JAX model)
model = models.resnet18(pretrained=True).eval()
preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)
# get data and test the model
# wrapping the tensors with ep.astensors is optional, but it allows
# us to work with EagerPy tensors in the following
images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
clean_acc = accuracy(fmodel, images, labels)
print(f"clean accuracy: {clean_acc * 100:.1f} %")
# apply the attack
attack = LinfPGD()
epsilons = [
0.0,
0.0002,
0.0005,
0.0008,
0.001,
0.0015,
0.002,
0.003,
0.01,
0.1,
0.3,
0.5,
1.0,
]
raw_advs, clipped_advs, success = attack(fmodel, images, labels, epsilons=epsilons)
# calculate and report the robust accuracy (the accuracy of the model when
# it is attacked)
robust_accuracy = 1 - success.float32().mean(axis=-1)
print("robust accuracy for perturbations with")
for eps, acc in zip(epsilons, robust_accuracy):
print(f" Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")
# we can also manually check this
# we will use the clipped advs instead of the raw advs, otherwise
# we would need to check if the perturbation sizes are actually
# within the specified epsilon bound
print()
print("we can also manually check this:")
print()
print("robust accuracy for perturbations with")
for eps, advs_ in zip(epsilons, clipped_advs):
acc2 = accuracy(fmodel, advs_, labels)
print(f" Linf norm ≤ {eps:<6}: {acc2 * 100:4.1f} %")
print(" perturbation sizes:")
perturbation_sizes = (advs_ - images).norms.linf(axis=(1, 2, 3)).numpy()
print(" ", str(perturbation_sizes).replace("\n", "\n" + " "))
if acc2 == 0:
break
if __name__ == "__main__":
main()
|
7e18d1ca067fe9b41588825e1a52e6801508bda5
|
048c5e3f4f0b7a0d1d63714b0b58be4e297167c1
|
/dbus_next/auth.py
|
db91c86e47e2e74aa6b2d721ac631ef07bd587a8
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
altdesktop/python-dbus-next
|
17b3ceec366ba20b9377ad3b5c88cb7175ac017d
|
ab566e16a71bfc9d7e0d29676aa459ec060e72c5
|
refs/heads/master
| 2023-09-02T15:47:29.360808
| 2022-09-11T20:22:45
| 2022-09-11T20:22:45
| 181,604,924
| 156
| 55
|
MIT
| 2023-07-04T08:53:10
| 2019-04-16T03:05:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,598
|
py
|
auth.py
|
from .errors import AuthError
import enum
import os
# The auth interface here is unstable. I would like to eventually open this up
# for people to define their own custom authentication protocols, but I'm not
# familiar with what's needed for that exactly. To work with any message bus
# implementation would require abstracting out all the IO. Async operations
# might be challenging because different IO backends have different ways of
# doing that. I might just end up giving the raw socket and leaving it all up
# to the user, but it would be nice to have a little guidance in the interface
# since a lot of it is strongly specified. If you have a need for this, contact
# the project maintainer to help stabilize this interface.
class _AuthResponse(enum.Enum):
OK = 'OK'
REJECTED = 'REJECTED'
DATA = 'DATA'
ERROR = 'ERROR'
AGREE_UNIX_FD = 'AGREE_UNIX_FD'
@classmethod
def parse(klass, line):
args = line.split(' ')
response = klass(args[0])
return response, args[1:]
# UNSTABLE
class Authenticator:
"""The base class for authenticators for :class:`MessageBus <dbus_next.message_bus.BaseMessageBus>` authentication.
In the future, the library may allow extending this class for custom authentication protocols.
:seealso: https://dbus.freedesktop.org/doc/dbus-specification.html#auth-protocol
"""
def _authentication_start(self, negotiate_unix_fd=False):
raise NotImplementedError(
'authentication_start() must be implemented in the inheriting class')
def _receive_line(self, line):
raise NotImplementedError('receive_line() must be implemented in the inheriting class')
@staticmethod
def _format_line(line):
return f'{line}\r\n'.encode()
class AuthExternal(Authenticator):
"""An authenticator class for the external auth protocol for use with the
:class:`MessageBus <dbus_next.message_bus.BaseMessageBus>`.
    :seealso: https://dbus.freedesktop.org/doc/dbus-specification.html#auth-protocol
"""
def __init__(self):
self.negotiate_unix_fd = False
self.negotiating_fds = False
def _authentication_start(self, negotiate_unix_fd=False) -> str:
self.negotiate_unix_fd = negotiate_unix_fd
hex_uid = str(os.getuid()).encode().hex()
return f'AUTH EXTERNAL {hex_uid}'
def _receive_line(self, line: str):
response, args = _AuthResponse.parse(line)
if response is _AuthResponse.OK:
if self.negotiate_unix_fd:
self.negotiating_fds = True
return "NEGOTIATE_UNIX_FD"
else:
return "BEGIN"
if response is _AuthResponse.AGREE_UNIX_FD:
return "BEGIN"
raise AuthError(f'authentication failed: {response.value}: {args}')
class AuthAnnonymous(Authenticator):
"""An authenticator class for the annonymous auth protocol for use with the
:class:`MessageBus <dbus_next.message_bus.BaseMessageBus>`.
:sealso: https://dbus.freedesktop.org/doc/dbus-specification.html#auth-protocol
"""
def _authentication_start(self, negotiate_unix_fd=False) -> str:
if negotiate_unix_fd:
raise AuthError(
                'anonymous authentication does not support negotiating unix fds right now')
return 'AUTH ANONYMOUS'
def _receive_line(self, line: str) -> str:
response, args = _AuthResponse.parse(line)
if response != _AuthResponse.OK:
raise AuthError(f'authentication failed: {response.value}: {args}')
return 'BEGIN'
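# A rough client-side sketch of the EXTERNAL handshake (editor's addition; assumes a
# connected unix-domain socket `sock` with line-based replies):
# auth = AuthExternal()
# sock.sendall(b'\x00')  # the spec requires a single NUL byte before AUTH
# sock.sendall(Authenticator._format_line(auth._authentication_start()))
# reply = sock.makefile('r').readline().strip()           # e.g. 'OK <server guid>'
# sock.sendall(Authenticator._format_line(auth._receive_line(reply)))  # sends 'BEGIN'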
|
46d938131520a63358f8bd257b454aa39837bacc
|
01857ef455ea60eccaf03b5a9059ec83e9803c2e
|
/nicegui/functions/refreshable.py
|
c5625134c25fba8d998c56e258adb56fc089979e
|
[
"MIT"
] |
permissive
|
zauberzeug/nicegui
|
f08312cc1f393deca79e0e84a2506d3a35efff16
|
c61b1315f29d51e26cc1168207f5616b302f8df0
|
refs/heads/main
| 2023-08-18T18:09:30.937322
| 2023-08-18T15:04:00
| 2023-08-18T15:04:00
| 365,250,183
| 5,128
| 271
|
MIT
| 2023-09-14T01:50:56
| 2021-05-07T13:55:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,731
|
py
|
refreshable.py
|
from dataclasses import dataclass
from typing import Any, Awaitable, Callable, Dict, List, Tuple, Union
from typing_extensions import Self
from .. import background_tasks, globals
from ..element import Element
from ..helpers import KWONLY_SLOTS, is_coroutine_function
@dataclass(**KWONLY_SLOTS)
class RefreshableTarget:
container: Element
instance: Any
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
def run(self, func: Callable[..., Any]) -> Union[None, Awaitable]:
if is_coroutine_function(func):
async def wait_for_result() -> None:
with self.container:
if self.instance is None:
await func(*self.args, **self.kwargs)
else:
await func(self.instance, *self.args, **self.kwargs)
return wait_for_result()
else:
with self.container:
if self.instance is None:
func(*self.args, **self.kwargs)
else:
func(self.instance, *self.args, **self.kwargs)
return None # required by mypy
class RefreshableContainer(Element, component='refreshable.js'):
pass
class refreshable:
def __init__(self, func: Callable[..., Any]) -> None:
"""Refreshable UI functions
The `@ui.refreshable` decorator allows you to create functions that have a `refresh` method.
This method will automatically delete all elements created by the function and recreate them.
"""
self.func = func
self.instance = None
self.targets: List[RefreshableTarget] = []
def __get__(self, instance, _) -> Self:
self.instance = instance
return self
def __getattribute__(self, __name: str) -> Any:
attribute = object.__getattribute__(self, __name)
if __name == 'refresh':
def refresh(*args: Any, _instance=self.instance, **kwargs: Any) -> None:
self.instance = _instance
attribute(*args, **kwargs)
return refresh
return attribute
def __call__(self, *args: Any, **kwargs: Any) -> Union[None, Awaitable]:
self.prune()
target = RefreshableTarget(container=RefreshableContainer(), instance=self.instance, args=args, kwargs=kwargs)
self.targets.append(target)
return target.run(self.func)
def refresh(self, *args: Any, **kwargs: Any) -> None:
self.prune()
for target in self.targets:
if target.instance != self.instance:
continue
target.container.clear()
target.args = args or target.args
target.kwargs.update(kwargs)
try:
result = target.run(self.func)
except TypeError as e:
if 'got multiple values for argument' in str(e):
function = str(e).split()[0].split('.')[-1]
parameter = str(e).split()[-1]
raise Exception(f'{parameter} needs to be consistently passed to {function} '
'either as positional or as keyword argument') from e
raise
if is_coroutine_function(self.func):
assert result is not None
if globals.loop and globals.loop.is_running():
background_tasks.create(result)
else:
globals.app.on_startup(result)
def prune(self) -> None:
self.targets = [
target
for target in self.targets
if target.container.client.id in globals.clients and target.container.id in target.container.client.elements
]
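# A minimal usage sketch (editor's addition; assumes the public `ui` facade that
# re-exports this decorator as `ui.refreshable`, as the docstring above describes):
# from nicegui import ui
#
# @ui.refreshable
# def show_value(value: int) -> None:
#     ui.label(f'value: {value}')
#
# show_value(1)
# ui.button('update', on_click=lambda: show_value.refresh(2))
# ui.run()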
|
6176b0de4042a7e82a72f3eb05da231a660aceac
|
f2034c76a11ce6296131d2bab89a5dae7d59edfe
|
/python/nano/test/automl/pytorch/test_searcher.py
|
d67d023343294e78320390c4da1e484639394afc
|
[
"Apache-2.0"
] |
permissive
|
intel-analytics/BigDL
|
e22cd917eecc7340bda3df4356acba0623a62ef6
|
4ffa012a426e0d16ed13b707b03d8787ddca6aa4
|
refs/heads/main
| 2023-08-22T06:31:37.923091
| 2023-08-22T02:58:42
| 2023-08-22T02:58:42
| 66,823,715
| 4,913
| 1,327
|
Apache-2.0
| 2023-09-14T10:41:50
| 2016-08-29T07:59:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,036
|
py
|
test_searcher.py
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from unittest import TestCase
import torch
from bigdl.nano.pytorch import Trainer
import bigdl.nano.automl.hpo.space as space
from bigdl.nano.automl.pytorch import HPOSearcher
import bigdl.nano.automl.hpo as hpo
from _helper import BoringModel, RandomDataset
class TestHPOSearcher(TestCase):
def test_simple_model(self):
@hpo.plmodel()
class CustomModel(BoringModel):
def __init__(self,
out_dim1,
out_dim2,
dropout_1,
dropout_2):
super().__init__()
layers = []
input_dim = 32
for out_dim, dropout in [(out_dim1, dropout_1),
(out_dim2,dropout_2)]:
layers.append(torch.nn.Linear(input_dim, out_dim))
layers.append(torch.nn.Tanh())
layers.append(torch.nn.Dropout(dropout))
input_dim = out_dim
layers.append(torch.nn.Linear(input_dim, 2))
self.layers: torch.nn.Module = torch.nn.Sequential(*layers)
model = CustomModel(
out_dim1=space.Categorical(16,32),
out_dim2=space.Categorical(16,32),
dropout_1=space.Real(0.1,0.5),
dropout_2 = 0.2)
trainer = Trainer(
logger=True,
checkpoint_callback=False,
max_epochs=3,
)
searcher = HPOSearcher(trainer)
searcher.search(
model,
target_metric='val_loss',
direction='minimize',
n_trials=3,
max_epochs=3,
)
study = searcher.search_summary()
assert(study)
assert(study.best_trial)
def test_simple_model_multi_processes(self):
@hpo.plmodel()
class CustomModel(BoringModel):
def __init__(self,
out_dim1,
out_dim2,
dropout_1,
dropout_2):
super().__init__()
layers = []
input_dim = 32
for out_dim, dropout in [(out_dim1, dropout_1),
(out_dim2,dropout_2)]:
layers.append(torch.nn.Linear(input_dim, out_dim))
layers.append(torch.nn.Tanh())
layers.append(torch.nn.Dropout(dropout))
input_dim = out_dim
layers.append(torch.nn.Linear(input_dim, 2))
self.layers: torch.nn.Module = torch.nn.Sequential(*layers)
model = CustomModel(
out_dim1=space.Categorical(16,32),
out_dim2=space.Categorical(16,32),
dropout_1=space.Real(0.1,0.5),
dropout_2 = 0.2)
trainer = Trainer(
logger=True,
checkpoint_callback=True,
max_epochs=3,
num_processes=2,
# distributed_backend="ray",
)
searcher = HPOSearcher(trainer, num_processes=2)
searcher.search(
model,
target_metric='val_loss',
direction='minimize',
n_trials=3,
max_epochs=3,
)
study = searcher.search_summary()
assert(study)
assert(study.best_trial)
if __name__ == '__main__':
pytest.main([__file__])
|
81a41ca47b2123c34ca3eaacbea00b03299caacc
|
2212a32833776a5d5d2164d8efd11bd18bd3f768
|
/tf_agents/utils/test_utils.py
|
f42c1ff696b6624f7943effa00e96013bd6652eb
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/agents
|
f39805fb98ef9af712dcaff3ba49e1ac6d42804b
|
eca1093d3a047e538f17f6ab92ab4d8144284f23
|
refs/heads/master
| 2023-08-14T04:56:30.774797
| 2023-08-02T17:43:44
| 2023-08-02T17:44:09
| 157,936,206
| 2,755
| 848
|
Apache-2.0
| 2023-07-26T02:35:32
| 2018-11-17T00:29:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,691
|
py
|
test_utils.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import gin
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
FLAGS = flags.FLAGS
def contains(list1, list2):
"""Check if all items in list2 are in list1.
This function handles the case when the parameters are lists of np.arrays
  (which wouldn't be handled by something like .issubset(...)).
Args:
list1: List which may or may not contain list2.
list2: List to check if included in list 1.
Returns:
A boolean indicating whether list2 is contained in list1.
"""
contains_result = True
for item2 in list2:
contains_result = contains_result and np.any(
[np.all(item2 == item1) for item1 in list1]
)
if not contains_result:
break
return contains_result
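# Example: element-wise np.array checks that set operations would reject still work --
#   contains([np.array([1, 2]), np.array([3])], [np.array([3])])  ->  True
#   contains([np.array([1, 2])], [np.array([9])])                 ->  False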
def test_src_dir_path(relative_path):
"""Returns an absolute test srcdir path given a relative path.
Args:
relative_path: a path relative to tf_agents root. e.g.
"environments/config".
Returns:
An absolute path to the linked in runfiles.
"""
return os.path.join(
FLAGS.test_srcdir, 'tf_agents', relative_path
)
class TestCase(tf.test.TestCase):
"""Base class for TF-Agents unit tests."""
def setUp(self):
super(TestCase, self).setUp()
tf.compat.v1.enable_resource_variables()
# Guard against tests calling gin.parse_config() without calling
# gin.clear_config(), which can cause nasty bugs that show up in a
# completely different test. See b/139088071 for example.
gin.clear_config()
def tearDown(self):
gin.clear_config()
super(TestCase, self).tearDown()
def initialize_v1_variables(self):
variables = tf.compat.v1.global_variables() + tf.compat.v1.local_variables()
self.evaluate(tf.compat.v1.variables_initializer(variables))
# Main function so that users of `test_utils.TestCase` can also call
# `test_utils.main()`.
def main():
tf.test.main()
|
01d7e38cd4ea666f82fb42afbf9ab02044782d9f
|
8d6f97d71518ea4e32bbb4c332f55aac1fbfefb9
|
/pyvex/expr.py
|
ff2f07e63e1b73467fdf0202889162dd0310b14b
|
[
"BSD-2-Clause"
] |
permissive
|
angr/pyvex
|
acb80c39d42622e930a1fa0cb774a95f1452467a
|
40f151e54e75b5ad57856675b4c4c70247d0cd9c
|
refs/heads/master
| 2023-08-28T18:41:00.203041
| 2023-08-25T19:40:56
| 2023-08-25T19:40:56
| 13,027,352
| 304
| 130
|
BSD-2-Clause
| 2023-09-11T22:08:43
| 2013-09-23T05:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 25,516
|
py
|
expr.py
|
import logging
import re
from typing import List, Optional
from archinfo import RegisterOffset, TmpVar
from .const import U8, U16, U32, U64, IRConst, get_type_size
from .enums import IRCallee, IRRegArray, VEXObject, get_enum_from_int, get_int_from_enum
from .errors import PyVEXError
from .native import ffi, pvc
log = logging.getLogger("pyvex.expr")
class IRExpr(VEXObject):
"""
IR expressions in VEX represent operations without side effects.
"""
__slots__ = []
tag: Optional[str] = None
tag_int = 0 # set automatically at bottom of file
def pp(self):
print(self.__str__())
@property
def child_expressions(self) -> List["IRExpr"]:
"""
A list of all of the expressions that this expression ends up evaluating.
"""
expressions = []
for k in self.__slots__:
v = getattr(self, k)
if isinstance(v, IRExpr):
expressions.append(v)
expressions.extend(v.child_expressions)
return expressions
@property
def constants(self):
"""
A list of all of the constants that this expression ends up using.
"""
constants = []
for k in self.__slots__:
v = getattr(self, k)
if isinstance(v, IRExpr):
constants.extend(v.constants)
elif isinstance(v, IRConst):
constants.append(v)
return constants
def result_size(self, tyenv):
return get_type_size(self.result_type(tyenv))
def result_type(self, tyenv):
raise NotImplementedError()
def replace_expression(self, replacements):
"""
Replace child expressions in-place.
:param Dict[IRExpr, IRExpr] replacements: A mapping from expression-to-find to expression-to-replace-with
:return: None
"""
for k in self.__slots__:
v = getattr(self, k)
if isinstance(v, IRExpr) and v in replacements:
setattr(self, k, replacements.get(v))
elif isinstance(v, list):
# Replace the instance in the list
for i, expr_ in enumerate(v):
if isinstance(expr_, IRExpr) and expr_ in replacements:
v[i] = replacements.get(expr_)
elif type(v) is tuple:
# Rebuild the tuple
_lst = []
replaced = False
for i, expr_ in enumerate(v):
if isinstance(expr_, IRExpr) and expr_ in replacements:
_lst.append(replacements.get(expr_))
replaced = True
else:
_lst.append(expr_)
if replaced:
setattr(self, k, tuple(_lst))
elif isinstance(v, IRExpr):
v.replace_expression(replacements)
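    # Replacement sketch (editor's addition; uses the RdTmp/Const instance pools
    # defined further down in this module):
    #   binop = Binop('Iop_Add32', [RdTmp.get_instance(1), RdTmp.get_instance(2)])
    #   binop.replace_expression({RdTmp.get_instance(2): Const.get_instance(U32(5))})
    #   binop.args[1] is Const.get_instance(U32(5))  ->  True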
@staticmethod
def _from_c(c_expr) -> "IRExpr":
if c_expr == ffi.NULL or c_expr[0] == ffi.NULL:
return None
try:
return enum_to_expr_class(c_expr.tag)._from_c(c_expr)
except KeyError:
raise PyVEXError("Unknown/unsupported IRExprTag %s\n" % get_enum_from_int(c_expr.tag))
_translate = _from_c
@staticmethod
def _to_c(expr):
try:
return tag_to_expr_class(expr.tag)._to_c(expr)
except KeyError:
raise PyVEXError("Unknown/unsupported IRExprTag %s\n" % expr.tag)
def typecheck(self, tyenv):
return self.result_type(tyenv)
class Binder(IRExpr):
"""
Used only in pattern matching within Vex. Should not be seen outside of Vex.
"""
__slots__ = ["binder"]
tag = "Iex_Binder"
def __init__(self, binder):
self.binder = binder
def __str__(self):
return "Binder"
@staticmethod
def _from_c(c_expr):
return Binder(c_expr.iex.Binder.binder)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Binder(expr.binder)
def result_type(self, tyenv):
return "Ity_INVALID"
class VECRET(IRExpr):
tag = "Iex_VECRET"
__slots__ = []
def __str__(self):
return "VECRET"
@staticmethod
def _from_c(c_expr):
return VECRET()
@staticmethod
def _to_c(expr):
return pvc.IRExpr_VECRET()
def result_type(self, tyenv):
return "Ity_INVALID"
class GSPTR(IRExpr):
__slots__ = []
tag = "Iex_GSPTR"
def __str__(self):
return "GSPTR"
@staticmethod
def _from_c(c_expr):
return GSPTR()
@staticmethod
def _to_c(expr):
return pvc.IRExpr_GSPTR()
def result_type(self, tyenv):
return "Ity_INVALID"
class GetI(IRExpr):
"""
Read a guest register at a non-fixed offset in the guest state.
"""
__slots__ = ["descr", "ix", "bias"]
tag = "Iex_GetI"
def __init__(self, descr, ix, bias):
self.descr = descr
self.ix = ix
self.bias = bias
@property
def description(self):
return self.descr
@property
def index(self):
return self.ix
def __str__(self):
return f"GetI({self.descr})[{self.ix},{self.bias}]"
@staticmethod
def _from_c(c_expr):
descr = IRRegArray._from_c(c_expr.Iex.GetI.descr)
ix = IRExpr._from_c(c_expr.Iex.GetI.ix)
bias = c_expr.Iex.GetI.bias
return GetI(descr, ix, bias)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_GetI(IRRegArray._to_c(expr.descr), IRExpr._to_c(expr.ix), expr.bias)
def result_type(self, tyenv):
return self.descr.elemTy
class RdTmp(IRExpr):
"""
Read the value held by a temporary.
"""
__slots__ = ["_tmp"]
tag = "Iex_RdTmp"
def __init__(self, tmp: TmpVar):
self._tmp = tmp
def __str__(self):
return "t%d" % self.tmp
@property
def tmp(self) -> TmpVar:
return self._tmp
@staticmethod
def _from_c(c_expr):
tmp = c_expr.Iex.RdTmp.tmp
return RdTmp.get_instance(tmp)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_RdTmp(expr.tmp)
@staticmethod
def get_instance(tmp):
if tmp < 1024:
# for small tmp reads, they are cached and are only created once globally
return _RDTMP_POOL[tmp]
return RdTmp(tmp)
def replace_expression(self, replacements):
# RdTmp is one of the terminal IRExprs, which cannot be replaced.
pass
def result_type(self, tyenv):
return tyenv.lookup(self.tmp)
def __hash__(self):
return 133700 + self._tmp
_RDTMP_POOL = list(RdTmp(i) for i in range(0, 1024))
class Get(IRExpr):
"""
Read a guest register, at a fixed offset in the guest state.
"""
__slots__ = ["offset", "ty_int"]
tag = "Iex_Get"
def __init__(self, offset: RegisterOffset, ty: str, ty_int: Optional[int] = None):
self.offset = offset
if ty_int is None:
self.ty_int = get_int_from_enum(ty)
else:
self.ty_int = ty_int
@property
def ty(self):
return get_enum_from_int(self.ty_int)
@property
def type(self):
return get_enum_from_int(self.ty_int)
def __str__(self, reg_name=None):
if reg_name:
return f"GET:{self.ty[4:]}({reg_name})"
else:
return f"GET:{self.ty[4:]}(offset={self.offset})"
@staticmethod
def _from_c(c_expr):
return Get(c_expr.Iex.Get.offset, get_enum_from_int(c_expr.Iex.Get.ty))
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Get(expr.offset, expr.ty_int)
def result_type(self, tyenv):
return self.ty
def __hash__(self):
return (self.offset << 8) | self.ty_int
class Qop(IRExpr):
"""
A quaternary operation (4 arguments).
"""
__slots__ = ["op", "args"]
tag = "Iex_Qop"
def __init__(self, op, args):
self.op = op
self.args = args
def __str__(self):
return "{}({})".format(self.op[4:], ",".join(str(a) for a in self.args))
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
return Qop(
get_enum_from_int(c_expr.Iex.Qop.details.op),
[
IRExpr._from_c(arg)
for arg in [
c_expr.Iex.Qop.details.arg1,
c_expr.Iex.Qop.details.arg2,
c_expr.Iex.Qop.details.arg3,
c_expr.Iex.Qop.details.arg4,
]
],
)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Qop(get_int_from_enum(expr.op), *[IRExpr._to_c(arg) for arg in expr.args])
def result_type(self, tyenv):
return get_op_retty(self.op)
def typecheck(self, tyenv): # TODO change all this to use PyvexTypeErrorException
resty, (arg1ty, arg2ty, arg3ty, arg4ty) = op_arg_types(self.op)
arg1ty_real = self.args[0].typecheck(tyenv)
arg2ty_real = self.args[1].typecheck(tyenv)
arg3ty_real = self.args[2].typecheck(tyenv)
arg4ty_real = self.args[3].typecheck(tyenv)
if arg1ty_real is None or arg2ty_real is None or arg3ty_real is None or arg4ty_real is None:
return None
if arg1ty_real != arg1ty:
log.debug("First arg of %s must be %s", self.op, arg1ty)
return None
if arg2ty_real != arg2ty:
log.debug("Second arg of %s must be %s", self.op, arg2ty)
return None
if arg3ty_real != arg3ty:
log.debug("Third arg of %s must be %s", self.op, arg3ty)
return None
if arg4ty_real != arg4ty:
log.debug("Fourth arg of %s must be %s", self.op, arg4ty)
return None
return resty
class Triop(IRExpr):
"""
A ternary operation (3 arguments)
"""
__slots__ = ["op", "args"]
tag = "Iex_Triop"
def __init__(self, op, args):
self.op = op
self.args = args
def __str__(self):
return "{}({})".format(self.op[4:], ",".join(str(a) for a in self.args))
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
return Triop(
get_enum_from_int(c_expr.Iex.Triop.details.op),
[
IRExpr._from_c(arg)
for arg in [c_expr.Iex.Triop.details.arg1, c_expr.Iex.Triop.details.arg2, c_expr.Iex.Triop.details.arg3]
],
)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Triop(get_int_from_enum(expr.op), *[IRExpr._to_c(arg) for arg in expr.args])
def result_type(self, tyenv):
return get_op_retty(self.op)
def typecheck(self, tyenv):
resty, (arg1ty, arg2ty, arg3ty) = op_arg_types(self.op)
arg1ty_real = self.args[0].typecheck(tyenv)
arg2ty_real = self.args[1].typecheck(tyenv)
arg3ty_real = self.args[2].typecheck(tyenv)
if arg1ty_real is None or arg2ty_real is None or arg3ty_real is None:
return None
if arg1ty_real != arg1ty:
log.debug("First arg of %s must be %s", self.op, arg1ty)
return None
if arg2ty_real != arg2ty:
log.debug("Second arg of %s must be %s", self.op, arg2ty)
return None
if arg3ty_real != arg3ty:
log.debug("Third arg of %s must be %s", self.op, arg3ty)
return None
return resty
class Binop(IRExpr):
"""
A binary operation (2 arguments).
"""
__slots__ = ["_op", "op_int", "args"]
tag = "Iex_Binop"
def __init__(self, op, args, op_int=None):
self.op_int = op_int
self.args = args
self._op = op if op is not None else None
def __str__(self):
return "{}({})".format(self.op[4:], ",".join(str(a) for a in self.args))
@property
def op(self):
if self._op is None:
self._op = get_enum_from_int(self.op_int)
return self._op
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
return Binop(
None,
[IRExpr._from_c(arg) for arg in [c_expr.Iex.Binop.arg1, c_expr.Iex.Binop.arg2]],
op_int=c_expr.Iex.Binop.op,
)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Binop(get_int_from_enum(expr.op), *[IRExpr._to_c(arg) for arg in expr.args])
def result_type(self, tyenv):
return get_op_retty(self.op)
def typecheck(self, tyenv):
arg1ty_real = self.args[0].typecheck(tyenv)
arg2ty_real = self.args[1].typecheck(tyenv)
resty, (arg1ty, arg2ty) = op_arg_types(self.op)
if arg1ty_real is None or arg2ty_real is None:
return None
if arg1ty_real != arg1ty:
log.debug("First arg of %s must be %s", self.op, arg1ty)
return None
if arg2ty_real != arg2ty:
log.debug("Second arg of %s must be %s", self.op, arg2ty)
return None
return resty
class Unop(IRExpr):
"""
A unary operation (1 argument).
"""
__slots__ = ["op", "args"]
tag = "Iex_Unop"
def __init__(self, op, args):
self.op = op
self.args = args
def __str__(self):
return "{}({})".format(self.op[4:], ",".join(str(a) for a in self.args))
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
return Unop(get_enum_from_int(c_expr.Iex.Unop.op), [IRExpr._from_c(c_expr.Iex.Unop.arg)])
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Unop(get_int_from_enum(expr.op), IRExpr._to_c(expr.args[0]))
def result_type(self, tyenv):
return get_op_retty(self.op)
def typecheck(self, tyenv):
resty, (arg1ty,) = op_arg_types(self.op)
arg1ty_real = self.args[0].typecheck(tyenv)
if arg1ty_real is None:
return None
if arg1ty_real != arg1ty:
log.debug("First arg of %s must be %s", self.op, arg1ty)
return None
return resty
class Load(IRExpr):
"""
A load from memory.
"""
__slots__ = ["end", "ty", "addr"]
tag = "Iex_Load"
def __init__(self, end, ty, addr):
self.end = end
self.ty = ty
self.addr = addr
@property
def endness(self):
return self.end
@property
def type(self):
return self.ty
def __str__(self):
return f"LD{self.end[-2:].lower()}:{self.ty[4:]}({self.addr})"
@staticmethod
def _from_c(c_expr):
return Load(
get_enum_from_int(c_expr.Iex.Load.end),
get_enum_from_int(c_expr.Iex.Load.ty),
IRExpr._from_c(c_expr.Iex.Load.addr),
)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Load(get_int_from_enum(expr.end), get_int_from_enum(expr.ty), IRExpr._to_c(expr.addr))
def result_type(self, tyenv):
return self.ty
def typecheck(self, tyenv):
addrty = self.addr.typecheck(tyenv)
if addrty is None:
return None
if addrty != tyenv.wordty:
log.debug("Address must be word-sized")
return None
return self.ty
class Const(IRExpr):
"""
A constant expression.
"""
__slots__ = ["_con"]
tag = "Iex_Const"
def __init__(self, con: "IRConst"):
self._con = con
def __str__(self):
return str(self.con)
@property
def con(self) -> "IRConst":
return self._con
@staticmethod
def _from_c(c_expr):
con = IRConst._from_c(c_expr.Iex.Const.con)
return Const.get_instance(con)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Const(IRConst._to_c(expr.con))
@staticmethod
def get_instance(con):
if con.value < 1024 and con.__class__ in _CONST_POOL:
return _CONST_POOL[con.__class__][con.value]
return Const(con)
def result_type(self, tyenv):
return self.con.type
_CONST_POOL = {
U8: [Const(U8(i)) for i in range(0, 1024)],
U16: [Const(U16(i)) for i in range(0, 1024)],
U32: [Const(U32(i)) for i in range(0, 1024)],
U64: [Const(U64(i)) for i in range(0, 1024)],
}
class ITE(IRExpr):
"""
An if-then-else expression.
"""
__slots__ = ["cond", "iffalse", "iftrue"]
tag = "Iex_ITE"
def __init__(self, cond, iffalse, iftrue):
self.cond = cond
self.iffalse = iffalse
self.iftrue = iftrue
def __str__(self):
return f"ITE({self.cond},{self.iftrue},{self.iffalse})"
@staticmethod
def _from_c(c_expr):
return ITE(
IRExpr._from_c(c_expr.Iex.ITE.cond),
IRExpr._from_c(c_expr.Iex.ITE.iffalse),
IRExpr._from_c(c_expr.Iex.ITE.iftrue),
)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_ITE(IRExpr._to_c(expr.cond), IRExpr._to_c(expr.iftrue), IRExpr._to_c(expr.iffalse))
def result_type(self, tyenv):
return self.iftrue.result_type(tyenv)
def typecheck(self, tyenv):
condty = self.cond.typecheck(tyenv)
falsety = self.iffalse.typecheck(tyenv)
truety = self.iftrue.typecheck(tyenv)
if condty is None or falsety is None or truety is None:
return None
if condty != "Ity_I1":
log.debug("guard must be Ity_I1")
return None
if falsety != truety:
log.debug("false condition must be same type as true condition")
return None
return falsety
class CCall(IRExpr):
"""
A call to a pure (no side-effects) helper C function.
"""
__slots__ = ["retty", "cee", "args"]
tag = "Iex_CCall"
def __init__(self, retty, cee, args):
self.retty = retty
self.cee = cee
self.args = tuple(args)
@property
def ret_type(self):
return self.retty
@property
def callee(self):
return self.cee
def __str__(self):
return "{}({}):{}".format(self.cee, ",".join(str(a) for a in self.args), self.retty)
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
i = 0
args = []
while True:
arg = c_expr.Iex.CCall.args[i]
if arg == ffi.NULL:
break
args.append(IRExpr._from_c(arg))
i += 1
return CCall(get_enum_from_int(c_expr.Iex.CCall.retty), IRCallee._from_c(c_expr.Iex.CCall.cee), tuple(args))
@staticmethod
def _to_c(expr):
args = [IRExpr._to_c(arg) for arg in expr.args]
mkIRExprVec = getattr(pvc, "mkIRExprVec_%d" % len(args))
return pvc.IRExpr_CCall(IRCallee._to_c(expr.cee), get_int_from_enum(expr.retty), mkIRExprVec(*args))
def result_type(self, tyenv):
return self.retty
def get_op_retty(op):
return op_arg_types(op)[0]
op_signatures = {}
def _request_op_type_from_cache(op):
return op_signatures[op]
def _request_op_type_from_libvex(op):
Ity_INVALID = 0x1100 # as defined in enum IRType in VEX
res_ty = ffi.new("IRType *")
arg_tys = [ffi.new("IRType *") for _ in range(4)]
# initialize all IRTypes to Ity_INVALID
for arg in arg_tys:
arg[0] = Ity_INVALID
pvc.typeOfPrimop(get_int_from_enum(op), res_ty, *arg_tys)
arg_ty_vals = [a[0] for a in arg_tys]
try:
numargs = arg_ty_vals.index(Ity_INVALID)
except ValueError:
numargs = 4
args_tys_list = [get_enum_from_int(arg_ty_vals[i]) for i in range(numargs)]
op_ty_sig = (get_enum_from_int(res_ty[0]), tuple(args_tys_list))
op_signatures[op] = op_ty_sig
return op_ty_sig
class PyvexOpMatchException(Exception):
pass
class PyvexTypeErrorException(Exception):
pass
def int_type_for_size(size):
return "Ity_I%d" % size
# precompiled regexes
unop_signature_re = re.compile(r"Iop_(Not|Ctz|Clz)(?P<size>\d+)$")
binop_signature_re = re.compile(r"Iop_(Add|Sub|Mul|Xor|Or|And|Div[SU]|Mod)(?P<size>\d+)$")
shift_signature_re = re.compile(r"Iop_(Shl|Shr|Sar)(?P<size>\d+)$")
cmp_signature_re_1 = re.compile(r"Iop_Cmp(EQ|NE)(?P<size>\d+)$")
cmp_signature_re_2 = re.compile(r"Iop_Cmp(GT|GE|LT|LE)(?P<size>\d+)[SU]$")
mull_signature_re = re.compile(r"Iop_Mull[SU](?P<size>\d+)$")
half_signature_re = re.compile(r"Iop_DivMod[SU](?P<fullsize>\d+)to(?P<halfsize>\d+)$")
cast_signature_re = re.compile(r"Iop_(?P<srcsize>\d+)(U|S|HI|HL)?to(?P<dstsize>\d+)")
def unop_signature(op):
m = unop_signature_re.match(op)
if m is None:
raise PyvexOpMatchException()
size = int(m.group("size"))
size_type = int_type_for_size(size)
return size_type, (size_type,)
def binop_signature(op):
m = binop_signature_re.match(op)
if m is None:
raise PyvexOpMatchException()
size = int(m.group("size"))
size_type = int_type_for_size(size)
return (size_type, (size_type, size_type))
def shift_signature(op):
m = shift_signature_re.match(op)
if m is None:
raise PyvexOpMatchException()
size = int(m.group("size"))
if size > 255:
raise PyvexTypeErrorException("Cannot apply shift operation to %d size int because shift index is 8-bit" % size)
size_type = int_type_for_size(size)
return (size_type, (size_type, int_type_for_size(8)))
def cmp_signature(op):
m = cmp_signature_re_1.match(op)
m2 = cmp_signature_re_2.match(op)
if (m is None) == (m2 is None):
raise PyvexOpMatchException()
mfound = m if m is not None else m2
size = int(mfound.group("size"))
size_type = int_type_for_size(size)
return (int_type_for_size(1), (size_type, size_type))
def mull_signature(op):
m = mull_signature_re.match(op)
if m is None:
raise PyvexOpMatchException()
size = int(m.group("size"))
size_type = int_type_for_size(size)
doubled_size_type = int_type_for_size(2 * size)
return (doubled_size_type, (size_type, size_type))
def half_signature(op):
m = half_signature_re.match(op)
if m is None:
raise PyvexOpMatchException()
fullsize = int(m.group("fullsize"))
halfsize = int(m.group("halfsize"))
if halfsize * 2 != fullsize:
raise PyvexTypeErrorException("Invalid Instruction %s: Type 1 must be twice the size of type 2" % op)
fullsize_type = int_type_for_size(fullsize)
halfsize_type = int_type_for_size(halfsize)
return (fullsize_type, (fullsize_type, halfsize_type))
def cast_signature(op):
m = cast_signature_re.match(op)
if m is None:
raise PyvexOpMatchException()
src_type = int_type_for_size(int(m.group("srcsize")))
dst_type = int_type_for_size(int(m.group("dstsize")))
return (dst_type, (src_type,))
polymorphic_op_processors = [
unop_signature,
binop_signature,
shift_signature,
cmp_signature,
mull_signature,
half_signature,
cast_signature,
]
def _request_polymorphic_op_type(op):
for polymorphic_signature in polymorphic_op_processors:
try:
op_ty_sig = polymorphic_signature(op)
break
except PyvexOpMatchException:
continue
else:
raise PyvexOpMatchException("Op %s not recognized" % op)
return op_ty_sig
_request_funcs = [_request_op_type_from_cache, _request_op_type_from_libvex, _request_polymorphic_op_type]
def op_arg_types(op):
for _request_func in _request_funcs:
try:
return _request_func(op)
except KeyError:
continue
raise ValueError("Cannot find type of op %s" % op)
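# Signature lookup sketch (editor's addition; these follow from the polymorphic
# matchers above, e.g. binop_signature, mull_signature and shift_signature):
#   op_arg_types('Iop_Add32')    ->  ('Ity_I32', ('Ity_I32', 'Ity_I32'))
#   op_arg_types('Iop_MullU32')  ->  ('Ity_I64', ('Ity_I32', 'Ity_I32'))
#   op_arg_types('Iop_Shl64')    ->  ('Ity_I64', ('Ity_I64', 'Ity_I8'))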
_globals = globals().copy()
#
# Mapping from tag strings/enums to IRExpr classes
#
tag_to_expr_mapping = {}
enum_to_expr_mapping = {}
tag_count = 0
cls = None
for cls in _globals.values():
if type(cls) is type and issubclass(cls, IRExpr) and cls is not IRExpr:
tag_to_expr_mapping[cls.tag] = cls
enum_to_expr_mapping[get_int_from_enum(cls.tag)] = cls
cls.tag_int = tag_count
tag_count += 1
del cls
def tag_to_expr_class(tag):
"""
Convert a tag string to the corresponding IRExpr class type.
:param str tag: The tag string.
:return: A class.
:rtype: type
"""
try:
return tag_to_expr_mapping[tag]
except KeyError:
raise KeyError("Cannot find expression class for type %s." % tag)
def enum_to_expr_class(tag_enum):
"""
Convert a tag enum to the corresponding IRExpr class.
:param int tag_enum: The tag enum.
:return: A class.
:rtype: type
"""
try:
return enum_to_expr_mapping[tag_enum]
except KeyError:
raise KeyError("Cannot find expression class for type %s." % get_enum_from_int(tag_enum))
|
848824b8494eaa2112380226266ae0482512fb82
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_algos/glm/pyunit_link_functions_tweedie_basic_glm.py
|
3e07337b50dabf4beeb426e54c51f276c6c6511f
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
pyunit_link_functions_tweedie_basic_glm.py
|
from past.utils import old_div
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def link_functions_tweedie_basic():
print("Read in prostate data.")
hdf = h2o.upload_file(pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip"))
print("Testing for family: TWEEDIE")
print("Set variables for h2o.")
y = "CAPSULE"
x = ["AGE","RACE","DCAPS","PSA","VOL","DPROS","GLEASON"]
print("Create models with canonical link: TWEEDIE")
model_h2o_tweedie = H2OGeneralizedLinearEstimator(family="tweedie", link="tweedie", alpha=0.5, Lambda=0)
model_h2o_tweedie.train(x=x, y=y, training_frame=hdf)
print("Compare model deviances for link function tweedie (using precomputed values from R)")
deviance_h2o_tweedie = old_div(model_h2o_tweedie.residual_deviance(), model_h2o_tweedie.null_deviance())
assert 0.721452 - deviance_h2o_tweedie <= 0.01, "h2o's residual/null deviance is more than 0.01 lower than R's. h2o: " \
"{0}, r: {1}".format(deviance_h2o_tweedie, 0.721452)
if __name__ == "__main__":
pyunit_utils.standalone_test(link_functions_tweedie_basic)
else:
link_functions_tweedie_basic()
|
73a256ac3161626940fbc1cb11f396e3c77a9aa6
|
8f2c55a2530c3e59dab5907c0044c618b88dd09b
|
/third_party/cython_json.py
|
ed112b888492297d4cd7ec15956f7194391ffe24
|
[
"Apache-2.0",
"EPL-1.0"
] |
permissive
|
fabioz/PyDev.Debugger
|
5a9c6d4c09be85a0e2d9fb93567fd65faf04c81d
|
26864816cbfcf002a99913bcc31ebef48042a4ac
|
refs/heads/main
| 2023-08-18T01:08:34.323363
| 2023-04-15T11:15:47
| 2023-04-15T11:15:47
| 21,870,144
| 363
| 126
|
Apache-2.0
| 2023-07-30T23:03:31
| 2014-07-15T18:01:12
|
Python
|
UTF-8
|
Python
| false
| false
| 9,498
|
py
|
cython_json.py
|
import Cython
from Cython.Compiler import Nodes
from Cython.Compiler.Errors import CompileError
import sys
import json
import traceback
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Note: Cython has some recursive structures in some classes, so, parsing only what we really
# expect may be a bit better (although our recursion check should get that too).
accepted_info = {
'PyClassDef': set(['name', 'doc', 'body', 'bases', 'decorators', 'pos'])
}
def node_to_dict(node, _recurse_level=0, memo=None):
nodeid = id(node) # just to be sure it's checked by identity in the memo
if memo is None:
memo = {}
else:
if nodeid in memo:
# i.e.: prevent Nodes recursion.
return None
memo[nodeid] = 1
try:
_recurse_level += 1
assert _recurse_level < 500, "It seems we are recursing..."
node_name = node.__class__.__name__
# print((' ' * _recurse_level) + node_name)
if node_name.endswith("Node"):
node_name = node_name[:-4]
data = {"__node__": node_name}
if _recurse_level == 1:
data['__version__'] = Cython.__version__
dct = node.__dict__
accepted = accepted_info.get(node_name)
if accepted is None:
items = [(key, value) for key, value in dct.items()]
else:
# for key in dct.keys():
# if key not in accepted:
# print('Skipped: %s' % (key,))
items = [(key, dct[key]) for key in accepted]
for attr_name, attr in items:
if attr_name in ("pos", "position"):
data["line"] = attr[1]
data["col"] = attr[2]
continue
if isinstance(attr, Nodes.Node):
data[attr_name] = node_to_dict(attr, _recurse_level, memo)
elif isinstance(attr, (list, tuple)):
lst = []
for x in attr:
if isinstance(x, Nodes.Node):
lst.append(node_to_dict(x, _recurse_level, memo))
elif isinstance(x, (bytes, str)):
lst.append(x)
elif hasattr(x, 'encode'):
lst.append(x.encode('utf-8', 'replace'))
elif isinstance(x, (list, tuple)):
tup = []
for y in x:
if isinstance(y, (str, bytes)):
tup.append(y)
elif isinstance(y, Nodes.Node):
tup.append(node_to_dict(y, _recurse_level, memo))
lst.append(tup)
data[attr_name] = lst
else:
data[attr_name] = str(attr)
finally:
memo.pop(nodeid, None)
return data
def source_to_dict(source, name=None):
from Cython.Compiler.TreeFragment import parse_from_strings, StatListNode
    # Right now we don't collect errors, but the returned dict already includes an
    # 'errors' list to keep the API stable for when we do.
collected_errors = []
try:
# Note: we don't use TreeFragment because it formats the code removing empty lines
# (which ends up creating an AST with wrong lines).
if not name:
name = "(tree fragment)"
mod = t = parse_from_strings(name, source)
t = t.body # Make sure a StatListNode is at the top
if not isinstance(t, StatListNode):
t = StatListNode(pos=mod.pos, stats=[t])
root = t
except CompileError as e:
return {
'ast': None,
'errors': [node_to_dict(e)]
}
except BaseException as e:
as_dict = {
'ast': None,
'errors': [{
'__node__': 'CompileError', 'line': 1, 'col': 1, 'message_only': str(e)
}]
}
return as_dict
result = {'ast': node_to_dict(root), 'errors': [node_to_dict(e) for e in collected_errors]}
return result
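# Conversion sketch (editor's addition): node names drop the trailing 'Node', and a
# StatListNode always wraps the parsed body --
#   d = source_to_dict('def f():\n    return 1\n')
#   d['errors']           ->  []
#   d['ast']['__node__']  ->  'StatList'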
from _pydev_bundle import pydev_localhost
HOST = pydev_localhost.get_localhost() # Symbolic name meaning the local host
def dbg(s):
sys.stderr.write('%s\n' % (s,))
# f = open('c:/temp/test.txt', 'a')
# print_ >> f, s
# f.close()
SERVER_NAME = 'CythonJson'
class Exit(Exception):
pass
class CythonJsonServer(object):
def __init__(self, port):
self.ended = False
self._buffer = b''
self.port = port
self.socket = None # socket to send messages.
self.exit_process_on_kill = True
def send(self, msg):
if not isinstance(msg, bytes):
msg = msg.encode('utf-8', 'replace')
self.socket.sendall(msg)
def connect_to_server(self):
from _pydev_bundle._pydev_saved_modules import socket
self.socket = s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((HOST, self.port))
except:
sys.stderr.write('Error on connect_to_server with parameters: host: %s port: %s\n' % (HOST, self.port))
raise
def _read(self, size):
while True:
buffer_len = len(self._buffer)
if buffer_len == size:
ret = self._buffer
self._buffer = b''
return ret
if buffer_len > size:
ret = self._buffer[:size]
self._buffer = self._buffer[size:]
return ret
try:
r = self.socket.recv(max(size - buffer_len, 1024))
except OSError:
return b''
if not r:
return b''
self._buffer += r
def _read_line(self):
while True:
i = self._buffer.find(b'\n')
if i != -1:
i += 1 # Add the newline to the return
ret = self._buffer[:i]
self._buffer = self._buffer[i:]
return ret
else:
try:
r = self.socket.recv(1024)
except OSError:
return b''
if not r:
return b''
self._buffer += r
def process_command(self, json_contents):
try:
as_dict = json.loads(json_contents)
if as_dict['command'] == 'cython_to_json_ast':
contents = as_dict['contents']
as_dict = source_to_dict(contents)
result = as_dict
else:
result = {'command': '<unexpected>', 'received': json_contents}
except:
from io import StringIO
s = StringIO()
traceback.print_exc(file=s)
result = {'command': '<errored>', 'error': s.getvalue()}
return json.dumps(result)
def run(self):
# Echo server program
try:
dbg(SERVER_NAME + ' connecting to java server on %s (%s)' % (HOST, self.port))
            # connect to the java server as a client socket.
self.connect_to_server()
dbg(SERVER_NAME + ' Connected to java server')
content_len = -1
while True:
dbg('Will read line...')
line = self._read_line()
dbg('Read: %s' % (line,))
if not line:
raise Exit()
if line.startswith(b'Content-Length:'):
content_len = int(line.strip().split(b':', 1)[1])
dbg('Found content len: %s' % (content_len,))
continue
if content_len != -1:
# If we previously received a content length, read until a '\r\n'.
if line == b'\r\n':
dbg('Will read contents (%s)...' % (content_len,))
json_contents = self._read(content_len)
dbg('Read: %s' % (json_contents,))
content_len = -1
if len(json_contents) == 0:
raise Exit()
# We just received a json message, let's process it.
dbg('Will process...')
output = self.process_command(json_contents)
if not isinstance(output, bytes):
output = output.encode('utf-8', 'replace')
self.send('Content-Length: %s\r\n\r\n' % (len(output),))
self.send(output)
continue
except Exit:
sys.exit(0)
except:
traceback.print_exc()
raise
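def _example_peer():
    """Hypothetical sketch (not part of the original file): this server
    connects *out* to a listening peer and then speaks an HTTP-like framing:
    a 'Content-Length: <n>' line, a blank line, then <n> bytes of JSON.
    A peer that accepts one connection and requests an AST could look like
    this (the payload keys follow process_command() above)."""
    import socket as socket_mod
    srv = socket_mod.socket(socket_mod.AF_INET, socket_mod.SOCK_STREAM)
    srv.bind((HOST, 0))
    srv.listen(1)
    # Launch this script with srv.getsockname()[1] as its port argument, then:
    conn, _addr = srv.accept()
    payload = json.dumps({'command': 'cython_to_json_ast', 'contents': 'a = 1'}).encode('utf-8')
    conn.sendall(b'Content-Length: %d\r\n\r\n' % len(payload))
    conn.sendall(payload)
    # The reply uses the same framing: read up to the blank line, then the body.
    buf = b''
    while b'\r\n\r\n' not in buf:
        buf += conn.recv(1024)
    header, _sep, body = buf.partition(b'\r\n\r\n')
    content_len = int(header.split(b':', 1)[1])
    while len(body) < content_len:
        body += conn.recv(1024)
    return json.loads(body.decode('utf-8'))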
if __name__ == '__main__':
args = sys.argv[1:]
if args == ['-']:
# Read from stdin/dump to stdout
if sys.version_info < (3,):
stdin_get_value = sys.stdin.read
else:
stdin_get_value = sys.stdin.buffer.read
source = stdin_get_value()
# After reading, convert to unicode (use the stdout encoding)
source = source.decode(sys.stdout.encoding, 'replace')
as_dict = source_to_dict(source)
print(json.dumps(as_dict, indent=4))
sys.stdout.flush()
else:
# start as server
port = int(sys.argv[1]) # this is from where we want to receive messages.
t = CythonJsonServer(port)
dbg(SERVER_NAME + ' will start')
t.run()
|
725085615fdfec15df6aa0bdb3e087c029ce992a
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/brick-wall.py
|
958ee11d6a02a9d3af3f7b8f3abeb80f891027e1
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 579
|
py
|
brick-wall.py
|
# Time: O(n), n is the total number of the bricks
# Space: O(m), m is the total number of different widths
import collections
class Solution(object):
def leastBricks(self, wall):
"""
:type wall: List[List[int]]
:rtype: int
"""
widths = collections.defaultdict(int)
result = len(wall)
for row in wall:
width = 0
            for i in range(len(row)-1):
width += row[i]
widths[width] += 1
result = min(result, len(wall) - widths[width])
return result
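# Hypothetical driver (not part of the original solution file): the classic
# LeetCode 554 example, where a vertical line at cumulative width 4 crosses
# only two bricks (len(wall) - 4 edges = 2).
if __name__ == '__main__':
    wall = [[1, 2, 2, 1], [3, 1, 2], [1, 3, 2], [2, 4], [3, 1, 2], [1, 3, 1, 1]]
    print(Solution().leastBricks(wall))  # expected: 2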
|
ebba924d5585ea2a6d869331890fc939422bdabc
|
14990fe6493928adb17f401895a25a66391f2cef
|
/customtkinter/windows/widgets/appearance_mode/appearance_mode_base_class.py
|
b7f757ab3557f49dbd3c96e031d8cf61baca7e37
|
[
"MIT"
] |
permissive
|
TomSchimansky/CustomTkinter
|
3e9db4d9b7f2f8b2f8dda4c8a5b665b813e5d334
|
d719950f80eb2768db96bd4cc627523e99603b1b
|
refs/heads/master
| 2023-08-27T10:13:43.672181
| 2023-07-27T12:40:19
| 2023-07-27T12:40:19
| 344,554,014
| 8,506
| 1,069
|
MIT
| 2023-09-11T11:23:31
| 2021-03-04T17:24:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,602
|
py
|
appearance_mode_base_class.py
|
from typing import Any, Union, Tuple, List
from .appearance_mode_tracker import AppearanceModeTracker
class CTkAppearanceModeBaseClass:
"""
Super-class that manages the appearance mode. Methods:
- destroy() must be called when sub-class is destroyed
- _set_appearance_mode() abstractmethod, gets called when appearance mode changes, must be overridden
- _apply_appearance_mode() to convert tuple color
"""
def __init__(self):
AppearanceModeTracker.add(self._set_appearance_mode, self)
self.__appearance_mode = AppearanceModeTracker.get_mode() # 0: "Light" 1: "Dark"
def destroy(self):
AppearanceModeTracker.remove(self._set_appearance_mode)
def _set_appearance_mode(self, mode_string: str):
""" can be overridden but super method must be called at the beginning """
if mode_string.lower() == "dark":
self.__appearance_mode = 1
elif mode_string.lower() == "light":
self.__appearance_mode = 0
def _get_appearance_mode(self) -> str:
""" get appearance mode as a string, 'light' or 'dark' """
if self.__appearance_mode == 0:
return "light"
else:
return "dark"
def _apply_appearance_mode(self, color: Union[str, Tuple[str, str], List[str]]) -> str:
"""
        color can be a single hex color string, a color name, or a tuple color
        of the form (light_color, dark_color). The function always returns a
        single color string.
"""
if isinstance(color, (tuple, list)):
return color[self.__appearance_mode]
else:
return color
@staticmethod
    def _check_color_type(color: Any, transparency: bool = False):
        if color is None:
            raise ValueError("color is None, for transparency set color='transparent'")
        elif isinstance(color, (tuple, list)) and any(c == "transparent" for c in color):
            raise ValueError(f"transparency is not allowed in tuple color {color}, use 'transparent'")
        elif color == "transparent" and transparency is False:
            raise ValueError("transparency is not allowed for this attribute")
elif isinstance(color, str):
return color
elif isinstance(color, (tuple, list)) and len(color) == 2 and isinstance(color[0], str) and isinstance(color[1], str):
return color
else:
raise ValueError(f"color {color} must be string ('transparent' or 'color-name' or 'hex-color') or tuple of two strings, not {type(color)}")
|
a0ae92f4b4ab4d09860affacfd9b345ff3a7909f
|
7a9beade653ebec41c8b6751057f97b199daddd6
|
/gputools/utils/histogram.py
|
113ee06a4b2521caa8f987daaecc81852f2c5583
|
[
"BSD-3-Clause"
] |
permissive
|
maweigert/gputools
|
4939bdbf0ecd4891f79827203b565fcf4b2b0ecf
|
4ca3b013879c18cf8d4c8b1d91b5681a3287616c
|
refs/heads/master
| 2023-01-24T09:24:03.924781
| 2021-12-03T17:18:20
| 2021-12-03T17:18:20
| 39,986,100
| 101
| 18
|
BSD-3-Clause
| 2021-12-02T23:31:10
| 2015-07-31T04:11:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
histogram.py
|
import numpy as np
from gputools.core.ocltypes import OCLArray, OCLProgram, get_device
from gputools.core.ocltypes import cl_buffer_datatype_dict
from ._abspath import abspath
def histogram(x, n_bins = 256):
    if x.dtype.type not in cl_buffer_datatype_dict:
raise ValueError("dtype %s not supported" % x.dtype.type)
DTYPE = cl_buffer_datatype_dict[x.dtype.type]
x_g = OCLArray.from_array(x)
x0 = x_g.min().get()
x1 = x_g.max().get()
local_size = min(get_device().get_info("MAX_WORK_GROUP_SIZE"),
2**int(np.log2(np.sqrt(len(x)))))
red_size = len(x)//local_size
part_hist_g = OCLArray.zeros((n_bins,red_size), np.uint32)
hist_g = OCLArray.zeros((n_bins,), np.uint32)
prog = OCLProgram(abspath("kernels/histogram.cl"), build_options =
["-D","N_BINS=%s"%n_bins,"-D","RED_SIZE=%s"%red_size,
"-D","DTYPE=%s"%DTYPE])
prog.run_kernel("histogram_partial",(len(x),),(local_size,),
x_g.data, part_hist_g.data, np.float32(x0),np.float32(x1))
prog.run_kernel("histogram_sum",(n_bins,),None,
part_hist_g.data, hist_g.data)
return hist_g.get()
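# Hypothetical sanity check (not part of the original module): on a machine
# with a working OpenCL device, the GPU histogram should match NumPy's result
# for the same [min, max] range, up to bin-edge handling.
if __name__ == '__main__':
    x = np.random.uniform(0., 1., 2 ** 16).astype(np.float32)
    hist_gpu = histogram(x, n_bins=64)
    hist_np, _ = np.histogram(x, bins=64, range=(float(x.min()), float(x.max())))
    print("max bin deviation:", np.abs(hist_gpu.astype(np.int64) - hist_np).max())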
|
bd1a53ce53bb48020d61646c35399d776364c0ef
|
0ea52d558d0d6720f2278d406dd50431e5141ba5
|
/aiohttp_security/session_identity.py
|
3936d076c0c7abcd744520c7b02403dba360c4af
|
[
"Apache-2.0"
] |
permissive
|
aio-libs/aiohttp-security
|
8a87108ded6b85f378274d05abe22d73f9e4361d
|
2648daddff3375fc84a0c357468c29aaa399dda6
|
refs/heads/master
| 2023-08-09T16:04:39.680128
| 2023-08-02T11:35:58
| 2023-08-02T11:35:58
| 38,769,230
| 222
| 80
|
Apache-2.0
| 2023-09-14T11:22:56
| 2015-07-08T17:29:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,319
|
py
|
session_identity.py
|
"""Identity policy for storing info into aiohttp_session session.
aiohttp_session.setup() should be called on application initialization
to configure aiohttp_session properly.
"""
from typing import Optional
from aiohttp import web
try:
from aiohttp_session import get_session
HAS_AIOHTTP_SESSION = True
except ImportError: # pragma: no cover
HAS_AIOHTTP_SESSION = False
from .abc import AbstractIdentityPolicy
class SessionIdentityPolicy(AbstractIdentityPolicy):
def __init__(self, session_key: str = 'AIOHTTP_SECURITY'):
self._session_key = session_key
if not HAS_AIOHTTP_SESSION: # pragma: no cover
raise ImportError(
'SessionIdentityPolicy requires `aiohttp_session`')
async def identify(self, request: web.Request) -> Optional[str]:
session = await get_session(request)
return session.get(self._session_key)
async def remember(self, request: web.Request, response: web.StreamResponse,
identity: str, **kwargs: None) -> None:
session = await get_session(request)
session[self._session_key] = identity
async def forget(self, request: web.Request, response: web.StreamResponse) -> None:
session = await get_session(request)
session.pop(self._session_key, None)
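# Hypothetical wiring sketch (not part of this module): SessionIdentityPolicy
# only stores the identity string; aiohttp_session must be set up first, and
# aiohttp_security also needs an authorization policy (MyAuthzPolicy below is
# a stand-in for your own AbstractAuthorizationPolicy subclass):
#
#     from aiohttp_session import SimpleCookieStorage, setup as session_setup
#     from aiohttp_security import setup as security_setup
#
#     app = web.Application()
#     session_setup(app, SimpleCookieStorage())  # insecure storage, demos only
#     security_setup(app, SessionIdentityPolicy(), MyAuthzPolicy())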
|
90e7ee77a3c807cec83f6957d2fc2e9ef4dbfa2c
|
2be0425abb1b3fffd1fe5522550d6e2eed0b1c21
|
/PAT (Advanced Level) Practice/1119_Pre- and Post-order Traversals (30).py
|
8bafbe2163ad184cf43bbfef1269b4b260ec6487
|
[] |
no_license
|
tiny656/PAT
|
184fe85faa01493057e74ec415b627a64a756e8a
|
1fd038bb96401c048a9496d890a2c172c04a88bf
|
refs/heads/master
| 2023-05-25T04:39:04.591197
| 2023-05-15T13:17:50
| 2023-05-15T13:17:50
| 6,883,938
| 270
| 76
| null | 2016-01-04T01:02:55
| 2012-11-27T13:40:03
|
C++
|
UTF-8
|
Python
| false
| false
| 1,438
|
py
|
1119_Pre- and Post-order Traversals (30).py
|
#coding: utf-8
# Build the in-order traversal from the pre-order and post-order traversals; when splitting into left/right subtrees, multiple solutions may exist
def build(preLeft, preRight, postLeft, postRight):
global max_find_count
root = preOrder[preLeft]
preLeft += 1
postRight -= 1
size = preRight - preLeft + 1
pos, find_count = -1, 0
for i in xrange(size+1):
preLeftList = preOrder[preLeft : preLeft+i]
preRightList = preOrder[preLeft+i : preRight+1]
postLeftList = postOrder[postLeft : postLeft+i]
postRightList = postOrder[postLeft+i : postRight+1]
if ((len(preLeftList) == 0 and len(postLeftList) == 0) \
or (sorted(preLeftList) == sorted(postLeftList) and preLeftList[0] == postLeftList[-1])) \
and ((len(preRightList) == 0 and len(postRightList) == 0) \
or (sorted(preRightList) == sorted(postRightList) and preRightList[0] == postRightList[-1])):
pos = i
find_count += 1
max_find_count = max(max_find_count, find_count)
if pos > 0:
build(preLeft, preLeft+pos-1, postLeft, postLeft+pos-1)
ans.append(root)
if 0 <= pos < size:
build(preLeft+pos, preRight, postLeft+pos, postRight)
n = int(raw_input())
preOrder = map(int, raw_input().split())
postOrder = map(int, raw_input().split())
max_find_count, ans = 0, []
build(0, n-1, 0, n-1)
print 'Yes' if max_find_count == 1 else 'No'
print ' '.join(map(str, ans))
|
d6a1991c210a339db5d58bbe427be94d15a0eec7
|
231a6e79e408ec2851a558f0864a67f62028fb65
|
/control/ctrlutil.py
|
aeb0c30f1ce3724f7865d464c05558c8fdc757cd
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
python-control/python-control
|
d917bc7d1ba61d3182d41bf98debcacad0f7f5ff
|
42c6fb1498aa9f5d542113da288c7c4e3547c117
|
refs/heads/main
| 2023-08-24T21:31:48.990585
| 2023-07-13T13:48:08
| 2023-07-13T13:48:08
| 22,791,752
| 1,447
| 441
|
BSD-3-Clause
| 2023-09-10T22:24:38
| 2014-08-09T17:48:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,694
|
py
|
ctrlutil.py
|
# ctrlutil.py - control system utility functions
#
# Author: Richard M. Murray
# Date: 24 May 09
#
# These are some basic utility functions that are used in the control
# systems library and that didn't naturally fit anyplace else.
#
# Copyright (c) 2009 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
# Packages that we need access to
from . import lti
import numpy as np
import math
import warnings
__all__ = ['unwrap', 'issys', 'db2mag', 'mag2db']
# Utility function to unwrap an angle measurement
def unwrap(angle, period=2*math.pi):
"""Unwrap a phase angle to give a continuous curve.
Parameters
----------
angle : array_like
Array of angles to be unwrapped
period : float, optional
Period (defaults to `2*pi`)
Returns
-------
angle_out : array_like
Output array, with jumps of period/2 eliminated
Examples
--------
>>> # Already continuous
>>> theta1 = np.array([1.0, 1.5, 2.0, 2.5, 3.0]) * np.pi
>>> theta2 = ct.unwrap(theta1)
>>> theta2/np.pi # doctest: +SKIP
array([1. , 1.5, 2. , 2.5, 3. ])
>>> # Wrapped, discontinuous
>>> theta1 = np.array([1.0, 1.5, 0.0, 0.5, 1.0]) * np.pi
>>> theta2 = ct.unwrap(theta1)
>>> theta2/np.pi # doctest: +SKIP
array([1. , 1.5, 2. , 2.5, 3. ])
"""
dangle = np.diff(angle)
dangle_desired = (dangle + period/2.) % period - period/2.
correction = np.cumsum(dangle_desired - dangle)
angle[1:] += correction
return angle
def issys(obj):
"""Return True if an object is a Linear Time Invariant (LTI) system,
otherwise False.
Examples
--------
>>> G = ct.tf([1], [1, 1])
>>> ct.issys(G)
True
>>> K = np.array([[1, 1]])
>>> ct.issys(K)
False
"""
warnings.warn("issys() is deprecated; use isinstance(obj, ct.LTI)",
FutureWarning, stacklevel=2)
return isinstance(obj, lti.LTI)
def db2mag(db):
"""Convert a gain in decibels (dB) to a magnitude.
If A is magnitude,
db = 20 * log10(A)
Parameters
----------
db : float or ndarray
input value or array of values, given in decibels
Returns
-------
mag : float or ndarray
corresponding magnitudes
Examples
--------
>>> ct.db2mag(-40.0) # doctest: +SKIP
0.01
>>> ct.db2mag(np.array([0, -20])) # doctest: +SKIP
array([1. , 0.1])
"""
return 10. ** (db / 20.)
def mag2db(mag):
"""Convert a magnitude to decibels (dB).
If A is magnitude,
db = 20 * log10(A)
Parameters
----------
mag : float or ndarray
input magnitude or array of magnitudes
Returns
-------
db : float or ndarray
corresponding values in decibels
Examples
--------
>>> ct.mag2db(10.0) # doctest: +SKIP
20.0
>>> ct.mag2db(np.array([1, 0.01])) # doctest: +SKIP
array([ 0., -40.])
"""
return 20. * np.log10(mag)
|
e5989051fe1fea7b5c16146cb6cbe59df6c4515a
|
3816f1ba0ff7eca9cc324f4ad5ac8c5b54028eeb
|
/fuzz/scripts/send_testcase.py
|
1c1a214c3ec23ddbd0b62ff19cb57f03248d1d7b
|
[
"BSD-3-Clause"
] |
permissive
|
EIPStackGroup/OpENer
|
d70064ad8d99406afab648a5985aacf3f3ef38fe
|
fc0179e9969a85a20fb907e3043062755b7a38d2
|
refs/heads/master
| 2023-08-26T10:09:49.085224
| 2022-03-08T21:05:02
| 2023-07-11T16:57:43
| 12,129,915
| 573
| 263
|
NOASSERTION
| 2023-09-03T21:47:47
| 2013-08-15T08:22:41
|
C
|
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
send_testcase.py
|
import sys
import socket
import struct
if len(sys.argv) != 3:
print("python {} IP TESTCASE_PATH".format(sys.argv[0]))
sys.exit(1)
HOST_IP = sys.argv[1]
HOST_PORT = 44818
TESTCASE_PATH = sys.argv[2]
ENIP_SESSION_CONTEXT = b"\x92\x83J\x0b=\x9e\x0cW"
ENIP_INIT_SESSION_PACKET = b"e\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00" + ENIP_SESSION_CONTEXT + b"\x00\x00\x00\x00\x01\x00\x00\x00"
print("[-] Connecting to {}:{}".format(HOST_IP, HOST_PORT))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST_IP, HOST_PORT))
print("[-] Init ENIP session")
s.sendall(ENIP_INIT_SESSION_PACKET)
enip_session = s.recv(1024)
session_handle = enip_session[4:8]
print("[-] Got ENIP Session Handle: {}".format(struct.unpack("<I", session_handle)[0]))
print("[-] Reading testcase from: '{}'".format(TESTCASE_PATH))
with open(TESTCASE_PATH, "rb") as f:
testcase_data = f.read()
print("[-] Patching sender context and session handle")
testcase = testcase_data[:4] # command, len
testcase += session_handle # session handle
testcase += testcase_data[8:12] # status
testcase += ENIP_SESSION_CONTEXT # session context
testcase += testcase_data[20:] # options and payload
print("[-] Sending testcase of {} bytes".format(len(testcase)))
s.sendall(testcase)  # sendall: send() may transmit only part of the buffer
s.close()
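# Hypothetical helper (not part of the original script): the slicing above
# follows the standard 24-byte EtherNet/IP encapsulation header, which can
# also be unpacked in one go -- command (u16), length (u16), session handle
# (u32), status (u32), sender context (8 bytes), options (u32), little-endian.
def parse_enip_header(data):
    command, length, session, status, context, options = struct.unpack("<HHII8sI", data[:24])
    return {"command": command, "length": length, "session_handle": session,
            "status": status, "sender_context": context, "options": options}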
|
ef33e254eeb87a00716ba171b0f0353ca2f55238
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/networking/__init__.py
|
5e8876484403af15345afd7812eb7d3d23b8ebd8
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538
| 2023-09-05T20:35:23
| 2023-09-05T20:35:23
| 307,260,205
| 52,742
| 5,376
|
Unlicense
| 2023-09-14T05:22:08
| 2020-10-26T04:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 238
|
py
|
__init__.py
|
# flake8: noqa: 401
from .common import (
HEADRequest,
PUTRequest,
Request,
RequestDirector,
RequestHandler,
Response,
)
# isort: split
# TODO: all request handlers should be safely imported
from . import _urllib
|
c88b564fb16816edee028b9feb51f9f45ed04a6d
|
4292b0c439255ec73529dcb29a3a7c8a3167600a
|
/records/08-15/test.py
|
fa746c225f4137714ea6461ba1713cd7be094392
|
[
"Apache-2.0"
] |
permissive
|
AaronYang2333/CSCI_570
|
478bddfaf20ede5e0b982e9dce0070d7009eb425
|
03e34ce5ff192fc94612bc3afb51dcab3e854462
|
refs/heads/master
| 2021-06-10T18:40:12.204272
| 2021-06-05T08:52:12
| 2021-06-05T08:52:12
| 148,282,810
| 115
| 37
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
test.py
|
__author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '8/15/2020 7:42 PM'
class Solution:
def minOperations(self, n: int) -> int:
avg = n
data = [2 * i + 1 for i in range(n)]
        res = sum(abs(val - avg) for val in data)
        return res // 2  # the gaps pair up symmetrically, so the sum is always even
if __name__ == '__main__':
res = Solution().minOperations(3)
print(res)
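    # Hypothetical cross-check (not part of the original file): LeetCode 1551
    # has the closed form n * n // 4, which the brute-force sum above matches.
    for n in range(1, 10):
        assert Solution().minOperations(n) == n * n // 4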
|
e1d5ec752714fd7969fc52483b8736649c1a0ac4
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/core/topology/goal/level.py
|
a099008f5abbebe8f0243f9916cf85b9dc12fb93
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
level.py
|
# ----------------------------------------------------------------------
# ManagedObjectLevel goal
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from typing import Optional
# NOC modules
from noc.sa.models.managedobject import ManagedObject
from .base import BaseGoal
class ManagedObjectLevelGoal(BaseGoal):
"""
    Matches when the current level is greater than or equal to the defined one
"""
TOWARDS_COST = 1
SAME_LEVEL_COST = 10
BACKWARDS_COST = 100
def __init__(self, level):
super().__init__()
self.level = level
def cost_estimate(
self, neighbor: ManagedObject, current: Optional[ManagedObject] = None
) -> int:
if current:
current_level = current.object_profile.level
neighbor_level = neighbor.object_profile.level
if current_level == neighbor_level:
return self.SAME_LEVEL_COST
elif current_level > neighbor_level:
return self.BACKWARDS_COST
else:
return self.TOWARDS_COST
return self.DEFAULT_COST
def is_goal(self, obj: ManagedObject) -> bool:
return obj.object_profile.level >= self.level
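# Hypothetical illustration (not part of the original module), assuming
# BaseGoal.__init__ takes no extra arguments; cost_estimate() only reads
# object_profile.level, so duck-typed stand-ins are enough to see the ordering:
#
#     from types import SimpleNamespace
#     goal = ManagedObjectLevelGoal(level=20)
#     lo = SimpleNamespace(object_profile=SimpleNamespace(level=10))
#     hi = SimpleNamespace(object_profile=SimpleNamespace(level=30))
#     goal.cost_estimate(hi, current=lo)  # TOWARDS_COST: climbing toward the goal
#     goal.cost_estimate(lo, current=hi)  # BACKWARDS_COST: descending
#     goal.is_goal(hi), goal.is_goal(lo)  # (True, False)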
|
5b887ce649fb596a53f87113890b9255944ff9e5
|
95d304114b6c5e47c564c95718e09030d6584c7e
|
/winsandbox/folder_mapper.py
|
7eb3963ce025ba08b8823b7848f4847dfeb99021
|
[
"MIT"
] |
permissive
|
karkason/pywinsandbox
|
d05ad0092aa709b3d6ad59124b9891d5edd53d13
|
85d82fdd3054d844268c012d25b9862b9c4f949f
|
refs/heads/master
| 2022-10-01T05:31:19.434493
| 2022-09-01T08:36:32
| 2022-09-01T08:36:32
| 248,972,006
| 109
| 16
|
MIT
| 2022-09-01T08:36:33
| 2020-03-21T12:31:40
|
Python
|
UTF-8
|
Python
| false
| false
| 867
|
py
|
folder_mapper.py
|
import sys
import pathlib
import site
class FolderMapper:
"""
    Generic folder mapper. Enables mapping a folder into the new sandbox.
"""
def __init__(self, folder_path, read_only=True):
self._folder_path = pathlib.Path(folder_path)
self._read_only = read_only
def path(self):
return self._folder_path
def read_only(self):
return self._read_only
class PythonMapper:
"""
Maps the current Python installation to the new sandbox.
"""
def path(self):
return pathlib.Path(sys.prefix)
def read_only(self):
return True
class PythonUserSitePackagesMapper:
"""
Maps the current Python installation's user site packages to the new sandbox.
"""
def path(self):
return pathlib.Path(site.getusersitepackages())
def read_only(self):
return True
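# Hypothetical usage sketch (not part of the original module); the sandbox
# entry point named below is an assumption, but the mappers themselves only
# expose path() and read_only() for the sandbox configuration to consume:
#
#     import winsandbox
#     mappers = [FolderMapper(r"C:\data", read_only=False), PythonMapper()]
#     sandbox = winsandbox.new_sandbox(folder_mappers=mappers)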
|
1366e6708266e184ddf86fe22b917c8954241b7b
|
e7aad0b1c5d8907dbb52000c482c396d1b801751
|
/test/functional/tests/fault_injection/test_primary_device_error.py
|
069e15d1cc2c5565af1506bf2f7900ac1c64521a
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
Open-CAS/open-cas-linux
|
c57d60f860702d7bc380c5d85cf502c0bf5e1bae
|
93334b4675afee8815f8ea12bb7297e0fd2a4195
|
refs/heads/master
| 2023-07-12T10:32:26.921455
| 2023-07-03T12:24:47
| 2023-07-03T12:24:47
| 178,356,155
| 202
| 84
|
BSD-3-Clause
| 2023-07-03T12:24:49
| 2019-03-29T07:37:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,141
|
py
|
test_primary_device_error.py
|
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine, ErrorFilter, VerifyMethod
from test_tools.device_mapper import ErrorDevice, DmTable
from core.test_run import TestRun
from api.cas import casadm
from api.cas.cache_config import (
CacheMode,
CacheModeTrait,
CacheLineSize,
SeqCutOffPolicy,
CleaningPolicy,
)
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
@pytest.mark.parametrizex("cache_mode", CacheMode.without_traits(CacheModeTrait.LazyWrites))
@pytest.mark.parametrizex("io_dir", [ReadWrite.randread, ReadWrite.randwrite])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_core_device_error(io_dir, cache_mode):
"""
    title: Check if CAS behaves correctly when encountering errors on the core device
    description: |
        Perform I/O on two exported objects, one created on an error device and one on
        a non-error device. Validate that CAS error-statistics counting is consistent
        with what the OS reports. Also check that normal I/O is uninterrupted and that
        no data corruption occurs on either core device.
pass_criteria:
- I/O error count in FIO and in cache statistics match
- Positively passed fio verify on both core devices
"""
cache_line_size = CacheLineSize.DEFAULT
with TestRun.step("Prepare error device and setup cache and cores"):
cache, error_core, good_core = prepare_configuration(cache_mode, cache_line_size)
good_core_fio = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(good_core.size)
.block_size(cache_line_size)
.target(good_core)
.read_write(ReadWrite.randrw)
.verify_pattern()
.verify(VerifyMethod.pattern)
.direct()
)
error_core_fio = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(error_core.size)
.block_size(cache_line_size)
.target(error_core)
.read_write(io_dir)
.continue_on_error(ErrorFilter.io)
.direct()
)
if io_dir == ReadWrite.randwrite:
error_core_fio.verify_pattern().verify(VerifyMethod.pattern)
with TestRun.step("Run fio on core without errors in background"):
fio_pid = good_core_fio.run_in_background()
with TestRun.step("Run fio on error core and check if IO errors are present"):
fio_errors = error_core_fio.run()[0].total_errors()
if fio_errors == 0:
TestRun.fail("No I/O ended with error!")
with TestRun.step("Check error statistics on error core"):
stats = cache.get_statistics()
core_errors_in_cache = stats.error_stats.core.total
if fio_errors != core_errors_in_cache:
TestRun.fail(
f"Core errors in cache stats ({core_errors_in_cache}) "
f"should be equal to number of fio errors ({fio_errors})"
)
with TestRun.step("Wait for fio on good core"):
TestRun.executor.wait_cmd_finish(fio_pid)
with TestRun.step("Check error statistics on good core"):
stats = good_core.get_statistics()
if stats.error_stats.core.total != 0:
TestRun.fail(
f"No errors should be reported for good core. "
f"Actual result: {stats.error_stats.total}"
)
with TestRun.step("Stop the cache"):
cache.stop()
with TestRun.step("Verify error core device contents (if writes)"):
if io_dir == ReadWrite.randwrite:
error_core_fio.target(error_core.core_device).verify_only().run()
with TestRun.step("Verify good core device contents"):
good_core_fio.target(good_core.core_device).verify_only().run()
def prepare_configuration(cache_mode, cache_line_size):
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
cache_device.create_partitions([Size(70, Unit.MebiByte)])
core_device.create_partitions(
[Size(70, Unit.MebiByte), Size(70, Unit.MebiByte)]
)
core1 = core_device.partitions[0]
core2 = core_device.partitions[1]
error_device = ErrorDevice(
"error",
core1,
DmTable.uniform_error_table(
start_lba=0,
stop_lba=int(core1.size.get_value(Unit.Blocks512)),
num_error_zones=100,
error_zone_size=Size(5, Unit.Blocks512),
).fill_gaps(core1),
)
cache = casadm.start_cache(
cache_device.partitions[0],
cache_mode=cache_mode,
cache_line_size=cache_line_size,
force=True,
)
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
cache.set_cleaning_policy(CleaningPolicy.nop)
Udev.disable()
error_core = cache.add_core(core_dev=error_device)
good_core = cache.add_core(core_dev=core2)
return cache, error_core, good_core
|
ce2cb4698ad5cb09bfea8384082d8852827ff9b4
|
2b8f195b10e8e12db1252318922668cb432ea8ab
|
/mvlearn/model_selection/__init__.py
|
01146950652d21f8133f48b3f7ab5ea5a326f923
|
[
"MIT"
] |
permissive
|
mvlearn/mvlearn
|
70fba0fc52e1467101adadf46cf61e7076838c2f
|
003dccea563926fca5d957f5bbf39c1494acfe94
|
refs/heads/main
| 2023-04-18T15:47:53.716354
| 2022-04-05T22:17:18
| 2022-04-05T22:17:18
| 206,838,300
| 136
| 17
|
MIT
| 2023-03-08T17:37:59
| 2019-09-06T16:56:51
|
Python
|
UTF-8
|
Python
| false
| false
| 127
|
py
|
__init__.py
|
from .split import train_test_split
from .validation import cross_validate
__all__ = ["train_test_split", "cross_validate", ]
|
0c72cecddc5bf0ca89189730f2c0c4bdd7b8a8de
|
9ca7f9130f9cbe3737b29143c506a43bc731fe36
|
/src/tests/lib/curses_api.py
|
ce88cc22cd90c4c9eb9613e7c651b8d4a8ea459d
|
[
"MIT"
] |
permissive
|
facebook/PathPicker
|
06b0763759d148834a6274903a456088bd3d5e3f
|
cc032b2b2fa2fa8ab2fedc93766b2bf2303781d2
|
refs/heads/main
| 2023-07-30T22:56:26.708321
| 2022-07-03T18:09:54
| 2022-07-03T18:09:54
| 34,887,588
| 5,445
| 401
|
MIT
| 2023-09-01T11:43:13
| 2015-05-01T03:05:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
curses_api.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathpicker.curses_api import CursesApiBase
class CursesForTest(CursesApiBase):
"""The dependency-injected curses wrapper which simply
stores some state in test runs of the UI"""
def __init__(self) -> None:
self.color_pairs = {}
self.current_color = (0, 0)
# The (0, 0) is hardcoded.
self.color_pairs[0] = self.current_color
def use_default_colors(self) -> None:
pass
def echo(self) -> None:
pass
def noecho(self) -> None:
pass
def init_pair(self, pair_number: int, fg_color: int, bg_color: int) -> None:
self.color_pairs[pair_number] = (fg_color, bg_color)
def color_pair(self, color_number: int) -> int:
self.current_color = self.color_pairs[color_number]
# TODO -- find a better return than this?
return color_number
def get_color_pairs(self) -> int:
# pretend we are on 256 color
return 256
def exit(self) -> None:
raise StopIteration("stopping program")
def allow_file_output(self) -> bool:
# do not output selection pickle
return False
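# Hypothetical quick check (not part of the original file): init_pair() and
# color_pair() round-trip through the fake's stored state.
#
#     api = CursesForTest()
#     api.init_pair(1, fg_color=2, bg_color=0)
#     assert api.color_pair(1) == 1 and api.current_color == (2, 0)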
|
d2dc2b14603cff9e192d6b1d865d3beaba2870da
|
af7d77930fed903a613fd3f70c65ef037798a985
|
/tests/test_penalties.py
|
a67de838684d77cb5b3b008269d75702bed5add5
|
[
"BSD-3-Clause"
] |
permissive
|
openopt/copt
|
f9fae01b6764bd568151ad6470fb8ca616bc64a3
|
5423537e41fd4b566910d7bdd3abca9638111bb5
|
refs/heads/master
| 2023-04-06T05:35:57.234868
| 2023-03-26T07:36:30
| 2023-03-26T07:36:30
| 46,262,908
| 141
| 32
|
NOASSERTION
| 2023-03-26T07:35:28
| 2015-11-16T08:53:00
|
Python
|
UTF-8
|
Python
| false
| false
| 4,156
|
py
|
test_penalties.py
|
import numpy as np
import copt as cp
import copt.constraint
import copt.penalty
from copt import tv_prox
from numpy import testing
import pytest
proximal_penalties = [
copt.penalty.L1Norm(1.0),
copt.penalty.GroupL1(1.0, np.array_split(np.arange(16), 5)),
copt.penalty.TraceNorm(1.0, (4, 4)),
copt.constraint.TraceBall(1.0, (4, 4)),
copt.penalty.TotalVariation2D(1.0, (4, 4)),
copt.penalty.FusedLasso(1.0),
]
def test_GroupL1():
groups = [(0, 1), (2, 3)]
g1 = copt.penalty.GroupL1(1.0, groups)
_, B = g1.prox_factory(5)
assert np.all(
B.toarray()
== np.array(
[
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, -1.0],
]
)
)
groups = [(0, 1), (3, 4)]
g2 = copt.penalty.GroupL1(1.0, groups)
_, B = g2.prox_factory(5)
assert np.all(
B.toarray()
== np.array(
[
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, -1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
]
)
)
#
# for blocks in [[(0, 1), (2, 3)], ]:
# pen = cp.utils.GroupL1(1., blocks)
# counter = 0
# for g in pen.groups:
# for j in g:
# counter += 1
# assert counter == blocks.size
# assert pen.groups
# for g in pen.groups:
# assert np.unique(blocks[g]).size == 1
def test_tv1_prox():
"""
Use the properties of strongly convex functions to test the implementation
of the TV1D proximal operator. In particular, we use the following inequality
applied to the proximal objective function: if f is mu-strongly convex then
    f(x) - f(x^*) >= (mu / 2) * ||x - x^*||^2
where x^* is the optimum of f.
"""
n_features = 10
gamma = np.random.rand()
epsilon = 1e-10 # account for some numerical errors
tv_norm = lambda x: np.sum(np.abs(np.diff(x)))
for _ in range(1000):
x = np.random.randn(n_features)
x_next = tv_prox.prox_tv1d(x, gamma)
diff_obj = tv_norm(x) - tv_norm(x_next)
testing.assert_array_less(
((x - x_next) ** 2).sum() / gamma, (1 + epsilon) * diff_obj
)
def test_tv2_prox():
"""
similar test, but for 2D total variation penalty.
"""
np.random.seed(0)
n_rows, n_cols = 6, 8
n_features = n_rows * n_cols
gamma = np.random.rand()
epsilon = 0.1 # account for some numerical errors
def tv_norm(x, n_rows, n_cols):
X = x.reshape((n_rows, n_cols))
        return np.sum(np.abs(np.diff(X, axis=0))) + np.sum(np.abs(np.diff(X, axis=1)))
for nrun in range(20):
x = np.random.randn(n_features)
x_next = tv_prox.prox_tv2d(x, gamma, n_rows, n_cols, tol=1e-10, max_iter=10000)
diff_obj = tv_norm(x, n_rows, n_cols) - tv_norm(x_next, n_rows, n_cols)
testing.assert_array_less(
((x - x_next) ** 2).sum() / gamma, (1 + epsilon) * diff_obj
)
def test_tv2d_linear_operator():
n_rows, n_cols = 20, 10
def TV(w):
img = w.reshape((n_rows, n_cols))
tmp1 = np.abs(np.diff(img, axis=0))
tmp2 = np.abs(np.diff(img, axis=1))
return tmp1.sum() + tmp2.sum()
L = tv_prox.tv2d_linear_operator(n_rows, n_cols)
x = np.random.randn(n_rows * n_cols)
testing.assert_almost_equal(np.abs(L.dot(x)).sum(), TV(x))
@pytest.mark.parametrize("pen", proximal_penalties)
def test_three_inequality(pen):
"""Test the L1 prox using the three point inequality
The three-point inequality is described e.g., in Lemma 1.4
in "Gradient-Based Algorithms with Applications to Signal
Recovery Problems", Amir Beck and Marc Teboulle
"""
n_features = 16
for _ in range(10):
z = np.random.randn(n_features)
u = np.random.randn(n_features)
xi = pen.prox(z, 1.0)
lhs = 2 * (pen(xi) - pen(u))
rhs = (
np.linalg.norm(u - z) ** 2
- np.linalg.norm(u - xi) ** 2
- np.linalg.norm(xi - z) ** 2
)
assert lhs <= rhs, pen
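def test_l1_prox_soft_thresholding():
    """Hedged extra check (not from the original suite), assuming that
    copt.penalty.L1Norm(alpha).prox(x, t) applies soft-thresholding at level
    alpha * t, which is the closed form of the L1 proximal operator."""
    z = np.random.randn(16)
    expected = np.sign(z) * np.maximum(np.abs(z) - 0.5, 0.0)
    testing.assert_allclose(copt.penalty.L1Norm(1.0).prox(z, 0.5), expected)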
|
17ab9f50dea7a0d97f8011d1ae64bfd251ea928d
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/pypy/module/cpyext/test/test_typeobject.py
|
045ee28c4fcbda518cc96b09207e84d2ff226171
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 66,086
|
py
|
test_typeobject.py
|
import pytest
from pypy.interpreter import gateway
from rpython.rtyper.lltypesystem import rffi
from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.api import generic_cpy_call
from pypy.module.cpyext.pyobject import make_ref, from_ref, decref, as_pyobj
from pypy.module.cpyext.typeobject import PyTypeObjectPtr, W_PyCTypeObject
class AppTestTypeObject(AppTestCpythonExtensionBase):
def setup_class(cls):
AppTestCpythonExtensionBase.setup_class.im_func(cls)
def _check_uses_shortcut(w_inst):
res = hasattr(w_inst, "_cpy_ref") and w_inst._cpy_ref
res = res and as_pyobj(cls.space, w_inst) == w_inst._cpy_ref
return cls.space.newbool(res)
cls.w__check_uses_shortcut = cls.space.wrap(
gateway.interp2app(_check_uses_shortcut))
def test_typeobject(self):
import sys
module = self.import_module(name='foo')
assert 'foo' in sys.modules
assert "copy" in dir(module.fooType)
obj = module.new()
#print(obj.foo)
assert obj.foo == 42
#print("Obj has type", type(obj))
assert type(obj) is module.fooType
#print("type of obj has type", type(type(obj)))
#print("type of type of obj has type", type(type(type(obj))))
assert module.fooType.__doc__ == "foo is for testing."
def test_typeobject_method_descriptor(self):
module = self.import_module(name='foo')
obj = module.new()
obj2 = obj.copy()
assert module.new().name == "Foo Example"
c = module.fooType.copy
assert not "im_func" in dir(module.fooType.copy)
assert module.fooType.copy.__objclass__ is module.fooType
assert "copy" in repr(module.fooType.copy)
assert repr(module.fooType) == "<type 'foo.foo'>"
assert repr(obj2) == "<Foo>"
assert repr(module.fooType.__call__) == "<slot wrapper '__call__' of 'foo.foo' objects>"
assert obj2(foo=1, bar=2) == dict(foo=1, bar=2)
print(obj.foo)
assert obj.foo == 42
assert obj.int_member == obj.foo
def test_typeobject_data_member(self):
module = self.import_module(name='foo')
obj = module.new()
obj.int_member = 23
assert obj.int_member == 23
obj.int_member = 42
raises(TypeError, "obj.int_member = 'not a number'")
raises(TypeError, "del obj.int_member")
raises(TypeError, "obj.int_member_readonly = 42")
exc = raises(TypeError, "del obj.int_member_readonly")
assert "readonly" in str(exc.value)
raises(SystemError, "obj.broken_member")
raises(SystemError, "obj.broken_member = 42")
assert module.fooType.broken_member.__doc__ is None
assert module.fooType.object_member.__doc__ == "A Python object."
assert str(type(module.fooType.int_member)) == "<type 'member_descriptor'>"
def test_typeobject_object_member(self):
module = self.import_module(name='foo')
obj = module.new()
assert obj.object_member is None
obj.object_member = "hello"
assert obj.object_member == "hello"
del obj.object_member
del obj.object_member
assert obj.object_member is None
raises(AttributeError, "obj.object_member_ex")
obj.object_member_ex = None
assert obj.object_member_ex is None
obj.object_member_ex = 42
assert obj.object_member_ex == 42
del obj.object_member_ex
raises(AttributeError, "del obj.object_member_ex")
obj.set_foo = 32
assert obj.foo == 32
def test_typeobject_string_member(self):
module = self.import_module(name='foo')
obj = module.new()
assert obj.string_member == "Hello from PyPy"
raises(TypeError, "obj.string_member = 42")
raises(TypeError, "del obj.string_member")
obj.unset_string_member()
assert obj.string_member is None
assert obj.string_member_inplace == "spam"
raises(TypeError, "obj.string_member_inplace = 42")
raises(TypeError, "del obj.string_member_inplace")
assert obj.char_member == "s"
obj.char_member = "a"
assert obj.char_member == "a"
raises(TypeError, "obj.char_member = 'spam'")
raises(TypeError, "obj.char_member = 42")
#
import sys
bignum = sys.maxint - 42
obj.short_member = -12345; assert obj.short_member == -12345
obj.long_member = -bignum; assert obj.long_member == -bignum
obj.ushort_member = 45678; assert obj.ushort_member == 45678
obj.uint_member = 3000000000; assert obj.uint_member == 3000000000
obj.ulong_member = 2*bignum; assert obj.ulong_member == 2*bignum
obj.byte_member = -99; assert obj.byte_member == -99
obj.ubyte_member = 199; assert obj.ubyte_member == 199
obj.bool_member = True; assert obj.bool_member is True
obj.float_member = 9.25; assert obj.float_member == 9.25
obj.double_member = 9.25; assert obj.double_member == 9.25
obj.longlong_member = -2**59; assert obj.longlong_member == -2**59
obj.ulonglong_member = 2**63; assert obj.ulonglong_member == 2**63
obj.ssizet_member = sys.maxint;assert obj.ssizet_member == sys.maxint
#
def test_staticmethod(self):
module = self.import_module(name="foo")
obj = module.fooType.create()
assert obj.foo == 42
obj2 = obj.create()
assert obj2.foo == 42
def test_classmethod(self):
module = self.import_module(name="foo")
obj = module.fooType.classmeth()
assert obj is module.fooType
def test_methoddescr(self):
module = self.import_module(name='foo')
descr = module.fooType.copy
assert type(descr).__name__ == 'method_descriptor'
assert str(descr) in ("<method 'copy' of 'foo.foo' objects>",
"<method 'copy' of 'foo' objects>")
assert repr(descr) in ("<method 'copy' of 'foo.foo' objects>",
"<method 'copy' of 'foo' objects>")
raises(TypeError, descr, None)
def test_cython_fake_classmethod(self):
module = self.import_module(name='foo')
print(module.fooType.fake_classmeth)
print(type(module.fooType.fake_classmeth))
assert module.fooType.fake_classmeth() is module.fooType
def test_new(self):
# XXX cpython segfaults but if run singly (with -k test_new) this passes
module = self.import_module(name='foo')
obj = module.new()
# call __new__
newobj = module.UnicodeSubtype(u"xyz")
assert newobj == u"xyz"
assert isinstance(newobj, module.UnicodeSubtype)
assert isinstance(module.fooType(), module.fooType)
class bar(module.fooType):
pass
assert isinstance(bar(), bar)
fuu = module.UnicodeSubtype
class fuu2(fuu):
def baz(self):
return self
assert fuu2(u"abc").baz().escape()
raises(TypeError, module.fooType.object_member.__get__, 1)
def test_shortcut(self):
# test that instances of classes that are defined in C become an
# instance of W_BaseCPyObject and thus can be converted faster back to
# their pyobj, because they store a pointer to it directly.
if self.runappdirect:
skip("can't run with -A")
module = self.import_module(name='foo')
obj = module.fooType()
assert self._check_uses_shortcut(obj)
# W_TypeObjects use shortcut
assert self._check_uses_shortcut(object)
assert self._check_uses_shortcut(type)
# None, True, False use shortcut
assert self._check_uses_shortcut(None)
assert self._check_uses_shortcut(True)
assert self._check_uses_shortcut(False)
assert not self._check_uses_shortcut(1)
assert not self._check_uses_shortcut(object())
def test_multiple_inheritance1(self):
module = self.import_module(name='foo')
obj = module.UnicodeSubtype(u'xyz')
obj2 = module.UnicodeSubtype2()
obj3 = module.UnicodeSubtype3()
assert obj3.get_val() == 42
assert len(type(obj3).mro()) == 6
def test_init(self):
module = self.import_module(name="foo")
newobj = module.UnicodeSubtype()
assert newobj.get_val() == 42
# this subtype should inherit tp_init
newobj = module.UnicodeSubtype2()
assert newobj.get_val() == 42
# this subclass redefines __init__
class UnicodeSubclass2(module.UnicodeSubtype):
def __init__(self):
self.foobar = 32
super(UnicodeSubclass2, self).__init__()
newobj = UnicodeSubclass2()
assert newobj.get_val() == 42
assert newobj.foobar == 32
def test_metatype(self):
module = self.import_module(name='foo')
assert module.MetaType.__mro__ == (module.MetaType, type, object)
x = module.MetaType('name', (), {})
assert isinstance(x, type)
assert isinstance(x, module.MetaType)
x()
def test_metaclass_compatible(self):
# metaclasses should not conflict here
module = self.import_module(name='foo')
assert module.MetaType.__mro__ == (module.MetaType, type, object)
assert type(module.fooType).__mro__ == (type, object)
y = module.MetaType('other', (module.MetaType,), {})
assert isinstance(y, module.MetaType)
x = y('something', (type(y),), {})
del x, y
def test_metaclass_compatible2(self):
skip('fails even with -A, fooType has BASETYPE flag')
# XXX FIX - must raise since fooType (which is a base type)
# does not have flag Py_TPFLAGS_BASETYPE
module = self.import_module(name='foo')
raises(TypeError, module.MetaType, 'other', (module.fooType,), {})
def test_sre(self):
import sys
for m in ['_sre', 'sre_compile', 'sre_constants', 'sre_parse', 're']:
# clear out these modules
try:
del sys.modules[m]
except KeyError:
pass
module = self.import_module(name='_sre')
import re
assert re.sre_compile._sre is module
s = u"Foo " * 1000 + u"Bar"
prog = re.compile(u"Foo.*Bar")
assert prog.match(s)
m = re.search(u"xyz", u"xyzxyz")
assert m
m = re.search("xyz", "xyzxyz")
assert m
assert "groupdict" in dir(m)
re._cache.clear()
re._cache_repl.clear()
del prog, m
def test_init_error(self):
module = self.import_module("foo")
raises(ValueError, module.InitErrType)
def test_cmps(self):
module = self.import_module("comparisons")
cmpr = module.CmpType()
assert cmpr == 3
assert cmpr != 42
def test_richcompare(self):
module = self.import_module("comparisons")
cmpr = module.CmpType()
# should not crash
cmpr < 4
cmpr <= 4
cmpr > 4
cmpr >= 4
assert cmpr.__le__(4) is NotImplemented
def test_tpcompare(self):
module = self.import_module("comparisons")
cmpr = module.OldCmpType()
assert cmpr < cmpr
def test_unhashable_when_tpcompare(self):
module = self.import_module("comparisons")
cmpr = module.OldCmpType()
raises(TypeError, hash, cmpr)
def test_hash(self):
module = self.import_module("comparisons")
cmpr = module.CmpType()
assert hash(cmpr) == 3
d = {}
d[cmpr] = 72
assert d[cmpr] == 72
assert d[3] == 72
def test_descriptor(self):
module = self.import_module("foo")
prop = module.Property()
class C(object):
x = prop
obj = C()
assert obj.x == (prop, obj, C)
assert C.x == (prop, None, C)
obj.x = 2
assert obj.y == (prop, 2)
del obj.x
assert obj.z == prop
def test_tp_dict(self):
foo = self.import_module("foo")
module = self.import_extension('test', [
("read_tp_dict", "METH_O",
'''
PyObject *method;
if (!args->ob_type->tp_dict)
{
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
method = PyDict_GetItemString(
args->ob_type->tp_dict, "copy");
Py_INCREF(method);
return method;
'''),
("get_type_dict", "METH_O",
'''
PyObject* value = args->ob_type->tp_dict;
if (value == NULL) value = Py_None;
Py_INCREF(value);
return value;
'''),
])
obj = foo.new()
assert module.read_tp_dict(obj) == foo.fooType.copy
d = module.get_type_dict(obj)
assert type(d) is dict
d["_some_attribute"] = 1
assert type(obj)._some_attribute == 1
del d["_some_attribute"]
class A(object):
pass
obj = A()
d = module.get_type_dict(obj)
assert type(d) is dict
d["_some_attribute"] = 1
assert type(obj)._some_attribute == 1
del d["_some_attribute"]
d = module.get_type_dict(1)
assert type(d) is dict
try:
d["_some_attribute"] = 1
except TypeError: # on PyPy, int.__dict__ is really immutable
pass
else:
assert int._some_attribute == 1
del d["_some_attribute"]
def test_custom_allocation(self):
foo = self.import_module("foo")
obj = foo.newCustom()
assert type(obj) is foo.Custom
assert type(foo.Custom) is foo.MetaType
def test_heaptype(self):
module = self.import_extension('foo', [
("name_by_heaptype", "METH_O",
'''
PyHeapTypeObject *heaptype = (PyHeapTypeObject *)args;
Py_INCREF(heaptype->ht_name);
return heaptype->ht_name;
'''),
("setattr", "METH_O",
'''
int ret;
PyObject* name = PyString_FromString("mymodule");
PyObject *obj = PyType_Type.tp_alloc(&PyType_Type, 0);
PyHeapTypeObject *type = (PyHeapTypeObject*)obj;
/* this is issue #2434: logic from pybind11 */
type->ht_type.tp_flags |= Py_TPFLAGS_HEAPTYPE;
type->ht_type.tp_name = ((PyTypeObject*)args)->tp_name;
PyType_Ready(&type->ht_type);
ret = PyObject_SetAttrString((PyObject*)&type->ht_type,
"__module__", name);
Py_DECREF(name);
if (ret < 0)
return NULL;
return PyLong_FromLong(ret);
'''),
])
class C(object):
pass
assert module.name_by_heaptype(C) == "C"
assert module.setattr(C) == 0
def test_type_dict(self):
foo = self.import_module("foo")
module = self.import_extension('test', [
("hack_tp_dict", "METH_VARARGS",
'''
PyTypeObject *type, *obj;
PyObject *a1 = PyLong_FromLong(1);
PyObject *a2 = PyLong_FromLong(2);
PyObject *value;
PyObject * key;
if (!PyArg_ParseTuple(args, "OO", &obj, &key))
return NULL;
type = obj->ob_type;
if (PyDict_SetItem(type->tp_dict, key,
a1) < 0)
return NULL;
Py_DECREF(a1);
PyType_Modified(type);
value = PyObject_GetAttr((PyObject *)type, key);
Py_DECREF(value);
if (PyDict_SetItem(type->tp_dict, key,
a2) < 0)
return NULL;
Py_DECREF(a2);
PyType_Modified(type);
value = PyObject_GetAttr((PyObject *)type, key);
return value;
'''
)
])
obj = foo.new()
assert module.hack_tp_dict(obj, "a") == 2
class Sub(foo.fooType):
pass
obj = Sub()
assert module.hack_tp_dict(obj, "b") == 2
def test_tp_descr_get(self):
module = self.import_extension('foo', [
("tp_descr_get", "METH_O",
'''
if (args->ob_type->tp_descr_get == NULL) {
Py_INCREF(Py_False);
return Py_False;
}
return args->ob_type->tp_descr_get(args, NULL,
(PyObject *)&PyInt_Type);
'''
)
])
assert module.tp_descr_get(42) is False
class Y(object):
def __get__(self, *args):
return 42
def unbound_method_example(self):
pass
assert module.tp_descr_get(Y()) == 42
#
p = property(lambda self: 42)
result = module.tp_descr_get(p)
assert result is p
#
f = lambda x: x + 1
ubm = module.tp_descr_get(f)
assert type(ubm) is type(Y.unbound_method_example)
assert ubm(42) == 43
def test_tp_descr_set(self):
module = self.import_extension('foo', [
("tp_descr_set", "METH_O",
'''
if (args->ob_type->tp_descr_set == NULL) {
Py_INCREF(Py_False);
return Py_False;
}
if (args->ob_type->tp_descr_set(args, Py_False, Py_True) != 0)
return NULL;
if (args->ob_type->tp_descr_set(args, Py_Ellipsis, NULL) != 0)
return NULL;
Py_INCREF(Py_True);
return Py_True;
'''
)
])
assert module.tp_descr_set(42) is False
class Y(object):
def __set__(self, obj, value):
assert obj is False
assert value is True
def __delete__(self, obj):
assert obj is Ellipsis
assert module.tp_descr_set(Y()) is True
#
def pset(obj, value):
assert obj is False
assert value is True
def pdel(obj):
assert obj is Ellipsis
p = property(lambda: "never used", pset, pdel)
assert module.tp_descr_set(p) is True
class TestTypes(BaseApiTest):
def test_type_attributes(self, space, api):
w_class = space.appexec([], """():
class A(object):
pass
return A
""")
ref = make_ref(space, w_class)
py_type = rffi.cast(PyTypeObjectPtr, ref)
assert py_type.c_tp_alloc
assert from_ref(space, py_type.c_tp_mro).wrappeditems is w_class.mro_w
decref(space, ref)
def test_type_dict(self, space, api):
w_class = space.appexec([], """():
class A(object):
pass
return A
""")
ref = make_ref(space, w_class)
py_type = rffi.cast(PyTypeObjectPtr, ref)
w_dict = from_ref(space, py_type.c_tp_dict)
w_name = space.newtext('a')
space.setitem(w_dict, w_name, space.wrap(1))
assert space.int_w(space.getattr(w_class, w_name)) == 1
space.delitem(w_dict, w_name)
def test_multiple_inheritance2(self, space, api):
w_class = space.appexec([], """():
class A(object):
pass
class B(object):
pass
class C(A, B):
pass
return C
""")
ref = make_ref(space, w_class)
decref(space, ref)
def test_lookup(self, space, api):
w_type = space.w_bytes
w_obj = api._PyType_Lookup(w_type, space.wrap("upper"))
assert space.is_w(w_obj, space.w_bytes.getdictvalue(space, "upper"))
w_obj = api._PyType_Lookup(w_type, space.wrap("__invalid"))
assert w_obj is None
assert api.PyErr_Occurred() is None
def test_subclass_not_PyCTypeObject(self, space, api):
pyobj = make_ref(space, api.PyLong_Type)
py_type = rffi.cast(PyTypeObjectPtr, pyobj)
w_pyclass = W_PyCTypeObject(space, py_type)
w_class = space.appexec([w_pyclass], """(base):
class Sub(base):
def addattrib(self, value):
self.attrib = value
return Sub
""")
assert w_pyclass in w_class.mro_w
assert isinstance(w_pyclass, W_PyCTypeObject)
assert not isinstance(w_class, W_PyCTypeObject)
assert w_pyclass.is_cpytype()
# XXX document the current status, not clear if this is desirable
assert w_class.is_cpytype()
class AppTestSlots(AppTestCpythonExtensionBase):
def setup_class(cls):
AppTestCpythonExtensionBase.setup_class.im_func(cls)
def _check_type_object(w_X):
assert w_X.is_cpytype()
assert not w_X.is_heaptype()
cls.w__check_type_object = cls.space.wrap(
gateway.interp2app(_check_type_object))
def test_some_slots(self):
module = self.import_extension('foo', [
("test_type", "METH_O",
'''
/* "args->ob_type" is a strange way to get at 'type',
which should have a different tp_getattro/tp_setattro
than its tp_base, which is 'object'.
*/
if (!args->ob_type->tp_setattro)
{
PyErr_SetString(PyExc_ValueError, "missing tp_setattro");
return NULL;
}
if (args->ob_type->tp_setattro ==
args->ob_type->tp_base->tp_setattro)
{
/* Note that unlike CPython, in PyPy 'type.tp_setattro'
is the same function as 'object.tp_setattro'. This
test used to check that it was not, but that was an
artifact of the bootstrap logic only---in the final
C sources I checked and they are indeed the same.
So we ignore this problem here. */
}
if (!args->ob_type->tp_getattro)
{
PyErr_SetString(PyExc_ValueError, "missing tp_getattro");
return NULL;
}
if (args->ob_type->tp_getattro ==
args->ob_type->tp_base->tp_getattro)
{
PyErr_SetString(PyExc_ValueError, "recursive tp_getattro");
return NULL;
}
Py_RETURN_TRUE;
'''
)
])
assert module.test_type(type(None))
def test_tp_getattro(self):
module = self.import_extension('foo', [
("test_tp_getattro", "METH_VARARGS",
'''
#if PY_MAJOR_VERSION > 2
#define PyString_FromString PyUnicode_FromString
#define PyIntObject PyLongObject
#define PyInt_AsLong PyLong_AsLong
#endif
PyObject *name, *obj = PyTuple_GET_ITEM(args, 0);
PyObject *attr, *value = PyTuple_GET_ITEM(args, 1);
if (!obj->ob_type->tp_getattro)
{
PyErr_SetString(PyExc_ValueError, "missing tp_getattro");
return NULL;
}
name = PyString_FromString("attr1");
attr = obj->ob_type->tp_getattro(obj, name);
if (PyInt_AsLong(attr) != PyInt_AsLong(value))
{
PyErr_SetString(PyExc_ValueError,
"tp_getattro returned wrong value");
return NULL;
}
Py_DECREF(name);
Py_DECREF(attr);
name = PyString_FromString("attr2");
attr = obj->ob_type->tp_getattro(obj, name);
if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError))
{
PyErr_Clear();
} else {
PyErr_SetString(PyExc_ValueError,
"tp_getattro should have raised");
return NULL;
}
Py_DECREF(name);
Py_RETURN_TRUE;
'''
)
])
class C:
def __init__(self):
self.attr1 = 123
assert module.test_tp_getattro(C(), 123)
def test_issue_2760_getattr(self):
module = self.import_extension('foo', [
("get_foo", "METH_O",
'''
#if PY_MAJOR_VERSION > 2
#define PyString_FromString PyUnicode_FromString
#endif
char* name = "foo";
PyTypeObject *tp = Py_TYPE(args);
PyObject *res;
if (tp->tp_getattr != NULL) {
res = (*tp->tp_getattr)(args, name);
}
else if (tp->tp_getattro != NULL) {
PyObject *w = PyString_FromString(name);
res = (*tp->tp_getattro)(args, w);
Py_DECREF(w);
}
else {
res = Py_None;
}
return res;
''')])
class Passthrough(object):
def __getattr__(self, name):
return name
obj = Passthrough()
assert module.get_foo(obj) == 'foo'
def test_nb_int(self):
module = self.import_extension('foo', [
("nb_int", "METH_VARARGS",
'''
PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0);
PyObject *obj = PyTuple_GET_ITEM(args, 1);
if (!type->tp_as_number ||
!type->tp_as_number->nb_int)
{
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
return type->tp_as_number->nb_int(obj);
'''
)
])
assert module.nb_int(int, 10) == 10
assert module.nb_int(float, -12.3) == -12
raises(ValueError, module.nb_int, str, "123")
class F(float):
def __int__(self):
return 666
# as long as issue 2248 is not fixed, 'expected' is 666 on pypy,
# but it should be -12. This test is not concerned about that,
# but only about getting the same answer with module.nb_int().
expected = float.__int__(F(-12.3))
assert module.nb_int(float, F(-12.3)) == expected
def test_nb_float(self):
module = self.import_extension('foo', [
("nb_float", "METH_VARARGS",
'''
PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0);
PyObject *obj = PyTuple_GET_ITEM(args, 1);
if (!type->tp_as_number ||
!type->tp_as_number->nb_float)
{
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
return type->tp_as_number->nb_float(obj);
'''
)
])
assert module.nb_float(int, 10) == 10.0
assert module.nb_float(float, -12.3) == -12.3
raises(ValueError, module.nb_float, str, "123")
#
# check that calling PyInt_Type->tp_as_number->nb_float(x)
# does not invoke a user-defined __float__()
class I(int):
def __float__(self):
return -55.55
class F(float):
def __float__(self):
return -66.66
assert float(I(10)) == -55.55
assert float(F(10.5)) == -66.66
assert module.nb_float(int, I(10)) == 10.0
assert module.nb_float(float, F(10.5)) == 10.5
# XXX but the subtype's tp_as_number->nb_float(x) should really invoke
# the user-defined __float__(); it doesn't so far
#assert module.nb_float(I, I(10)) == -55.55
#assert module.nb_float(F, F(10.5)) == -66.66
def test_tp_call(self):
module = self.import_extension('foo', [
("tp_call", "METH_VARARGS",
'''
PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0);
PyObject *obj = PyTuple_GET_ITEM(args, 1);
PyObject *c_args = PyTuple_GET_ITEM(args, 2);
if (!type->tp_call)
{
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
return type->tp_call(obj, c_args, NULL);
'''
)
])
class C:
def __call__(self, *args):
return args
assert module.tp_call(type(C()), C(), ('x', 2)) == ('x', 2)
class D(type):
def __call__(self, *args):
return "foo! %r" % (args,)
typ1 = D('d', (), {})
#assert module.tp_call(D, typ1, ()) == "foo! ()" XXX not working so far
assert isinstance(module.tp_call(type, typ1, ()), typ1)
def test_tp_init(self):
module = self.import_extension('foo', [
("tp_init", "METH_VARARGS",
'''
PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0);
PyObject *obj = PyTuple_GET_ITEM(args, 1);
PyObject *c_args = PyTuple_GET_ITEM(args, 2);
if (!type->tp_init)
{
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
if (type->tp_init(obj, c_args, NULL) < 0)
return NULL;
Py_INCREF(Py_None);
return Py_None;
'''
)
])
x = [42]
assert module.tp_init(list, x, ("hi",)) is None
assert x == ["h", "i"]
class LL(list):
def __init__(self, *ignored):
raise Exception
x = LL.__new__(LL)
assert module.tp_init(list, x, ("hi",)) is None
assert x == ["h", "i"]
def test_mp_subscript(self):
module = self.import_extension('foo', [
("new_obj", "METH_NOARGS",
'''
PyObject *obj;
obj = PyObject_New(PyObject, &Foo_Type);
return obj;
'''
)], prologue='''
static PyObject*
mp_subscript(PyObject *self, PyObject *key)
{
return Py_BuildValue("i", 42);
}
PyMappingMethods tp_as_mapping;
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
''', more_init = '''
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT;
Foo_Type.tp_as_mapping = &tp_as_mapping;
tp_as_mapping.mp_subscript = (binaryfunc)mp_subscript;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
''')
obj = module.new_obj()
assert obj[100] == 42
raises(TypeError, "obj.__getitem__(100, 101)")
raises(TypeError, "obj.__getitem__(100, a=42)")
def test_mp_ass_subscript(self):
module = self.import_extension('foo', [
("new_obj", "METH_NOARGS",
'''
PyObject *obj;
obj = PyObject_New(PyObject, &Foo_Type);
return obj;
'''
)], prologue='''
            #if PY_MAJOR_VERSION > 2
            #define PyString_FromString PyBytes_FromString
            #define PyInt_Check PyLong_Check
            #endif
            static int
            mp_ass_subscript(PyObject *self, PyObject *key, PyObject *value)
{
if (PyInt_Check(key)) {
PyErr_SetNone(PyExc_ZeroDivisionError);
return -1;
}
return 0;
}
PyMappingMethods tp_as_mapping;
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
''', more_init = '''
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT;
Foo_Type.tp_as_mapping = &tp_as_mapping;
tp_as_mapping.mp_ass_subscript = mp_ass_subscript;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
''')
obj = module.new_obj()
raises(ZeroDivisionError, obj.__setitem__, 5, None)
res = obj.__setitem__('foo', None)
assert res is None
def test_sq_contains(self):
module = self.import_extension('foo', [
("new_obj", "METH_NOARGS",
'''
PyObject *obj;
obj = PyObject_New(PyObject, &Foo_Type);
return obj;
'''
)], prologue='''
static int
sq_contains(PyObject *self, PyObject *value)
{
return 42;
}
PySequenceMethods tp_as_sequence;
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
''', more_init='''
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT;
Foo_Type.tp_as_sequence = &tp_as_sequence;
tp_as_sequence.sq_contains = sq_contains;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
''')
obj = module.new_obj()
res = "foo" in obj
assert res is True
def test_sq_ass_slice(self):
module = self.import_extension('foo', [
("new_obj", "METH_NOARGS",
'''
PyObject *obj;
obj = PyObject_New(PyObject, &Foo_Type);
return obj;
'''
)], prologue='''
#if PY_MAJOR_VERSION > 2
#define PyInt_Check PyLong_Check
#define PyInt_AsLong PyLong_AsLong
#endif
static int
sq_ass_slice(PyObject *self, Py_ssize_t a, Py_ssize_t b, PyObject *o)
{
int expected = (a == 10 && b == 20 &&
PyInt_Check(o) && PyInt_AsLong(o) == 42);
if (!expected) {
PyErr_SetString(PyExc_ValueError, "test failed");
return -1;
}
return 0;
}
PySequenceMethods tp_as_sequence;
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
''', more_init='''
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT;
Foo_Type.tp_as_sequence = &tp_as_sequence;
tp_as_sequence.sq_ass_slice = sq_ass_slice;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
''')
obj = module.new_obj()
obj[10:20] = 42
raises(ValueError, "obj[10:20] = 43")
raises(ValueError, "obj[11:20] = 42")
raises(ValueError, "obj[10:21] = 42")
def test_sq_ass_item(self):
module = self.import_extension('foo', [
("new_obj", "METH_NOARGS",
'''
PyObject *obj;
obj = PyObject_New(PyObject, &Foo_Type);
return obj;
'''
)], prologue='''
#if PY_MAJOR_VERSION > 2
#define PyInt_Check PyLong_Check
#define PyInt_AsLong PyLong_AsLong
#endif
static int
sq_ass_item(PyObject *self, Py_ssize_t i, PyObject *o)
{
int expected;
if (o == NULL) // delitem
expected = (i == 12);
else // setitem
expected = (i == 10 && PyInt_Check(o) && PyInt_AsLong(o) == 42);
if (!expected) {
PyErr_SetString(PyExc_ValueError, "test failed");
return -1;
}
return 0;
}
PySequenceMethods tp_as_sequence;
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
''', more_init='''
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT;
Foo_Type.tp_as_sequence = &tp_as_sequence;
tp_as_sequence.sq_ass_item = sq_ass_item;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
''')
obj = module.new_obj()
obj[10] = 42
raises(ValueError, "obj[10] = 43")
raises(ValueError, "obj[11] = 42")
del obj[12]
raises(ValueError, "del obj[13]")
def test_tp_iter(self):
module = self.import_extension('foo', [
("tp_iter", "METH_VARARGS",
'''
PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0);
PyObject *obj = PyTuple_GET_ITEM(args, 1);
if (!type->tp_iter)
{
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
return type->tp_iter(obj);
'''
),
("tp_iternext", "METH_VARARGS",
'''
#if PY_MAJOR_VERSION > 2
#define PyString_FromString PyBytes_FromString
#endif
PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0);
PyObject *obj = PyTuple_GET_ITEM(args, 1);
PyObject *result;
if (!type->tp_iternext)
{
PyErr_SetNone(PyExc_ValueError);
return NULL;
}
result = type->tp_iternext(obj);
if (!result && !PyErr_Occurred())
result = PyString_FromString("stop!");
return result;
'''
)
])
l = [1]
it = module.tp_iter(list, l)
assert type(it) is type(iter([]))
assert module.tp_iternext(type(it), it) == 1
assert module.tp_iternext(type(it), it) == b"stop!"
#
class LL(list):
def __iter__(self):
return iter(())
ll = LL([1])
it = module.tp_iter(list, ll)
assert type(it) is type(iter([]))
x = list(it)
assert x == [1]
def test_intlike(self):
module = self.import_extension('foo', [
("newInt", "METH_VARARGS",
"""
IntLikeObject *intObj;
int intval;
if (!PyArg_ParseTuple(args, "i", &intval))
return NULL;
intObj = PyObject_New(IntLikeObject, &IntLike_Type);
if (!intObj) {
return NULL;
}
intObj->value = intval;
return (PyObject *)intObj;
"""),
("check", "METH_VARARGS", """
IntLikeObject *intObj;
int intval, isint;
if (!PyArg_ParseTuple(args, "i", &intval))
return NULL;
intObj = PyObject_New(IntLikeObject, &IntLike_Type);
if (!intObj) {
return NULL;
}
intObj->value = intval;
isint = PyNumber_Check((PyObject*)intObj);
Py_DECREF((PyObject*)intObj);
return PyInt_FromLong(isint);
"""),
], prologue= """
typedef struct
{
PyObject_HEAD
int value;
} IntLikeObject;
static int
intlike_nb_nonzero(PyObject *o)
{
IntLikeObject *v = (IntLikeObject*)o;
if (v->value == -42) {
PyErr_SetNone(PyExc_ValueError);
return -1;
}
/* Returning -1 should be for exceptions only! */
return v->value;
}
static PyObject*
intlike_nb_int(PyObject* o)
{
IntLikeObject *v = (IntLikeObject*)o;
return PyInt_FromLong(v->value);
}
PyTypeObject IntLike_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
/*tp_name*/ "IntLike",
/*tp_basicsize*/ sizeof(IntLikeObject),
};
static PyNumberMethods intlike_as_number;
""", more_init="""
IntLike_Type.tp_flags |= Py_TPFLAGS_DEFAULT;
IntLike_Type.tp_as_number = &intlike_as_number;
intlike_as_number.nb_nonzero = intlike_nb_nonzero;
intlike_as_number.nb_int = intlike_nb_int;
PyType_Ready(&IntLike_Type);
""")
assert not bool(module.newInt(0))
assert bool(module.newInt(1))
raises(SystemError, bool, module.newInt(-1))
raises(ValueError, bool, module.newInt(-42))
        val = module.check(10)
assert val == 1
def test_mathfunc(self):
module = self.import_extension('foo', [
("newInt", "METH_VARARGS",
"""
IntLikeObject *intObj;
long intval;
if (!PyArg_ParseTuple(args, "l", &intval))
return NULL;
intObj = PyObject_New(IntLikeObject, &IntLike_Type);
if (!intObj) {
return NULL;
}
intObj->ival = intval;
return (PyObject *)intObj;
"""),
("newIntNoOp", "METH_VARARGS",
"""
IntLikeObjectNoOp *intObjNoOp;
long intval;
if (!PyArg_ParseTuple(args, "l", &intval))
return NULL;
intObjNoOp = PyObject_New(IntLikeObjectNoOp, &IntLike_Type_NoOp);
if (!intObjNoOp) {
return NULL;
}
intObjNoOp->ival = intval;
return (PyObject *)intObjNoOp;
""")], prologue="""
#include <math.h>
typedef struct
{
PyObject_HEAD
long ival;
} IntLikeObject;
#if PY_MAJOR_VERSION > 2
#define PyInt_Check PyLong_Check
#define PyInt_AsLong PyLong_AsLong
#define PyInt_FromLong PyLong_FromLong
#endif
static PyObject *
intlike_nb_add(PyObject *self, PyObject *other)
{
long val2, val1 = ((IntLikeObject *)(self))->ival;
if (PyInt_Check(other)) {
                    val2 = PyInt_AsLong(other);
return PyInt_FromLong(val1+val2);
}
val2 = ((IntLikeObject *)(other))->ival;
return PyInt_FromLong(val1+val2);
}
            static PyObject *
            intlike_nb_pow(PyObject *self, PyObject *other, PyObject * z)
            {
                long val2, val1 = ((IntLikeObject *)(self))->ival;
                if (PyInt_Check(other)) {
                    val2 = PyInt_AsLong(other);
                    return PyInt_FromLong((long)pow(val1, val2));
                }
                val2 = ((IntLikeObject *)(other))->ival;
                return PyInt_FromLong((long)pow(val1, val2));
            }
PyTypeObject IntLike_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
/*tp_name*/ "IntLike",
/*tp_basicsize*/ sizeof(IntLikeObject),
};
static PyNumberMethods intlike_as_number;
typedef struct
{
PyObject_HEAD
long ival;
} IntLikeObjectNoOp;
PyTypeObject IntLike_Type_NoOp = {
PyVarObject_HEAD_INIT(NULL, 0)
/*tp_name*/ "IntLikeNoOp",
/*tp_basicsize*/ sizeof(IntLikeObjectNoOp),
};
""", more_init="""
IntLike_Type.tp_as_number = &intlike_as_number;
IntLike_Type.tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES;
intlike_as_number.nb_add = intlike_nb_add;
intlike_as_number.nb_power = intlike_nb_pow;
if (PyType_Ready(&IntLike_Type) < 0) INITERROR;
IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES;
if (PyType_Ready(&IntLike_Type_NoOp) < 0) INITERROR;
""")
a = module.newInt(1)
b = module.newInt(2)
c = 3
d = module.newIntNoOp(4)
assert (a + b) == 3
assert (b + c) == 5
assert (d + a) == 5
assert pow(d,b) == 16
def test_tp_new_in_subclass(self):
import datetime
module = self.import_module(name='foo3')
module.footype("X", (object,), {})
a = module.datetimetype(1, 1, 1)
assert isinstance(a, module.datetimetype)
def test_app_subclass_of_c_type(self):
import sys
module = self.import_module(name='foo')
size = module.size_of_instances(module.fooType)
class f1(object):
pass
class f2(module.fooType):
pass
class bar(f1, f2):
pass
class foo(f2, f1):
pass
x = foo()
assert bar.__base__ is f2
# On cpython, the size changes.
if '__pypy__' in sys.builtin_module_names:
assert module.size_of_instances(bar) == size
else:
assert module.size_of_instances(bar) >= size
assert module.size_of_instances(foo) == module.size_of_instances(bar)
def test_app_cant_subclass_two_types(self):
import sys
if sys.version_info < (2, 7, 9):
skip("crashes on CPython (2.7.5 crashes, 2.7.9 is ok)")
module = self.import_module(name='foo')
try:
class bar(module.fooType, module.UnicodeSubtype):
pass
except TypeError as e:
import sys
if '__pypy__' in sys.builtin_module_names:
assert str(e) == 'instance layout conflicts in multiple inheritance'
else:
assert str(e) == ('Error when calling the metaclass bases\n'
' multiple bases have instance lay-out conflict')
else:
raise AssertionError("did not get TypeError!")
def test_call_tp_dealloc(self):
module = self.import_extension('foo', [
("fetchFooType", "METH_NOARGS",
"""
PyObject *o;
o = PyObject_New(PyObject, &Foo_Type);
init_foo(o);
Py_DECREF(o); /* calls dealloc_foo immediately */
Py_INCREF(&Foo_Type);
return (PyObject *)&Foo_Type;
"""),
("newInstance", "METH_O",
"""
PyTypeObject *tp = (PyTypeObject *)args;
PyObject *e = PyTuple_New(0);
PyObject *o = tp->tp_new(tp, e, NULL);
Py_DECREF(e);
return o;
"""),
("getCounter", "METH_NOARGS",
"""
return PyInt_FromLong(foo_counter);
""")], prologue="""
typedef struct {
PyObject_HEAD
int someval[99];
} FooObject;
static int foo_counter = 1000;
static void dealloc_foo(PyObject *foo) {
int i;
foo_counter += 10;
for (i = 0; i < 99; i++)
if (((FooObject *)foo)->someval[i] != 1000 + i)
foo_counter += 100000; /* error! */
Py_TYPE(foo)->tp_free(foo);
}
static void init_foo(PyObject *o)
{
int i;
if (o->ob_type->tp_basicsize < sizeof(FooObject))
abort();
for (i = 0; i < 99; i++)
((FooObject *)o)->someval[i] = 1000 + i;
}
static PyObject *new_foo(PyTypeObject *t, PyObject *a, PyObject *k)
{
PyObject *o;
foo_counter += 1000;
o = t->tp_alloc(t, 0);
init_foo(o);
return o;
}
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
""", more_init="""
Foo_Type.tp_basicsize = sizeof(FooObject);
Foo_Type.tp_dealloc = &dealloc_foo;
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES
| Py_TPFLAGS_BASETYPE;
Foo_Type.tp_new = &new_foo;
Foo_Type.tp_free = &PyObject_Del;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
""")
Foo = module.fetchFooType()
assert module.getCounter() == 1010
Foo(); Foo()
for i in range(10):
if module.getCounter() >= 3030:
break
# NB. use self.debug_collect() instead of gc.collect(),
# otherwise rawrefcount's dealloc callback doesn't trigger
self.debug_collect()
assert module.getCounter() == 3030
#
class Bar(Foo):
pass
assert Foo.__new__ is Bar.__new__
Bar(); Bar()
for i in range(10):
if module.getCounter() >= 5050:
break
self.debug_collect()
assert module.getCounter() == 5050
#
module.newInstance(Foo)
for i in range(10):
if module.getCounter() >= 6060:
break
self.debug_collect()
assert module.getCounter() == 6060
#
module.newInstance(Bar)
for i in range(10):
if module.getCounter() >= 7070:
break
self.debug_collect()
assert module.getCounter() == 7070
def test_tp_call_reverse(self):
module = self.import_extension('foo', [
("new_obj", "METH_NOARGS",
'''
PyObject *obj;
obj = PyObject_New(PyObject, &Foo_Type);
return obj;
'''
            )], prologue='''
            #if PY_MAJOR_VERSION > 2
            #define PyInt_FromLong PyLong_FromLong  /* py3 shim, mirroring the other snippets */
            #endif
            static PyObject *
my_tp_call(PyObject *self, PyObject *args, PyObject *kwds)
{
return PyInt_FromLong(42);
}
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
''', more_init='''
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT;
Foo_Type.tp_call = &my_tp_call;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
''')
x = module.new_obj()
assert x() == 42
assert x(4, bar=5) == 42
def test_custom_metaclass(self):
module = self.import_extension('foo', [
("getMetaClass", "METH_NOARGS",
'''
Py_INCREF(&FooType_Type);
return (PyObject *)&FooType_Type;
'''
)], prologue='''
static PyTypeObject FooType_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.Type",
};
''', more_init='''
FooType_Type.tp_flags = Py_TPFLAGS_DEFAULT;
FooType_Type.tp_base = &PyType_Type;
if (PyType_Ready(&FooType_Type) < 0) INITERROR;
''')
FooType = module.getMetaClass()
if not self.runappdirect:
self._check_type_object(FooType)
class X(object):
__metaclass__ = FooType
X()
def test_multiple_inheritance3(self):
module = self.import_extension('foo', [
("new_obj", "METH_NOARGS",
'''
PyObject *obj;
PyTypeObject *Base1, *Base2, *Base12;
Base1 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0);
Base2 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0);
Base12 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0);
Base1->tp_name = "Base1";
Base2->tp_name = "Base2";
Base12->tp_name = "Base12";
Base1->tp_basicsize = sizeof(PyHeapTypeObject);
Base2->tp_basicsize = sizeof(PyHeapTypeObject);
Base12->tp_basicsize = sizeof(PyHeapTypeObject);
#ifndef PYPY_VERSION /* PyHeapTypeObject has no ht_qualname nor ht_name on PyPy */
#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 3
{
PyObject * dummyname = PyBytes_FromString("dummy name");
((PyHeapTypeObject*)Base1)->ht_qualname = dummyname;
((PyHeapTypeObject*)Base2)->ht_qualname = dummyname;
((PyHeapTypeObject*)Base12)->ht_qualname = dummyname;
}
#elif PY_MAJOR_VERSION == 2
{
PyObject * dummyname = PyBytes_FromString("dummy name");
((PyHeapTypeObject*)Base1)->ht_name = dummyname;
((PyHeapTypeObject*)Base2)->ht_name = dummyname;
((PyHeapTypeObject*)Base12)->ht_name = dummyname;
}
#endif
#endif
Base1->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
Base2->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
Base12->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE;
Base12->tp_base = Base1;
Base12->tp_bases = PyTuple_Pack(2, Base1, Base2);
Base12->tp_doc = "The Base12 type or object";
if (PyType_Ready(Base1) < 0) return NULL;
if (PyType_Ready(Base2) < 0) return NULL;
if (PyType_Ready(Base12) < 0) return NULL;
obj = PyObject_New(PyObject, Base12);
return obj;
'''
)])
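        # Base12 is assembled by hand: tp_base points at Base1 while tp_bases
        # holds (Base1, Base2), mimicking what type_new() would produce for a
        # heap type with multiple bases.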
obj = module.new_obj()
assert 'Base12' in str(obj)
assert type(obj).__doc__ == "The Base12 type or object"
assert obj.__doc__ == "The Base12 type or object"
def test_multiple_inheritance_fetch_tp_bases(self):
module = self.import_extension('foo', [
("foo", "METH_O",
'''
PyTypeObject *tp;
tp = (PyTypeObject*)args;
Py_INCREF(tp->tp_bases);
return tp->tp_bases;
'''
)])
class A(object):
pass
class B(object):
pass
class C(A, B):
pass
bases = module.foo(C)
assert bases == (A, B)
def test_multiple_inheritance_old_style_base(self):
module = self.import_extension('foo', [
("foo", "METH_O",
'''
PyTypeObject *tp;
tp = (PyTypeObject*)args;
Py_INCREF(tp->tp_bases);
return tp->tp_bases;
'''
)])
# used to segfault after some iterations
for i in range(11):
class A(object):
pass
class B:
pass
class C(A, B):
pass
bases = module.foo(C)
assert bases == (A, B)
def test_getattr_getattro(self):
module = self.import_module(name='foo')
assert module.gettype2.dcba == b'getattro:dcba'
assert (type(module.gettype2).__getattribute__(module.gettype2, 'dcBA')
== b'getattro:dcBA')
assert module.gettype1.abcd == b'getattr:abcd'
        # GetType1 objects have a __getattribute__ method, but it
        # doesn't call tp_getattr at all, even on CPython
raises(AttributeError, type(module.gettype1).__getattribute__,
module.gettype1, 'dcBA')
def test_multiple_inheritance_tp_basicsize(self):
module = self.import_module(name='issue2482')
class PyBase(object):
pass
basesize = module.get_basicsize(PyBase)
CBase = module.issue2482_object
class A(CBase, PyBase):
def __init__(self, i):
CBase.__init__(self)
PyBase.__init__(self)
class B(PyBase, CBase):
def __init__(self, i):
PyBase.__init__(self)
CBase.__init__(self)
Asize = module.get_basicsize(A)
Bsize = module.get_basicsize(B)
assert Asize == Bsize
assert Asize > basesize
def test_multiple_inheritance_bug1(self):
module = self.import_extension('foo', [
("get_type", "METH_NOARGS",
'''
Py_INCREF(&Foo_Type);
return (PyObject *)&Foo_Type;
'''
), ("forty_two", "METH_O",
'''
return PyInt_FromLong(42);
'''
)], prologue='''
#if PY_MAJOR_VERSION > 2
#define PyInt_FromLong PyLong_FromLong
#endif
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
static PyObject *dummy_new(PyTypeObject *t, PyObject *a,
PyObject *k)
{
                abort();    /* never actually called in CPython */
                return NULL; /* unreachable; silences missing-return warnings */
            }
''', more_init = '''
Foo_Type.tp_base = (PyTypeObject *)PyExc_Exception;
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
Foo_Type.tp_new = dummy_new;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
''')
Foo = module.get_type()
class A(Foo, SyntaxError):
pass
assert A.__base__ is SyntaxError
A(42) # assert is not aborting
class Bar(Exception):
__new__ = module.forty_two
class B(Bar, SyntaxError):
pass
assert B() == 42
# aaaaa even more hackiness
class C(A):
pass
C(42) # assert is not aborting
def test_getset(self):
module = self.import_extension('foo', [
("get_instance", "METH_NOARGS",
'''
return PyObject_New(PyObject, &Foo_Type);
'''
), ("get_number", "METH_NOARGS",
'''
return PyInt_FromLong(my_global_number);
'''
)], prologue='''
#if PY_MAJOR_VERSION > 2
#define PyInt_FromLong PyLong_FromLong
#define PyInt_AsLong PyLong_AsLong
#endif
static long my_global_number;
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
static PyObject *bar_get(PyObject *foo, void *closure)
{
return PyInt_FromLong(1000 + (long)closure);
}
static PyObject *baz_get(PyObject *foo, void *closure)
{
return PyInt_FromLong(2000 + (long)closure);
}
static int baz_set(PyObject *foo, PyObject *x, void *closure)
{
if (x != NULL)
my_global_number = 3000 + (long)closure + PyInt_AsLong(x);
else
my_global_number = 4000 + (long)closure;
return 0;
}
static PyGetSetDef foo_getset[] = {
{ "bar", bar_get, NULL, "mybardoc", (void *)42 },
{ "baz", baz_get, baz_set, "mybazdoc", (void *)43 },
{ NULL }
};
''', more_init = '''
Foo_Type.tp_getset = foo_getset;
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
''')
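        # Closure values: bar -> 42, baz -> 43. The getters add 1000/2000,
        # the setter stores 3000 + closure + value, and deletion (x == NULL)
        # stores 4000 + closure, which the asserts below check exactly.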
foo = module.get_instance()
assert foo.bar == 1042
assert foo.bar == 1042
assert foo.baz == 2043
foo.baz = 50000
assert module.get_number() == 53043
e = raises(AttributeError, "foo.bar = 0")
assert str(e.value).startswith("attribute 'bar' of '")
assert str(e.value).endswith("foo' objects is not writable")
del foo.baz
assert module.get_number() == 4043
raises(AttributeError, "del foo.bar")
class AppTestHashable(AppTestCpythonExtensionBase):
def test_unhashable(self):
if not self.runappdirect:
skip('pointer to function equality available'
' only after translation')
module = self.import_extension('foo', [
("new_obj", "METH_NOARGS",
'''
PyObject *obj;
obj = PyObject_New(PyObject, &Foo_Type);
return obj;
'''
)], prologue='''
static PyTypeObject Foo_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"foo.foo",
};
''', more_init = '''
Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT;
Foo_Type.tp_hash = PyObject_HashNotImplemented;
if (PyType_Ready(&Foo_Type) < 0) INITERROR;
''')
obj = module.new_obj()
raises(TypeError, hash, obj)
assert type(obj).__dict__['__hash__'] is None
        # having __hash__ set to None is equivalent to not being Hashable:
from collections import Hashable
assert not isinstance(obj, Hashable)
class AppTestFlags(AppTestCpythonExtensionBase):
def test_has_subclass_flag(self):
module = self.import_extension('foo', [
("test_flags", "METH_VARARGS",
'''
long long in_flag, my_flag;
PyObject * obj;
if (!PyArg_ParseTuple(args, "OL", &obj, &in_flag))
return NULL;
if (!PyType_Check(obj))
{
PyErr_SetString(PyExc_ValueError, "input must be type");
return NULL;
}
my_flag = ((PyTypeObject*)obj)->tp_flags;
if ((my_flag & in_flag) != in_flag)
return PyLong_FromLong(-1);
if (!PyType_CheckExact(obj)) {
if ((my_flag & Py_TPFLAGS_TYPE_SUBCLASS) == Py_TPFLAGS_TYPE_SUBCLASS)
return PyLong_FromLong(-2);
}
return PyLong_FromLong(0);
'''),])
# copied from object.h
Py_TPFLAGS_INT_SUBCLASS = (1L<<23) # goes away on py3
Py_TPFLAGS_LONG_SUBCLASS = (1L<<24)
Py_TPFLAGS_LIST_SUBCLASS = (1L<<25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1L<<26)
Py_TPFLAGS_STRING_SUBCLASS = (1L<<27) # rename to BYTES on py3
Py_TPFLAGS_UNICODE_SUBCLASS = (1L<<28)
Py_TPFLAGS_DICT_SUBCLASS = (1L<<29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1L<<30)
Py_TPFLAGS_TYPE_SUBCLASS = (1L<<31)
for t,f in ((long, Py_TPFLAGS_LONG_SUBCLASS),
(int, Py_TPFLAGS_INT_SUBCLASS),
(list, Py_TPFLAGS_LIST_SUBCLASS),
(tuple, Py_TPFLAGS_TUPLE_SUBCLASS),
(bytes, Py_TPFLAGS_STRING_SUBCLASS),
(str, Py_TPFLAGS_STRING_SUBCLASS),
(unicode, Py_TPFLAGS_UNICODE_SUBCLASS),
(dict, Py_TPFLAGS_DICT_SUBCLASS),
(Exception, Py_TPFLAGS_BASE_EXC_SUBCLASS),
(type, Py_TPFLAGS_TYPE_SUBCLASS),
):
assert module.test_flags(t, f) == 0
class MyList(list):
pass
assert module.test_flags(MyList, Py_TPFLAGS_LIST_SUBCLASS) == 0
def test_has_pypy_subclass_flag(self):
module = self.import_extension('foo', [
("test_pypy_flags", "METH_VARARGS",
'''
long long in_flag, my_flag;
PyObject * obj;
if (!PyArg_ParseTuple(args, "OL", &obj, &in_flag))
return NULL;
if (!PyType_Check(obj))
{
PyErr_SetString(PyExc_ValueError, "input must be type");
return NULL;
}
my_flag = ((PyTypeObject*)obj)->tp_pypy_flags;
if ((my_flag & in_flag) != in_flag)
return PyLong_FromLong(-1);
return PyLong_FromLong(0);
'''),])
# copied from object.h
Py_TPPYPYFLAGS_FLOAT_SUBCLASS = (1<<0)
class MyFloat(float):
pass
assert module.test_pypy_flags(float, Py_TPPYPYFLAGS_FLOAT_SUBCLASS) == 0
assert module.test_pypy_flags(MyFloat, Py_TPPYPYFLAGS_FLOAT_SUBCLASS) == 0
|
e9327ae6c226592477aedeac329293f57bc89afa
|
b74320ad439e37dfa48cd8db38dab3b7a20a36ff
|
/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
|
c513fb1c0b33547d8274e1a50690963247541861
|
[
"Apache-2.0"
] |
permissive
|
huggingface/diffusers
|
c82beba1ec5f0aba01b6744040a5accc41ec2493
|
5eeedd9e3336882d598091e191559f67433b6427
|
refs/heads/main
| 2023-08-29T01:22:52.237910
| 2023-08-28T18:16:27
| 2023-08-28T18:16:27
| 498,011,141
| 17,308
| 3,158
|
Apache-2.0
| 2023-09-14T20:57:44
| 2022-05-30T16:04:02
|
Python
|
UTF-8
|
Python
| false
| false
| 23,907
|
py
|
test_stable_diffusion_pix2pix_zero.py
|
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DDPMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
StableDiffusionPix2PixZeroPipeline,
UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_numpy, nightly, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, load_pt, require_torch_gpu, skip_mps
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
TEXT_TO_IMAGE_IMAGE_PARAMS,
)
from ..test_pipelines_common import (
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
@skip_mps
class StableDiffusionPix2PixZeroPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
pipeline_class = StableDiffusionPix2PixZeroPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"image"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def setUpClass(cls):
cls.source_embeds = load_pt(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/src_emb_0.pt"
)
cls.target_embeds = load_pt(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/tgt_emb_0.pt"
)
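    # source_embeds / target_embeds are precomputed text embeddings for the
    # source and target concepts; pix2pix-zero steers generation along their
    # difference while keeping cross-attention maps close to the original.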
def get_dummy_components(self):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
)
scheduler = DDIMScheduler()
inverse_scheduler = DDIMInverseScheduler()
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"inverse_scheduler": inverse_scheduler,
"caption_generator": None,
"caption_processor": None,
}
return components
def get_dummy_inputs(self, device, seed=0):
generator = torch.manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"cross_attention_guidance_amount": 0.15,
"source_embeds": self.source_embeds,
"target_embeds": self.target_embeds,
"output_type": "numpy",
}
return inputs
def get_dummy_inversion_inputs(self, device, seed=0):
dummy_image = floats_tensor((2, 3, 32, 32), rng=random.Random(seed)).to(torch_device)
dummy_image = dummy_image / 2 + 0.5
generator = torch.manual_seed(seed)
inputs = {
"prompt": [
"A painting of a squirrel eating a burger",
"A painting of a burger eating a squirrel",
],
"image": dummy_image.cpu(),
"num_inference_steps": 2,
"guidance_scale": 6.0,
"generator": generator,
"output_type": "numpy",
}
return inputs
def get_dummy_inversion_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"):
inputs = self.get_dummy_inversion_inputs(device, seed)
if input_image_type == "pt":
image = inputs["image"]
elif input_image_type == "np":
image = VaeImageProcessor.pt_to_numpy(inputs["image"])
elif input_image_type == "pil":
image = VaeImageProcessor.pt_to_numpy(inputs["image"])
image = VaeImageProcessor.numpy_to_pil(image)
else:
raise ValueError(f"unsupported input_image_type {input_image_type}")
inputs["image"] = image
inputs["output_type"] = output_type
return inputs
def test_save_load_optional_components(self):
if not hasattr(self.pipeline_class, "_optional_components"):
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(pipe, optional_component, None)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
inputs = self.get_dummy_inputs(torch_device)
output = pipe(**inputs)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(tmpdir)
pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
pipe_loaded.to(torch_device)
pipe_loaded.set_progress_bar_config(disable=None)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(pipe_loaded, optional_component) is None,
f"`{optional_component}` did not stay set to None after loading.",
)
inputs = self.get_dummy_inputs(torch_device)
output_loaded = pipe_loaded(**inputs)[0]
max_diff = np.abs(output - output_loaded).max()
self.assertLess(max_diff, 1e-4)
def test_stable_diffusion_pix2pix_zero_inversion(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inversion_inputs(device)
inputs["image"] = inputs["image"][:1]
inputs["prompt"] = inputs["prompt"][:1]
image = sd_pipe.invert(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4823, 0.4783, 0.5638, 0.5201, 0.5247, 0.5644, 0.5029, 0.5404, 0.5062])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_pix2pix_zero_inversion_batch(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inversion_inputs(device)
image = sd_pipe.invert(**inputs).images
image_slice = image[1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
expected_slice = np.array([0.6446, 0.5232, 0.4914, 0.4441, 0.4654, 0.5546, 0.4650, 0.4938, 0.5044])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_pix2pix_zero_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4863, 0.5053, 0.5033, 0.4007, 0.3571, 0.4768, 0.5176, 0.5277, 0.4940])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_pix2pix_zero_negative_prompt(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
negative_prompt = "french fries"
output = sd_pipe(**inputs, negative_prompt=negative_prompt)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5177, 0.5097, 0.5047, 0.4076, 0.3667, 0.4767, 0.5238, 0.5307, 0.4958])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_pix2pix_zero_euler(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["scheduler"] = EulerAncestralDiscreteScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
)
sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5421, 0.5525, 0.6085, 0.5279, 0.4658, 0.5317, 0.4418, 0.4815, 0.5132])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_pix2pix_zero_ddpm(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["scheduler"] = DDPMScheduler()
sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.4861, 0.5053, 0.5038, 0.3994, 0.3562, 0.4768, 0.5172, 0.5280, 0.4938])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_outputs_equivalent(self):
device = torch_device
components = self.get_dummy_components()
sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
output_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pt")).images
output_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="np")).images
output_pil = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pil")).images
max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max()
self.assertLess(max_diff, 1e-4, "`output_type=='pt'` generate different results from `output_type=='np'`")
max_diff = np.abs(np.array(output_pil[0]) - (output_np[0] * 255).round()).max()
self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`")
def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_inputs_equivalent(self):
device = torch_device
components = self.get_dummy_components()
sd_pipe = StableDiffusionPix2PixZeroPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
out_input_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="pt")).images
out_input_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="np")).images
out_input_pil = sd_pipe.invert(
**self.get_dummy_inversion_inputs_by_type(device, input_image_type="pil")
).images
max_diff = np.abs(out_input_pt - out_input_np).max()
self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`")
assert_mean_pixel_difference(out_input_pil, out_input_np, expected_max_diff=1)
# Non-determinism caused by the scheduler optimizing the latent inputs during inference
@unittest.skip("non-deterministic pipeline")
def test_inference_batch_single_identical(self):
return super().test_inference_batch_single_identical()
@nightly
@require_torch_gpu
class StableDiffusionPix2PixZeroPipelineNightlyTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def setUpClass(cls):
cls.source_embeds = load_pt(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat.pt"
)
cls.target_embeds = load_pt(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/dog.pt"
)
def get_inputs(self, seed=0):
generator = torch.manual_seed(seed)
inputs = {
"prompt": "turn him into a cyborg",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"cross_attention_guidance_amount": 0.15,
"source_embeds": self.source_embeds,
"target_embeds": self.target_embeds,
"output_type": "numpy",
}
return inputs
def test_stable_diffusion_pix2pix_zero_default(self):
pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.5742, 0.5757, 0.5747, 0.5781, 0.5688, 0.5713, 0.5742, 0.5664, 0.5747])
assert np.abs(expected_slice - image_slice).max() < 5e-2
def test_stable_diffusion_pix2pix_zero_k_lms(self):
pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs()
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.6367, 0.5459, 0.5146, 0.5479, 0.4905, 0.4753, 0.4961, 0.4629, 0.4624])
assert np.abs(expected_slice - image_slice).max() < 5e-2
def test_stable_diffusion_pix2pix_zero_intermediate_state(self):
number_of_steps = 0
def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array([0.1345, 0.268, 0.1539, 0.0726, 0.0959, 0.2261, -0.2673, 0.0277, -0.2062])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array([0.1393, 0.2637, 0.1617, 0.0724, 0.0987, 0.2271, -0.2666, 0.0299, -0.2104])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
callback_fn.has_been_called = False
pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs()
pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
inputs = self.get_inputs()
_ = pipe(**inputs)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 8.2 GB is allocated
assert mem_bytes < 8.2 * 10**9
@nightly
@require_torch_gpu
class InversionPipelineNightlyTests(unittest.TestCase):
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def setUpClass(cls):
raw_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png"
)
raw_image = raw_image.convert("RGB").resize((512, 512))
cls.raw_image = raw_image
def test_stable_diffusion_pix2pix_inversion(self):
pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
caption = "a photography of a cat with flowers"
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
output = pipe.invert(caption, image=self.raw_image, generator=generator, num_inference_steps=10)
inv_latents = output[0]
image_slice = inv_latents[0, -3:, -3:, -1].flatten()
assert inv_latents.shape == (1, 4, 64, 64)
expected_slice = np.array([0.8447, -0.0730, 0.7588, -1.2070, -0.4678, 0.1511, -0.8555, 1.1816, -0.7666])
assert np.abs(expected_slice - image_slice.cpu().numpy()).max() < 5e-2
def test_stable_diffusion_2_pix2pix_inversion(self):
pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
caption = "a photography of a cat with flowers"
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
output = pipe.invert(caption, image=self.raw_image, generator=generator, num_inference_steps=10)
inv_latents = output[0]
image_slice = inv_latents[0, -3:, -3:, -1].flatten()
assert inv_latents.shape == (1, 4, 64, 64)
expected_slice = np.array([0.8970, -0.1611, 0.4766, -1.1162, -0.5923, 0.1050, -0.9678, 1.0537, -0.6050])
assert np.abs(expected_slice - image_slice.cpu().numpy()).max() < 5e-2
def test_stable_diffusion_2_pix2pix_full(self):
# numpy array of https://huggingface.co/datasets/hf-internal-testing/diffusers-images/blob/main/pix2pix/dog_2.png
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/dog_2.npy"
)
pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
caption = "a photography of a cat with flowers"
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.manual_seed(0)
output = pipe.invert(caption, image=self.raw_image, generator=generator)
inv_latents = output[0]
source_prompts = 4 * ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"]
target_prompts = 4 * ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"]
source_embeds = pipe.get_embeds(source_prompts)
target_embeds = pipe.get_embeds(target_prompts)
image = pipe(
caption,
source_embeds=source_embeds,
target_embeds=target_embeds,
num_inference_steps=125,
cross_attention_guidance_amount=0.015,
generator=generator,
latents=inv_latents,
negative_prompt=caption,
output_type="np",
).images
mean_diff = np.abs(expected_image - image).mean()
assert mean_diff < 0.25
|
b023f77d7340b37c6f8c42a5ef95c5ae178e9547
|
0dddc0508138396c740901be4a0f9eebefb8fded
|
/ax/modelbridge/transforms/trial_as_task.py
|
b91f1857c5dfb88f7aa2f8d018b365a3513dc030
|
[
"MIT"
] |
permissive
|
facebook/Ax
|
473beb143016f95f4ec381ed1bd95b32c1ca31f8
|
6443cee30cbf8cec290200a7420a3db08e4b5445
|
refs/heads/main
| 2023-09-01T09:29:13.684709
| 2023-08-31T21:49:30
| 2023-08-31T21:49:30
| 169,880,381
| 2,207
| 315
|
MIT
| 2023-09-14T21:26:51
| 2019-02-09T15:23:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,437
|
py
|
trial_as_task.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, TYPE_CHECKING, Union
import numpy as np
from ax.core.observation import Observation, ObservationFeatures
from ax.core.parameter import ChoiceParameter, ParameterType
from ax.core.search_space import RobustSearchSpace, SearchSpace
from ax.exceptions.core import UnsupportedError
from ax.modelbridge.transforms.base import Transform
from ax.models.types import TConfig
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401
TRIAL_PARAM = "TRIAL_PARAM"
class TrialAsTask(Transform):
"""Convert trial to one or more task parameters.
How trial is mapped to parameter is specified with a map like
{parameter_name: {trial_index: level name}}.
For example,
{"trial_param1": {0: "level1", 1: "level1", 2: "level2"},}
will create choice parameters "trial_param1" with is_task=True.
Observations with trial 0 or 1 will have "trial_param1" set to "level1",
and those with trial 2 will have "trial_param1" set to "level2". Multiple
parameter names and mappings can be specified in this dict.
The trial level mapping can be specified in config["trial_level_map"]. If
not specified, defaults to a parameter with a level for every trial index.
For the reverse transform, if there are multiple mappings in the transform
the trial will not be set.
    Will raise if a trial index is not specified for every point in the
    training data.
Transform is done in-place.
"""
def __init__(
self,
search_space: Optional[SearchSpace] = None,
observations: Optional[List[Observation]] = None,
modelbridge: Optional["modelbridge_module.base.ModelBridge"] = None,
config: Optional[TConfig] = None,
) -> None:
assert observations is not None, "TrialAskTask requires observations"
# Identify values of trial.
trials = {obs.features.trial_index for obs in observations}
if isinstance(search_space, RobustSearchSpace):
raise UnsupportedError(
"TrialAsTask transform is not supported for RobustSearchSpace."
)
if None in trials:
raise ValueError(
"Unable to use trial as task since not all observations have "
"trial specified."
)
# Get trial level map
if config is not None and "trial_level_map" in config:
# pyre-ignore [9]
trial_level_map: Dict[str, Dict[Union[int, str], str]] = config[
"trial_level_map"
]
# Validate
self.trial_level_map: Dict[str, Dict[int, str]] = {}
for _p_name, level_dict in trial_level_map.items():
# cast trial index as an integer
int_keyed_level_dict = {
int(trial_index): v for trial_index, v in level_dict.items()
}
self.trial_level_map[_p_name] = int_keyed_level_dict
# Check that trials match those in data
level_map = set(int_keyed_level_dict.keys())
if not trials.issubset(level_map):
raise ValueError(
f"Not all trials in data ({trials}) contained "
f"in trial level map for {_p_name} ({level_map})"
)
else:
# Set TRIAL_PARAM for each trial to the corresponding trial_index.
# pyre-fixme[6]: Expected `Union[bytes, str, typing.SupportsInt]` for
# 1st param but got `Optional[np.int64]`.
self.trial_level_map = {TRIAL_PARAM: {int(b): str(b) for b in trials}}
if len(self.trial_level_map) == 1:
level_dict = next(iter(self.trial_level_map.values()))
self.inverse_map: Optional[Dict[str, int]] = {
v: k for k, v in level_dict.items()
}
else:
self.inverse_map = None
def transform_observation_features(
self, observation_features: List[ObservationFeatures]
) -> List[ObservationFeatures]:
for obsf in observation_features:
if obsf.trial_index is not None:
for p_name, level_dict in self.trial_level_map.items():
# pyre-fixme[6]: Expected `Union[bytes, str,
# typing.SupportsInt]` for 1st param but got `Optional[np.int64]`.
obsf.parameters[p_name] = level_dict[int(obsf.trial_index)]
obsf.trial_index = None
return observation_features
def _transform_search_space(self, search_space: SearchSpace) -> SearchSpace:
for p_name, level_dict in self.trial_level_map.items():
level_values = sorted(set(level_dict.values()))
if len(level_values) < 2:
details = (
f"only 1 found: {level_values}" if level_values else "none found"
)
raise ValueError(
f"TrialAsTask transform expects 2+ task params, {details}"
)
trial_param = ChoiceParameter(
name=p_name,
parameter_type=ParameterType.STRING,
# Expected `List[Optional[typing.Union[bool, float, str]]]` for 4th
# parameter `values` to call
# `ax.core.parameter.ChoiceParameter.__init__` but got
# `List[str]`.
# pyre-fixme[6]:
values=level_values,
is_ordered=False,
is_task=True,
sort_values=True,
)
search_space.add_parameter(trial_param)
return search_space
def untransform_observation_features(
self, observation_features: List[ObservationFeatures]
) -> List[ObservationFeatures]:
for obsf in observation_features:
for p_name in self.trial_level_map:
pval = obsf.parameters.pop(p_name)
if self.inverse_map is not None:
# pyre-fixme[61]: `pval` may not be initialized here.
obsf.trial_index = np.int64(self.inverse_map[pval])
return observation_features
|
cc00fb6afab40eb73cdd01ad972ec2bdaddd9bf0
|
8188f026dcfa3ca6c4e2d58e6c56d04d24e37a18
|
/projectq/tests/_factoring_test.py
|
541c1492c9ae531c547a6aa3ae64cfe566b039f4
|
[
"Apache-2.0"
] |
permissive
|
ProjectQ-Framework/ProjectQ
|
2e342da0622d4b5d513c15504556e95d3d0e2aea
|
67c660ca18725d23ab0b261a45e34873b6a58d03
|
refs/heads/develop
| 2023-09-04T02:18:25.581119
| 2023-03-09T16:03:57
| 2023-03-09T16:03:57
| 77,520,796
| 886
| 335
|
Apache-2.0
| 2023-07-24T07:07:15
| 2016-12-28T09:31:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,856
|
py
|
_factoring_test.py
|
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import projectq.libs.math
import projectq.setups.decompositions
from projectq.backends._sim._simulator_test import sim
from projectq.cengines import (
AutoReplacer,
DecompositionRuleSet,
InstructionFilter,
LocalOptimizer,
MainEngine,
TagRemover,
)
from projectq.libs.math import MultiplyByConstantModN
from projectq.meta import Control
from projectq.ops import QFT, All, BasicMathGate, H, Measure, Swap, X, get_inverse
rule_set = DecompositionRuleSet(modules=(projectq.libs.math, projectq.setups.decompositions))
assert sim  # Signals to linting tools that the fixture import is used.
def high_level_gates(eng, cmd):
g = cmd.gate
if g == QFT or get_inverse(g) == QFT or g == Swap:
return True
if isinstance(g, BasicMathGate):
return False
return eng.next_engine.is_available(cmd)
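# The filter above keeps QFT, its inverse, and Swap as primitive gates,
# forces BasicMathGate instances (e.g. MultiplyByConstantModN) to be
# decomposed, and defers everything else to the next engine in the chain.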
def get_main_engine(sim):
engine_list = [
AutoReplacer(rule_set),
InstructionFilter(high_level_gates),
TagRemover(),
LocalOptimizer(3),
AutoReplacer(rule_set),
TagRemover(),
LocalOptimizer(3),
]
return MainEngine(sim, engine_list)
def test_factoring(sim):
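    # One round of Shor's algorithm for N = 15, a = 2: a single control qubit
    # is recycled, and each H / controlled-MultiplyByConstantModN / H stanza
    # reads out one bit of the phase (semiclassical phase estimation, minus
    # the conditional rotations a full implementation would also apply).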
eng = get_main_engine(sim)
ctrl_qubit = eng.allocate_qubit()
N = 15
a = 2
x = eng.allocate_qureg(4)
X | x[0]
H | ctrl_qubit
with Control(eng, ctrl_qubit):
MultiplyByConstantModN(pow(a, 2**7, N), N) | x
H | ctrl_qubit
eng.flush()
cheat_tpl = sim.cheat()
idx = cheat_tpl[0][ctrl_qubit[0].id]
vec = cheat_tpl[1]
for i in range(len(vec)):
if abs(vec[i]) > 1.0e-8:
assert ((i >> idx) & 1) == 0
Measure | ctrl_qubit
assert int(ctrl_qubit) == 0
del vec, cheat_tpl
H | ctrl_qubit
with Control(eng, ctrl_qubit):
MultiplyByConstantModN(pow(a, 2, N), N) | x
H | ctrl_qubit
eng.flush()
cheat_tpl = sim.cheat()
idx = cheat_tpl[0][ctrl_qubit[0].id]
vec = cheat_tpl[1]
probability = 0.0
for i in range(len(vec)):
if abs(vec[i]) > 1.0e-8:
if ((i >> idx) & 1) == 0:
probability += abs(vec[i]) ** 2
assert probability == pytest.approx(0.5)
Measure | ctrl_qubit
All(Measure) | x
|
6dfb1c6695316b046a64403f905c23a337c2c0ce
|
af20739e34a39f0a0a99537ce047f502a3531ea5
|
/jupyter_server/extension/serverextension.py
|
6cd8dc14a30f8ac1f99258949a7c119e2d43af0a
|
[
"BSD-3-Clause"
] |
permissive
|
jupyter-server/jupyter_server
|
66cf0ac45804aa5dd6bf8dff7050db02f3696b92
|
93fde1ad9fece22607960184501f5c9c80cd3765
|
refs/heads/main
| 2023-09-04T03:44:41.696097
| 2023-08-31T08:20:36
| 2023-08-31T08:20:36
| 68,849,978
| 237
| 186
|
BSD-3-Clause
| 2023-09-09T02:42:25
| 2016-09-21T19:18:45
|
Python
|
UTF-8
|
Python
| false
| false
| 12,932
|
py
|
serverextension.py
|
"""Utilities for installing extensions"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import logging
import os
import sys
from jupyter_core.application import JupyterApp
from jupyter_core.paths import ENV_CONFIG_PATH, SYSTEM_CONFIG_PATH, jupyter_config_dir
from tornado.log import LogFormatter
from traitlets import Bool
from jupyter_server._version import __version__
from jupyter_server.extension.config import ExtensionConfigManager
from jupyter_server.extension.manager import ExtensionManager, ExtensionPackage
def _get_config_dir(user=False, sys_prefix=False):
"""Get the location of config files for the current context
    Returns the config directory path as a string.
Parameters
----------
user : bool [default: False]
Get the user's .jupyter config directory
sys_prefix : bool [default: False]
Get sys.prefix, i.e. ~/.envs/my-env/etc/jupyter
"""
if user and sys_prefix:
sys_prefix = False
if user:
extdir = jupyter_config_dir()
elif sys_prefix:
extdir = ENV_CONFIG_PATH[0]
else:
extdir = SYSTEM_CONFIG_PATH[0]
return extdir
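# Illustrative results (actual paths depend on the environment, so treat
# these as a sketch rather than guaranteed values):
#
#   _get_config_dir(user=True)       -> e.g. ~/.jupyter
#   _get_config_dir(sys_prefix=True) -> e.g. <sys.prefix>/etc/jupyter
#   _get_config_dir()                -> first entry of SYSTEM_CONFIG_PATH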
def _get_extmanager_for_context(write_dir="jupyter_server_config.d", user=False, sys_prefix=False):
"""Get an extension manager pointing at the current context
Returns the path to the current context and an ExtensionManager object.
Parameters
----------
write_dir : str [default: 'jupyter_server_config.d']
Name of config directory to write extension config.
user : bool [default: False]
Get the user's .jupyter config directory
sys_prefix : bool [default: False]
Get sys.prefix, i.e. ~/.envs/my-env/etc/jupyter
"""
config_dir = _get_config_dir(user=user, sys_prefix=sys_prefix)
config_manager = ExtensionConfigManager(
read_config_path=[config_dir],
write_config_dir=os.path.join(config_dir, write_dir),
)
extension_manager = ExtensionManager(
config_manager=config_manager,
)
return config_dir, extension_manager
class ArgumentConflict(ValueError):
pass
_base_flags = {}
_base_flags.update(JupyterApp.flags)
_base_flags.pop("y", None)
_base_flags.pop("generate-config", None)
_base_flags.update(
{
"user": (
{
"BaseExtensionApp": {
"user": True,
}
},
"Apply the operation only for the given user",
),
"system": (
{
"BaseExtensionApp": {
"user": False,
"sys_prefix": False,
}
},
"Apply the operation system-wide",
),
"sys-prefix": (
{
"BaseExtensionApp": {
"sys_prefix": True,
}
},
"Use sys.prefix as the prefix for installing extensions (for environments, packaging)",
),
"py": (
{
"BaseExtensionApp": {
"python": True,
}
},
"Install from a Python package",
),
}
)
_base_flags["python"] = _base_flags["py"]
_base_aliases = {}
_base_aliases.update(JupyterApp.aliases)
class BaseExtensionApp(JupyterApp):
"""Base extension installer app"""
_log_formatter_cls = LogFormatter # type:ignore[assignment]
flags = _base_flags
aliases = _base_aliases
version = __version__
user = Bool(False, config=True, help="Whether to do a user install")
sys_prefix = Bool(True, config=True, help="Use the sys.prefix as the prefix")
python = Bool(False, config=True, help="Install from a Python package")
def _log_format_default(self):
"""A default format for messages"""
return "%(message)s"
@property
def config_dir(self):
return _get_config_dir(user=self.user, sys_prefix=self.sys_prefix)
# Constants for the pretty-printed extension listing.
# Windows doesn't support coloring in the command line.
GREEN_ENABLED = "\033[32menabled\033[0m" if os.name != "nt" else "enabled"
RED_DISABLED = "\033[31mdisabled\033[0m" if os.name != "nt" else "disabled"
GREEN_OK = "\033[32mOK\033[0m" if os.name != "nt" else "ok"
RED_X = "\033[31m X\033[0m" if os.name != "nt" else " X"
# ------------------------------------------------------------------------------
# Public API
# ------------------------------------------------------------------------------
def toggle_server_extension_python(
import_name, enabled=None, parent=None, user=False, sys_prefix=True
):
"""Toggle the boolean setting for a given server extension
in a Jupyter config file.
"""
sys_prefix = False if user else sys_prefix
config_dir = _get_config_dir(user=user, sys_prefix=sys_prefix)
manager = ExtensionConfigManager(
read_config_path=[config_dir],
write_config_dir=os.path.join(config_dir, "jupyter_server_config.d"),
)
if enabled:
manager.enable(import_name)
else:
manager.disable(import_name)
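# Example (hypothetical extension name; the exact JSON layout is managed by
# ExtensionConfigManager, so treat this as a sketch):
#
#   toggle_server_extension_python("my_ext", enabled=True, sys_prefix=True)
#
# enables "my_ext" by writing a config fragment into
# <sys.prefix>/etc/jupyter/jupyter_server_config.d/.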
# ----------------------------------------------------------------------
# Applications
# ----------------------------------------------------------------------
flags = {}
flags.update(BaseExtensionApp.flags)
flags.pop("y", None)
flags.pop("generate-config", None)
flags.update(
{
"user": (
{
"ToggleServerExtensionApp": {
"user": True,
}
},
"Perform the operation for the current user",
),
"system": (
{
"ToggleServerExtensionApp": {
"user": False,
"sys_prefix": False,
}
},
"Perform the operation system-wide",
),
"sys-prefix": (
{
"ToggleServerExtensionApp": {
"sys_prefix": True,
}
},
"Use sys.prefix as the prefix for installing server extensions",
),
"py": (
{
"ToggleServerExtensionApp": {
"python": True,
}
},
"Install from a Python package",
),
}
)
flags["python"] = flags["py"]
_desc = "Enable/disable a server extension using frontend configuration files."
class ToggleServerExtensionApp(BaseExtensionApp):
"""A base class for enabling/disabling extensions"""
name = "jupyter server extension enable/disable"
description = _desc
flags = flags
_toggle_value = Bool()
_toggle_pre_message = ""
_toggle_post_message = ""
def toggle_server_extension(self, import_name):
"""Change the status of a named server extension.
Uses the value of `self._toggle_value`.
Parameters
        ----------
import_name : str
Importable Python module (dotted-notation) exposing the magic-named
`load_jupyter_server_extension` function
"""
# Create an extension manager for this instance.
config_dir, extension_manager = _get_extmanager_for_context(
user=self.user, sys_prefix=self.sys_prefix
)
try:
self.log.info(f"{self._toggle_pre_message.capitalize()}: {import_name}")
self.log.info(f"- Writing config: {config_dir}")
# Validate the server extension.
self.log.info(f" - Validating {import_name}...")
# Interface with the Extension Package and validate.
extpkg = ExtensionPackage(name=import_name)
extpkg.validate()
version = extpkg.version
self.log.info(f" {import_name} {version} {GREEN_OK}")
# Toggle extension config.
config = extension_manager.config_manager
if self._toggle_value is True:
config.enable(import_name)
else:
config.disable(import_name)
# If successful, let's log.
self.log.info(f" - Extension successfully {self._toggle_post_message}.")
except Exception as err:
self.log.info(f" {RED_X} Validation failed: {err}")
def start(self):
"""Perform the App's actions as configured"""
if not self.extra_args:
sys.exit("Please specify a server extension/package to enable or disable")
for arg in self.extra_args:
self.toggle_server_extension(arg)
class EnableServerExtensionApp(ToggleServerExtensionApp):
"""An App that enables (and validates) Server Extensions"""
name = "jupyter server extension enable"
description = """
Enable a server extension in configuration.
Usage
jupyter server extension enable [--system|--sys-prefix]
"""
_toggle_value = True # type:ignore[assignment]
_toggle_pre_message = "enabling"
_toggle_post_message = "enabled"
class DisableServerExtensionApp(ToggleServerExtensionApp):
"""An App that disables Server Extensions"""
name = "jupyter server extension disable"
description = """
Disable a server extension in configuration.
Usage
jupyter server extension disable [--system|--sys-prefix]
"""
_toggle_value = False # type:ignore[assignment]
_toggle_pre_message = "disabling"
_toggle_post_message = "disabled"
class ListServerExtensionsApp(BaseExtensionApp):
"""An App that lists (and validates) Server Extensions"""
name = "jupyter server extension list"
version = __version__
description = "List all server extensions known by the configuration system"
def list_server_extensions(self):
"""List all enabled and disabled server extensions, by config path
Enabled extensions are validated, potentially generating warnings.
"""
configurations = (
{"user": True, "sys_prefix": False},
{"user": False, "sys_prefix": True},
{"user": False, "sys_prefix": False},
)
for option in configurations:
config_dir = _get_config_dir(**option)
self.log.info(f"Config dir: {config_dir}")
write_dir = "jupyter_server_config.d"
config_manager = ExtensionConfigManager(
read_config_path=[config_dir],
write_config_dir=os.path.join(config_dir, write_dir),
)
jpserver_extensions = config_manager.get_jpserver_extensions()
for name, enabled in jpserver_extensions.items():
# Attempt to get extension metadata
self.log.info(f" {name} {GREEN_ENABLED if enabled else RED_DISABLED}")
try:
self.log.info(f" - Validating {name}...")
extension = ExtensionPackage(name=name, enabled=enabled)
if not extension.validate():
msg = "validation failed"
raise ValueError(msg)
version = extension.version
self.log.info(f" {name} {version} {GREEN_OK}")
except Exception as err:
exc_info = False
if int(self.log_level) <= logging.DEBUG:
exc_info = True
self.log.warning(f" {RED_X} {err}", exc_info=exc_info)
# Add a blank line between paths.
self.log.info("")
def start(self):
"""Perform the App's actions as configured"""
self.list_server_extensions()
_examples = """
jupyter server extension list # list all configured server extensions
jupyter server extension enable --py <packagename> # enable all server extensions in a Python package
jupyter server extension disable --py <packagename> # disable all server extensions in a Python package
"""
class ServerExtensionApp(BaseExtensionApp):
"""Root level server extension app"""
name = "jupyter server extension"
version = __version__
description: str = "Work with Jupyter server extensions"
examples = _examples
subcommands: dict = {
"enable": (EnableServerExtensionApp, "Enable a server extension"),
"disable": (DisableServerExtensionApp, "Disable a server extension"),
"list": (ListServerExtensionsApp, "List server extensions"),
}
def start(self):
"""Perform the App's actions as configured"""
super().start()
        # The above should have called a subcommand and raised NoStart; if we
        # get here, it didn't, so we exit with a message listing the subcommands.
subcmds = ", ".join(sorted(self.subcommands))
sys.exit("Please supply at least one subcommand: %s" % subcmds)
main = ServerExtensionApp.launch_instance
if __name__ == "__main__":
main()
|
6e9538540960fd66a1df961fca20a8e4328f294f
|
769f6d88fd777459eb60eb1bbb0fba17cb20d963
|
/Chapter07/12_scrape_job_stackoverflow.py
|
b9d35afbb9da1caf760491201d1cc3d2b5d9e8bd
|
[
"MIT"
] |
permissive
|
PacktPublishing/Python-Web-Scraping-Cookbook
|
141379d09abe2c7d8f408858a2eb44ff0fe3ef26
|
030eb974ba1437b2590b59d38f19fb697bbf9d4c
|
refs/heads/master
| 2023-02-16T04:29:49.942243
| 2023-01-30T04:19:03
| 2023-01-30T04:19:03
| 120,744,571
| 115
| 105
|
MIT
| 2019-10-03T17:38:37
| 2018-02-08T10:08:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 493
|
py
|
12_scrape_job_stackoverflow.py
|
from bs4 import BeautifulSoup
import json
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
with open("spacex-job-listing.html", "r") as file:
content = file.read()
bs = BeautifulSoup(content, "lxml")
script_tag = bs.find("script", {"type": "application/ld+json"})
job_listing_contents = json.loads(script_tag.contents[0])
print(job_listing_contents)
# tokenize the job description and drop English stop words
# (assumes the JSON-LD object exposes a "description" field)
tokenized = word_tokenize(job_listing_contents["description"])
stop_list = stopwords.words('english')
cleaned = [word for word in tokenized if word not in stop_list]
# print the skills
for skill in job_listing_contents["skills"]:
print(skill)
|
16213c9802f90ab38a068b655ba98513fdc3f77d
|
6a8bc7da3104726f894ae360fce6a43a54b30812
|
/demo/blocks_inputs/run.py
|
b259312d8ecab627ab77007ddb964da528438921
|
[
"Apache-2.0"
] |
permissive
|
gradio-app/gradio
|
0b6b29bb0029ad3b8fc1b143f111b1230b29d23a
|
e4e7a4319924aaf51dcb18d07d0c9953d4011074
|
refs/heads/main
| 2023-09-01T10:56:50.822550
| 2023-09-01T00:28:01
| 2023-09-01T00:28:01
| 162,405,963
| 21,224
| 1,537
|
Apache-2.0
| 2023-09-14T21:42:00
| 2018-12-19T08:24:04
|
Python
|
UTF-8
|
Python
| false
| false
| 982
|
py
|
run.py
|
import gradio as gr
import os
def combine(a, b):
return a + " " + b
def mirror(x):
return x
with gr.Blocks() as demo:
txt = gr.Textbox(label="Input", lines=2)
txt_2 = gr.Textbox(label="Input 2")
txt_3 = gr.Textbox(value="", label="Output")
btn = gr.Button(value="Submit")
btn.click(combine, inputs=[txt, txt_2], outputs=[txt_3])
with gr.Row():
im = gr.Image()
im_2 = gr.Image()
btn = gr.Button(value="Mirror Image")
btn.click(mirror, inputs=[im], outputs=[im_2])
gr.Markdown("## Text Examples")
gr.Examples(
[["hi", "Adam"], ["hello", "Eve"]],
[txt, txt_2],
txt_3,
combine,
cache_examples=True,
)
gr.Markdown("## Image Examples")
gr.Examples(
examples=[os.path.join(os.path.dirname(__file__), "lion.jpg")],
inputs=im,
outputs=im_2,
fn=mirror,
cache_examples=True,
)
if __name__ == "__main__":
demo.launch()
|
9609019bf359e954e410463aa260a736372424ed
|
e876a1e237ba2c7602191df4618ea5d3f2d581ee
|
/tests/middleware/test_errors.py
|
392c2ba16a6d8074314aa4267c6f4071047b1366
|
[
"BSD-3-Clause"
] |
permissive
|
encode/starlette
|
f10ae42d3065ac04aeca504c4ac1c968919b33c4
|
a8b8856ce393a82ab4a714131085dbc4d658e34d
|
refs/heads/master
| 2023-08-17T02:18:09.168263
| 2023-07-24T15:46:24
| 2023-07-24T15:46:24
| 138,597,372
| 8,951
| 1,072
|
BSD-3-Clause
| 2023-09-13T11:50:46
| 2018-06-25T13:16:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,233
|
py
|
test_errors.py
|
import pytest
from starlette.applications import Starlette
from starlette.background import BackgroundTask
from starlette.middleware.errors import ServerErrorMiddleware
from starlette.responses import JSONResponse, Response
from starlette.routing import Route
def test_handler(test_client_factory):
async def app(scope, receive, send):
raise RuntimeError("Something went wrong")
def error_500(request, exc):
return JSONResponse({"detail": "Server Error"}, status_code=500)
app = ServerErrorMiddleware(app, handler=error_500)
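    # raise_server_exceptions=False makes the test client return the 500
    # response instead of re-raising the RuntimeError inside the test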
client = test_client_factory(app, raise_server_exceptions=False)
response = client.get("/")
assert response.status_code == 500
assert response.json() == {"detail": "Server Error"}
def test_debug_text(test_client_factory):
async def app(scope, receive, send):
raise RuntimeError("Something went wrong")
app = ServerErrorMiddleware(app, debug=True)
client = test_client_factory(app, raise_server_exceptions=False)
response = client.get("/")
assert response.status_code == 500
assert response.headers["content-type"].startswith("text/plain")
assert "RuntimeError: Something went wrong" in response.text
def test_debug_html(test_client_factory):
async def app(scope, receive, send):
raise RuntimeError("Something went wrong")
app = ServerErrorMiddleware(app, debug=True)
client = test_client_factory(app, raise_server_exceptions=False)
response = client.get("/", headers={"Accept": "text/html, */*"})
assert response.status_code == 500
assert response.headers["content-type"].startswith("text/html")
assert "RuntimeError" in response.text
def test_debug_after_response_sent(test_client_factory):
async def app(scope, receive, send):
response = Response(b"", status_code=204)
await response(scope, receive, send)
raise RuntimeError("Something went wrong")
app = ServerErrorMiddleware(app, debug=True)
client = test_client_factory(app)
with pytest.raises(RuntimeError):
client.get("/")
def test_debug_not_http(test_client_factory):
"""
    ServerErrorMiddleware should just pass through any non-http messages as-is.
"""
async def app(scope, receive, send):
raise RuntimeError("Something went wrong")
app = ServerErrorMiddleware(app)
with pytest.raises(RuntimeError):
client = test_client_factory(app)
with client.websocket_connect("/"):
pass # pragma: nocover
def test_background_task(test_client_factory):
accessed_error_handler = False
def error_handler(request, exc):
nonlocal accessed_error_handler
accessed_error_handler = True
def raise_exception():
raise Exception("Something went wrong")
async def endpoint(request):
task = BackgroundTask(raise_exception)
return Response(status_code=204, background=task)
app = Starlette(
routes=[Route("/", endpoint=endpoint)],
exception_handlers={Exception: error_handler},
)
client = test_client_factory(app, raise_server_exceptions=False)
response = client.get("/")
assert response.status_code == 204
assert accessed_error_handler
|
daf893d1c996ea0a0c2ab2276d64ee42d036a167
|
f487532281c1c6a36a5c62a29744d8323584891b
|
/sdk/python/pulumi_azure/signalr/shared_private_link_resource.py
|
48ec7abef8737deb3351c63c473ae239daac59fb
|
[
"MPL-2.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure
|
a8f8f21c46c802aecf1397c737662ddcc438a2db
|
c16962e5c4f5810efec2806b8bb49d0da960d1ea
|
refs/heads/master
| 2023-08-25T00:17:05.290397
| 2023-08-24T06:11:55
| 2023-08-24T06:11:55
| 103,183,737
| 129
| 57
|
Apache-2.0
| 2023-09-13T05:44:10
| 2017-09-11T20:19:15
|
Java
|
UTF-8
|
Python
| false
| false
| 23,299
|
py
|
shared_private_link_resource.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SharedPrivateLinkResourceArgs', 'SharedPrivateLinkResource']
@pulumi.input_type
class SharedPrivateLinkResourceArgs:
def __init__(__self__, *,
signalr_service_id: pulumi.Input[str],
sub_resource_name: pulumi.Input[str],
target_resource_id: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
request_message: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SharedPrivateLinkResource resource.
:param pulumi.Input[str] signalr_service_id: The id of the Signalr Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] sub_resource_name: The sub resource name which the Signalr Private Endpoint can connect to. Possible values are `sites`, `vault`. Changing this forces a new resource to be created.
:param pulumi.Input[str] target_resource_id: The ID of the Shared Private Link Enabled Remote Resource which this Signalr Private Endpoint should be connected to. Changing this forces a new resource to be created.
> **NOTE:** The `sub_resource_name` should match with the type of the `target_resource_id` that's being specified.
:param pulumi.Input[str] name: The name of the Signalr Shared Private Link Resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] request_message: The request message for requesting approval of the Shared Private Link Enabled Remote Resource.
"""
pulumi.set(__self__, "signalr_service_id", signalr_service_id)
pulumi.set(__self__, "sub_resource_name", sub_resource_name)
pulumi.set(__self__, "target_resource_id", target_resource_id)
if name is not None:
pulumi.set(__self__, "name", name)
if request_message is not None:
pulumi.set(__self__, "request_message", request_message)
@property
@pulumi.getter(name="signalrServiceId")
def signalr_service_id(self) -> pulumi.Input[str]:
"""
The id of the Signalr Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "signalr_service_id")
@signalr_service_id.setter
def signalr_service_id(self, value: pulumi.Input[str]):
pulumi.set(self, "signalr_service_id", value)
@property
@pulumi.getter(name="subResourceName")
def sub_resource_name(self) -> pulumi.Input[str]:
"""
The sub resource name which the Signalr Private Endpoint can connect to. Possible values are `sites`, `vault`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "sub_resource_name")
@sub_resource_name.setter
def sub_resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "sub_resource_name", value)
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> pulumi.Input[str]:
"""
The ID of the Shared Private Link Enabled Remote Resource which this Signalr Private Endpoint should be connected to. Changing this forces a new resource to be created.
> **NOTE:** The `sub_resource_name` should match with the type of the `target_resource_id` that's being specified.
"""
return pulumi.get(self, "target_resource_id")
@target_resource_id.setter
def target_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "target_resource_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Signalr Shared Private Link Resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="requestMessage")
def request_message(self) -> Optional[pulumi.Input[str]]:
"""
The request message for requesting approval of the Shared Private Link Enabled Remote Resource.
"""
return pulumi.get(self, "request_message")
@request_message.setter
def request_message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_message", value)
@pulumi.input_type
class _SharedPrivateLinkResourceState:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
request_message: Optional[pulumi.Input[str]] = None,
signalr_service_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
sub_resource_name: Optional[pulumi.Input[str]] = None,
target_resource_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SharedPrivateLinkResource resources.
:param pulumi.Input[str] name: The name of the Signalr Shared Private Link Resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] request_message: The request message for requesting approval of the Shared Private Link Enabled Remote Resource.
:param pulumi.Input[str] signalr_service_id: The id of the Signalr Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] status: The status of a private endpoint connection. Possible values are `Pending`, `Approved`, `Rejected` or `Disconnected`.
:param pulumi.Input[str] sub_resource_name: The sub resource name which the Signalr Private Endpoint can connect to. Possible values are `sites`, `vault`. Changing this forces a new resource to be created.
:param pulumi.Input[str] target_resource_id: The ID of the Shared Private Link Enabled Remote Resource which this Signalr Private Endpoint should be connected to. Changing this forces a new resource to be created.
> **NOTE:** The `sub_resource_name` should match with the type of the `target_resource_id` that's being specified.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if request_message is not None:
pulumi.set(__self__, "request_message", request_message)
if signalr_service_id is not None:
pulumi.set(__self__, "signalr_service_id", signalr_service_id)
if status is not None:
pulumi.set(__self__, "status", status)
if sub_resource_name is not None:
pulumi.set(__self__, "sub_resource_name", sub_resource_name)
if target_resource_id is not None:
pulumi.set(__self__, "target_resource_id", target_resource_id)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Signalr Shared Private Link Resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="requestMessage")
def request_message(self) -> Optional[pulumi.Input[str]]:
"""
The request message for requesting approval of the Shared Private Link Enabled Remote Resource.
"""
return pulumi.get(self, "request_message")
@request_message.setter
def request_message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "request_message", value)
@property
@pulumi.getter(name="signalrServiceId")
def signalr_service_id(self) -> Optional[pulumi.Input[str]]:
"""
The id of the Signalr Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "signalr_service_id")
@signalr_service_id.setter
def signalr_service_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "signalr_service_id", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of a private endpoint connection. Possible values are `Pending`, `Approved`, `Rejected` or `Disconnected`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="subResourceName")
def sub_resource_name(self) -> Optional[pulumi.Input[str]]:
"""
The sub resource name which the Signalr Private Endpoint can connect to. Possible values are `sites`, `vault`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "sub_resource_name")
@sub_resource_name.setter
def sub_resource_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_resource_name", value)
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Shared Private Link Enabled Remote Resource which this Signalr Private Endpoint should be connected to. Changing this forces a new resource to be created.
> **NOTE:** The `sub_resource_name` should match with the type of the `target_resource_id` that's being specified.
"""
return pulumi.get(self, "target_resource_id")
@target_resource_id.setter
def target_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_resource_id", value)
class SharedPrivateLinkResource(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
request_message: Optional[pulumi.Input[str]] = None,
signalr_service_id: Optional[pulumi.Input[str]] = None,
sub_resource_name: Optional[pulumi.Input[str]] = None,
target_resource_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages the Shared Private Link Resource for a Signalr service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
current = azure.core.get_client_config()
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="east us")
example_key_vault = azure.keyvault.KeyVault("exampleKeyVault",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
tenant_id=current.tenant_id,
sku_name="standard",
soft_delete_retention_days=7,
access_policies=[azure.keyvault.KeyVaultAccessPolicyArgs(
tenant_id=current.tenant_id,
object_id=current.object_id,
certificate_permissions=["ManageContacts"],
key_permissions=["Create"],
secret_permissions=["Set"],
)])
        test = azure.signalr.Service("test",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            sku=azure.signalr.ServiceSkuArgs(
                name="Standard_S1",
                capacity=1,
            ))
        example_shared_private_link_resource = azure.signalr.SharedPrivateLinkResource("exampleSharedPrivateLinkResource",
            signalr_service_id=test.id,
            sub_resource_name="vault",
            target_resource_id=example_key_vault.id)
```
## Import
Signalr Shared Private Link Resource can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:signalr/sharedPrivateLinkResource:SharedPrivateLinkResource example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.SignalRService/signalR/signalr1/sharedPrivateLinkResources/resource1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: The name of the Signalr Shared Private Link Resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] request_message: The request message for requesting approval of the Shared Private Link Enabled Remote Resource.
:param pulumi.Input[str] signalr_service_id: The id of the Signalr Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] sub_resource_name: The sub resource name which the Signalr Private Endpoint can connect to. Possible values are `sites`, `vault`. Changing this forces a new resource to be created.
:param pulumi.Input[str] target_resource_id: The ID of the Shared Private Link Enabled Remote Resource which this Signalr Private Endpoint should be connected to. Changing this forces a new resource to be created.
> **NOTE:** The `sub_resource_name` should match with the type of the `target_resource_id` that's being specified.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SharedPrivateLinkResourceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages the Shared Private Link Resource for a Signalr service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
current = azure.core.get_client_config()
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="east us")
example_key_vault = azure.keyvault.KeyVault("exampleKeyVault",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
tenant_id=current.tenant_id,
sku_name="standard",
soft_delete_retention_days=7,
access_policies=[azure.keyvault.KeyVaultAccessPolicyArgs(
tenant_id=current.tenant_id,
object_id=current.object_id,
certificate_permissions=["ManageContacts"],
key_permissions=["Create"],
secret_permissions=["Set"],
)])
        test = azure.signalr.Service("test",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            sku=azure.signalr.ServiceSkuArgs(
                name="Standard_S1",
                capacity=1,
            ))
        example_shared_private_link_resource = azure.signalr.SharedPrivateLinkResource("exampleSharedPrivateLinkResource",
            signalr_service_id=test.id,
            sub_resource_name="vault",
            target_resource_id=example_key_vault.id)
```
## Import
Signalr Shared Private Link Resource can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:signalr/sharedPrivateLinkResource:SharedPrivateLinkResource example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.SignalRService/signalR/signalr1/sharedPrivateLinkResources/resource1
```
:param str resource_name: The name of the resource.
:param SharedPrivateLinkResourceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SharedPrivateLinkResourceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
request_message: Optional[pulumi.Input[str]] = None,
signalr_service_id: Optional[pulumi.Input[str]] = None,
sub_resource_name: Optional[pulumi.Input[str]] = None,
target_resource_id: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SharedPrivateLinkResourceArgs.__new__(SharedPrivateLinkResourceArgs)
__props__.__dict__["name"] = name
__props__.__dict__["request_message"] = request_message
if signalr_service_id is None and not opts.urn:
raise TypeError("Missing required property 'signalr_service_id'")
__props__.__dict__["signalr_service_id"] = signalr_service_id
if sub_resource_name is None and not opts.urn:
raise TypeError("Missing required property 'sub_resource_name'")
__props__.__dict__["sub_resource_name"] = sub_resource_name
if target_resource_id is None and not opts.urn:
raise TypeError("Missing required property 'target_resource_id'")
__props__.__dict__["target_resource_id"] = target_resource_id
__props__.__dict__["status"] = None
super(SharedPrivateLinkResource, __self__).__init__(
'azure:signalr/sharedPrivateLinkResource:SharedPrivateLinkResource',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
request_message: Optional[pulumi.Input[str]] = None,
signalr_service_id: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
sub_resource_name: Optional[pulumi.Input[str]] = None,
target_resource_id: Optional[pulumi.Input[str]] = None) -> 'SharedPrivateLinkResource':
"""
Get an existing SharedPrivateLinkResource resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: The name of the Signalr Shared Private Link Resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] request_message: The request message for requesting approval of the Shared Private Link Enabled Remote Resource.
:param pulumi.Input[str] signalr_service_id: The id of the Signalr Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] status: The status of a private endpoint connection. Possible values are `Pending`, `Approved`, `Rejected` or `Disconnected`.
:param pulumi.Input[str] sub_resource_name: The sub resource name which the Signalr Private Endpoint can connect to. Possible values are `sites`, `vault`. Changing this forces a new resource to be created.
:param pulumi.Input[str] target_resource_id: The ID of the Shared Private Link Enabled Remote Resource which this Signalr Private Endpoint should be connected to. Changing this forces a new resource to be created.
> **NOTE:** The `sub_resource_name` should match with the type of the `target_resource_id` that's being specified.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SharedPrivateLinkResourceState.__new__(_SharedPrivateLinkResourceState)
__props__.__dict__["name"] = name
__props__.__dict__["request_message"] = request_message
__props__.__dict__["signalr_service_id"] = signalr_service_id
__props__.__dict__["status"] = status
__props__.__dict__["sub_resource_name"] = sub_resource_name
__props__.__dict__["target_resource_id"] = target_resource_id
return SharedPrivateLinkResource(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Signalr Shared Private Link Resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="requestMessage")
def request_message(self) -> pulumi.Output[Optional[str]]:
"""
The request message for requesting approval of the Shared Private Link Enabled Remote Resource.
"""
return pulumi.get(self, "request_message")
@property
@pulumi.getter(name="signalrServiceId")
def signalr_service_id(self) -> pulumi.Output[str]:
"""
The id of the Signalr Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "signalr_service_id")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of a private endpoint connection. Possible values are `Pending`, `Approved`, `Rejected` or `Disconnected`.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="subResourceName")
def sub_resource_name(self) -> pulumi.Output[str]:
"""
The sub resource name which the Signalr Private Endpoint can connect to. Possible values are `sites`, `vault`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "sub_resource_name")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> pulumi.Output[str]:
"""
The ID of the Shared Private Link Enabled Remote Resource which this Signalr Private Endpoint should be connected to. Changing this forces a new resource to be created.
> **NOTE:** The `sub_resource_name` should match with the type of the `target_resource_id` that's being specified.
"""
return pulumi.get(self, "target_resource_id")
|
32fce36b73bad818d58f641e0826e9adfad5b0f4
|
113977ddcc6be6e8cb71e3c594dc373918202b8c
|
/deezer_downloader/web/music_backend.py
|
c354bde9f62109afdde760ba99de63af9be383a4
|
[
"MIT"
] |
permissive
|
kmille/deezer-downloader
|
33030ee0903a869b30c5951442eef36e7d9f5dc5
|
4c4674a6532be7676650c8f715f7596bd991c62e
|
refs/heads/master
| 2023-08-08T19:09:44.982478
| 2023-08-01T10:37:04
| 2023-08-01T10:37:04
| 154,390,777
| 490
| 88
|
MIT
| 2023-08-01T10:37:06
| 2018-10-23T20:14:27
|
Python
|
UTF-8
|
Python
| false
| false
| 11,048
|
py
|
music_backend.py
|
import time
import os.path
from os.path import basename
import mpd
import platform
from zipfile import ZipFile, ZIP_DEFLATED
from deezer_downloader.configuration import config
from deezer_downloader.youtubedl import youtubedl_download
from deezer_downloader.spotify import get_songs_from_spotify_website
from deezer_downloader.deezer import TYPE_TRACK, TYPE_ALBUM, TYPE_PLAYLIST, get_song_infos_from_deezer_website, download_song, parse_deezer_playlist, deezer_search, get_deezer_favorites
from deezer_downloader.deezer import Deezer403Exception, Deezer404Exception
from deezer_downloader.threadpool_queue import ThreadpoolScheduler, report_progress
sched = ThreadpoolScheduler()
def check_download_dirs_exist():
for directory in [config["download_dirs"]["songs"], config["download_dirs"]["zips"], config["download_dirs"]["albums"],
config["download_dirs"]["playlists"], config["download_dirs"]["youtubedl"]]:
os.makedirs(directory, exist_ok=True)
check_download_dirs_exist()
def make_song_paths_relative_to_mpd_root(songs, prefix=""):
# ensure last slash
config["mpd"]["music_dir_root"] = os.path.join(config["mpd"]["music_dir_root"], '')
songs_paths_relative_to_mpd_root = []
for song in songs:
songs_paths_relative_to_mpd_root.append(prefix + song[len(config["mpd"]["music_dir_root"]):])
return songs_paths_relative_to_mpd_root
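# e.g. with music_dir_root = "/var/lib/mpd/music/" (an illustrative path), the
# absolute filename "/var/lib/mpd/music/songs/a.mp3" becomes "songs/a.mp3"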
def update_mpd_db(songs, add_to_playlist):
# songs: list of music files or just a string (file path)
if not config["mpd"].getboolean("use_mpd"):
return
print("Updating mpd database")
timeout_counter = 0
mpd_client = mpd.MPDClient(use_unicode=True)
try:
mpd_client.connect(config["mpd"]["host"], config["mpd"].getint("port"))
except ConnectionRefusedError as e:
print("ERROR connecting to MPD ({}:{}): {}".format(config["mpd"]["host"], config["mpd"]["port"], e))
return
mpd_client.update()
if add_to_playlist:
        songs = [songs] if not isinstance(songs, list) else songs
songs = make_song_paths_relative_to_mpd_root(songs)
while len(mpd_client.search("file", songs[0])) == 0:
            # mpd_client.update() does not block, so wait for the song to appear
if timeout_counter == 10:
print("Tried it {} times. Give up now.".format(timeout_counter))
return
print("'{}' not found in the music db. Let's wait for it".format(songs[0]))
timeout_counter += 1
time.sleep(2)
for song in songs:
try:
mpd_client.add(song)
print("Added to mpd playlist: '{}'".format(song))
except mpd.base.CommandError as mpd_error:
print("ERROR adding '{}' to playlist: {}".format(song, mpd_error))
def clean_filename(path):
path = path.replace("\t", " ")
if any(platform.win32_ver()):
path.replace("\"", "'")
array_of_special_characters = ['<', '>', ':', '"', '/', '\\', '|', '?', '*']
else:
array_of_special_characters = ['/', ':', '"', '?']
return ''.join([c for c in path if c not in array_of_special_characters])
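# e.g. on Linux clean_filename('AC/DC: "Back?"') returns 'ACDC Back'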
def download_song_and_get_absolute_filename(search_type, song, playlist_name=None):
if search_type == TYPE_ALBUM:
song_filename = "{:02d} - {} {}.mp3".format(int(song['TRACK_NUMBER']),
song['ART_NAME'],
song['SNG_TITLE'])
else:
song_filename = "{} - {}.mp3".format(song['ART_NAME'],
song['SNG_TITLE'])
song_filename = clean_filename(song_filename)
if search_type == TYPE_TRACK:
absolute_filename = os.path.join(config["download_dirs"]["songs"], song_filename)
elif search_type == TYPE_ALBUM:
album_name = "{} - {}".format(song['ART_NAME'], song['ALB_TITLE'])
album_name = clean_filename(album_name)
album_dir = os.path.join(config["download_dirs"]["albums"], album_name)
if not os.path.exists(album_dir):
os.mkdir(album_dir)
absolute_filename = os.path.join(album_dir, song_filename)
elif search_type == TYPE_PLAYLIST:
        assert isinstance(playlist_name, str)
playlist_name = clean_filename(playlist_name)
playlist_dir = os.path.join(config["download_dirs"]["playlists"], playlist_name)
if not os.path.exists(playlist_dir):
os.mkdir(playlist_dir)
absolute_filename = os.path.join(playlist_dir, song_filename)
if os.path.exists(absolute_filename):
print("Skipping song '{}'. Already exists.".format(absolute_filename))
else:
print("Downloading '{}'".format(song_filename))
download_song(song, absolute_filename)
return absolute_filename
def create_zip_file(songs_absolute_location):
    # take the first song in the list and use its parent dir (the album/playlist name)
parent_dir = basename(os.path.dirname(songs_absolute_location[0]))
location_zip_file = os.path.join(config["download_dirs"]["zips"], "{}.zip".format(parent_dir))
print("Creating zip file '{}'".format(location_zip_file))
with ZipFile(location_zip_file, 'w', compression=ZIP_DEFLATED) as zip:
for song_location in songs_absolute_location:
try:
print("Adding song {}".format(song_location))
zip.write(song_location, arcname=os.path.join(parent_dir, basename(song_location)))
except FileNotFoundError:
print("Could not find file '{}'".format(song_location))
print("Done with the zip")
return location_zip_file
def create_m3u8_file(songs_absolute_location):
playlist_directory, __ = os.path.split(songs_absolute_location[0])
# 00 as prefix => will be shown as first in dir listing
m3u8_filename = "00 {}.m3u8".format(os.path.basename(playlist_directory))
print("Creating m3u8 file: '{}'".format(m3u8_filename))
m3u8_file_abs = os.path.join(playlist_directory, m3u8_filename)
with open(m3u8_file_abs, "w") as f:
for song in songs_absolute_location:
if os.path.exists(song):
f.write(basename(song) + "\n")
    # add the m3u8 file so that it will be zipped too
songs_absolute_location.append(m3u8_file_abs)
return songs_absolute_location
@sched.register_command()
def download_deezer_song_and_queue(track_id, add_to_playlist):
song = get_song_infos_from_deezer_website(TYPE_TRACK, track_id)
absolute_filename = download_song_and_get_absolute_filename(TYPE_TRACK, song)
update_mpd_db(absolute_filename, add_to_playlist)
return make_song_paths_relative_to_mpd_root([absolute_filename])
@sched.register_command()
def download_deezer_album_and_queue_and_zip(album_id, add_to_playlist, create_zip):
songs = get_song_infos_from_deezer_website(TYPE_ALBUM, album_id)
songs_absolute_location = []
for i, song in enumerate(songs):
report_progress(i, len(songs))
        assert isinstance(song, dict)
absolute_filename = download_song_and_get_absolute_filename(TYPE_ALBUM, song)
songs_absolute_location.append(absolute_filename)
update_mpd_db(songs_absolute_location, add_to_playlist)
if create_zip:
return [create_zip_file(songs_absolute_location)]
return make_song_paths_relative_to_mpd_root(songs_absolute_location)
@sched.register_command()
def download_deezer_playlist_and_queue_and_zip(playlist_id, add_to_playlist, create_zip):
playlist_name, songs = parse_deezer_playlist(playlist_id)
songs_absolute_location = []
for i, song in enumerate(songs):
report_progress(i, len(songs))
absolute_filename = download_song_and_get_absolute_filename(TYPE_PLAYLIST, song, playlist_name)
songs_absolute_location.append(absolute_filename)
update_mpd_db(songs_absolute_location, add_to_playlist)
songs_with_m3u8_file = create_m3u8_file(songs_absolute_location)
if create_zip:
return [create_zip_file(songs_with_m3u8_file)]
return make_song_paths_relative_to_mpd_root(songs_absolute_location)
@sched.register_command()
def download_spotify_playlist_and_queue_and_zip(playlist_name, playlist_id, add_to_playlist, create_zip):
songs = get_songs_from_spotify_website(playlist_id,
config["proxy"]["server"])
songs_absolute_location = []
print(f"We got {len(songs)} songs from the Spotify playlist")
for i, song_of_playlist in enumerate(songs):
report_progress(i, len(songs))
# song_of_playlist: string (artist - song)
try:
            track_id = deezer_search(song_of_playlist, TYPE_TRACK)[0]['id']  # [0] can raise IndexError
song = get_song_infos_from_deezer_website(TYPE_TRACK, track_id)
absolute_filename = download_song_and_get_absolute_filename(TYPE_PLAYLIST, song, playlist_name)
songs_absolute_location.append(absolute_filename)
except (IndexError, Deezer403Exception, Deezer404Exception) as msg:
print(msg)
print(f"Could not find Spotify song ({song_of_playlist}) on Deezer?")
# return
update_mpd_db(songs_absolute_location, add_to_playlist)
songs_with_m3u8_file = create_m3u8_file(songs_absolute_location)
if create_zip:
return [create_zip_file(songs_with_m3u8_file)]
return make_song_paths_relative_to_mpd_root(songs_absolute_location)
@sched.register_command()
def download_youtubedl_and_queue(video_url, add_to_playlist):
filename_absolute = youtubedl_download(video_url,
config["download_dirs"]["youtubedl"],
config["proxy"]["server"])
update_mpd_db(filename_absolute, add_to_playlist)
return make_song_paths_relative_to_mpd_root([filename_absolute])
@sched.register_command()
def download_deezer_favorites(user_id: str, add_to_playlist: bool, create_zip: bool):
songs_absolute_location = []
output_directory = f"favorites_{user_id}"
favorite_songs = get_deezer_favorites(user_id)
for i, fav_song in enumerate(favorite_songs):
report_progress(i, len(favorite_songs))
try:
song = get_song_infos_from_deezer_website(TYPE_TRACK, fav_song)
absolute_filename = download_song_and_get_absolute_filename(TYPE_PLAYLIST, song, output_directory)
songs_absolute_location.append(absolute_filename)
except (IndexError, Deezer403Exception, Deezer404Exception) as msg:
print(msg)
print(f"Could not find song ({fav_song}) on Deezer?")
update_mpd_db(songs_absolute_location, add_to_playlist)
songs_with_m3u8_file = create_m3u8_file(songs_absolute_location)
if create_zip:
return [create_zip_file(songs_with_m3u8_file)]
return make_song_paths_relative_to_mpd_root(songs_absolute_location)
if __name__ == '__main__':
pass
#download_spotify_playlist_and_queue_and_zip("test", '21wZXvtrERELL0bVtKtuUh', False, False)
|
41cfee23b53d7a9690f9d05217fa96e6d3f3ef5d
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/dataflow/stream/cluster_config/util/ScheduleHandler.py
|
0d7f2c01dc71a673ce15c0fa6173d81fc1c06c4b
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,451
|
py
|
ScheduleHandler.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from dataflow.shared.jobnavi.jobnavi_helper import JobNaviHelper
from dataflow.stream.api import stream_jobnavi_helper
from dataflow.stream.settings import API_ERR_RETRY_TIMES
class ScheduleHandler(object):
def __init__(self, schedule_id, geog_area_code):
self.schedule_id = schedule_id
self.geog_area_code = geog_area_code
self.cluster_id = JobNaviHelper.get_jobnavi_cluster("stream")
def list_status(self):
"""
@return:
        {
            u'jid': u'5c201c8826bbefc9ce2ced7c81f7d035',
            u'end-time': -1,
            u'start-time': 1586945646288,
            u'name': u'249_9c4ac1e3251e4ab3a4d76a73b9e51f6f',
            u'last-modification': 1586945646301,
            u'state': u'RUNNING',
            u'tasks': {
                u'scheduled': 0,
                u'failed': 0,
                u'reconciling': 0,
                u'created': 0,
                u'canceling': 0,
                u'finished': 0,
                u'canceled': 0,
                u'running': 1,
                u'total': 1,
                u'deploying': 0
            },
            u'duration': 497240143
        }
"""
jobs_overview = self.send_event("jobs_overview")
jobs_overview["schedule_id"] = self.schedule_id
return jobs_overview
def send_event(self, event_name):
jobnavi_stream_helper = stream_jobnavi_helper.StreamJobNaviHelper(self.geog_area_code, self.cluster_id)
execute_id = jobnavi_stream_helper.get_execute_id(self.schedule_id, API_ERR_RETRY_TIMES)
        # call the interface that fetches the job status
event_id = jobnavi_stream_helper.send_event(execute_id, event_name)
        # fetch the overview information using the event_id
overview = json.loads(jobnavi_stream_helper.list_jobs_status(event_id))
return overview
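# Illustrative usage (both arguments are placeholders):
#
#     handler = ScheduleHandler("my_schedule_id", "inland")
#     print(handler.list_status())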
|
ae2864c8512f013f458ade69e245176d91b2ce31
|
002d925a46fef6867c7092935a5a4113a11cf0c5
|
/care/facility/migrations_old/0034_facilitypatientstatshistory.py
|
74e4c7b66ec2d72cf8ef9565c2160d7f272cbb5c
|
[
"MIT"
] |
permissive
|
coronasafe/care
|
ba74c06c6486e8cd3c11e0f8b3d948e99d304746
|
c000eea7f1c79a37b0fa53eba09696cd95122202
|
refs/heads/master
| 2023-08-31T12:52:08.181541
| 2023-08-29T13:43:33
| 2023-08-29T13:43:33
| 247,995,671
| 216
| 218
|
MIT
| 2023-09-13T14:52:59
| 2020-03-17T14:48:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
0034_facilitypatientstatshistory.py
|
# Generated by Django 2.2.11 on 2020-03-27 18:39
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("facility", "0033_ambulance_service_charge"),
]
operations = [
migrations.CreateModel(
name="FacilityPatientStatsHistory",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_date", models.DateTimeField(auto_now_add=True)),
("modified_date", models.DateTimeField(auto_now=True)),
("deleted", models.BooleanField(default=False)),
("entry_date", models.DateField()),
("num_patients_visited", models.IntegerField(default=0)),
("num_patients_home_quarantine", models.IntegerField(default=0)),
("num_patients_isolation", models.IntegerField(default=0)),
("num_patient_referred", models.IntegerField(default=0)),
(
"facility",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="facility.Facility",
),
),
],
options={
"unique_together": {("facility", "entry_date")},
},
),
]
|
10676bac24972cc1f0733baa36a526ae0a99324c
|
ea57d267ab31480d8d731b2c095e9da9ad989133
|
/aea/aea.py
|
7ea67601f8ebcd03677789c84189d214166c7ee5
|
[
"Apache-2.0"
] |
permissive
|
fetchai/agents-aea
|
6d034f1db6f3beacf31dac2f5a1baaa60c8edb7d
|
bec49adaeba661d8d0f03ac9935dc89f39d95a0d
|
refs/heads/main
| 2023-08-08T23:19:06.276643
| 2023-02-04T10:46:39
| 2023-02-04T10:46:39
| 203,558,879
| 192
| 58
|
Apache-2.0
| 2023-07-19T04:45:26
| 2019-08-21T10:12:47
|
Python
|
UTF-8
|
Python
| false
| false
| 18,133
|
py
|
aea.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2023 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of an autonomous economic agent (AEA)."""
import datetime
from asyncio import AbstractEventLoop
from logging import Logger
from multiprocessing.pool import AsyncResult
from typing import (
Any,
Callable,
Collection,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
cast,
)
from aea.agent import Agent
from aea.agent_loop import AsyncAgentLoop, BaseAgentLoop, SyncAgentLoop
from aea.configurations.base import PublicId
from aea.configurations.constants import (
DEFAULT_BUILD_DIR_NAME,
DEFAULT_SEARCH_SERVICE_ADDRESS,
)
from aea.context.base import AgentContext
from aea.crypto.ledger_apis import DEFAULT_CURRENCY_DENOMINATIONS
from aea.crypto.wallet import Wallet
from aea.decision_maker.base import DecisionMakerHandler
from aea.error_handler.base import AbstractErrorHandler
from aea.error_handler.default import ErrorHandler as DefaultErrorHandler
from aea.exceptions import AEAException, _StopRuntime
from aea.helpers.exception_policy import ExceptionPolicyEnum
from aea.helpers.logging import AgentLoggerAdapter, WithLogger, get_logger
from aea.identity.base import Identity
from aea.mail.base import Envelope
from aea.protocols.base import Message, Protocol
from aea.registries.filter import Filter
from aea.registries.resources import Resources
from aea.skills.base import Behaviour, Handler
class AEA(Agent):
"""This class implements an autonomous economic agent."""
RUN_LOOPS: Dict[str, Type[BaseAgentLoop]] = {
"async": AsyncAgentLoop,
"sync": SyncAgentLoop,
}
DEFAULT_RUN_LOOP: str = "async"
DEFAULT_BUILD_DIR_NAME = DEFAULT_BUILD_DIR_NAME
def __init__(
self,
identity: Identity,
wallet: Wallet,
resources: Resources,
data_dir: str,
loop: Optional[AbstractEventLoop] = None,
period: float = 0.05,
execution_timeout: float = 0,
max_reactions: int = 20,
error_handler_class: Optional[Type[AbstractErrorHandler]] = None,
error_handler_config: Optional[Dict[str, Any]] = None,
decision_maker_handler_class: Optional[Type[DecisionMakerHandler]] = None,
decision_maker_handler_config: Optional[Dict[str, Any]] = None,
skill_exception_policy: ExceptionPolicyEnum = ExceptionPolicyEnum.propagate,
connection_exception_policy: ExceptionPolicyEnum = ExceptionPolicyEnum.propagate,
loop_mode: Optional[str] = None,
runtime_mode: Optional[str] = None,
default_ledger: Optional[str] = None,
currency_denominations: Optional[Dict[str, str]] = None,
default_connection: Optional[PublicId] = None,
default_routing: Optional[Dict[PublicId, PublicId]] = None,
connection_ids: Optional[Collection[PublicId]] = None,
search_service_address: str = DEFAULT_SEARCH_SERVICE_ADDRESS,
storage_uri: Optional[str] = None,
task_manager_mode: Optional[str] = None,
**kwargs: Any,
) -> None:
"""
Instantiate the agent.
:param identity: the identity of the agent
:param wallet: the wallet of the agent.
:param resources: the resources (protocols and skills) of the agent.
:param data_dir: directory where to put local files.
:param loop: the event loop to run the connections.
:param period: period to call agent's act
:param execution_timeout: amount of time to limit single act/handle to execute.
:param max_reactions: the processing rate of envelopes per tick (i.e. single loop).
:param error_handler_class: the class implementing the error handler
:param error_handler_config: the configuration of the error handler
:param decision_maker_handler_class: the class implementing the decision maker handler to be used.
:param decision_maker_handler_config: the configuration of the decision maker handler
:param skill_exception_policy: the skill exception policy enum
:param connection_exception_policy: the connection exception policy enum
:param loop_mode: loop_mode to choose agent run loop.
:param runtime_mode: runtime mode (async, threaded) to run AEA in.
:param default_ledger: default ledger id
:param currency_denominations: mapping from ledger id to currency denomination
:param default_connection: public id to the default connection
:param default_routing: dictionary for default routing.
:param connection_ids: active connection ids. Default: consider all the ones in the resources.
:param search_service_address: the address of the search service used.
:param storage_uri: optional uri to set generic storage
:param task_manager_mode: task manager mode (threaded) to run tasks with.
:param kwargs: keyword arguments to be attached in the agent context namespace.
"""
self._skills_exception_policy = skill_exception_policy
self._connection_exception_policy = connection_exception_policy
aea_logger = AgentLoggerAdapter(
logger=get_logger(__name__, identity.name),
agent_name=identity.name,
)
self._resources = resources
super().__init__(
identity=identity,
connections=[],
loop=loop,
period=period,
loop_mode=loop_mode,
runtime_mode=runtime_mode,
storage_uri=storage_uri,
logger=cast(Logger, aea_logger),
task_manager_mode=task_manager_mode,
)
default_routing = default_routing if default_routing is not None else {}
connection_ids = connection_ids or []
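        # an empty `connection_ids` collection means "activate every connection
        # registered in resources"; otherwise keep only the requested ids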
connections = [
c
for c in self.resources.get_all_connections()
if (not connection_ids) or (c.connection_id in connection_ids)
]
if not bool(self.resources.get_all_connections()):
self.logger.warning(
"Resource's connections list is empty! Instantiating AEA without connections..."
)
elif bool(self.resources.get_all_connections()) and not bool(connections):
self.logger.warning( # pragma: nocover
"No connection left after filtering! Instantiating AEA without connections..."
)
self._set_runtime_and_mail_boxes(
runtime_class=self._get_runtime_class(),
loop_mode=loop_mode,
loop=loop,
multiplexer_options=dict(
connections=connections,
default_routing=default_routing,
default_connection=default_connection,
protocols=self.resources.get_all_protocols(),
),
)
self.max_reactions = max_reactions
if decision_maker_handler_class is None:
from aea.decision_maker.default import ( # isort:skip # pylint: disable=import-outside-toplevel
DecisionMakerHandler as DefaultDecisionMakerHandler,
)
decision_maker_handler_class = DefaultDecisionMakerHandler
if decision_maker_handler_config is None:
decision_maker_handler_config = {}
decision_maker_handler = decision_maker_handler_class(
identity=identity, wallet=wallet, config=decision_maker_handler_config
)
self.runtime.set_decision_maker(decision_maker_handler)
if error_handler_class is None:
error_handler_class = DefaultErrorHandler
if error_handler_config is None:
error_handler_config = {}
self._error_handler = error_handler_class(**error_handler_config)
default_ledger_id = (
default_ledger
if default_ledger is not None
else identity.default_address_key
)
currency_denominations = (
currency_denominations
if currency_denominations is not None
else DEFAULT_CURRENCY_DENOMINATIONS
)
self._context = AgentContext(
self.identity,
self.runtime.multiplexer.connection_status,
self.outbox,
self.runtime.decision_maker.message_in_queue,
decision_maker_handler.context,
self.runtime.task_manager,
default_ledger_id,
currency_denominations,
default_connection,
default_routing,
search_service_address,
decision_maker_handler.self_address,
data_dir,
storage_callable=lambda: self.runtime.storage,
build_dir=self.get_build_dir(),
send_to_skill=self.runtime.agent_loop.send_to_skill,
**kwargs,
)
self._execution_timeout = execution_timeout
self._filter = Filter(
self.resources, self.runtime.decision_maker.message_out_queue
)
self._setup_loggers()
@classmethod
def get_build_dir(cls) -> str:
"""Get agent build directory."""
return cls.DEFAULT_BUILD_DIR_NAME
@property
def context(self) -> AgentContext:
"""Get (agent) context."""
return self._context
@property
def resources(self) -> Resources:
"""Get resources."""
return self._resources
@resources.setter
def resources(self, resources: "Resources") -> None:
"""Set resources."""
self._resources = resources
@property
def filter(self) -> Filter:
"""Get the filter."""
return self._filter
@property
def active_behaviours(self) -> List[Behaviour]:
"""Get all active behaviours to use in act."""
return self.filter.get_active_behaviours()
def setup(self) -> None:
"""
Set up the agent.
Calls setup() on the resources.
"""
self.resources.setup()
def act(self) -> None:
"""
Perform actions.
Adds new handlers and behaviours for use/execution by the runtime.
"""
self.filter.handle_new_handlers_and_behaviours()
def _get_error_handler(self) -> AbstractErrorHandler:
"""Get error handler."""
return self._error_handler
def _get_msg_and_handlers_for_envelope(
self, envelope: Envelope
) -> Tuple[Optional[Message], List[Handler]]:
"""Get the msg and its handlers."""
protocol = self.resources.get_protocol_by_specification_id(
envelope.protocol_specification_id
)
error_handler = self._get_error_handler()
if protocol is None:
error_handler.send_unsupported_protocol(envelope, self.logger)
return None, []
msg, handlers = self._handle_decoding(envelope, protocol, error_handler)
return msg, handlers
def _handle_decoding(
self,
envelope: Envelope,
protocol: Protocol,
error_handler: AbstractErrorHandler,
) -> Tuple[Optional[Message], List[Handler]]:
handlers = self.filter.get_active_handlers(
protocol.public_id, envelope.to_as_public_id
)
if len(handlers) == 0:
reason = (
f"no active handler for protocol={protocol.public_id} in skill={envelope.to_as_public_id}"
if envelope.is_component_to_component_message
else f"no active handler for protocol={protocol.public_id}"
)
error_handler.send_no_active_handler(envelope, reason, self.logger)
return None, []
if isinstance(envelope.message, Message):
msg = envelope.message
return msg, handlers
try:
msg = protocol.serializer.decode(envelope.message)
msg.sender = envelope.sender
msg.to = envelope.to
return msg, handlers
        except Exception as e:  # pylint: disable=broad-except # that's ok, because we send the decoding error back
error_handler.send_decoding_error(envelope, e, self.logger)
return None, []
def handle_envelope(self, envelope: Envelope) -> None:
"""
Handle an envelope.
Performs the following:
- fetching the protocol referenced by the envelope, and
- handling if the protocol is unsupported, using the error handler, or
- handling if there is a decoding error, using the error handler, or
- handling if no active handler is available for the specified protocol, using the error handler, or
- handling the message recovered from the envelope with all active handlers for the specified protocol.
:param envelope: the envelope to handle.
:return: None
"""
self.logger.debug("Handling envelope: {}".format(envelope))
msg, handlers = self._get_msg_and_handlers_for_envelope(envelope)
if msg is None:
return
for handler in handlers:
handler.handle_wrapper(msg)
def _setup_loggers(self) -> None:
"""Set up logger with agent name."""
for element in [
self.runtime.agent_loop,
self.runtime.multiplexer,
self.runtime.task_manager,
self.resources.component_registry,
self.resources.behaviour_registry,
self.resources.handler_registry,
self.resources.model_registry,
]:
element = cast(WithLogger, element)
element.logger = cast(
Logger,
AgentLoggerAdapter(element.logger, agent_name=self._identity.name),
)
def get_periodic_tasks(
self,
) -> Dict[Callable, Tuple[float, Optional[datetime.datetime]]]:
"""
Get all periodic tasks for agent.
:return: dict of callable with period specified
"""
tasks = super().get_periodic_tasks()
tasks.update(self._get_behaviours_tasks())
return tasks
def _get_behaviours_tasks(
self,
) -> Dict[Callable, Tuple[float, Optional[datetime.datetime]]]:
"""
Get all periodic tasks for AEA behaviours.
:return: dict of callable with period specified
"""
tasks = {}
for behaviour in self.active_behaviours:
tasks[behaviour.act_wrapper] = (behaviour.tick_interval, behaviour.start_at)
return tasks
def get_message_handlers(self) -> List[Tuple[Callable[[Any], None], Callable]]:
"""
Get handlers with message getters.
:return: List of tuples of callables: handler and coroutine to get a message
"""
return super().get_message_handlers() + [
(
self.filter.handle_internal_message,
self.filter.get_internal_message,
),
(self.handle_envelope, self.runtime.agent_loop.skill2skill_queue.get),
]
def exception_handler(self, exception: Exception, function: Callable) -> bool:
"""
Handle exception raised during agent main loop execution.
:param exception: exception raised
:param function: a callable exception raised in.
:return: bool, propagate exception if True otherwise skip it.
"""
# docstyle: ignore # noqa: E800
def log_exception(e: Exception, fn: Callable, is_debug: bool = False) -> None:
if is_debug:
self.logger.debug(f"<{e}> raised during `{fn}`")
else:
self.logger.exception(f"<{e}> raised during `{fn}`")
if self._skills_exception_policy == ExceptionPolicyEnum.propagate:
log_exception(exception, function, is_debug=True)
return True
if self._skills_exception_policy == ExceptionPolicyEnum.stop_and_exit:
log_exception(exception, function)
raise _StopRuntime(
AEAException(
f"AEA was terminated cause exception `{exception}` in skills {function}! Please check logs."
)
)
if self._skills_exception_policy == ExceptionPolicyEnum.just_log:
log_exception(exception, function)
return False
raise AEAException(
f"Unsupported exception policy: {self._skills_exception_policy}"
)
def teardown(self) -> None:
"""
Tear down the agent.
Performs the following:
- tears down the resources.
"""
self.resources.teardown()
def get_task_result(self, task_id: int) -> AsyncResult:
"""
Get the result from a task.
:param task_id: the id of the task
:return: async result for task_id
"""
return self.runtime.task_manager.get_task_result(task_id)
def enqueue_task(
self,
func: Callable,
args: Sequence = (),
kwargs: Optional[Dict[str, Any]] = None,
) -> int:
"""
Enqueue a task with the task manager.
:param func: the callable instance to be enqueued
:param args: the positional arguments to be passed to the function.
:param kwargs: the keyword arguments to be passed to the function.
        :return: the task id used to fetch the result.
"""
return self.runtime.task_manager.enqueue_task(func, args, kwargs)
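    # Hedged usage sketch (assumes a fully constructed and running AEA named
    # `my_aea`; not part of the original source):
    #
    #   task_id = my_aea.enqueue_task(pow, args=(2, 10))
    #   result = my_aea.get_task_result(task_id)  # a multiprocessing AsyncResult
    #   assert result.get(timeout=5) == 1024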
|
360d0b680c596f10522af390cd51938ba047968e
|
51771bc8bd5a7717fdc5916b3367444cebec09f1
|
/examples/testBandit.py
|
a6f9860d94e7d5ce429b1a521e7fad502dbdacb2
|
[] |
no_license
|
intohole/moodstyle
|
951c70df2c2f5c038a58535ee941853a038a42d7
|
1d06fc565c0df4bf07196854f3efb94bbefd1bfb
|
refs/heads/master
| 2021-01-18T22:43:23.002705
| 2017-12-22T09:36:18
| 2017-12-22T09:36:18
| 12,214,343
| 249
| 27
| null | 2017-11-28T12:31:00
| 2013-08-19T10:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 500
|
py
|
testBandit.py
|
#coding=utf-8
from moodstyle.alg import Bandit
import random
N = 100
# randomly generate N success probabilities
p = [random.random() for i in range(N)]
# explore with probability 0.05
greedy = Bandit.Greedy(0.05, N)
# number of repeated trials
TIMES = 100000
COUNT = 0
for _ in range(TIMES):
index = greedy.getIndex()
prop = random.random()
if prop <= p[index]:
label = 1
COUNT += 1
else:
label = 0
greedy.process(label)
print(greedy.p)
print(p)
print(COUNT / float(TIMES))
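# For readers without the moodstyle package: a minimal epsilon-greedy sketch
# with the same interface as used above (the getIndex/process method names and
# the per-arm estimate list `p` are assumed from the usage in this script).
class SimpleGreedy(object):
    def __init__(self, eps, n):
        self.eps = eps
        self.p = [0.0] * n       # running estimate of each arm's reward
        self.counts = [0] * n    # number of pulls per arm
        self._last = 0

    def getIndex(self):
        # explore with probability eps, otherwise exploit the best estimate
        if random.random() < self.eps:
            self._last = random.randrange(len(self.p))
        else:
            self._last = max(range(len(self.p)), key=lambda i: self.p[i])
        return self._last

    def process(self, reward):
        i = self._last
        self.counts[i] += 1
        # incremental running-mean update of the pulled arm's estimate
        self.p[i] += (reward - self.p[i]) / float(self.counts[i])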
|
6e4db4c7da5756edf6ef8eec1303a660b1d26569
|
9cb4f0813068fe326ef15f05cd983578f8ae8e55
|
/src/ruptures/__init__.py
|
8a2ecbfa11fb1bf2b6d9a15d0b8541545598740a
|
[
"BSD-2-Clause"
] |
permissive
|
deepcharles/ruptures
|
9a3619e6d3bf147a77e6b5029424ab58b78aef52
|
0eb34388df2096d22fb1afd6e33ec511fb64cfa6
|
refs/heads/master
| 2023-08-29T10:43:59.775941
| 2023-07-05T08:52:41
| 2023-07-05T08:52:41
| 118,264,731
| 1,299
| 169
|
BSD-2-Clause
| 2023-09-05T14:50:16
| 2018-01-20T17:25:20
|
Python
|
UTF-8
|
Python
| false
| false
| 339
|
py
|
__init__.py
|
"""Offline change point detection for Python."""
from .datasets import pw_constant, pw_linear, pw_normal, pw_wavy
from .detection import Binseg, BottomUp, Dynp, KernelCPD, Pelt, Window
from .exceptions import NotEnoughPoints
from .show import display
# Convenient access to the version number
from .version import version as __version__
|
9150a460f5ad10c5f49729ba91f87ae5e3d376bd
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/keras/backend_config.py
|
ad2adba81f23e29d90655eaa01048573120259be
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 3,777
|
py
|
backend_config.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras backend config API."""
from tensorflow.python.util import dispatch
# The type of float to use throughout a session.
_FLOATX = 'float32'
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = 'channels_last'
@dispatch.add_dispatch_support
def epsilon():
"""Returns the value of the fuzz factor used in numeric expressions.
Returns:
A float.
Example:
>>> tf.keras.backend.epsilon()
1e-07
"""
return _EPSILON
def set_epsilon(value):
"""Sets the value of the fuzz factor used in numeric expressions.
Args:
value: float. New value of epsilon.
Example:
>>> tf.keras.backend.epsilon()
1e-07
>>> tf.keras.backend.set_epsilon(1e-5)
>>> tf.keras.backend.epsilon()
1e-05
>>> tf.keras.backend.set_epsilon(1e-7)
"""
global _EPSILON
_EPSILON = value
def floatx():
"""Returns the default float type, as a string.
E.g. `'float16'`, `'float32'`, `'float64'`.
Returns:
String, the current default float type.
Example:
>>> tf.keras.backend.floatx()
'float32'
"""
return _FLOATX
def set_floatx(value):
"""Sets the default float type.
Note: It is not recommended to set this to float16 for training, as this will
likely cause numeric stability issues. Instead, mixed precision, which is
using a mix of float16 and float32, can be used by calling
`tf.keras.mixed_precision.set_global_policy('mixed_float16')`. See the
[mixed precision guide](
https://www.tensorflow.org/guide/keras/mixed_precision) for details.
Args:
value: String; `'float16'`, `'float32'`, or `'float64'`.
Example:
>>> tf.keras.backend.floatx()
'float32'
>>> tf.keras.backend.set_floatx('float64')
>>> tf.keras.backend.floatx()
'float64'
>>> tf.keras.backend.set_floatx('float32')
Raises:
ValueError: In case of invalid value.
"""
global _FLOATX
if value not in {'float16', 'float32', 'float64'}:
raise ValueError('Unknown floatx type: ' + str(value))
_FLOATX = str(value)
@dispatch.add_dispatch_support
def image_data_format():
"""Returns the default image data format convention.
Returns:
A string, either `'channels_first'` or `'channels_last'`
Example:
>>> tf.keras.backend.image_data_format()
'channels_last'
"""
return _IMAGE_DATA_FORMAT
def set_image_data_format(data_format):
"""Sets the value of the image data format convention.
Args:
data_format: string. `'channels_first'` or `'channels_last'`.
Example:
>>> tf.keras.backend.image_data_format()
'channels_last'
>>> tf.keras.backend.set_image_data_format('channels_first')
>>> tf.keras.backend.image_data_format()
'channels_first'
>>> tf.keras.backend.set_image_data_format('channels_last')
Raises:
ValueError: In case of invalid `data_format` value.
"""
global _IMAGE_DATA_FORMAT
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('Unknown data_format: ' + str(data_format))
_IMAGE_DATA_FORMAT = str(data_format)
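# Hedged sketch of how other code typically consumes these settings. Keras
# ships a similar `cast_to_floatx` helper in its backend module; this
# standalone version is illustrative only.
def _cast_to_floatx(x):
  """Cast a NumPy array (or nested sequence) to the default float type."""
  import numpy as np
  return np.asarray(x, dtype=floatx())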
|
00ed905513baa58b24ff91a1bdb3865bcad113fb
|
05b0c763ab92086e69a8d00ae6465009c596f6bc
|
/tests/cpu/test_linear_reorder.py
|
1fb6c11716ef7011926d592998d77088a073eece
|
[
"Apache-2.0"
] |
permissive
|
intel/intel-extension-for-pytorch
|
60ce2af2ec3a1dacae0d0db13dd51a5b44512e61
|
7f9266789de7ca9d8bcf55606f3204f1a3640640
|
refs/heads/master
| 2023-09-01T09:13:16.866410
| 2023-08-31T08:00:37
| 2023-08-31T08:00:37
| 256,061,008
| 991
| 144
|
Apache-2.0
| 2023-08-13T13:56:07
| 2020-04-15T23:35:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,659
|
py
|
test_linear_reorder.py
|
import unittest
from common_utils import VerboseTestCase
import subprocess
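# Hedged sketch of the kind of matcher the VerboseTestCase helpers implement
# (the real implementations live in common_utils; the line layout is assumed
# from oneDNN's comma-separated verbose output, e.g.
# "onednn_verbose,exec,cpu,reorder,jit:uni,undef,src_f32::blocked:ab:f0 ...").
def _looks_like_reorder(line):
    fields = line.split(",")
    return line.startswith(("dnnl_verbose", "onednn_verbose")) and "reorder" in fields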
class TestLinearReorder(VerboseTestCase):
def test_linear_reorder(self):
with subprocess.Popen(
"DNNL_VERBOSE=1 python -u linear_reorder.py",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as p:
            segmentation = {
                "fp32": {
                    "reorder_for_pack": 2,
                    "reorder_for_dtype": 0,
                    "reorder_for_format": 0,
                    "redundant_reorder": 0,
                },
                "bf16": {
                    "reorder_for_pack": 3,
                    "reorder_for_dtype": 0,
                    "reorder_for_format": 0,
                    "redundant_reorder": 0,
                },
            }  # only prepack reorders are expected; any other reorder fails the test
seg = None
for line in p.stdout.readlines():
line = str(line, "utf-8").strip()
if line.endswith("***************"):
seg = line.strip().split(",")[0]
continue
                # The following checks that the number of each reorder type is as expected
if self.is_dnnl_verbose(line) and self.ReorderForPack(line):
segmentation[seg]["reorder_for_pack"] -= 1
self.assertTrue(
segmentation[seg]["reorder_for_pack"] >= 0,
"show unexpected reorder for pack",
)
if self.is_dnnl_verbose(line) and self.OnlyReorderDtype(line):
segmentation[seg]["reorder_for_dtype"] -= 1
self.assertTrue(
segmentation[seg]["reorder_for_dtype"] >= 0,
"show unexpected reorder for dtype",
)
if self.is_dnnl_verbose(line) and self.OnlyReorderFormat(line):
segmentation[seg]["reorder_for_format"] -= 1
self.assertTrue(
segmentation[seg]["reorder_for_format"] >= 0,
"show unexpected reorder for format",
)
                if self.is_dnnl_verbose(line) and self.RedundantReorder(line):
                    segmentation[seg]["redundant_reorder"] -= 1
                    self.assertTrue(
                        segmentation[seg]["redundant_reorder"] >= 0,
                        "show unexpected redundant reorder",
                    )
if __name__ == "__main__":
test = unittest.main()
|
4889a252ca565bc9bf2b5acf4e521e410aeb14a3
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/tune/tests/test_tune_server.py
|
8ad7a59963f7c8f908ef998fff359b8d7e63bc67
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,468
|
py
|
test_tune_server.py
|
import requests
import socket
import subprocess
import unittest
import json
import ray
from ray.rllib import _register_all
from ray.tune import PlacementGroupFactory
from ray.tune.experiment.trial import Trial
from ray.tune.web_server import TuneClient
from ray.tune.execution.tune_controller import TuneController
from ray.train.tests.util import mock_storage_context
def get_valid_port():
port = 4321
while True:
try:
print("Trying port", port)
port_test_socket = socket.socket()
port_test_socket.bind(("127.0.0.1", port))
port_test_socket.close()
break
except socket.error:
port += 1
return port
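# An alternative, shown for comparison only (not used above): bind to port 0
# and let the OS assign a free port, avoiding the linear probing loop.
def get_os_assigned_port():
    s = socket.socket()
    s.bind(("127.0.0.1", 0))
    port = s.getsockname()[1]
    s.close()
    return port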
class TuneServerSuite(unittest.TestCase):
def basicSetup(self):
ray.init(num_cpus=4, num_gpus=1)
port = get_valid_port()
self.runner = TuneController(server_port=port, storage=mock_storage_context())
runner = self.runner
kwargs = {
"stopping_criterion": {"training_iteration": 3},
"placement_group_factory": PlacementGroupFactory([{"CPU": 1, "GPU": 1}]),
"storage": mock_storage_context(),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
client = TuneClient("localhost", port)
return runner, client
def tearDown(self):
print("Tearing down....")
try:
self.runner._server.shutdown()
self.runner = None
except Exception as e:
print(e)
ray.shutdown()
_register_all()
def testAddTrial(self):
runner, client = self.basicSetup()
for i in range(3):
runner.step()
spec = {
"run": "__fake",
"stop": {"training_iteration": 3},
"resources_per_trial": {"cpu": 1, "gpu": 1},
}
client.add_trial("test", spec)
runner.step()
all_trials = client.get_all_trials()["trials"]
runner.step()
self.assertEqual(len(all_trials), 3)
def testGetTrials(self):
runner, client = self.basicSetup()
for i in range(3):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(len(all_trials), 2)
tid = all_trials[0]["id"]
client.get_trial(tid)
runner.step()
self.assertEqual(len(all_trials), 2)
def testGetTrialsWithFunction(self):
runner, client = self.basicSetup()
test_trial = Trial(
"__fake",
trial_id="function_trial",
stopping_criterion={"training_iteration": 3},
config={"callbacks": {"on_episode_start": lambda x: None}},
storage=mock_storage_context(),
)
runner.add_trial(test_trial)
for i in range(3):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(len(all_trials), 3)
client.get_trial("function_trial")
runner.step()
self.assertEqual(len(all_trials), 3)
def testStopTrial(self):
"""Check if Stop Trial works."""
runner, client = self.basicSetup()
while not any(t.status == Trial.RUNNING for t in runner.get_trials()):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(
len([t for t in all_trials if t["status"] == Trial.RUNNING]), 1
)
tid = [t for t in all_trials if t["status"] == Trial.RUNNING][0]["id"]
client.stop_trial(tid)
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(
len([t for t in all_trials if t["status"] == Trial.RUNNING]), 0
)
def testStopExperiment(self):
"""Check if stop_experiment works."""
runner, client = self.basicSetup()
while not any(t.status == Trial.RUNNING for t in runner.get_trials()):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(
len([t for t in all_trials if t["status"] == Trial.RUNNING]), 1
)
client.stop_experiment()
runner.step()
self.assertTrue(runner.is_finished())
self.assertRaises(
requests.exceptions.ReadTimeout, lambda: client.get_all_trials(timeout=1)
)
def testCurlCommand(self):
"""Check if Stop Trial works."""
runner, client = self.basicSetup()
for i in range(2):
runner.step()
stdout = subprocess.check_output(
'curl "http://{}:{}/trials"'.format(
client.server_address, client.server_port
),
shell=True,
)
self.assertNotEqual(stdout, None)
curl_trials = json.loads(stdout.decode())["trials"]
client_trials = client.get_all_trials()["trials"]
for curl_trial, client_trial in zip(curl_trials, client_trials):
self.assertEqual(curl_trial.keys(), client_trial.keys())
self.assertEqual(curl_trial["id"], client_trial["id"])
self.assertEqual(
curl_trial["trainable_name"], client_trial["trainable_name"]
)
self.assertEqual(curl_trial["status"], client_trial["status"])
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
4af8d841db18ef76744765fdbb80884c558d1685
|
554718851656376ad2bceb282de30459167ffeb2
|
/tests/tensorflow2/test_keras_lstm.py
|
5e3426d834f394987565377ad9569eb37f89c2dd
|
[
"Apache-2.0"
] |
permissive
|
awslabs/sagemaker-debugger
|
d6ae6a6177a6cb457972772e2b3021e8a9dcc621
|
37ecf0aaeb24ab2adbe7f0ad664d0e50fa4154f2
|
refs/heads/master
| 2023-09-05T05:20:02.458427
| 2023-04-20T20:48:11
| 2023-04-20T20:48:11
| 222,554,670
| 162
| 89
|
Apache-2.0
| 2023-08-23T14:31:27
| 2019-11-18T22:12:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,136
|
py
|
test_keras_lstm.py
|
# Standard Library
# Third Party
import numpy as np
import pytest
import tensorflow.compat.v2 as tf
from tensorflow.keras.layers import LSTM, Activation, Dense, Dropout, Embedding, TimeDistributed
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
from packaging import version
if version.parse(tf.__version__) >= version.parse("2.11.0") or "rc" in tf.__version__:
from tensorflow.keras.optimizers.legacy import Adam
else:
from tensorflow.keras.optimizers import Adam
# First Party
from smdebug import SaveConfig
from smdebug.core.collection import CollectionKeys
from smdebug.tensorflow import KerasHook
from smdebug.trials import create_trial
class KerasBatchGenerator(object):
def __init__(self, num_steps, batch_size, skip_step=5):
self.data = np.random.randint(low=0, high=1000, size=10000).tolist()
self.num_steps = num_steps
self.batch_size = batch_size
self.vocabulary = 1000
# this will track the progress of the batches sequentially through the
# data set - once the data reaches the end of the data set it will reset
# back to zero
self.current_idx = 0
# skip_step is the number of words which will be skipped before the next
# batch is skimmed from the data set
self.skip_step = skip_step
def generate(self):
x = np.zeros((self.batch_size, self.num_steps))
y = np.zeros((self.batch_size, self.num_steps, self.vocabulary))
while True:
for i in range(self.batch_size):
if self.current_idx + self.num_steps >= len(self.data):
# reset the index back to the start of the data set
self.current_idx = 0
x[i, :] = self.data[self.current_idx : self.current_idx + self.num_steps]
temp_y = self.data[self.current_idx + 1 : self.current_idx + self.num_steps + 1]
# convert all of temp_y into a one hot representation
y[i, :, :] = to_categorical(temp_y, num_classes=self.vocabulary)
self.current_idx += self.skip_step
yield x, y
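# Illustrative shape check for the generator above (not part of the original
# test): each batch is (batch_size, num_steps) token ids with one-hot targets.
def _check_generator_shapes():
    gen = KerasBatchGenerator(num_steps=30, batch_size=4)
    x, y = next(gen.generate())
    assert x.shape == (4, 30)
    assert y.shape == (4, 30, 1000)  # vocabulary is fixed at 1000 above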
def train(num_epochs, batch_size, model, num_steps, hook):
train_data_generator = KerasBatchGenerator(num_steps, batch_size, skip_step=num_steps)
valid_data_generator = KerasBatchGenerator(num_steps, batch_size, skip_step=num_steps)
callbacks = []
if hook:
callbacks.append(hook)
model.fit_generator(
train_data_generator.generate(),
len(train_data_generator.data) // (batch_size * num_steps),
num_epochs,
validation_data=valid_data_generator.generate(),
validation_steps=len(valid_data_generator.data) // (batch_size * num_steps),
verbose=0,
callbacks=callbacks,
)
@pytest.mark.slow
def test_lstm_and_generator(out_dir, tf_eager_mode):
# init hook
hook = KerasHook(
out_dir,
include_collections=[
CollectionKeys.WEIGHTS,
CollectionKeys.LOSSES,
CollectionKeys.GRADIENTS,
],
save_config=SaveConfig(save_steps=[0, 1, 2, 3]),
)
if not tf_eager_mode:
tf.compat.v1.disable_eager_execution()
# init model
num_steps = 100
hidden_size = 100
vocabulary = 1000
model = Sequential()
model.add(Embedding(vocabulary, hidden_size, input_length=num_steps))
model.add(LSTM(hidden_size, return_sequences=True))
model.add(LSTM(hidden_size, return_sequences=True))
model.add(Dropout(0.2))
model.add(TimeDistributed(Dense(vocabulary)))
model.add(Activation("softmax"))
model.compile(
loss="categorical_crossentropy",
optimizer=hook.wrap_optimizer(Adam()),
metrics=["categorical_accuracy"],
)
train(3, 32, model, num_steps, hook)
tr = create_trial(out_dir)
assert len(tr.tensor_names(collection=CollectionKeys.LOSSES)) > 0
assert len(tr.tensor_names(collection=CollectionKeys.WEIGHTS)) > 0
# can't get gradients with TF 2.x yet
# assert len(tr.tensor_names(collection=CollectionKeys.GRADIENTS)) > 0
|
8348743d34743f09bc8209ba5af061795a96b8d2
|
bca49447a0757f4a1ab22898d8391ebd0a3af262
|
/typings/kazoo/exceptions.pyi
|
db6f4ed111809a2a40f7c9470b47652826005d77
|
[
"MIT"
] |
permissive
|
zalando/patroni
|
ccba2a34f7aa659e334f6aabf8fafd735839b3a9
|
6b7f914da7fd4974ca091c8f55c769b2ad0f58a3
|
refs/heads/master
| 2023-09-04T11:06:43.490117
| 2023-09-04T08:03:37
| 2023-09-04T08:03:37
| 38,744,670
| 5,866
| 916
|
MIT
| 2023-09-14T17:36:28
| 2015-07-08T09:27:19
|
Python
|
UTF-8
|
Python
| false
| false
| 286
|
pyi
|
exceptions.pyi
|
class KazooException(Exception):
...
class ZookeeperError(KazooException):
...
class SessionExpiredError(ZookeeperError):
...
class ConnectionClosedError(SessionExpiredError):
...
class NoNodeError(ZookeeperError):
...
class NodeExistsError(ZookeeperError):
...
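# Hedged usage sketch (comments only; stub files carry no runtime code). The
# hierarchy above lets callers catch the broad ZookeeperError or a narrow
# subclass, e.g.:
#
#   try:
#       data, stat = client.get("/service/leader")   # `client` is a KazooClient
#   except NoNodeError:
#       ...  # znode does not exist
#   except SessionExpiredError:
#       ...  # session lost; reconnect before retrying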
|
0e357a27f1f73fc69d80b272ddfd58757d53259c
|
0d48db33dc6c69b1f6e339defbf3b99b2e20f4e7
|
/lib/svtplay_dl/utils/getmedia.py
|
979cd7d62384c739b55c66bd214153f5b310b27f
|
[
"MIT"
] |
permissive
|
spaam/svtplay-dl
|
082464969cb180fb71d5db1f3df06754027398d1
|
9d27fe86d162c92ca55cceafbe0678ee1f1f638c
|
refs/heads/master
| 2023-09-02T15:41:16.522035
| 2023-07-18T22:28:53
| 2023-07-18T22:41:44
| 1,426,859
| 636
| 159
|
MIT
| 2023-07-11T22:28:27
| 2011-03-01T16:49:18
|
Python
|
UTF-8
|
Python
| false
| false
| 8,419
|
py
|
getmedia.py
|
import copy
import logging
import os
import sys
from datetime import datetime
from shutil import which
from svtplay_dl.error import UIException
from svtplay_dl.fetcher import VideoRetriever
from svtplay_dl.postprocess import postprocess
from svtplay_dl.service import Generic
from svtplay_dl.service import service_handler
from svtplay_dl.service.services import Raw
from svtplay_dl.service.services import sites
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.nfo import write_nfo_episode
from svtplay_dl.utils.nfo import write_nfo_tvshow
from svtplay_dl.utils.output import filename
from svtplay_dl.utils.output import find_dupes
from svtplay_dl.utils.output import formatname
from svtplay_dl.utils.stream import list_quality
from svtplay_dl.utils.stream import select_quality
from svtplay_dl.utils.stream import subtitle_decider
from svtplay_dl.utils.text import exclude
def get_multiple_media(urls, config):
if config.get("output") and os.path.isfile(config.get("output")):
logging.error("Output must be a directory if used with multiple URLs")
sys.exit(2)
elif config.get("output") and not os.path.exists(config.get("output")):
try:
os.makedirs(config.get("output"))
except OSError as e:
logging.error("%s: %s", e.strerror, e.filename)
return
for url in urls:
get_media(url, copy.copy(config))
def get_media(url, options, version="Unknown"):
if "http" not in url[:4]:
url = f"http://{url}"
if options.get("verbose"):
logging.debug("version: %s", version)
stream = service_handler(sites, options, url)
if not stream:
generic = Generic(options, url)
url, stream = generic.get(sites)
if not stream:
if url.find(".f4m") > 0 or url.find(".m3u8") > 0 or url.find(".mpd") > 1:
stream = Raw(options, url)
if not stream:
logging.error("That site is not supported. Make a ticket or send a message")
sys.exit(2)
if options.get("all_episodes") or stream.config.get("all_episodes"):
get_all_episodes(stream, url, options)
else:
get_one_media(stream)
def get_all_episodes(stream, url, options):
name = os.path.dirname(formatname({"basedir": True}, stream.config))
if name and os.path.isfile(name):
logging.error("Output must be a directory if used with --all-episodes")
sys.exit(2)
elif name and not os.path.exists(name):
try:
os.makedirs(name)
except OSError as e:
logging.error("%s: %s", e.strerror, e.filename)
return
episodes = stream.find_all_episodes(stream.config)
if episodes is None:
return
for idx, o in enumerate(episodes):
if o == url:
substream = stream
else:
substream = service_handler(sites, copy.copy(stream.config), o)
logging.info("Episode %d of %d", idx + 1, len(episodes))
logging.info("Url: %s", o)
if not (options.get("get_url") and options.get("get_only_episode_url")):
# get_one_media overwrites options.output...
get_one_media(substream)
def get_one_media(stream):
# Make an automagic filename
if not filename(stream):
return
if stream.config.get("merge_subtitle"):
if not which("ffmpeg"):
logging.error("--merge-subtitle needs ffmpeg. Please install ffmpeg.")
logging.info("https://ffmpeg.org/download.html")
sys.exit(2)
videos = []
subtitles = []
error = []
streams = stream.get()
try:
for i in streams:
if isinstance(i, Exception):
error.append(i)
elif not exclude(stream.config, formatname(i.output, stream.config)):
if isinstance(i, VideoRetriever):
if stream.config.get("preferred"):
if stream.config.get("preferred").lower() == i.name:
videos.append(i)
else:
videos.append(i)
if isinstance(i, subtitle):
subtitles.append(i)
except Exception:
if stream.config.get("verbose"):
raise
else:
logging.error("svtplay-dl crashed")
logging.error("Run again and add --verbose as an argument, to get more information")
logging.error("If the error persists, you can report it at https://github.com/spaam/svtplay-dl/issues")
logging.error("Include the URL used, the stack trace and the output of svtplay-dl --version in the issue")
return
try:
after_date = datetime.strptime(stream.config.get("after_date"), "%Y-%m-%d")
except (ValueError, TypeError, KeyError, AttributeError): # gotta catch em all..
after_date = None
try:
pub_date = datetime.fromtimestamp(stream.output["publishing_datetime"])
except (ValueError, TypeError, KeyError):
pub_date = None
if after_date is not None and pub_date is not None and pub_date.date() < after_date.date():
logging.info(
"Video %sS%dE%d skipped since published %s.",
stream.output["title"],
stream.output["season"],
stream.output["episode"],
pub_date.date(),
)
return
if stream.config.get("require_subtitle") and not subtitles:
logging.info("No subtitles available")
return
if not stream.config.get("list_quality"):
subtitle_decider(stream, subtitles)
if stream.config.get("force_subtitle"):
return
if not videos:
errormsg = None
for exc in error:
if errormsg:
errormsg = f"{errormsg}. {str(exc)}"
else:
errormsg = str(exc)
if errormsg:
logging.error("No videos found. %s", errormsg)
else:
logging.error("No videos found.")
else:
if stream.config.get("list_quality"):
list_quality(videos)
return
if stream.config.get("nfo"):
# Create NFO files
write_nfo_episode(stream.output, stream.config)
write_nfo_tvshow(stream.output, stream.config)
if stream.config.get("force_nfo"):
return
try:
fstream = select_quality(stream.config, videos)
except UIException as e:
logging.error(e)
return
if fstream.config.get("get_url"):
print(fstream.url)
return
    dupe, fname = find_dupes(fstream.output, stream.config)
    if dupe and not stream.config.get("force"):
        logging.warning("File (%s) already exists. Use --force to overwrite", fname.name)
return
if fstream.config.get("output_format") and fstream.config.get("output_format").lower() not in ["mkv", "mp4"]:
logging.error("Unknown output format. please choose mp4 or mkv")
sys.exit(2)
try:
logging.info("Selected to download %s, bitrate: %s format: %s", fstream.name, fstream.bitrate, fstream.format)
fstream.download()
except UIException as e:
if fstream.config.get("verbose"):
raise e
logging.error(e)
sys.exit(2)
if fstream.config.get("thumbnail") and hasattr(stream, "get_thumbnail"):
stream.get_thumbnail(stream.config)
if fstream.config.get("silent_semi") and fstream.finished:
logging.log(25, "Download of %s was completed", formatname(fstream.output, fstream.config))
if fstream.config.get("no_postprocess") is True or all(fstream.config.get(x) for x in ["no_remux", "no_merge"]) is True:
logging.info("All done. Not postprocessing files, leaving them completely untouched.")
return
post = postprocess(fstream, fstream.config, subtitles)
if fstream.audio and not post.detect and fstream.finished:
logging.warning("Can't find ffmpeg/avconv. audio and video is in seperate files. if you dont want this use -P hls or hds")
if post.detect and fstream.config.get("no_merge") is False:
post.merge()
else:
logging.info("All done. Not postprocessing files, leaving them completely untouched.")
|
b7a24472a842ccaae07413887453e9622ef8d3e4
|
6946f9a3e9d57b00ea275b2303ced0dedcdba1d4
|
/demo_scripts/ib_examples/order_samples.py
|
7d38f22d4c36d795600fd828c67687444f603808
|
[
"Apache-2.0"
] |
permissive
|
quarkfin/qf-lib
|
8eaf76e3db385295ff8845b3250ba64a6fcfc7a6
|
f707e51bc2ff45f6e46dcdd24d59d83ce7dc4f94
|
refs/heads/master
| 2023-08-31T17:41:57.213680
| 2023-08-29T10:01:49
| 2023-08-29T10:01:49
| 202,696,503
| 379
| 51
|
Apache-2.0
| 2023-09-05T06:11:35
| 2019-08-16T09:10:20
|
Python
|
UTF-8
|
Python
| false
| false
| 46,641
|
py
|
order_samples.py
|
"""
Copyright (C) 2016 Interactive Brokers LLC. All rights reserved. This code is
subject to the terms and conditions of the IB API Non-Commercial License or the
IB API Commercial License, as applicable.
"""
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibapi import order_condition
from ibapi.common import ListOfOrder
from ibapi.order import (OrderComboLeg, Order)
from ibapi.order_condition import OrderCondition
from ibapi.tag_value import TagValue
class OrderSamples:
""" <summary>
#/ An auction order is entered into the electronic trading system during the pre-market opening period for execution at the
#/ Calculated Opening Price (COP). If your order is not filled on the open, the order is re-submitted as a limit order with
#/ the limit price set to the COP or the best bid/ask after the market opens.
#/ Products: FUT, STK
</summary>"""
@staticmethod
def AtAuction(action: str, quantity: float, price: float):
order = Order()
order.action = action
order.tif = "AUC"
order.orderType = "MTL"
order.totalQuantity = quantity
order.lmtPrice = price
return order
""" <summary>
#/ A Discretionary order is a limit order submitted with a hidden, specified 'discretionary' amount off the limit price which
#/ may be used to increase the price range over which the limit order is eligible to execute. The market sees only the limit price.
#/ Products: STK
</summary>"""
@staticmethod
def Discretionary(action: str, quantity: float, price: float, discretionaryAmount: float):
order = Order()
order.action = action
order.orderType = "LMT"
order.totalQuantity = quantity
order.lmtPrice = price
order.discretionaryAmt = discretionaryAmount
return order
""" <summary>
#/ A Market order is an order to buy or sell at the market bid or offer price. A market order may increase the likelihood of a fill
#/ and the speed of execution, but unlike the Limit order a Market order provides no price protection and may fill at a price far
#/ lower/higher than the current displayed bid/ask.
#/ Products: BOND, CFD, EFP, CASH, FUND, FUT, FOP, OPT, STK, WAR
</summary>"""
@staticmethod
def MarketOrder(action: str, quantity: float):
order = Order()
order.action = action
order.orderType = "MKT"
order.totalQuantity = quantity
return order
""" <summary>
#/ A Market if Touched (MIT) is an order to buy (or sell) a contract below (or above) the market. Its purpose is to take advantage
#/ of sudden or unexpected changes in share or other prices and provides investors with a trigger price to set an order in motion.
#/ Investors may be waiting for excessive strength (or weakness) to cease, which might be represented by a specific price point.
#/ MIT orders can be used to determine whether or not to enter the market once a specific price level has been achieved. This order
#/ is held in the system until the trigger price is touched, and is then submitted as a market order. An MIT order is similar to a
#/ stop order, except that an MIT sell order is placed above the current market price, and a stop sell order is placed below
#/ Products: BOND, CFD, CASH, FUT, FOP, OPT, STK, WAR
</summary>"""
@staticmethod
def MarketIfTouched(action: str, quantity: float, price: float):
order = Order()
order.action = action
order.orderType = "MIT"
order.totalQuantity = quantity
order.auxPrice = price
return order
""" <summary>
#/ A Market-on-Close (MOC) order is a market order that is submitted to execute as close to the closing price as possible.
#/ Products: CFD, FUT, STK, WAR
</summary>"""
@staticmethod
def MarketOnClose(action: str, quantity: float):
order = Order()
order.action = action
order.orderType = "MOC"
order.totalQuantity = quantity
return order
""" <summary>
#/ A Market-on-Open (MOO) order combines a market order with the OPG time in force to create an order that is automatically
#/ submitted at the market's open and fills at the market price.
#/ Products: CFD, STK, OPT, WAR
</summary>"""
@staticmethod
def MarketOnOpen(action: str, quantity: float):
order = Order()
order.action = action
order.orderType = "MKT"
order.totalQuantity = quantity
order.tif = "OPG"
return order
""" <summary>
    #/ ISE Midpoint Match (MPM) orders always execute at the midpoint of the NBBO. You can submit market and limit orders direct-routed
    #/ to ISE for MPM execution. Market orders execute at the midpoint whenever an eligible contra-order is available. Limit orders
    #/ execute only when the midpoint price is better than the limit price. Standard MPM orders are completely anonymous.
#/ Products: STK
</summary>"""
@staticmethod
def MidpointMatch(action: str, quantity: float):
order = Order()
order.action = action
order.orderType = "MKT"
order.totalQuantity = quantity
return order
""" <summary>
#/ A pegged-to-market order is designed to maintain a purchase price relative to the national best offer (NBO) or a sale price
#/ relative to the national best bid (NBB). Depending on the width of the quote, this order may be passive or aggressive.
#/ The trader creates the order by entering a limit price which defines the worst limit price that they are willing to accept.
#/ Next, the trader enters an offset amount which computes the active limit price as follows:
#/ Sell order price = Bid price + offset amount
#/ Buy order price = Ask price - offset amount
#/ Products: STK
</summary>"""
@staticmethod
def PeggedToMarket(action: str, quantity: float, marketOffset: float):
order = Order()
order.action = action
order.orderType = "PEG MKT"
order.totalQuantity = quantity
order.auxPrice = marketOffset # Offset price
return order
""" <summary>
    #/ A Pegged to Stock order continually adjusts the option order price by the product of a signed user-defined delta and the change of
#/ the option's underlying stock price. The delta is entered as an absolute and assumed to be positive for calls and negative for puts.
#/ A buy or sell call order price is determined by adding the delta times a change in an underlying stock price to a specified starting
#/ price for the call. To determine the change in price, the stock reference price is subtracted from the current NBBO midpoint.
    #/ The Stock Reference Price can be defined by the user, or defaults to the NBBO midpoint at the time of the order if no reference price
#/ is entered. You may also enter a high/low stock price range which cancels the order when reached. The delta times the change in stock
#/ price will be rounded to the nearest penny in favor of the order.
#/ Products: OPT
</summary>"""
@staticmethod
def PeggedToStock(action: str, quantity: float, delta: float, stockReferencePrice: float, startingPrice: float):
order = Order()
order.action = action
order.orderType = "PEG STK"
order.totalQuantity = quantity
order.delta = delta
order.lmtPrice = stockReferencePrice
order.startingPrice = startingPrice
return order
""" <summary>
#/ Relative (a.k.a. Pegged-to-Primary) orders provide a means for traders to seek a more aggressive price than the National Best Bid
#/ and Offer (NBBO). By acting as liquidity providers, and placing more aggressive bids and offers than the current best bids and offers,
#/ traders increase their odds of filling their order. Quotes are automatically adjusted as the markets move, to remain aggressive.
#/ For a buy order, your bid is pegged to the NBB by a more aggressive offset, and if the NBB moves up, your bid will also move up.
#/ If the NBB moves down, there will be no adjustment because your bid will become even more aggressive and execute. For sales, your
#/ offer is pegged to the NBO by a more aggressive offset, and if the NBO moves down, your offer will also move down. If the NBO moves up,
#/ there will be no adjustment because your offer will become more aggressive and execute. In addition to the offset, you can define an
#/ absolute cap, which works like a limit price, and will prevent your order from being executed above or below a specified level.
#/ Stocks, Options and Futures - not available on paper trading
#/ Products: CFD, STK, OPT, FUT
</summary>"""
@staticmethod
def RelativePeggedToPrimary(action: str, quantity: float, priceCap: float, offsetAmount: float):
order = Order()
order.action = action
order.orderType = "REL"
order.totalQuantity = quantity
order.lmtPrice = priceCap
order.auxPrice = offsetAmount
return order
""" <summary>
#/ Sweep-to-fill orders are useful when a trader values speed of execution over price. A sweep-to-fill order identifies the best price
#/ and the exact quantity offered/available at that price, and transmits the corresponding portion of your order for immediate execution.
#/ Simultaneously it identifies the next best price and quantity offered/available, and submits the matching quantity of your order for
#/ immediate execution.
#/ Products: CFD, STK, WAR
</summary>"""
@staticmethod
def SweepToFill(action: str, quantity: float, price: float):
order = Order()
order.action = action
order.orderType = "LMT"
order.totalQuantity = quantity
order.lmtPrice = price
order.sweepToFill = True
return order
""" <summary>
#/ For option orders routed to the Boston Options Exchange (BOX) you may elect to participate in the BOX's price improvement auction in
#/ pennies. All BOX-directed price improvement orders are immediately sent from Interactive Brokers to the BOX order book, and when the
#/ terms allow, IB will evaluate it for inclusion in a price improvement auction based on price and volume priority. In the auction, your
#/ order will have priority over broker-dealer price improvement orders at the same price.
#/ An Auction Limit order at a specified price. Use of a limit order ensures that you will not receive an execution at a price less favorable
#/ than the limit price. Enter limit orders in penny increments with your auction improvement amount computed as the difference between your
#/ limit order price and the nearest listed increment.
#/ Products: OPT
#/ Supported Exchanges: BOX
</summary>"""
@staticmethod
def AuctionLimit(action: str, quantity: float, price: float, auctionStrategy: int):
order = Order()
order.action = action
order.orderType = "LMT"
order.totalQuantity = quantity
order.lmtPrice = price
order.auctionStrategy = auctionStrategy
return order
""" <summary>
#/ For option orders routed to the Boston Options Exchange (BOX) you may elect to participate in the BOX's price improvement auction in pennies.
#/ All BOX-directed price improvement orders are immediately sent from Interactive Brokers to the BOX order book, and when the terms allow,
#/ IB will evaluate it for inclusion in a price improvement auction based on price and volume priority. In the auction, your order will have
#/ priority over broker-dealer price improvement orders at the same price.
#/ An Auction Pegged to Stock order adjusts the order price by the product of a signed delta (which is entered as an absolute and assumed to be
#/ positive for calls, negative for puts) and the change of the option's underlying stock price. A buy or sell call order price is determined
#/ by adding the delta times a change in an underlying stock price change to a specified starting price for the call. To determine the change
    #/ in price, a stock reference price (NBBO midpoint at the time of the order is assumed if no reference price is entered) is subtracted from
#/ the current NBBO midpoint. A stock range may also be entered that cancels an order when reached. The delta times the change in stock price
#/ will be rounded to the nearest penny in favor of the order and will be used as your auction improvement amount.
#/ Products: OPT
#/ Supported Exchanges: BOX
</summary>"""
@staticmethod
def AuctionPeggedToStock(action: str, quantity: float, startingPrice: float, delta: float):
order = Order()
order.action = action
order.orderType = "PEG STK"
order.totalQuantity = quantity
order.delta = delta
order.startingPrice = startingPrice
return order
""" <summary>
#/ For option orders routed to the Boston Options Exchange (BOX) you may elect to participate in the BOX's price improvement auction in pennies.
#/ All BOX-directed price improvement orders are immediately sent from Interactive Brokers to the BOX order book, and when the terms allow,
#/ IB will evaluate it for inclusion in a price improvement auction based on price and volume priority. In the auction, your order will have
#/ priority over broker-dealer price improvement orders at the same price.
#/ An Auction Relative order that adjusts the order price by the product of a signed delta (which is entered as an absolute and assumed to be
#/ positive for calls, negative for puts) and the change of the option's underlying stock price. A buy or sell call order price is determined
#/ by adding the delta times a change in an underlying stock price change to a specified starting price for the call. To determine the change
    #/ in price, a stock reference price (NBBO midpoint at the time of the order is assumed if no reference price is entered) is subtracted from
#/ the current NBBO midpoint. A stock range may also be entered that cancels an order when reached. The delta times the change in stock price
#/ will be rounded to the nearest penny in favor of the order and will be used as your auction improvement amount.
#/ Products: OPT
#/ Supported Exchanges: BOX
</summary>"""
@staticmethod
def AuctionRelative(action: str, quantity: float, offset: float):
order = Order()
order.action = action
order.orderType = "REL"
order.totalQuantity = quantity
order.auxPrice = offset
return order
""" <summary>
#/ The Block attribute is used for large volume option orders on ISE that consist of at least 50 contracts. To execute large-volume
#/ orders over time without moving the market, use the Accumulate/Distribute algorithm.
#/ Products: OPT
</summary>"""
@staticmethod
def Block(action: str, quantity: float, price: float):
order = Order()
order.action = action
order.orderType = "LMT"
order.totalQuantity = quantity # Large volumes!
order.lmtPrice = price
order.blockOrder = True
return order
""" <summary>
#/ A Box Top order executes as a market order at the current best price. If the order is only partially filled, the remainder is submitted as
#/ a limit order with the limit price equal to the price at which the filled portion of the order executed.
#/ Products: OPT
#/ Supported Exchanges: BOX
</summary>"""
@staticmethod
def BoxTop(action: str, quantity: float):
order = Order()
order.action = action
order.orderType = "BOX TOP"
order.totalQuantity = quantity
return order
""" <summary>
#/ A Limit order is an order to buy or sell at a specified price or better. The Limit order ensures that if the order fills,
#/ it will not fill at a price less favorable than your limit price, but it does not guarantee a fill.
#/ Products: BOND, CFD, CASH, FUT, FOP, OPT, STK, WAR
</summary>"""
@staticmethod
def LimitOrder(action: str, quantity: float, limitPrice: float):
order = Order()
order.action = action
order.orderType = "LMT"
order.totalQuantity = quantity
order.lmtPrice = limitPrice
return order
""" <summary>
    #/ Forex orders can be placed in the denomination of the second currency in the pair using the cashQty field
#/ Requires TWS or IBG 963+
#/ https://www.interactivebrokers.com/en/index.php?f=23876#963-02
</summary>"""
@staticmethod
def LimitOrderWithCashQty(action: str, quantity: float, limitPrice: float, cashQty: float):
order = Order()
order.action = action
order.orderType = "LMT"
order.totalQuantity = quantity
order.lmtPrice = limitPrice
order.cashQty = cashQty
return order
""" <summary>
#/ A Limit if Touched is an order to buy (or sell) a contract at a specified price or better, below (or above) the market. This order is
#/ held in the system until the trigger price is touched. An LIT order is similar to a stop limit order, except that an LIT sell order is
#/ placed above the current market price, and a stop limit sell order is placed below.
#/ Products: BOND, CFD, CASH, FUT, FOP, OPT, STK, WAR
</summary>"""
@staticmethod
def LimitIfTouched(action: str, quantity: float, limitPrice: float, triggerPrice: float):
order = Order()
order.action = action
order.orderType = "LIT"
order.totalQuantity = quantity
order.lmtPrice = limitPrice
order.auxPrice = triggerPrice
return order
""" <summary>
#/ A Limit-on-close (LOC) order will be submitted at the close and will execute if the closing price is at or better than the submitted
#/ limit price.
#/ Products: CFD, FUT, STK, WAR
</summary>"""
@staticmethod
def LimitOnClose(action: str, quantity: float, limitPrice: float):
order = Order()
order.action = action
order.orderType = "LOC"
order.totalQuantity = quantity
order.lmtPrice = limitPrice
return order
""" <summary>
#/ A Limit-on-Open (LOO) order combines a limit order with the OPG time in force to create an order that is submitted at the market's open,
#/ and that will only execute at the specified limit price or better. Orders are filled in accordance with specific exchange rules.
#/ Products: CFD, STK, OPT, WAR
</summary>"""
@staticmethod
def LimitOnOpen(action: str, quantity: float, limitPrice: float):
order = Order()
order.action = action
order.tif = "OPG"
order.orderType = "LMT"
order.totalQuantity = quantity
order.lmtPrice = limitPrice
return order
""" <summary>
#/ Passive Relative orders provide a means for traders to seek a less aggressive price than the National Best Bid and Offer (NBBO) while
#/ keeping the order pegged to the best bid (for a buy) or ask (for a sell). The order price is automatically adjusted as the markets move
#/ to keep the order less aggressive. For a buy order, your order price is pegged to the NBB by a less aggressive offset, and if the NBB
#/ moves up, your bid will also move up. If the NBB moves down, there will be no adjustment because your bid will become aggressive and execute.
#/ For a sell order, your price is pegged to the NBO by a less aggressive offset, and if the NBO moves down, your offer will also move down.
#/ If the NBO moves up, there will be no adjustment because your offer will become aggressive and execute. In addition to the offset, you can
#/ define an absolute cap, which works like a limit price, and will prevent your order from being executed above or below a specified level.
#/ The Passive Relative order is similar to the Relative/Pegged-to-Primary order, except that the Passive relative subtracts the offset from
#/ the bid and the Relative adds the offset to the bid.
#/ Products: STK, WAR
</summary>"""
@staticmethod
def PassiveRelative(action: str, quantity: float, offset: float):
order = Order()
order.action = action
order.orderType = "PASSV REL"
order.totalQuantity = quantity
order.auxPrice = offset
return order
""" <summary>
    #/ A pegged-to-midpoint order provides a means for traders to seek a price at the midpoint of the National Best Bid and Offer (NBBO).
    #/ The price automatically adjusts to peg the midpoint as the markets move, to remain aggressive. For a buy order, your bid is pegged to
    #/ the NBBO midpoint and the order price adjusts automatically to continue to peg the midpoint if the market moves. The price only adjusts
    #/ to be more aggressive. If the market moves in the opposite direction, the order will execute.
#/ Products: STK
</summary>"""
@staticmethod
def PeggedToMidpoint(action: str, quantity: float, offset: float, limitPrice: float):
order = Order()
order.action = action
order.orderType = "PEG MID"
order.totalQuantity = quantity
order.auxPrice = offset
order.lmtPrice = limitPrice
return order
""" <summary>
#/ Bracket orders are designed to help limit your loss and lock in a profit by "bracketing" an order with two opposite-side orders.
#/ A BUY order is bracketed by a high-side sell limit order and a low-side sell stop order. A SELL order is bracketed by a high-side buy
#/ stop order and a low side buy limit order.
#/ Products: CFD, BAG, FOP, CASH, FUT, OPT, STK, WAR
</summary>"""
@staticmethod
def BracketOrder(parentOrderId: int, action: str, quantity: float,
limitPrice: float, takeProfitLimitPrice: float,
stopLossPrice: float):
# This will be our main or "parent" order
parent = Order()
parent.orderId = parentOrderId
parent.action = action
parent.orderType = "LMT"
parent.totalQuantity = quantity
parent.lmtPrice = limitPrice
        # The parent and child orders need transmit set to False to prevent accidental executions;
        # the last child order has it set to True to transmit the whole bracket at once.
parent.transmit = False
takeProfit = Order()
takeProfit.orderId = parent.orderId + 1
takeProfit.action = "SELL" if action == "BUY" else "BUY"
takeProfit.orderType = "LMT"
takeProfit.totalQuantity = quantity
takeProfit.lmtPrice = takeProfitLimitPrice
takeProfit.parentId = parentOrderId
takeProfit.transmit = False
stopLoss = Order()
stopLoss.orderId = parent.orderId + 2
stopLoss.action = "SELL" if action == "BUY" else "BUY"
stopLoss.orderType = "STP"
# Stop trigger price
stopLoss.auxPrice = stopLossPrice
stopLoss.totalQuantity = quantity
stopLoss.parentId = parentOrderId
        # In this case, the low-side order is the last child being sent. Therefore, its transmit
        # attribute is set to True to activate all of its predecessors.
stopLoss.transmit = True
bracketOrder = [parent, takeProfit, stopLoss]
return bracketOrder
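    # Hedged usage sketch (ids and prices are placeholders; `app` and
    # `contract` are assumed to exist in a live ibapi session):
    #
    #   for o in OrderSamples.BracketOrder(100, "BUY", 10, 50.0, 55.0, 45.0):
    #       app.placeOrder(o.orderId, contract, o)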
""" <summary>
#/ Products:CFD, FUT, FOP, OPT, STK, WAR
#/ A Market-to-Limit (MTL) order is submitted as a market order to execute at the current best market price. If the order is only
#/ partially filled, the remainder of the order is canceled and re-submitted as a limit order with the limit price equal to the price
#/ at which the filled portion of the order executed.
</summary>"""
@staticmethod
def MarketToLimit(action: str, quantity: float):
order = Order()
order.action = action
order.orderType = "MTL"
order.totalQuantity = quantity
return order
""" <summary>
#/ This order type is useful for futures traders using Globex. A Market with Protection order is a market order that will be cancelled and
#/ resubmitted as a limit order if the entire order does not immediately execute at the market price. The limit price is set by Globex to be
#/ close to the current market price, slightly higher for a sell order and lower for a buy order.
#/ Products: FUT, FOP
</summary>"""
@staticmethod
def MarketWithProtection(action: str, quantity: float):
order = Order()
order.action = action
order.orderType = "MKT PRT"
order.totalQuantity = quantity
return order
""" <summary>
#/ A Stop order is an instruction to submit a buy or sell market order if and when the user-specified stop trigger price is attained or
#/ penetrated. A Stop order is not guaranteed a specific execution price and may execute significantly away from its stop price. A Sell
#/ Stop order is always placed below the current market price and is typically used to limit a loss or protect a profit on a long stock
#/ position. A Buy Stop order is always placed above the current market price. It is typically used to limit a loss or help protect a
#/ profit on a short sale.
#/ Products: CFD, BAG, CASH, FUT, FOP, OPT, STK, WAR
</summary>"""
@staticmethod
def Stop(action: str, quantity: float, stopPrice: float):
order = Order()
order.action = action
order.orderType = "STP"
order.auxPrice = stopPrice
order.totalQuantity = quantity
return order
""" <summary>
#/ A Stop-Limit order is an instruction to submit a buy or sell limit order when the user-specified stop trigger price is attained or
#/ penetrated. The order has two basic components: the stop price and the limit price. When a trade has occurred at or through the stop
#/ price, the order becomes executable and enters the market as a limit order, which is an order to buy or sell at a specified price or better.
#/ Products: CFD, CASH, FUT, FOP, OPT, STK, WAR
</summary>"""
@staticmethod
def StopLimit(action: str, quantity: float, limitPrice: float, stopPrice: float):
order = Order()
order.action = action
order.orderType = "STP LMT"
order.totalQuantity = quantity
order.lmtPrice = limitPrice
order.auxPrice = stopPrice
return order
""" <summary>
#/ A Stop with Protection order combines the functionality of a stop limit order with a market with protection order. The order is set
#/ to trigger at a specified stop price. When the stop price is penetrated, the order is triggered as a market with protection order,
#/ which means that it will fill within a specified protected price range equal to the trigger price +/- the exchange-defined protection
    #/ point range. Any portion of the order that does not fill within this protected range is submitted as a limit order at the exchange-defined
#/ trigger price +/- the protection points.
#/ Products: FUT
</summary>"""
@staticmethod
def StopWithProtection(action: str, quantity: float, stopPrice: float):
order = Order()
order.totalQuantity = quantity
order.action = action
order.orderType = "STP PRT"
order.auxPrice = stopPrice
return order
""" <summary>
#/ A sell trailing stop order sets the stop price at a fixed amount below the market price with an attached "trailing" amount. As the
#/ market price rises, the stop price rises by the trail amount, but if the stock price falls, the stop loss price doesn't change,
#/ and a market order is submitted when the stop price is hit. This technique is designed to allow an investor to specify a limit on the
#/ maximum possible loss, without setting a limit on the maximum possible gain. "Buy" trailing stop orders are the mirror image of sell
#/ trailing stop orders, and are most appropriate for use in falling markets.
#/ Products: CFD, CASH, FOP, FUT, OPT, STK, WAR
</summary>"""
@staticmethod
def TrailingStop(action: str, quantity: float, trailingPercent: float, trailStopPrice: float):
order = Order()
order.action = action
order.orderType = "TRAIL"
order.totalQuantity = quantity
order.trailingPercent = trailingPercent
order.trailStopPrice = trailStopPrice
return order
""" <summary>
#/ A trailing stop limit order is designed to allow an investor to specify a limit on the maximum possible loss, without setting a limit
#/ on the maximum possible gain. A SELL trailing stop limit moves with the market price, and continually recalculates the stop trigger
#/ price at a fixed amount below the market price, based on the user-defined "trailing" amount. The limit order price is also continually
#/ recalculated based on the limit offset. As the market price rises, both the stop price and the limit price rise by the trail amount and
#/ limit offset respectively, but if the stock price falls, the stop price remains unchanged, and when the stop price is hit a limit order
#/ is submitted at the last calculated limit price. A "Buy" trailing stop limit order is the mirror image of a sell trailing stop limit,
#/ and is generally used in falling markets.
#/ Products: BOND, CFD, CASH, FUT, FOP, OPT, STK, WAR
</summary>"""
@staticmethod
def TrailingStopLimit(action: str, quantity: float, lmtPriceOffset: float, trailingAmount: float,
trailStopPrice: float):
order = Order()
order.action = action
order.orderType = "TRAIL LIMIT"
order.totalQuantity = quantity
order.trailStopPrice = trailStopPrice
order.lmtPriceOffset = lmtPriceOffset
order.auxPrice = trailingAmount
return order
""" <summary>
#/ Create combination orders that include options, stock and futures legs (stock legs can be included if the order is routed
#/ through SmartRouting). Although a combination/spread order is constructed of separate legs, it is executed as a single transaction
#/ if it is routed directly to an exchange. For combination orders that are SmartRouted, each leg may be executed separately to ensure
#/ best execution.
#/ Products: OPT, STK, FUT
</summary>"""
@staticmethod
def ComboLimitOrder(action: str, quantity: float, limitPrice: float, nonGuaranteed: bool):
order = Order()
order.action = action
order.orderType = "LMT"
order.totalQuantity = quantity
order.lmtPrice = limitPrice
if nonGuaranteed:
order.smartComboRoutingParams = []
order.smartComboRoutingParams.append(
TagValue("NonGuaranteed", "1"))
return order
""" <summary>
#/ Create combination orders that include options, stock and futures legs (stock legs can be included if the order is routed
#/ through SmartRouting). Although a combination/spread order is constructed of separate legs, it is executed as a single transaction
#/ if it is routed directly to an exchange. For combination orders that are SmartRouted, each leg may be executed separately to ensure
#/ best execution.
#/ Products: OPT, STK, FUT
</summary>"""
@staticmethod
def ComboMarketOrder(action: str, quantity: float, nonGuaranteed: bool):
order = Order()
order.action = action
order.orderType = "MKT"
order.totalQuantity = quantity
if nonGuaranteed:
order.smartComboRoutingParams = []
order.smartComboRoutingParams.append(
TagValue("NonGuaranteed", "1"))
return order
""" <summary>
#/ Create combination orders that include options, stock and futures legs (stock legs can be included if the order is routed
#/ through SmartRouting). Although a combination/spread order is constructed of separate legs, it is executed as a single transaction
#/ if it is routed directly to an exchange. For combination orders that are SmartRouted, each leg may be executed separately to ensure
#/ best execution.
#/ Products: OPT, STK, FUT
</summary>"""
@staticmethod
def LimitOrderForComboWithLegPrices(action: str, quantity: float, legPrices: list, nonGuaranteed: bool):
order = Order()
order.action = action
order.orderType = "LMT"
order.totalQuantity = quantity
order.orderComboLegs = []
for price in legPrices:
comboLeg = OrderComboLeg()
comboLeg.price = price
order.orderComboLegs.append(comboLeg)
if nonGuaranteed:
order.smartComboRoutingParams = []
order.smartComboRoutingParams.append(
TagValue("NonGuaranteed", "1"))
return order
""" <summary>
#/ Create combination orders that include options, stock and futures legs (stock legs can be included if the order is routed
#/ through SmartRouting). Although a combination/spread order is constructed of separate legs, it is executed as a single transaction
#/ if it is routed directly to an exchange. For combination orders that are SmartRouted, each leg may be executed separately to ensure
#/ best execution.
#/ Products: OPT, STK, FUT
</summary>"""
@staticmethod
def RelativeLimitCombo(action: str, quantity: float, limitPrice: float, nonGuaranteed: bool):
order = Order()
order.action = action
order.totalQuantity = quantity
order.orderType = "REL + LMT"
order.lmtPrice = limitPrice
if nonGuaranteed:
order.smartComboRoutingParams = []
order.smartComboRoutingParams.append(
TagValue("NonGuaranteed", "1"))
return order
""" <summary>
#/ Create combination orders that include options, stock and futures legs (stock legs can be included if the order is routed
#/ through SmartRouting). Although a combination/spread order is constructed of separate legs, it is executed as a single transaction
#/ if it is routed directly to an exchange. For combination orders that are SmartRouted, each leg may be executed separately to ensure
#/ best execution.
#/ Products: OPT, STK, FUT
</summary>"""
@staticmethod
def RelativeMarketCombo(action: str, quantity: float, nonGuaranteed: bool):
order = Order()
order.action = action
order.totalQuantity = quantity
order.orderType = "REL + MKT"
if nonGuaranteed:
order.smartComboRoutingParams = []
order.smartComboRoutingParams.append(
TagValue("NonGuaranteed", "1"))
return order
""" <summary>
#/ One-Cancels All (OCA) order type allows an investor to place multiple and possibly unrelated orders assigned to a group. The aim is
#/ to complete just one of the orders, which in turn will cause TWS to cancel the remaining orders. The investor may submit several
#/ orders aimed at taking advantage of the most desirable price within the group. Completion of one piece of the group order causes
#/ cancellation of the remaining group orders while partial completion causes the group to rebalance. An investor might desire to sell
#/ 1000 shares of only ONE of three positions held above prevailing market prices. The OCA order group allows the investor to enter prices
#/ at specified target levels and if one is completed, the other two will automatically cancel. Alternatively, an investor may wish to take
#/ a LONG position in eMini S&P stock index futures in a falling market or else SELL US treasury futures at a more favorable price.
    #/ Grouping the two orders using an OCA order type offers the investor two chances to enter a similar position, while only running the risk
#/ of taking on a single position.
#/ Products: BOND, CASH, FUT, FOP, STK, OPT, WAR
</summary>"""
@staticmethod
def OneCancelsAll(ocaGroup: str, ocaOrders: ListOfOrder, ocaType: int):
for o in ocaOrders:
o.ocaGroup = ocaGroup
o.ocaType = ocaType
return ocaOrders
""" <summary>
#/ Specific to US options, investors are able to create and enter Volatility-type orders for options and combinations rather than price orders.
#/ Option traders may wish to trade and position for movements in the price of the option determined by its implied volatility. Because
#/ implied volatility is a key determinant of the premium on an option, traders position in specific contract months in an effort to take
#/ advantage of perceived changes in implied volatility arising before, during or after earnings or when company specific or broad market
#/ volatility is predicted to change. In order to create a Volatility order, clients must first create a Volatility Trader page from the
#/ Trading Tools menu and as they enter option contracts, premiums will display in percentage terms rather than premium. The buy/sell process
#/ is the same as for regular orders priced in premium terms except that the client can limit the volatility level they are willing to pay or
#/ receive.
#/ Products: FOP, OPT
</summary>"""
@staticmethod
def Volatility(action: str, quantity: float, volatilityPercent: float, volatilityType: int):
order = Order()
order.action = action
order.orderType = "VOL"
order.totalQuantity = quantity
order.volatility = volatilityPercent # Expressed in percentage (40%)
order.volatilityType = volatilityType # 1=daily, 2=annual
return order
@staticmethod
def MarketFHedge(parentOrderId: int, action: str):
# FX Hedge orders can only have a quantity of 0
order = OrderSamples.MarketOrder(action, 0)
order.parentId = parentOrderId
order.hedgeType = "F"
return order
@staticmethod
def PeggedToBenchmark(action: str, quantity: float, startingPrice: float, peggedChangeAmountDecrease: bool,
peggedChangeAmount: float, referenceChangeAmount: float, referenceConId: int,
referenceExchange: str, stockReferencePrice: float,
referenceContractLowerRange: float, referenceContractUpperRange: float):
order = Order()
order.orderType = "PEG BENCH"
# BUY or SELL
order.action = action
order.totalQuantity = quantity
# Beginning with price...
order.startingPrice = startingPrice
# increase/decrease price..
order.isPeggedChangeAmountDecrease = peggedChangeAmountDecrease
# by... (and likewise for price moving in opposite direction)
order.peggedChangeAmount = peggedChangeAmount
# whenever there is a price change of...
order.referenceChangeAmount = referenceChangeAmount
# in the reference contract...
order.referenceContractId = referenceConId
# being traded at...
order.referenceExchange = referenceExchange
# starting reference price is...
order.stockRefPrice = stockReferencePrice
# Keep order active as long as reference contract trades between...
order.stockRangeLower = referenceContractLowerRange
# and...
order.stockRangeUpper = referenceContractUpperRange
return order
@staticmethod
def AttachAdjustableToStop(parent: Order, attachedOrderStopPrice: float, triggerPrice: float,
adjustStopPrice: float):
# Attached order is a conventional STP order in opposite direction
order = OrderSamples.Stop("SELL" if parent.action == "BUY" else "BUY",
parent.totalQuantity, attachedOrderStopPrice)
order.parentId = parent.orderId
# When trigger price is penetrated
order.triggerPrice = triggerPrice
# The parent order will be turned into a STP order
order.adjustedOrderType = "STP"
# With the given STP price
order.adjustedStopPrice = adjustStopPrice
return order
@staticmethod
def AttachAdjustableToStopLimit(parent: Order, attachedOrderStopPrice: float, triggerPrice: float,
adjustedStopPrice: float, adjustedStopLimitPrice: float):
# Attached order is a conventional STP order
order = OrderSamples.Stop("SELL" if parent.action == "BUY" else "BUY",
parent.totalQuantity, attachedOrderStopPrice)
order.parentId = parent.orderId
# When trigger price is penetrated
order.triggerPrice = triggerPrice
# The parent order will be turned into a STP LMT order
order.adjustedOrderType = "STP LMT"
# With the given stop price
order.adjustedStopPrice = adjustedStopPrice
# And the given limit price
order.adjustedStopLimitPrice = adjustedStopLimitPrice
return order
@staticmethod
def AttachAdjustableToTrail(parent: Order, attachedOrderStopPrice: float, triggerPrice: float,
adjustedStopPrice: float, adjustedTrailAmount: float, trailUnit: int):
# Attached order is a conventional STP order
order = OrderSamples.Stop("SELL" if parent.action == "BUY" else "BUY",
parent.totalQuantity, attachedOrderStopPrice)
        order.parentId = parent.orderId
        # When trigger price is penetrated
        order.triggerPrice = triggerPrice
        # The parent order will be turned into a TRAIL order
        order.adjustedOrderType = "TRAIL"
        # With a stop price of...
        order.adjustedStopPrice = adjustedStopPrice
        # trailing by an amount (0) or a percent (1)...
        order.adjustableTrailingUnit = trailUnit
        # of...
        order.adjustedTrailingAmount = adjustedTrailAmount
return order
@staticmethod
def PriceCondition(triggerMethod: int, conId: int, exchange: str, price: float, isMore: bool, isConjunction: bool):
# Conditions have to be created via the OrderCondition.create
priceCondition = order_condition.Create(OrderCondition.Price)
# When this contract...
priceCondition.conId = conId
# traded on this exchange
priceCondition.exchange = exchange
# has a price above/below
priceCondition.isMore = isMore
priceCondition.triggerMethod = triggerMethod
        # this price
priceCondition.price = price
# AND | OR next condition (will be ignored if no more conditions are added)
priceCondition.isConjunctionConnection = isConjunction
return priceCondition
@staticmethod
def ExecutionCondition(symbol: str, secType: str, exchange: str, isConjunction: bool):
execCondition = order_condition.Create(OrderCondition.Execution)
# When an execution on symbol
execCondition.symbol = symbol
# at exchange
execCondition.exchange = exchange
# for this secType
execCondition.secType = secType
# AND | OR next condition (will be ignored if no more conditions are added)
execCondition.isConjunctionConnection = isConjunction
return execCondition
@staticmethod
def MarginCondition(percent: int, isMore: bool, isConjunction: bool):
marginCondition = order_condition.Create(OrderCondition.Margin)
# If margin is above/below
marginCondition.isMore = isMore
# given percent
marginCondition.percent = percent
# AND | OR next condition (will be ignored if no more conditions are added)
marginCondition.isConjunctionConnection = isConjunction
# ! [margin_condition]
return marginCondition
@staticmethod
def PercentageChangeCondition(pctChange: float, conId: int, exchange: str, isMore: bool, isConjunction: bool):
pctChangeCondition = order_condition.Create(
OrderCondition.PercentChange)
# If there is a price percent change measured against last close price above or below...
pctChangeCondition.isMore = isMore
# this amount...
pctChangeCondition.changePercent = pctChange
# on this contract
pctChangeCondition.conId = conId
# when traded on this exchange...
pctChangeCondition.exchange = exchange
# AND | OR next condition (will be ignored if no more conditions are added)
pctChangeCondition.isConjunctionConnection = isConjunction
# ! [percentage_condition]
return pctChangeCondition
@staticmethod
def TimeCondition(time: str, isMore: bool, isConjunction: bool):
timeCondition = order_condition.Create(OrderCondition.Time)
# Before or after...
timeCondition.isMore = isMore
# this time..
timeCondition.time = time
# AND | OR next condition (will be ignored if no more conditions are added)
timeCondition.isConjunctionConnection = isConjunction
return timeCondition
@staticmethod
def VolumeCondition(conId: int, exchange: str, isMore: bool, volume: int, isConjunction: bool):
volCond = order_condition.Create(OrderCondition.Volume)
# Whenever contract...
volCond.conId = conId
# When traded at
volCond.exchange = exchange
# reaches a volume higher/lower
volCond.isMore = isMore
# than this...
volCond.volume = volume
# AND | OR next condition (will be ignored if no more conditions are added)
volCond.isConjunctionConnection = isConjunction
return volCond
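# A hedged usage sketch (added; not part of the original samples): the
# condition helpers above return ibapi OrderCondition objects intended to be
# appended to an Order's `conditions` list before the order is placed.
# `app` (a connected client exposing placeOrder()), `order_id` and `contract`
# are hypothetical placeholders here.
def _example_conditional_stop_limit(app, order_id: int, contract):
    # Sell stop-limit: trigger at 10.00, then work a 9.90 limit once hit.
    order = OrderSamples.StopLimit("SELL", 100, 9.90, 10.00)
    # Only activate while the (hypothetical) conId 8314 trades above 600.0
    # on SMART; trigger method 0 is the exchange default.
    order.conditions.append(
        OrderSamples.PriceCondition(0, 8314, "SMART", 600.0, True, False))
    app.placeOrder(order_id, contract, order)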
# ==== file: pin_to_hud_sample.py | repo: phantomcyber/playbooks | license: Apache-2.0 ====
"""
Demonstrate updating the Heads-Up Display from a Playbook using a variety of indicator types, styles, and sizes.
"""
import phantom.rules as phantom
import json
from datetime import datetime, timedelta
##############################
# Start - Global Code Block
def is_ioc(value):
import phantom.utils as phutils
ioc_funcs = [phutils.is_ip, phutils.is_url, phutils.is_email, phutils.is_hash]
for f in ioc_funcs:
if f(value):
return True, f.__name__.split('_')[1]
return False, None
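# Illustrative note (added; values are hypothetical): is_ioc("8.8.8.8")
# returns (True, "ip") because phutils.is_ip matches, while is_ioc("hello")
# returns (False, None). The second element is derived from the matching
# function's name, e.g. "is_hash" -> "hash".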
def pin_name_mangle(pin_name, container):
return pin_name + '__{0}'.format(container['id'])
# End - Global Code block
##############################
def on_start(container):
phantom.debug('on_start() called')
pin_1(container=container)
pin_2(container=container)
pin_3(container=container)
pin_4(container=container)
return
def pin_3(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
import random
phantom.debug('pin_3() called')
# collect data for 'pin_to_hud_6' call
dest_domain = [x for x in phantom.collect2(container=container, datapath=['artifact:*.cef.destinationDnsDomain']) if x[0]]
pin_name = pin_name_mangle("pin_3", container)
try:
most_rcnt_domain = dest_domain[0][0]
    except IndexError:
pass
else:
pin_id = phantom.get_data(pin_name)
if not pin_id:
ret_val, message, pin_id = phantom.pin(container=container, message="Most Recent Domain", data=most_rcnt_domain, pin_type="card_medium", pin_style="red")
phantom.debug("new pin_3")
else:
ret_val, message = phantom.update_pin(pin_id, message="Most Recent Domain", data=most_rcnt_domain, pin_type="card_medium", pin_style="red")
if ret_val:
phantom.save_data(pin_id, pin_name)
# set container properties for:
update_data = {
}
phantom.update(container, update_data)
return
def pin_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
import random
phantom.debug('pin_1() called')
# collect data for 'pin_to_hud_6' call
dest_ip_artifacts = [x for x in phantom.collect2(container=container, datapath=['artifact:*.cef.destinationAddress']) if x[0]]
sorc_ip_artifacts = [x for x in phantom.collect2(container=container, datapath=['artifact:*.cef.sourceAddress']) if x[0]]
    # a plain list: random.sample() on a set is unsupported on Python 3.11+
    styles = ["white", "red", "purple"]
pin_name = pin_name_mangle("pin_1", container)
pin_id = phantom.get_data(pin_name)
if not pin_id:
ret_val, message, pin_id = phantom.pin(container=container, message="Affected IPs", data=str(len(dest_ip_artifacts) + len(sorc_ip_artifacts)), pin_type="card_medium", pin_style="white")
phantom.debug("new pin_1")
else:
        style = random.choice(styles)
phantom.debug(style)
ret_val, message = phantom.update_pin(pin_id, message="Affected IPs", data=str(len(dest_ip_artifacts) + len(sorc_ip_artifacts)), pin_style=style)
if ret_val:
phantom.save_data(pin_id, pin_name)
# set container properties for:
update_data = {
}
phantom.update(container, update_data)
return
def pin_2(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
import random
phantom.debug('pin_2() called')
# collect data for 'pin_to_hud_6' call
dest_username = [x for x in phantom.collect2(container=container, datapath=['artifact:*.cef.destinationUserName']) if x[0]]
sorc_username = [x for x in phantom.collect2(container=container, datapath=['artifact:*.cef.sourceUserName']) if x[0]]
    styles = ["white", "red", "purple"]
pin_name = pin_name_mangle("pin_2", container)
pin_id = phantom.get_data(pin_name)
if not pin_id:
ret_val, message, pin_id = phantom.pin(container=container, message="Affected Users", data=str(len(dest_username) + len(sorc_username)), pin_type="card_medium", pin_style="purple")
phantom.debug("new pin_2")
else:
# Delete and remake this one, for the sake of demonstration
ret_val, message = phantom.delete_pin(pin_id)
        ret_val, message, pin_id = phantom.pin(container=container, message="Affected Users", data=str(len(dest_username) + len(sorc_username)), pin_type="card_medium", pin_style=random.choice(styles))
if ret_val:
phantom.save_data(pin_id, pin_name)
# set container properties for:
update_data = {
}
phantom.update(container, update_data)
return
def pin_4(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug('pin_4() called')
artifacts = phantom.collect(container=container, datapath='artifacts:*', scope='all')
    artifacts = sorted(artifacts, key=lambda x: x['update_time'], reverse=True)
ioc_count = 0
most_recent_ioc = None
ioc_types = set()
for artifact in artifacts:
for key, value in artifact['cef'].items():
value = str(value)
ret, ioc_type = is_ioc(value)
if ret:
if most_recent_ioc is None:
most_recent_ioc = value
ioc_count += 1
ioc_types.add(ioc_type)
pin4_name = pin_name_mangle("pin_4", container)
pin5_name = pin_name_mangle("pin_5", container)
pin6_name = pin_name_mangle("pin_6", container)
pin_id_ioc_cnt = phantom.get_data(pin4_name)
pin_id_ioc_rct = phantom.get_data(pin5_name)
pin_id_ioc_type = phantom.get_data(pin6_name)
if not pin_id_ioc_cnt:
ret_val, message, pin_id_ioc_cnt = phantom.pin(container=container, message="IOC Count", data=str(ioc_count), pin_type="card_medium", pin_style="white")
else:
ret_val, message = phantom.update_pin(pin_id_ioc_cnt, message="IOC Count", data=str(ioc_count), pin_type="card_medium", pin_style="red")
if ret_val:
phantom.save_data(pin_id_ioc_cnt, pin4_name)
if ioc_count:
if not pin_id_ioc_rct:
ret_val, message, pin_id_ioc_rct = phantom.pin(container=container, message="Most Recent IOC", data=most_recent_ioc, pin_type="card_medium", pin_style="purple")
else:
ret_val, message = phantom.update_pin(pin_id_ioc_rct, message="Most Recent IOC", data=most_recent_ioc, pin_type="card_medium", pin_style="purple")
if ret_val:
phantom.save_data(pin_id_ioc_rct, pin5_name)
if not pin_id_ioc_type:
ret_val, message, pin_id_ioc_type = phantom.pin(container=container, message="IOC Types", data=", ".join(ioc_types))
else:
ret_val, message = phantom.update_pin(pin_id_ioc_type, message="IOC Types", data=", ".join(ioc_types))
if ret_val:
phantom.save_data(pin_id_ioc_type, pin6_name)
return
def on_finish(container, summary):
phantom.debug('on_finish() called')
# This function is called after all actions are completed.
# summary of all the action and/or all details of actions
# can be collected here.
# summary_json = phantom.get_summary()
# if 'result' in summary_json:
# for action_result in summary_json['result']:
# if 'action_run_id' in action_result:
# action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)
# phantom.debug(action_results)
return
# ==== file: tools/perf/core/external_modules.py | repo: chromium/chromium | license: BSD-3-Clause ====
# Copyright 2019 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allow importing external modules which may be missing in some platforms.
These modules are normally provided by the vpython environment manager. But
some platforms, e.g. CromeOs, do not have access to this facility.
To be safe, instead of e.g.:
import pandas
clients should do:
from core.external_modules import pandas
Tests that require pandas to work can be skipped as follows:
from core.external_modules import pandas
@unittest.skipIf(pandas is None, 'pandas not available')
class TestsForMyModule(unittest.TestCase):
def testSomeBehavior(self):
# test some behavior that requires pandas module.
Finally, scripts that require any of these external dependencies to work
properly should call:
from core import external_modules
if __name__ == '__main__':
external_modules.RequireModules()
# the rest of your script here.
to exit early with a suitable error message if the dependencies are not
satisfied.
"""
import sys
try:
import numpy # pylint: disable=import-error
except ImportError:
numpy = None
try:
import pandas # pylint: disable=import-error
except ImportError:
pandas = None
def RequireModules():
if numpy is None or pandas is None:
sys.exit(
'ERROR: Some required python modules are not available.\n\n'
'Make sure to run this script using vpython or ensure that '
'module dependencies listed in src/.vpython are satisfied.')
# ==== file: apps/PGLBox/src/distributed_program.py | repo: PaddlePaddle/PGL | license: Apache-2.0 ====
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributed Program
"""
import copy
import paddle
import paddle.static as static
from place import get_cuda_places
def make_distributed_train_program(args, model_dict):
device_ids = get_cuda_places()
train_opt = copy.deepcopy(static.default_main_program()._fleet_opt)
#print("train opt = ", train_opt)
model_dict.startup_program = static.default_startup_program()
model_dict.train_program = static.default_main_program().clone()
model_dict.train_program._fleet_opt = train_opt
model_dict.train_program._fleet_opt['worker_places'] = device_ids
with open("join_main_program.pbtxt", "w") as fout:
fout.write(str(model_dict.train_program))
with open("join_startup_program.pbtxt", "w") as fout:
fout.write(str(model_dict.startup_program))
def make_distributed_infer_program(args, model_dict):
device_ids = get_cuda_places()
infer_opt = copy.deepcopy(static.default_main_program()._fleet_opt)
model_dict.train_program = static.default_main_program().clone()
model_dict.train_program._fleet_opt = infer_opt
opt_info = model_dict.train_program._fleet_opt
opt_info['worker_places'] = device_ids
opt_info["dump_fields"] = [
args.dump_node_name + ".tmp_0", args.dump_node_emb_name + ".tmp_0"
]
opt_info["dump_fields_path"] = args.local_result_path
opt_info["is_dump_in_simple_mode"] = True
opt_info["user_define_dump_filename"] = "000"
opt_info["dump_fields_mode"] = "a"
with open("infer_before_main_program.pbtxt", "w") as fout:
fout.write(str(model_dict.train_program))
remove_op(model_dict.train_program, "push_gpups_sparse")
    remove_backward(model_dict.train_program)
with open("infer_main_program.pbtxt", "w") as fout:
fout.write(str(model_dict.train_program))
def remove_op(program, name):
"""
remove op
"""
block = program.global_block()
for ids, op in list(enumerate(block.ops)):
if op.type == name:
block._remove_op(ids)
return
def remove_backward(program):
    """
    Put all ops into test mode and strip the backward pass (including the
    preceding fill_constant op) from the program's global block.
    """
block = program.global_block()
last_idx = -1
for ids, op in list(enumerate(block.ops)):
if op.has_attr("is_test"):
op._set_attr("is_test", True)
for ids, op in list(enumerate(block.ops)):
if op._is_backward_op():
last_idx = ids
break
last_idx -= 1 # remove fill_constant
for ids, op in list(enumerate(block.ops)):
if ids > last_idx:
block._remove_op(last_idx + 1)
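# A minimal sketch (added for illustration; not part of the original module):
# build a tiny static program and strip an op with remove_op(). The op type
# string "scale" matches the op added by paddle.scale in static mode.
def _example_remove_op():
    paddle.enable_static()
    prog = static.Program()
    startup = static.Program()
    with static.program_guard(prog, startup):
        x = static.data(name="x", shape=[None, 8], dtype="float32")
        paddle.scale(x, scale=2.0)
    remove_op(prog, "scale")  # the single scale op is gone from the block
    return prog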
# ==== file: KiBuzzard/deps/fonttools/Lib/fontTools/cffLib/width.py | repo: gregdavill/KiBuzzard | license: MIT (and others) ====
# -*- coding: utf-8 -*-
"""T2CharString glyph width optimizer.
CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX``
value do not need to specify their width in their charstring, saving bytes.
This module determines the optimum ``defaultWidthX`` and ``nominalWidthX``
values for a font, when provided with a list of glyph widths."""
from fontTools.ttLib import TTFont
from collections import defaultdict
from operator import add
from functools import reduce
class missingdict(dict):
def __init__(self, missing_func):
self.missing_func = missing_func
def __missing__(self, v):
return self.missing_func(v)
def cumSum(f, op=add, start=0, decreasing=False):
keys = sorted(f.keys())
minx, maxx = keys[0], keys[-1]
total = reduce(op, f.values(), start)
if decreasing:
missing = lambda x: start if x > maxx else total
domain = range(maxx, minx - 1, -1)
else:
missing = lambda x: start if x < minx else total
domain = range(minx, maxx + 1)
out = missingdict(missing)
v = start
for x in domain:
v = op(v, f[x])
out[x] = v
return out
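# Illustrative note (added): with f = defaultdict(int, {1: 10, 3: 5}), the
# forward cumulative sum gives cumSum(f)[1] == 10, cumSum(f)[2] == 10 and
# cumSum(f)[3] == 15; queries below minx return `start` (0) and queries above
# maxx return the grand total (15), via the missingdict fallback.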
def byteCost(widths, default, nominal):
if not hasattr(widths, 'items'):
d = defaultdict(int)
for w in widths:
d[w] += 1
widths = d
cost = 0
for w,freq in widths.items():
if w == default: continue
diff = abs(w - nominal)
if diff <= 107:
cost += freq
elif diff <= 1131:
cost += freq * 2
else:
cost += freq * 5
return cost
def optimizeWidthsBruteforce(widths):
"""Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""
d = defaultdict(int)
for w in widths:
d[w] += 1
# Maximum number of bytes using default can possibly save
maxDefaultAdvantage = 5 * max(d.values())
minw, maxw = min(widths), max(widths)
domain = list(range(minw, maxw+1))
bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)
bestCost = len(widths) * 5 + 1
for nominal in domain:
if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
continue
for default in domain:
cost = byteCost(widths, default, nominal)
if cost < bestCost:
bestCost = cost
bestDefault = default
bestNominal = nominal
return bestDefault, bestNominal
def optimizeWidths(widths):
"""Given a list of glyph widths, or dictionary mapping glyph width to number of
glyphs having that, returns a tuple of best CFF default and nominal glyph widths.
This algorithm is linear in UPEM+numGlyphs."""
if not hasattr(widths, 'items'):
d = defaultdict(int)
for w in widths:
d[w] += 1
widths = d
keys = sorted(widths.keys())
minw, maxw = keys[0], keys[-1]
domain = list(range(minw, maxw+1))
# Cumulative sum/max forward/backward.
cumFrqU = cumSum(widths, op=add)
cumMaxU = cumSum(widths, op=max)
cumFrqD = cumSum(widths, op=add, decreasing=True)
cumMaxD = cumSum(widths, op=max, decreasing=True)
# Cost per nominal choice, without default consideration.
nomnCostU = missingdict(lambda x: cumFrqU[x] + cumFrqU[x-108] + cumFrqU[x-1132]*3)
nomnCostD = missingdict(lambda x: cumFrqD[x] + cumFrqD[x+108] + cumFrqD[x+1132]*3)
nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
# Cost-saving per nominal choice, by best default choice.
dfltCostU = missingdict(lambda x: max(cumMaxU[x], cumMaxU[x-108]*2, cumMaxU[x-1132]*5))
dfltCostD = missingdict(lambda x: max(cumMaxD[x], cumMaxD[x+108]*2, cumMaxD[x+1132]*5))
dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
# Combined cost per nominal choice.
bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
# Best nominal.
nominal = min(domain, key=lambda x: bestCost[x])
# Work back the best default.
bestC = bestCost[nominal]
dfltC = nomnCost[nominal] - bestCost[nominal]
ends = []
if dfltC == dfltCostU[nominal]:
starts = [nominal, nominal-108, nominal-1132]
for start in starts:
while cumMaxU[start] and cumMaxU[start] == cumMaxU[start-1]:
start -= 1
ends.append(start)
else:
starts = [nominal, nominal+108, nominal+1132]
for start in starts:
while cumMaxD[start] and cumMaxD[start] == cumMaxD[start+1]:
start += 1
ends.append(start)
default = min(ends, key=lambda default: byteCost(widths, default, nominal))
return default, nominal
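def _example_optimize():
    """A small illustrative sketch (added; the widths are hypothetical): run
    the optimizer on a width histogram and report the resulting byte cost."""
    widths = [500] * 3 + [600] * 7
    default, nominal = optimizeWidths(widths)
    # byteCost() shows how many charstring bytes the chosen pair still costs.
    return default, nominal, byteCost(widths, default, nominal)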
def main(args=None):
"""Calculate optimum defaultWidthX/nominalWidthX values"""
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.width",
description=main.__doc__,
)
parser.add_argument('inputs', metavar='FILE', type=str, nargs='+',
help="Input TTF files")
parser.add_argument('-b', '--brute-force', dest="brute", action="store_true",
help="Use brute-force approach (VERY slow)")
args = parser.parse_args(args)
for fontfile in args.inputs:
font = TTFont(fontfile)
hmtx = font['hmtx']
widths = [m[0] for m in hmtx.metrics.values()]
if args.brute:
default, nominal = optimizeWidthsBruteforce(widths)
else:
default, nominal = optimizeWidths(widths)
print("glyphs=%d default=%d nominal=%d byteCost=%d" % (len(widths), default, nominal, byteCost(widths, default, nominal)))
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
main()
# ==== file: bin/docs-create-example-outputs.py | repo: SCons/scons | license: MIT ====
#!/usr/bin/env python
#
# Searches through the whole doc/user tree and creates
# all output files for the single examples.
#
import os
import sys
import SConsExamples
if __name__ == "__main__":
print("Checking whether all example names are unique...")
if SConsExamples.exampleNamesAreUnique(os.path.join('doc', 'user')):
print("OK")
else:
print(
"Not all example names and suffixes are unique! "
"Please correct the errors listed above and try again."
)
sys.exit(1)
SConsExamples.createAllExampleOutputs(os.path.join('doc', 'user'))
# ==== file: asv/benchmarks.py | repo: airspeed-velocity/asv | license: BSD-3-Clause ====
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import os
import re
import tempfile
import itertools
from .console import log
from . import util, runner
from .repo import NoSuchNameError
class Benchmarks(dict):
"""
Manages and runs the set of benchmarks in the project.
"""
api_version = 2
def __init__(self, conf, benchmarks, regex=None):
"""
Initialize a list of benchmarks.
Parameters
----------
conf : Config object
The project's configuration
benchmarks : list
Benchmarks as from Benchmarks._disc_benchmarks
or loaded from a file.
regex : str or list of str, optional
`regex` is a list of regular expressions matching the
benchmarks to run. If none are provided, all benchmarks
are run.
            For parameterized benchmarks, the regex is matched against
            `funcname(param0, param1, ...)` so that individual parameter
            combinations can be included or excluded.
"""
self._conf = conf
self._benchmark_dir = conf.benchmark_dir
if not regex:
regex = []
if isinstance(regex, str):
regex = [regex]
self._all_benchmarks = {}
self._benchmark_selection = {}
for benchmark in benchmarks:
self._all_benchmarks[benchmark['name']] = benchmark
if benchmark['params']:
self._benchmark_selection[benchmark['name']] = []
for idx, param_set in enumerate(
itertools.product(*benchmark['params'])):
name = f"{benchmark['name']}({', '.join(param_set)})"
if not regex or any(re.search(reg, name) for reg in regex):
self[benchmark['name']] = benchmark
self._benchmark_selection[benchmark['name']].append(idx)
else:
self._benchmark_selection[benchmark['name']] = None
if not regex or any(re.search(reg, benchmark['name']) for reg in regex):
self[benchmark['name']] = benchmark
@property
def benchmark_selection(self):
"""
Active sets of parameterized benchmarks.
"""
return self._benchmark_selection
@property
def benchmark_dir(self):
"""
Benchmark directory.
"""
return self._benchmark_dir
def filter_out(self, skip):
"""
Return a new Benchmarks object, with some benchmarks filtered out.
"""
benchmarks = super(Benchmarks, self).__new__(self.__class__)
benchmarks._conf = self._conf
benchmarks._benchmark_dir = self._benchmark_dir
benchmarks._all_benchmarks = self._all_benchmarks
selected_idx = {}
for name, benchmark in self.items():
if name not in skip:
benchmarks[name] = benchmark
if name in self._benchmark_selection:
selected_idx[name] = self._benchmark_selection[name]
benchmarks._benchmark_selection = selected_idx
return benchmarks
@classmethod
def discover(cls, conf, repo, environments, commit_hash, regex=None,
check=False):
"""
Discover benchmarks in the given `benchmark_dir`.
Parameters
----------
conf : Config object
The project's configuration
repo : Repo object
The project's repository
environments : list of Environment
List of environments available for benchmark discovery.
commit_hash : list of str
Commit hashes to use for benchmark discovery.
regex : str or list of str, optional
`regex` is a list of regular expressions matching the
benchmarks to run. If none are provided, all benchmarks
are run.
check : bool
Run additional checks after discovery.
"""
benchmarks = cls._disc_benchmarks(conf, repo, environments, commit_hash, check)
return cls(conf, benchmarks, regex=regex)
@classmethod
def _disc_benchmarks(cls, conf, repo, environments, commit_hashes, check):
"""
Discover all benchmarks in a directory tree.
"""
root = conf.benchmark_dir
cls.check_tree(root)
if len(environments) == 0:
raise util.UserError("No available environments")
# Try several different commits:
#
# - First of commit_hashes provided
# - Tips of branches from configuration file
# - Rest of the commit_hashes
#
def iter_hashes():
for h in commit_hashes[:1]:
yield h
for branch in conf.branches:
try:
yield repo.get_hash_from_name(branch)
except NoSuchNameError:
continue
for h in commit_hashes[1:]:
yield h
        def iter_unique(items):
            # renamed parameter to avoid shadowing the builtin iter()
            seen = set()
            for item in items:
                if item not in seen:
                    seen.add(item)
                    yield item
try_hashes = iter_unique(iter_hashes())
log.info("Discovering benchmarks")
with log.indent():
last_err = None
for env, commit_hash in itertools.product(environments, try_hashes):
env.create()
if last_err is not None:
log.warning("Failed: trying different commit/environment")
result_dir = tempfile.mkdtemp()
try:
env.install_project(conf, repo, commit_hash)
env_vars = dict(os.environ)
env_vars.update(env.env_vars)
result_file = os.path.join(result_dir, 'result.json')
env.run(
[runner.BENCHMARK_RUN_SCRIPT, 'discover',
os.path.abspath(root),
os.path.abspath(result_file)],
cwd=result_dir,
env=env_vars,
dots=False)
try:
with open(result_file, 'r') as fp:
benchmarks = json.load(fp)
except (IOError, ValueError):
log.error("Invalid discovery output")
raise util.UserError()
break
except (util.UserError, util.ProcessError) as err:
last_err = err
continue
except KeyboardInterrupt:
raise util.UserError("Interrupted.")
finally:
util.long_path_rmtree(result_dir)
else:
raise util.UserError("Failed to build the project and import the benchmark suite.")
if check:
log.info("Checking benchmarks")
with log.indent():
result_dir = tempfile.mkdtemp()
try:
out, err, retcode = env.run(
[runner.BENCHMARK_RUN_SCRIPT, 'check',
os.path.abspath(root)],
cwd=result_dir,
dots=False,
env=env_vars,
valid_return_codes=None,
return_stderr=True,
redirect_stderr=True)
finally:
util.long_path_rmtree(result_dir)
out = out.strip()
if retcode == 0:
if out:
log.info(out)
log.info("No problems found.")
else:
if out:
log.error(out)
raise util.UserError("Benchmark suite check failed.")
return benchmarks
@classmethod
def check_tree(cls, root, require_init_py=True):
"""
Check the benchmark tree for files with the same name as
directories.
Also, ensure that the top-level directory has an __init__.py file.
Raises
------
UserError
A .py file and directory with the same name (excluding the
extension) were found.
"""
if os.path.basename(root) == '__pycache__':
return
if not os.path.isfile(os.path.join(root, '__init__.py')):
# Not a Python package directory
if require_init_py:
raise util.UserError(
f"No __init__.py file in '{root}'")
else:
return
# First, check for the case where a .py file and a directory
# have the same name (without the extension). This can't be
# handled, so just raise an exception
found = set()
for filename in os.listdir(root):
path = os.path.join(root, filename)
if os.path.isfile(path):
filename, ext = os.path.splitext(filename)
if ext == '.py':
found.add(filename)
for dirname in os.listdir(root):
path = os.path.join(root, dirname)
if os.path.isdir(path):
if dirname in found:
raise util.UserError(
"Found a directory and python file with same name in "
"benchmark tree: '{0}'".format(path))
cls.check_tree(path, require_init_py=False)
@classmethod
def get_benchmark_file_path(cls, results_dir):
"""
Get the path to the benchmarks.json file in the results dir.
"""
return os.path.join(results_dir, "benchmarks.json")
def save(self):
"""
Save the ``benchmarks.json`` file, which is a cached set of the
metadata about the discovered benchmarks, in the results dir.
"""
path = self.get_benchmark_file_path(self._conf.results_dir)
util.write_json(path, self._all_benchmarks, self.api_version)
@classmethod
def load(cls, conf, regex=None):
"""
Load the benchmark descriptions from the `benchmarks.json` file.
If the file is not found, one of the given `environments` will
be used to discover benchmarks.
Parameters
----------
conf : Config object
The project's configuration
regex : str or list of str, optional
`regex` is a list of regular expressions matching the
benchmarks to load. See __init__ docstring.
Returns
-------
benchmarks : Benchmarks object
"""
try:
path = cls.get_benchmark_file_path(conf.results_dir)
if not os.path.isfile(path):
raise util.UserError(f"Benchmark list file {path} missing!")
d = util.load_json(path, api_version=cls.api_version)
benchmarks = d.values()
return cls(conf, benchmarks, regex=regex)
except util.UserError as err:
if "asv update" in str(err):
# Don't give conflicting instructions
raise
raise util.UserError("{}\nUse `asv run --bench just-discover` to "
"regenerate benchmarks.json".format(str(err)))
# ==== file: archivy/click_web/resources/cmd_exec.py | repo: archivy/archivy | license: MIT ====
import os
import shutil
import subprocess
import sys
import tempfile
import traceback
from pathlib import Path
from typing import List
from flask import Response, request
from werkzeug.utils import secure_filename
from archivy import click_web
from .input_fields import FieldId
logger = None
def exec(command_path):
"""
Execute the command and stream the output from it as response
:param command_path:
"""
command_path = "cli/" + command_path
global logger
logger = click_web.logger
omitted = ["shell", "run", "routes", "create-admin"]
root_command, *commands = command_path.split("/")
cmd = ["archivy"]
req_to_args = RequestToCommandArgs()
# root command_index should not add a command
cmd.extend(req_to_args.command_args(0))
for i, command in enumerate(commands):
if command in omitted:
return Response(status=400)
cmd.append(command)
cmd.extend(req_to_args.command_args(i + 1))
def _generate_output():
yield _create_cmd_header(commands)
try:
yield from _run_script_and_generate_stream(req_to_args, cmd)
except Exception as e:
# exited prematurely, show the error to user
yield f"\nERROR: Got exception when reading output from script: {type(e)}\n"
yield traceback.format_exc()
raise
return Response(_generate_output(), mimetype="text/plain")
def _run_script_and_generate_stream(
req_to_args: "RequestToCommandArgs", cmd: List[str]
):
"""
    Execute the command via Popen and yield its output
"""
logger.info("Executing archivy command")
if not os.environ.get("PYTHONIOENCODING"):
# Fix unicode on windows
os.environ["PYTHONIOENCODING"] = "UTF-8"
process = subprocess.Popen(
cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
logger.info("script running Pid: %d", process.pid)
encoding = sys.getdefaultencoding()
with process.stdout:
for line in iter(process.stdout.readline, b""):
yield line.decode(encoding)
process.wait() # wait for the subprocess to exit
logger.info("script finished Pid: %d", process.pid)
for fi in req_to_args.field_infos:
fi.after_script_executed()
def _create_cmd_header(commands: List[str]):
"""
Generate a command header.
Note:
        here we always allow generating HTML as long as it sits between CLICK-WEB comments.
This way the JS frontend can insert it in the correct place in the DOM.
"""
def generate():
yield "<!-- CLICK_WEB START HEADER -->"
yield '<div class="command-line">Executing: {}</div>'.format("/".join(commands))
yield "<!-- CLICK_WEB END HEADER -->"
    # important: yield this block as one string so it is pushed to the client
    # in one go and the whole block can be treated as html.
html_str = "\n".join(generate())
return html_str
def _create_result_footer(req_to_args: "RequestToCommandArgs"):
"""
Generate a footer.
Note:
        here we always allow generating HTML as long as it sits between CLICK-WEB comments.
This way the JS frontend can insert it in the correct place in the DOM.
"""
to_download = [
fi
for fi in req_to_args.field_infos
if fi.generate_download_link and fi.link_name
]
    # important: yield this block as one string so it is pushed to the client
    # in one go and the whole block can be treated as html by the JS frontend.
lines = []
lines.append("<!-- CLICK_WEB START FOOTER -->")
if to_download:
lines.append("<b>Result files:</b><br>")
for fi in to_download:
lines.append("<ul> ")
lines.append(f"<li>{_get_download_link(fi)}<br>")
lines.append("</ul>")
else:
lines.append("<b>DONE</b>")
lines.append("<!-- CLICK_WEB END FOOTER -->")
html_str = "\n".join(lines)
yield html_str
def _get_download_link(field_info):
"""Hack as url_for need request context"""
rel_file_path = Path(field_info.file_path).relative_to(click_web.OUTPUT_FOLDER)
uri = f"/static/results/{rel_file_path.as_posix()}"
return f'<a href="{uri}">{field_info.link_name}</a>'
class RequestToCommandArgs:
def __init__(self):
keys = [key for key in list(request.form.keys()) + list(request.files.keys())]
field_infos = [FieldInfo.factory(key) for key in keys if key != "csrf_token"]
# important to sort them so they will be in expected order on command line
self.field_infos = list(sorted(field_infos))
def command_args(self, command_index) -> List[str]:
"""
Convert the post request into a list of command line arguments
:param command_index: (int) the index for the command to get arguments for.
:return: list of command line arguments for command at that cmd_index
"""
args = []
# only include relevant fields for this command index
commands_field_infos = [
fi for fi in self.field_infos if fi.param.command_index == command_index
]
commands_field_infos = sorted(commands_field_infos)
for fi in commands_field_infos:
# must be called mostly for saving and preparing file output.
fi.before_script_execute()
if fi.cmd_opt.startswith("--"):
# it's an option
args.extend(self._process_option(fi))
else:
# argument(s)
if isinstance(fi, FieldFileInfo):
# it's a file, append the written temp file path
# TODO: does file upload support multiple keys? In that case support it.
args.append(fi.file_path)
else:
arg_values = request.form.getlist(fi.key)
has_values = bool("".join(arg_values))
if has_values:
if fi.param.nargs == -1:
# Variadic argument, in html form each argument
# is a separate line in a textarea.
# treat each line we get from text area as a separate argument.
for value in arg_values:
values = value.splitlines()
logger.info(
f'variadic arguments, split into: "{values}"'
)
args.extend(values)
else:
logger.info(f'arg_value: "{arg_values}"')
args.extend(arg_values)
return args
def _process_option(self, field_info):
vals = request.form.getlist(field_info.key)
if field_info.is_file:
if field_info.link_name:
# it's a file, append the file path
yield field_info.cmd_opt
yield field_info.file_path
elif field_info.param.param_type == "flag":
# To work with flag that is default True
# a hidden field with same name is also sent by form.
# This is to detect if checkbox was not checked as then
# we will get the field anyway with the "off flag" as value.
if len(vals) == 1:
off_flag = vals[0]
flag_on_cmd_line = off_flag
else:
# we got both off and on flags, checkbox is checked.
on_flag = vals[1]
flag_on_cmd_line = on_flag
yield flag_on_cmd_line
elif "".join(vals):
# opt with value, if option was given multiple times get the values for each.
# flag options should always be set if we get them
# for normal options they must have a non empty value
yield field_info.cmd_opt
for val in vals:
if val:
yield val
else:
# option with empty values, should not be added to command line.
pass
class FieldInfo:
"""
Extract information from the encoded form input field name
the parts:
    [command_index].[opt_or_arg_index].[opt_or_arg].[click_type].[html_input_type].[opt_or_arg_name]
e.g.
"0.0.option.text.text.--an-option"
"0.1.argument.file[rb].text.an-argument"
"""
@staticmethod
def factory(key):
field_id = FieldId.from_string(key)
is_file = field_id.click_type.startswith("file")
is_path = field_id.click_type.startswith("path")
is_uploaded = key in request.files
if is_file:
if is_uploaded:
field_info = FieldFileInfo(field_id)
else:
field_info = FieldOutFileInfo(field_id)
elif is_path:
if is_uploaded:
field_info = FieldPathInfo(field_id)
else:
field_info = FieldPathOutInfo(field_id)
else:
field_info = FieldInfo(field_id)
return field_info
def __init__(self, param: FieldId):
self.param = param
self.key = param.key
"Type of option (file, text)"
self.is_file = self.param.click_type.startswith("file")
"The actual command line option (--debug)"
self.cmd_opt = param.name
self.generate_download_link = False
def before_script_execute(self):
pass
def after_script_executed(self):
pass
def __str__(self):
return str(self.param)
def __lt__(self, other):
"Make class sortable"
return (self.param.command_index, self.param.param_index) < (
other.param.command_index,
other.param.param_index,
)
def __eq__(self, other):
return self.key == other.key
class FieldFileInfo(FieldInfo):
"""
Use for processing input fields of file type.
Saves the posted data to a temp file.
"""
"temp dir is on class in order to be uniqe for each request"
_temp_dir = None
def __init__(self, fimeta):
super().__init__(fimeta)
# Extract the file mode that is in the type e.g file[rw]
self.mode = self.param.click_type.split("[")[1][:-1]
self.generate_download_link = True if "w" in self.mode else False
self.link_name = f"{self.cmd_opt}.out"
logger.info(f"File mode for {self.key} is {self.mode}")
def before_script_execute(self):
self.save()
@classmethod
def temp_dir(cls):
if not cls._temp_dir:
cls._temp_dir = tempfile.mkdtemp(dir=click_web.OUTPUT_FOLDER)
logger.info(f"Temp dir: {cls._temp_dir}")
return cls._temp_dir
def save(self):
logger.info("Saving...")
logger.info("field value is a file! %s", self.key)
file = request.files[self.key]
        # if the user does not select a file, the browser also
        # submits an empty part without a filename
if file.filename == "":
raise ValueError("No selected file")
elif file and file.filename:
filename = secure_filename(file.filename)
name, suffix = os.path.splitext(filename)
fd, filename = tempfile.mkstemp(
dir=self.temp_dir(), prefix=name, suffix=suffix
)
self.file_path = filename
logger.info(f"Saving {self.key} to {filename}")
file.save(filename)
def __str__(self):
res = [super().__str__()]
res.append(f"file_path: {self.file_path}")
return ", ".join(res)
class FieldOutFileInfo(FieldFileInfo):
"""
Used when file option is just for output and form posted it as hidden or text field.
Just create a empty temp file to give it's path to command.
"""
def __init__(self, fimeta):
super().__init__(fimeta)
if self.param.form_type == "text":
self.link_name = request.form[self.key]
            # set the suffix to the name provided by the form;
            # this way it will at least have the same extension when downloaded
self.file_suffix = request.form[self.key]
else:
            # hidden field: no preferred file name can be provided by the user
self.file_suffix = ".out"
def save(self):
name = secure_filename(self.key)
        # mkstemp returns an (fd, path) tuple; unpack it so file_path is a str
        fd, filename = tempfile.mkstemp(
dir=self.temp_dir(), prefix=name, suffix=self.file_suffix
)
logger.info(f"Creating empty file for {self.key} as {filename}")
self.file_path = filename
class FieldPathInfo(FieldFileInfo):
"""
Use for processing input fields of path type.
Extracts the posted data to a temp folder.
When script finished zip that folder and provide download link to zip file.
"""
def save(self):
super().save()
zip_extract_dir = tempfile.mkdtemp(dir=self.temp_dir())
logger.info(f"Extracting: {self.file_path} to {zip_extract_dir}")
shutil.unpack_archive(self.file_path, zip_extract_dir, "zip")
self.file_path = zip_extract_dir
def after_script_executed(self):
super().after_script_executed()
fd, filename = tempfile.mkstemp(dir=self.temp_dir(), prefix=self.key)
folder_path = self.file_path
self.file_path = filename
logger.info(f"Zipping {self.key} to {filename}")
self.file_path = shutil.make_archive(self.file_path, "zip", folder_path)
logger.info(f"Zip file created {self.file_path}")
self.generate_download_link = True
class FieldPathOutInfo(FieldOutFileInfo):
"""
Use for processing output fields of path type.
Create a folder and use as path to script.
When script finished zip that folder and provide download link to zip file.
"""
def save(self):
super().save()
self.file_path = tempfile.mkdtemp(dir=self.temp_dir())
def after_script_executed(self):
super().after_script_executed()
fd, filename = tempfile.mkstemp(dir=self.temp_dir(), prefix=self.key)
folder_path = self.file_path
self.file_path = filename
logger.info(f"Zipping {self.key} to {filename}")
self.file_path = shutil.make_archive(self.file_path, "zip", folder_path)
logger.info(f"Zip file created {self.file_path}")
self.generate_download_link = True
# ==== file: Alignment/MuonAlignmentAlgorithms/scripts/geometryDiffVisualizer.py | repo: cms-sw/cmssw | license: Apache-2.0 ====
#! /usr/bin/env python3
from __future__ import print_function
import sys, ROOT
from geometryDiffVisualization import *
ROOT.gROOT.SetBatch(1)
cargs = sys.argv[:]
if len(cargs) != 5:
print("Makes schematic drawings of the detector in various projections with overlayed chambers")
print("shifted and rotated by their correction amounts times some scale (geom2-geom1)")
print("usage: ./geometryDiffVisualizer.py label svg_template_dir geometry2.xml geometry1.xml")
print("The label will be included into the filenames as geoVisual_label__specifier.png")
print("")
sys.exit()
label = cargs[1]
svg_template_dir = cargs[2]
xmlfile2 = cargs[3]
xmlfile1 = cargs[4]
g2 = MuonGeometry(xmlfile2)
g1 = MuonGeometry(xmlfile1)
pfx = "geoVisual__" + label + "__"
sf_dt = 200.
sf_csc = 100.
draw_station(g2, g1, 1, pfx+"st_1_DT.svg", length_factor=sf_dt, angle_factor=sf_dt, template_dir=svg_template_dir)
draw_station(g2, g1, 2, pfx+"st_2_DT.svg", length_factor=sf_dt, angle_factor=sf_dt, template_dir=svg_template_dir)
draw_station(g2, g1, 3, pfx+"st_3_DT.svg", length_factor=sf_dt, angle_factor=sf_dt, template_dir=svg_template_dir)
draw_station(g2, g1, 4, pfx+"st_4_DT.svg", length_factor=sf_dt, angle_factor=sf_dt, template_dir=svg_template_dir)
draw_wheel(g2, g1, -2, pfx+"wh_a_DT.svg", length_factor=sf_dt, angle_factor=sf_dt, template_dir=svg_template_dir)
draw_wheel(g2, g1, -1, pfx+"wh_b_DT.svg", length_factor=sf_dt, angle_factor=sf_dt, template_dir=svg_template_dir)
draw_wheel(g2, g1, 0, pfx+"wh_c_DT.svg", length_factor=sf_dt, angle_factor=sf_dt, template_dir=svg_template_dir)
draw_wheel(g2, g1, +1, pfx+"wh_d_DT.svg", length_factor=sf_dt, angle_factor=sf_dt, template_dir=svg_template_dir)
draw_wheel(g2, g1, +2, pfx+"wh_e_DT.svg", length_factor=sf_dt, angle_factor=sf_dt, template_dir=svg_template_dir)
draw_disk(g2, g1, 1, 1, pfx+"e1_st1_CSC.svg", length_factor=sf_csc, angle_factor=sf_csc, template_dir=svg_template_dir)
draw_disk(g2, g1, 1, 2, pfx+"e1_st2_CSC.svg", length_factor=sf_csc, angle_factor=sf_csc, template_dir=svg_template_dir)
draw_disk(g2, g1, 1, 3, pfx+"e1_st3_CSC.svg", length_factor=sf_csc, angle_factor=sf_csc, template_dir=svg_template_dir)
draw_disk(g2, g1, 1, 4, pfx+"e1_st4_CSC.svg", length_factor=sf_csc, angle_factor=sf_csc, template_dir=svg_template_dir)
draw_disk(g2, g1, 2, 1, pfx+"e2_st1_CSC.svg", length_factor=sf_csc, angle_factor=sf_csc, template_dir=svg_template_dir)
draw_disk(g2, g1, 2, 2, pfx+"e2_st2_CSC.svg", length_factor=sf_csc, angle_factor=sf_csc, template_dir=svg_template_dir)
draw_disk(g2, g1, 2, 3, pfx+"e2_st3_CSC.svg", length_factor=sf_csc, angle_factor=sf_csc, template_dir=svg_template_dir)
draw_disk(g2, g1, 2, 4, pfx+"e2_st4_CSC.svg", length_factor=sf_csc, angle_factor=sf_csc, template_dir=svg_template_dir)
|
e37da497144485e596f92a103c0f20d0015e1472
|
6b3cc22e7b8d356b01d638dffc820322312da3e4
|
/pkg_blender/blendtorch/btb/__init__.py
|
22b5f9d0b9f7853af93ac053155f314a5d5ba143
|
[
"MIT"
] |
permissive
|
cheind/pytorch-blender
|
911961f760dde82a591f7cb26c0c648b120ad570
|
d2386df70e14c190669509e28009f46aed561c88
|
refs/heads/develop
| 2023-04-08T01:44:09.341680
| 2022-04-03T09:37:17
| 2022-04-03T09:37:17
| 169,447,021
| 509
| 43
|
MIT
| 2023-03-20T03:06:22
| 2019-02-06T17:32:38
|
Python
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
__init__.py
|
# flake8: noqa
from .animation import AnimationController
from .offscreen import OffScreenRenderer
from .renderer import CompositeRenderer, CompositeSelection
from .arguments import parse_blendtorch_args
from .paths import add_scene_dir_to_path
from .publisher import DataPublisher
from .camera import Camera
from .duplex import DuplexChannel
from . import env, utils, materials
__version__ = "0.4.0"
|
c1fa9adb6694e497a0b5adddaa1050e9602a5c00
|
110044654f706e920380dad2779bb32a77f1f26f
|
/test/option/option_profile.py
|
19e68f6b20d1906e17d6ab0cba85b63e3429276e
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SCons/scons
|
89327bb9635cee6e7cc59249edca9cd859d7d1ff
|
b2a7d7066a2b854460a334a5fe737ea389655e6e
|
refs/heads/master
| 2023-09-01T19:37:03.603772
| 2023-08-28T04:32:42
| 2023-08-28T04:32:42
| 104,670,160
| 1,827
| 342
|
MIT
| 2023-09-14T15:13:21
| 2017-09-24T19:23:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,012
|
py
|
option_profile.py
|
#!/usr/bin/env python
#
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import contextlib
import sys
from io import StringIO
import TestSCons
test = TestSCons.TestSCons()
try:
import pstats
except ImportError:
test.skip_test('No pstats module, skipping test.\n')
test.write('SConstruct', """\
DefaultEnvironment(tools=[])
Command('file.out', 'file.in', Copy("$TARGET", "$SOURCE"))
""")
test.write('file.in', "file.in\n")
scons_prof = test.workpath('scons.prof')
test.run(arguments = "--profile=%s -h" % scons_prof)
test.must_contain_all_lines(test.stdout(), ['usage: scons [OPTIONS] [VARIABLES] [TARGETS]'])
try:
save_stdout = sys.stdout
with contextlib.closing(StringIO()) as sys.stdout:
stats = pstats.Stats(scons_prof)
stats.sort_stats('time')
stats.strip_dirs().print_stats()
s = sys.stdout.getvalue()
finally:
sys.stdout = save_stdout
test.must_contain_all_lines(s, ['Main.py', '_main'])
scons_prof = test.workpath('scons2.prof')
test.run(arguments = "--profile %s" % scons_prof)
try:
save_stdout = sys.stdout
with contextlib.closing(StringIO()) as sys.stdout:
stats = pstats.Stats(scons_prof)
stats.sort_stats('time')
stats.strip_dirs().print_stats()
s = sys.stdout.getvalue()
finally:
sys.stdout = save_stdout
test.must_contain_all_lines(s, ['Main.py', '_main', 'FS.py'])
scons_prof = test.workpath('scons3.prof')
test.run(arguments = "--profile %s --debug=memory -h" % scons_prof)
expect = [
'usage: scons [OPTIONS] [VARIABLES] [TARGETS]',
'Options:'
]
test.must_contain_all_lines(test.stdout(), expect)
expect = 'Memory before reading SConscript files'
lines = test.stdout().split('\n')
memory_lines = [l for l in lines if l.find(expect) != -1]
test.fail_test(len(memory_lines) != 1)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
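# --- A minimal sketch (not part of the test) of a tidier capture pattern:
# pstats.Stats accepts a ``stream`` argument, so the report can go straight
# into a StringIO instead of temporarily swapping sys.stdout as above.
# ``prof_path`` is a hypothetical profile file path.
def profile_report(prof_path):
    import io
    import pstats
    buf = io.StringIO()
    pstats.Stats(prof_path, stream=buf).sort_stats('time').strip_dirs().print_stats()
    return buf.getvalue()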
|
b36d9f2e12c520ff8515c5dd15b313108e519a2c
|
554718851656376ad2bceb282de30459167ffeb2
|
/tests/tensorflow/hooks/test_training_end.py
|
ce59c14dc9121d9f1b2558159309b8aa2a953cef
|
[
"Apache-2.0"
] |
permissive
|
awslabs/sagemaker-debugger
|
d6ae6a6177a6cb457972772e2b3021e8a9dcc621
|
37ecf0aaeb24ab2adbe7f0ad664d0e50fa4154f2
|
refs/heads/master
| 2023-09-05T05:20:02.458427
| 2023-04-20T20:48:11
| 2023-04-20T20:48:11
| 222,554,670
| 162
| 89
|
Apache-2.0
| 2023-08-23T14:31:27
| 2019-11-18T22:12:36
|
Python
|
UTF-8
|
Python
| false
| false
| 674
|
py
|
test_training_end.py
|
# Standard Library
import subprocess
import sys
# Third Party
import pytest
import tensorflow as tf
# First Party
from smdebug.core.access_layer.utils import has_training_ended
@pytest.mark.slow # 0:03 to run
def test_training_job_has_ended(out_dir):
tf.reset_default_graph()
subprocess.check_call(
[
sys.executable,
"examples/tensorflow/local/simple.py",
"--out_dir",
out_dir,
"--steps",
"10",
"--save_interval",
"5",
],
env={"CUDA_VISIBLE_DEVICES": "-1", "SMDEBUG_LOG_LEVEL": "debug"},
)
assert has_training_ended(out_dir)
|
5e7efb0090ba13b2eb1961f92b02d91d8079aa1e
|
d066f7fe739fb78f74ec2de8ccbfefdd4270f60f
|
/appimagebuilder/modules/setup/helpers/gtk.py
|
510388c9fad99376913bb54ec3f96b9a2f7fe8b5
|
[
"MIT"
] |
permissive
|
AppImageCrafters/appimage-builder
|
666e75363a74f615cdb3673b3ca9d51a6d292a49
|
f38699ef3644fa5409a5a262b7b6d99d6fb85db9
|
refs/heads/main
| 2023-08-17T06:34:54.029664
| 2023-06-03T17:51:04
| 2023-06-03T17:51:04
| 218,847,680
| 270
| 54
|
MIT
| 2023-09-06T17:04:18
| 2019-10-31T19:44:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
gtk.py
|
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
import re
import subprocess
from .base_helper import BaseHelper
from ..environment import Environment
class Gtk(BaseHelper):
"""
Helper for making Gtk based applications portable
Reference: https://developer.gnome.org/gtk3/stable/gtk-running.html
"""
def configure(self, env: Environment, preserve_files):
prefix = self.app_dir / "usr"
env.set("GTK_EXE_PREFIX", str(prefix))
env.set("GTK_DATA_PREFIX", str(prefix))
gtk_path = [
str(path)
for path in self.finder.find("lib/**/gtk-?.0", [self.finder.is_dir])
]
env.set("GTK_PATH", gtk_path)
for path in self.finder.find("usr/share/icons/*", [self.finder.is_dir]):
subprocess.run(["gtk-update-icon-cache", str(path)])
|
1bfee0a18a54933565de27f2c43b7d57197bdf1d
|
3982e6daf88e453c726f6b39a081fc37ce15a08a
|
/discovery-provider/integration_tests/tasks/entity_manager/test_utils.py
|
e5da8fbb40b85f8d4f985d01d184c8a0667999ce
|
[
"Apache-2.0"
] |
permissive
|
AudiusProject/audius-protocol
|
45808e11082608ad5b76a425d287cb6d94a6dab0
|
7cf1d8e378520460d24a7cc8c29e9927c0944cb3
|
refs/heads/main
| 2023-08-09T10:34:28.850436
| 2023-08-09T04:28:17
| 2023-08-09T04:28:17
| 201,821,771
| 531
| 108
|
NOASSERTION
| 2023-09-14T21:27:52
| 2019-08-11T22:31:43
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 16,730
|
py
|
test_utils.py
|
import json
from datetime import datetime
from typing import List
import pytest
from sqlalchemy import desc
from web3.datastructures import AttributeDict
from integration_tests.utils import populate_mock_db
from src.models.indexing.cid_data import CIDData
from src.models.users.user import User
from src.tasks.entity_manager.utils import (
PLAYLIST_ID_OFFSET,
TRACK_ID_OFFSET,
USER_ID_OFFSET,
copy_record,
get_metadata_type_and_format,
parse_metadata,
save_cid_metadata,
)
from src.utils import helpers
from src.utils.db_session import get_db
def test_save_cid_metadata(app):
"""Tests that cid is persisted correctly"""
with app.app_context():
db = get_db()
with db.scoped_session() as session:
cid_metadata = {
"cid1": {"user_id": 1},
"cid2": {"user_id": 2},
"cid3": {"track_id": 2},
"cid4": {"playlist_id": 3},
}
cid_type = {
"cid1": "user",
"cid2": "user",
"cid3": "track",
"cid4": "playlist_data",
}
save_cid_metadata(session, cid_metadata, cid_type)
users = (
session.query(CIDData)
.filter(CIDData.type == "user")
.order_by(desc(CIDData.cid))
.all()
)
assert len(users) == 2
assert users[0].data == {"user_id": 2}
assert users[1].data == {"user_id": 1}
tracks = session.query(CIDData).filter(CIDData.type == "track").all()
assert len(tracks) == 1
assert tracks[0].data == {"track_id": 2}
playlists = (
session.query(CIDData).filter(CIDData.type == "playlist_data").all()
)
assert len(playlists) == 1
assert playlists[0].data == {"playlist_id": 3}
def test_valid_parse_metadata(app):
"""Test that cid metadata blobs are processed correctly"""
with app.app_context():
expected_cid_type = {
"QmUpdateUser1": "user",
"QmCreateTrack1": "track",
"QmUpdatePlaylist1": "playlist_data",
}
expected_metadata = {
"QmUpdateUser1": {
"profile_picture": None,
"profile_picture_sizes": "QmYRHAJ4YuLjT4fLLRMg5STnQA4yDpiBmzk5R3iCDTmkmk",
"cover_photo": None,
"cover_photo_sizes": "QmUk61QDUTzhNqjnCAWipSp3jnMmXBmtTUC2mtF5F6VvUy",
"bio": "🌞👄🌞",
"name": "raymont updated",
"location": "chik fil yay!!",
"handle": "rayjacobsonupdated",
"associated_wallets": None,
"associated_sol_wallets": None,
"collectibles": None,
"playlist_library": {
"contents": [
{"playlist_id": "Audio NFTs", "type": "explore_playlist"},
{"playlist_id": 4327, "type": "playlist"},
{"playlist_id": 52792, "type": "playlist"},
{"playlist_id": 63949, "type": "playlist"},
{
"contents": [
{"playlist_id": 6833, "type": "playlist"},
{"playlist_id": 4735, "type": "playlist"},
{"playlist_id": 114799, "type": "playlist"},
{"playlist_id": 115049, "type": "playlist"},
{"playlist_id": 89495, "type": "playlist"},
],
"id": "d515f4db-1db2-41df-9e0c-0180302a24f9",
"name": "WIP",
"type": "folder",
},
{
"contents": [
{"playlist_id": 9616, "type": "playlist"},
{"playlist_id": 112826, "type": "playlist"},
],
"id": "a0da6552-ddc4-4d13-a19e-ecc63ca23e90",
"name": "Community",
"type": "folder",
},
{
"contents": [
{"playlist_id": 128608, "type": "playlist"},
{"playlist_id": 90778, "type": "playlist"},
{"playlist_id": 94395, "type": "playlist"},
{"playlist_id": 97193, "type": "playlist"},
],
"id": "1163fbab-e710-4d33-8769-6fcb02719d7b",
"name": "Actually Albums",
"type": "folder",
},
{"playlist_id": 131423, "type": "playlist"},
{"playlist_id": 40151, "type": "playlist"},
]
},
"events": {"is_mobile_user": True},
"is_deactivated": False,
"is_storage_v2": False,
"artist_pick_track_id": TRACK_ID_OFFSET,
"allow_ai_attribution": False,
},
"QmCreateTrack1": {
"track_cid": "some-track-cid",
"owner_id": 1,
"title": "track 1",
"route_id": None,
"duration": 0,
"cover_art": None,
"cover_art_sizes": "QmdxhDiRUC3zQEKqwnqksaSsSSeHiRghjwKzwoRvm77yaZ",
"tags": "realmagic,rickyreed,theroom",
"genre": "R&B/Soul",
"mood": "Empowering",
"credits_splits": None,
"create_date": "2020-07-11 08:22:15",
"release_date": "Sat Jul 11 2020 01:19:58 GMT-0700",
"file_type": None,
"description": None,
"license": "All rights reserved",
"isrc": None,
"iswc": None,
"track_segments": [
{
"duration": 6.016,
"multihash": "QmabM5svgDgcRdQZaEKSMBCpSZrrYy2y87L8Dx8EQ3T2jp",
}
],
"download": {
"cid": None,
"is_downloadable": False,
"requires_follow": False,
},
"remix_of": {"tracks": [{"parent_track_id": 75808}]},
"is_unlisted": False,
"field_visibility": {
"mood": True,
"tags": True,
"genre": True,
"share": True,
"play_count": True,
"remixes": True,
},
"stem_of": None,
"is_premium": False,
"premium_conditions": None,
"is_playlist_upload": True,
"ai_attribution_user_id": None,
"preview_cid": None,
"preview_start_seconds": None,
"audio_upload_id": None,
},
"QmUpdatePlaylist1": {
"playlist_id": 1,
"playlist_contents": {"track_ids": [{"time": 1660927554, "track": 1}]},
"playlist_name": "playlist 1 updated",
"playlist_image_sizes_multihash": "",
"description": "",
"is_album": False,
"is_private": False,
"is_image_autogenerated": None,
},
}
user1_tx_metadata = expected_metadata["QmUpdateUser1"].copy()
# Add invalid unicode to verify fetch_cid_metadata sanitizes metadata
user1_tx_metadata["name"] += "\ud835"
user1_json = json.dumps(user1_tx_metadata)
track1_tx_metadata = expected_metadata["QmCreateTrack1"].copy()
track1_tx_metadata.pop("premium_conditions")
track1_tx_metadata["incorrect_key"] = True
track1_json = json.dumps(track1_tx_metadata)
playlist1_tx_metadata = expected_metadata["QmUpdatePlaylist1"].copy()
playlist1_json = json.dumps(playlist1_tx_metadata)
tx_receipts = {
"UpdateUser1Tx": [
{
"args": AttributeDict(
{
"_entityId": USER_ID_OFFSET,
"_entityType": "User",
"_userId": USER_ID_OFFSET,
"_action": "Update",
"_metadata": f'{{"cid": "QmUpdateUser1", "data": {user1_json}}}',
"_signer": "user1wallet",
}
)
},
],
"CreateTrack1Tx": [
{
"args": AttributeDict(
{
"_entityId": TRACK_ID_OFFSET,
"_entityType": "Track",
"_userId": 1,
"_action": "Create",
"_metadata": f'{{"cid": "QmCreateTrack1", "data": {track1_json}}}',
"_signer": "user1wallet",
}
)
},
],
"UpdatePlaylist1Tx": [
{
"args": AttributeDict(
{
"_entityId": PLAYLIST_ID_OFFSET,
"_entityType": "Playlist",
"_userId": 1,
"_action": "Update",
"_metadata": f'{{"cid": "QmUpdatePlaylist1", "data": {playlist1_json}}}',
"_signer": "user1wallet",
}
)
},
],
}
for tx_receipt in tx_receipts.values():
for event in tx_receipt:
metadata = helpers.get_tx_arg(event, "_metadata")
action = helpers.get_tx_arg(event, "_action")
entity_type = helpers.get_tx_arg(event, "_entityType")
cid_metadata, cid = parse_metadata(metadata, action, entity_type)
metadata_type, _ = get_metadata_type_and_format(entity_type)
assert cid is not None
assert cid in expected_metadata
assert cid_metadata == expected_metadata[cid]
assert metadata_type == expected_cid_type[cid]
def test_invalid_parse_metadata(app):
"""Test that invalid cid metadata blobs are not processed"""
with app.app_context():
track_metadata = {
"track_cid": "some-track-cid",
"owner_id": 1,
"title": "track 1",
"route_id": None,
"duration": 0,
"cover_art": None,
"cover_art_sizes": "QmdxhDiRUC3zQEKqwnqksaSsSSeHiRghjwKzwoRvm77yaZ",
"tags": "realmagic,rickyreed,theroom",
"genre": "R&B/Soul",
"mood": "Empowering",
"credits_splits": None,
"create_date": "2020-07-11 08:22:15",
"release_date": "Sat Jul 11 2020 01:19:58 GMT-0700",
"file_type": None,
"description": None,
"license": "All rights reserved",
"isrc": None,
"iswc": None,
"track_segments": [
{
"duration": 6.016,
"multihash": "QmabM5svgDgcRdQZaEKSMBCpSZrrYy2y87L8Dx8EQ3T2jp",
}
],
"download": {
"cid": None,
"is_downloadable": False,
"requires_follow": False,
},
"remix_of": {"tracks": [{"parent_track_id": 75808}]},
"is_unlisted": False,
"field_visibility": {
"mood": True,
"tags": True,
"genre": True,
"share": True,
"play_count": True,
"remixes": True,
},
"stem_of": None,
"is_premium": False,
"premium_conditions": None,
"is_playlist_upload": True,
"ai_attribution_user_id": None,
}
track_json = json.dumps(track_metadata)
tx_receipts = {
"CreateTrack2InvalidTx": [
{
"args": AttributeDict(
{
"_entityId": TRACK_ID_OFFSET + 1,
"_entityType": "Track",
"_userId": 1,
"_action": "Create",
"_metadata": f'{{"cid": "QmCreateInvalidTrack", "data": "{track_json}"}}', # stringified value for data key
"_signer": "user1wallet",
}
)
},
],
"CreateTrack3InvalidTx": [
{
"args": AttributeDict(
{
"_entityId": TRACK_ID_OFFSET + 2,
"_entityType": "Track",
"_userId": 1,
"_action": "Create",
"_metadata": f'{{"data": {track_json}}}', # missing cid key
"_signer": "user1wallet",
}
)
},
],
"CreateTrack4InvalidTx": [
{
"args": AttributeDict(
{
"_entityId": TRACK_ID_OFFSET + 3,
"_entityType": "Track",
"_userId": 1,
"_action": "Create",
"_metadata": '{"cid": "QmCreateInvalidTrack", "data": {}}', # no values in data json
"_signer": "user1wallet",
}
)
},
],
}
for tx_receipt in tx_receipts.values():
for event in tx_receipt:
metadata = helpers.get_tx_arg(event, "_metadata")
action = helpers.get_tx_arg(event, "_action")
entity_type = helpers.get_tx_arg(event, "_entityType")
with pytest.raises(Exception):
parse_metadata(metadata, action, entity_type)
def test_copy_record(app):
with app.app_context():
db = get_db()
entities = {
"users": [
{
"user_id": 1,
"handle": "user-1",
"wallet": "user1wallet",
"bio": "hi",
"primary_id": 1,
"secondary_ids": [2, 3],
"artist_pick_track_id": 1,
}
]
}
populate_mock_db(db, entities)
with db.scoped_session() as session:
all_users: List[User] = session.query(User).all()
assert len(all_users) == 1
user_1 = all_users[0]
user_1_updated_at = user_1.updated_at
user_1_block_number = user_1.blocknumber
user_1_blockhash = user_1.blockhash
user_1_txhash = user_1.txhash
block_number = 10
event_blockhash = hex(10)
txhash = "0x01"
block_datetime = datetime.now()
user_1_copy = copy_record(
user_1, block_number, event_blockhash, txhash, block_datetime
)
old_user_attributes = user_1.get_attributes_dict()
user_copy_attributes = user_1_copy.get_attributes_dict()
for key, value in user_copy_attributes.items():
if key == "is_current":
assert value == False
assert old_user_attributes[key] == True
elif key == "updated_at":
assert value == block_datetime
assert old_user_attributes[key] == user_1_updated_at
elif key == "blocknumber":
assert value == block_number
assert old_user_attributes[key] == user_1_block_number
elif key == "blockhash":
assert value == event_blockhash
assert old_user_attributes[key] == user_1_blockhash
elif key == "txhash":
assert value == txhash
assert old_user_attributes[key] == user_1_txhash
else:
assert value == old_user_attributes[key]
|
48d53c014374fc449768ebc08100a75ffc1069b5
|
27d00032088ccb3beca2718612b0129623e73fc2
|
/tests/unit/test_config.py
|
a030d069d148c4cf774c125cd1e9c7fdcb6120f8
|
[
"Apache-2.0"
] |
permissive
|
intel/CPU-Manager-for-Kubernetes
|
9831391c36cdcc33237da55ed80e4cede22f5381
|
c08228e0a9445237e35afef648cb8ea98a3143db
|
refs/heads/master
| 2023-05-27T13:25:43.399473
| 2023-01-18T10:05:25
| 2023-01-18T10:05:25
| 76,979,646
| 177
| 89
|
Apache-2.0
| 2022-12-27T15:48:18
| 2016-12-20T17:59:39
|
Python
|
UTF-8
|
Python
| false
| false
| 8,190
|
py
|
test_config.py
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intel import config, topology
class MockConfig():
def __init__(self, conf):
self.metadata = MockMetadata()
self.data = conf
class MockMetadata():
def __init__(self):
self.annotations = {"Owner": ""}
FAKE_CONFIG = {
"exclusive": {
"0": {
"0,9": ["1001"],
"1,10": ["1002"],
"2,11": ["1003"]
},
"1": {
"3,12": ["1004"]
}
},
"shared": {
"0": {
"4,13,5,14": ["2001", "2002", "2003"]
},
"1": {}
},
"infra": {
"0": {
"6,15,7,16,8,17": ["3001", "4002", "5003"]
},
"1": {}
}
}
def return_fake_platform():
sockets = dict()
sockets["0"] = topology.Socket("0")
sockets["1"] = topology.Socket("1")
core0 = topology.Core("0")
core0.pool = "exclusive"
cpu0 = topology.CPU("0")
cpu0.isolated = True
cpu3 = topology.CPU("3")
cpu3.isolated = True
core0.cpus["0"] = cpu0
core0.cpus["3"] = cpu3
sockets["0"].cores["0"] = core0
core1 = topology.Core("1")
core1.pool = "shared"
cpu1 = topology.CPU("1")
cpu1.isolated = True
cpu4 = topology.CPU("4")
cpu4.isolated = True
core1.cpus["1"] = cpu1
core1.cpus["4"] = cpu4
sockets["0"].cores["1"] = core1
core2 = topology.Core("2")
core2.pool = "shared"
cpu2 = topology.CPU("2")
cpu5 = topology.CPU("5")
core2.cpus["2"] = cpu2
core2.cpus["5"] = cpu5
sockets["0"].cores["2"] = core2
return topology.Platform(sockets)
"""@patch('intel.k8s.get_config_map',
MagicMock(return_value={'config': yaml.dump(FAKE_CONFIG)}))
def test_get_config():
c = config.get_config("fake-name")
assert len(c.pools.keys()) == 3
assert len(c.pools["exclusive"].sockets.keys()) == 2
assert len(c.pools["shared"].sockets.keys()) == 2
assert len(c.pools["infra"].sockets.keys()) == 2
assert len(c.pools["exclusive"].sockets["0"].core_lists.keys()) == 3
assert len(c.pools["exclusive"].sockets["1"].core_lists.keys()) == 1
assert len(c.pools["shared"].sockets["0"].core_lists.keys()) == 1
assert "0,9" in c.pools["exclusive"].sockets["0"].core_lists.keys()
assert "1,10" in c.pools["exclusive"].sockets["0"].core_lists.keys()
assert "2,11" in c.pools["exclusive"].sockets["0"].core_lists.keys()
assert "3,12" in c.pools["exclusive"].sockets["1"].core_lists.keys()
assert "1001" in c.pools["exclusive"].sockets["0"].core_lists["0,9"].tasks
assert "1002" in c.pools["exclusive"].sockets["0"].core_lists["1,10"].tasks
assert "1003" in c.pools["exclusive"].sockets["0"].core_lists["2,11"].tasks
assert "1004" in c.pools["exclusive"].
sockets["1"].core_lists["3,12"].tasks"""
def test_config_class():
c = config.build_config(FAKE_CONFIG)
assert len(c.get_pools()) == 3
assert "exclusive" in c.get_pools()
assert "shared" in c.get_pools()
assert "infra" in c.get_pools()
assert c.get_pool("exclusive").name == "exclusive"
assert c.get_pool("shared").name == "shared"
assert c.get_pool("infra").name == "infra"
assert c.get_pool("exclusive").exclusive
assert not c.get_pool("shared").exclusive
assert not c.get_pool("infra").exclusive
c.add_pool(True, "fake-pool")
assert "fake-pool" in c.get_pools()
assert c.get_pool("fake-pool").name == "fake-pool"
assert c.get_pool("fake-pool").exclusive
def test_pool_class():
c = config.build_config(FAKE_CONFIG)
p = c.get_pool("exclusive")
assert p.name == "exclusive"
assert p.is_exclusive()
assert len(p.get_sockets()) == 2
assert "0" in p.get_sockets()
assert "1" in p.get_sockets()
assert p.get_socket("0").socket_id == "0"
assert len(p.get_core_lists()) == 4
assert len(p.get_core_lists("0")) == 3
assert len(p.get_core_lists("1")) == 1
assert p.get_core_list("0,9", "0").core_id == "0,9"
assert p.get_core_list("1,10").core_id == "1,10"
assert p.get_core_list("3,12").core_id == "3,12"
p.update_clist("0,9", "1004")
assert "1001" in p.sockets["0"].core_lists["0,9"].tasks
assert "1004" in p.sockets["0"].core_lists["0,9"].tasks
p.remove_task("0,9", "1004")
assert "1001" in p.sockets["0"].core_lists["0,9"].tasks
assert "1004" not in p.sockets["0"].core_lists["0,9"].tasks
def test_socket_class():
c = config.build_config(FAKE_CONFIG)
s = c.get_pool("exclusive").get_socket("0")
assert s.socket_id == "0"
assert len(s.core_lists.keys()) == 3
assert s.get_core_list("0,9").core_id == "0,9"
assert len(s.get_core_lists()) == 3
assert "0,9" in s.get_core_lists()
assert "1,10" in s.get_core_lists()
assert "2,11" in s.get_core_lists()
s.add_core_list("fake-core")
assert len(s.get_core_lists()) == 4
assert "fake-core" in s.get_core_lists()
def test_core_list_class():
c = config.build_config(FAKE_CONFIG)
cl = c.get_pool("exclusive").get_core_list("0,9", "0")
assert cl.core_id == "0,9"
assert len(cl.tasks) == 1
assert len(cl.get_tasks()) == 1
assert "1001" in cl.get_tasks()
cl.add_task("1005")
assert len(cl.get_tasks()) == 2
assert "1001" in cl.get_tasks()
assert "1005" in cl.get_tasks()
cl.remove_task("1005")
assert len(cl.get_tasks()) == 1
assert "1001" in cl.get_tasks()
assert "1005" not in cl.get_tasks()
def test_update_configmap_exclusive():
c = dict()
c = config.update_configmap_exclusive("exclusive",
return_fake_platform(), c)
assert "exclusive" in c.keys()
assert len(c["exclusive"].keys()) == 2
assert "0" in c["exclusive"].keys()
assert "1" in c["exclusive"].keys()
assert len(c["exclusive"]["0"].keys()) == 1
assert "0,3" in c["exclusive"]["0"].keys()
assert len(c["exclusive"]["1"].keys()) == 0
def test_update_configmap_shared():
c = dict()
c = config.update_configmap_shared("shared", return_fake_platform(), c)
assert "shared" in c.keys()
assert len(c["shared"].keys()) == 2
assert "0" in c["shared"].keys()
assert "1" in c["shared"].keys()
assert len(c["shared"]["0"].keys()) == 1
assert "1,4,2,5" in c["shared"]["0"].keys()
assert len(c["shared"]["1"].keys()) == 0
"""def test_set_configmap():
c = config.build_config(FAKE_CONFIG)
def configmap_mock(unused1, configmap, unused2):
c = yaml.load(configmap.data["config"], Loader=yaml.FullLoader)
conf = config.build_config(c)
pools = conf.get_pools()
assert len(pools) == 3
assert "exclusive" in pools
assert "shared" in pools
assert "infra" in pools
exl_pool = conf.get_pool("exclusive")
cl_exclusive = [cl.core_id for cl in exl_pool.get_core_lists()]
sha_pool = conf.get_pool("shared")
cl_shared = [cl.core_id for cl in sha_pool.get_core_lists()]
inf_pool = conf.get_pool("infra")
cl_infra = [cl.core_id for cl in inf_pool.get_core_lists()]
assert exl_pool.is_exclusive()
assert not sha_pool.is_exclusive()
assert not inf_pool.is_exclusive()
assert "0,9" in cl_exclusive
assert "1,10" in cl_exclusive
assert "2,11" in cl_exclusive
assert "3,12" in cl_exclusive
assert "4,13,5,14" in cl_shared
assert "6,15,7,16,8,17" in cl_infra
mock = MagicMock(name="mock")
mock.side_effect = configmap_mock
with patch('intel.k8s.create_config_map', new=mock):
config.set_config(c, "fake-name")"""
|
1195aed41323c1e63d570b5d2f3ec431e0e39fdf
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/tests/models/barthez/test_tokenization_barthez.py
|
fa128f5091b9f40fa2c436feaaf710a3a231604f
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 5,573
|
py
|
test_tokenization_barthez.py
|
# coding=utf-8
# Copyright 2020 Ecole Polytechnique and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BarthezTokenizer
rust_tokenizer_class = BarthezTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
self.tokenizer = tokenizer
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<pad>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<s>")
self.assertEqual(vocab_keys[1], "<pad>")
self.assertEqual(vocab_keys[-1], "<mask>")
self.assertEqual(len(vocab_keys), 101_122)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 101_122)
@require_torch
def test_prepare_batch(self):
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
batch = self.tokenizer(
src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 6), batch.input_ids.shape)
self.assertEqual((2, 6), batch.attention_mask.shape)
result = batch.input_ids.tolist()[0]
self.assertListEqual(expected_src_tokens, result)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
@slow
def test_tokenizer_integration(self):
# fmt: off
expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="moussaKam/mbarthez",
revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
sequences=sequences,
)
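# --- A minimal usage sketch (not part of the test suite): encoding a French
# sentence with the same checkpoint the tests exercise above.
if __name__ == "__main__":
    tok = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
    enc = tok("Le transformeur est un modèle d'apprentissage profond.")
    print(enc["input_ids"])  # list of token ids, wrapped in <s> ... </s>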
|
1743538a2c8016ad8205da95e2452269e130eb58
|
b2fef77e77f77b6cfd83da4ec2f89cbe73330844
|
/tests/test_se_block.py
|
de129f4d557119573e611af5a02fb2d46a32dc4a
|
[
"Apache-2.0"
] |
permissive
|
Project-MONAI/MONAI
|
8ef2593cc5fd1cd16e13464f927fe563fe3f5bac
|
e48c3e2c741fa3fc705c4425d17ac4a5afac6c47
|
refs/heads/dev
| 2023-09-02T00:21:04.532596
| 2023-09-01T06:46:45
| 2023-09-01T06:46:45
| 214,485,001
| 4,805
| 996
|
Apache-2.0
| 2023-09-14T15:19:30
| 2019-10-11T16:41:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
test_se_block.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import torch
from parameterized import parameterized
from monai.networks import eval_mode
from monai.networks.blocks import SEBlock
from monai.networks.layers.factories import Act, Norm
from tests.utils import test_script_save
device = "cuda" if torch.cuda.is_available() else "cpu"
TEST_CASES = [
[
{"spatial_dims": 2, "in_channels": 4, "n_chns_1": 20, "n_chns_2": 30, "n_chns_3": 4, "r": 2},
(7, 4, 64, 48), # 4-channel 2D, batch 7
(7, 4, 64, 48),
],
[
{"spatial_dims": 1, "in_channels": 3, "n_chns_1": 20, "n_chns_2": 30, "n_chns_3": 40, "r": 5},
(16, 3, 63), # 3-channel 1D, batch 16
(16, 40, 63),
],
]
TEST_CASES_3D = []
for type_1 in (
{"kernel_size": 3, "act": Act.PRELU, "norm": Norm.INSTANCE},
{"kernel_size": 1, "act": None, "norm": Norm.INSTANCE},
):
for type_2 in (
{"kernel_size": 3, "act": Act.PRELU, "norm": Norm.INSTANCE},
{"kernel_size": 1, "act": None, "norm": Norm.INSTANCE},
):
test_case = [
{
"spatial_dims": 3,
"in_channels": 10,
"r": 3,
"n_chns_1": 3,
"n_chns_2": 5,
"n_chns_3": 11,
"conv_param_1": type_1,
"conv_param_3": type_2,
},
(16, 10, 32, 24, 48), # 10-channel 3D, batch 16
(16, 11, 32, 24, 48),
]
TEST_CASES_3D.append(test_case)
class TestSEBlockLayer(unittest.TestCase):
@parameterized.expand(TEST_CASES + TEST_CASES_3D)
def test_shape(self, input_param, input_shape, expected_shape):
net = SEBlock(**input_param).to(device)
with eval_mode(net):
result = net(torch.randn(input_shape).to(device))
self.assertEqual(result.shape, expected_shape)
def test_script(self):
input_param, input_shape, _ = TEST_CASES[0]
net = SEBlock(**input_param)
test_data = torch.randn(input_shape)
test_script_save(net, test_data)
def test_ill_arg(self):
with self.assertRaises(ValueError):
SEBlock(spatial_dims=1, in_channels=4, n_chns_1=2, n_chns_2=3, n_chns_3=4, r=100)
if __name__ == "__main__":
unittest.main()
|
e79ee614bed7f63a42bd9a8aef60d52d91c1effc
|
9c54b9ea3e9fe208457bf64ad53eba8889f4b057
|
/Problem062/Python/solution_1.py
|
7d244687a7d57e2345d702f03c047df9b0abfbcd
|
[
"MIT"
] |
permissive
|
DestructHub/ProjectEuler
|
e0d77c02f0646a85d09af64127e92ac907ebad2a
|
efba582f976cd59748566c19799d84984c77ea61
|
refs/heads/master
| 2022-12-21T13:08:00.128200
| 2021-10-06T12:47:48
| 2022-12-15T20:33:12
| 36,625,177
| 179
| 87
|
MIT
| 2022-12-15T20:33:14
| 2015-05-31T22:36:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
solution_1.py
|
#!/usr/bin/env python
# coding=utf-8
# Python Script
#
# Copyleft © Manoel Vilela
#
#
"""
Cubic permutations
Problem 62
The cube, 41063625 (345^3), can be permuted to produce two other cubes: 56623104 (384^3) and 66430125 (405^3). In fact, 41063625 is the smallest cube which has exactly three permutations of its digits which are also cube.
Find the smallest cube for which exactly five permutations of its digits are cube.
"""
def cubesGen(n, start=1):
return map(lambda x: str(x*x*x), range(start, n + 1))
def getId(n):
return sum([2 ** int(x) for x in n]) * sum([int(x) for x in list(n)])
def isPerm(a, b):
for i in a:
for j in b:
if i not in b or j not in a:
return False
return getId(a) == getId(b)
def cubeRoot(n):
return int(n ** (1/3) + 0.00001)
def mostCube(n):
most = int(''.join(sorted(n, reverse=True)))
return cubeRoot(most)
def lowerCube(n):
lower = int(''.join(sorted(n)))
return cubeRoot(lower)
# my solution... after much hard work, the final result is this...
# I want to break free XD
def solution(max):
maxCube = 10 ** 15
for cube_a in cubesGen(cubeRoot(maxCube)):  # bases up to the cube root of the cap
permCubes = 0
for cube_b in cubesGen(int(mostCube(cube_a)), start=int(lowerCube(cube_a))):
if isPerm(cube_a, cube_b):
permCubes += 1
if permCubes == max:
return cube_a
# someone else's solution, much more efficient
def solution_extern():
def cubes():
i = 1
while True:
yield i * i * i
i += 1
cube_dict = {}
for c in cubes():
digits = ''.join(sorted(str(c)))
if digits in cube_dict:
cube_list = cube_dict[digits]
cube_list.append(c)
if len(cube_list) == 5:
return min(cube_list)
else:
cube_dict[digits] = [c]
print(solution_extern())
# wrong answer: 1000600120008 (the number itself was counted, so that answer has 6 permutations)
# correct answer: 127035954683 (5 permutations!)
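# Quick sanity check (added illustration) of the sorted-digit bucketing that
# solution_extern() relies on: the three cubes named in the problem statement
# all reduce to the same key '01234566'.
assert (''.join(sorted(str(345 ** 3)))
        == ''.join(sorted(str(384 ** 3)))
        == ''.join(sorted(str(405 ** 3))))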
|
1ca1c3a3dc6727d2a7797555cf36d3640c58ca3f
|
8eb7e2224cd81cd21fd5b0c4dd54abe85ba07e49
|
/netmiko/netapp/__init__.py
|
23d5259fdda50d435abdcdb3e44157d412541ab4
|
[
"MIT"
] |
permissive
|
ktbyers/netmiko
|
f8b980569fd863f0a7bfe28580366339c4bd31ec
|
2e56b40ec639da130471c59dd1f3c93983471e41
|
refs/heads/develop
| 2023-08-30T20:33:05.554926
| 2023-08-29T21:50:45
| 2023-08-29T21:50:45
| 27,283,062
| 3,397
| 1,594
|
MIT
| 2023-09-04T03:04:31
| 2014-11-28T21:42:52
|
Python
|
UTF-8
|
Python
| false
| false
| 86
|
py
|
__init__.py
|
from netmiko.netapp.netapp_cdot_ssh import NetAppcDotSSH
__all__ = ["NetAppcDotSSH"]
|
6ddba63a794d10d02c14f4cf6b06a24d9135d85f
|
ffdc77394c5b5532b243cf3c33bd584cbdc65cb7
|
/tests/st/ops/test_func_interpolate.py
|
fb12bcda05e5a298053c8327dd1e050d9b84b683
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] |
permissive
|
mindspore-ai/mindspore
|
ca7d5bb51a3451c2705ff2e583a740589d80393b
|
54acb15d435533c815ee1bd9f6dc0b56b4d4cf83
|
refs/heads/master
| 2023-07-29T09:17:11.051569
| 2023-07-17T13:14:15
| 2023-07-17T13:14:15
| 239,714,835
| 4,178
| 768
|
Apache-2.0
| 2023-07-26T22:31:11
| 2020-02-11T08:43:48
|
C++
|
UTF-8
|
Python
| false
| false
| 24,600
|
py
|
test_func_interpolate.py
|
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import ops
class Net(nn.Cell):
def __init__(self, mode):
super(Net, self).__init__()
self.mode = mode
def construct(self, x, size=None, scale_factor=None, align_corners=None, recompute_scale_factor=None):
return ops.interpolate(x, size, scale_factor, self.mode, align_corners, recompute_scale_factor)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_area_3d(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate area mode
1. 3D size
2. 3D scale_factor
Expectation: success
"""
ms.set_context(mode=mode)
net = Net("area")
# 1. 3D(1, 3, 4)
input_3d = np.array([[[0.476130, 0.196372, 0.320748, 0.574267],
[0.558406, 0.186530, 0.144793, 0.017598],
[0.043675, 0.360826, 0.367078, 0.607198]]], dtype=np.float32)
except_3d_1 = np.array([[[0.336251, 0.447508],
[0.372468, 0.081196],
[0.202250, 0.487138]]], dtype=np.float32)
size = 2
output_3d_1 = net(Tensor(input_3d), size=size)
assert np.allclose(output_3d_1.asnumpy(), except_3d_1, atol=1e-3, rtol=1e-3)
# 2. 3D(1, 3, 4) scale_factor=0.3
except_3d_2 = np.array([[[0.391879],
[0.226832],
[0.344694]]], dtype=np.float32)
scale_factor = 0.3
output_3d_2 = net(Tensor(input_3d), scale_factor=scale_factor)
assert np.allclose(output_3d_2.asnumpy(), except_3d_2, atol=1e-3, rtol=1e-3)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_area_4d(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate area mode
1. 4D size
2. 4D scale_factor
Expectation: success
"""
ms.set_context(mode=mode)
net = Net("area")
# 1. 4D(1, 3, 3, 5) size=(5, 8)
input_4d = np.array([[[[0.4992, 0.743079, 0.570383, 0.942855, 0.833395],
[0.645754, 0.485126, 0.957497, 0.933144, 0.556276],
[0.298626, 0.594928, 0.150964, 0.447654, 0.267512],
[0.46943, 0.419558, 0.665492, 0.414906, 0.708427]]]], dtype=np.float32)
except_4d_1 = np.array([[[[0.4992, 0.62114, 0.743079, 0.656731, 0.756619, 0.942855,
0.888125, 0.833395],
[0.572477, 0.59329, 0.614102, 0.689021, 0.85097, 0.937999,
0.816418, 0.694836],
[0.47219, 0.506109, 0.540027, 0.547129, 0.622315, 0.690399,
0.551147, 0.411894],
[0.384028, 0.445635, 0.507243, 0.457736, 0.419754, 0.43128,
0.459625, 0.48797],
[0.46943, 0.444494, 0.419558, 0.542525, 0.540199, 0.414906,
0.561666, 0.708427]]]], dtype=np.float32)
size = (5, 8)
output_4d_1 = net(Tensor(input_4d), size=size)
assert np.allclose(output_4d_1.asnumpy(), except_4d_1, atol=1e-5, rtol=1e-5)
# 2. 4D(1, 3, 3, 5) scale_factor=(1.5, 0.4)
except_4d_2 = np.array([[[[0.604221, 0.782211],
[0.650173, 0.798925],
[0.696126, 0.815639],
[0.348173, 0.28871],
[0.433166, 0.442492],
[0.51816, 0.596275]]]], dtype=np.float32)
scale_factor = (1.5, 0.4)
output_4d_2 = net(Tensor(input_4d), scale_factor=scale_factor)
assert np.allclose(output_4d_2.asnumpy(), except_4d_2, atol=1e-5, rtol=1e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_area_5d(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate area mode
1. 5D size
2. 5D scale_factor
Expectation: success
"""
ms.set_context(mode=mode)
net = Net("area")
# 1. 5D(1, 1, 1, 4, 5) size=(2, 4, 3)
input_5d = np.array([[[[[0.4992, 0.743079, 0.570383, 0.942855, 0.833395],
[0.645754, 0.485126, 0.957497, 0.933144, 0.556276],
[0.298626, 0.594928, 0.150964, 0.447654, 0.267512],
[0.46943, 0.419558, 0.665492, 0.414906, 0.708427]]]]], dtype=np.float32)
except_5d_1 = np.array([[[[[0.62114, 0.752106, 0.888125],
[0.56544, 0.791922, 0.74471],
[0.446777, 0.397849, 0.357583],
[0.444494, 0.499985, 0.561666]],
[[0.62114, 0.752106, 0.888125],
[0.56544, 0.791922, 0.74471],
[0.446777, 0.397849, 0.357583],
[0.444494, 0.499985, 0.561666]]]]], dtype=np.float32)
size = (2, 4, 3)
output_5d_1 = net(Tensor(input_5d), size=size)
assert np.allclose(output_5d_1.asnumpy(),
except_5d_1, atol=1e-5, rtol=1e-5)
# 2. 5D(1, 1, 1, 4, 5) scale_factor=(3, 0.4, 0.7)
except_5d_2 = np.array([[[[[0.519463, 0.610466, 0.638021]],
[[0.519463, 0.610466, 0.638021]],
[[0.519463, 0.610466, 0.638021]]]]], dtype=np.float32)
scale_factor = (3., 0.4, 0.7)
output_5d_2 = net(Tensor(input_5d), scale_factor=scale_factor)
assert np.allclose(output_5d_2.asnumpy(), except_5d_2, atol=1e-5, rtol=1e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_bicubic(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate bicubic mode
1. 4D size
2. 4D size align_corners=True
Expectation: success
"""
ms.set_context(mode=mode)
net = Net("bicubic")
input_4d = np.array([[[[0.003088, 0.313131, 0.481231, 0.326219, 0.190293],
[0.711616, 0.583990, 0.718121, 0.258823, 0.121847],
[0.781316, 0.591508, 0.858185, 0.091935, 0.444639]],
[[0.389884, 0.894497, 0.471427, 0.188708, 0.557449],
[0.998047, 0.380719, 0.570574, 0.722258, 0.997173],
[0.195751, 0.050744, 0.002008, 0.482685, 0.708559]]]],
dtype=np.float32)
size = (5, 3)
# 1. 4D size=(5, 3)
except_4d_1 = np.array([[[[0.038226, 0.46334, 0.228343],
[0.287464, 0.558147, 0.186838],
[0.671836, 0.718121, 0.14377],
[0.729394, 0.819616, 0.255285],
[0.723466, 0.868763, 0.334474]],
[[0.522484, 0.463939, 0.409831],
[0.670862, 0.531739, 0.626711],
[0.821431, 0.570574, 0.926654],
[0.40312, 0.206133, 0.777062],
[0.107333, -0.040933, 0.642958]]]], dtype=np.float32)
output_4d_1 = net(Tensor(input_4d), size=size)
assert np.allclose(output_4d_1.asnumpy(),
except_4d_1, atol=1e-5, rtol=1e-5)
# 2. 4D size=(5, 3), align_corners=True
except_4d_2 = np.array([[[[0.003088, 0.481231, 0.190293],
[0.350818, 0.586545, 0.125808],
[0.711616, 0.718121, 0.121847],
[0.81289, 0.810361, 0.276826],
[0.781316, 0.858185, 0.444639]],
[[0.389884, 0.471427, 0.557449],
[0.769181, 0.574304, 0.804369],
[0.998047, 0.570574, 0.997173],
[0.653914, 0.295586, 0.89409],
[0.195751, 0.002008, 0.708559]]]], dtype=np.float32)
output_4d_2 = net(Tensor(input_4d), size=size, align_corners=True)
assert np.allclose(output_4d_2.asnumpy(), except_4d_2, atol=1e-5, rtol=1e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_linear(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate linear mode
1. 3D size
2. 3D size align_corners=True
3. 3D scale_factor recompute_scale_factor=True
Expectation: success
"""
ms.set_context(mode=mode)
net = Net("linear")
# 1. 3D(1, 3, 5) size=8
input_3d = np.array([[[0.784811, 0.008794, 0.52707, 0.821989, 0.88349],
[0.00115, 0.556748, 0.8774, 0.761826, 0.23384],
[0.842441, 0.853507, 0.294813, 0.375813, 0.399932]]], dtype=np.float32)
except_3d_1 = np.array([[[0.784811, 0.445304, 0.041186, 0.365109, 0.619232, 0.803557, 0.856583, 0.88349],
[0.00115, 0.244224, 0.576789, 0.777196, 0.841283, 0.769049, 0.464834, 0.23384],
[0.842441, 0.847282, 0.818589, 0.469405, 0.320126, 0.370751, 0.38938, 0.399932]]],
dtype=np.float32)
size = 8
output_3d_1 = net(Tensor(input_3d), size=size)
assert np.allclose(output_3d_1.asnumpy(), except_3d_1, atol=1e-5, rtol=1e-5)
# 2. 3D(1, 3, 5) size=8 align_corners=True
except_3d_2 = np.array([[[0.784811, 0.341373, 0.082833, 0.378991, 0.611333, 0.779858,
0.848347, 0.88349],
[0.00115, 0.318635, 0.602555, 0.785785, 0.844379, 0.778337,
0.535546, 0.23384],
[0.842441, 0.848764, 0.773694, 0.45444, 0.317956, 0.364242,
0.38615, 0.399932]]], dtype=np.float32)
output_3d_2 = net(Tensor(input_3d), size=size, align_corners=True)
assert np.allclose(output_3d_2.asnumpy(), except_3d_2, atol=1e-5, rtol=1e-5)
# 3. scale_factor=0.7, recompute_scale_factor=True
except_3d_3 = np.array([[[0.526139, 0.52707, 0.86299],
[0.186349, 0.8774, 0.409835],
[0.84613, 0.294813, 0.391892]]], dtype=np.float32)
scale_factor = 0.7
output_3d_3 = net(Tensor(input_3d), scale_factor=scale_factor, recompute_scale_factor=True)
assert np.allclose(output_3d_3.asnumpy(), except_3d_3, atol=1e-5, rtol=1e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_bilinear(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate bilinear mode
1. 4D size
2. 4D size align_corners=True
3. 4D scale_factor recompute_scale_factor=True
Expectation: success
"""
ms.set_context(mode=mode)
net = Net("bilinear")
input_4d = np.array([[[[0.003088, 0.313131, 0.481231, 0.326219, 0.190293],
[0.711616, 0.58399, 0.718121, 0.258823, 0.121847],
[0.781316, 0.591508, 0.858185, 0.091935, 0.444639]],
[[0.389884, 0.894497, 0.471427, 0.188708, 0.557449],
[0.998047, 0.380719, 0.570574, 0.722258, 0.997173],
[0.195751, 0.050744, 0.002008, 0.482685, 0.708559]]]],
dtype=np.float32)
size = (5, 3)
# 1. 4D size=(5, 3)
except_4d_1 = np.array([[[[0.106436, 0.481231, 0.235602],
[0.331491, 0.575987, 0.208363],
[0.669074, 0.718121, 0.167506],
[0.698458, 0.802159, 0.263245],
[0.718047, 0.858185, 0.327071]],
[[0.558088, 0.471427, 0.434535],
[0.651761, 0.511086, 0.622935],
[0.792271, 0.570574, 0.905535],
[0.405358, 0.229434, 0.742174],
[0.147415, 0.002008, 0.633268]]]], dtype=np.float32)
output_4d_1 = net(Tensor(input_4d), size=size)
assert np.allclose(output_4d_1.asnumpy(), except_4d_1, atol=1e-5, rtol=1e-5)
# 2. 4D size=(5, 3), align_corners=True
except_4d_2 = np.array([[[[0.003088, 0.481231, 0.190293],
[0.357352, 0.599676, 0.15607],
[0.711616, 0.718121, 0.121847],
[0.746466, 0.788153, 0.283243],
[0.781316, 0.858185, 0.444639]],
[[0.389884, 0.471427, 0.557449],
[0.693965, 0.521001, 0.777311],
[0.998047, 0.570574, 0.997173],
[0.596899, 0.286291, 0.852866],
[0.195751, 0.002008, 0.708559]]]], dtype=np.float32)
output_4d_2 = net(Tensor(input_4d), size=size, align_corners=True)
assert np.allclose(output_4d_2.asnumpy(), except_4d_2, atol=1e-5, rtol=1e-5)
# 3. scale_factor=(0.5, 1.5), recompute_scale_factor=True
scale_factor = (0.5, 1.5)
except_4d_3 = np.array([[[[0.711616, 0.638687, 0.622313, 0.718121, 0.390051, 0.200119,
0.121847]],
[[0.998047, 0.645288, 0.434963, 0.570574, 0.67892, 0.840079,
0.997173]]]], dtype=np.float32)
output_4d_3 = net(Tensor(input_4d), scale_factor=scale_factor, recompute_scale_factor=True)
assert np.allclose(output_4d_3.asnumpy(), except_4d_3, atol=1e-5, rtol=1e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_nearest_exact_3d(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate nearest-exact mode
1. 3D size
2. 3D scale_factor recompute_scale_factor=True
Expectation: success
"""
ms.set_context(mode=mode)
net = Net("nearest-exact")
# 1. 3D(1, 3, 5) size=8
input_3d = np.array([[[0.816117, 0.004037, 0.746452, 0.137449, 0.337593],
[0.970709, 0.558792, 0.053919, 0.734102, 0.432973],
[0.830186, 0.77753, 0.384094, 0.905231, 0.76362]]], dtype=np.float32)
except_3d_1 = np.array([[[0.816117, 0.816117, 0.004037, 0.746452, 0.746452, 0.137449,
0.337593, 0.337593],
[0.970709, 0.970709, 0.558792, 0.053919, 0.053919, 0.734102,
0.432973, 0.432973],
[0.830186, 0.830186, 0.77753, 0.384094, 0.384094, 0.905231,
0.76362, 0.76362]]], dtype=np.float32)
size = 8
output_3d_1 = net(Tensor(input_3d), size=size)
assert np.allclose(output_3d_1.asnumpy(), except_3d_1, atol=1e-5, rtol=1e-5)
# 2. 3D(1, 3, 5) scale_factor=0.7 recompute_scale_factor=True
except_3d_2 = np.array([[[0.816117, 0.746452, 0.337593],
[0.970709, 0.053919, 0.432973],
[0.830186, 0.384094, 0.76362]]], dtype=np.float32)
scale_factor = 0.7
output_3d_2 = net(Tensor(input_3d), scale_factor=scale_factor, recompute_scale_factor=True)
assert np.allclose(output_3d_2.asnumpy(), except_3d_2, atol=1e-5, rtol=1e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_nearest_exact_4d(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate nearest-exact mode
Expectation: success
1. 4D size
2. 4D scale_factor recompute_scale_factor=True
"""
ms.set_context(mode=mode)
net = Net("nearest-exact")
# 1. 4D(1, 3, 3, 5) size=(5, 8)
size = (5, 8)
input_4d = np.array([[[[0.4992, 0.743079, 0.570383, 0.942855, 0.833395],
[0.645754, 0.485126, 0.957497, 0.933144, 0.556276],
[0.298626, 0.594928, 0.150964, 0.447654, 0.267512],
[0.46943, 0.419558, 0.665492, 0.414906, 0.708427]]]], dtype=np.float32)
except_4d_1 = np.array([[[[0.4992, 0.4992, 0.743079, 0.570383, 0.570383, 0.942855,
0.833395, 0.833395],
[0.645754, 0.645754, 0.485126, 0.957497, 0.957497, 0.933144,
0.556276, 0.556276],
[0.298626, 0.298626, 0.594928, 0.150964, 0.150964, 0.447654,
0.267512, 0.267512],
[0.298626, 0.298626, 0.594928, 0.150964, 0.150964, 0.447654,
0.267512, 0.267512],
[0.46943, 0.46943, 0.419558, 0.665492, 0.665492, 0.414906,
0.708427, 0.708427]]]], dtype=np.float32)
output_4d_1 = net(Tensor(input_4d), size=size)
assert np.allclose(output_4d_1.asnumpy(), except_4d_1, atol=1e-5, rtol=1e-5)
# 2. 4D(1, 3, 3, 5) scale_factor=(1.5, 0.4) recompute_scale_factor=True
scale_factor = (1.5, 0.4)
except_4d_2 = np.array([[[[0.743079, 0.942855],
[0.485126, 0.933144],
[0.485126, 0.933144],
[0.594928, 0.447654],
[0.419558, 0.414906],
[0.419558, 0.414906]]]], dtype=np.float32)
output_4d_2 = net(Tensor(input_4d), scale_factor=scale_factor, recompute_scale_factor=True)
assert np.allclose(output_4d_2.asnumpy(), except_4d_2, atol=1e-5, rtol=1e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_nearest_3d(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate nearest mode
1. 3D size
2. 3D scale_factor recompute_scale_factor=True
Expectation: success
"""
ms.set_context(mode=mode)
net = Net("nearest")
# 1. 3D(1, 3, 5) size=8
size = 8
input_3d = np.array([[[0.816117, 0.004037, 0.746452, 0.137449, 0.337593],
[0.970709, 0.558792, 0.053919, 0.734102, 0.432973],
[0.830186, 0.77753, 0.384094, 0.905231, 0.76362]]], dtype=np.float32)
except_3d_1 = np.array([[[0.816117, 0.816117, 0.004037, 0.004037, 0.746452, 0.137449,
0.137449, 0.337593],
[0.970709, 0.970709, 0.558792, 0.558792, 0.053919, 0.734102,
0.734102, 0.432973],
[0.830186, 0.830186, 0.77753, 0.77753, 0.384094, 0.905231,
0.905231, 0.76362]]], dtype=np.float32)
output_3d_1 = net(Tensor(input_3d), size=size)
assert np.allclose(output_3d_1.asnumpy(), except_3d_1, atol=1e-5, rtol=1e-5)
# 2. 3D(1, 3, 5) scale_factor=0.7 recompute_scale_factor=True
except_3d_2 = np.array([[[0.816117, 0.004037, 0.137449],
[0.970709, 0.558792, 0.734102],
[0.830186, 0.77753, 0.905231]]], dtype=np.float32)
scale_factor = 0.7
output_3d_2 = net(Tensor(input_3d), scale_factor=scale_factor, recompute_scale_factor=True)
assert np.allclose(output_3d_2.asnumpy(), except_3d_2, atol=1e-5, rtol=1e-5)
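# Scale sketch (comments only): scale_factor=0.7 with recompute_scale_factor=True
# gives output length floor(5 * 0.7) = 3; "nearest" then reads
# src = floor(dst * 5 / 3) -> [0, 1, 3], matching except_3d_2 above, while
# "nearest-exact" reads floor((dst + 0.5) * 5 / 3) -> [0, 2, 4] in the earlier
# 3D test.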
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_interpolate_nearest_4d(mode):
"""
Feature: interpolate
Description: Verify the result of interpolate nearest mode
1. 4D size
2. 4D scale_factor recompute_scale_factor=True
Expectation: success
"""
ms.set_context(mode=mode)
net = Net("nearest")
# 1. 4D(1, 3, 3, 5) size=(5, 8)
size = (5, 8)
input_4d = np.array([[[[0.4992, 0.743079, 0.570383, 0.942855, 0.833395],
[0.645754, 0.485126, 0.957497, 0.933144, 0.556276],
[0.298626, 0.594928, 0.150964, 0.447654, 0.267512],
[0.46943, 0.419558, 0.665492, 0.414906, 0.708427]]]], dtype=np.float32)
except_4d_1 = np.array([[[[0.4992, 0.4992, 0.743079, 0.743079, 0.570383, 0.942855,
0.942855, 0.833395],
[0.4992, 0.4992, 0.743079, 0.743079, 0.570383, 0.942855,
0.942855, 0.833395],
[0.645754, 0.645754, 0.485126, 0.485126, 0.957497, 0.933144,
0.933144, 0.556276],
[0.298626, 0.298626, 0.594928, 0.594928, 0.150964, 0.447654,
0.447654, 0.267512],
[0.46943, 0.46943, 0.419558, 0.419558, 0.665492, 0.414906,
0.414906, 0.708427]]]], dtype=np.float32)
output_4d_1 = net(Tensor(input_4d), size=size)
assert np.allclose(output_4d_1.asnumpy(), except_4d_1, atol=1e-5, rtol=1e-5)
# 2. 4D(1, 3, 3, 5) scale_factor=(1.5, 0.4) recompute_scale_factor=True
except_4d_2 = np.array([[[[0.4992, 0.570383],
[0.4992, 0.570383],
[0.645754, 0.957497],
[0.298626, 0.150964],
[0.298626, 0.150964],
[0.46943, 0.665492]]]], dtype=np.float32)
scale_factor = (1.5, 0.4)
output_4d_2 = net(Tensor(input_4d), scale_factor=scale_factor, recompute_scale_factor=True)
assert np.allclose(output_4d_2.asnumpy(), except_4d_2, atol=1e-5, rtol=1e-5)
|
5bbaabe6003f65b841cf80cb5596044c1b84c9fc
|
ab40571d5051ad53c0f205fa797ba36eac516d06
|
/language/conpono/cpc/bilin_model_builder.py
|
011fcf61e4b94053e3a6fc187ce3e2b683b3aa38
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/language
|
e941b1a92ab46d40d8d03bb0c314905cb6902ce2
|
ac9447064195e06de48cc91ff642f7fffa28ffe8
|
refs/heads/master
| 2023-08-24T23:10:13.207294
| 2023-05-25T20:47:18
| 2023-05-25T22:29:27
| 153,201,352
| 1,567
| 371
|
Apache-2.0
| 2023-07-06T23:03:15
| 2018-10-16T00:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,973
|
py
|
bilin_model_builder.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the paragraph reconstruction model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from bert import modeling
import tensorflow.compat.v1 as tf
def create_model(model, labels, label_types, num_choices, k_size=4):
"""Creates a classification model.
Args:
model: the BERT model from modeling.py
labels: ground truth paragraph order
label_types: which k distances are being predicted
num_choices: number of negatives samples + 1
k_size: window size of CPC k distance
Returns:
tuple of (loss, per_example_loss, logits, probabilities) for model
"""
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
with tf.variable_scope("cpc_loss"):
output = tf.reshape(output_layer, (-1, num_choices + 1, hidden_size))
contexts = output[:, 0, :]
targets = output[:, 1:, :]
softmax_weights = tf.get_variable(
"cpc_weights", [k_size * 2, hidden_size, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
context_encoded = tf.matmul(softmax_weights, contexts, transpose_b=True)
context_encoded = tf.transpose(context_encoded, perm=[2, 0, 1])
logits = tf.matmul(targets, context_encoded, transpose_b=True)
logits = tf.transpose(logits, perm=[0, 2, 1])
example_weights = tf.reduce_sum(tf.one_hot(label_types, k_size * 2), axis=1)
per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels)
probabilities = tf.nn.softmax(logits, axis=-1)
loss = tf.reduce_mean(
tf.reduce_sum(example_weights * per_example_loss, axis=-1))
return (loss, per_example_loss, logits, probabilities)
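# Shape sketch (comments only; B = batch, H = hidden size, N = num_choices,
# K = k_size * 2 are illustrative names, not part of the original file):
#   output:          (B, N + 1, H)  pooled BERT vectors, context row first
#   context_encoded: (B, K, H)      one bilinear projection of the context per distance
#   logits:          (B, K, N)      context/target similarity for each k distance
# so the sparse softmax over the last axis scores the true target among the N
# candidates for each predicted distance.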
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
|
b0f45e935d4824814cff43ffb24282c1e2c2c60f
|
704649e236cd2977bfe4b5ee5184a592cc252f4d
|
/setup.py
|
67c68922a2f99e9014f70487dced8ecab0f49850
|
[
"MIT"
] |
permissive
|
soraxas/echo360
|
a5a443954bdc3b7d60a2c87a302feb625cb30c13
|
1d01827e444e0871cb6cb031078c8e22c92a25f8
|
refs/heads/master
| 2022-12-24T16:10:50.483825
| 2022-12-15T05:35:24
| 2022-12-15T05:35:24
| 106,752,621
| 224
| 58
|
MIT
| 2022-10-25T00:17:47
| 2017-10-12T22:41:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
setup.py
|
import setuptools
with open("README.md", "r", encoding="utf8") as fh:
long_description = fh.read()
with open("requirements.txt") as f:
required = f.read().splitlines()
setuptools.setup(
name="echo360",
version="v2.1",
description="Commandline tool for automated downloads of echo360 videos",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/soraxas/echo360",
project_urls={
"Documentation": "https://github.com/soraxas/echo360/blob/master/README.md",
"Code": "https://github.com/soraxas/echo360",
"Issue tracker": "https://github.com/soraxas/echo360/issues",
},
python_requires=">=2.7",
install_requires=required,
platforms="linux, macos, windows",
entry_points={
"console_scripts": [
"echo360-downloader=echo360.main:main",
],
},
package_data={"echo360": ["*.py"]},
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Unix Shell",
"Topic :: System :: Shells",
"Topic :: System :: System Shells",
"Topic :: Terminals",
"Topic :: System :: Networking",
"License :: OSI Approved :: MIT License",
],
license="MIT",
author="soraxas",
author_email="oscar@tinyiu.com",
)
|
cf55b002c23cce04b929728df5109a24cf888cac
|
e0484f74a89ae3f9bb52bef299a717d6e38b3acc
|
/rotary.py
|
b599e749ef8ad5ed2b7851330b9188e2143f1dd6
|
[
"MIT"
] |
permissive
|
miketeachman/micropython-rotary
|
8346e3add4a5782bd5f88c126e31b333fc455301
|
b4c8ce53ee2413bdad41d24a2fcf91fac6ab2d32
|
refs/heads/master
| 2023-07-24T06:05:25.392114
| 2023-07-22T04:33:43
| 2023-07-22T04:33:43
| 152,363,249
| 219
| 56
|
MIT
| 2023-07-22T04:33:44
| 2018-10-10T04:31:27
|
Python
|
UTF-8
|
Python
| false
| false
| 5,408
|
py
|
rotary.py
|
# MIT License (MIT)
# Copyright (c) 2022 Mike Teachman
# https://opensource.org/licenses/MIT
# Platform-independent MicroPython code for the rotary encoder module
# Documentation:
# https://github.com/MikeTeachman/micropython-rotary
import micropython
from micropython import const
_DIR_CW = const(0x10) # Clockwise step
_DIR_CCW = const(0x20) # Counter-clockwise step
# Rotary Encoder States
_R_START = const(0x0)
_R_CW_1 = const(0x1)
_R_CW_2 = const(0x2)
_R_CW_3 = const(0x3)
_R_CCW_1 = const(0x4)
_R_CCW_2 = const(0x5)
_R_CCW_3 = const(0x6)
_R_ILLEGAL = const(0x7)
_transition_table = [
# |------------- NEXT STATE -------------| |CURRENT STATE|
# CLK/DT CLK/DT CLK/DT CLK/DT
# 00 01 10 11
[_R_START, _R_CCW_1, _R_CW_1, _R_START], # _R_START
[_R_CW_2, _R_START, _R_CW_1, _R_START], # _R_CW_1
[_R_CW_2, _R_CW_3, _R_CW_1, _R_START], # _R_CW_2
[_R_CW_2, _R_CW_3, _R_START, _R_START | _DIR_CW], # _R_CW_3
[_R_CCW_2, _R_CCW_1, _R_START, _R_START], # _R_CCW_1
[_R_CCW_2, _R_CCW_1, _R_CCW_3, _R_START], # _R_CCW_2
[_R_CCW_2, _R_START, _R_CCW_3, _R_START | _DIR_CCW], # _R_CCW_3
[_R_START, _R_START, _R_START, _R_START]] # _R_ILLEGAL
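# Trace sketch (comments only): feeding the CLK/DT inputs 10, 00, 01, 11 into
# the table above walks _R_START -> _R_CW_1 -> _R_CW_2 -> _R_CW_3 ->
# _R_START | _DIR_CW, at which point (state & _DIR_MASK) reports one clockwise
# step; out-of-order inputs fall back toward _R_START instead.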
_transition_table_half_step = [
[_R_CW_3, _R_CW_2, _R_CW_1, _R_START],
[_R_CW_3 | _DIR_CCW, _R_START, _R_CW_1, _R_START],
[_R_CW_3 | _DIR_CW, _R_CW_2, _R_START, _R_START],
[_R_CW_3, _R_CCW_2, _R_CCW_1, _R_START],
[_R_CW_3, _R_CW_2, _R_CCW_1, _R_START | _DIR_CW],
[_R_CW_3, _R_CCW_2, _R_CW_3, _R_START | _DIR_CCW],
[_R_START, _R_START, _R_START, _R_START],
[_R_START, _R_START, _R_START, _R_START]]
_STATE_MASK = const(0x07)
_DIR_MASK = const(0x30)
def _wrap(value, incr, lower_bound, upper_bound):
range = upper_bound - lower_bound + 1
value = value + incr
if value < lower_bound:
value += range * ((lower_bound - value) // range + 1)
return lower_bound + (value - lower_bound) % range
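# Worked example (comments only): with lower_bound=0 and upper_bound=9 the span
# is 10 positions, so _wrap(8, 5, 0, 9) overshoots to 13 and folds back to
# 0 + (13 - 0) % 10 == 3; the _bound helper below would clamp the same move
# to 9 instead.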
def _bound(value, incr, lower_bound, upper_bound):
return min(upper_bound, max(lower_bound, value + incr))
def _trigger(rotary_instance):
for listener in rotary_instance._listener:
listener()
class Rotary(object):
RANGE_UNBOUNDED = const(1)
RANGE_WRAP = const(2)
RANGE_BOUNDED = const(3)
def __init__(self, min_val, max_val, incr, reverse, range_mode, half_step, invert):
self._min_val = min_val
self._max_val = max_val
self._incr = incr
self._reverse = -1 if reverse else 1
self._range_mode = range_mode
self._value = min_val
self._state = _R_START
self._half_step = half_step
self._invert = invert
self._listener = []
def set(self, value=None, min_val=None, incr=None,
max_val=None, reverse=None, range_mode=None):
# disable DT and CLK pin interrupts
self._hal_disable_irq()
if value is not None:
self._value = value
if min_val is not None:
self._min_val = min_val
if max_val is not None:
self._max_val = max_val
if incr is not None:
self._incr = incr
if reverse is not None:
self._reverse = -1 if reverse else 1
if range_mode is not None:
self._range_mode = range_mode
self._state = _R_START
# enable DT and CLK pin interrupts
self._hal_enable_irq()
def value(self):
return self._value
def reset(self):
self._value = 0
def close(self):
self._hal_close()
def add_listener(self, l):
self._listener.append(l)
def remove_listener(self, l):
if l not in self._listener:
raise ValueError('{} is not an installed listener'.format(l))
self._listener.remove(l)
def _process_rotary_pins(self, pin):
old_value = self._value
clk_dt_pins = (self._hal_get_clk_value() <<
1) | self._hal_get_dt_value()
if self._invert:
clk_dt_pins = ~clk_dt_pins & 0x03
# Determine next state
if self._half_step:
self._state = _transition_table_half_step[self._state &
_STATE_MASK][clk_dt_pins]
else:
self._state = _transition_table[self._state &
_STATE_MASK][clk_dt_pins]
direction = self._state & _DIR_MASK
incr = 0
if direction == _DIR_CW:
incr = self._incr
elif direction == _DIR_CCW:
incr = -self._incr
incr *= self._reverse
if self._range_mode == self.RANGE_WRAP:
self._value = _wrap(
self._value,
incr,
self._min_val,
self._max_val)
elif self._range_mode == self.RANGE_BOUNDED:
self._value = _bound(
self._value,
incr,
self._min_val,
self._max_val)
else:
self._value = self._value + incr
try:
if old_value != self._value and len(self._listener) != 0:
_trigger(self)
except:
pass
|
4c15bbc0705ec7f1b649fa630391906035cbf536
|
47d69d21f53333d93d5ba9973840ef192808a090
|
/tests/config/test_types.py
|
e859ea18bc95cf8a57be98934b203ff4f019f667
|
[
"MIT"
] |
permissive
|
tox-dev/tox
|
27ce3072e7faf5c88ed5305bbd66359369bba13d
|
da0885cd162fb02de866831a75eca9dcfe87eb36
|
refs/heads/main
| 2023-09-01T11:45:18.097559
| 2023-08-31T14:51:57
| 2023-08-31T14:51:57
| 68,465,360
| 3,512
| 624
|
MIT
| 2023-09-11T20:58:32
| 2016-09-17T16:54:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
test_types.py
|
from __future__ import annotations
from tox.config.types import Command, EnvList
def tests_command_repr() -> None:
cmd = Command(["python", "-m", "pip", "list"])
assert repr(cmd) == "Command(args=['python', '-m', 'pip', 'list'])"
assert cmd.ignore_exit_code is False
def tests_command_repr_ignore() -> None:
cmd = Command(["-", "python", "-m", "pip", "list"])
assert repr(cmd) == "Command(args=['-', 'python', '-m', 'pip', 'list'])"
assert cmd.ignore_exit_code is True
def tests_command_eq() -> None:
cmd_1 = Command(["python", "-m", "pip", "list"])
cmd_2 = Command(["python", "-m", "pip", "list"])
assert cmd_1 == cmd_2
def tests_command_ne() -> None:
cmd_1 = Command(["python", "-m", "pip", "list"])
cmd_2 = Command(["-", "python", "-m", "pip", "list"])
assert cmd_1 != cmd_2
def tests_env_list_repr() -> None:
env = EnvList(["py39", "py38"])
assert repr(env) == "EnvList(['py39', 'py38'])"
def tests_env_list_eq() -> None:
env_1 = EnvList(["py39", "py38"])
env_2 = EnvList(["py39", "py38"])
assert env_1 == env_2
def tests_env_list_ne() -> None:
env_1 = EnvList(["py39", "py38"])
env_2 = EnvList(["py38", "py39"])
assert env_1 != env_2
|
2d38da54b7269713f3219bb2c6e57c6b2fd1676f
|
a33e380ba70fa915bd3a6199cf88dcfa38ca5fab
|
/backpack/extensions/secondorder/diag_hessian/convtranspose3d.py
|
9b83eba9bdb9e4beee6762490241d22a76c11706
|
[
"MIT"
] |
permissive
|
f-dangel/backpack
|
1c90aaacad4569dd3153342ad68864466389767c
|
1ebfb4055be72ed9e0f9d101d78806bd4119645e
|
refs/heads/master
| 2023-08-14T05:49:31.904383
| 2023-07-12T18:17:34
| 2023-07-12T18:17:34
| 196,406,270
| 505
| 61
|
MIT
| 2023-09-05T14:06:32
| 2019-07-11T14:03:56
|
Python
|
UTF-8
|
Python
| false
| false
| 610
|
py
|
convtranspose3d.py
|
from backpack.core.derivatives.conv_transpose3d import ConvTranspose3DDerivatives
from backpack.extensions.secondorder.diag_hessian.convtransposend import (
BatchDiagHConvTransposeND,
DiagHConvTransposeND,
)
class DiagHConvTranspose3d(DiagHConvTransposeND):
def __init__(self):
super().__init__(
derivatives=ConvTranspose3DDerivatives(), params=["bias", "weight"]
)
class BatchDiagHConvTranspose3d(BatchDiagHConvTransposeND):
def __init__(self):
super().__init__(
derivatives=ConvTranspose3DDerivatives(), params=["bias", "weight"]
)
|
a9fe92894e459c4d96a9b0fde9bb2012a2de58cf
|
27b86f422246a78704e0e84983b2630533a47db6
|
/examples/blocks/multi_insert.py
|
2c1f58db6af49bd8bf29adb049f4f99e807ff77f
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
multi_insert.py
|
# Copyright (c) 2020-2022 Manfred Moitzi
# License: MIT License
import pathlib
import ezdxf
CWD = pathlib.Path("~/Desktop/Outbox").expanduser()
if not CWD.exists():
CWD = pathlib.Path(".")
# ------------------------------------------------------------------------------
# This example shows how to render a single block reference multiple times in a
# regular grid.
#
# tutorial: https://ezdxf.mozman.at/docs/tutorials/blocks.html
# ------------------------------------------------------------------------------
FLAG_SYMBOL = [(0, 0), (0, 5), (4, 3), (0, 3)]
def main():
doc = ezdxf.new()
doc.layers.add("FLAGS")
flag = doc.blocks.new(name="FLAG")
flag.add_polyline2d(FLAG_SYMBOL)
flag.add_circle((0, 0), 0.4, dxfattribs={"color": 1})
# Define some attribute templates as ATTDEF entities:
flag.add_attdef(
tag="NAME", insert=(0.5, -0.5), dxfattribs={"height": 0.5, "color": 3}
)
flag.add_attdef(
tag="XPOS", insert=(0.5, -1.0), dxfattribs={"height": 0.25, "color": 4}
)
flag.add_attdef(
tag="YPOS", insert=(0.5, -1.5), dxfattribs={"height": 0.25, "color": 4}
)
modelspace = doc.modelspace()
location = (0, 0)
values = {
"NAME": "Flag",
"XPOS": f"x = {location[0]:.3f}",
"YPOS": f"y = {location[1]:.3f}",
}
block_ref = modelspace.add_blockref(
"FLAG",
location,
dxfattribs={
"layer": "FLAGS",
},
).grid(
size=(5, 5), spacing=(10, 10)
) # render multiple blocks
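    # Note (comments only): grid(size=(5, 5), spacing=(10, 10)) turns this
    # single INSERT into a MINSERT-style multi-insert, 5 rows x 5 columns
    # spaced 10 drawing units apart, so 25 flags render from one block
    # reference.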
block_ref.dxf.rotation = 15
block_ref.add_auto_attribs(values)
filename = CWD / "multi_insert_with_attribs.dxf"
doc.set_modelspace_vport(height=200)
doc.saveas(filename)
print(f"drawing '{filename}' created.")
if __name__ == "__main__":
main()
|
01957e79428f08c84a8ccbd575dcdc47829d9707
|
933b0eef6909e52fb086015e1f73e9507aad9c3f
|
/Mathematical_Algorithms/src/Kadane_Algorithm.py
|
c39655bbd58dd8051bc7abed0fdbf70536e58c96
|
[] |
no_license
|
codezoned/ScriptsDump
|
c105641ee06b8bf148b9c2779b04eb1d0369a85f
|
df6fcc47f5a73c7a5c3522f985e23b89fe56117e
|
refs/heads/master
| 2023-04-29T03:12:40.610817
| 2022-11-26T12:22:33
| 2022-11-26T12:22:33
| 141,881,901
| 157
| 221
| null | 2023-08-10T07:00:05
| 2018-07-22T09:17:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
Kadane_Algorithm.py
|
# Kadane's Algorithm by Master-Fury
# Worst and Average Case Time Complexity: O(n)
from sys import maxsize # importing maximum int from sys module
def kadane_algorithm(arr: list) -> int:
    len_arr = len(arr)  # length of the array
    max_so_far = -maxsize - 1  # initialize the running maximum to the smallest possible int
max_ending_here = 0
for i in range(0, len_arr):
max_ending_here = max_ending_here + arr[i]
if (max_so_far < max_ending_here):
max_so_far = max_ending_here
if max_ending_here < 0:
max_ending_here = 0
return max_so_far
# Driver function
sample_array = [-5, -2, 5, 9, -8, 9, 4, 7, -7, -1]
max_cont_subarray_sum = kadane_algorithm(sample_array)
print("The maximum sum of contigous sub array is - ", max_cont_subarray_sum)
# DESCRIPTION
# This algorithm is widely used to find the contiguous subarray with the largest sum within a one-dimensional array of numbers.
# The simple idea of Kadane's algorithm is to look for all positive contiguous segments of the array (max_ending_here is used for this).
# It keeps track of the maximum-sum contiguous segment among all positive segments (max_so_far is used for this).
# Each time we get a positive sum, compare it with max_so_far and update max_so_far if it is greater.
# It can be viewed both as a greedy algorithm and as a DP problem.
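# A minimal sketch (not in the original script) showing how the same linear
# scan can also recover the boundaries of the best subarray; the helper name
# kadane_with_indices is an illustrative assumption, and empty subarrays are
# disallowed, matching the function above.
def kadane_with_indices(arr):
    best_sum = cur_sum = arr[0]
    best_start = best_end = cur_start = 0
    for i in range(1, len(arr)):
        if cur_sum < 0:
            # A negative running sum can never help: restart the window here.
            cur_sum, cur_start = arr[i], i
        else:
            cur_sum += arr[i]
        if cur_sum > best_sum:
            # Remember the best window seen so far.
            best_sum, best_start, best_end = cur_sum, cur_start, i
    return best_sum, best_start, best_end
# e.g. kadane_with_indices(sample_array) returns (26, 2, 7): arr[2:8] sums to 26.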
|
d82255d7a553e659a391358cfdc2f828fd5edbc6
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/20_杂题/1165. 单行键盘_磁道寻址.py
|
7a449ba66b35d26b0dd2599fd2b0670d44ad2c89
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
1165. 单行键盘_磁道寻址.py
|
from collections import defaultdict
class Solution:
def calculateTime(self, keyboard: str, word: str) -> int:
chr_idx = defaultdict(int)
for index, char in enumerate(keyboard):
chr_idx[char] = index
res, pre = 0, 0
for char in word:
res += abs(chr_idx[char] - pre)
pre = chr_idx[char]
return res
print(Solution().calculateTime("abcdefghijklmnopqrstuvwxyz", "cba"))
# Output: 4
# Explanation:
# The hand moves from key 0 to key 2 to type 'c', then to key 1 to type 'b', then back to key 0 to type 'a'.
# Total time = 2 + 1 + 1 = 4.
|
dffd0c830114bef9883fa7c9badf596f6923d4e7
|
5eb52c07e5b1bd00af77306f927f382b684cd6ff
|
/indy_node/server/request_handlers/read_req_handlers/get_claim_def_handler.py
|
d3297da8385e0eb1e74b8a2b319bf716824b1788
|
[
"Apache-2.0"
] |
permissive
|
hyperledger/indy-node
|
bce39486988f5114581cff4f6d14fc1b7684143c
|
e6bb87d4c605aff9914491d062248b6ec857334c
|
refs/heads/main
| 2023-09-03T15:33:08.187153
| 2023-05-08T22:48:21
| 2023-05-08T22:48:21
| 77,021,566
| 691
| 783
|
Apache-2.0
| 2023-05-09T15:42:43
| 2016-12-21T05:45:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,290
|
py
|
get_claim_def_handler.py
|
from indy_common.constants import CLAIM_DEF_SIGNATURE_TYPE, GET_CLAIM_DEF
from indy_common.req_utils import get_read_claim_def_from, get_read_claim_def_signature_type, \
get_read_claim_def_schema_ref, get_read_claim_def_tag
from indy_node.server.request_handlers.domain_req_handlers.claim_def_handler import ClaimDefHandler
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.request import Request
from plenum.server.database_manager import DatabaseManager
from plenum.server.request_handlers.handler_interfaces.read_request_handler import ReadRequestHandler
class GetClaimDefHandler(ReadRequestHandler):
def __init__(self, database_manager: DatabaseManager):
super().__init__(database_manager, GET_CLAIM_DEF, DOMAIN_LEDGER_ID)
def get_result(self, request: Request):
self._validate_request_type(request)
frm = get_read_claim_def_from(request)
signature_type = get_read_claim_def_signature_type(request)
schema_ref = get_read_claim_def_schema_ref(request)
tag = get_read_claim_def_tag(request)
keys, last_seq_no, last_update_time, proof = self.get_claim_def(
author=frm,
schema_seq_no=schema_ref,
signature_type=signature_type,
tag=tag
)
result = self.make_result(request=request,
data=keys,
last_seq_no=last_seq_no,
update_time=last_update_time,
proof=proof)
result[CLAIM_DEF_SIGNATURE_TYPE] = signature_type
return result
def get_claim_def(self,
author: str,
schema_seq_no: str,
signature_type,
tag,
is_committed=True) -> (str, int, int, list):
assert author is not None
assert schema_seq_no is not None
path = ClaimDefHandler.make_state_path_for_claim_def(author, schema_seq_no, signature_type, tag)
try:
keys, seq_no, last_update_time, proof = self.lookup(path, is_committed, with_proof=True)
return keys, seq_no, last_update_time, proof
except KeyError:
return None, None, None, None
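# Lookup sketch (comments only): the claim definition is stored in domain state
# under a path built from (author DID, schema seq_no, signature_type, tag); a
# state miss surfaces as KeyError, which this method maps to an all-None tuple
# rather than an error reply.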
|
08c34a0a03777ebe4b75559210016711e238d78a
|
2ed0210bc41f848a0e67fce3ad6b7a3e85228261
|
/src/pykeen/evaluation/classification_evaluator.py
|
f4c4c451b404955d425810ce177e05e0122d530c
|
[
"MIT"
] |
permissive
|
pykeen/pykeen
|
f7483445bd99d3a404bc4ff42538550d56702b66
|
5ff3597b18ab9a220e34361d3c3f262060811df1
|
refs/heads/master
| 2023-08-25T20:29:55.021639
| 2023-08-24T20:05:20
| 2023-08-24T20:05:20
| 242,672,435
| 1,308
| 199
|
MIT
| 2023-09-13T18:18:36
| 2020-02-24T07:26:03
|
Python
|
UTF-8
|
Python
| false
| false
| 4,331
|
py
|
classification_evaluator.py
|
# -*- coding: utf-8 -*-
"""Implementation of wrapper around sklearn metrics."""
from typing import Mapping, MutableMapping, Optional, Tuple, Type, cast
import numpy as np
import torch
from .evaluator import Evaluator, MetricResults
from ..constants import TARGET_TO_INDEX
from ..metrics.classification import ClassificationMetric, classification_metric_resolver
from ..typing import MappedTriples, Target
__all__ = [
"ClassificationEvaluator",
"ClassificationMetricResults",
]
CLASSIFICATION_METRICS: Mapping[str, Type[ClassificationMetric]] = {
cls().key: cls for cls in classification_metric_resolver
}
class ClassificationMetricResults(MetricResults):
"""Results from computing metrics."""
metrics = CLASSIFICATION_METRICS
@classmethod
def from_scores(cls, y_true: np.ndarray, y_score: np.ndarray):
"""Return an instance of these metrics from a given set of true and scores."""
if y_true.size == 0:
raise ValueError(f"Cannot calculate scores from empty array (y_true.shape={y_true.shape}).")
data = dict()
for key, metric in CLASSIFICATION_METRICS.items():
value = metric.score(y_true, y_score)
if isinstance(value, np.number):
# TODO: fix this upstream / make metric.score comply to signature
value = value.item()
data[key] = value
data["num_scores"] = y_score.size
return cls(data=data)
# docstr-coverage: inherited
def get_metric(self, name: str) -> float: # noqa: D102
if name not in self.data:
raise KeyError(f"Unknown metric: '{name}'. Possible options are: {sorted(self.data.keys())}")
return self.data[name]
class ClassificationEvaluator(Evaluator):
"""An evaluator that uses a classification metrics."""
all_scores: MutableMapping[Tuple[Target, int, int], np.ndarray]
all_positives: MutableMapping[Tuple[Target, int, int], np.ndarray]
def __init__(self, **kwargs):
"""
Initialize the evaluator.
:param kwargs:
keyword-based parameters passed to :meth:`Evaluator.__init__`.
"""
super().__init__(
filtered=False,
requires_positive_mask=True,
**kwargs,
)
self.all_scores = {}
self.all_positives = {}
# docstr-coverage: inherited
def process_scores_(
self,
hrt_batch: MappedTriples,
target: Target,
scores: torch.FloatTensor,
true_scores: Optional[torch.FloatTensor] = None,
dense_positive_mask: Optional[torch.FloatTensor] = None,
) -> None: # noqa: D102
if dense_positive_mask is None:
raise KeyError("Sklearn evaluators need the positive mask!")
# Transfer to cpu and convert to numpy
scores = scores.detach().cpu().numpy()
dense_positive_mask = dense_positive_mask.detach().cpu().numpy()
remaining = [i for i in range(hrt_batch.shape[1]) if i != TARGET_TO_INDEX[target]]
keys = hrt_batch[:, remaining].detach().cpu().numpy()
# Ensure that each key gets counted only once
for i in range(keys.shape[0]):
# include head_side flag into key to differentiate between (h, r) and (r, t)
key_suffix = tuple(map(int, keys[i]))
assert len(key_suffix) == 2
key_suffix = cast(Tuple[int, int], key_suffix)
key = (target,) + key_suffix
self.all_scores[key] = scores[i]
self.all_positives[key] = dense_positive_mask[i]
# docstr-coverage: inherited
def clear(self) -> None: # noqa: D102
self.all_positives.clear()
self.all_scores.clear()
# docstr-coverage: inherited
def finalize(self) -> ClassificationMetricResults: # noqa: D102
# Because the order of the values of a dictionary is not guaranteed,
# we need to retrieve scores and masks using the exact same key order.
all_keys = list(self.all_scores.keys())
y_score = np.concatenate([self.all_scores[k] for k in all_keys], axis=0).flatten()
y_true = np.concatenate([self.all_positives[k] for k in all_keys], axis=0).flatten()
# Clear buffers
self.clear()
return ClassificationMetricResults.from_scores(y_true, y_score)
|
163b5b68e2be398707a3bb5e6958e54908466687
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/export/management/commands/rebuild_schemas_subase_repeat_bug.py
|
b2792728531382a00ab7cc642a143b92104c28b3
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
rebuild_schemas_subase_repeat_bug.py
|
import logging
from collections import defaultdict
from django.core.management.base import BaseCommand
from corehq.apps.export.models import FormExportDataSchema
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = """"Once-off command to rebuild schemas affected by subcase in repeat bug
See https://github.com/dimagi/commcare-hq/pull/21384
"""
def handle(self, **options):
schemas_to_rebuild = defaultdict(list)
for doc_id, domain, app_id, xmlns in _latest_form_schema_ids():
schema = FormExportDataSchema.get(doc_id)
group_schemas = schema.group_schemas[1:]
for gs in group_schemas:
if not gs.path[-1].is_repeat:
schemas_to_rebuild[domain].append((app_id, xmlns))
break
for domain, schema_keys in schemas_to_rebuild.items():
print("Rebuilding {} schemas for domain '{}'".format(len(schema_keys), domain))
for app_id, xmlns in schema_keys:
print(" rebuilding ('{}', '{}')".format(app_id, xmlns))
FormExportDataSchema.generate_schema_from_builds(domain, app_id, xmlns, force_rebuild=True)
def _latest_form_schema_ids():
db = FormExportDataSchema.get_db()
seen = set()
for row in db.view('schemas_by_xmlns_or_case_type/view', reduce=False, descending=True):
key_ = row['key']
doc_type = key_[1]
if doc_type != 'FormExportDataSchema':
continue
domain, doc_type, app_id, xmlns, created_on = key_
doc_key = (domain, app_id, xmlns)
if doc_key in seen:
continue
seen.add(doc_key)
yield row['id'], domain, app_id, xmlns
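# Flow sketch (comments only): the view key is (domain, doc_type, app_id,
# xmlns, created_on) and is walked in descending order, so the newest schema
# for each (domain, app_id, xmlns) combination appears first; the `seen` set
# then skips every older schema for the same combination, which is what makes
# these the *latest* schema ids.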
|
21bb690f812d2b8b17122a2e491d2468505e93ae
|
38bed8ec0229b2d42ebdb33e09930ba8ee6ba5b7
|
/torchvision/io/__init__.py
|
8427095cea62068d19718f31e3898d0eda856c11
|
[
"BSD-3-Clause",
"CC-BY-NC-4.0"
] |
permissive
|
pytorch/vision
|
10443ac1eddf7a32ecb288fe8f58e28cab2a60a1
|
1f94320d8db8d102214a7dc02c22fa65ee9ac58a
|
refs/heads/main
| 2023-09-06T03:48:02.303020
| 2023-09-04T18:25:36
| 2023-09-04T18:25:36
| 73,328,905
| 15,620
| 8,564
|
BSD-3-Clause
| 2023-09-14T17:52:49
| 2016-11-09T23:11:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
__init__.py
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
b261ee409cc54f085a9955178b352bc956ef81c4
|
5068bc927a7fff73923ce95862ff70120160c491
|
/contrib/axe/osx.spec
|
f7bbcd5d662ceee4d7676edc36debee7b0ec0ea2
|
[
"MIT"
] |
permissive
|
AXErunners/electrum-axe
|
cdbce2dbb92e23e32e9f9b733ae9f65f51c0ae9f
|
7ef05088c0edaf0688fb167df353d6da619ebf2f
|
refs/heads/master
| 2021-04-03T09:40:37.109317
| 2020-08-27T16:53:18
| 2020-08-27T16:53:18
| 124,705,752
| 336
| 75
|
MIT
| 2020-10-17T18:30:25
| 2018-03-10T23:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,885
|
spec
|
osx.spec
|
# -*- mode: python -*-
import os
import os.path
import sys
from PyInstaller.utils.hooks import collect_data_files, collect_submodules
for i, x in enumerate(sys.argv):
if x == '--name':
cmdline_name = sys.argv[i+1]
break
else:
raise Exception('no name')
PY36BINDIR = os.environ.get('PY36BINDIR')
TRAVIS_TAG = os.environ.get('TRAVIS_TAG')
AXE_ELECTRUM_VERSION = os.environ.get('AXE_ELECTRUM_VERSION')
ICONS_FILE = 'electrum_axe/gui/icons/electrum-axe.icns'
hiddenimports = collect_submodules('trezorlib')
hiddenimports += collect_submodules('hideezlib')
hiddenimports += collect_submodules('safetlib')
hiddenimports += collect_submodules('btchip')
hiddenimports += collect_submodules('keepkeylib')
hiddenimports += collect_submodules('websocket')
# safetlib imports PyQt5.Qt. We use a local updated copy of pinmatrix.py until they
# release a new version that includes https://github.com/archos-safe-t/python-safet/commit/b1eab3dba4c04fdfc1fcf17b66662c28c5f2380e
hiddenimports.remove('safetlib.qt.pinmatrix')
hiddenimports += [
'electrum_axe',
'electrum_axe.base_crash_reporter',
'electrum_axe.base_wizard',
'electrum_axe.plot',
'electrum_axe.qrscanner',
'electrum_axe.websockets',
'electrum_axe.gui.qt',
'PyQt5.sip',
'PyQt5.QtPrintSupport', # needed by Revealer
'electrum_axe.plugins',
'electrum_axe.plugins.hw_wallet.qt',
'electrum_axe.plugins.audio_modem.qt',
'electrum_axe.plugins.cosigner_pool.qt',
'electrum_axe.plugins.digitalbitbox.qt',
'electrum_axe.plugins.email_requests.qt',
'electrum_axe.plugins.keepkey.qt',
'electrum_axe.plugins.revealer.qt',
'electrum_axe.plugins.labels.qt',
'electrum_axe.plugins.trezor.qt',
'electrum_axe.plugins.hideez.client',
'electrum_axe.plugins.hideez.qt',
'electrum_axe.plugins.safe_t.client',
'electrum_axe.plugins.safe_t.qt',
'electrum_axe.plugins.ledger.qt',
'electrum_axe.plugins.virtualkeyboard.qt',
]
datas = [
('electrum_axe/checkpoints*.*', 'electrum_axe'),
('electrum_axe/*.json', 'electrum_axe'),
('electrum_axe/locale', 'electrum_axe/locale'),
('electrum_axe/wordlist', 'electrum_axe/wordlist'),
('electrum_axe/gui/icons', 'electrum_axe/gui/icons'),
]
datas += collect_data_files('trezorlib')
datas += collect_data_files('hideezlib')
datas += collect_data_files('safetlib')
datas += collect_data_files('btchip')
datas += collect_data_files('keepkeylib')
# Add the QR Scanner helper app
if TRAVIS_TAG:
QRREADER_ZPATH = 'contrib/CalinsQRReader/build/Release/CalinsQRReader.app'
QRREADER_PATH = './contrib/CalinsQRReader/build/Release/CalinsQRReader.app'
datas += [(QRREADER_ZPATH, QRREADER_PATH)]
# Add libusb so Trezor and Safe-T mini will work
binaries = []
binaries += [('../libusb-1.0.dylib', '.')]
binaries += [('../libsecp256k1.0.dylib', '.')]
binaries += [('/usr/local/lib/libgmp.10.dylib', '.')]
# https://github.com/pyinstaller/pyinstaller/wiki/Recipe-remove-tkinter-tcl
sys.modules['FixTk'] = None
excludes = ['FixTk', 'tcl', 'tk', '_tkinter', 'tkinter', 'Tkinter']
excludes += [
'PyQt5.QtBluetooth',
'PyQt5.QtCLucene',
'PyQt5.QtDBus',
'PyQt5.Qt5CLucene',
'PyQt5.QtDesigner',
'PyQt5.QtDesignerComponents',
'PyQt5.QtHelp',
'PyQt5.QtLocation',
'PyQt5.QtMultimedia',
'PyQt5.QtMultimediaQuick_p',
'PyQt5.QtMultimediaWidgets',
'PyQt5.QtNetwork',
'PyQt5.QtNetworkAuth',
'PyQt5.QtNfc',
'PyQt5.QtOpenGL',
'PyQt5.QtPositioning',
'PyQt5.QtQml',
'PyQt5.QtQuick',
'PyQt5.QtQuickParticles',
'PyQt5.QtQuickWidgets',
'PyQt5.QtSensors',
'PyQt5.QtSerialPort',
'PyQt5.QtSql',
'PyQt5.Qt5Sql',
'PyQt5.Qt5Svg',
'PyQt5.QtTest',
'PyQt5.QtWebChannel',
'PyQt5.QtWebEngine',
'PyQt5.QtWebEngineCore',
'PyQt5.QtWebEngineWidgets',
'PyQt5.QtWebKit',
'PyQt5.QtWebKitWidgets',
'PyQt5.QtWebSockets',
'PyQt5.QtXml',
'PyQt5.QtXmlPatterns',
'PyQt5.QtWebProcess',
'PyQt5.QtWinExtras',
]
a = Analysis(['electrum-axe'],
hiddenimports=hiddenimports,
datas=datas,
binaries=binaries,
excludes=excludes,
runtime_hooks=['pyi_runtimehook.py'])
# http://stackoverflow.com/questions/19055089/
for d in a.datas:
if 'pyconfig' in d[0]:
a.datas.remove(d)
break
pyz = PYZ(a.pure)
exe = EXE(pyz,
a.scripts,
exclude_binaries=True,
debug=False,
strip=False,
upx=False,
console=False,
icon='electrum_axe/gui/icons/electrum-axe.ico',
name=os.path.join('build/electrum-axe/electrum-axe', cmdline_name))
# trezorctl separate bin
tctl_a = Analysis([os.path.join(PY36BINDIR, 'trezorctl')],
hiddenimports=['pkgutil'],
excludes=excludes,
runtime_hooks=['pyi_tctl_runtimehook.py'])
tctl_pyz = PYZ(tctl_a.pure)
tctl_exe = EXE(tctl_pyz,
tctl_a.scripts,
exclude_binaries=True,
debug=False,
strip=False,
upx=False,
console=True,
name=os.path.join('build/electrum-axe/electrum-axe', 'trezorctl.bin'))
coll = COLLECT(exe, #tctl_exe,
a.binaries,
a.datas,
strip=False,
upx=False,
name=os.path.join('dist', 'electrum-axe'))
app = BUNDLE(coll,
info_plist={
'NSHighResolutionCapable': True,
'NSSupportsAutomaticGraphicsSwitching': True,
'CFBundleURLTypes': [
{'CFBundleURLName': 'axe', 'CFBundleURLSchemes': ['axe']}
],
},
name=os.path.join('dist', 'Axe Electrum.app'),
appname="Axe Electrum",
icon=ICONS_FILE,
version=AXE_ELECTRUM_VERSION)
|
7a170d1ec4d9ec303f741d965d91a8aac9d0e6d7
|
4feb5744ab5a26aeeb04573e4944d2bf4d1a6a2a
|
/peeringdb_server/search.py
|
ee3e2c4c06f1e8bed81f84093817a2e6f754bd5f
|
[
"BSD-2-Clause"
] |
permissive
|
peeringdb/peeringdb
|
cb79f809c4bb8cc5192180366df1f05d8fc0111f
|
3f62b2d97c78ccf151fb1a5761637e28463b9541
|
refs/heads/master
| 2023-09-04T09:26:43.741086
| 2023-08-22T19:20:34
| 2023-08-22T19:20:34
| 60,563,174
| 311
| 121
|
BSD-2-Clause
| 2023-09-13T02:13:42
| 2016-06-06T21:49:25
|
Python
|
UTF-8
|
Python
| false
| false
| 11,239
|
py
|
search.py
|
"""
Search implementation used for the peeringdb top search bar, name
searches through the api `name_search` filter, as well as advanced
search functionality.
Search logic is handled by django-haystack and whoosh.
Refer to search_indexes.py for search index definition.
"""
# import time
import re
import unidecode
from django.conf import settings
from django.db.models import Q
from elasticsearch import Elasticsearch
from haystack.inputs import Exact
from haystack.query import SearchQuerySet
from mainsite.settings import ELASTICSEARCH_HOST
from peeringdb_server.models import (
Facility,
InternetExchange,
IXLanPrefix,
Network,
NetworkIXLan,
Organization,
)
# models considered during autocomplete (quick-search)
autocomplete_models = [
Organization,
Network,
InternetExchange,
Facility,
]
# models considered during standard search
searchable_models = [
Organization,
Network,
Facility,
InternetExchange,
NetworkIXLan,
IXLanPrefix,
# InternetExchangeFacility,
# NetworkFacility,
# NetworkContact,
# IXLan,
]
ONLY_DIGITS = re.compile(r"^[0-9]+$")
# These are not exact, but should be good enough
PARTIAL_IPV4_ADDRESS = re.compile(r"^([0-9]{1,3}\.){1,3}([0-9]{1,3})?$")
PARTIAL_IPV6_ADDRESS = re.compile(r"^([0-9A-Fa-f]{1,4}|:):[0-9A-Fa-f:]*$")
def unaccent(v):
return unidecode.unidecode(v).lower().strip()
def valid_partial_ipv4_address(ip):
return all(int(s) >= 0 and int(s) <= 255 for s in ip.split(".") if len(s) > 0)
def is_valid_latitude(lat):
"""Validates a latitude."""
return re.match(r"^[-]?((([0-8]?[0-9])\.(\d+))|(90(\.0+)?))$", str(lat)) is not None
def is_valid_longitude(long):
"""Validates a longitude."""
return (
re.match(
r"^[-]?((((1[0-7][0-9])|([0-9]?[0-9]))\.(\d+))|180(\.0+)?)$", str(long)
)
is not None
)
def make_asn_query(term):
return Network.objects.filter(asn__exact=term, status="ok")
def make_ipv4_query(term):
return NetworkIXLan.objects.filter(ipaddr4__startswith=term, status="ok")
def make_ipv6_query(term):
return NetworkIXLan.objects.filter(ipaddr6__startswith=term, status="ok")
def prepare_term(term):
try:
if len(term) == 1:
int(term)
term = f"AS{term}"
except ValueError:
pass
return unaccent(term)
def make_search_query(term):
if not term:
return SearchQuerySet().none()
term = unaccent(term)
if ONLY_DIGITS.match(term):
return make_asn_query(term)
if PARTIAL_IPV4_ADDRESS.match(term):
if valid_partial_ipv4_address(term):
return make_ipv4_query(term)
if PARTIAL_IPV6_ADDRESS.match(term):
return make_ipv6_query(term)
term_filters = Q(content=term) | Q(content__startswith=term)
return (
SearchQuerySet()
.filter(term_filters, status=Exact("ok"))
.models(*searchable_models)
)
def make_name_search_query(term):
if not term:
return SearchQuerySet().none()
term = prepare_term(term)
term_filters = Q(name=term) | Q(name__startswith=term)
return SearchQuerySet().filter(term_filters, status=Exact("ok"))
def make_autocomplete_query(term):
if not term:
return SearchQuerySet().none()
term = prepare_term(term)
return SearchQuerySet().autocomplete(auto=term).filter(status=Exact("ok"))
def search(term, autocomplete=False):
"""
Search searchable objects (ixp, network, facility ...) by term.
Returns result dict.
"""
# t0 = time.time()
if autocomplete:
search_query = make_autocomplete_query(term).models(*autocomplete_models)
limit = settings.SEARCH_RESULTS_AUTOCOMPLETE_LIMIT
else:
search_query = make_search_query(term)
limit = settings.SEARCH_RESULTS_LIMIT
categories = ("fac", "ix", "net", "org")
result = {tag: [] for tag in categories}
pk_map = {tag: {} for tag in categories}
# add entries to the result by order of scoring with the
# highest scored on top (beginning of list)
for sq in search_query[:limit]:
if hasattr(sq, "model"):
model = sq.model
model.HandleRef.tag
categorize(sq, result, pk_map)
else:
if sq.HandleRef.tag == "netixlan":
add_secondary_entries(sq, result, pk_map)
else:
append_result(
sq.HandleRef.tag,
sq.pk,
getattr(sq, "search_result_name", None),
sq.org_id,
None,
result,
pk_map,
)
# print("done", time.time() - t0)
return result
def get_lat_long_from_search_result(search_result):
if search_result is None:
return None
latitude = search_result.get("latitude")
longitude = search_result.get("longitude")
if latitude is not None and longitude is not None:
return latitude, longitude
else:
return None
def elasticsearch_proximity_entity(name):
es = Elasticsearch(ELASTICSEARCH_HOST)
body = {
"query": {
"bool": {
"must": [
{
"multi_match": {
"query": name,
"fields": ["name", "name_long", "aka"],
}
},
{"exists": {"field": "geocode_coordinates"}},
]
}
},
"size": 1, # Return only the first match
}
index = ["fac", "org"] # Replace this with your desired index or indices
search_result = es.search(index=index, body=body)
# Check if there are any matches and return the first one if available
if search_result["hits"]["total"]["value"] > 0:
item = search_result["hits"]["hits"][0]
item["_source"]["ref_tag"] = item["_index"]
item["_source"]["id"] = item["_id"]
return item["_source"]
else:
return None
def order_results_alphabetically(result, search_term):
"""
Order the search results alphabetically and put the exact case-insensitive matches in front.
Args:
- result: A dictionary containing categories and their search results.
- search_term: A string representing the search term.
Returns:
- result: A dictionary containing the search results in alphabetical order.
"""
search_term_lower = search_term.lower()
for category in result:
result[category] = sorted(result[category], key=lambda x: x["name"].lower())
exact_match_index = -1
for index, item in enumerate(result[category]):
if item["name"].lower() == search_term_lower:
exact_match_index = index
break
if exact_match_index != -1:
exact_match = result[category].pop(exact_match_index)
result[category].insert(0, exact_match)
return result
def search_v2(term, geo={}):
"""
Search searchable objects (ixp, network, facility ...) by term on elasticsearch engine.
Returns result dict.
"""
es = Elasticsearch(ELASTICSEARCH_HOST)
qs = " ".join([str(elem) for elem in term])
term = f"*{' '.join(qs.split())}*"
body = {"query": {"bool": {"must": {"query_string": {"query": term}}}}}
if geo:
if is_valid_latitude(geo["lat"]) and is_valid_longitude(geo["long"]):
body["query"]["bool"]["filter"] = {
"geo_distance": {
"distance": geo["dist"],
"geocode_coordinates": {
"lat": float(geo["lat"]),
"lon": float(geo["long"]),
},
}
}
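    # Filter sketch (comments only; the values shown are illustrative): with
    # e.g. geo={"lat": "52.5", "long": "13.4", "dist": "20km"} the bool query
    # gains a geo_distance filter, so only documents whose geocode_coordinates
    # lie within `dist` of that point are returned.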
limit = settings.SEARCH_RESULTS_LIMIT
indexes = ["fac", "ix", "net", "org"] # Add new index names
if term and term.strip("*").split(" ")[0].lower() in indexes:
ref_tag = term.strip("*").split(" ")[0]
indexes = [ref_tag.lower()]
term = term.replace(f"*{ref_tag}", "").strip()
if term:
body["query"]["bool"]["must"]["query_string"]["query"] = term
else:
del body["query"]["bool"]["must"]
search_query = es.search(index=indexes, body=body, size=limit)
categories = ("fac", "ix", "net", "org")
result = {tag: [] for tag in categories}
pk_map = {tag: {} for tag in categories}
# add entries to the result by order of scoring with the
# highest scored on top (beginning of list)
for sq in search_query["hits"]["hits"][:limit]:
if geo.get("country"):
if not sq["_source"].get("country"):
continue
if geo["country"] not in sq["_source"].get("country"):
continue
if geo.get("state"):
if not sq["_source"].get("state"):
continue
if geo["state"] not in sq["_source"].get("state"):
continue
if sq["_source"]["status"] == "ok":
if sq["_index"] == "net":
append_result(
sq["_index"],
sq["_id"],
f"{sq['_source']['name']} ({sq['_source']['asn']})",
sq["_source"]["org"]["id"],
None,
result,
pk_map,
)
elif sq["_index"] == "org":
append_result(
sq["_index"],
sq["_id"],
sq["_source"]["name"],
sq["_id"],
None,
result,
pk_map,
)
else:
append_result(
sq["_index"],
sq["_id"],
sq["_source"]["name"],
sq["_source"]["org"]["id"],
None,
result,
pk_map,
)
result = order_results_alphabetically(result, term)
return result
def categorize(sq, result, pk_map):
if getattr(sq, "result_name", None):
# main entity
tag = sq.model.HandleRef.tag
if tag == "org":
org_id = int(sq.pk)
else:
org_id = sq.org_id
append_result(tag, int(sq.pk), sq.result_name, org_id, None, result, pk_map)
else:
add_secondary_entries(sq, result, pk_map)
def add_secondary_entries(sq, result, pk_map):
for tag in result.keys():
if not getattr(sq, f"{tag}_result_name", None):
continue
org_id = int(getattr(sq, f"{tag}_org_id", 0))
name = getattr(sq, f"{tag}_result_name")
pk = int(getattr(sq, f"{tag}_id", 0))
sub_name = getattr(sq, f"{tag}_sub_result_name")
append_result(tag, pk, name, org_id, sub_name, result, pk_map)
def append_result(tag, pk, name, org_id, sub_name, result, pk_map):
if pk in pk_map[tag]:
return
pk_map[tag][pk] = True
result[tag].append(
{"id": pk, "name": name, "org_id": int(org_id), "sub_name": sub_name}
)
|