max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
advent/days/day16/day.py | RuedigerLudwig/advent2021 | 0 | 6623951 | from __future__ import annotations
import abc
from math import prod
from typing import Generator, Iterator
from advent.common import utils
day_num = 16
def part1(lines: Iterator[str]) -> int:
bit = Packet.from_str(next(lines))
return bit.get_version_sum()
def part2(lines: Iterator[str]) -> int:
bit = Packet.from_str(next(lines))
return bit.calc_value()
BitConverter = Generator[int, int, None]
@utils.coroutine
def bit_converter(data: Iterator[int]) -> BitConverter:
bit_count = yield 0
while True:
result = 0
for _ in range(bit_count):
result = (result << 1) + next(data)
bit_count = yield result
class Packet(abc.ABC):
@staticmethod
def expand(input: str) -> Iterator[int]:
for char in input:
digit = int(char, base=16)
yield int(digit & 0x08 > 0)
yield int(digit & 0x04 > 0)
yield int(digit & 0x02 > 0)
yield int(digit & 0x01 > 0)
@staticmethod
def from_str(line: str) -> Packet:
packet, _ = Packet.create_packet(bit_converter(Packet.expand(line)))
return packet
@ staticmethod
def create_packet(data: BitConverter) -> tuple[Packet, int]:
version = data.send(3)
match data.send(3):
case 4:
return LiteralPacket.create(data, version)
case op:
return OperatorPacket.create(data, version, op)
@abc.abstractmethod
def calc_value(self) -> int:
...
@abc.abstractmethod
def get_version_sum(self) -> int:
...
class LiteralPacket(Packet):
@staticmethod
def create(data: BitConverter, version: int) -> tuple[Packet, int]:
consumed = 6
value = 0
more = True
while more:
more = data.send(1) == 1
value = (value << 4) + data.send(4)
consumed += 5
return LiteralPacket(version, value), consumed
def __init__(self, version: int, value: int):
self.version = version
self.value = value
def calc_value(self) -> int:
return self.value
def get_version_sum(self) -> int:
return self.version
def __eq__(self, other: object) -> bool:
if isinstance(other, LiteralPacket):
return other.value == self.value and other.version == self.version
raise NotImplementedError
class OperatorPacket(Packet):
@staticmethod
def create(data: BitConverter, version: int, op: int) -> tuple[Packet, int]:
consumed = 6
sub_packets: list[Packet] = []
match data.send(1):
case 0:
length = data.send(15)
consumed += 16 + length
while length > 0:
packet, packet_consumed = Packet.create_packet(data)
length -= packet_consumed
sub_packets.append(packet)
case 1:
count = data.send(11)
consumed += 12
for _ in range(count):
packet, packet_consumed = Packet.create_packet(data)
consumed += packet_consumed
sub_packets.append(packet)
case _:
raise NotImplementedError
return OperatorPacket(version, op, sub_packets), consumed
def __init__(self, version: int, op: int, packets: list[Packet]):
self.version = version
self.op = op
self.packets = packets
def calc_value(self) -> int:
match self.op:
case 0:
return sum(packet.calc_value() for packet in self.packets)
case 1:
return prod(packet.calc_value() for packet in self.packets)
case 2:
return min(packet.calc_value() for packet in self.packets)
case 3:
return max(packet.calc_value() for packet in self.packets)
case 5:
return 1 if self.packets[0].calc_value() > self.packets[1].calc_value() else 0
case 6:
return 1 if self.packets[0].calc_value() < self.packets[1].calc_value() else 0
case 7:
return 1 if self.packets[0].calc_value() == self.packets[1].calc_value() else 0
case _:
raise NotImplementedError
def get_version_sum(self) -> int:
return self.version + sum(packet.get_version_sum() for packet in self.packets)
def __eq__(self, other: object) -> bool:
try:
if isinstance(other, OperatorPacket):
return other.op == self.op and other.version == self.version and all(
s == o for s, o in zip(self.packets, other.packets, strict=True))
except ValueError:
return False
raise NotImplementedError
| from __future__ import annotations
import abc
from math import prod
from typing import Generator, Iterator
from advent.common import utils
day_num = 16
def part1(lines: Iterator[str]) -> int:
bit = Packet.from_str(next(lines))
return bit.get_version_sum()
def part2(lines: Iterator[str]) -> int:
bit = Packet.from_str(next(lines))
return bit.calc_value()
BitConverter = Generator[int, int, None]
@utils.coroutine
def bit_converter(data: Iterator[int]) -> BitConverter:
bit_count = yield 0
while True:
result = 0
for _ in range(bit_count):
result = (result << 1) + next(data)
bit_count = yield result
class Packet(abc.ABC):
@staticmethod
def expand(input: str) -> Iterator[int]:
for char in input:
digit = int(char, base=16)
yield int(digit & 0x08 > 0)
yield int(digit & 0x04 > 0)
yield int(digit & 0x02 > 0)
yield int(digit & 0x01 > 0)
@staticmethod
def from_str(line: str) -> Packet:
packet, _ = Packet.create_packet(bit_converter(Packet.expand(line)))
return packet
@ staticmethod
def create_packet(data: BitConverter) -> tuple[Packet, int]:
version = data.send(3)
match data.send(3):
case 4:
return LiteralPacket.create(data, version)
case op:
return OperatorPacket.create(data, version, op)
@abc.abstractmethod
def calc_value(self) -> int:
...
@abc.abstractmethod
def get_version_sum(self) -> int:
...
class LiteralPacket(Packet):
@staticmethod
def create(data: BitConverter, version: int) -> tuple[Packet, int]:
consumed = 6
value = 0
more = True
while more:
more = data.send(1) == 1
value = (value << 4) + data.send(4)
consumed += 5
return LiteralPacket(version, value), consumed
def __init__(self, version: int, value: int):
self.version = version
self.value = value
def calc_value(self) -> int:
return self.value
def get_version_sum(self) -> int:
return self.version
def __eq__(self, other: object) -> bool:
if isinstance(other, LiteralPacket):
return other.value == self.value and other.version == self.version
raise NotImplementedError
class OperatorPacket(Packet):
@staticmethod
def create(data: BitConverter, version: int, op: int) -> tuple[Packet, int]:
consumed = 6
sub_packets: list[Packet] = []
match data.send(1):
case 0:
length = data.send(15)
consumed += 16 + length
while length > 0:
packet, packet_consumed = Packet.create_packet(data)
length -= packet_consumed
sub_packets.append(packet)
case 1:
count = data.send(11)
consumed += 12
for _ in range(count):
packet, packet_consumed = Packet.create_packet(data)
consumed += packet_consumed
sub_packets.append(packet)
case _:
raise NotImplementedError
return OperatorPacket(version, op, sub_packets), consumed
def __init__(self, version: int, op: int, packets: list[Packet]):
self.version = version
self.op = op
self.packets = packets
def calc_value(self) -> int:
match self.op:
case 0:
return sum(packet.calc_value() for packet in self.packets)
case 1:
return prod(packet.calc_value() for packet in self.packets)
case 2:
return min(packet.calc_value() for packet in self.packets)
case 3:
return max(packet.calc_value() for packet in self.packets)
case 5:
return 1 if self.packets[0].calc_value() > self.packets[1].calc_value() else 0
case 6:
return 1 if self.packets[0].calc_value() < self.packets[1].calc_value() else 0
case 7:
return 1 if self.packets[0].calc_value() == self.packets[1].calc_value() else 0
case _:
raise NotImplementedError
def get_version_sum(self) -> int:
return self.version + sum(packet.get_version_sum() for packet in self.packets)
def __eq__(self, other: object) -> bool:
try:
if isinstance(other, OperatorPacket):
return other.op == self.op and other.version == self.version and all(
s == o for s, o in zip(self.packets, other.packets, strict=True))
except ValueError:
return False
raise NotImplementedError
| none | 1 | 2.621989 | 3 | |
ocradmin/ocrtasks/testutils.py | mikesname/ocropodium | 1 | 6623952 | """
Utils for testing the Ocr Task wrapper.
"""
from celery.contrib.abortable import AbortableTask
from decorators import register_handlers
@register_handlers
class TestTask(AbortableTask):
"""
Dummy task for running tests on.
"""
name = "testing.test"
max_retries = None
def run(self, a, b, **kwargs):
return a + b
| """
Utils for testing the Ocr Task wrapper.
"""
from celery.contrib.abortable import AbortableTask
from decorators import register_handlers
@register_handlers
class TestTask(AbortableTask):
"""
Dummy task for running tests on.
"""
name = "testing.test"
max_retries = None
def run(self, a, b, **kwargs):
return a + b
| en | 0.702823 | Utils for testing the Ocr Task wrapper. Dummy task for running tests on. | 2.217952 | 2 |
regex_builder/constants.py | Zomatree/regex-builder | 3 | 6623953 | ANY_CHAR = "."
WHITESPACE = "\\s"
NON_WHITESPACE = "\\S"
DIGIT = "\\d"
NON_DIGIT = "\\D"
WORD_CHAR = "\\w"
NON_WORD_CHAR = "\\W"
NEWLINE = "\\n"
TAB = "\\t"
NULL_CHAR = "\\0"
| ANY_CHAR = "."
WHITESPACE = "\\s"
NON_WHITESPACE = "\\S"
DIGIT = "\\d"
NON_DIGIT = "\\D"
WORD_CHAR = "\\w"
NON_WORD_CHAR = "\\W"
NEWLINE = "\\n"
TAB = "\\t"
NULL_CHAR = "\\0"
| none | 1 | 1.4834 | 1 | |
SCC/local_automation/subscribe_constant.py | Coder-Pham/SCC_application | 0 | 6623954 | <filename>SCC/local_automation/subscribe_constant.py<gh_stars>0
import paho.mqtt.subscribe as mqtts
import paho.mqtt.client as mqtt
import config
import psycopg2
import psycopg2.extras
import random, threading, json
import calendar
import time
# ====================================================
# MQTT Settings
mqtt_broker = config.mqtt_broker
mqtt_port = config.mqtt_port
mqtt_topic = config.mqtt_topic
# ====================================================
# MQTT In action
def on_connect(client, userdata, rc):
if rc != 0:
print("Unable to connect to MQTT Broker...")
else:
print("Connected with MQTT Broker: " + str(mqtt_broker))
def on_publish(client, userdata, mid):
pass
def on_disconnect(client, userdata, rc):
if rc != 0:
pass
mqttc = mqtt.Client()
mqttc.on_connect = on_connect
mqttc.on_disconnect = on_disconnect
mqttc.on_publish = on_publish
mqttc.connect(mqtt_broker, int(mqtt_port))
mqttc.subscribe(config.mqtt_fake_topic)
def message(client, userdata, msg):
# ====================================================
# PostgreSQL Settings
try:
db = psycopg2.connect(user = config.db_user,
password = <PASSWORD>,
host = config.db_host,
port = config.db_port,
database = config.db_name)
cursor = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Print PostgreSQL Connection properties
print ( db.get_dsn_parameters(),"\n")
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
print("You are connected to - ", record,"\n")
except (Exception, psycopg2.Error) as error :
print ("Error while connecting to PostgreSQL", error)
payloads = str(msg.payload.decode("utf-8"))
dic = json.loads(payloads)
print("Received: " + str(payloads) + " " + "on MQTT Topic: " + str(msg.topic),"\n")
# Change temperature and humidity
sql = """SELECT constant_value FROM constant WHERE constant_id = 1"""
try:
# Execute the SQL command
cursor.execute(sql)
result = cursor.fetchone()
new_value = int(result["constant_value"])
if (dic["values"]):
if new_value > 0:
new_value *= -1
else:
if new_value < 0:
new_value *= -1
sql = """UPDATE constant SET constant_value = '""" + str(new_value) + """' WHERE constant_id = 1"""
cursor.execute(sql)
except (Exception, psycopg2.Error) as error :
print ("1a: ", error)
# Rollback in case there is any error
db.rollback()
# Get current timestamp
ts = calendar.timegm(time.gmtime())
# Get recent status
sql = """SELECT * FROM device_log ORDER BY device_timestamp DESC LIMIT 1 """
try:
# Execute the SQL command
cursor.execute(sql)
result = cursor.fetchone()
if result is not None:
if result["device_status"] != dic["values"]:
sql = """INSERT INTO device_log(device_id, device_status, device_timestamp, device_updated_by) VALUES ('""" + str(dic["device_id"]) + """', '""" + str(dic["values"]) + """', '""" + str(ts) + """', '""" + str(dic['device_updated_by']) + """')"""
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except (Exception, psycopg2.Error) as error :
print ("2: ", error)
# Rollback in case there is any error
db.rollback()
sql = """UPDATE device SET device_status = '""" + str(dic["values"]) + """', device_updated_by = '""" + str(dic["device_updated_by"]) + """' WHERE device_id = '""" + str(dic["device_id"]) + """' """
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except (Exception, psycopg2.Error) as error :
print ("2a: ", error)
# Rollback in case there is any error
db.rollback()
else:
sql = """INSERT INTO device_log(device_id, device_status, device_timestamp, device_updated_by) VALUES ('""" + str(dic["device_id"]) + """', '""" + str(dic["values"]) + """', '""" + str(ts) + """', '""" + str(dic['device_updated_by']) + """')"""
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except (Exception, psycopg2.Error) as error :
print ("2: ", error)
# Rollback in case there is any error
db.rollback()
sql = """UPDATE device SET device_status = '""" + str(dic["values"]) + """', device_updated_by = '""" + str(dic["device_updated_by"]) + """' WHERE device_id = '""" + str(dic["device_id"]) + """' """
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except (Exception, psycopg2.Error) as error :
print ("2b: ", error)
# Rollback in case there is any error
db.rollback()
except (Exception, psycopg2.Error) as error :
print ("1: ", error)
# Rollback in case there is any error
db.rollback()
mqtts.callback(message,config.mqtt_fake_topic,hostname=config.mqtt_broker)
| <filename>SCC/local_automation/subscribe_constant.py<gh_stars>0
import paho.mqtt.subscribe as mqtts
import paho.mqtt.client as mqtt
import config
import psycopg2
import psycopg2.extras
import random, threading, json
import calendar
import time
# ====================================================
# MQTT Settings
mqtt_broker = config.mqtt_broker
mqtt_port = config.mqtt_port
mqtt_topic = config.mqtt_topic
# ====================================================
# MQTT In action
def on_connect(client, userdata, rc):
if rc != 0:
print("Unable to connect to MQTT Broker...")
else:
print("Connected with MQTT Broker: " + str(mqtt_broker))
def on_publish(client, userdata, mid):
pass
def on_disconnect(client, userdata, rc):
if rc != 0:
pass
mqttc = mqtt.Client()
mqttc.on_connect = on_connect
mqttc.on_disconnect = on_disconnect
mqttc.on_publish = on_publish
mqttc.connect(mqtt_broker, int(mqtt_port))
mqttc.subscribe(config.mqtt_fake_topic)
def message(client, userdata, msg):
# ====================================================
# PostgreSQL Settings
try:
db = psycopg2.connect(user = config.db_user,
password = <PASSWORD>,
host = config.db_host,
port = config.db_port,
database = config.db_name)
cursor = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Print PostgreSQL Connection properties
print ( db.get_dsn_parameters(),"\n")
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
print("You are connected to - ", record,"\n")
except (Exception, psycopg2.Error) as error :
print ("Error while connecting to PostgreSQL", error)
payloads = str(msg.payload.decode("utf-8"))
dic = json.loads(payloads)
print("Received: " + str(payloads) + " " + "on MQTT Topic: " + str(msg.topic),"\n")
# Change temperature and humidity
sql = """SELECT constant_value FROM constant WHERE constant_id = 1"""
try:
# Execute the SQL command
cursor.execute(sql)
result = cursor.fetchone()
new_value = int(result["constant_value"])
if (dic["values"]):
if new_value > 0:
new_value *= -1
else:
if new_value < 0:
new_value *= -1
sql = """UPDATE constant SET constant_value = '""" + str(new_value) + """' WHERE constant_id = 1"""
cursor.execute(sql)
except (Exception, psycopg2.Error) as error :
print ("1a: ", error)
# Rollback in case there is any error
db.rollback()
# Get current timestamp
ts = calendar.timegm(time.gmtime())
# Get recent status
sql = """SELECT * FROM device_log ORDER BY device_timestamp DESC LIMIT 1 """
try:
# Execute the SQL command
cursor.execute(sql)
result = cursor.fetchone()
if result is not None:
if result["device_status"] != dic["values"]:
sql = """INSERT INTO device_log(device_id, device_status, device_timestamp, device_updated_by) VALUES ('""" + str(dic["device_id"]) + """', '""" + str(dic["values"]) + """', '""" + str(ts) + """', '""" + str(dic['device_updated_by']) + """')"""
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except (Exception, psycopg2.Error) as error :
print ("2: ", error)
# Rollback in case there is any error
db.rollback()
sql = """UPDATE device SET device_status = '""" + str(dic["values"]) + """', device_updated_by = '""" + str(dic["device_updated_by"]) + """' WHERE device_id = '""" + str(dic["device_id"]) + """' """
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except (Exception, psycopg2.Error) as error :
print ("2a: ", error)
# Rollback in case there is any error
db.rollback()
else:
sql = """INSERT INTO device_log(device_id, device_status, device_timestamp, device_updated_by) VALUES ('""" + str(dic["device_id"]) + """', '""" + str(dic["values"]) + """', '""" + str(ts) + """', '""" + str(dic['device_updated_by']) + """')"""
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except (Exception, psycopg2.Error) as error :
print ("2: ", error)
# Rollback in case there is any error
db.rollback()
sql = """UPDATE device SET device_status = '""" + str(dic["values"]) + """', device_updated_by = '""" + str(dic["device_updated_by"]) + """' WHERE device_id = '""" + str(dic["device_id"]) + """' """
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except (Exception, psycopg2.Error) as error :
print ("2b: ", error)
# Rollback in case there is any error
db.rollback()
except (Exception, psycopg2.Error) as error :
print ("1: ", error)
# Rollback in case there is any error
db.rollback()
mqtts.callback(message,config.mqtt_fake_topic,hostname=config.mqtt_broker)
| en | 0.6139 | # ==================================================== # MQTT Settings # ==================================================== # MQTT In action # ==================================================== # PostgreSQL Settings # Print PostgreSQL Connection properties # Print PostgreSQL version # Change temperature and humidity SELECT constant_value FROM constant WHERE constant_id = 1 # Execute the SQL command UPDATE constant SET constant_value = ' ' WHERE constant_id = 1 # Rollback in case there is any error # Get current timestamp # Get recent status SELECT * FROM device_log ORDER BY device_timestamp DESC LIMIT 1 # Execute the SQL command INSERT INTO device_log(device_id, device_status, device_timestamp, device_updated_by) VALUES (' ', ' ', ' ', ' ') # Execute the SQL command # Commit your changes in the database # Rollback in case there is any error UPDATE device SET device_status = ' ', device_updated_by = ' ' WHERE device_id = ' ' # Execute the SQL command # Commit your changes in the database # Rollback in case there is any error INSERT INTO device_log(device_id, device_status, device_timestamp, device_updated_by) VALUES (' ', ' ', ' ', ' ') # Execute the SQL command # Commit your changes in the database # Rollback in case there is any error UPDATE device SET device_status = ' ', device_updated_by = ' ' WHERE device_id = ' ' # Execute the SQL command # Commit your changes in the database # Rollback in case there is any error # Rollback in case there is any error | 2.587594 | 3 |
pomidor/pomidor_exceptions.py | symon-storozhenko/pomidor | 1 | 6623955 | from selenium.webdriver.support.color import Colors
pomidor = 'Pomidor'
class PomidorKeyDoesNotExist(Exception):
"""PomidorCantRunOneBrowserInstanceInParallel Exception"""
def __init__(self, key):
self.key = key
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\nKeyboard key {self.key} does ' \
f'not exist{Colors.ENDC}'
class PomidorCantRunOneBrowserInstanceInParallel(Exception):
"""PomidorCantRunOneBrowserInstanceInParallel Exception"""
def __init__(self):
pass
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\nCannot run browser=\'one\' ' \
f'with parallel enabled.\nEither set browser=\'per_file\' or ' \
f'browser=\'per_test\' or remove parallel from run(..) function' \
f'{Colors.ENDC}'
class PomidorDataFeedNoKeyError(Exception):
""" Pomidor syntax error class: more actions than objects """
def __init__(self, path, line_num, key, data_file):
self.key = key
self.path = path
self.line_num = line_num
self.data_file = data_file
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'{Colors.FAIL}"PomidorDataFeedNoKeyError\n' \
f'File Path: {self.path}\nParagraph starts on line: ' \
f'{self.line_num}\n"{self.data_file}" file doesn\'t have ' \
f'<<{self.key}>> column{Colors.ENDC}\n'
class PomidorDataFeedNoAngleKeysProvidedException(Exception):
""" PomidorDataFeedNoAngleKeysProvidedException"""
def __init__(self, path, line_num, data_file):
self.path = path
self.line_num = line_num
self.data_file = data_file
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorDataFeedNoAngleKeysProvidedException\n' \
f'File Path: {self.path}\nParagraph starts on line: ' \
f'{self.line_num}\nYou have data csv file in @params line.' \
f' Either remove {Colors.WARNING}data=example.csv ' \
f'{Colors.FAIL}or include csv column ' \
f'name(s) in double angle brackets: \nExample: {Colors.WARNING}' \
f'type <<FirstName>> in #name_field\n{Colors.ENDC}'
class PomidorDataFeedNoCSVFileProvided(Exception):
""" PPomidorDataFeedNoCSVFileProvidedException"""
def __init__(self, path, line_num, data_file):
self.path = path
self.line_num = line_num
self.data_file = data_file
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorDataFeedNoCSVFileProvided\n' \
f'File Path: {self.path}\nParagraph starts on line: ' \
f'{self.line_num}\nIf you want to use keys from double angle ' \
f'brackets {Colors.WARNING}<<key>>{Colors.FAIL}, add ' \
f'data marker with a csv file ' \
f'in the @params line.\nExample: {Colors.WARNING}\n' \
f'@params data=csv_file_name.csv{Colors.ENDC}'
class PomidorFileNotFoundError(BaseException):
""" Pomidor syntax error class: more actions than objects """
def __init__(self, path):
self.path = path
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\nPomidorFileNotFoundError' \
f'No pomidor files found.\nFile Path: {self.path}{Colors.ENDC}'
class PomidorSyntaxErrorTooManyActions(Exception):
""" Pomidor syntax error class: more actions than objects """
def __init__(self, path, line_num, *args, **kwargs):
self.path = path
self.line_num = line_num
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorSyntaxErrorTooManyActions\nFile Path: ' \
f'{self.path}\nParagraph starts on line: {self.line_num}\n' \
f'ERROR: You have more actions than objects. Number of actions ' \
f'(click, type, wait, etc.) should match number of your objects' \
f' (Ex. #home_button){Colors.ENDC}'
class PomidorSyntaxErrorTooManyObjects(Exception):
""" Pomidor syntax error class: more objects than actions """
def __init__(self, path, line_num, *args, **kwargs):
self.path = path
self.line_num = line_num
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorSyntaxErrorTooManyObjects' \
f'\nFile Path: {self.path}\nParagraph ' \
f'starts on line: {self.line_num}\nERROR: You have more ' \
f'objects than actions. Number of actions ' \
f'(click, type, wait, etc.) should match number of your ' \
f'objects (Ex. #home_button){Colors.ENDC}'
class PomidorObjectDoesNotExistInCSVFile(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj, *args, **kwargs):
self.path = path
self.line_num = line_num
self.obj = obj
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorObjectDoesNotExistInCSVFile\nFilePath: ' \
f'{self.path}\nParagraph starts on line: {self.line_num}\n' \
f'ERROR: {Colors.WARNING}#{self.obj}{Colors.FAIL} does not ' \
f'exist in page object csv file.' \
f' Please check page object selector and value{Colors.ENDC}'
class PageObjectNotFound(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj):
self.path = path
self.line_num = line_num
self.obj = obj
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR{Colors.ENDC}\n' \
f'{Colors.FAIL}PageObjectNotFound{Colors.ENDC}\n' \
f'{Colors.FAIL}FilePath: {self.path}\n' \
f'Paragraph starts on line: {self.line_num}\nERROR: {Colors.WARNING}' \
f'#{self.obj}{Colors.FAIL} was not found on page.' \
f' Please check page object selector and value{Colors.ENDC}'
class PomidorAssertError(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj, act):
self.path = path
self.line_num = line_num
self.obj = obj
self.act = act
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR{Colors.ENDC}\n' \
f'{Colors.FAIL}PomidorAssertError{Colors.ENDC}\n' \
f'{Colors.FAIL}FilePath: {self.path}\n' \
f'Paragraph starts on line: {self.line_num}\nERROR: ' \
f'{Colors.WARNING}#{self.obj} is {self.act}{Colors.FAIL} ' \
f'is FALSE {Colors.ENDC}'
class PomidorEqualAssertError(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj, act, string, actual_string):
self.path = path
self.line_num = line_num
self.obj = obj
self.act = act
self.string = string
self.actual_str = actual_string
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR{Colors.ENDC}\n' \
f'{Colors.FAIL}PomidorAssertError{Colors.ENDC}\n' \
f'{Colors.FAIL}FilePath: {self.path}\n' \
f'Paragraph starts on line: {self.line_num}\nERROR: ' \
f'{Colors.WARNING}#{self.obj} {self.act} [[{self.string}'\
f']]{Colors.FAIL} is FALSE. {Colors.OKGREEN}Actual ' \
f'#{self.obj} text equals [[{self.actual_str}]]{Colors.ENDC}'
class ElementNotClickable(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj):
self.path = path
self.line_num = line_num
self.obj = obj
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR{Colors.ENDC}\n' \
f'{Colors.FAIL}ElementNotClickable{Colors.ENDC}\n' \
f'{Colors.FAIL}FilePath: {self.path}\n' \
f'Paragraph starts on line: {self.line_num}\nERROR: ' \
f'{Colors.WARNING}#{self.obj}{Colors.FAIL} is ' \
f'hidden from view. Consider using \'max\' and/or \'scroll\'\n' \
f'Example:\n{Colors.WARNING}@params max, scroll\n{Colors.ENDC}'
class PomidorPrerequisiteScenarioNotFoundError(Exception):
def __init__(self, path, line_num, prereq_path, story, *args, **kwargs):
self.path = path
self.line_num = line_num
self.prereq_path = prereq_path
self.story = story
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorPrerequisiteScenarioNotFoundError\n' \
f'FilePath: {self.path}\nParagraph starts on line ' \
f'{self.line_num}\nERROR: {Colors.WARNING}{self.story}' \
f'{Colors.FAIL} prerequisite scenario not found in ' \
f'prerequisites file ' \
f'{Colors.WARNING}{self.prereq_path}{Colors.ENDC}'
class Colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
ORANGE = '\033[91m'
| from selenium.webdriver.support.color import Colors
pomidor = 'Pomidor'
class PomidorKeyDoesNotExist(Exception):
"""PomidorCantRunOneBrowserInstanceInParallel Exception"""
def __init__(self, key):
self.key = key
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\nKeyboard key {self.key} does ' \
f'not exist{Colors.ENDC}'
class PomidorCantRunOneBrowserInstanceInParallel(Exception):
"""PomidorCantRunOneBrowserInstanceInParallel Exception"""
def __init__(self):
pass
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\nCannot run browser=\'one\' ' \
f'with parallel enabled.\nEither set browser=\'per_file\' or ' \
f'browser=\'per_test\' or remove parallel from run(..) function' \
f'{Colors.ENDC}'
class PomidorDataFeedNoKeyError(Exception):
""" Pomidor syntax error class: more actions than objects """
def __init__(self, path, line_num, key, data_file):
self.key = key
self.path = path
self.line_num = line_num
self.data_file = data_file
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'{Colors.FAIL}"PomidorDataFeedNoKeyError\n' \
f'File Path: {self.path}\nParagraph starts on line: ' \
f'{self.line_num}\n"{self.data_file}" file doesn\'t have ' \
f'<<{self.key}>> column{Colors.ENDC}\n'
class PomidorDataFeedNoAngleKeysProvidedException(Exception):
""" PomidorDataFeedNoAngleKeysProvidedException"""
def __init__(self, path, line_num, data_file):
self.path = path
self.line_num = line_num
self.data_file = data_file
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorDataFeedNoAngleKeysProvidedException\n' \
f'File Path: {self.path}\nParagraph starts on line: ' \
f'{self.line_num}\nYou have data csv file in @params line.' \
f' Either remove {Colors.WARNING}data=example.csv ' \
f'{Colors.FAIL}or include csv column ' \
f'name(s) in double angle brackets: \nExample: {Colors.WARNING}' \
f'type <<FirstName>> in #name_field\n{Colors.ENDC}'
class PomidorDataFeedNoCSVFileProvided(Exception):
""" PPomidorDataFeedNoCSVFileProvidedException"""
def __init__(self, path, line_num, data_file):
self.path = path
self.line_num = line_num
self.data_file = data_file
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorDataFeedNoCSVFileProvided\n' \
f'File Path: {self.path}\nParagraph starts on line: ' \
f'{self.line_num}\nIf you want to use keys from double angle ' \
f'brackets {Colors.WARNING}<<key>>{Colors.FAIL}, add ' \
f'data marker with a csv file ' \
f'in the @params line.\nExample: {Colors.WARNING}\n' \
f'@params data=csv_file_name.csv{Colors.ENDC}'
class PomidorFileNotFoundError(BaseException):
""" Pomidor syntax error class: more actions than objects """
def __init__(self, path):
self.path = path
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\nPomidorFileNotFoundError' \
f'No pomidor files found.\nFile Path: {self.path}{Colors.ENDC}'
class PomidorSyntaxErrorTooManyActions(Exception):
""" Pomidor syntax error class: more actions than objects """
def __init__(self, path, line_num, *args, **kwargs):
self.path = path
self.line_num = line_num
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorSyntaxErrorTooManyActions\nFile Path: ' \
f'{self.path}\nParagraph starts on line: {self.line_num}\n' \
f'ERROR: You have more actions than objects. Number of actions ' \
f'(click, type, wait, etc.) should match number of your objects' \
f' (Ex. #home_button){Colors.ENDC}'
class PomidorSyntaxErrorTooManyObjects(Exception):
""" Pomidor syntax error class: more objects than actions """
def __init__(self, path, line_num, *args, **kwargs):
self.path = path
self.line_num = line_num
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorSyntaxErrorTooManyObjects' \
f'\nFile Path: {self.path}\nParagraph ' \
f'starts on line: {self.line_num}\nERROR: You have more ' \
f'objects than actions. Number of actions ' \
f'(click, type, wait, etc.) should match number of your ' \
f'objects (Ex. #home_button){Colors.ENDC}'
class PomidorObjectDoesNotExistInCSVFile(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj, *args, **kwargs):
self.path = path
self.line_num = line_num
self.obj = obj
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorObjectDoesNotExistInCSVFile\nFilePath: ' \
f'{self.path}\nParagraph starts on line: {self.line_num}\n' \
f'ERROR: {Colors.WARNING}#{self.obj}{Colors.FAIL} does not ' \
f'exist in page object csv file.' \
f' Please check page object selector and value{Colors.ENDC}'
class PageObjectNotFound(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj):
self.path = path
self.line_num = line_num
self.obj = obj
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR{Colors.ENDC}\n' \
f'{Colors.FAIL}PageObjectNotFound{Colors.ENDC}\n' \
f'{Colors.FAIL}FilePath: {self.path}\n' \
f'Paragraph starts on line: {self.line_num}\nERROR: {Colors.WARNING}' \
f'#{self.obj}{Colors.FAIL} was not found on page.' \
f' Please check page object selector and value{Colors.ENDC}'
class PomidorAssertError(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj, act):
self.path = path
self.line_num = line_num
self.obj = obj
self.act = act
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR{Colors.ENDC}\n' \
f'{Colors.FAIL}PomidorAssertError{Colors.ENDC}\n' \
f'{Colors.FAIL}FilePath: {self.path}\n' \
f'Paragraph starts on line: {self.line_num}\nERROR: ' \
f'{Colors.WARNING}#{self.obj} is {self.act}{Colors.FAIL} ' \
f'is FALSE {Colors.ENDC}'
class PomidorEqualAssertError(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj, act, string, actual_string):
self.path = path
self.line_num = line_num
self.obj = obj
self.act = act
self.string = string
self.actual_str = actual_string
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR{Colors.ENDC}\n' \
f'{Colors.FAIL}PomidorAssertError{Colors.ENDC}\n' \
f'{Colors.FAIL}FilePath: {self.path}\n' \
f'Paragraph starts on line: {self.line_num}\nERROR: ' \
f'{Colors.WARNING}#{self.obj} {self.act} [[{self.string}'\
f']]{Colors.FAIL} is FALSE. {Colors.OKGREEN}Actual ' \
f'#{self.obj} text equals [[{self.actual_str}]]{Colors.ENDC}'
class ElementNotClickable(Exception):
""" Pomidor syntax error class: Page object does not exist on the page """
def __init__(self, path, line_num, obj):
self.path = path
self.line_num = line_num
self.obj = obj
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR{Colors.ENDC}\n' \
f'{Colors.FAIL}ElementNotClickable{Colors.ENDC}\n' \
f'{Colors.FAIL}FilePath: {self.path}\n' \
f'Paragraph starts on line: {self.line_num}\nERROR: ' \
f'{Colors.WARNING}#{self.obj}{Colors.FAIL} is ' \
f'hidden from view. Consider using \'max\' and/or \'scroll\'\n' \
f'Example:\n{Colors.WARNING}@params max, scroll\n{Colors.ENDC}'
class PomidorPrerequisiteScenarioNotFoundError(Exception):
def __init__(self, path, line_num, prereq_path, story, *args, **kwargs):
self.path = path
self.line_num = line_num
self.prereq_path = prereq_path
self.story = story
def __repr__(self):
return f'{Colors.FAIL}\n{pomidor}ERROR\n' \
f'PomidorPrerequisiteScenarioNotFoundError\n' \
f'FilePath: {self.path}\nParagraph starts on line ' \
f'{self.line_num}\nERROR: {Colors.WARNING}{self.story}' \
f'{Colors.FAIL} prerequisite scenario not found in ' \
f'prerequisites file ' \
f'{Colors.WARNING}{self.prereq_path}{Colors.ENDC}'
class Colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
ORANGE = '\033[91m'
| en | 0.442567 | PomidorCantRunOneBrowserInstanceInParallel Exception PomidorCantRunOneBrowserInstanceInParallel Exception Pomidor syntax error class: more actions than objects PomidorDataFeedNoAngleKeysProvidedException #name_field\n{Colors.ENDC}' PPomidorDataFeedNoCSVFileProvidedException Pomidor syntax error class: more actions than objects Pomidor syntax error class: more actions than objects #home_button){Colors.ENDC}' Pomidor syntax error class: more objects than actions #home_button){Colors.ENDC}' Pomidor syntax error class: Page object does not exist on the page #{self.obj}{Colors.FAIL} does not ' \ Pomidor syntax error class: Page object does not exist on the page Pomidor syntax error class: Page object does not exist on the page #{self.obj} is {self.act}{Colors.FAIL} ' \ Pomidor syntax error class: Page object does not exist on the page #{self.obj} {self.act} [[{self.string}'\ Pomidor syntax error class: Page object does not exist on the page #{self.obj}{Colors.FAIL} is ' \ | 2.89895 | 3 |
flightaware2columbus/geo_distance.py | KenMercusLai/FlightAware2columbus | 0 | 6623956 | #!/usr/bin/python3
from math import sin, asin, cos, radians, fabs, sqrt
EARTH_RADIUS = 6371 # 地球平均半径,6371km
def hav(theta):
s = sin(theta / 2)
return s * s
def get_distance_hav(lat0, lng0, lat1, lng1):
"""用haversine公式计算球面两点间的距离."""
# 经纬度转换成弧度
lat0 = radians(lat0)
lat1 = radians(lat1)
lng0 = radians(lng0)
lng1 = radians(lng1)
dlng = fabs(lng0 - lng1)
dlat = fabs(lat0 - lat1)
h = hav(dlat) + cos(lat0) * cos(lat1) * hav(dlng)
distance = 2 * EARTH_RADIUS * asin(sqrt(h))
return distance
| #!/usr/bin/python3
from math import sin, asin, cos, radians, fabs, sqrt
EARTH_RADIUS = 6371 # 地球平均半径,6371km
def hav(theta):
s = sin(theta / 2)
return s * s
def get_distance_hav(lat0, lng0, lat1, lng1):
"""用haversine公式计算球面两点间的距离."""
# 经纬度转换成弧度
lat0 = radians(lat0)
lat1 = radians(lat1)
lng0 = radians(lng0)
lng1 = radians(lng1)
dlng = fabs(lng0 - lng1)
dlat = fabs(lat0 - lat1)
h = hav(dlat) + cos(lat0) * cos(lat1) * hav(dlng)
distance = 2 * EARTH_RADIUS * asin(sqrt(h))
return distance
| zh | 0.790623 | #!/usr/bin/python3 # 地球平均半径,6371km 用haversine公式计算球面两点间的距离. # 经纬度转换成弧度 | 3.442067 | 3 |
model_codes/liver.py | Sarth6961/Health-app--based-on-Un-17-Guidelines | 0 | 6623957 | <reponame>Sarth6961/Health-app--based-on-Un-17-Guidelines
import numpy as np
import pandas as pd
from sklearn import ensemble
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import joblib
patients=pd.read_csv('../data/indian_liver_patient.csv')
patients['Gender']=patients['Gender'].apply(lambda x:1 if x=='Male' else 0)
patients=patients.fillna(0.94)
X=patients[['Total_Bilirubin', 'Direct_Bilirubin',
'Alkaline_Phosphotase', 'Alamine_Aminotransferase',
'Total_Protiens', 'Albumin', 'Albumin_and_Globulin_Ratio']]
y=patients['Dataset']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=123)
print('Shape training set: X:{}, y:{}'.format(X_train.shape, y_train.shape))
print('Shape test set: X:{}, y:{}'.format(X_test.shape, y_test.shape))
model = ensemble.RandomForestClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Accuracy : {}'.format(accuracy_score(y_test, y_pred)))
clf_report = classification_report(y_test, y_pred)
print('Classification report')
print("---------------------")
print(clf_report)
print("_____________________")
joblib.dump(model,r"C:\Users\<NAME>\Downloads\Health-App-main\Liver_API\liver_model.pkl") | import numpy as np
import pandas as pd
from sklearn import ensemble
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import joblib
patients=pd.read_csv('../data/indian_liver_patient.csv')
patients['Gender']=patients['Gender'].apply(lambda x:1 if x=='Male' else 0)
patients=patients.fillna(0.94)
X=patients[['Total_Bilirubin', 'Direct_Bilirubin',
'Alkaline_Phosphotase', 'Alamine_Aminotransferase',
'Total_Protiens', 'Albumin', 'Albumin_and_Globulin_Ratio']]
y=patients['Dataset']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=123)
print('Shape training set: X:{}, y:{}'.format(X_train.shape, y_train.shape))
print('Shape test set: X:{}, y:{}'.format(X_test.shape, y_test.shape))
model = ensemble.RandomForestClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('Accuracy : {}'.format(accuracy_score(y_test, y_pred)))
clf_report = classification_report(y_test, y_pred)
print('Classification report')
print("---------------------")
print(clf_report)
print("_____________________")
joblib.dump(model,r"C:\Users\<NAME>\Downloads\Health-App-main\Liver_API\liver_model.pkl") | none | 1 | 2.824535 | 3 | |
analysis/scripts/project_functions.py | data301-2021-summer2/project-group6-project | 0 | 6623958 | <filename>analysis/scripts/project_functions.py
import pandas as pd
import numpy as np
import seaborn as sns
import pandas_profiling as pf
import matplotlib.pyplot as plt
path='../data/raw/adult.data'
def load_and_process(path):
# Method Chain 1 (Load data and deal with missing data)
df1 = (
pd.read_csv('../data/raw/adult.data')
.drop([' 13', ' 2174', ' 0', ' 40', 'Unnamed: 0', ' Never-married', ' Not-in-family'], axis=1)
.rename({' State-gov': 'Work Class', ' 77516': 'Final Weight', ' <=50K':'Income', '39': 'Age', ' White':'Race', ' Adm-clerical': 'Occupation', ' Bachelors': 'Education Level', ' Never-married': 'Marital-Status', ' Male': 'Sex', ' Not-in-family': 'Relationship', ' United-States': 'Native-Country'}, axis=1)
)
# Method Chain 2 (Create new columns, drop others, and do processing)
df2 = (
df1
.replace([' 1st-4th', ' 5th-6th', ' 7th-8th'], 'Elementary')
.replace([' 9th', ' 10th', ' 11th', ' 12th'], 'High School')
)
# Make sure to return the latest dataframe
return df2
load_and_process(path)
| <filename>analysis/scripts/project_functions.py
import pandas as pd
import numpy as np
import seaborn as sns
import pandas_profiling as pf
import matplotlib.pyplot as plt
path='../data/raw/adult.data'
def load_and_process(path):
# Method Chain 1 (Load data and deal with missing data)
df1 = (
pd.read_csv('../data/raw/adult.data')
.drop([' 13', ' 2174', ' 0', ' 40', 'Unnamed: 0', ' Never-married', ' Not-in-family'], axis=1)
.rename({' State-gov': 'Work Class', ' 77516': 'Final Weight', ' <=50K':'Income', '39': 'Age', ' White':'Race', ' Adm-clerical': 'Occupation', ' Bachelors': 'Education Level', ' Never-married': 'Marital-Status', ' Male': 'Sex', ' Not-in-family': 'Relationship', ' United-States': 'Native-Country'}, axis=1)
)
# Method Chain 2 (Create new columns, drop others, and do processing)
df2 = (
df1
.replace([' 1st-4th', ' 5th-6th', ' 7th-8th'], 'Elementary')
.replace([' 9th', ' 10th', ' 11th', ' 12th'], 'High School')
)
# Make sure to return the latest dataframe
return df2
load_and_process(path)
| en | 0.70319 | # Method Chain 1 (Load data and deal with missing data) # Method Chain 2 (Create new columns, drop others, and do processing) # Make sure to return the latest dataframe | 3.007687 | 3 |
dataset.py | navigator8972/vae_dyn | 4 | 6623959 | <reponame>navigator8972/vae_dyn
import numpy as np
class DataSets(object):
pass
class DataSet(object):
def __init__(self, data, labels=None):
if labels is not None:
#check consistency
assert data.shape[0]==labels.shape[0], (
'data.shape: %s labels.shape: %s' % (data.shape,
labels.shape))
else:
#goahead
self._num_examples = data.shape[0]
self._data = data
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
return
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._data = self._data[perm]
if self._labels is not None:
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
if self._labels is not None:
return self._data[start:end], self._labels[start:end]
else:
return self._data[start:end], None
def construct_datasets(data, labels=None, shuffle=True, validation_ratio=.1, test_ratio=.1):
data_sets = DataSets()
if shuffle:
perm = np.arange(data.shape[0])
np.random.shuffle(perm)
data_shuffled = data[perm]
if labels is not None:
labels_shuffled = labels[perm]
else:
data_shuffled = data
labels_shuffled = labels
test_start_idx = int((1-test_ratio)*data_shuffled.shape[0])
validation_start_idx = int((1-validation_ratio-test_ratio)*data_shuffled.shape[0])
if labels is not None:
assert data_shuffled.shape[0] == labels_shuffled.shape[0], (
'data.shape: %s labels.shape: %s' % (data.shape,
labels.shape))
data_sets.train = DataSet(data_shuffled[:validation_start_idx, :], labels_shuffled[:validation_start_idx, :])
data_sets.validation = DataSet(data_shuffled[validation_start_idx:test_start_idx, :], labels_shuffled[validation_start_idx, test_start_idx, :])
data_sets.test = DataSet(data_shuffled[test_start_idx:, :], labels_shuffled[test_start_idx:, :])
else:
data_sets.train = DataSet(data_shuffled[:validation_start_idx, :])
data_sets.validation = DataSet(data_shuffled[validation_start_idx:test_start_idx, :])
data_sets.test = DataSet(data_shuffled[test_start_idx:, :])
return data_sets
| import numpy as np
class DataSets(object):
pass
class DataSet(object):
def __init__(self, data, labels=None):
if labels is not None:
#check consistency
assert data.shape[0]==labels.shape[0], (
'data.shape: %s labels.shape: %s' % (data.shape,
labels.shape))
else:
#goahead
self._num_examples = data.shape[0]
self._data = data
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
return
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._data = self._data[perm]
if self._labels is not None:
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
if self._labels is not None:
return self._data[start:end], self._labels[start:end]
else:
return self._data[start:end], None
def construct_datasets(data, labels=None, shuffle=True, validation_ratio=.1, test_ratio=.1):
data_sets = DataSets()
if shuffle:
perm = np.arange(data.shape[0])
np.random.shuffle(perm)
data_shuffled = data[perm]
if labels is not None:
labels_shuffled = labels[perm]
else:
data_shuffled = data
labels_shuffled = labels
test_start_idx = int((1-test_ratio)*data_shuffled.shape[0])
validation_start_idx = int((1-validation_ratio-test_ratio)*data_shuffled.shape[0])
if labels is not None:
assert data_shuffled.shape[0] == labels_shuffled.shape[0], (
'data.shape: %s labels.shape: %s' % (data.shape,
labels.shape))
data_sets.train = DataSet(data_shuffled[:validation_start_idx, :], labels_shuffled[:validation_start_idx, :])
data_sets.validation = DataSet(data_shuffled[validation_start_idx:test_start_idx, :], labels_shuffled[validation_start_idx, test_start_idx, :])
data_sets.test = DataSet(data_shuffled[test_start_idx:, :], labels_shuffled[test_start_idx:, :])
else:
data_sets.train = DataSet(data_shuffled[:validation_start_idx, :])
data_sets.validation = DataSet(data_shuffled[validation_start_idx:test_start_idx, :])
data_sets.test = DataSet(data_shuffled[test_start_idx:, :])
return data_sets | en | 0.687118 | #check consistency #goahead Return the next `batch_size` examples from this data set. # Finished epoch # Shuffle the data # Start next epoch | 3.056996 | 3 |
standardised_logging/__init__.py | srbry/standardised-logging | 0 | 6623960 | <reponame>srbry/standardised-logging
from .handler import ImmutableContextError, StandardisedLogHandler
from .logger import LogLevelException, StandardisedLogger
__all__ = [
"StandardisedLogger",
"StandardisedLogHandler",
"ImmutableContextError",
"LogLevelException",
]
| from .handler import ImmutableContextError, StandardisedLogHandler
from .logger import LogLevelException, StandardisedLogger
__all__ = [
"StandardisedLogger",
"StandardisedLogHandler",
"ImmutableContextError",
"LogLevelException",
] | none | 1 | 1.527238 | 2 | |
Command.py | NaraMish/Python- | 0 | 6623961 | <reponame>NaraMish/Python-
#!/usr/bin/python3
#wifi hotspot enabler
import os
os.system('cls')
print('\n\n\n\n\n')
print('Nexus Wifi Hotspot Enabler')
print('(c)2021 Nexus Group.All right reserved.')
print()
cmd='0'
while cmd != '3' :
print('1-Start Hotspot')
print('2-Stop Hotspot')
print('3-exit')
cmd=input('Please Enter Your Choice(1,2,3): ')
if cmd == '1':
print('Starting Wifi hotspot....')
os.system("netsh wlan set hostednetwork mode=alow ssid=Nexus key=12345678")
os.system('netsh wlan start hostednetwork')
elif cmd == '2':
print('Stopping Wifi hotspot....')
os.system('netsh wlan stop hostednetwork')
elif cmd == '3':
print('Exiting Program....')
quit()
else:
print("Bad input! Please try again (Only 1,2,3)")
os.system('pause')
| #!/usr/bin/python3
#wifi hotspot enabler
import os
os.system('cls')
print('\n\n\n\n\n')
print('Nexus Wifi Hotspot Enabler')
print('(c)2021 Nexus Group.All right reserved.')
print()
cmd='0'
while cmd != '3' :
print('1-Start Hotspot')
print('2-Stop Hotspot')
print('3-exit')
cmd=input('Please Enter Your Choice(1,2,3): ')
if cmd == '1':
print('Starting Wifi hotspot....')
os.system("netsh wlan set hostednetwork mode=alow ssid=Nexus key=12345678")
os.system('netsh wlan start hostednetwork')
elif cmd == '2':
print('Stopping Wifi hotspot....')
os.system('netsh wlan stop hostednetwork')
elif cmd == '3':
print('Exiting Program....')
quit()
else:
print("Bad input! Please try again (Only 1,2,3)")
os.system('pause') | zh | 0.155782 | #!/usr/bin/python3 #wifi hotspot enabler | 3.179178 | 3 |
src/nlp/text_parsing.py | Hazoom/covid19 | 1 | 6623962 | from typing import List
import spacy
from nlp import blingfire_sentence_splitter
__CACHE = {}
def parse_texts(texts: List[str]):
return get_nlp_parser().pipe(texts)
def parse_text(text: str):
return get_nlp_parser()(text)
def get_nlp_parser():
if 'nlp' not in __CACHE:
print("Loading NLP model")
nlp = spacy.load('en_core_sci_sm')
nlp.add_pipe(blingfire_sentence_splitter.mark_sentence_boundaries,
name='mark-sentence-boundaries',
before="parser")
nlp.max_length = 2000000
__CACHE['nlp'] = nlp
return __CACHE['nlp']
if __name__ == "__main__":
doc = parse_text("Alterations in the hypocretin receptor 2 and preprohypocretin genes produce narcolepsy in some "
"animals.")
print(doc.ents)
for token in doc:
print(token)
| from typing import List
import spacy
from nlp import blingfire_sentence_splitter
__CACHE = {}
def parse_texts(texts: List[str]):
return get_nlp_parser().pipe(texts)
def parse_text(text: str):
return get_nlp_parser()(text)
def get_nlp_parser():
if 'nlp' not in __CACHE:
print("Loading NLP model")
nlp = spacy.load('en_core_sci_sm')
nlp.add_pipe(blingfire_sentence_splitter.mark_sentence_boundaries,
name='mark-sentence-boundaries',
before="parser")
nlp.max_length = 2000000
__CACHE['nlp'] = nlp
return __CACHE['nlp']
if __name__ == "__main__":
doc = parse_text("Alterations in the hypocretin receptor 2 and preprohypocretin genes produce narcolepsy in some "
"animals.")
print(doc.ents)
for token in doc:
print(token)
| none | 1 | 2.790142 | 3 | |
helix/matching/matcher.py | ckrivacic/helix_matcher | 2 | 6623963 | <filename>helix/matching/matcher.py
'''
Create bins or match a query protein.
Usage:
matcher.py bin <helix_dataframe> [options]
matcher.py match <match_workspace> [options]
options:
--local, -l Run locally
--tasks=NUM, -j Run on the cluster using SGE. Argument should be # of
tasks per dataframe.
--length, -e Bin by length
--verbose, -v Verbose output
--database=PATH, -d Database of relative helix orientations
[default: database/]
--out=PATH, -o Where to save outputs [default: .]
--angstroms=NUM, -a Binning option. How fine should the distance bins
be? [default: 2.5]
--degrees=NUM, -g Binning option. How fine should the angle bins be?
[default: 15]
--settings=YML, -s Provide a settings file.
--scaffold=PDB Only run matching for a given helix length/RIFDock
scaffold.
'''
import collections
import os, psutil, sys
import pickle
import subprocess
import docopt
import numpy as np
import pandas as pd
import networkx as nx
from helix import workspace as ws
from helix.matching.scan_helices import final_vector
from helix.utils import numeric
from itertools import product
from pyrosetta import init, pose_from_file
# import graph_tool.all as gt
def plot_vectors(vectors, color='darkgray'):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for vector in vectors:
x = [point[0] for point in vector]
y = [point[1] for point in vector]
z = [point[2] for point in vector]
ax.plot(x, y, z, color=color, linewidth=4)
plt.show()
def bin_array(array, bins):
'''
Digitize a numpy array.
TO DO: Circularize the binning of angles somehow.
'''
inds = np.digitize(array, bins)
binned = tuple([bins[inds[n]-1] for n in range(array.size)])
return binned
def relative_position(row1, row2, vectortype='normalized_vector'):
'''
Gives the internal relative orientation of two lines, given their
row from the pandas dataframe created in scan_helices.
The relative orientation of two lines should be able to be described
with just 4 parameters, since they are 2D objects in 3D space. If we
have lines consisting of points [a,b] and [c,d], those parameters are:
- The distance between their centroids
- Angle abc
- Angle bcd
- Dihedral abcd
'''
norm_v1 = row1[vectortype]
norm_v2 = row2[vectortype]
centroid_dist = numeric.euclidean_distance(row1['centroid'],
row2['centroid'])
abc = numeric.angle(norm_v1[0], norm_v1[1], norm_v2[0])
bcd = numeric.angle(norm_v1[1], norm_v2[0], norm_v2[1])
dihedral = numeric.dihedral(norm_v1[0], norm_v1[1], norm_v2[0],
norm_v2[1])
# plot_vectors([norm_v1, norm_v2], color='black')
return centroid_dist, abc, bcd, dihedral
class Match(object):
'''
Class to construct a potential match.
'''
def __init__(self, name, query_db, main_db, verbose=False):
self.verbose = verbose
self.name = name
self.query = query_db
self.db = main_db.xs(name, level='name')
# self.graph = gt.Graph(directed=False)
self.graph = nx.Graph()
# Track helix pairs so we don't add them to the graph more than
# once
def max_subgraph(self):
'''
Finds dense subgraphs, which represent compatible sets of helix
pairs between the query helices and the database PDB. The
longest such subgraph represents the best overlay of the PDB
with the set of query helices.
'''
max_subgraph_len = 0
# for f in gt.max_cliques(self.graph):
for f in nx.find_cliques(self.graph):
if len(f) > max_subgraph_len:
max_subgraph_len = len(f)
print('Max number of matches:')
print(max_subgraph_len)
return max_subgraph_len
def plot_graph(self):
import matplotlib.pyplot as plt
# import graph_tool.draw as draw
plt.subplot(111)
# gt.remove_parallel_edges(self.graph)
# pos = gt.fruchterman_reingold_layout(self.graph, n_iter=1000)
# gt.graph_draw(self.graph, pos=pos)
plt.show()
def find_edges(self):
'''
Populate the graph with nodes and edges.
Each node consists of a pair of indices, one from the main
database and one from the query database. This pairing
represents the case where the helix in the first index is
overlaid on the helix of the second index. Edges represent
compatibility between adjacent nodes.
'''
print('Finding edges')
edges = []
self.nodes = set()
property_map = {}
i = 0
for doc in self.db.iterrows():
if doc[0] in self.query.index:
compatible_bins = self.query.xs(doc[0])
# compatible_bins = self.query.find({'bin': doc['bin']})
for result in compatible_bins.iterrows():
idx_pair1 = (doc[1]['idx1'], result[1]['idx1'])
idx_pair2 = (doc[1]['idx2'], result[1]['idx2'])
# Track which nodes have been sampled
if idx_pair1 not in self.nodes:
self.nodes.add(idx_pair1)
self.graph.add_node(idx_pair1)
# self.nodes[idx_pair1] = i
# property_map[i] = idx_pair1
i += 1
# self.nodes.append(idx_pair1)
# self.graph.add_node(idx_pair1)
if idx_pair2 not in self.nodes:
# self.nodes[idx_pair2] = i
# property_map[i] = idx_pair2
self.nodes.add(idx_pair2)
self.graph.add_node(idx_pair2)
i += 1
# self.nodes.append(idx_pair2)
# self.graph.add_node(idx_pair2)
self.graph.add_edge(idx_pair1, idx_pair2)
# print('Edge found:')
# print(idx_pair1)
# print(idx_pair2)
# edges.append((self.nodes[idx_pair1],
# self.nodes[idx_pair2]))
# i += 2
# nodes = set(self.nodes)
# self.graph.add_edge(idx_pair1, idx_pair2)
# print(nodes)
# if self.verbose:
# print('All edges:')
# print(edges)
# self.graph.add_edge_list(edges)
# Add properties
# prop_dict = self.graph.new_vertex_property('object')
# for v in self.graph.vertices():
# prop_dict[v] = {'query_idx':property_map[v][0],
# 'lookup_idx':property_map[v][1]}
class HelixBin(object):
def __init__(self, helix_db, exposed_cutoff=0.3, length_cutoff=10.8,
query_df=None, query_name=None, angstroms=2.5, degrees=15,
verbose=False, start=None, stop=None):
self.verbose = verbose
self.df = helix_db
self.df['idx'] = self.df.index
# Binning parameters
self.degrees = degrees
self.angstroms = angstroms
self.setup_bins()
binned_name = 'bins_{}A_{}D'.format(self.angstroms,
self.degrees)
self.start = start
self.stop = stop
# Trimming dataframe
if length_cutoff:
self.df = self.df[self.df['length'] > length_cutoff]
if exposed_cutoff:
self.df = self.df[self.df['percent_exposed'] >
exposed_cutoff]
if 'normalized_vector' not in self.df.columns:
self.df['normalized_vector'] = self.df.apply(lambda x:
final_vector(x['direction'], 1, x['centroid']), axis=1)
def setup_bins(self):
nrbins = int(360//self.degrees) + 1
self.rbins = np.linspace(-180, 180, nrbins)
tstart = -10000
tstop = 10000
ntbins = int((tstop - tstart) // self.angstroms) + 1
self.tbins = np.linspace(tstart, tstop, ntbins)
def bin_db(self, outdir=None, bin_length=False):
'''
Bin dataframes.
'''
from scipy.spatial.transform import Rotation as R
import subprocess
import time
# db = self.client[dbname]
# bins = db['bins_{}A_{}D'.format(
# self.angstroms, self.degrees
# )]
bins = pd.DataFrame(columns=['bin', 'name', 'idx1', 'idx2'])
# Pandas indices are hash lookups and we can have multiple of
# them, but they cannot be added piecewise. Therefore we will
# create partial tables, then create the indices and save the
# dataframes. Results will be saved in chunks.
# bins.set_index(['bin', 'name'], inplace=True)
total_proteins = len(set(self.df['name']))
interval = 500
# import shelve
# binned = shelve.open('binned_0p3/hashtable', 'c', writeback=True)
# i tracks # of names analyzed
i = 0
# saveno tracks how many dataframes have been saved.
self.saveno = 1
unsaved_docs = []
start_time = time.time()
def update(bins, start_time, unsaved_docs, interval, i,
final=False):
print('{} of {} PDBs processed so far.'.format(
i, total_proteins))
mem_used = psutil.Process(os.getpid()).memory_info().rss
if self.verbose:
print('Currently using {} GB of memory'.format(
mem_used * 10**-9
))
df_mem = bins.memory_usage(index=True, deep=True).sum()
if self.verbose:
print('Dataframe is using {} GB of memory'.format(
df_mem * 10**-9
))
elapsed = time.time() - start_time
rate = interval / elapsed
remaining = (total_proteins - i) / rate / 3600
print('Analysis of 500 pdbs took {} seconds. Est. {} h remaining'.format(
elapsed, remaining
))
if len(unsaved_docs) > 0:
if self.verbose:
print('Adding to dataframe...')
bins = bins.append(unsaved_docs, ignore_index=True)
if self.verbose:
print(bins)
else:
if self.verbose:
print('Nothing to update for this batch.')
# Save when memory footprint of dataframe gets larger than 4
# GB. This way each sub-dataframe can be read into memory.
if outdir:
if df_mem * 10**-9 > 4 or final:
bins.set_index(['bin', 'name'], inplace=True)
outfile = 'bins_{}A_{}D_{:04d}.pkl'.format(self.angstroms,
self.degrees, self.saveno)
out = os.path.join(outdir, outfile)
print('Saving current dataframe to {}'.format(out))
if not os.path.exists(outdir):
os.makedirs(outdir, exist_ok=True)
bins.to_pickle(out)
self.saveno += 1
if self.verbose:
print('Saved.')
# If saved to disk, return an empty dataframe.
return pd.DataFrame()
elif final:
bins.set_index(['bin', 'name'], inplace=True)
# Return input dataframe if we have not saved it to disk.
return bins
groups = self.df.groupby(['name'])
names = sorted(list(groups.groups.keys()))
if self.start:
names = names[self.start:]
if self.stop:
names = names[:self.stop]
for name in names:
# for name, group in df.groupby(['name']):
group = groups.groups[name]
i += 1
for combination in product(self.df.loc[group].T.to_dict().values(),
repeat=2):
if combination[0]['idx'] != combination[1]['idx']:
# vector1 = combination[0]['vector']
# vector2 = combination[1]['vector']
# plot_vectors([vector1, vector2], color='purple')
idx1 = combination[0]['idx']
idx2 = combination[1]['idx']
# if self.verbose:
# print('------------------------------------')
# print(combination[0])
# print(combination[1])
dist, angle1, angle2, dihedral =\
relative_position(combination[0], combination[1])
dist = np.array([dist])
angles = np.array([angle1, angle2, dihedral])
lengths = np.array([combination[0]['length'],
combination[1]['length']])
lbin = bin_array(lengths, self.tbins)
lbin2 = bin_array(lengths, self.tbins +
(self.angstroms/2))
rbin = bin_array(angles, self.rbins)
tbin = bin_array(dist, self.tbins)
rbin2 = bin_array(angles, self.rbins + (self.degrees/2))
tbin2 = bin_array(dist, self.tbins +
(self.angstroms/2))
x = [tbin[0], tbin2[0]]
abc = [rbin[0], rbin2[0]]
bcd = [rbin[1], rbin2[1]]
dih = [rbin[2], rbin2[2]]
lengths = [lbin, lbin2]
if bin_length:
all_bins = product(x, abc, bcd, dih, lengths)
else:
all_bins = product(x, abc, bcd, dih)
for bin_12 in all_bins:
bin_12 = ' '.join(map(str, bin_12))
doc = {
'bin':bin_12,
'name': name,
'idx1':idx1,
'idx2':idx2
}
# if check_dups:
# if len(list(bins.find(doc))) == 0:
# unsaved_docs.append(doc)
# else:
unsaved_docs.append(doc)
if i%interval == 0:
bins = update(bins, start_time, unsaved_docs, interval, i)
start_time = time.time()
unsaved_docs = []
bins = update(bins, start_time, unsaved_docs, interval, i, final=True)
return bins
class HelixLookup(object):
    '''
    Class to handle binning and matching of helix databases. This maybe
    should be two classes, one for binning and one for matching, but
    this is it for now.
    '''
    def __init__(self, lookup_folder, query, name='unknown',
            verbose=False):
        # lookup_folder: directory of pickled, binned database dataframes
        # (produced by HelixBin.bin_db with an outdir).
        # query: binned dataframe of the query helix pairs, indexed by
        # ('bin', 'name').
        self.verbose = verbose
        self.lookup_folder = lookup_folder
        self.query = query
        self.name = name
    def score_match(self, list_of_index_pairs):
        """
        Idea (idk where else to put this):
        To get 3rd, 4th, etc. helices, do a reverse lookup. That is,
        for each bin in the FOUND PDB, look for matches in the QUERY
        pdb.
        """
        # TO DO: score clashes
        return
    def submit_local(self, outdir):
        # Run matching against every database pickle sequentially,
        # writing one results pickle per database chunk.
        import glob
        lookups = sorted(glob.glob(self.lookup_folder + '/*.pkl'))
        print(self.lookup_folder)
        print(lookups)
        i = 0
        os.makedirs(outdir, exist_ok=True)
        for lookup in lookups:
            print('MATCHING AGAINST {}'.format(lookup))
            out = os.path.join(outdir, '{}_results_{:03d}.pkl'.format(
                self.name, i)
            )
            self.match(pd.read_pickle(lookup), out=out)
            i += 1
    def submit_cluster(self, outdir, tasks):
        # Run one SGE array task's share of the matching. Each database
        # pickle is split row-wise across `tasks` tasks; the current
        # task number comes from $SGE_TASK_ID (1-based).
        import glob
        lookups = sorted(glob.glob(self.lookup_folder + '/*.pkl'))
        total_tasks = tasks * len(lookups)
        task = int(os.environ['SGE_TASK_ID']) - 1
        os.makedirs(outdir, exist_ok=True)
        out = os.path.join(outdir, '{}_results_{:03d}.pkl'.format(self.name,
            task))
        print('Results will be saved to {}'.format(out))
        # Warning: total_tasks must be a multiple of len(lookups) for
        # now.
        increment = total_tasks // len(lookups)
        print('Increment {}'.format(increment))
        lookups_idx = task//increment
        print('Reading database file # {}'.format(lookups_idx))
        lookup = pd.read_pickle(lookups[lookups_idx])
        num_rows = lookup.shape[0]
        row_increment = num_rows // increment
        rowstart = (task%increment) * row_increment
        rowend = rowstart + row_increment
        lookup = lookup.iloc[rowstart:rowend]
        print('Looking up rows {} through {}'.format(rowstart, rowend))
        print(lookup)
        self.match(lookup, out=out)
    def match(self, lookup, out=None):
        # Match the query bins against one database dataframe. Returns
        # (and optionally pickles) a dataframe with one row per
        # candidate PDB: its name, max-clique size, and the graph.
        names = []
        # Pandas rewrite
        print('Starting forward search...')
        for _bin, group in self.query.groupby(level='bin'):
            if self.verbose:
                print('Searching bin {}'.format(_bin))
            if _bin in lookup.index:
                for result in lookup.xs(_bin, level='bin').iterrows():
                    # xs results in (index, row) tuples; db is indexed by
                    # name, so row[0] is the name.
                    if self.verbose:
                        print('Matched to pdb {}'.format(result[0]))
                    names.append(
                            result[0]
                    )
        print('Forward search done.')
        print('Original name list:')
        print(names)
        # Require at least two shared bins before building a match graph.
        min_matches = 2
        names = [item for item, count in
                collections.Counter(names).items() if
                count >= min_matches]
        print('All matches:')
        print(names)
        print(len(names))
        results = []
        # TEMP
        # sys.exit()
        i = 0
        for name in names:
            i += 1
            result = {}
            result['name'] = name
            print('-------------------------------------------------')
            print('Name: {}'.format(name))
            match = Match(name, self.query, lookup, verbose=self.verbose)
            match.find_edges()
            result['matches'] = match.max_subgraph()
            result['graph'] = match.graph
            results.append(result)
            # match.plot_graph()
            # print('searching {}'.format(name))
            # for _bin in self.binned.find({'name': name[0]}):
            #     if _bin['idx1'] == name[1]:
            #         print('-------')
            #         print(_bin)
            #         for doc in self.query_bins.find({'bin':_bin['bin']}):
            #             print('MATCH:')
            #             results[name].append((doc['idx1'], doc['idx2']))
            #             print(doc)
        df = pd.DataFrame(results)
        if out:
            df.to_pickle(out)
        return df
        # for key in results:
        #     print('------------------RESULTS FOR {}----------------'.format(
        #         key
        #     ))
        #     for pair in set(results[key]):
        #         print(pair)
        # for key in results:
        #     print('PDB {} had {} matching transformations'.format(
        #         key, len(set(results[key]))
        #     ))
def test():
    '''Smoke test: scan helices from a test structure and run matching.'''
    # import scan_helices
    # Fixed typo: 'matchign' -> 'matching' (the package is helix.matching,
    # as imported everywhere else in this module).
    from helix.matching import scan_helices
    test_path = 'test_files/6r9d.cif'
    init()
    # Chain 2 of the test structure carries the helices of interest.
    pose = pose_from_file(test_path).split_by_chain(2)
    print(pose.size())
    scanner = scan_helices.PoseScanner(pose)
    helices = scanner.scan_pose_helices()
    helices = pd.DataFrame(helices)
    print(helices)
    # Keep only sufficiently solvent-exposed helices.
    helices = helices[helices['percent_exposed'] > 0.3]
    print(helices)
    print(helices.shape)
    print(helices['name'])
    # lookup = HelixLookup(pd.read_pickle('dataframes/final.pkl'),
            # query_df=helices, query_name='6r9d')
    # NOTE(review): this call predates the current HelixLookup API
    # (lookup_folder, query, name, verbose) and would raise a TypeError
    # if executed; kept as-is pending a rewrite against the new interface.
    lookup = HelixLookup(pd.DataFrame(),
            query_df=helices, query_name='6r9d', angstroms=5,
            # degrees=15, reset_querydb=True, dbname='nr')
            degrees=30, reset_querydb=True, dbname='test_bins')
    lookup.match()
def test_rifdock():
    '''Scan helices from a RIFDock output and pickle them for inspection.'''
    from helix.matching import scan_helices
    test_path = 'test_files/test_rifgen/cluster_representatives/matchme.pdb'
    init()
    pose = pose_from_file(test_path)
    print(pose.size())
    scanner = scan_helices.PoseScanner(pose)
    helices = scanner.scan_pose_helices(split_chains=False,
            name='rifdock_test')
    helices = pd.DataFrame(helices)
    helices.to_pickle('rifdock_helices.pkl')
    # NOTE(review): everything below this sys.exit() is unreachable debug
    # code; the HelixLookup call below also uses an outdated constructor
    # signature and would raise a TypeError if it ever ran.
    sys.exit()
    print(helices)
    # helices = helices[helices['percent_exposed'] > 0.3]
    print(helices)
    print(helices.shape)
    print(helices['name'])
    # lookup = HelixLookup(pd.read_pickle('dataframes/final.pkl'),
            # query_df=helices, query_name='6r9d')
    lookup = HelixLookup(pd.DataFrame(),
            query_df=helices, query_name='6r9d', angstroms=2.5,
            degrees=15, reset_querydb=True, dbname='nr')
    # degrees=30, reset_querydb=True, dbname='test_bins')
    lookup.match()
def make_hash_table():
    '''One-off script: bin the full nonredundant helix database.'''
    print('Loading database and setting up lookup object...')
    # length cutoff of 2 turns or 10.8 angstroms
    # NOTE(review): these keyword args and update_bin_db() belong to an
    # older HelixLookup API (binning now lives in HelixBin); this would
    # raise a TypeError if run against the current class.
    lookup = HelixLookup(pd.read_pickle('nr_dataframes/final.pkl'),
            exposed_cutoff=0.3, length_cutoff=10.8, angstroms=2.5,
            degrees=15, dbname='nr')
    print('Done.')
    # binned = lookup.bin_db(lookup.df)
    lookup.update_bin_db()
    # out = "binned_0p3/last.pkl"
    # with open(out, 'wb') as f:
    # pickle.dump(binned, f)
def make_test_hash_table():
    '''One-off script: bin a small test dataframe into a test database.'''
    # NOTE(review): MongoClient is not imported anywhere in this module
    # (a leftover from the earlier Mongo-backed implementation), so this
    # function raises NameError as written.
    client = MongoClient()
    deg=15
    angstroms=2.5
    # client['test_bins']['bins_{}A_{}D'.format(angstroms, deg)].drop()
    lookup=HelixLookup(pd.read_pickle('out.pkl'), exposed_cutoff=0.3,
            length_cutoff=10.8, angstroms=angstroms, degrees=deg,
            dbname='test_bins')
    lookup.update_bin_db()
def main():
    '''
    Entry point. Either bin a helix dataframe ('bin' subcommand) or match
    query helices from a workspace against a binned database ('match').
    '''
    args = docopt.docopt(__doc__)
    print(args)
    if args['--settings']:
        # Deprecated; settings handled by submission command
        import yaml
        runtype = 'bin' if args['bin'] else 'match'
        # Fix: yaml.load without an explicit Loader is deprecated/unsafe,
        # and the bare open() leaked the file handle. safe_load is
        # sufficient for a plain settings mapping.
        with open(args['--settings'], 'r') as f:
            settings = yaml.safe_load(f)
        print(settings)
        for option in settings[runtype]:
            args[option] = settings[runtype][option]
        print(args)
    # Database subfolder is keyed by the binning resolution.
    dbpath = os.path.join(
            args['--database'],
            "bins_{}A_{}D".format(
                float(args['--angstroms']),
                float(args['--degrees'])
            )
    )
    if args['bin']:
        lookup = HelixBin(pd.read_pickle(args['<helix_dataframe>']),
                exposed_cutoff=0.3, length_cutoff=10.8,
                angstroms=float(args['--angstroms']),
                degrees=float(args['--degrees']),
                verbose=args['--verbose'])
        lookup.bin_db(outdir=dbpath, bin_length=args['--length'])
    if args['match']:
        # import scan_helices
        from helix.matching import scan_helices
        workspace = ws.workspace_from_dir(args['<match_workspace>'])
        # Import pdb
        if args['--scaffold']:
            pdbfolders = [workspace.scaffold_clusters(args['--scaffold'])]
        else:
            pdbfolders = workspace.all_scaffold_clusters
        init()
        # Reuse the cached dataframe of all scaffold helices if present.
        if not args['--scaffold'] and \
                os.path.exists(workspace.all_scaffold_dataframe):
            all_helices = pd.read_pickle(workspace.all_scaffold_dataframe)
        else:
            all_helices = []
            for pdbfolder in pdbfolders:
                # helicepath = os.path.join(pdbfolder, 'query_helices.pkl')
                helicepath = workspace.scaffold_dataframe(pdbfolder)
                if os.path.exists(helicepath):
                    helices = pd.read_pickle(helicepath)
                else:
                    folder_helices = []
                    import glob
                    gz = glob.glob(pdbfolder + '/*.pdb.gz')
                    dotpdb = glob.glob(pdbfolder + '/*.pdb')
                    gz.extend(dotpdb)
                    pdbs = sorted(gz)
                    for path in pdbs:
                        # First chain is the docked helix
                        pose = pose_from_file(path).split_by_chain(1)
                        # Scan pdb helices
                        scanner = scan_helices.PoseScanner(pose)
                        helices = scanner.scan_pose_helices(name='query',
                                split_chains=False, path=path)
                        folder_helices.extend(helices)
                    helices = pd.DataFrame(folder_helices)
                    helices.to_pickle(helicepath)
                all_helices.append(helices)
            all_helices = pd.concat(all_helices, ignore_index=True)
            if not args['--scaffold']:
                # Don't save to the all_scaffold path if not using all
                # scaffolds
                all_helices.to_pickle(workspace.all_scaffold_dataframe)
        print("HELICES")
        print(all_helices)
        print(all_helices['vector'])
        # Bin pdb helices
        query = HelixBin(all_helices, exposed_cutoff=0.3,
                length_cutoff=10.8,
                angstroms=float(args['--angstroms']),
                degrees=float(args['--degrees']),
                verbose=args['--verbose'])
        query_bins = query.bin_db(bin_length=args['--length'])
        print('QUERY BINS')
        print(query_bins)
        # Match
        # name = os.path.basename(path).split('.')[0]
        name = 'query'
        print('Database:')
        print(dbpath)
        matcher = HelixLookup(dbpath, query_bins, name=name,
                verbose=args['--verbose'])
        if args['--local']:
            matcher.submit_local(workspace.output_dir)
        elif args['--tasks']:
            matcher.submit_cluster(workspace.output_dir, int(args['--tasks']))
        else:
            matcher.submit_cluster(workspace.output_dir, 1)
# Script entry point; the commented-out calls are one-off debugging helpers.
if __name__=='__main__':
    # test()
    # test_rifdock()
    # make_hash_table()
    # make_test_hash_table()
    main()
# helix/matching/matcher.py
'''
Create bins or match a query protein.
Usage:
matcher.py bin <helix_dataframe> [options]
matcher.py match <match_workspace> [options]
options:
--local, -l Run locally
--tasks=NUM, -j Run on the cluster using SGE. Argument should be # of
tasks per dataframe.
--length, -e Bin by length
--verbose, -v Verbose output
--database=PATH, -d Database of relative helix orientations
[default: database/]
--out=PATH, -o Where to save outputs [default: .]
--angstroms=NUM, -a Binning option. How fine should the distance bins
be? [default: 2.5]
--degrees=NUM, -g Binning option. How fine should the angle bins be?
[default: 15]
--settings=YML, -s Provide a settings file.
--scaffold=PDB Only run matching for a given helix length/RIFDock
scaffold.
'''
import collections
import os, psutil, sys
import pickle
import subprocess
import docopt
import numpy as np
import pandas as pd
import networkx as nx
from helix import workspace as ws
from helix.matching.scan_helices import final_vector
from helix.utils import numeric
from itertools import product
from pyrosetta import init, pose_from_file
# import graph_tool.all as gt
def plot_vectors(vectors, color='darkgray'):
    '''Draw each vector (a sequence of 3D points) as a line in a 3D plot.'''
    # Matplotlib is imported lazily so it is only required when plotting;
    # the Axes3D import registers the '3d' projection on older matplotlib.
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for vec in vectors:
        # Split the points into per-axis coordinate lists.
        xs, ys, zs = ([pt[axis] for pt in vec] for axis in range(3))
        ax.plot(xs, ys, zs, color=color, linewidth=4)
    plt.show()
def bin_array(array, bins):
    '''
    Digitize a numpy array.
    TO DO: Circularize the binning of angles somehow.
    '''
    # np.digitize returns, for each value, the index i such that
    # bins[i-1] <= value < bins[i]; map every value to its left bin edge.
    indices = np.digitize(array, bins)
    return tuple(bins[i - 1] for i in indices)
def relative_position(row1, row2, vectortype='normalized_vector'):
    '''
    Gives the internal relative orientation of two lines, given their
    row from the pandas dataframe created in scan_helices.
    The relative orientation of two lines should be able to be described
    with just 4 parameters, since they are 2D objects in 3D space. If we
    have lines consisting of points [a,b] and [c,d], those parameters are:
    - The distance between their centroids
    - Angle abc
    - Angle bcd
    - Dihedral abcd
    Returns (centroid_dist, abc, bcd, dihedral); angle units are whatever
    helix.utils.numeric produces (presumably degrees, given the +/-180
    bin edges used by HelixBin -- confirm in numeric).
    '''
    # Each vector is a pair of endpoints [a, b] taken from the dataframe.
    norm_v1 = row1[vectortype]
    norm_v2 = row2[vectortype]
    centroid_dist = numeric.euclidean_distance(row1['centroid'],
            row2['centroid'])
    abc = numeric.angle(norm_v1[0], norm_v1[1], norm_v2[0])
    bcd = numeric.angle(norm_v1[1], norm_v2[0], norm_v2[1])
    dihedral = numeric.dihedral(norm_v1[0], norm_v1[1], norm_v2[0],
            norm_v2[1])
    # plot_vectors([norm_v1, norm_v2], color='black')
    return centroid_dist, abc, bcd, dihedral
class Match(object):
    '''
    Class to construct a potential match.
    '''
    def __init__(self, name, query_db, main_db, verbose=False):
        # name: candidate PDB name; query_db: binned query dataframe;
        # main_db: binned database dataframe (indexed by bin/name).
        self.verbose = verbose
        self.name = name
        self.query = query_db
        self.db = main_db.xs(name, level='name')
        # self.graph = gt.Graph(directed=False)
        self.graph = nx.Graph()
        # Track helix pairs so we don't add them to the graph more than
        # once
    def max_subgraph(self):
        '''
        Finds dense subgraphs, which represent compatible sets of helix
        pairs between the query helices and the database PDB. The
        longest such subgraph represents the best overlay of the PDB
        with the set of query helices.
        '''
        max_subgraph_len = 0
        # for f in gt.max_cliques(self.graph):
        for f in nx.find_cliques(self.graph):
            if len(f) > max_subgraph_len:
                max_subgraph_len = len(f)
        print('Max number of matches:')
        print(max_subgraph_len)
        return max_subgraph_len
    def plot_graph(self):
        # Debug helper; the graph-tool drawing calls are disabled.
        import matplotlib.pyplot as plt
        # import graph_tool.draw as draw
        plt.subplot(111)
        # gt.remove_parallel_edges(self.graph)
        # pos = gt.fruchterman_reingold_layout(self.graph, n_iter=1000)
        # gt.graph_draw(self.graph, pos=pos)
        plt.show()
    def find_edges(self):
        '''
        Populate the graph with nodes and edges.
        Each node consists of a pair of indices, one from the main
        database and one from the query database. This pairing
        represents the case where the helix in the first index is
        overlaid on the helix of the second index. Edges represent
        compatibility between adjacent nodes.
        '''
        print('Finding edges')
        edges = []
        self.nodes = set()
        property_map = {}
        i = 0
        for doc in self.db.iterrows():
            # doc[0] is the bin label; only bins shared with the query
            # can produce candidate node pairs.
            if doc[0] in self.query.index:
                compatible_bins = self.query.xs(doc[0])
                # compatible_bins = self.query.find({'bin': doc['bin']})
                for result in compatible_bins.iterrows():
                    idx_pair1 = (doc[1]['idx1'], result[1]['idx1'])
                    idx_pair2 = (doc[1]['idx2'], result[1]['idx2'])
                    # Track which nodes have been sampled
                    if idx_pair1 not in self.nodes:
                        self.nodes.add(idx_pair1)
                        self.graph.add_node(idx_pair1)
                        # self.nodes[idx_pair1] = i
                        # property_map[i] = idx_pair1
                        i += 1
                        # self.nodes.append(idx_pair1)
                        # self.graph.add_node(idx_pair1)
                    if idx_pair2 not in self.nodes:
                        # self.nodes[idx_pair2] = i
                        # property_map[i] = idx_pair2
                        self.nodes.add(idx_pair2)
                        self.graph.add_node(idx_pair2)
                        i += 1
                        # self.nodes.append(idx_pair2)
                        # self.graph.add_node(idx_pair2)
                    self.graph.add_edge(idx_pair1, idx_pair2)
                    # print('Edge found:')
                    # print(idx_pair1)
                    # print(idx_pair2)
                    # edges.append((self.nodes[idx_pair1],
                    #     self.nodes[idx_pair2]))
                    # i += 2
                    # nodes = set(self.nodes)
                    # self.graph.add_edge(idx_pair1, idx_pair2)
        # print(nodes)
        # if self.verbose:
        #     print('All edges:')
        #     print(edges)
        # self.graph.add_edge_list(edges)
        # Add properties
        # prop_dict = self.graph.new_vertex_property('object')
        # for v in self.graph.vertices():
        #     prop_dict[v] = {'query_idx':property_map[v][0],
        #         'lookup_idx':property_map[v][1]}
class HelixBin(object):
    '''
    Bin pairs of helices by their relative geometry (centroid distance,
    two angles, a dihedral, and optionally helix lengths), producing a
    dataframe keyed by a string bin label that HelixLookup can match on.
    '''
    def __init__(self, helix_db, exposed_cutoff=0.3, length_cutoff=10.8,
            query_df=None, query_name=None, angstroms=2.5, degrees=15,
            verbose=False, start=None, stop=None):
        self.verbose = verbose
        self.df = helix_db
        self.df['idx'] = self.df.index
        # Binning parameters
        self.degrees = degrees
        self.angstroms = angstroms
        self.setup_bins()
        binned_name = 'bins_{}A_{}D'.format(self.angstroms,
                self.degrees)
        # start/stop optionally restrict binning to a slice of the
        # sorted protein names (used for chunked runs).
        self.start = start
        self.stop = stop
        # Trimming dataframe
        if length_cutoff:
            self.df = self.df[self.df['length'] > length_cutoff]
        if exposed_cutoff:
            self.df = self.df[self.df['percent_exposed'] >
                    exposed_cutoff]
        if 'normalized_vector' not in self.df.columns:
            self.df['normalized_vector'] = self.df.apply(lambda x:
                    final_vector(x['direction'], 1, x['centroid']), axis=1)
    def setup_bins(self):
        # Rotation bins span -180..180 degrees; translation bins span a
        # generous +/-10000 Angstrom range at the requested resolution.
        nrbins = int(360//self.degrees) + 1
        self.rbins = np.linspace(-180, 180, nrbins)
        tstart = -10000
        tstop = 10000
        ntbins = int((tstop - tstart) // self.angstroms) + 1
        self.tbins = np.linspace(tstart, tstop, ntbins)
    def bin_db(self, outdir=None, bin_length=False):
        '''
        Bin dataframes.
        '''
        from scipy.spatial.transform import Rotation as R
        import subprocess
        import time
        # db = self.client[dbname]
        # bins = db['bins_{}A_{}D'.format(
        #     self.angstroms, self.degrees
        # )]
        bins = pd.DataFrame(columns=['bin', 'name', 'idx1', 'idx2'])
        # Pandas indices are hash lookups and we can have multiple of
        # them, but they cannot be added piecewise. Therefore we will
        # create partial tables, then create the indices and save the
        # dataframes. Results will be saved in chunks.
        # bins.set_index(['bin', 'name'], inplace=True)
        total_proteins = len(set(self.df['name']))
        interval = 500
        # import shelve
        # binned = shelve.open('binned_0p3/hashtable', 'c', writeback=True)
        # i tracks # of names analyzed
        i = 0
        # saveno tracks how many dataframes have been saved.
        self.saveno = 1
        unsaved_docs = []
        start_time = time.time()
        # Flush the accumulated docs into the dataframe, report progress,
        # and (when writing to outdir) save+reset once the table exceeds
        # ~4 GB or on the final call.
        def update(bins, start_time, unsaved_docs, interval, i,
                final=False):
            print('{} of {} PDBs processed so far.'.format(
                i, total_proteins))
            mem_used = psutil.Process(os.getpid()).memory_info().rss
            if self.verbose:
                print('Currently using {} GB of memory'.format(
                    mem_used * 10**-9
                ))
            df_mem = bins.memory_usage(index=True, deep=True).sum()
            if self.verbose:
                print('Dataframe is using {} GB of memory'.format(
                    df_mem * 10**-9
                ))
            elapsed = time.time() - start_time
            rate = interval / elapsed
            remaining = (total_proteins - i) / rate / 3600
            print('Analysis of 500 pdbs took {} seconds. Est. {} h remaining'.format(
                elapsed, remaining
            ))
            if len(unsaved_docs) > 0:
                if self.verbose:
                    print('Adding to dataframe...')
                bins = bins.append(unsaved_docs, ignore_index=True)
                if self.verbose:
                    print(bins)
            else:
                if self.verbose:
                    print('Nothing to update for this batch.')
            # Save when memory footprint of dataframe gets larger than 4
            # GB. This way each sub-dataframe can be read into memory.
            if outdir:
                if df_mem * 10**-9 > 4 or final:
                    bins.set_index(['bin', 'name'], inplace=True)
                    outfile = 'bins_{}A_{}D_{:04d}.pkl'.format(self.angstroms,
                            self.degrees, self.saveno)
                    out = os.path.join(outdir, outfile)
                    print('Saving current dataframe to {}'.format(out))
                    if not os.path.exists(outdir):
                        os.makedirs(outdir, exist_ok=True)
                    bins.to_pickle(out)
                    self.saveno += 1
                    if self.verbose:
                        print('Saved.')
                    # If saved to disk, return an empty dataframe.
                    return pd.DataFrame()
            elif final:
                bins.set_index(['bin', 'name'], inplace=True)
            # Return input dataframe if we have not saved it to disk.
            return bins
        groups = self.df.groupby(['name'])
        names = sorted(list(groups.groups.keys()))
        if self.start:
            names = names[self.start:]
        if self.stop:
            names = names[:self.stop]
        for name in names:
            # for name, group in df.groupby(['name']):
            group = groups.groups[name]
            i += 1
            # Consider every ordered pair of distinct helices in this
            # protein.
            for combination in product(self.df.loc[group].T.to_dict().values(),
                    repeat=2):
                if combination[0]['idx'] != combination[1]['idx']:
                    # vector1 = combination[0]['vector']
                    # vector2 = combination[1]['vector']
                    # plot_vectors([vector1, vector2], color='purple')
                    idx1 = combination[0]['idx']
                    idx2 = combination[1]['idx']
                    # if self.verbose:
                    #     print('------------------------------------')
                    #     print(combination[0])
                    #     print(combination[1])
                    dist, angle1, angle2, dihedral =\
                            relative_position(combination[0], combination[1])
                    dist = np.array([dist])
                    angles = np.array([angle1, angle2, dihedral])
                    lengths = np.array([combination[0]['length'],
                        combination[1]['length']])
                    # Each quantity is binned twice, once with the grid
                    # shifted by half a bin, so near-boundary values
                    # still share a bin with their neighbors.
                    lbin = bin_array(lengths, self.tbins)
                    lbin2 = bin_array(lengths, self.tbins +
                            (self.angstroms/2))
                    rbin = bin_array(angles, self.rbins)
                    tbin = bin_array(dist, self.tbins)
                    rbin2 = bin_array(angles, self.rbins + (self.degrees/2))
                    tbin2 = bin_array(dist, self.tbins +
                            (self.angstroms/2))
                    x = [tbin[0], tbin2[0]]
                    abc = [rbin[0], rbin2[0]]
                    bcd = [rbin[1], rbin2[1]]
                    dih = [rbin[2], rbin2[2]]
                    lengths = [lbin, lbin2]
                    if bin_length:
                        all_bins = product(x, abc, bcd, dih, lengths)
                    else:
                        all_bins = product(x, abc, bcd, dih)
                    for bin_12 in all_bins:
                        bin_12 = ' '.join(map(str, bin_12))
                        doc = {
                                'bin':bin_12,
                                'name': name,
                                'idx1':idx1,
                                'idx2':idx2
                        }
                        # if check_dups:
                        #     if len(list(bins.find(doc))) == 0:
                        #         unsaved_docs.append(doc)
                        # else:
                        unsaved_docs.append(doc)
            if i%interval == 0:
                bins = update(bins, start_time, unsaved_docs, interval, i)
                start_time = time.time()
                unsaved_docs = []
        bins = update(bins, start_time, unsaved_docs, interval, i, final=True)
        return bins
class HelixLookup(object):
    '''
    Class to handle binning and matching of helix databases. This maybe
    should be two classes, one for binning and one for matching, but
    this is it for now.
    '''
    def __init__(self, lookup_folder, query, name='unknown',
            verbose=False):
        # lookup_folder: directory of pickled, binned database dataframes
        # (produced by HelixBin.bin_db with an outdir).
        # query: binned dataframe of the query helix pairs, indexed by
        # ('bin', 'name').
        self.verbose = verbose
        self.lookup_folder = lookup_folder
        self.query = query
        self.name = name
    def score_match(self, list_of_index_pairs):
        """
        Idea (idk where else to put this):
        To get 3rd, 4th, etc. helices, do a reverse lookup. That is,
        for each bin in the FOUND PDB, look for matches in the QUERY
        pdb.
        """
        # TO DO: score clashes
        return
    def submit_local(self, outdir):
        # Run matching against every database pickle sequentially,
        # writing one results pickle per database chunk.
        import glob
        lookups = sorted(glob.glob(self.lookup_folder + '/*.pkl'))
        print(self.lookup_folder)
        print(lookups)
        i = 0
        os.makedirs(outdir, exist_ok=True)
        for lookup in lookups:
            print('MATCHING AGAINST {}'.format(lookup))
            out = os.path.join(outdir, '{}_results_{:03d}.pkl'.format(
                self.name, i)
            )
            self.match(pd.read_pickle(lookup), out=out)
            i += 1
    def submit_cluster(self, outdir, tasks):
        # Run one SGE array task's share of the matching. Each database
        # pickle is split row-wise across `tasks` tasks; the current
        # task number comes from $SGE_TASK_ID (1-based).
        import glob
        lookups = sorted(glob.glob(self.lookup_folder + '/*.pkl'))
        total_tasks = tasks * len(lookups)
        task = int(os.environ['SGE_TASK_ID']) - 1
        os.makedirs(outdir, exist_ok=True)
        out = os.path.join(outdir, '{}_results_{:03d}.pkl'.format(self.name,
            task))
        print('Results will be saved to {}'.format(out))
        # Warning: total_tasks must be a multiple of len(lookups) for
        # now.
        increment = total_tasks // len(lookups)
        print('Increment {}'.format(increment))
        lookups_idx = task//increment
        print('Reading database file # {}'.format(lookups_idx))
        lookup = pd.read_pickle(lookups[lookups_idx])
        num_rows = lookup.shape[0]
        row_increment = num_rows // increment
        rowstart = (task%increment) * row_increment
        rowend = rowstart + row_increment
        lookup = lookup.iloc[rowstart:rowend]
        print('Looking up rows {} through {}'.format(rowstart, rowend))
        print(lookup)
        self.match(lookup, out=out)
    def match(self, lookup, out=None):
        # Match the query bins against one database dataframe. Returns
        # (and optionally pickles) a dataframe with one row per
        # candidate PDB: its name, max-clique size, and the graph.
        names = []
        # Pandas rewrite
        print('Starting forward search...')
        for _bin, group in self.query.groupby(level='bin'):
            if self.verbose:
                print('Searching bin {}'.format(_bin))
            if _bin in lookup.index:
                for result in lookup.xs(_bin, level='bin').iterrows():
                    # xs results in (index, row) tuples; db is indexed by
                    # name, so row[0] is the name.
                    if self.verbose:
                        print('Matched to pdb {}'.format(result[0]))
                    names.append(
                            result[0]
                    )
        print('Forward search done.')
        print('Original name list:')
        print(names)
        # Require at least two shared bins before building a match graph.
        min_matches = 2
        names = [item for item, count in
                collections.Counter(names).items() if
                count >= min_matches]
        print('All matches:')
        print(names)
        print(len(names))
        results = []
        # TEMP
        # sys.exit()
        i = 0
        for name in names:
            i += 1
            result = {}
            result['name'] = name
            print('-------------------------------------------------')
            print('Name: {}'.format(name))
            match = Match(name, self.query, lookup, verbose=self.verbose)
            match.find_edges()
            result['matches'] = match.max_subgraph()
            result['graph'] = match.graph
            results.append(result)
            # match.plot_graph()
            # print('searching {}'.format(name))
            # for _bin in self.binned.find({'name': name[0]}):
            #     if _bin['idx1'] == name[1]:
            #         print('-------')
            #         print(_bin)
            #         for doc in self.query_bins.find({'bin':_bin['bin']}):
            #             print('MATCH:')
            #             results[name].append((doc['idx1'], doc['idx2']))
            #             print(doc)
        df = pd.DataFrame(results)
        if out:
            df.to_pickle(out)
        return df
        # for key in results:
        #     print('------------------RESULTS FOR {}----------------'.format(
        #         key
        #     ))
        #     for pair in set(results[key]):
        #         print(pair)
        # for key in results:
        #     print('PDB {} had {} matching transformations'.format(
        #         key, len(set(results[key]))
        #     ))
def test():
    '''Smoke test: scan helices from a test structure and run matching.'''
    # import scan_helices
    # Fixed typo: 'matchign' -> 'matching' (the package is helix.matching,
    # as imported everywhere else in this module).
    from helix.matching import scan_helices
    test_path = 'test_files/6r9d.cif'
    init()
    # Chain 2 of the test structure carries the helices of interest.
    pose = pose_from_file(test_path).split_by_chain(2)
    print(pose.size())
    scanner = scan_helices.PoseScanner(pose)
    helices = scanner.scan_pose_helices()
    helices = pd.DataFrame(helices)
    print(helices)
    # Keep only sufficiently solvent-exposed helices.
    helices = helices[helices['percent_exposed'] > 0.3]
    print(helices)
    print(helices.shape)
    print(helices['name'])
    # lookup = HelixLookup(pd.read_pickle('dataframes/final.pkl'),
            # query_df=helices, query_name='6r9d')
    # NOTE(review): this call predates the current HelixLookup API
    # (lookup_folder, query, name, verbose) and would raise a TypeError
    # if executed; kept as-is pending a rewrite against the new interface.
    lookup = HelixLookup(pd.DataFrame(),
            query_df=helices, query_name='6r9d', angstroms=5,
            # degrees=15, reset_querydb=True, dbname='nr')
            degrees=30, reset_querydb=True, dbname='test_bins')
    lookup.match()
def test_rifdock():
    '''Scan helices from a RIFDock output and pickle them for inspection.'''
    from helix.matching import scan_helices
    test_path = 'test_files/test_rifgen/cluster_representatives/matchme.pdb'
    init()
    pose = pose_from_file(test_path)
    print(pose.size())
    scanner = scan_helices.PoseScanner(pose)
    helices = scanner.scan_pose_helices(split_chains=False,
            name='rifdock_test')
    helices = pd.DataFrame(helices)
    helices.to_pickle('rifdock_helices.pkl')
    # NOTE(review): everything below this sys.exit() is unreachable debug
    # code; the HelixLookup call below also uses an outdated constructor
    # signature and would raise a TypeError if it ever ran.
    sys.exit()
    print(helices)
    # helices = helices[helices['percent_exposed'] > 0.3]
    print(helices)
    print(helices.shape)
    print(helices['name'])
    # lookup = HelixLookup(pd.read_pickle('dataframes/final.pkl'),
            # query_df=helices, query_name='6r9d')
    lookup = HelixLookup(pd.DataFrame(),
            query_df=helices, query_name='6r9d', angstroms=2.5,
            degrees=15, reset_querydb=True, dbname='nr')
    # degrees=30, reset_querydb=True, dbname='test_bins')
    lookup.match()
def make_hash_table():
    '''One-off script: bin the full nonredundant helix database.'''
    print('Loading database and setting up lookup object...')
    # length cutoff of 2 turns or 10.8 angstroms
    # NOTE(review): these keyword args and update_bin_db() belong to an
    # older HelixLookup API (binning now lives in HelixBin); this would
    # raise a TypeError if run against the current class.
    lookup = HelixLookup(pd.read_pickle('nr_dataframes/final.pkl'),
            exposed_cutoff=0.3, length_cutoff=10.8, angstroms=2.5,
            degrees=15, dbname='nr')
    print('Done.')
    # binned = lookup.bin_db(lookup.df)
    lookup.update_bin_db()
    # out = "binned_0p3/last.pkl"
    # with open(out, 'wb') as f:
    # pickle.dump(binned, f)
def make_test_hash_table():
    '''One-off script: bin a small test dataframe into a test database.'''
    # NOTE(review): MongoClient is not imported anywhere in this module
    # (a leftover from the earlier Mongo-backed implementation), so this
    # function raises NameError as written.
    client = MongoClient()
    deg=15
    angstroms=2.5
    # client['test_bins']['bins_{}A_{}D'.format(angstroms, deg)].drop()
    lookup=HelixLookup(pd.read_pickle('out.pkl'), exposed_cutoff=0.3,
            length_cutoff=10.8, angstroms=angstroms, degrees=deg,
            dbname='test_bins')
    lookup.update_bin_db()
def main():
    '''
    Entry point. Either bin a helix dataframe ('bin' subcommand) or match
    query helices from a workspace against a binned database ('match').
    '''
    args = docopt.docopt(__doc__)
    print(args)
    if args['--settings']:
        # Deprecated; settings handled by submission command
        import yaml
        runtype = 'bin' if args['bin'] else 'match'
        # Fix: yaml.load without an explicit Loader is deprecated/unsafe,
        # and the bare open() leaked the file handle. safe_load is
        # sufficient for a plain settings mapping.
        with open(args['--settings'], 'r') as f:
            settings = yaml.safe_load(f)
        print(settings)
        for option in settings[runtype]:
            args[option] = settings[runtype][option]
        print(args)
    # Database subfolder is keyed by the binning resolution.
    dbpath = os.path.join(
            args['--database'],
            "bins_{}A_{}D".format(
                float(args['--angstroms']),
                float(args['--degrees'])
            )
    )
    if args['bin']:
        lookup = HelixBin(pd.read_pickle(args['<helix_dataframe>']),
                exposed_cutoff=0.3, length_cutoff=10.8,
                angstroms=float(args['--angstroms']),
                degrees=float(args['--degrees']),
                verbose=args['--verbose'])
        lookup.bin_db(outdir=dbpath, bin_length=args['--length'])
    if args['match']:
        # import scan_helices
        from helix.matching import scan_helices
        workspace = ws.workspace_from_dir(args['<match_workspace>'])
        # Import pdb
        if args['--scaffold']:
            pdbfolders = [workspace.scaffold_clusters(args['--scaffold'])]
        else:
            pdbfolders = workspace.all_scaffold_clusters
        init()
        # Reuse the cached dataframe of all scaffold helices if present.
        if not args['--scaffold'] and \
                os.path.exists(workspace.all_scaffold_dataframe):
            all_helices = pd.read_pickle(workspace.all_scaffold_dataframe)
        else:
            all_helices = []
            for pdbfolder in pdbfolders:
                # helicepath = os.path.join(pdbfolder, 'query_helices.pkl')
                helicepath = workspace.scaffold_dataframe(pdbfolder)
                if os.path.exists(helicepath):
                    helices = pd.read_pickle(helicepath)
                else:
                    folder_helices = []
                    import glob
                    gz = glob.glob(pdbfolder + '/*.pdb.gz')
                    dotpdb = glob.glob(pdbfolder + '/*.pdb')
                    gz.extend(dotpdb)
                    pdbs = sorted(gz)
                    for path in pdbs:
                        # First chain is the docked helix
                        pose = pose_from_file(path).split_by_chain(1)
                        # Scan pdb helices
                        scanner = scan_helices.PoseScanner(pose)
                        helices = scanner.scan_pose_helices(name='query',
                                split_chains=False, path=path)
                        folder_helices.extend(helices)
                    helices = pd.DataFrame(folder_helices)
                    helices.to_pickle(helicepath)
                all_helices.append(helices)
            all_helices = pd.concat(all_helices, ignore_index=True)
            if not args['--scaffold']:
                # Don't save to the all_scaffold path if not using all
                # scaffolds
                all_helices.to_pickle(workspace.all_scaffold_dataframe)
        print("HELICES")
        print(all_helices)
        print(all_helices['vector'])
        # Bin pdb helices
        query = HelixBin(all_helices, exposed_cutoff=0.3,
                length_cutoff=10.8,
                angstroms=float(args['--angstroms']),
                degrees=float(args['--degrees']),
                verbose=args['--verbose'])
        query_bins = query.bin_db(bin_length=args['--length'])
        print('QUERY BINS')
        print(query_bins)
        # Match
        # name = os.path.basename(path).split('.')[0]
        name = 'query'
        print('Database:')
        print(dbpath)
        matcher = HelixLookup(dbpath, query_bins, name=name,
                verbose=args['--verbose'])
        if args['--local']:
            matcher.submit_local(workspace.output_dir)
        elif args['--tasks']:
            matcher.submit_cluster(workspace.output_dir, int(args['--tasks']))
        else:
            matcher.submit_cluster(workspace.output_dir, 1)
# Script entry point; the commented-out calls are one-off debugging helpers.
if __name__=='__main__':
    # test()
    # test_rifdock()
    # make_hash_table()
    # make_test_hash_table()
    main()
| en | 0.586934 | Create bins or match a query protein. Usage: matcher.py bin <helix_dataframe> [options] matcher.py match <match_workspace> [options] options: --local, -l Run locally --tasks=NUM, -j Run on the cluster using SGE. Argument should be # of tasks per dataframe. --length, -e Bin by length --verbose, -v Verbose output --database=PATH, -d Database of relative helix orientations [default: database/] --out=PATH, -o Where to save outputs [default: .] --angstroms=NUM, -a Binning option. How fine should the distance bins be? [default: 2.5] --degrees=NUM, -g Binning option. How fine should the angle bins be? [default: 15] --settings=YML, -s Provide a settings file. --scaffold=PDB Only run matching for a given helix length/RIFDock scaffold. # import graph_tool.all as gt Digitize a numpy array. TO DO: Circularize the binning of angles somehow. Gives the internal relative orientation of two lines, given their row from the pandas dataframe created in scan_helices. The relative orientation of two lines should be able to be described with just 4 parameters, since they are 2D objects in 3D space. If we have lines consisting of points [a,b] and [c,d], those parameters are: - The distance between their centroids - Angle abc - Angle bcd - Dihedral abcd # plot_vectors([norm_v1, norm_v2], color='black') Class to construct a potential match. # self.graph = gt.Graph(directed=False) # Track helix pairs so we don't add them to the graph more than # once Finds dense subgraphs, which represent compatible sets of helix pairs between the query helices and the database PDB. The longest such subgraph represents the best overlay of the PDB with the set of query helices. # for f in gt.max_cliques(self.graph): # import graph_tool.draw as draw # gt.remove_parallel_edges(self.graph) # pos = gt.fruchterman_reingold_layout(self.graph, n_iter=1000) # gt.graph_draw(self.graph, pos=pos) Populate the graph with nodes and edges. 
Each node consists of a pair of indices, one from the main database and one from the query database. This pairing represents the case where the helix in the first index is overlaid on the helix of the second index. Edges represent compatibility between adjacent nodes. # compatible_bins = self.query.find({'bin': doc['bin']}) # Track which nodes have been sampled # self.nodes[idx_pair1] = i # property_map[i] = idx_pair1 # self.nodes.append(idx_pair1) # self.graph.add_node(idx_pair1) # self.nodes[idx_pair2] = i # property_map[i] = idx_pair2 # self.nodes.append(idx_pair2) # self.graph.add_node(idx_pair2) # print('Edge found:') # print(idx_pair1) # print(idx_pair2) # edges.append((self.nodes[idx_pair1], # self.nodes[idx_pair2])) # i += 2 # nodes = set(self.nodes) # self.graph.add_edge(idx_pair1, idx_pair2) # print(nodes) # if self.verbose: # print('All edges:') # print(edges) # self.graph.add_edge_list(edges) # Add properties # prop_dict = self.graph.new_vertex_property('object') # for v in self.graph.vertices(): # prop_dict[v] = {'query_idx':property_map[v][0], # 'lookup_idx':property_map[v][1]} # Binning parameters # Trimming dataframe Bin dataframes. # db = self.client[dbname] # bins = db['bins_{}A_{}D'.format( # self.angstroms, self.degrees # )] # Pandas indices are hash lookups and we can have multiple of # them, but they cannot be added piecewise. Therefore we will # create partial tables, then create the indices and save the # dataframes. Results will be saved in chunks. # bins.set_index(['bin', 'name'], inplace=True) # import shelve # binned = shelve.open('binned_0p3/hashtable', 'c', writeback=True) # i tracks # of names analyzed # saveno tracks how many dataframes have been saved. # Save when memory footprint of dataframe gets larger than 4 # GB. This way each sub-dataframe can be read into memory. # If saved to disk, return an empty dataframe. # Return input dataframe if we have not saved it to disk. 
# for name, group in df.groupby(['name']): # vector1 = combination[0]['vector'] # vector2 = combination[1]['vector'] # plot_vectors([vector1, vector2], color='purple') # if self.verbose: # print('------------------------------------') # print(combination[0]) # print(combination[1]) # if check_dups: # if len(list(bins.find(doc))) == 0: # unsaved_docs.append(doc) # else: Class to handle binning and matching of helix databases. This maybe should be two classes, one for binning and one for matching, but this is it for now. Idea (idk where else to put this): To get 3rd, 4th, etc. helices, do a reverse lookup. That is, for each bin in the FOUND PDB, look for matches in the QUERY pdb. # TO DO: score clashes # Warning: total_tasks must be a multiple of len(lookups) for # now. # {}'.format(lookups_idx)) # Pandas rewrite # xs results in (index, row) tuples; db is indexed by # name, so row[0] is the name. # TEMP # sys.exit() # match.plot_graph() # print('searching {}'.format(name)) # for _bin in self.binned.find({'name': name[0]}): # if _bin['idx1'] == name[1]: # print('-------') # print(_bin) # for doc in self.query_bins.find({'bin':_bin['bin']}): # print('MATCH:') # results[name].append((doc['idx1'], doc['idx2'])) # print(doc) # for key in results: # print('------------------RESULTS FOR {}----------------'.format( # key # )) # for pair in set(results[key]): # print(pair) # for key in results: # print('PDB {} had {} matching transformations'.format( # key, len(set(results[key])) # )) # import scan_helices # lookup = HelixLookup(pd.read_pickle('dataframes/final.pkl'), # query_df=helices, query_name='6r9d') # degrees=15, reset_querydb=True, dbname='nr') # helices = helices[helices['percent_exposed'] > 0.3] # lookup = HelixLookup(pd.read_pickle('dataframes/final.pkl'), # query_df=helices, query_name='6r9d') # degrees=30, reset_querydb=True, dbname='test_bins') # length cutoff of 2 turns or 10.8 angstroms # binned = lookup.bin_db(lookup.df) # out = "binned_0p3/last.pkl" # with 
open(out, 'wb') as f: # pickle.dump(binned, f) # client['test_bins']['bins_{}A_{}D'.format(angstroms, deg)].drop() # Deprecated; settings handled by submission command # import scan_helices # Import pdb # helicepath = os.path.join(pdbfolder, 'query_helices.pkl') # First chain is the docked helix # Scan pdb helices # Don't save to the all_scaffold path if not using all # scaffolds # Bin pdb helices # Match # name = os.path.basename(path).split('.')[0] # test() # test_rifdock() # make_hash_table() # make_test_hash_table() | 2.593081 | 3 |
auto_trainer/callbacks/test_wandb.py | WPI-MMR/learning_experiments | 0 | 6623964 | <gh_stars>0
import unittest
from unittest import mock
import importlib
import sys
class TestWandbEvalAndRecord(unittest.TestCase):
    """Tests for the WandbEvalAndRecord callback with the wandb module mocked out."""

    def setUp(self):
        # TODO: Create a parent test case that encompasses this W&B mocking logic
        if 'wandb' in sys.modules:
            import wandb
            # NOTE(review): this only deletes the local name `wandb`, not the
            # sys.modules['wandb'] cache entry — confirm whether the intent
            # was `del sys.modules['wandb']`. The patch.dict below masks it.
            del wandb
        self.mock_env = mock.MagicMock()   # stand-in for the gym-style env
        self.eval_episodes = 10
        self.render_freq = 2               # render every 2nd env step
        self.fps = 30
        self.wandb = mock.MagicMock()
        # Import (and reload) the callback module while sys.modules['wandb']
        # is the mock, so the module-level `import wandb` binds to the mock.
        with mock.patch.dict('sys.modules', {'wandb': self.wandb}):
            import auto_trainer.callbacks.wandb
            importlib.reload(auto_trainer.callbacks.wandb)
            self.cb = auto_trainer.callbacks.wandb.WandbEvalAndRecord(
                self.mock_env, self.eval_episodes, self.render_freq, self.fps)

    @mock.patch('numpy.transpose')
    @mock.patch('auto_trainer.callbacks.wandb.evaluate_policy')
    def test_step(self, mock_eval, mock_transpose):
        """_on_step rolls out one episode, renders at render_freq, logs to wandb."""
        mean_reward = 69
        std_reward = 420
        mock_eval.return_value = mean_reward, std_reward
        self.cb.model = mock.MagicMock()
        self.cb.model.predict.return_value = None, None
        # Create an episode with length 10
        step_return_vals = [(None, None, False, None)] * 9
        step_return_vals.append((None, None, True, None))
        self.mock_env.step.side_effect = step_return_vals
        self.cb.n_calls = 1
        self.cb.num_timesteps = self.cb.n_calls * 4
        self.assertTrue(self.cb._on_step(plot=False))
        # One env.step per episode step; a render only every render_freq steps.
        self.assertEqual(
            len(self.mock_env.step.call_args_list), 10)
        self.assertEqual(
            len(self.mock_env.render.call_args_list), 10 / self.render_freq)
        # Exactly one wandb.log call carrying the evaluation metrics.
        self.wandb.log.assert_called_once()
        log = self.wandb.log.call_args[0][0]
        self.assertEqual(log['test_reward_mean'], mean_reward)
        self.assertEqual(log['test_reward_std'], std_reward)
        self.assertEqual(log['global_step'], 4)
        self.assertEqual(log['evaluations'], 1)
if __name__ == '__main__':
pass | import unittest
from unittest import mock
import importlib
import sys
class TestWandbEvalAndRecord(unittest.TestCase):
def setUp(self):
# TODO: Create a parent test case that encompasses this W&B mocking logic
if 'wandb' in sys.modules:
import wandb
del wandb
self.mock_env = mock.MagicMock()
self.eval_episodes = 10
self.render_freq = 2
self.fps = 30
self.wandb = mock.MagicMock()
with mock.patch.dict('sys.modules', {'wandb': self.wandb}):
import auto_trainer.callbacks.wandb
importlib.reload(auto_trainer.callbacks.wandb)
self.cb = auto_trainer.callbacks.wandb.WandbEvalAndRecord(
self.mock_env, self.eval_episodes, self.render_freq, self.fps)
@mock.patch('numpy.transpose')
@mock.patch('auto_trainer.callbacks.wandb.evaluate_policy')
def test_step(self, mock_eval, mock_transpose):
mean_reward = 69
std_reward = 420
mock_eval.return_value = mean_reward, std_reward
self.cb.model = mock.MagicMock()
self.cb.model.predict.return_value = None, None
# Create an episode with length 10
step_return_vals = [(None, None, False, None)] * 9
step_return_vals.append((None, None, True, None))
self.mock_env.step.side_effect = step_return_vals
self.cb.n_calls = 1
self.cb.num_timesteps = self.cb.n_calls * 4
self.assertTrue(self.cb._on_step(plot=False))
self.assertEqual(
len(self.mock_env.step.call_args_list), 10)
self.assertEqual(
len(self.mock_env.render.call_args_list), 10 / self.render_freq)
self.wandb.log.assert_called_once()
log = self.wandb.log.call_args[0][0]
self.assertEqual(log['test_reward_mean'], mean_reward)
self.assertEqual(log['test_reward_std'], std_reward)
self.assertEqual(log['global_step'], 4)
self.assertEqual(log['evaluations'], 1)
if __name__ == '__main__':
pass | en | 0.823675 | # TODO: Create a parent test case that encompasses this W&B mocking logic # Create an episode with length 10 | 2.482316 | 2 |
packages/w3af/w3af/core/data/parsers/doc/swf.py | ZooAtmosphereGroup/HelloPackages | 3 | 6623965 | <reponame>ZooAtmosphereGroup/HelloPackages
"""
swf.py
Copyright 2006 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import zlib
from w3af.core.data.parsers.doc.baseparser import BaseParser
from w3af.core.data.parsers.utils.re_extract import ReExtract
class SWFParser(BaseParser):
    """
    This class is a SWF (flash) parser which just focuses on extracting URLs.

    The parser is based on "SWF File Format Specification Version 10"
    http://www.adobe.com/content/dam/Adobe/en/devnet/swf/pdf/swf_file_format_spec_v10.pdf

    :author: <NAME> (<EMAIL>)
    """

    def __init__(self, http_response):
        BaseParser.__init__(self, http_response)
        self._re_urls = set()

    @staticmethod
    def can_parse(http_resp):
        """
        :param http_resp: An HTTP response object.
        :return: True if the http_resp contains a SWF file.
        """
        if http_resp.content_type != 'application/x-shockwave-flash':
            return False

        body = http_resp.get_body()

        if len(body) > 5:
            magic = body[:3]

            # TODO: Add more checks here?
            if magic in ('FWS', 'CWS'):
                return True

        return False

    def _is_compressed(self, swf_document):
        """
        :param swf_document: The SWF file.
        :return: True if the SWF is compressed ('CWS' magic bytes).
        """
        return swf_document.startswith('CWS')

    def _inflate(self, swf_document):
        """
        zlib.inflate the SWF file.

        :param swf_document: The SWF file.
        :return: A decompressed version of the SWF
        :raise ValueError: If the zlib decompression fails.
        """
        # The first 8 bytes are the (uncompressed) SWF header; the
        # zlib-compressed payload starts right after it.
        compressed_data = swf_document[8:]

        try:
            uncompressed_data = zlib.decompress(compressed_data)
        # BUGFIX: was the Python-2-only "except zlib.error, e:" syntax, which
        # is a SyntaxError on Python 3; "as" is valid on Python 2.6+ and 3.
        except zlib.error as e:
            raise ValueError('Failed to inflate: ' + str(e))
        else:
            # TODO: Strings in SWF are NULL-Byte delimited. Maybe we can
            # use that to extract strings and apply regular expressions
            # more carefully?
            return uncompressed_data

    def parse(self):
        """
        Parse the SWF bytecode.

        For now... don't decompile anything, just apply regular
        expressions to it.
        """
        swf_body = self.get_http_response().get_body()

        if self._is_compressed(swf_body):
            try:
                swf_body = self._inflate(swf_body)
            except Exception:
                # If the inflate fails... there is nothing else to do.
                return

        self._0x83_getURL_parse(swf_body)
        self._re_extract(swf_body)

    def _re_extract(self, swf_body):
        """
        Get the URLs using a regex; results accumulate in self._re_urls.
        """
        re_extract = ReExtract(swf_body, self._base_url, self._encoding)
        re_extract.parse()
        self._re_urls.update(re_extract.get_references())

    def _0x83_getURL_parse(self, swf_body):
        """
        After reading a couple of SWF files with a hex editor it was possible
        to identify the following pattern:

            0x83 0xLENGTH 0x00 (0xLENGTH - 2 chars) 0x00

        0x83 is the bytecode for Adobe's getURL
        0xLENGTH is the string length of the first parameter including the two
        0x00 string delimiters.

        So, with this information I'll extract links!

        :return: Store new URLs in self._re_urls, None is returned.
        """
        for index, char in enumerate(swf_body):
            if char == '\x83':
                try:
                    plus_two_zero = swf_body[index+2] == '\x00'
                except IndexError:
                    continue
                else:
                    if not plus_two_zero:
                        continue

                # potential getURL with string as first parameter
                # lets get the length and verify that there is a 0x00 where
                # we expect it to be
                str_len = ord(swf_body[index+1])

                try:
                    str_end = swf_body[index + 1 + str_len]
                except IndexError:
                    # The str_len was too long and took us out of the string
                    # length, this is a "common" bug since our parser is not
                    # very smart
                    #
                    # https://github.com/andresriancho/w3af/issues/5535
                    continue

                # Strings in SWF bytecode have 0x00 content 0x00 and the len
                # counts the delimiters, so a length of 2 or less is useless
                if str_len <= 2:
                    continue

                if str_end == '\x00':
                    # Getting closer... lets reduce more false positives by
                    # verifying that all chars in the url are ASCII
                    start = index + 3
                    end = start + str_len - 2
                    url_str = swf_body[start:end]

                    if all(32 < ord(c) < 127 for c in url_str):
                        # All chars are ASCII, we've got a URL!
                        #
                        # In case you're wondering, this url_join does work with
                        # both relative and full URLs
                        try:
                            url = self._base_url.url_join(url_str)
                        except ValueError:
                            # Handle cases like "javascript:foo(1)" URLs
                            # https://github.com/andresriancho/w3af/issues/2091
                            pass
                        else:
                            self._re_urls.add(url)

    def get_clear_text_body(self):
        # SWF is binary bytecode; there is no clear-text body to return.
        return u''

    def get_references(self):
        """
        Searches for references on a page. w3af searches references in every
        html tag, including:
            - a
            - forms
            - images
            - frames
            - etc.

        :return: Two lists, one with the parsed URLs, and one with the URLs
                 that came out of a regular expression. The second list if less
                 trustworthy.
        """
        return [], list(self._re_urls)

    # SWF has no HTML tags, forms, comments, meta tags or emails to extract.
    get_references_of_tag = get_forms = BaseParser._return_empty_list
    get_comments = BaseParser._return_empty_list
    get_meta_redir = get_meta_tags = get_emails = BaseParser._return_empty_list
| """
swf.py
Copyright 2006 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import zlib
from w3af.core.data.parsers.doc.baseparser import BaseParser
from w3af.core.data.parsers.utils.re_extract import ReExtract
class SWFParser(BaseParser):
"""
This class is a SWF (flash) parser which just focuses on extracting URLs.
The parser is based on "SWF File Format Specification Version 10"
http://www.adobe.com/content/dam/Adobe/en/devnet/swf/pdf/swf_file_format_spec_v10.pdf
:author: <NAME> (<EMAIL>)
"""
def __init__(self, http_response):
BaseParser.__init__(self, http_response)
self._re_urls = set()
@staticmethod
def can_parse(http_resp):
"""
:return: True if the http_resp contains a SWF file.
"""
if http_resp.content_type != 'application/x-shockwave-flash':
return False
body = http_resp.get_body()
if len(body) > 5:
magic = body[:3]
# TODO: Add more checks here?
if magic in ('FWS', 'CWS'):
return True
return False
def _is_compressed(self, swf_document):
"""
:param swf_content: The SWF file.
:return: True if the SWF is compressed
"""
return swf_document.startswith('CWS')
def _inflate(self, swf_document):
"""
zlib.inflate the SWF file.
:param swf_content: The SWF file.
:return: A decompressed version of the SWF
"""
compressed_data = swf_document[8:]
try:
uncompressed_data = zlib.decompress(compressed_data)
except zlib.error, e:
raise ValueError('Failed to inflate: ' + str(e))
else:
# TODO: Strings in SWF are NULL-Byte delimited. Maybe we can
# use that to extract strings and apply regular expressions
# more carefully?
return uncompressed_data
def parse(self):
"""
Parse the SWF bytecode.
For now... don't decompile anything, just apply regular
expressions to it.
"""
swf_body = self.get_http_response().get_body()
if self._is_compressed(swf_body):
try:
swf_body = self._inflate(swf_body)
except Exception:
# If the inflate fails... there is nothing else to do.
return
self._0x83_getURL_parse(swf_body)
self._re_extract(swf_body)
def _re_extract(self, swf_body):
"""
Get the URLs using a regex
"""
re_extract = ReExtract(swf_body, self._base_url, self._encoding)
re_extract.parse()
self._re_urls.update(re_extract.get_references())
def _0x83_getURL_parse(self, swf_body):
"""
After reading a couple of SWF files with a hex editor it was possible
to identify the following pattern:
0x83 0xLENGTH 0x00 (0xLENGTH - 2 chars) 0x00
0x83 is the bytecode for Adobe's getURL
0xLENGTH is the string length of the first parameter including the two
0x00 string delimiters.
So, with this information I'll extract links!
:return: Store new URLs in self._re_urls, None is returned.
"""
for index, char in enumerate(swf_body):
if char == '\x83':
try:
plus_two_zero = swf_body[index+2] == '\x00'
except IndexError:
continue
else:
if not plus_two_zero:
continue
# potential getURL with string as first parameter
# lets get the length and verify that there is a 0x00 where
# we expect it to be
str_len = ord(swf_body[index+1])
try:
str_end = swf_body[index + 1 + str_len]
except IndexError:
# The str_len was too long and took us out of the string
# length, this is a "common" bug since our parser is not
# very smart
#
# https://github.com/andresriancho/w3af/issues/5535
continue
# Strings in SWF bytecode have 0x00 content 0x00 and the len
# counts the delimiters, so a length of 2 or less is useless
if str_len <= 2:
continue
if str_end == '\x00':
# Getting closer... lets reduce more false positives by
# verifying that all chars in the url are ASCII
start = index + 3
end = start + str_len - 2
url_str = swf_body[start:end]
if all(32 < ord(c) < 127 for c in url_str):
# All chars are ASCII, we've got a URL!
#
# In case you're wondering, this url_join does work with
# both relative and full URLs
try:
url = self._base_url.url_join(url_str)
except ValueError:
# Handle cases like "javascript:foo(1)" URLs
# https://github.com/andresriancho/w3af/issues/2091
pass
else:
self._re_urls.add(url)
def get_clear_text_body(self):
return u''
def get_references(self):
"""
Searches for references on a page. w3af searches references in every
html tag, including:
- a
- forms
- images
- frames
- etc.
:return: Two lists, one with the parsed URLs, and one with the URLs
that came out of a regular expression. The second list if less
trustworthy.
"""
return [], list(self._re_urls)
get_references_of_tag = get_forms = BaseParser._return_empty_list
get_comments = BaseParser._return_empty_list
get_meta_redir = get_meta_tags = get_emails = BaseParser._return_empty_list | en | 0.847311 | swf.py Copyright 2006 <NAME> This file is part of w3af, http://w3af.org/ . w3af is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation version 2 of the License. w3af is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with w3af; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA This class is a SWF (flash) parser which just focuses on extracting URLs. The parser is based on "SWF File Format Specification Version 10" http://www.adobe.com/content/dam/Adobe/en/devnet/swf/pdf/swf_file_format_spec_v10.pdf :author: <NAME> (<EMAIL>) :return: True if the http_resp contains a SWF file. # TODO: Add more checks here? :param swf_content: The SWF file. :return: True if the SWF is compressed zlib.inflate the SWF file. :param swf_content: The SWF file. :return: A decompressed version of the SWF # TODO: Strings in SWF are NULL-Byte delimited. Maybe we can # use that to extract strings and apply regular expressions # more carefully? Parse the SWF bytecode. For now... don't decompile anything, just apply regular expressions to it. # If the inflate fails... there is nothing else to do. Get the URLs using a regex After reading a couple of SWF files with a hex editor it was possible to identify the following pattern: 0x83 0xLENGTH 0x00 (0xLENGTH - 2 chars) 0x00 0x83 is the bytecode for Adobe's getURL 0xLENGTH is the string length of the first parameter including the two 0x00 string delimiters. So, with this information I'll extract links! :return: Store new URLs in self._re_urls, None is returned. 
# potential getURL with string as first parameter # lets get the length and verify that there is a 0x00 where # we expect it to be # The str_len was too long and took us out of the string # length, this is a "common" bug since our parser is not # very smart # # https://github.com/andresriancho/w3af/issues/5535 # Strings in SWF bytecode have 0x00 content 0x00 and the len # counts the delimiters, so a length of 2 or less is useless # Getting closer... lets reduce more false positives by # verifying that all chars in the url are ASCII # All chars are ASCII, we've got a URL! # # In case you're wondering, this url_join does work with # both relative and full URLs # Handle cases like "javascript:foo(1)" URLs # https://github.com/andresriancho/w3af/issues/2091 Searches for references on a page. w3af searches references in every html tag, including: - a - forms - images - frames - etc. :return: Two lists, one with the parsed URLs, and one with the URLs that came out of a regular expression. The second list if less trustworthy. | 2.020983 | 2 |
mooc_scraper/pipelines.py | ralphqq/MOOCScraper | 0 | 6623966 | # -*- coding: utf-8 -*-
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import sessionmaker
from class_central.models import db_connect, create_opencourse_table, OpenCourse
class MoocScraperPipeline(object):
    """Normalize scraped items so every expected field is present.

    Fields the spider did not populate are defaulted to None so that
    downstream pipelines (e.g. the DB insert) always see a full record.
    """

    # Every field an OpenCourse item is expected to carry.
    _FIELDS = (
        'course',
        'subject',
        'university',
        'provider',
        'start_date',
        'duration',
        'link',
        'date_scraped',
    )

    def process_item(self, item, spider):
        """Fill in any missing fields with None and pass the item along."""
        for field_name in self._FIELDS:
            item.setdefault(field_name, None)
        return item
class DBPipeline(object):
    """Persist scraped course items to the database, skipping duplicates."""

    def __init__(self):
        # Connect once and make sure the target table exists before scraping.
        engine = db_connect()
        create_opencourse_table(engine)
        # NOTE(review): this attribute holds a session *factory* (sessionmaker),
        # not a session instance — a name like `Session` would be clearer;
        # confirm nothing else depends on the attribute name before renaming.
        self.session = sessionmaker(bind=engine)

    def process_item(self, item, spider):
        """Insert *item* into the OpenCourse table.

        Uses PostgreSQL's ON CONFLICT DO NOTHING against the 'uix' unique
        constraint so re-scraped items are silently skipped.
        """
        session = self.session()
        try:
            insert_stmt = insert(OpenCourse .__table__).values(**item)
            do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
                constraint='uix'
            )
            session.execute(do_nothing_stmt)
            session.commit()
        except Exception as e:
            # Roll back the failed transaction and re-raise to Scrapy.
            # (`e` is unused; kept to leave the code byte-identical.)
            session.rollback()
            raise
        finally:
            # Always return the connection to the pool.
            session.close()
        return item
| # -*- coding: utf-8 -*-
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import sessionmaker
from class_central.models import db_connect, create_opencourse_table, OpenCourse
class MoocScraperPipeline(object):
def process_item(self, item, spider):
item.setdefault('course', None)
item.setdefault('subject', None)
item.setdefault('university', None)
item.setdefault('provider', None)
item.setdefault('start_date', None)
item.setdefault('duration', None)
item.setdefault('link', None)
item.setdefault('date_scraped', None)
return item
class DBPipeline(object):
def __init__(self):
engine = db_connect()
create_opencourse_table(engine)
self.session = sessionmaker(bind=engine)
def process_item(self, item, spider):
session = self.session()
try:
insert_stmt = insert(OpenCourse .__table__).values(**item)
do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
constraint='uix'
)
session.execute(do_nothing_stmt)
session.commit()
except Exception as e:
session.rollback()
raise
finally:
session.close()
return item
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.473372 | 2 |
bale/__main__.py | kianahmadian/bale-bot | 2 | 6623967 | import sys
def main():
    """Print a short banner with the bot name and the running Python version."""
    # BUGFIX: banner previously read "Pyhton-Bale-Bot" (typo).
    print("Python-Bale-Bot By <NAME>")
    print("Python Version: ", sys.version)


if __name__ == '__main__':
    main()
| import sys
def main():
print("Pyhton-Bale-Bot By <NAME>")
print("Python Version: ", sys.version)
if __name__ == '__main__':
main()
| none | 1 | 1.736442 | 2 | |
geometry_tools/__init__.py | gitter-badger/neuromorpho | 9 | 6623968 | """ Geometry tools """
__version__ = '0.0.1a0'
| """ Geometry tools """
__version__ = '0.0.1a0'
| en | 0.619272 | Geometry tools | 0.97231 | 1 |
leetcode/Leetcode 54. Spiral Matrix.py | agarun/algorithms | 0 | 6623969 | <filename>leetcode/Leetcode 54. Spiral Matrix.py
class Solution:
    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        """Return the elements of *matrix* in clockwise spiral order.

        Walks the outer ring (left->right across the top, down the right
        column, right->left across the bottom, up the left column), then
        shrinks the boundaries inward and repeats until exhausted.

        :param matrix: A rectangular grid (possibly empty).
        :return: All elements in spiral order; [] for an empty matrix.
        """
        # BUGFIX/robustness: the original crashed with IndexError on an
        # empty matrix (it unconditionally read matrix[0]).
        if not matrix or not matrix[0]:
            return []

        top, bottom = 0, len(matrix) - 1
        left, right = 0, len(matrix[0]) - 1
        out = []

        while top <= bottom and left <= right:
            # Top row, left -> right.
            for col in range(left, right + 1):
                out.append(matrix[top][col])
            top += 1

            # Right column, top -> bottom.
            for row in range(top, bottom + 1):
                out.append(matrix[row][right])
            right -= 1

            # Bottom row, right -> left (skip if that row was just consumed).
            if top <= bottom:
                for col in range(right, left - 1, -1):
                    out.append(matrix[bottom][col])
                bottom -= 1

            # Left column, bottom -> top (skip if that column was consumed).
            if left <= right:
                for row in range(bottom, top - 1, -1):
                    out.append(matrix[row][left])
                left += 1

        return out
| <filename>leetcode/Leetcode 54. Spiral Matrix.py
class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
m = len(matrix[0])
n = len(matrix)
# boundaries
top = 0
bottom = n - 1
left = 0
right = m - 1
out = []
curr_dir = "right"
while len(out) < m * n and top <= bottom and left <= right:
if curr_dir == "right":
for i in range(left, right + 1):
out.append(matrix[top][i])
top += 1
curr_dir = "down"
elif curr_dir == "down":
for i in range(top, bottom + 1):
out.append(matrix[i][right])
right -= 1
curr_dir = "left"
elif curr_dir == "left":
for i in range(right, left - 1, -1):
out.append(matrix[bottom][i])
bottom -= 1
curr_dir = "up"
elif curr_dir == "up":
for i in range(bottom, top - 1, -1):
out.append(matrix[i][left])
left += 1
curr_dir = "right"
return out
| en | 0.867373 | # boundaries | 3.773998 | 4 |
src/MatrixVisualisation.py | Handterpret/Infrared_Analysis | 0 | 6623970 | <filename>src/MatrixVisualisation.py
import numpy as np
import argparse
import os
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--input", default=".", help="Input folder with data to plot")
parser.add_argument("--output", default="./viz", help="Output folder for images")
args = parser.parse_args()
if __name__ == "__main__":
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    for file in [file for file in os.listdir(args.input) if file.endswith(".npy")]:
        matrix = np.load(os.path.join(args.input, file))
        # Average over the first axis (frames) to get one matrix per file.
        matrix = np.mean(matrix, axis=0)
        # BUGFIX: the original created a single fig/ax before the loop and
        # reused it, so ax.text() annotations from earlier files stayed on
        # the axes and piled up in every later image. Create (and close) a
        # fresh figure per file instead.
        fig, ax = plt.subplots()
        ax.matshow(matrix, cmap=plt.cm.Blues)
        # Annotate each of the 8x8 cells with its value.
        # NOTE(review): assumes the averaged matrix is 8x8 — confirm against
        # the sensor that produced the .npy files.
        for i in range(8):
            for j in range(8):
                c = matrix[j, i]
                ax.text(i, j, str("%.2f" % c), va='center', ha='center')
        # NOTE(review): file[:5] truncation can collide for files sharing a
        # 5-char prefix — confirm whether full stems should be used instead.
        plt.savefig(os.path.join(args.output, f"img_{file[:5]}.png"))
        plt.close(fig)  # release the figure's memory between files
plt.savefig(os.path.join(args.output, f"img_{file[:5]}.png")) | <filename>src/MatrixVisualisation.py
import numpy as np
import argparse
import os
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--input", default=".", help="Input folder with data to plot")
parser.add_argument("--output", default="./viz", help="Output folder for images")
args = parser.parse_args()
if __name__ == "__main__":
fig, ax = plt.subplots()
if not os.path.exists(args.output):
os.makedirs(args.output)
for file in [file for file in os.listdir(args.input) if file.endswith(".npy")]:
matrix = np.load(os.path.join(args.input, file))
matrix = np.mean(matrix, axis=0)
ax.matshow(matrix, cmap=plt.cm.Blues)
for i in range(8):
for j in range(8):
c = matrix[j,i]
ax.text(i, j, str("%.2f" % c), va='center', ha='center')
plt.savefig(os.path.join(args.output, f"img_{file[:5]}.png")) | none | 1 | 2.910083 | 3 | |
catalog/bindings/gmd/md_medium_type.py | NIVANorge/s-enda-playground | 0 | 6623971 | <filename>catalog/bindings/gmd/md_medium_type.py
from dataclasses import dataclass, field
from typing import List, Optional
from bindings.gmd.abstract_object_type import AbstractObjectType
from bindings.gmd.character_string_property_type import CharacterStringPropertyType
from bindings.gmd.integer_property_type import IntegerPropertyType
from bindings.gmd.md_medium_format_code_property_type import (
MdMediumFormatCodePropertyType,
)
from bindings.gmd.md_medium_name_code_property_type import MdMediumNameCodePropertyType
from bindings.gmd.real_property_type import RealPropertyType
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class MdMediumType(AbstractObjectType):
    """
    Information about the media on which the data can be distributed.
    """

    class Meta:
        # XML element name used by the xsdata (de)serializer.
        name = "MD_Medium_Type"

    # Optional single "name" child element (gmd namespace).
    name: Optional[MdMediumNameCodePropertyType] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.isotc211.org/2005/gmd",
        },
    )
    # Zero or more "density" child elements.
    density: List[RealPropertyType] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.isotc211.org/2005/gmd",
        },
    )
    # Optional element serialized as "densityUnits".
    density_units: Optional[CharacterStringPropertyType] = field(
        default=None,
        metadata={
            "name": "densityUnits",
            "type": "Element",
            "namespace": "http://www.isotc211.org/2005/gmd",
        },
    )
    # Optional single "volumes" child element.
    volumes: Optional[IntegerPropertyType] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.isotc211.org/2005/gmd",
        },
    )
    # Zero or more elements serialized as "mediumFormat".
    medium_format: List[MdMediumFormatCodePropertyType] = field(
        default_factory=list,
        metadata={
            "name": "mediumFormat",
            "type": "Element",
            "namespace": "http://www.isotc211.org/2005/gmd",
        },
    )
    # Optional element serialized as "mediumNote".
    medium_note: Optional[CharacterStringPropertyType] = field(
        default=None,
        metadata={
            "name": "mediumNote",
            "type": "Element",
            "namespace": "http://www.isotc211.org/2005/gmd",
        },
    )
| <filename>catalog/bindings/gmd/md_medium_type.py
from dataclasses import dataclass, field
from typing import List, Optional
from bindings.gmd.abstract_object_type import AbstractObjectType
from bindings.gmd.character_string_property_type import CharacterStringPropertyType
from bindings.gmd.integer_property_type import IntegerPropertyType
from bindings.gmd.md_medium_format_code_property_type import (
MdMediumFormatCodePropertyType,
)
from bindings.gmd.md_medium_name_code_property_type import MdMediumNameCodePropertyType
from bindings.gmd.real_property_type import RealPropertyType
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class MdMediumType(AbstractObjectType):
"""
Information about the media on which the data can be distributed.
"""
class Meta:
name = "MD_Medium_Type"
name: Optional[MdMediumNameCodePropertyType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
density: List[RealPropertyType] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
density_units: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"name": "densityUnits",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
volumes: Optional[IntegerPropertyType] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
medium_format: List[MdMediumFormatCodePropertyType] = field(
default_factory=list,
metadata={
"name": "mediumFormat",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
medium_note: Optional[CharacterStringPropertyType] = field(
default=None,
metadata={
"name": "mediumNote",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
| en | 0.891154 | Information about the media on which the data can be distributed. | 2.048244 | 2 |
titan/react_state_pkg/stateprovider/props.py | mnieber/moonleap | 0 | 6623972 | <gh_stars>0
import os
from moonleap.typespec.get_member_field_spec import get_member_field_spec
from moonleap.utils.case import l0
from moonleap.utils.inflect import plural
from titan.react_pkg.component.resources import get_component_base_url
from titan.react_pkg.pkg.get_chain import (
ExtractItemFromItem,
ExtractItemListFromItem,
TakeHighlightedElmFromState,
TakeItemFromState,
TakeItemFromStore,
TakeItemListFromState,
TakeItemListFromStore,
get_chain_to,
)
from titan.react_pkg.pkg.ts_var import (
ts_type,
ts_type_import_path,
ts_var,
ts_var_by_id,
)
from titan.react_view_pkg.pkg.create_component_router_config import (
create_component_router_config,
)
def create_router_configs(self):
    """Return router configs for this provider: one wrapping config when it has a state, else none."""
    if not self.state:
        return []
    return [
        create_component_router_config(
            self, wraps=True, url=get_component_base_url(self, "")
        )
    ]
def _get_default_input_props(chain):
result = []
for elm in chain:
if isinstance(
elm, (TakeItemListFromState, TakeItemFromState, TakeHighlightedElmFromState)
):
result += [elm.obj]
return result
def _get_input_stores(chain):
result = []
for elm in chain:
if isinstance(elm, (TakeItemListFromStore, TakeItemFromStore)):
result += [elm.subj]
if isinstance(elm, (ExtractItemFromItem, ExtractItemListFromItem)):
result += [elm.obj.provider_react_store]
return result
def _start_pos(chain):
for pos in reversed(range(len(chain))):
elm = chain[pos]
if isinstance(
elm,
(
TakeItemListFromState,
TakeItemFromState,
TakeHighlightedElmFromState,
TakeItemListFromStore,
TakeItemFromStore,
),
):
return pos
return 0
def _expression(chain):
    """Build the TypeScript expression that materializes the chain's target value.

    Walks *chain* element by element, prefixing or wrapping the accumulated
    ``result`` string so the final expression reads from props/stores and
    follows item references. The output is generated TS code, so the exact
    string contents (R.*, '?', '??') matter.
    """
    result = ""
    for elm in chain:
        if isinstance(elm, (TakeItemListFromStore)):
            # All values of the store's <item>ById map.
            result = (
                f"R.values({ts_var(elm.subj)}.{ts_var_by_id(elm.obj.item)})" + result
            )
        elif isinstance(elm, (TakeItemFromStore)):
            # Single value read straight off the store.
            result = f"{ts_var(elm.subj)}.{ts_var(elm.obj)}" + result
        elif isinstance(
            elm, (TakeItemListFromState, TakeItemFromState, TakeHighlightedElmFromState)
        ):
            # Value arrives via component props; '?' emits TS optional chaining.
            result = f"props.{ts_var(elm.obj)}?" + result
        elif isinstance(elm, (ExtractItemFromItem)):
            # Follow a member id on the current item and look it up by id
            # in the item list's provider store.
            store = ts_var(elm.obj.item_list.provider_react_store)
            var_by_id = ts_var_by_id(elm.obj)
            member = get_member_field_spec(
                parent_item=elm.subj, member_item=elm.obj
            ).name
            result = f"{store}.{var_by_id}[{result}.{member}]"
        elif isinstance(elm, (ExtractItemListFromItem)):
            # Look up a list of member ids in the store, dropping unresolved
            # (nil) entries; '?? []' guards a missing member list.
            store = ts_var(elm.obj.provider_react_store)
            var_by_id = ts_var_by_id(elm.obj.item)
            member = get_member_field_spec(
                parent_item=elm.subj, member_item=elm.obj
            ).name
            result = (
                f"R.reject(R.isNil)"
                + f"(lookUp({result}.{member} ?? [], {store}.{var_by_id}))"
            )
    return result
def get_context(state_provider):
    """Build the template context (a `sections` object) for a state provider.

    Computes the data chains feeding the provider's state, derives the
    default input props and input stores from them, and exposes a Sections
    object whose methods render TypeScript code snippets for the template.
    """
    # Ad-hoc namespace object shared by the closures below.
    _ = lambda: None
    _.state = state_provider.state
    _.chains = []
    for target in list(_.state.items_provided) + list(_.state.item_lists_provided):
        chain = get_chain_to(target, _.state)
        # Keep only the tail of the chain, starting at the last
        # state/store-sourced element.
        _.chains.append(chain[_start_pos(chain) : len(chain)])
    # De-duplicated props taken from state, preserving first-seen order.
    _.default_input_props = []
    for chain in _.chains:
        for default_input_prop in _get_default_input_props(chain):
            if default_input_prop not in _.default_input_props:
                _.default_input_props.append(default_input_prop)
    # De-duplicated stores feeding the chains, preserving first-seen order.
    _.stores = []
    for chain in _.chains:
        for store in _get_input_stores(chain):
            if store not in _.stores:
                _.stores.append(store)

    class Sections:
        """Each method renders one TypeScript snippet for the template."""

        def declare_default_input_props(self):
            # e.g. "foo: Foo,; bar: Bar," — prop declarations for the component.
            result = [f"{ts_var(x)}: {ts_type(x)}," for x in _.default_input_props]
            return "; ".join(result)

        def input_stores(self):
            # Comma-separated store variable names.
            result = [ts_var(x) for x in _.stores]
            return ", ".join(result)

        def default_prop_type_imports(self):
            # One TS import line per default input prop type.
            result = [
                f"import {{ {ts_type(x)} }} from '{ts_type_import_path(x)}';"
                for x in _.default_input_props
            ]
            return os.linesep.join(result)

        def get_state_input_values(self):
            # "name: <expression>," entries mapping each provided value to
            # the TS expression that computes it (see _expression).
            result = []
            for chain in _.chains:
                provided = chain[-1].obj
                expression = _expression(chain)
                result.append(f"{ts_var(provided)}: {expression},")
            return os.linesep.join(result)

        def set_state_input_values(self):
            # Assignment statements copying inputs into the state object.
            result = []
            tab = " " * 8
            for chain in _.chains:
                provided = chain[-1].obj
                result.append(
                    f"{tab}state.inputs.{ts_var(provided)} = inputs.{ts_var(provided)};"
                )
            return os.linesep.join(result)

        def default_props(self):
            # Default-prop getters for the state itself plus each item list
            # it manages (display list, resource-url lookup, behaviours).
            result = ""
            if _.state:
                result = f" {_.state.name}State: () => state,\n"
                store_by_item_name = _.state.store_by_item_name
                for item_name, bvrs in _.state.bvrs_by_item_name.items():
                    store = store_by_item_name.get(item_name)
                    items_name = plural(item_name)
                    result += f" {items_name}: () => state.outputs.{items_name}Display,\n"
                    result += f" {items_name}ResUrl: () => {l0(store.name)}.resUrls().{item_name}ById,\n"  # noqa: E501
                    for bvr in bvrs:
                        result += bvr.sections.default_props(store)
            return result

    return dict(sections=Sections())
| import os
from moonleap.typespec.get_member_field_spec import get_member_field_spec
from moonleap.utils.case import l0
from moonleap.utils.inflect import plural
from titan.react_pkg.component.resources import get_component_base_url
from titan.react_pkg.pkg.get_chain import (
ExtractItemFromItem,
ExtractItemListFromItem,
TakeHighlightedElmFromState,
TakeItemFromState,
TakeItemFromStore,
TakeItemListFromState,
TakeItemListFromStore,
get_chain_to,
)
from titan.react_pkg.pkg.ts_var import (
ts_type,
ts_type_import_path,
ts_var,
ts_var_by_id,
)
from titan.react_view_pkg.pkg.create_component_router_config import (
create_component_router_config,
)
def create_router_configs(self):
result = []
if self.state:
router_config = create_component_router_config(
self, wraps=True, url=get_component_base_url(self, "")
)
result.append(router_config)
return result
def _get_default_input_props(chain):
result = []
for elm in chain:
if isinstance(
elm, (TakeItemListFromState, TakeItemFromState, TakeHighlightedElmFromState)
):
result += [elm.obj]
return result
def _get_input_stores(chain):
result = []
for elm in chain:
if isinstance(elm, (TakeItemListFromStore, TakeItemFromStore)):
result += [elm.subj]
if isinstance(elm, (ExtractItemFromItem, ExtractItemListFromItem)):
result += [elm.obj.provider_react_store]
return result
def _start_pos(chain):
for pos in reversed(range(len(chain))):
elm = chain[pos]
if isinstance(
elm,
(
TakeItemListFromState,
TakeItemFromState,
TakeHighlightedElmFromState,
TakeItemListFromStore,
TakeItemFromStore,
),
):
return pos
return 0
def _expression(chain):
result = ""
for elm in chain:
if isinstance(elm, (TakeItemListFromStore)):
result = (
f"R.values({ts_var(elm.subj)}.{ts_var_by_id(elm.obj.item)})" + result
)
elif isinstance(elm, (TakeItemFromStore)):
result = f"{ts_var(elm.subj)}.{ts_var(elm.obj)}" + result
elif isinstance(
elm, (TakeItemListFromState, TakeItemFromState, TakeHighlightedElmFromState)
):
result = f"props.{ts_var(elm.obj)}?" + result
elif isinstance(elm, (ExtractItemFromItem)):
store = ts_var(elm.obj.item_list.provider_react_store)
var_by_id = ts_var_by_id(elm.obj)
member = get_member_field_spec(
parent_item=elm.subj, member_item=elm.obj
).name
result = f"{store}.{var_by_id}[{result}.{member}]"
elif isinstance(elm, (ExtractItemListFromItem)):
store = ts_var(elm.obj.provider_react_store)
var_by_id = ts_var_by_id(elm.obj.item)
member = get_member_field_spec(
parent_item=elm.subj, member_item=elm.obj
).name
result = (
f"R.reject(R.isNil)"
+ f"(lookUp({result}.{member} ?? [], {store}.{var_by_id}))"
)
return result
def get_context(state_provider):
_ = lambda: None
_.state = state_provider.state
_.chains = []
for target in list(_.state.items_provided) + list(_.state.item_lists_provided):
chain = get_chain_to(target, _.state)
_.chains.append(chain[_start_pos(chain) : len(chain)])
_.default_input_props = []
for chain in _.chains:
for default_input_prop in _get_default_input_props(chain):
if default_input_prop not in _.default_input_props:
_.default_input_props.append(default_input_prop)
_.stores = []
for chain in _.chains:
for store in _get_input_stores(chain):
if store not in _.stores:
_.stores.append(store)
class Sections:
def declare_default_input_props(self):
result = [f"{ts_var(x)}: {ts_type(x)}," for x in _.default_input_props]
return "; ".join(result)
def input_stores(self):
result = [ts_var(x) for x in _.stores]
return ", ".join(result)
def default_prop_type_imports(self):
result = [
f"import {{ {ts_type(x)} }} from '{ts_type_import_path(x)}';"
for x in _.default_input_props
]
return os.linesep.join(result)
def get_state_input_values(self):
result = []
for chain in _.chains:
provided = chain[-1].obj
expression = _expression(chain)
result.append(f"{ts_var(provided)}: {expression},")
return os.linesep.join(result)
def set_state_input_values(self):
result = []
tab = " " * 8
for chain in _.chains:
provided = chain[-1].obj
result.append(
f"{tab}state.inputs.{ts_var(provided)} = inputs.{ts_var(provided)};"
)
return os.linesep.join(result)
def default_props(self):
result = ""
if _.state:
result = f" {_.state.name}State: () => state,\n"
store_by_item_name = _.state.store_by_item_name
for item_name, bvrs in _.state.bvrs_by_item_name.items():
store = store_by_item_name.get(item_name)
items_name = plural(item_name)
result += f" {items_name}: () => state.outputs.{items_name}Display,\n"
result += f" {items_name}ResUrl: () => {l0(store.name)}.resUrls().{item_name}ById,\n" # noqa: E501
for bvr in bvrs:
result += bvr.sections.default_props(store)
return result
return dict(sections=Sections()) | it | 0.356793 | # noqa: E501 | 1.788465 | 2 |
sitator/site_descriptors/SOAP.py | ahzeeshan/sitator | 0 | 6623973 |
import numpy as np
from abc import ABCMeta, abstractmethod
from sitator.SiteNetwork import SiteNetwork
from sitator.SiteTrajectory import SiteTrajectory
try:
import quippy as qp
from quippy import descriptors
except ImportError:
raise ImportError("Quippy with GAP is required for using SOAP descriptors.")
from ase.data import atomic_numbers
DEFAULT_SOAP_PARAMS = {
'cutoff' : 3.0,
'cutoff_transition_width' : 1.0,
'l_max' : 6, 'n_max' : 6,
'atom_sigma' : 0.4
}
# From https://github.com/tqdm/tqdm/issues/506#issuecomment-373126698
import sys
try:
ipy_str = str(type(get_ipython()))
if 'zmqshell' in ipy_str:
from tqdm import tqdm_notebook as tqdm
if 'terminal' in ipy_str:
from tqdm import tqdm
except:
if sys.stderr.isatty():
from tqdm import tqdm
else:
def tqdm(iterable, **kwargs):
return iterable
class SOAP(object):
"""Abstract base class for computing SOAP vectors in a SiteNetwork.
SOAP computations are *not* thread-safe; use one SOAP object per thread.
:param int tracer_atomic_number: The atomic number of the tracer.
:param list environment: The atomic numbers or atomic symbols
of the environment to consider. I.e. for Li2CO3, can be set to ['O'] or [8]
for oxygen only, or ['C', 'O'] / ['C', 8] / [6,8] if carbon and oxygen
are considered an environment.
Defaults to `None`, in which case all non-mobile atoms are considered
regardless of species.
:param soap_mask: Which atoms in the SiteNetwork's structure
to use in SOAP calculations.
Can be either a boolean mask ndarray or a tuple of species.
If `None`, the entire static_structure of the SiteNetwork will be used.
Mobile atoms cannot be used for the SOAP host structure.
Even not masked, species not considered in environment will be not accounted for.
For ideal performance: Specify environment and soap_mask correctly!
:param dict soap_params = {}: Any custom SOAP params.
"""
__metaclass__ = ABCMeta
def __init__(self, tracer_atomic_number, environment = None,
soap_mask=None, soap_params={}, verbose =True):
from ase.data import atomic_numbers
# Creating a dictionary for convenience, to check the types and values:
self.tracer_atomic_number = 3
centers_list = [self.tracer_atomic_number]
self._soap_mask = soap_mask
# -- Create the descriptor object
soap_opts = dict(DEFAULT_SOAP_PARAMS)
soap_opts.update(soap_params)
soap_cmd_line = ["soap"]
# User options
for opt in soap_opts:
soap_cmd_line.append("{}={}".format(opt, soap_opts[opt]))
#
soap_cmd_line.append('n_Z={} Z={{{}}}'.format(len(centers_list), ' '.join(map(str, centers_list))))
# - Add environment species controls if given
self._environment = None
if not environment is None:
if not isinstance(environment, (list, tuple)):
raise TypeError('environment has to be a list or tuple of species (atomic number'
' or symbol of the environment to consider')
environment_list = []
for e in environment:
if isinstance(e, int):
assert 0 < e <= max(atomic_numbers.values())
environment_list.append(e)
elif isinstance(e, str):
try:
environment_list.append(atomic_numbers[e])
except KeyError:
raise KeyError("You provided a string that is not a valid atomic symbol")
else:
raise TypeError("Environment has to be a list of atomic numbers or atomic symbols")
self._environment = environment_list
soap_cmd_line.append('n_species={} species_Z={{{}}}'.format(len(environment_list), ' '.join(map(str, environment_list))))
soap_cmd_line = " ".join(soap_cmd_line)
if verbose:
print("SOAP command line: %s" % soap_cmd_line)
self._soaper = descriptors.Descriptor(soap_cmd_line)
self._verbose = verbose
self._cutoff = soap_opts['cutoff']
@property
def n_dim(self):
return self._soaper.n_dim
def get_descriptors(self, stn):
"""
Get the descriptors.
:param stn: A valid instance of SiteTrajectory or SiteNetwork
:returns: an array of descriptor vectors and an equal length array of
labels indicating which descriptors correspond to which sites.
"""
# Build SOAP host structure
if isinstance(stn, SiteTrajectory):
structure, tracer_index, soap_mask = self._make_structure(stn.site_network)
elif isinstance(stn, SiteNetwork):
structure, tracer_index, soap_mask = self._make_structure(stn)
else:
raise TypeError("`stn` must be SiteNetwork or SiteTrajectory")
# Compute descriptors
return self._get_descriptors(stn, structure, tracer_index, soap_mask)
# ----
def _make_structure(self, sn):
if self._soap_mask is None:
# Make a copy of the static structure
structure = qp.Atoms(sn.static_structure)
soap_mask = sn.static_mask # soap mask is the
else:
if isinstance(self._soap_mask, tuple):
soap_mask = np.in1d(sn.structure.get_chemical_species(), self._soap_mask)
else:
soap_mask = self._soap_mask
assert not np.any(soap_mask & sn.mobile_mask), "Error for atoms %s; No atom can be both static and mobile" % np.where(soap_mask & sn.mobile_mask)[0]
structure = qp.Atoms(sn.structure[soap_mask])
assert np.any(soap_mask), "Given `soap_mask` excluded all host atoms."
if not self._environment is None:
assert np.any(np.isin(sn.structure.get_atomic_numbers()[soap_mask], self._environment)), "Combination of given `soap_mask` with the given `environment` excludes all host atoms."
# Add a tracer
if self.tracer_atomic_number is None:
tracer_atomic_number = sn.structure.get_atomic_numbers()[sn.mobile_mask][0]
else:
tracer_atomic_number = self.tracer_atomic_number
structure.add_atoms((0.0, 0.0, 0.0), tracer_atomic_number)
structure.set_pbc([True, True, True])
tracer_index = len(structure) - 1
return structure, tracer_index, soap_mask
@abstractmethod
def _get_descriptors(self, stn, structure, tracer_index):
pass
class SOAPCenters(SOAP):
"""Compute the SOAPs of the site centers in the fixed host structure.
Requires a SiteNetwork as input.
"""
def _get_descriptors(self, sn, structure, tracer_index, soap_mask):
assert isinstance(sn, SiteNetwork), "SOAPCenters requires a SiteNetwork, not `%s`" % sn
pts = sn.centers
out = np.empty(shape = (len(pts), self.n_dim), dtype = np.float)
structure.set_cutoff(self._soaper.cutoff())
for i, pt in enumerate(tqdm(pts, desc="SOAP") if self._verbose else pts):
# Move tracer
structure.positions[tracer_index] = pt
# SOAP requires connectivity data to be computed first
structure.calc_connect()
#There should only be one descriptor, since there should only be one Li
out[i] = self._soaper.calc(structure)['descriptor'][0]
return out, np.arange(sn.n_sites)
class SOAPSampledCenters(SOAPCenters):
"""Compute the SOAPs of representative points for each site, as determined by `sampling_transform`.
Takes either a SiteNetwork or SiteTrajectory as input; requires that
`sampling_transform` produce a SiteNetwork where `site_types` indicates
which site in the original SiteNetwork/SiteTrajectory it was sampled from.
Typical sampling transforms are `sitator.misc.NAvgsPerSite` (for a SiteTrajectory)
and `sitator.misc.GenerateAroundSites` (for a SiteNetwork).
"""
def __init__(self, *args, **kwargs):
self.sampling_transform = kwargs.pop('sampling_transform', 1)
super(SOAPSampledCenters, self).__init__(*args, **kwargs)
def get_descriptors(self, stn):
# Do sampling
sampled = self.sampling_transform.run(stn)
assert isinstance(sampled, SiteNetwork), "Sampling transform returned `%s`, not a SiteNetwork" % sampled
# Compute actual dvecs
dvecs, _ = super(SOAPSampledCenters, self).get_descriptors(sampled)
# Return right corersponding sites
return dvecs, sampled.site_types
class SOAPDescriptorAverages(SOAP):
"""Compute many instantaneous SOAPs for each site, and then average them in SOAP space.
Computes the SOAP descriptors for mobile particles assigned to each site,
in the host structure *as it was at that moment*. Those descriptor vectors are
then averaged in SOAP space to give the final SOAP vectors for each site.
This method often performs better than SOAPSampledCenters on more dynamic
systems, but requires significantly more computation.
:param int stepsize: Stride (in frames) when computing SOAPs. Default 1.
:param int averaging: Number of SOAP vectors to average for each output vector.
:param int avg_descriptors_per_site: Can be specified instead of `averaging`.
Specifies the _average_ number of average SOAP vectors to compute for each
site. This does not guerantee that number of SOAP vectors for any site,
rather, it allows a trajectory-size agnostic way to specify approximately
how many descriptors are desired.
"""
def __init__(self, *args, **kwargs):
averaging_key = 'averaging'
stepsize_key = 'stepsize'
avg_desc_per_key = 'avg_descriptors_per_site'
assert not ((averaging_key in kwargs) and (avg_desc_per_key in kwargs)), "`averaging` and `avg_descriptors_per_site` cannot be specified at the same time."
self._stepsize = kwargs.pop(stepsize_key, 1)
d = {stepsize_key : self._stepsize}
if averaging_key in kwargs:
self._averaging = kwargs.pop(averaging_key)
d[averaging_key] = self._averaging
self._avg_desc_per_site = None
elif avg_desc_per_key in kwargs:
self._avg_desc_per_site = kwargs.pop(avg_desc_per_key)
d[avg_desc_per_key] = self._avg_desc_per_site
self._averaging = None
else:
raise RuntimeError("Either the `averaging` or `avg_descriptors_per_site` option must be provided.")
for k,v in d.items():
if not isinstance(v, int):
raise TypeError('{} has to be an integer'.format(k))
if not ( v > 0):
raise ValueError('{} has to be an positive'.format(k))
del d # not needed anymore!
super(SOAPDescriptorAverages, self).__init__(*args, **kwargs)
def _get_descriptors(self, site_trajectory, structure, tracer_index, soap_mask):
"""
calculate descriptors
"""
# the number of sites in the network
nsit = site_trajectory.site_network.n_sites
# I load the indices of the mobiles species into mob_indices:
mob_indices = np.where(site_trajectory.site_network.mobile_mask)[0]
# real_traj is the real space positions, site_traj the site trajectory
# (i.e. for every mobile species the site index)
# I load into new variable, only the steps I need (memory???)
real_traj = site_trajectory._real_traj[::self._stepsize]
site_traj = site_trajectory.traj[::self._stepsize]
# Now, I need to allocate the output
# so for each site, I count how much data there is!
counts = np.array([np.count_nonzero(site_traj==site_idx) for site_idx in xrange(nsit)], dtype=int)
if self._averaging is not None:
averaging = self._averaging
else:
averaging = int(np.floor(np.mean(counts) / self._avg_desc_per_site))
nr_of_descs = counts // averaging
if np.any(nr_of_descs == 0):
raise ValueError("You are asking too much, averaging with {} gives a problem".format(averaging))
# This is where I load the descriptor:
descs = np.zeros((np.sum(nr_of_descs), self.n_dim))
# An array that tells me the index I'm at for each site type
desc_index = [np.sum(nr_of_descs[:i]) for i in range(len(nr_of_descs))]
max_index = [np.sum(nr_of_descs[:i+1]) for i in range(len(nr_of_descs))]
count_of_site = np.zeros(len(nr_of_descs), dtype=int)
blocked = np.empty(nsit, dtype=bool)
blocked[:] = False
structure.set_cutoff(self._soaper.cutoff())
for site_traj_t, pos in tqdm(zip(site_traj, real_traj), desc="SOAP"):
# I update the host lattice positions here, once for every timestep
structure.positions[:tracer_index] = pos[soap_mask]
for mob_idx, site_idx in enumerate(site_traj_t):
if site_idx >= 0 and not blocked[site_idx]:
# Now, for every lithium that has been associated to a site of index site_idx,
# I take my structure and load the position of this mobile atom:
structure.positions[tracer_index] = pos[mob_indices[mob_idx]]
# calc_connect to calculated distance
# structure.calc_connect()
#There should only be one descriptor, since there should only be one mobile
# I also divide by averaging, to avoid getting into large numbers.
# soapv = self._soaper.calc(structure)['descriptor'][0] / self._averaging
structure.set_cutoff(self._cutoff)
structure.calc_connect()
soapv = self._soaper.calc(structure, grad=False)["descriptor"]
#~ soapv ,_,_ = get_fingerprints([structure], d)
# So, now I need to figure out where to load the soapv into desc
idx_to_add_desc = desc_index[site_idx]
descs[idx_to_add_desc, :] += soapv[0] / averaging
count_of_site[site_idx] += 1
# Now, if the count reaches the averaging I want, I augment
if count_of_site[site_idx] == averaging:
desc_index[site_idx] += 1
count_of_site[site_idx] = 0
# Now I check whether I have to block this site from accumulating more descriptors
if max_index[site_idx] == desc_index[site_idx]:
blocked[site_idx] = True
desc_to_site = np.repeat(range(nsit), nr_of_descs)
return descs, desc_to_site
|
import numpy as np
from abc import ABCMeta, abstractmethod
from sitator.SiteNetwork import SiteNetwork
from sitator.SiteTrajectory import SiteTrajectory
try:
import quippy as qp
from quippy import descriptors
except ImportError:
raise ImportError("Quippy with GAP is required for using SOAP descriptors.")
from ase.data import atomic_numbers
DEFAULT_SOAP_PARAMS = {
'cutoff' : 3.0,
'cutoff_transition_width' : 1.0,
'l_max' : 6, 'n_max' : 6,
'atom_sigma' : 0.4
}
# From https://github.com/tqdm/tqdm/issues/506#issuecomment-373126698
import sys
try:
ipy_str = str(type(get_ipython()))
if 'zmqshell' in ipy_str:
from tqdm import tqdm_notebook as tqdm
if 'terminal' in ipy_str:
from tqdm import tqdm
except:
if sys.stderr.isatty():
from tqdm import tqdm
else:
def tqdm(iterable, **kwargs):
return iterable
class SOAP(object):
"""Abstract base class for computing SOAP vectors in a SiteNetwork.
SOAP computations are *not* thread-safe; use one SOAP object per thread.
:param int tracer_atomic_number: The atomic number of the tracer.
:param list environment: The atomic numbers or atomic symbols
of the environment to consider. I.e. for Li2CO3, can be set to ['O'] or [8]
for oxygen only, or ['C', 'O'] / ['C', 8] / [6,8] if carbon and oxygen
are considered an environment.
Defaults to `None`, in which case all non-mobile atoms are considered
regardless of species.
:param soap_mask: Which atoms in the SiteNetwork's structure
to use in SOAP calculations.
Can be either a boolean mask ndarray or a tuple of species.
If `None`, the entire static_structure of the SiteNetwork will be used.
Mobile atoms cannot be used for the SOAP host structure.
Even not masked, species not considered in environment will be not accounted for.
For ideal performance: Specify environment and soap_mask correctly!
:param dict soap_params = {}: Any custom SOAP params.
"""
__metaclass__ = ABCMeta
def __init__(self, tracer_atomic_number, environment = None,
soap_mask=None, soap_params={}, verbose =True):
from ase.data import atomic_numbers
# Creating a dictionary for convenience, to check the types and values:
self.tracer_atomic_number = 3
centers_list = [self.tracer_atomic_number]
self._soap_mask = soap_mask
# -- Create the descriptor object
soap_opts = dict(DEFAULT_SOAP_PARAMS)
soap_opts.update(soap_params)
soap_cmd_line = ["soap"]
# User options
for opt in soap_opts:
soap_cmd_line.append("{}={}".format(opt, soap_opts[opt]))
#
soap_cmd_line.append('n_Z={} Z={{{}}}'.format(len(centers_list), ' '.join(map(str, centers_list))))
# - Add environment species controls if given
self._environment = None
if not environment is None:
if not isinstance(environment, (list, tuple)):
raise TypeError('environment has to be a list or tuple of species (atomic number'
' or symbol of the environment to consider')
environment_list = []
for e in environment:
if isinstance(e, int):
assert 0 < e <= max(atomic_numbers.values())
environment_list.append(e)
elif isinstance(e, str):
try:
environment_list.append(atomic_numbers[e])
except KeyError:
raise KeyError("You provided a string that is not a valid atomic symbol")
else:
raise TypeError("Environment has to be a list of atomic numbers or atomic symbols")
self._environment = environment_list
soap_cmd_line.append('n_species={} species_Z={{{}}}'.format(len(environment_list), ' '.join(map(str, environment_list))))
soap_cmd_line = " ".join(soap_cmd_line)
if verbose:
print("SOAP command line: %s" % soap_cmd_line)
self._soaper = descriptors.Descriptor(soap_cmd_line)
self._verbose = verbose
self._cutoff = soap_opts['cutoff']
@property
def n_dim(self):
return self._soaper.n_dim
def get_descriptors(self, stn):
"""
Get the descriptors.
:param stn: A valid instance of SiteTrajectory or SiteNetwork
:returns: an array of descriptor vectors and an equal length array of
labels indicating which descriptors correspond to which sites.
"""
# Build SOAP host structure
if isinstance(stn, SiteTrajectory):
structure, tracer_index, soap_mask = self._make_structure(stn.site_network)
elif isinstance(stn, SiteNetwork):
structure, tracer_index, soap_mask = self._make_structure(stn)
else:
raise TypeError("`stn` must be SiteNetwork or SiteTrajectory")
# Compute descriptors
return self._get_descriptors(stn, structure, tracer_index, soap_mask)
# ----
def _make_structure(self, sn):
if self._soap_mask is None:
# Make a copy of the static structure
structure = qp.Atoms(sn.static_structure)
soap_mask = sn.static_mask # soap mask is the
else:
if isinstance(self._soap_mask, tuple):
soap_mask = np.in1d(sn.structure.get_chemical_species(), self._soap_mask)
else:
soap_mask = self._soap_mask
assert not np.any(soap_mask & sn.mobile_mask), "Error for atoms %s; No atom can be both static and mobile" % np.where(soap_mask & sn.mobile_mask)[0]
structure = qp.Atoms(sn.structure[soap_mask])
assert np.any(soap_mask), "Given `soap_mask` excluded all host atoms."
if not self._environment is None:
assert np.any(np.isin(sn.structure.get_atomic_numbers()[soap_mask], self._environment)), "Combination of given `soap_mask` with the given `environment` excludes all host atoms."
# Add a tracer
if self.tracer_atomic_number is None:
tracer_atomic_number = sn.structure.get_atomic_numbers()[sn.mobile_mask][0]
else:
tracer_atomic_number = self.tracer_atomic_number
structure.add_atoms((0.0, 0.0, 0.0), tracer_atomic_number)
structure.set_pbc([True, True, True])
tracer_index = len(structure) - 1
return structure, tracer_index, soap_mask
@abstractmethod
def _get_descriptors(self, stn, structure, tracer_index):
pass
class SOAPCenters(SOAP):
"""Compute the SOAPs of the site centers in the fixed host structure.
Requires a SiteNetwork as input.
"""
def _get_descriptors(self, sn, structure, tracer_index, soap_mask):
assert isinstance(sn, SiteNetwork), "SOAPCenters requires a SiteNetwork, not `%s`" % sn
pts = sn.centers
out = np.empty(shape = (len(pts), self.n_dim), dtype = np.float)
structure.set_cutoff(self._soaper.cutoff())
for i, pt in enumerate(tqdm(pts, desc="SOAP") if self._verbose else pts):
# Move tracer
structure.positions[tracer_index] = pt
# SOAP requires connectivity data to be computed first
structure.calc_connect()
#There should only be one descriptor, since there should only be one Li
out[i] = self._soaper.calc(structure)['descriptor'][0]
return out, np.arange(sn.n_sites)
class SOAPSampledCenters(SOAPCenters):
"""Compute the SOAPs of representative points for each site, as determined by `sampling_transform`.
Takes either a SiteNetwork or SiteTrajectory as input; requires that
`sampling_transform` produce a SiteNetwork where `site_types` indicates
which site in the original SiteNetwork/SiteTrajectory it was sampled from.
Typical sampling transforms are `sitator.misc.NAvgsPerSite` (for a SiteTrajectory)
and `sitator.misc.GenerateAroundSites` (for a SiteNetwork).
"""
def __init__(self, *args, **kwargs):
self.sampling_transform = kwargs.pop('sampling_transform', 1)
super(SOAPSampledCenters, self).__init__(*args, **kwargs)
def get_descriptors(self, stn):
# Do sampling
sampled = self.sampling_transform.run(stn)
assert isinstance(sampled, SiteNetwork), "Sampling transform returned `%s`, not a SiteNetwork" % sampled
# Compute actual dvecs
dvecs, _ = super(SOAPSampledCenters, self).get_descriptors(sampled)
# Return right corersponding sites
return dvecs, sampled.site_types
class SOAPDescriptorAverages(SOAP):
"""Compute many instantaneous SOAPs for each site, and then average them in SOAP space.
Computes the SOAP descriptors for mobile particles assigned to each site,
in the host structure *as it was at that moment*. Those descriptor vectors are
then averaged in SOAP space to give the final SOAP vectors for each site.
This method often performs better than SOAPSampledCenters on more dynamic
systems, but requires significantly more computation.
:param int stepsize: Stride (in frames) when computing SOAPs. Default 1.
:param int averaging: Number of SOAP vectors to average for each output vector.
:param int avg_descriptors_per_site: Can be specified instead of `averaging`.
Specifies the _average_ number of average SOAP vectors to compute for each
site. This does not guerantee that number of SOAP vectors for any site,
rather, it allows a trajectory-size agnostic way to specify approximately
how many descriptors are desired.
"""
def __init__(self, *args, **kwargs):
averaging_key = 'averaging'
stepsize_key = 'stepsize'
avg_desc_per_key = 'avg_descriptors_per_site'
assert not ((averaging_key in kwargs) and (avg_desc_per_key in kwargs)), "`averaging` and `avg_descriptors_per_site` cannot be specified at the same time."
self._stepsize = kwargs.pop(stepsize_key, 1)
d = {stepsize_key : self._stepsize}
if averaging_key in kwargs:
self._averaging = kwargs.pop(averaging_key)
d[averaging_key] = self._averaging
self._avg_desc_per_site = None
elif avg_desc_per_key in kwargs:
self._avg_desc_per_site = kwargs.pop(avg_desc_per_key)
d[avg_desc_per_key] = self._avg_desc_per_site
self._averaging = None
else:
raise RuntimeError("Either the `averaging` or `avg_descriptors_per_site` option must be provided.")
for k,v in d.items():
if not isinstance(v, int):
raise TypeError('{} has to be an integer'.format(k))
if not ( v > 0):
raise ValueError('{} has to be an positive'.format(k))
del d # not needed anymore!
super(SOAPDescriptorAverages, self).__init__(*args, **kwargs)
def _get_descriptors(self, site_trajectory, structure, tracer_index, soap_mask):
"""
calculate descriptors
"""
# the number of sites in the network
nsit = site_trajectory.site_network.n_sites
# I load the indices of the mobiles species into mob_indices:
mob_indices = np.where(site_trajectory.site_network.mobile_mask)[0]
# real_traj is the real space positions, site_traj the site trajectory
# (i.e. for every mobile species the site index)
# I load into new variable, only the steps I need (memory???)
real_traj = site_trajectory._real_traj[::self._stepsize]
site_traj = site_trajectory.traj[::self._stepsize]
# Now, I need to allocate the output
# so for each site, I count how much data there is!
counts = np.array([np.count_nonzero(site_traj==site_idx) for site_idx in xrange(nsit)], dtype=int)
if self._averaging is not None:
averaging = self._averaging
else:
averaging = int(np.floor(np.mean(counts) / self._avg_desc_per_site))
nr_of_descs = counts // averaging
if np.any(nr_of_descs == 0):
raise ValueError("You are asking too much, averaging with {} gives a problem".format(averaging))
# This is where I load the descriptor:
descs = np.zeros((np.sum(nr_of_descs), self.n_dim))
# An array that tells me the index I'm at for each site type
desc_index = [np.sum(nr_of_descs[:i]) for i in range(len(nr_of_descs))]
max_index = [np.sum(nr_of_descs[:i+1]) for i in range(len(nr_of_descs))]
count_of_site = np.zeros(len(nr_of_descs), dtype=int)
blocked = np.empty(nsit, dtype=bool)
blocked[:] = False
structure.set_cutoff(self._soaper.cutoff())
for site_traj_t, pos in tqdm(zip(site_traj, real_traj), desc="SOAP"):
# I update the host lattice positions here, once for every timestep
structure.positions[:tracer_index] = pos[soap_mask]
for mob_idx, site_idx in enumerate(site_traj_t):
if site_idx >= 0 and not blocked[site_idx]:
# Now, for every lithium that has been associated to a site of index site_idx,
# I take my structure and load the position of this mobile atom:
structure.positions[tracer_index] = pos[mob_indices[mob_idx]]
# calc_connect to calculated distance
# structure.calc_connect()
#There should only be one descriptor, since there should only be one mobile
# I also divide by averaging, to avoid getting into large numbers.
# soapv = self._soaper.calc(structure)['descriptor'][0] / self._averaging
structure.set_cutoff(self._cutoff)
structure.calc_connect()
soapv = self._soaper.calc(structure, grad=False)["descriptor"]
#~ soapv ,_,_ = get_fingerprints([structure], d)
# So, now I need to figure out where to load the soapv into desc
idx_to_add_desc = desc_index[site_idx]
descs[idx_to_add_desc, :] += soapv[0] / averaging
count_of_site[site_idx] += 1
# Now, if the count reaches the averaging I want, I augment
if count_of_site[site_idx] == averaging:
desc_index[site_idx] += 1
count_of_site[site_idx] = 0
# Now I check whether I have to block this site from accumulating more descriptors
if max_index[site_idx] == desc_index[site_idx]:
blocked[site_idx] = True
desc_to_site = np.repeat(range(nsit), nr_of_descs)
return descs, desc_to_site
| en | 0.836132 | # From https://github.com/tqdm/tqdm/issues/506#issuecomment-373126698 Abstract base class for computing SOAP vectors in a SiteNetwork. SOAP computations are *not* thread-safe; use one SOAP object per thread. :param int tracer_atomic_number: The atomic number of the tracer. :param list environment: The atomic numbers or atomic symbols of the environment to consider. I.e. for Li2CO3, can be set to ['O'] or [8] for oxygen only, or ['C', 'O'] / ['C', 8] / [6,8] if carbon and oxygen are considered an environment. Defaults to `None`, in which case all non-mobile atoms are considered regardless of species. :param soap_mask: Which atoms in the SiteNetwork's structure to use in SOAP calculations. Can be either a boolean mask ndarray or a tuple of species. If `None`, the entire static_structure of the SiteNetwork will be used. Mobile atoms cannot be used for the SOAP host structure. Even not masked, species not considered in environment will be not accounted for. For ideal performance: Specify environment and soap_mask correctly! :param dict soap_params = {}: Any custom SOAP params. # Creating a dictionary for convenience, to check the types and values: # -- Create the descriptor object # User options # # - Add environment species controls if given Get the descriptors. :param stn: A valid instance of SiteTrajectory or SiteNetwork :returns: an array of descriptor vectors and an equal length array of labels indicating which descriptors correspond to which sites. # Build SOAP host structure # Compute descriptors # ---- # Make a copy of the static structure # soap mask is the # Add a tracer Compute the SOAPs of the site centers in the fixed host structure. Requires a SiteNetwork as input. # Move tracer # SOAP requires connectivity data to be computed first #There should only be one descriptor, since there should only be one Li Compute the SOAPs of representative points for each site, as determined by `sampling_transform`. 
Takes either a SiteNetwork or SiteTrajectory as input; requires that `sampling_transform` produce a SiteNetwork where `site_types` indicates which site in the original SiteNetwork/SiteTrajectory it was sampled from. Typical sampling transforms are `sitator.misc.NAvgsPerSite` (for a SiteTrajectory) and `sitator.misc.GenerateAroundSites` (for a SiteNetwork). # Do sampling # Compute actual dvecs # Return right corersponding sites Compute many instantaneous SOAPs for each site, and then average them in SOAP space. Computes the SOAP descriptors for mobile particles assigned to each site, in the host structure *as it was at that moment*. Those descriptor vectors are then averaged in SOAP space to give the final SOAP vectors for each site. This method often performs better than SOAPSampledCenters on more dynamic systems, but requires significantly more computation. :param int stepsize: Stride (in frames) when computing SOAPs. Default 1. :param int averaging: Number of SOAP vectors to average for each output vector. :param int avg_descriptors_per_site: Can be specified instead of `averaging`. Specifies the _average_ number of average SOAP vectors to compute for each site. This does not guerantee that number of SOAP vectors for any site, rather, it allows a trajectory-size agnostic way to specify approximately how many descriptors are desired. # not needed anymore! calculate descriptors # the number of sites in the network # I load the indices of the mobiles species into mob_indices: # real_traj is the real space positions, site_traj the site trajectory # (i.e. for every mobile species the site index) # I load into new variable, only the steps I need (memory???) # Now, I need to allocate the output # so for each site, I count how much data there is! 
# This is where I load the descriptor: # An array that tells me the index I'm at for each site type # I update the host lattice positions here, once for every timestep # Now, for every lithium that has been associated to a site of index site_idx, # I take my structure and load the position of this mobile atom: # calc_connect to calculated distance # structure.calc_connect() #There should only be one descriptor, since there should only be one mobile # I also divide by averaging, to avoid getting into large numbers. # soapv = self._soaper.calc(structure)['descriptor'][0] / self._averaging #~ soapv ,_,_ = get_fingerprints([structure], d) # So, now I need to figure out where to load the soapv into desc # Now, if the count reaches the averaging I want, I augment # Now I check whether I have to block this site from accumulating more descriptors | 2.256482 | 2 |
src/declarativeTask3/ld_GUI_adjust_sound_volumes.py | labdoyon/declarativeTask3 | 0 | 6623974 | import sys
import pickle
import os
from expyriment import control, misc, design, stimuli, io
from expyriment.misc import constants
from expyriment.misc._timer import get_time
from declarativeTask3.config import debug, windowMode, windowSize, classPictures, sounds, \
bgColor, arrow, textSize, textColor, cardColor, responseTime, mouseButton, clickColor, clicPeriod
from declarativeTask3.config import experiment_session
from declarativeTask3.ld_utils import getLanguage, setCursor, cardSize, readMouse, rename_output_files_to_BIDS
from declarativeTask3.ld_stimuli_names import soundNames, ttl_instructions_text
from declarativeTask3.ld_sound import change_volume, play_sound, delete_temp_files, create_temp_sound_files
from declarativeTask3.ttl_catch_keyboard import wait_for_ttl_keyboard
if not windowMode: # Check WindowMode and Resolution
control.defaults.window_mode = windowMode
control.defaults.window_size = misc.get_monitor_resolution()
windowSize = control.defaults.window_size
else:
control.defaults.window_mode = windowMode
control.defaults.window_size = windowSize
if debug:
control.set_develop_mode(on=True, intensive_logging=False, skip_wait_methods=True)
arguments = str(''.join(sys.argv[1:])).split(',') # Get arguments - experiment name and subject
experimentName = arguments[0]
subject_name = arguments[1]
exp = design.Experiment(experimentName) # Save experiment name
session = experiment_session[experimentName]
session_dir = os.path.normpath(os.path.join('sourcedata', 'sub-' + subject_name, 'ses-' + session))
output_dir = os.path.normpath(os.path.join(session_dir, 'beh'))
if not os.path.isdir(session_dir):
os.mkdir(session_dir)
io.defaults.datafile_directory = output_dir
io.defaults.eventfile_directory = output_dir
control.initialize(exp)
exp.add_experiment_info('Subject: ')
exp.add_experiment_info(subject_name)
language = str(getLanguage(subject_name, 0, 'choose-language'))
exp.add_experiment_info('language: ')
exp.add_experiment_info(language) # Save Subject Code
exp.add_experiment_info('Image categories (original order; src/config.py order): ')
exp.add_experiment_info(str(classPictures))
# 0. Starting Experiment
control.start(exp, auto_create_subject_id=True, skip_ready_screen=True)
bids_datafile, bids_eventfile = rename_output_files_to_BIDS(subject_name, session, experimentName,
io.defaults.datafile_directory,
io.defaults.eventfile_directory)
exp.data.rename(bids_datafile)
exp.events.rename(bids_eventfile)
exp.add_experiment_info(['StartExp: {}'.format(exp.clock.time)]) # Add sync info
mouse = io.Mouse() # Create Mouse instance
mouse.set_logging(True) # Log mouse
mouse.hide_cursor(True, True) # Hide cursor
setCursor(arrow)
bs = stimuli.BlankScreen(bgColor) # Create blank screen
subject_file = 'soundsVolumeAdjustmentIndB_' + subject_name + '.pkl'
with open(io.defaults.datafile_directory + os.path.sep + subject_file, 'wb') as f:
pickle.dump([0] * len(sounds), f)
soundsVolumeAdjustmentIndB = create_temp_sound_files(subject_name, io.defaults.datafile_directory)
# 1. PLOT INTERFACE
up_volume_box_contour = stimuli.Rectangle(size=(3*textSize, 3*textSize),
position=(-2.5*cardSize[0], 0),
colour=constants.C_WHITE)
up_volume_box = stimuli.Shape(position=(-2.5*cardSize[0], 0),
vertex_list=misc.geometry.vertices_cross((2*textSize, 2*textSize), textSize/2),
colour=textColor)
lower_volume_box_contour = stimuli.Rectangle(size=(3*textSize, 3*textSize),
position=(2.5*cardSize[0], 0),
colour=constants.C_WHITE)
lower_volume_box = stimuli.Shape(position=(2.5*cardSize[0], 0),
vertex_list=[(2*textSize, 0), (0, -textSize/2), (-2*textSize, 0)],
colour=textColor)
up_volume_box_contour.plot(bs)
lower_volume_box_contour.plot(bs)
up_volume_box.plot(bs)
lower_volume_box.plot(bs)
bs.present(False, True)
# 2. WAIT FOR TTL
instructions_ttl = stimuli.TextLine(ttl_instructions_text[language],
position=(
0, -windowSize[1] / float(2) + (cardSize[1]) / float(2)),
text_font=None, text_size=textSize, text_bold=None, text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=bgColor,
max_width=None)
instructionRectangle = stimuli.Rectangle(size=(windowSize[0], textSize),
position=(0, -windowSize[1]/float(2) + (cardSize[1])/float(2)),
colour=bgColor)
instructionRectangle.plot(bs)
instructions_ttl.plot(bs)
bs.present(False, True)
wait_for_ttl_keyboard()
exp.add_experiment_info(['TTL_RECEIVED_timing_{}'.format(exp.clock.time)])
instructionRectangle.plot(bs)
bs.present(False, True)
for sound_index in range(len(sounds)):
sound_title_box = stimuli.TextLine(text=' ' + soundNames[language][sound_index] + ' ',
position=(0, windowSize[1] / float(2) - cardSize[1]),
text_font=None, text_size=textSize, text_bold=None, text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=cardColor,
max_width=None)
sound_title_box_hide_rectangle = stimuli.Rectangle(size=(windowSize[0], textSize*1.2),
position=(0, windowSize[1] / float(2) - cardSize[1]),
colour=bgColor)
if sound_index == len(sounds) - 1:
next_sound_or_end_text = ' End '
else:
next_sound_or_end_text = ' Next Sound '
next_sound_or_end_box = stimuli.TextLine(text=next_sound_or_end_text,
position=(0, -windowSize[1] / float(2) + cardSize[1]),
text_font=None, text_size=textSize, text_bold=None, text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=cardColor,
max_width=None)
next_sound_or_end_box_hide_rectangle = stimuli.Rectangle(size=(windowSize[0], textSize*1.2),
position=(0, -windowSize[1] / float(2) + cardSize[1]),
colour=bgColor)
sound_title_box_hide_rectangle.plot(bs)
sound_title_box.plot(bs)
next_sound_or_end_box_hide_rectangle.plot(bs)
next_sound_or_end_box.plot(bs)
bs.present(False, True)
play_sound(sound_index)
move_on = False
while not move_on:
mouse.show_cursor(True, True)
start = get_time()
rt, position = readMouse(start, mouseButton, responseTime)
mouse.hide_cursor(True, True)
if position is not None:
if next_sound_or_end_box.overlapping_with_position(position):
next_sound_or_end_box = stimuli.TextLine(text=next_sound_or_end_text,
position=(0, -windowSize[1] / float(2) + cardSize[1]),
text_font=None, text_size=textSize, text_bold=None,
text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=clickColor,
max_width=None)
next_sound_or_end_box.plot(bs)
bs.present(False, True)
exp.clock.wait(clicPeriod)
next_sound_or_end_box = stimuli.TextLine(text=next_sound_or_end_text,
position=(0, -windowSize[1] / float(2) + cardSize[1]),
text_font=None, text_size=textSize, text_bold=None,
text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=bgColor,
max_width=None)
next_sound_or_end_box.plot(bs)
bs.present(False, True)
move_on = True
elif lower_volume_box_contour.overlapping_with_position(position):
soundsVolumeAdjustmentIndB[sound_index] -= 5
change_volume(sound_index, volume_adjustment_db=soundsVolumeAdjustmentIndB[sound_index])
play_sound(sound_index)
elif up_volume_box_contour.overlapping_with_position(position):
if soundsVolumeAdjustmentIndB[sound_index] + 5 <= 0:
soundsVolumeAdjustmentIndB[sound_index] += 5
change_volume(sound_index, volume_adjustment_db=soundsVolumeAdjustmentIndB[sound_index])
play_sound(sound_index)
# Saving sounds adjustment: (this script is supposed to be executed in src)
exp.add_experiment_info('Sounds Volume adjustment (in dB):')
exp.add_experiment_info(str(soundsVolumeAdjustmentIndB))
with open(io.defaults.datafile_directory + os.path.sep + subject_file, 'wb') as f:
pickle.dump(soundsVolumeAdjustmentIndB, f)
control.end()
delete_temp_files()
| import sys
import pickle
import os
from expyriment import control, misc, design, stimuli, io
from expyriment.misc import constants
from expyriment.misc._timer import get_time
from declarativeTask3.config import debug, windowMode, windowSize, classPictures, sounds, \
bgColor, arrow, textSize, textColor, cardColor, responseTime, mouseButton, clickColor, clicPeriod
from declarativeTask3.config import experiment_session
from declarativeTask3.ld_utils import getLanguage, setCursor, cardSize, readMouse, rename_output_files_to_BIDS
from declarativeTask3.ld_stimuli_names import soundNames, ttl_instructions_text
from declarativeTask3.ld_sound import change_volume, play_sound, delete_temp_files, create_temp_sound_files
from declarativeTask3.ttl_catch_keyboard import wait_for_ttl_keyboard
if not windowMode: # Check WindowMode and Resolution
control.defaults.window_mode = windowMode
control.defaults.window_size = misc.get_monitor_resolution()
windowSize = control.defaults.window_size
else:
control.defaults.window_mode = windowMode
control.defaults.window_size = windowSize
if debug:
control.set_develop_mode(on=True, intensive_logging=False, skip_wait_methods=True)
arguments = str(''.join(sys.argv[1:])).split(',') # Get arguments - experiment name and subject
experimentName = arguments[0]
subject_name = arguments[1]
exp = design.Experiment(experimentName) # Save experiment name
session = experiment_session[experimentName]
session_dir = os.path.normpath(os.path.join('sourcedata', 'sub-' + subject_name, 'ses-' + session))
output_dir = os.path.normpath(os.path.join(session_dir, 'beh'))
if not os.path.isdir(session_dir):
os.mkdir(session_dir)
io.defaults.datafile_directory = output_dir
io.defaults.eventfile_directory = output_dir
control.initialize(exp)
exp.add_experiment_info('Subject: ')
exp.add_experiment_info(subject_name)
language = str(getLanguage(subject_name, 0, 'choose-language'))
exp.add_experiment_info('language: ')
exp.add_experiment_info(language) # Save Subject Code
exp.add_experiment_info('Image categories (original order; src/config.py order): ')
exp.add_experiment_info(str(classPictures))
# 0. Starting Experiment
control.start(exp, auto_create_subject_id=True, skip_ready_screen=True)
bids_datafile, bids_eventfile = rename_output_files_to_BIDS(subject_name, session, experimentName,
io.defaults.datafile_directory,
io.defaults.eventfile_directory)
exp.data.rename(bids_datafile)
exp.events.rename(bids_eventfile)
exp.add_experiment_info(['StartExp: {}'.format(exp.clock.time)]) # Add sync info
mouse = io.Mouse() # Create Mouse instance
mouse.set_logging(True) # Log mouse
mouse.hide_cursor(True, True) # Hide cursor
setCursor(arrow)
bs = stimuli.BlankScreen(bgColor) # Create blank screen
subject_file = 'soundsVolumeAdjustmentIndB_' + subject_name + '.pkl'
with open(io.defaults.datafile_directory + os.path.sep + subject_file, 'wb') as f:
pickle.dump([0] * len(sounds), f)
soundsVolumeAdjustmentIndB = create_temp_sound_files(subject_name, io.defaults.datafile_directory)
# 1. PLOT INTERFACE
up_volume_box_contour = stimuli.Rectangle(size=(3*textSize, 3*textSize),
position=(-2.5*cardSize[0], 0),
colour=constants.C_WHITE)
up_volume_box = stimuli.Shape(position=(-2.5*cardSize[0], 0),
vertex_list=misc.geometry.vertices_cross((2*textSize, 2*textSize), textSize/2),
colour=textColor)
lower_volume_box_contour = stimuli.Rectangle(size=(3*textSize, 3*textSize),
position=(2.5*cardSize[0], 0),
colour=constants.C_WHITE)
lower_volume_box = stimuli.Shape(position=(2.5*cardSize[0], 0),
vertex_list=[(2*textSize, 0), (0, -textSize/2), (-2*textSize, 0)],
colour=textColor)
up_volume_box_contour.plot(bs)
lower_volume_box_contour.plot(bs)
up_volume_box.plot(bs)
lower_volume_box.plot(bs)
bs.present(False, True)
# 2. WAIT FOR TTL
instructions_ttl = stimuli.TextLine(ttl_instructions_text[language],
position=(
0, -windowSize[1] / float(2) + (cardSize[1]) / float(2)),
text_font=None, text_size=textSize, text_bold=None, text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=bgColor,
max_width=None)
instructionRectangle = stimuli.Rectangle(size=(windowSize[0], textSize),
position=(0, -windowSize[1]/float(2) + (cardSize[1])/float(2)),
colour=bgColor)
instructionRectangle.plot(bs)
instructions_ttl.plot(bs)
bs.present(False, True)
wait_for_ttl_keyboard()
exp.add_experiment_info(['TTL_RECEIVED_timing_{}'.format(exp.clock.time)])
instructionRectangle.plot(bs)
bs.present(False, True)
for sound_index in range(len(sounds)):
sound_title_box = stimuli.TextLine(text=' ' + soundNames[language][sound_index] + ' ',
position=(0, windowSize[1] / float(2) - cardSize[1]),
text_font=None, text_size=textSize, text_bold=None, text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=cardColor,
max_width=None)
sound_title_box_hide_rectangle = stimuli.Rectangle(size=(windowSize[0], textSize*1.2),
position=(0, windowSize[1] / float(2) - cardSize[1]),
colour=bgColor)
if sound_index == len(sounds) - 1:
next_sound_or_end_text = ' End '
else:
next_sound_or_end_text = ' Next Sound '
next_sound_or_end_box = stimuli.TextLine(text=next_sound_or_end_text,
position=(0, -windowSize[1] / float(2) + cardSize[1]),
text_font=None, text_size=textSize, text_bold=None, text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=cardColor,
max_width=None)
next_sound_or_end_box_hide_rectangle = stimuli.Rectangle(size=(windowSize[0], textSize*1.2),
position=(0, -windowSize[1] / float(2) + cardSize[1]),
colour=bgColor)
sound_title_box_hide_rectangle.plot(bs)
sound_title_box.plot(bs)
next_sound_or_end_box_hide_rectangle.plot(bs)
next_sound_or_end_box.plot(bs)
bs.present(False, True)
play_sound(sound_index)
move_on = False
while not move_on:
mouse.show_cursor(True, True)
start = get_time()
rt, position = readMouse(start, mouseButton, responseTime)
mouse.hide_cursor(True, True)
if position is not None:
if next_sound_or_end_box.overlapping_with_position(position):
next_sound_or_end_box = stimuli.TextLine(text=next_sound_or_end_text,
position=(0, -windowSize[1] / float(2) + cardSize[1]),
text_font=None, text_size=textSize, text_bold=None,
text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=clickColor,
max_width=None)
next_sound_or_end_box.plot(bs)
bs.present(False, True)
exp.clock.wait(clicPeriod)
next_sound_or_end_box = stimuli.TextLine(text=next_sound_or_end_text,
position=(0, -windowSize[1] / float(2) + cardSize[1]),
text_font=None, text_size=textSize, text_bold=None,
text_italic=None,
text_underline=None, text_colour=textColor,
background_colour=bgColor,
max_width=None)
next_sound_or_end_box.plot(bs)
bs.present(False, True)
move_on = True
elif lower_volume_box_contour.overlapping_with_position(position):
soundsVolumeAdjustmentIndB[sound_index] -= 5
change_volume(sound_index, volume_adjustment_db=soundsVolumeAdjustmentIndB[sound_index])
play_sound(sound_index)
elif up_volume_box_contour.overlapping_with_position(position):
if soundsVolumeAdjustmentIndB[sound_index] + 5 <= 0:
soundsVolumeAdjustmentIndB[sound_index] += 5
change_volume(sound_index, volume_adjustment_db=soundsVolumeAdjustmentIndB[sound_index])
play_sound(sound_index)
# Saving sounds adjustment: (this script is supposed to be executed in src)
exp.add_experiment_info('Sounds Volume adjustment (in dB):')
exp.add_experiment_info(str(soundsVolumeAdjustmentIndB))
with open(io.defaults.datafile_directory + os.path.sep + subject_file, 'wb') as f:
pickle.dump(soundsVolumeAdjustmentIndB, f)
control.end()
delete_temp_files()
| en | 0.720387 | # Check WindowMode and Resolution # Get arguments - experiment name and subject # Save experiment name # Save Subject Code # 0. Starting Experiment # Add sync info # Create Mouse instance # Log mouse # Hide cursor # Create blank screen # 1. PLOT INTERFACE # 2. WAIT FOR TTL # Saving sounds adjustment: (this script is supposed to be executed in src) | 1.931413 | 2 |
setup.py | Raijeku/qmeans | 0 | 6623975 | <filename>setup.py
"""Module including package metadata"""
from setuptools import setup
with open("README.md", 'r', encoding="utf-8") as f:
long_description = f.read()
setup(
name='qmeans',
version='0.1.1',
description='Q-Means algorithm implementation using Qiskit compatible with Scikit-Learn.',
license="Apache-2.0",
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url="http://qmeans.readthedocs.io/",
packages=['qmeans'],
install_requires=['wheel', 'twine', 'setuptools', 'numpy', 'pandas', 'qiskit', 'sklearn', 'pytest', 'hypothesis', 'sphinx', 'sphinx-rtd-theme', 'sphinxcontrib-napoleon'],
)
| <filename>setup.py
"""Module including package metadata"""
from setuptools import setup
with open("README.md", 'r', encoding="utf-8") as f:
long_description = f.read()
setup(
name='qmeans',
version='0.1.1',
description='Q-Means algorithm implementation using Qiskit compatible with Scikit-Learn.',
license="Apache-2.0",
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url="http://qmeans.readthedocs.io/",
packages=['qmeans'],
install_requires=['wheel', 'twine', 'setuptools', 'numpy', 'pandas', 'qiskit', 'sklearn', 'pytest', 'hypothesis', 'sphinx', 'sphinx-rtd-theme', 'sphinxcontrib-napoleon'],
)
| en | 0.794808 | Module including package metadata | 1.333227 | 1 |
hard-gists/931984/snippet.py | jjhenkel/dockerizeme | 21 | 6623976 | #!/usr/bin/env python
import MySQLdb
import os, sys
import pprint
pp = pprint.PrettyPrinter()
mysql_host = "localhost"
mysql_user = "dbusername"
mysql_pass = "<PASSWORD>"
mysql_db = "powerdns"
#ClientIP, ClientMac, host-decl-name
if (len(sys.argv) > 1):
command = sys.argv[1]
clientIP = sys.argv[2]
clientMac = sys.argv[3]
hostname = sys.argv[4]
if command == "commit":
f = open("/tmp/leases",'a')
s = "Leased: %s to %s\n" % (clientIP, hostname)
f.write(s)
f.flush()
f.close()
db = MySQLdb.connect(host=mysql_host, user=mysql_user, passwd=<PASSWORD>, db=mysql_db)
cursor = db.cursor()
cursor.execute("INSERT INTO records (domain_id,name,type,content,ttl,prio,change_date) VALUES (%s,%s,%s,%s,%s,%s,UNIX_TIMESTAMP(NOW()))", [1,hostname,"A",clientIP,3600,0])
# pp.pprint(cursor.__dict__)
cursor.close()
db.commit()
db.close()
elif command == "release":
f = open("/tmp/leases",'a')
s = "Released: %s from %s\n" % (clientIP, hostname)
f.write(s)
f.flush()
f.close()
db = MySQLdb.connect(host=mysql_host, user=mysql_user, passwd=mysql_pass, db=mysql_db)
cursor = db.cursor()
cursor.execute("DELETE FROM records WHERE content = %s AND name = %s",[clientIP,hostname])
#pp.pprint(cursor.__dict__)
db.commit()
db.close()
elif command == "expiry":
f = open("/tmp/leases",'a')
s = "Expired: %s from %s\n" % (clientIP, hostname)
f.write(s)
f.flush()
f.close()
db = MySQLdb.connect(host=mysql_host, user=mysql_user, passwd=mysql_pass, db=mysql_db)
cursor = db.cursor()
cursor.execute("DELETE FROM records WHERE content = %s AND name = %s",[clientIP,hostname])
#pp.pprint(cursor.__dict__)
db.commit()
db.close()
| #!/usr/bin/env python
import MySQLdb
import os, sys
import pprint
pp = pprint.PrettyPrinter()
mysql_host = "localhost"
mysql_user = "dbusername"
mysql_pass = "<PASSWORD>"
mysql_db = "powerdns"
#ClientIP, ClientMac, host-decl-name
if (len(sys.argv) > 1):
command = sys.argv[1]
clientIP = sys.argv[2]
clientMac = sys.argv[3]
hostname = sys.argv[4]
if command == "commit":
f = open("/tmp/leases",'a')
s = "Leased: %s to %s\n" % (clientIP, hostname)
f.write(s)
f.flush()
f.close()
db = MySQLdb.connect(host=mysql_host, user=mysql_user, passwd=<PASSWORD>, db=mysql_db)
cursor = db.cursor()
cursor.execute("INSERT INTO records (domain_id,name,type,content,ttl,prio,change_date) VALUES (%s,%s,%s,%s,%s,%s,UNIX_TIMESTAMP(NOW()))", [1,hostname,"A",clientIP,3600,0])
# pp.pprint(cursor.__dict__)
cursor.close()
db.commit()
db.close()
elif command == "release":
f = open("/tmp/leases",'a')
s = "Released: %s from %s\n" % (clientIP, hostname)
f.write(s)
f.flush()
f.close()
db = MySQLdb.connect(host=mysql_host, user=mysql_user, passwd=mysql_pass, db=mysql_db)
cursor = db.cursor()
cursor.execute("DELETE FROM records WHERE content = %s AND name = %s",[clientIP,hostname])
#pp.pprint(cursor.__dict__)
db.commit()
db.close()
elif command == "expiry":
f = open("/tmp/leases",'a')
s = "Expired: %s from %s\n" % (clientIP, hostname)
f.write(s)
f.flush()
f.close()
db = MySQLdb.connect(host=mysql_host, user=mysql_user, passwd=mysql_pass, db=mysql_db)
cursor = db.cursor()
cursor.execute("DELETE FROM records WHERE content = %s AND name = %s",[clientIP,hostname])
#pp.pprint(cursor.__dict__)
db.commit()
db.close()
| en | 0.291083 | #!/usr/bin/env python #ClientIP, ClientMac, host-decl-name # pp.pprint(cursor.__dict__) #pp.pprint(cursor.__dict__) #pp.pprint(cursor.__dict__) | 2.36292 | 2 |
utils/helper_functions.py | Sunnigen/pywave-function-collapse | 2 | 6623977 | <filename>utils/helper_functions.py
from collections import Counter
from math import sqrt
import random
import string
from numpy.random import choice as WeightedChoice
def distance_value(p0, p1, range_val):
dist = sqrt(((p0[0] - p1[0]) ** 2) + ((p0[1] - p1[1]) ** 2))
dist_modifier = range_val - dist
# print('Distance from origin: (%s, %s), to area: (%s, %s) is %s' % (p1[0], p1[1], p0[0], p0[1], dist))
# print('range_val = %s' % range_val)
# print('dist_modifier = %s' % dist_modifier)
if dist_modifier < 0:
dist_modifier = 0
return dist_modifier
def find_opposite(side):
side = side.lower()
# North, East, South, West <-- directions
# 0, 1, 2, 3 <-- indexes
# return index with opposite direction
if side == 'north':
return 0, 2 # return 'south'
if side == 'east':
return 1, 3 # return 'west'
if side == 'south':
return 2, 0 # return 'north'
if side == 'west':
return 3, 1 # return 'east'
def direction_from_origin(new, origin):
new_dir = 0
if new > origin:
new_dir -= 1
if new < origin:
new_dir += 1
return new_dir
def super_print(s):
print('\n%s\n=== %s ===\n%s' % ('=' * (len(s) + 8), s.title(), '=' * (len(s) + 8)))
def generate_string(length=6, chars=string.ascii_uppercase + string.digits):
# Returns random string of letters and number characters
# string = ''.join(random.choice(chars) for _ in range(length))
return ''.join(random.choice(chars) for _ in range(length))
# def flatten(seq, container=None):
# # Flatten arbitrary nesting!
# # Note: Recursive genius!
# if container is None:
# container = []
#
# for s in seq:
# try:
# iter(s) # check if it's iterable
# except TypeError:
# container.append(s)
# else:
# flatten(s, container)
#
# return container
def list_intersect(lists):
# Matching Values Between Lists
result = lists[0]
if len(lists) > 1:
for l in lists[1:]:
result = set(result).intersection(l)
# print('list_intersect result:', result)
return list(result)
def dict_combine(dicts):
# Combine Dictionaries
results = Counter()
for dictionary in dicts:
results += Counter(dictionary)
return results
def dict_intersect(dicts):
# return dicts[0]
# Increment and Find Common Keys Between Dictionaries
# print("number of dicts: ", len(dicts))
comm_keys = dicts[0].keys()
for d in dicts[1:]:
# intersect keys first
comm_keys &= d.keys()
# then build a result dict with nested comprehension
# result = {key:{d[key] for d in dicts} for key in comm_keys}
results = {}
for key in comm_keys:
# base_probability = self.base_probability[str(key)][side]
for d in dicts:
# if key in results:
# results[key] += d[key]
# else:
# results[key] = d[key]
results[key] = d[key]
return results
def weighted_choice(dict):
# Normalize List of Probabilities
keys = list(dict.keys())
values = list(dict.values())
probabilities = []
total = sum(values)
if total == 0:
print('No tile can be found due to small probabilities!')
return 1
for val in values:
# Remove Possibility if Less than Half
# if val <= total/2:
# index = values.index(fval)
# keys.pop(index)
# else:
try:
probabilities.append(val/total)
except:
print("Error in calculating probabilities!!!")
print('val/total: %s/%s' % (val, total))
val = 0.01
total = len(dict.keys())
probabilities.append(val/total)
# print('keys:', keys)
# print('probabilities:', probabilities)
result = WeightedChoice(keys, p=probabilities)
# print("keys: ", keys)
return random.choice(keys)
def determine_probability_value(x, y, origin_x, origin_y, tile_range):
return 1
# How Far Selected Tile is from Origin Tile
value = distance_value((x, y), (origin_x, origin_y), tile_range)
return value | <filename>utils/helper_functions.py
from collections import Counter
from math import sqrt
import random
import string
from numpy.random import choice as WeightedChoice
def distance_value(p0, p1, range_val):
dist = sqrt(((p0[0] - p1[0]) ** 2) + ((p0[1] - p1[1]) ** 2))
dist_modifier = range_val - dist
# print('Distance from origin: (%s, %s), to area: (%s, %s) is %s' % (p1[0], p1[1], p0[0], p0[1], dist))
# print('range_val = %s' % range_val)
# print('dist_modifier = %s' % dist_modifier)
if dist_modifier < 0:
dist_modifier = 0
return dist_modifier
def find_opposite(side):
side = side.lower()
# North, East, South, West <-- directions
# 0, 1, 2, 3 <-- indexes
# return index with opposite direction
if side == 'north':
return 0, 2 # return 'south'
if side == 'east':
return 1, 3 # return 'west'
if side == 'south':
return 2, 0 # return 'north'
if side == 'west':
return 3, 1 # return 'east'
def direction_from_origin(new, origin):
new_dir = 0
if new > origin:
new_dir -= 1
if new < origin:
new_dir += 1
return new_dir
def super_print(s):
print('\n%s\n=== %s ===\n%s' % ('=' * (len(s) + 8), s.title(), '=' * (len(s) + 8)))
def generate_string(length=6, chars=string.ascii_uppercase + string.digits):
# Returns random string of letters and number characters
# string = ''.join(random.choice(chars) for _ in range(length))
return ''.join(random.choice(chars) for _ in range(length))
# def flatten(seq, container=None):
# # Flatten arbitrary nesting!
# # Note: Recursive genius!
# if container is None:
# container = []
#
# for s in seq:
# try:
# iter(s) # check if it's iterable
# except TypeError:
# container.append(s)
# else:
# flatten(s, container)
#
# return container
def list_intersect(lists):
# Matching Values Between Lists
result = lists[0]
if len(lists) > 1:
for l in lists[1:]:
result = set(result).intersection(l)
# print('list_intersect result:', result)
return list(result)
def dict_combine(dicts):
# Combine Dictionaries
results = Counter()
for dictionary in dicts:
results += Counter(dictionary)
return results
def dict_intersect(dicts):
# return dicts[0]
# Increment and Find Common Keys Between Dictionaries
# print("number of dicts: ", len(dicts))
comm_keys = dicts[0].keys()
for d in dicts[1:]:
# intersect keys first
comm_keys &= d.keys()
# then build a result dict with nested comprehension
# result = {key:{d[key] for d in dicts} for key in comm_keys}
results = {}
for key in comm_keys:
# base_probability = self.base_probability[str(key)][side]
for d in dicts:
# if key in results:
# results[key] += d[key]
# else:
# results[key] = d[key]
results[key] = d[key]
return results
def weighted_choice(dict):
# Normalize List of Probabilities
keys = list(dict.keys())
values = list(dict.values())
probabilities = []
total = sum(values)
if total == 0:
print('No tile can be found due to small probabilities!')
return 1
for val in values:
# Remove Possibility if Less than Half
# if val <= total/2:
# index = values.index(fval)
# keys.pop(index)
# else:
try:
probabilities.append(val/total)
except:
print("Error in calculating probabilities!!!")
print('val/total: %s/%s' % (val, total))
val = 0.01
total = len(dict.keys())
probabilities.append(val/total)
# print('keys:', keys)
# print('probabilities:', probabilities)
result = WeightedChoice(keys, p=probabilities)
# print("keys: ", keys)
return random.choice(keys)
def determine_probability_value(x, y, origin_x, origin_y, tile_range):
return 1
# How Far Selected Tile is from Origin Tile
value = distance_value((x, y), (origin_x, origin_y), tile_range)
return value | en | 0.551135 | # print('Distance from origin: (%s, %s), to area: (%s, %s) is %s' % (p1[0], p1[1], p0[0], p0[1], dist)) # print('range_val = %s' % range_val) # print('dist_modifier = %s' % dist_modifier) # North, East, South, West <-- directions # 0, 1, 2, 3 <-- indexes # return index with opposite direction # return 'south' # return 'west' # return 'north' # return 'east' # Returns random string of letters and number characters # string = ''.join(random.choice(chars) for _ in range(length)) # def flatten(seq, container=None): # # Flatten arbitrary nesting! # # Note: Recursive genius! # if container is None: # container = [] # # for s in seq: # try: # iter(s) # check if it's iterable # except TypeError: # container.append(s) # else: # flatten(s, container) # # return container # Matching Values Between Lists # print('list_intersect result:', result) # Combine Dictionaries # return dicts[0] # Increment and Find Common Keys Between Dictionaries # print("number of dicts: ", len(dicts)) # intersect keys first # then build a result dict with nested comprehension # result = {key:{d[key] for d in dicts} for key in comm_keys} # base_probability = self.base_probability[str(key)][side] # if key in results: # results[key] += d[key] # else: # results[key] = d[key] # Normalize List of Probabilities # Remove Possibility if Less than Half # if val <= total/2: # index = values.index(fval) # keys.pop(index) # else: # print('keys:', keys) # print('probabilities:', probabilities) # print("keys: ", keys) # How Far Selected Tile is from Origin Tile | 3.554475 | 4 |
lib/utils_plots.py | octaviomtz/Growing-Neural-Cellular-Automata | 0 | 6623978 | import os
import numpy as np
import matplotlib.pyplot as plt
import wandb
from lib.utils_vis import SamplePool, to_alpha_1ch, to_rgb_1ch
def visualize_batch(x0, x, save=True, text=''):
    """Show every sample of a batch before (top row) and after (bottom row)
    the CA update steps, as RGB renderings of the single-channel grids.

    Args:
        x0: batch of grids before the update, shape (batch, H, W, channels).
        x: batch of grids after the update, same shape as x0.
        save: if True, write the figure to ``visualize_batch{text}.png``.
        text: suffix appended to the output filename.
    """
    plt.style.use("Solarize_Light2")
    rendered_before = to_rgb_1ch(x0)
    rendered_after = to_rgb_1ch(x)
    batch_size = x0.shape[0]
    print('batch (before/after):')
    plt.figure(figsize=[15, 5])
    # Two rows of subplots: row 0 = before, row 1 = after.
    for row, rendered in enumerate((rendered_before, rendered_after)):
        for col in range(batch_size):
            plt.subplot(2, batch_size, row * batch_size + col + 1)
            plt.imshow(np.squeeze(rendered[col]))
            plt.axis('off')
    if save:
        plt.savefig(f'visualize_batch{text}.png')
def plot_loss(loss_log, SCALE_GROWTH, loss_log_base=-1, epochs=2000, save=True, save_wandb=False, text=''):
    """Plot the log10 training-loss history of the scaled model against the
    baseline (scale=1) run.

    Args:
        loss_log: per-epoch losses of the scaled run.
        SCALE_GROWTH: growth-scale factor, used in the legend label.
        loss_log_base: per-epoch losses of the scale=1 baseline run.
        epochs: upper limit of the x axis.
        save: if True, write the figure to ``loss_training{text}.png``.
        save_wandb: if True, also log the figure image to wandb.
        text: suffix appended to the output filename / wandb key.
    """
    fig, ax = plt.subplots(figsize=(10, 4))
    ax.set_title('Loss history (log10)')
    ax.plot(np.log10(loss_log_base), '.', alpha=0.1, label='base scale=1')
    ax.plot(np.log10(loss_log), '.', alpha=0.1, c='r', label=f'scale={SCALE_GROWTH:.02f}')
    ax.set_ylim([-5, np.max(loss_log)])
    ax.set_xlim([0, epochs])
    ax.legend()
    ax.set_xlabel('epochs')
    ax.set_ylabel('log10(MSE)')
    if save:
        plt.savefig(f'loss_training{text}.png')
    if save_wandb:
        wandb.log({f'loss_training{text}.png': wandb.Image(plt)})
def plot_loss_max_intensity_and_mse(loss_log, loss_log_base, SCALE_GROWTH, SCALE_GROWTH_SYN, grow_max, mse_recons, max_base, max_base2, mse_base, mse_base2, epochs=2000, save=True, save_wandb=False, text=''):
    """Three-panel summary figure: log10 training loss (top, full width),
    max intensity during synthesis (bottom-left) and reconstruction MSE
    (bottom-right), each compared against the scale=1 baseline runs.

    Args:
        loss_log, loss_log_base: per-epoch training losses (scaled / baseline).
        SCALE_GROWTH, SCALE_GROWTH_SYN: scale factors, used in legend labels.
        grow_max: max intensity per reconstruction step of the scaled run.
        mse_recons: reconstruction MSE per step of the scaled run.
        max_base, max_base2: baseline max-intensity curves (10k / 2k epochs).
        mse_base, mse_base2: baseline MSE curves (10k / 2k epochs).
        epochs: upper limit of the loss panel's x axis.
        save: if True, write ``train_loss_and_synthesis{text}.png``.
        save_wandb: if True, also log the figure image to wandb.
        text: suffix appended to the output filename / wandb key.
    """
    plt.style.use("Solarize_Light2")
    fig = plt.figure(figsize=(12, 8))
    grid = fig.add_gridspec(2, 2)
    ax_loss = fig.add_subplot(grid[0, :])
    ax_max = fig.add_subplot(grid[1, 0])
    ax_mse = fig.add_subplot(grid[1, 1])
    scales_label = f'scales={SCALE_GROWTH:.02f}_{SCALE_GROWTH_SYN:.02f}'
    # Top panel: log10 training loss, scaled model vs. baseline.
    ax_loss.plot(np.log10(loss_log_base), '.', alpha=0.1, label='base scale=1')
    ax_loss.plot(np.log10(loss_log), '.', alpha=0.1, c='r', label=f'scale={SCALE_GROWTH:.02f}')
    ax_loss.set_ylim([-5, np.max(loss_log)])
    ax_loss.set_xlim([0, epochs])
    ax_loss.legend()
    ax_loss.set_xlabel('epochs')
    ax_loss.set_ylabel('log10(MSE)')
    # Bottom-left panel: maximum intensity reached while growing.
    ax_max.plot(max_base, label='(10k) scale = 1', alpha=.5)
    ax_max.plot(max_base2, label='(2k) scale = 1', alpha=.5)
    ax_max.plot(grow_max, label=scales_label)
    ax_max.legend(loc='lower left')
    ax_max.set_xlabel('reconstruction epochs (x2)')
    ax_max.set_ylabel('max intensity')
    # Bottom-right panel: reconstruction MSE on a log scale.
    ax_mse.semilogy(mse_base, label='(10k) scale = 1', alpha=.5)
    ax_mse.semilogy(mse_base2, label='(2k) scale = 1', alpha=.5)
    ax_mse.semilogy(mse_recons, label=scales_label)
    ax_mse.legend(loc='lower left')
    ax_mse.set_xlabel('reconstruction epochs (x2)')
    ax_mse.set_ylabel('MSE')
    fig.tight_layout()
    if save:
        plt.savefig(f'train_loss_and_synthesis{text}.png')
    if save_wandb:
        wandb.log({f'train_loss_and_synthesis{text}.png': wandb.Image(plt)})
def plot_max_intensity_and_mse(grow_max, mse_recons, SCALE_GROWTH, SCALE_GROWTH_SYN, max_base=-1, max_base2=-1, mse_base=-1, mse_base2=-1, save=True, save_wandb=False, text=''):
    """Two-panel figure: max intensity (left) and reconstruction MSE
    (right, log y-axis) versus the 10k- and 2k-epoch scale=1 baselines.

    The baseline arguments default to the scalar sentinel -1 meaning
    "no baseline available"; sentinel baselines are now skipped instead of
    being drawn as a stray point at y=-1 (defect in the original).
    """
    # %% PLOT MAX INTENSITY AND MSE
    plt.style.use("Solarize_Light2")
    fig, ax = plt.subplots(1,2, figsize=(12,4))
    # Only draw baselines that are actual sequences (ndim > 0).
    if np.ndim(max_base) > 0:
        ax[0].plot(max_base, label='(10k) scale = 1', alpha=.3)
    if np.ndim(max_base2) > 0:
        ax[0].plot(max_base2, label='(2k) scale = 1', alpha=.3)
    ax[0].plot(grow_max, label=f'scales={SCALE_GROWTH:.02f}_{SCALE_GROWTH_SYN:.02f}')
    ax[0].legend(loc = 'lower left')
    ax[0].set_xlabel('reconstruction epochs')
    ax[0].set_ylabel('max intensity')
    if np.ndim(mse_base) > 0:
        ax[1].semilogy(mse_base, label='(10k) scale = 1', alpha=.3)
    if np.ndim(mse_base2) > 0:
        ax[1].semilogy(mse_base2, label='(2k) scale = 1', alpha=.3)
    ax[1].semilogy(mse_recons, label=f'scales={SCALE_GROWTH:.02f}_{SCALE_GROWTH_SYN:.02f}')
    ax[1].legend(loc = 'lower left')
    ax[1].set_xlabel('reconstruction epochs')
    ax[1].set_ylabel('MSE')
    fig.tight_layout()
    if save:
        plt.savefig(f'max_intensity_and_mse{text}.png')
    if save_wandb:
        wandb.log({f'max_intensity_and_mse{text}.png': wandb.Image(plt)})
def plot_lesion_growing(grow_sel, target_img, ITER_SAVE, save=True, text=''):
    """Show up to 30 saved snapshots of a growing lesion in a 5x6 grid.

    grow_sel   : sequence of 2D frames saved during growth.
    target_img : target image; its channel 0 replaces the first grid cell
                 for visual comparison.
    ITER_SAVE  : number of saved frames to display.
    """
    fig, ax = plt.subplots(5,6, figsize=(18,12))
    # BUG FIX: clamp to the 30 available axes and to the number of frames
    # actually saved; the original raised IndexError when ITER_SAVE
    # exceeded either bound.
    n_show = min(ITER_SAVE, len(ax.flat), len(grow_sel))
    for i in range(n_show):
        ax.flat[i].imshow(grow_sel[i], vmin=0, vmax=1)
        ax.flat[i].axis('off')
    # First cell shows the target (channel 0) instead of frame 0.
    ax.flat[0].imshow(target_img[...,0], vmin=0, vmax=1)
    fig.tight_layout()
    if save:
        plt.savefig(f'lesion_growing{text}.png')
def load_baselines(path_orig, extra_text, path='outputs/baselines/'):
    """Load the six scale=1 baseline curves from '{path_orig}/{path}'.

    Returns, in fixed order: max-intensity, MSE and training-loss arrays
    for the 2k-epoch run, then the same three for the 10k-epoch run.
    A missing file yields the placeholder list [.001, .001] instead.
    """
    baseline_keys = (
        'max_syn_SG=1_ep=2k', 'mse_syn_SG=1_ep=2k', 'train_loss_SG=1_ep=2k',
        'max_syn_SG=1_ep=10k', 'mse_syn_SG=1_ep=10k', 'train_loss_SG=1_ep=10k',
    )
    available = set(os.listdir(f'{path_orig}/{path}'))

    def _load_or_default(fname):
        # Fall back to a tiny placeholder curve when the baseline is absent.
        if fname in available:
            return np.load(f'{path_orig}/{path}{fname}')
        return [.001, .001]

    return [_load_or_default(f'{key}{extra_text}.npy') for key in baseline_keys]
def make_seed_1ch(shape, n_channels):
    """Create an initial CA state: a single seed cell at the grid centre.

    Returns a float32 array of shape (shape[0], shape[1], n_channels) that
    is all zeros except the centre cell, whose hidden channels (all but
    channel 0) are set to 1.
    """
    height, width = shape[0], shape[1]
    grid = np.zeros((height, width, n_channels), np.float32)
    # Light up every channel except the visible one at the centre cell.
    grid[height // 2, width // 2, 1:] = 1.0
    return grid
def plot_seeds(targets,seeds, save=True):
    """Overlay each seed (semi-transparent) on its target image in a 2x2 grid.

    Assumes at most four (target, seed) pairs and that targets have at
    least two channels (channel 1 is displayed) -- TODO confirm with callers.
    """
    fig, ax = plt.subplots(2,2)
    for idx, (t,s) in enumerate(zip(targets,seeds)):
        # print(f'target={np.shape(t)}{np.unique(t[...,1])} seed={np.shape(s)}{np.unique(s)}')
        ax.flat[idx].imshow(t[...,1])
        # Seed drawn on top with transparency so the target stays visible.
        ax.flat[idx].imshow(s, alpha=.3)
    if save:
        plt.savefig('seeds.png')
def save_cell_auto_reconstruction_vars(grow_sel, coord, mask, losses, name_prefix, idx_lesion):
outs_float = np.asarray(grow_sel)
np.savez_compressed(f'{name_prefix}_lesion_{idx_lesion:02d}.npz', outs_float)
np.save(f'{name_prefix}_coords_{idx_lesion:02d}.npy', coord)
np.savez_compressed(f'{name_prefix}_mask_{idx_lesion:02d}.npz', mask)
np.save(f'{name_prefix}_loss_{idx_lesion:02d}.npy', losses) | import os
import numpy as np
import matplotlib.pyplot as plt
import wandb
from lib.utils_vis import SamplePool, to_alpha_1ch, to_rgb_1ch
def visualize_batch(x0, x, save=True, text=''):
    """Show a training batch before (top row) and after (bottom row) an
    update step.

    x0, x : batches of CA states, displayed via their RGB projection
            (to_rgb_1ch).  Saves 'visualize_batch{text}.png' when *save*.
    """
    plt.style.use("Solarize_Light2")
    vis0 = to_rgb_1ch(x0)
    vis1 = to_rgb_1ch(x)
    # vis0 = x0[...,0]
    # vis1 = x[...,0]
    print('batch (before/after):')
    plt.figure(figsize=[15,5])
    # Top row: states before the update.
    for i in range(x0.shape[0]):
        plt.subplot(2,x0.shape[0],i+1)
        plt.imshow(np.squeeze(vis0[i]))
        plt.axis('off')
    # Bottom row: states after the update.
    for i in range(x0.shape[0]):
        plt.subplot(2,x0.shape[0],i+1+x0.shape[0])
        plt.imshow(np.squeeze(vis1[i]))
        plt.axis('off')
    if save==True:
        plt.savefig(f'visualize_batch{text}.png')
def plot_loss(loss_log, SCALE_GROWTH, loss_log_base=-1, epochs=2000, save=True, save_wandb=False, text=''):
plt.figure(figsize=(10, 4))
plt.title('Loss history (log10)')
plt.plot(np.log10(loss_log_base), '.', alpha=0.1, label='base scale=1')
plt.plot(np.log10(loss_log), '.', alpha=0.1, c='r', label=f'scale={SCALE_GROWTH:.02f}')
plt.ylim([-5, np.max(loss_log)])
plt.xlim([0, epochs])
plt.legend()
plt.xlabel('epochs')
plt.ylabel('log10(MSE)')
if save==True:
plt.savefig(f'loss_training{text}.png')
if save_wandb:
wandb.log({f'loss_training{text}.png': wandb.Image(plt)})
def plot_loss_max_intensity_and_mse(loss_log, loss_log_base, SCALE_GROWTH, SCALE_GROWTH_SYN, grow_max, mse_recons, max_base, max_base2, mse_base, mse_base2, epochs=2000, save=True, save_wandb=False, text=''):
plt.style.use("Solarize_Light2")
fig = plt.figure(figsize=(12,8))
gs = fig.add_gridspec(2,2)
ax1 = fig.add_subplot(gs[0, :])
ax2 = fig.add_subplot(gs[1, 0])
ax3 = fig.add_subplot(gs[1, 1])
ax1.plot(np.log10(loss_log_base), '.', alpha=0.1, label='base scale=1')
ax1.plot(np.log10(loss_log), '.', alpha=0.1, c='r', label=f'scale={SCALE_GROWTH:.02f}')
ax1.set_ylim([-5, np.max(loss_log)])
ax1.set_xlim([0, epochs])
ax1.legend()
ax1.set_xlabel('epochs')
ax1.set_ylabel('log10(MSE)')
ax2.plot(max_base, label='(10k) scale = 1', alpha=.5)
ax2.plot(max_base2, label='(2k) scale = 1', alpha=.5)
ax2.plot(grow_max, label=f'scales={SCALE_GROWTH:.02f}_{SCALE_GROWTH_SYN:.02f}')
ax2.legend(loc = 'lower left')
ax2.set_xlabel('reconstruction epochs (x2)')
ax2.set_ylabel('max intensity')
ax3.semilogy(mse_base, label='(10k) scale = 1', alpha=.5)
ax3.semilogy(mse_base2, label='(2k) scale = 1', alpha=.5)
ax3.semilogy(mse_recons, label=f'scales={SCALE_GROWTH:.02f}_{SCALE_GROWTH_SYN:.02f}')
ax3.legend(loc = 'lower left')
ax3.set_xlabel('reconstruction epochs (x2)')
ax3.set_ylabel('MSE')
fig.tight_layout()
if save:
plt.savefig(f'train_loss_and_synthesis{text}.png')
if save_wandb:
wandb.log({f'train_loss_and_synthesis{text}.png': wandb.Image(plt)})
def plot_max_intensity_and_mse(grow_max, mse_recons, SCALE_GROWTH, SCALE_GROWTH_SYN, max_base=-1, max_base2=-1, mse_base=-1, mse_base2=-1, save=True, save_wandb=False, text=''):
# %% PLOT MAX INTENSITY AND MSE
plt.style.use("Solarize_Light2")
fig, ax = plt.subplots(1,2, figsize=(12,4))
ax[0].plot(max_base, label='(10k) scale = 1', alpha=.3)
ax[0].plot(max_base2, label='(2k) scale = 1', alpha=.3)
ax[0].plot(grow_max, label=f'scales={SCALE_GROWTH:.02f}_{SCALE_GROWTH_SYN:.02f}')
ax[0].legend(loc = 'lower left')
ax[0].set_xlabel('reconstruction epochs')
ax[0].set_ylabel('max intensity')
ax[1].semilogy(mse_base, label='(10k) scale = 1', alpha=.3)
ax[1].semilogy(mse_base2, label='(2k) scale = 1', alpha=.3)
ax[1].semilogy(mse_recons, label=f'scales={SCALE_GROWTH:.02f}_{SCALE_GROWTH_SYN:.02f}')
ax[1].legend(loc = 'lower left')
ax[1].set_xlabel('reconstruction epochs')
ax[1].set_ylabel('MSE')
fig.tight_layout()
if save:
plt.savefig(f'max_intensity_and_mse{text}.png')
if save_wandb:
wandb.log({f'max_intensity_and_mse{text}.png': wandb.Image(plt)})
def plot_lesion_growing(grow_sel, target_img, ITER_SAVE, save=True, text=''):
fig, ax = plt.subplots(5,6, figsize=(18,12))
for i in range(ITER_SAVE):
ax.flat[i].imshow(grow_sel[i], vmin=0, vmax=1)
ax.flat[i].axis('off')
ax.flat[0].imshow(target_img[...,0], vmin=0, vmax=1)
fig.tight_layout()
if save:
plt.savefig(f'lesion_growing{text}.png')
def load_baselines(path_orig, extra_text, path='outputs/baselines/'):
files = os.listdir(f'{path_orig}/{path}')
outputs = []
for key in ['max_syn_SG=1_ep=2k', 'mse_syn_SG=1_ep=2k', 'train_loss_SG=1_ep=2k', 'max_syn_SG=1_ep=10k', 'mse_syn_SG=1_ep=10k', 'train_loss_SG=1_ep=10k']:
file = f'{key}{extra_text}.npy'
if file in files:
outputs.append(np.load(f'{path_orig}/{path}{file}'))
else:
outputs.append([.001, .001])
return outputs
def make_seed_1ch(shape, n_channels):
seed = np.zeros([shape[0], shape[1], n_channels], np.float32)
seed[shape[0]//2, shape[1]//2, 1:] = 1.0
return seed
def plot_seeds(targets,seeds, save=True):
fig, ax = plt.subplots(2,2)
for idx, (t,s) in enumerate(zip(targets,seeds)):
# print(f'target={np.shape(t)}{np.unique(t[...,1])} seed={np.shape(s)}{np.unique(s)}')
ax.flat[idx].imshow(t[...,1])
ax.flat[idx].imshow(s, alpha=.3)
if save:
plt.savefig('seeds.png')
def save_cell_auto_reconstruction_vars(grow_sel, coord, mask, losses, name_prefix, idx_lesion):
outs_float = np.asarray(grow_sel)
np.savez_compressed(f'{name_prefix}_lesion_{idx_lesion:02d}.npz', outs_float)
np.save(f'{name_prefix}_coords_{idx_lesion:02d}.npy', coord)
np.savez_compressed(f'{name_prefix}_mask_{idx_lesion:02d}.npz', mask)
np.save(f'{name_prefix}_loss_{idx_lesion:02d}.npy', losses) | en | 0.418877 | # vis0 = x0[...,0] # vis1 = x[...,0] # %% PLOT MAX INTENSITY AND MSE # print(f'target={np.shape(t)}{np.unique(t[...,1])} seed={np.shape(s)}{np.unique(s)}') | 2.600944 | 3 |
amnesia/modules/file/model.py | silenius/amnesia | 4 | 6623979 | <filename>amnesia/modules/file/model.py
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import os.path
from hashids import Hashids
from amnesia.modules.content import Content
class File(Content):
    """Content subclass representing an uploaded file."""

    def feed(self, **kwargs):
        """Consume file-specific keys from *kwargs*, then delegate the rest."""
        for attr in ('file_size', 'mime_id', 'original_name'):
            if attr in kwargs:
                setattr(self, attr, kwargs.pop(attr))
        super().feed(**kwargs)

    @property
    def fa_icon(self):
        """Font Awesome icon class matching this file's MIME type."""
        icon_by_major = {
            'image': 'fa-file-image-o',
            'video': 'fa-file-video-o',
        }
        major_icon = icon_by_major.get(self.mime.major.name)
        if major_icon is not None:
            return major_icon
        if self.mime.full == 'application/pdf':
            return 'fa-file-pdf-o'
        return super().fa_icon

    @property
    def extension(self):
        """Lower-cased file extension (including the dot), e.g. '.pdf'."""
        _, ext = os.path.splitext(self.original_name)
        return ext.lower()

    @property
    def alnum_fname(self):
        """Original name with non-alphanumerics stripped from the stem."""
        stem, ext = os.path.splitext(self.original_name)
        return ''.join(filter(str.isalnum, stem)) + ext

    def get_hashid(self, salt, min_length=8):
        """Encode this file's path_name as a salted hashid string."""
        return Hashids(salt=salt, min_length=min_length).encode(self.path_name)
| <filename>amnesia/modules/file/model.py
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import os.path
from hashids import Hashids
from amnesia.modules.content import Content
class File(Content):
def feed(self, **kwargs):
for c in ('file_size', 'mime_id', 'original_name'):
if c in kwargs:
setattr(self, c, kwargs.pop(c))
super().feed(**kwargs)
@property
def fa_icon(self):
if self.mime.major.name == 'image':
return 'fa-file-image-o'
if self.mime.major.name == 'video':
return 'fa-file-video-o'
if self.mime.full == 'application/pdf':
return 'fa-file-pdf-o'
return super().fa_icon
@property
def extension(self):
return os.path.splitext(self.original_name)[1].lower()
@property
def alnum_fname(self):
file_name, file_ext = os.path.splitext(self.original_name)
return ''.join(s for s in file_name if s.isalnum()) + file_ext
def get_hashid(self, salt, min_length=8):
hashid = Hashids(salt=salt, min_length=min_length)
return hashid.encode(self.path_name)
| en | 0.455158 | # -*- coding: utf-8 -*- # pylint: disable=E1101 | 2.289096 | 2 |
Table_3_6.py | Jonghyun-Kim-73/SAMG_Project | 0 | 6623980 | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class table_3_6(QWidget):
    """SAMG checklist page: "decide whether to start SG feedwater injection".

    Static page composed of read-only instruction texts (QTextEdit) and a
    four-row checklist table (ParaTable).
    """
    # Stylesheet applied to this page and all child widgets.
    qss = """
    QWidget {
    background: rgb(221, 221, 221);
    border : 0px solid;
    }
    QPushButton{
    background-color: rgb(221,221,221);
    border: 1px solid rgb(0,0,0);
    font-size: 14pt;
    font-weight: bold
    }
    QCheckBox::indicator {
    width: 38px;
    height: 60px;
    }
    QCheckBox::indicator::unchecked {
    width: 38px;
    height: 60px;
    border : 0px solid;
    }
    QCheckBox::indicator::checked {
    image : url(./check.png);
    height:30px;
    width:38px;
    }
    QTextEdit{
    font-size: 18pt;
    Color : black;
    border : 0px solid
    }
    QTextEdit#button{
    font-size: 12pt;
    font-weight:bold;
    Color : black;
    border : 0px solid
    }
    QTableView {
    gridline-color : black;
    }
    QHeaderView::section {
    background: black;
    }
    """
    def __init__(self, parent=None):
        super(table_3_6, self).__init__()
        self.setAttribute(Qt.WA_StyledBackground, True)
        self.setContentsMargins(0, 0, 0, 0)
        self.setStyleSheet(self.qss)
        # Basic layout and instruction labels.
        layout = QVBoxLayout(self)
        label = QTextEdit("5. 증기발생기 급수 주입 실시 여부를 결정한다.")
        label.setStyleSheet("font-size: 18pt;font-weight: bold")
        label.setContentsMargins(10, 10, 10, 20)
        label.setDisabled(True)
        label.setFixedHeight(80)  # fixed height required because of QTextEdit (addStretch has no effect)
        label1 = QTextEdit("가. 증기발생기 급수 주입을 실시하지 않았을 때의 결과를 평가한다.")
        label1.setStyleSheet("font-size: 18pt;font-weight: bold")
        label1.setContentsMargins(10, 10, 10, 20)
        label1.setDisabled(True)
        label1.setFixedHeight(80)  # fixed height required because of QTextEdit (addStretch has no effect)
        label2 = QTextEdit("<p style=\"line-height:130%\">나. 증기발생기 급수 주입을 실시하지 않았을 때 결과와 증기발생기 급수<p>"
                           "<p style=\"line-height:130%\">주입을 실시하였을 떄의 부정적 영향을 비교한다.<p>")
        label2.setStyleSheet("font-size: 18pt;font-weight: bold")
        label2.setContentsMargins(10, 10, 10, 20)
        label2.setDisabled(True)
        label2.setFixedHeight(160)  # fixed height required because of QTextEdit (addStretch has no effect)
        label3 = QTextEdit("<p style=\"line-height:130%\">다. 증기발생기 급수 주입을 실시하지 않기로 결정되었다면 전략수행<p>"
                           "<p style=\"line-height:130%\">제어도 또는 이 전략 수행 직전에 주행중이든 전략으로 되돌아간다.<p>")
        label3.setStyleSheet("font-size: 18pt;font-weight: bold")
        label3.setContentsMargins(10, 10, 10, 20)
        label3.setDisabled(True)
        label3.setFixedHeight(160)  # fixed height required because of QTextEdit (addStretch has no effect)
        self.setLayout(layout)
        para_table = ParaTable(self)
        layout.addWidget(label)
        layout.addWidget(label1)
        layout.addWidget(para_table)
        layout.addWidget(label2)
        layout.addWidget(label3)
        layout.addStretch(1)
class ParaTable(QTableWidget):
    """Four-row checklist table: consequence text in column 0, a checkbox
    in column 1."""

    def __init__(self, parent):
        super(ParaTable, self).__init__(parent=parent)
        self.setAttribute(Qt.WA_StyledBackground, True)
        self.horizontalHeader().setFixedHeight(1)
        self.verticalHeader().setFixedWidth(1)
        self.setContentsMargins(0, 0, 0, 0)
        self.setFixedHeight(200)
        self.setColumnCount(2)
        self.setRowCount(4)
        # Read-only table: no editing, focus or selection.
        self.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.setFocusPolicy(Qt.NoFocus)
        self.setSelectionMode(QAbstractItemView.NoSelection)
        # Column widths / row heights.
        self.setColumnWidth(0, 798)
        self.setColumnWidth(1, 38)
        # BUG FIX: the original iterated range(0, 5) although the table has
        # only 4 rows; clamp the loop to the actual row count.
        for i in range(self.rowCount()):
            self.setRowHeight(i, 40)
        self.setItem(0, 0, QTableWidgetItem("  증기발생기가 RCS의 열제거원 역할을 할 수 없음"))
        self.setItem(1, 0, QTableWidgetItem("  증기발생기 튜브의 건전성이 위협받을 수 있음"))
        self.setItem(2, 0, QTableWidgetItem("  RCS를 감압하는 데 증기발생기를 사용할 수 없음"))
        self.setItem(3, 0, QTableWidgetItem("  증기발생기 튜브 파손부로 부터 누출된 핵분열 생성물을 세정할 수 없음"))
        # One checkbox per row.  Keep references in a list so all of them
        # can be queried later; the original rebound self.checkbox on every
        # iteration and retained only the last one.  self.checkbox is still
        # left pointing at the last checkbox for backward compatibility.
        self.checkboxes = []
        for i in range(self.rowCount()):
            self.checkbox = QCheckBox(self)
            self.checkboxes.append(self.checkbox)
            self.setCellWidget(i, 1, self.checkbox)
        fnt = self.font()
        fnt.setBold(True)
        fnt.setPointSize(12)
        self.setFont(fnt)
class AlignDelegate(QStyledItemDelegate):
    """Item delegate that centres cell contents."""

    def initStyleOption(self, option, index):
        # Let the base class fill in the option first, then centre the text.
        super().initStyleOption(option, index)
        option.displayAlignment = Qt.AlignCenter
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle("fusion")
window = table_3_6()
window.show()
font = QFontDatabase()
font.addApplicationFont('./맑은 고딕.ttf')
app.setFont(QFont('맑은 고딕'))
app.exec_() | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class table_3_6(QWidget):
qss = """
QWidget {
background: rgb(221, 221, 221);
border : 0px solid;
}
QPushButton{
background-color: rgb(221,221,221);
border: 1px solid rgb(0,0,0);
font-size: 14pt;
font-weight: bold
}
QCheckBox::indicator {
width: 38px;
height: 60px;
}
QCheckBox::indicator::unchecked {
width: 38px;
height: 60px;
border : 0px solid;
}
QCheckBox::indicator::checked {
image : url(./check.png);
height:30px;
width:38px;
}
QTextEdit{
font-size: 18pt;
Color : black;
border : 0px solid
}
QTextEdit#button{
font-size: 12pt;
font-weight:bold;
Color : black;
border : 0px solid
}
QTableView {
gridline-color : black;
}
QHeaderView::section {
background: black;
}
"""
def __init__(self, parent=None):
super(table_3_6, self).__init__()
self.setAttribute(Qt.WA_StyledBackground, True)
self.setContentsMargins(0, 0, 0, 0)
self.setStyleSheet(self.qss)
# 기본 속성
layout = QVBoxLayout(self)
label = QTextEdit("5. 증기발생기 급수 주입 실시 여부를 결정한다.")
label.setStyleSheet("font-size: 18pt;font-weight: bold")
label.setContentsMargins(10, 10, 10, 20)
label.setDisabled(True)
label.setFixedHeight(80) # QTextEdit 때문에 설정해줘야함 (addStretch 안먹음)
label1 = QTextEdit("가. 증기발생기 급수 주입을 실시하지 않았을 때의 결과를 평가한다.")
label1.setStyleSheet("font-size: 18pt;font-weight: bold")
label1.setContentsMargins(10, 10, 10, 20)
label1.setDisabled(True)
label1.setFixedHeight(80) # QTextEdit 때문에 설정해줘야함 (addStretch 안먹음)
label2 = QTextEdit("<p style=\"line-height:130%\">나. 증기발생기 급수 주입을 실시하지 않았을 때 결과와 증기발생기 급수<p>"
"<p style=\"line-height:130%\">주입을 실시하였을 떄의 부정적 영향을 비교한다.<p>")
label2.setStyleSheet("font-size: 18pt;font-weight: bold")
label2.setContentsMargins(10, 10, 10, 20)
label2.setDisabled(True)
label2.setFixedHeight(160) # QTextEdit 때문에 설정해줘야함 (addStretch 안먹음)
label3 = QTextEdit("<p style=\"line-height:130%\">다. 증기발생기 급수 주입을 실시하지 않기로 결정되었다면 전략수행<p>"
"<p style=\"line-height:130%\">제어도 또는 이 전략 수행 직전에 주행중이든 전략으로 되돌아간다.<p>")
label3.setStyleSheet("font-size: 18pt;font-weight: bold")
label3.setContentsMargins(10, 10, 10, 20)
label3.setDisabled(True)
label3.setFixedHeight(160) # QTextEdit 때문에 설정해줘야함 (addStretch 안먹음)
self.setLayout(layout)
para_table = ParaTable(self)
layout.addWidget(label)
layout.addWidget(label1)
layout.addWidget(para_table)
layout.addWidget(label2)
layout.addWidget(label3)
layout.addStretch(1)
class ParaTable(QTableWidget):
def __init__(self, parent):
super(ParaTable, self).__init__(parent=parent)
self.setAttribute(Qt.WA_StyledBackground, True)
self.horizontalHeader().setFixedHeight(1)
self.verticalHeader().setFixedWidth(1)
self.setContentsMargins(0, 0, 0, 0)
self.setFixedHeight(200)
self.setColumnCount(2)
self.setRowCount(4)
# 편집 불가
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.setFocusPolicy(Qt.NoFocus)
self.setSelectionMode(QAbstractItemView.NoSelection)
# 테이블 행 너비 조절
self.setColumnWidth(0, 798)
self.setColumnWidth(1, 38)
for i in range(0, 5):
self.setRowHeight(i, 40)
self.setItem(0, 0, QTableWidgetItem(" 증기발생기가 RCS의 열제거원 역할을 할 수 없음"))
self.setItem(1, 0, QTableWidgetItem(" 증기발생기 튜브의 건전성이 위협받을 수 있음"))
self.setItem(2, 0, QTableWidgetItem(" RCS를 감압하는 데 증기발생기를 사용할 수 없음"))
self.setItem(3, 0, QTableWidgetItem(" 증기발생기 튜브 파손부로 부터 누출된 핵분열 생성물을 세정할 수 없음"))
# 체크박스
for i in range(0, self.rowCount()):
self.checkbox = QCheckBox(self)
self.setCellWidget(i, 1, self.checkbox)
fnt = self.font()
fnt.setBold(True)
fnt.setPointSize(12)
self.setFont(fnt)
class AlignDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(AlignDelegate, self).initStyleOption(option, index)
option.displayAlignment = Qt.AlignCenter
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle("fusion")
window = table_3_6()
window.show()
font = QFontDatabase()
font.addApplicationFont('./맑은 고딕.ttf')
app.setFont(QFont('맑은 고딕'))
app.exec_() | ko | 0.14888 | QWidget { background: rgb(221, 221, 221); border : 0px solid; } QPushButton{ background-color: rgb(221,221,221); border: 1px solid rgb(0,0,0); font-size: 14pt; font-weight: bold } QCheckBox::indicator { width: 38px; height: 60px; } QCheckBox::indicator::unchecked { width: 38px; height: 60px; border : 0px solid; } QCheckBox::indicator::checked { image : url(./check.png); height:30px; width:38px; } QTextEdit{ font-size: 18pt; Color : black; border : 0px solid } QTextEdit#button{ font-size: 12pt; font-weight:bold; Color : black; border : 0px solid } QTableView { gridline-color : black; } QHeaderView::section { background: black; } # 기본 속성 # QTextEdit 때문에 설정해줘야함 (addStretch 안먹음) # QTextEdit 때문에 설정해줘야함 (addStretch 안먹음) # QTextEdit 때문에 설정해줘야함 (addStretch 안먹음) # QTextEdit 때문에 설정해줘야함 (addStretch 안먹음) # 편집 불가 # 테이블 행 너비 조절 # 체크박스 | 2.339239 | 2 |
Section 2/source/federated_learning_for_image_classification.py | PacktPublishing/Federated-Learning-with-TensorFlow | 11 | 6623981 | # -*- coding: utf-8 -*-
# Based on the original code example:
# https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification
# Simplified, added an example of random client sampling.
import collections
import numpy as np
np.random.seed(0)
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow_federated import python as tff
from random import choices
NUM_EPOCHS = 5
BATCH_SIZE = 20
SHUFFLE_BUFFER = 500
NUM_CLIENTS = 3
tf.compat.v1.enable_v2_behavior()
# Loading simulation data
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
def preprocess(dataset):
    """Flatten, repeat, shuffle and batch one client's EMNIST dataset.

    Each element becomes an OrderedDict with 'x' (flattened 784 pixel
    vector) and 'y' (1-element label).  The dataset is repeated for
    NUM_EPOCHS local epochs, shuffled with SHUFFLE_BUFFER and batched
    with BATCH_SIZE (module-level constants).
    """
    def element_fn(element):
        # Flatten the 28x28 image and reshape the scalar label to rank 1.
        return collections.OrderedDict([
            ('x', tf.reshape(element['pixels'], [-1])),
            ('y', tf.reshape(element['label'], [1])),
        ])
    return dataset.repeat(NUM_EPOCHS).map(element_fn).shuffle(
        SHUFFLE_BUFFER).batch(BATCH_SIZE)
def make_federated_data(client_data, client_ids):
    """Build one preprocessed tf.data.Dataset per requested client id."""
    datasets = []
    for cid in client_ids:
        datasets.append(preprocess(client_data.create_tf_dataset_for_client(cid)))
    return datasets
sample_clients = emnist_train.client_ids[0: NUM_CLIENTS]
federated_train_data = make_federated_data(emnist_train, sample_clients)
sample_clients_test = emnist_test.client_ids[0: NUM_CLIENTS]
federated_test_data = make_federated_data(emnist_test, sample_clients_test)
# This is only needed to create the "federated" ver of the model
sample_batch = iter(federated_train_data[0]).next()
sample_batch = collections.OrderedDict([
('x', sample_batch['x'].numpy()),
('y', sample_batch['y'].numpy()),
])
# Training
# Create a new model
def create_compiled_keras_model():
    """Build and compile the single-layer softmax classifier (784 -> 10).

    Weights are zero-initialised; loss is the mean sparse categorical
    cross-entropy, optimised with SGD at learning rate 0.02.
    """
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(
            10, activation=tf.nn.softmax, kernel_initializer='zeros', input_shape=(784,))])
    def loss_fn(y_true, y_pred):
        # Reduce per-example cross-entropy to a scalar mean.
        return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
            y_true, y_pred))
    model.compile(
        loss=loss_fn,
        optimizer=gradient_descent.SGD(learning_rate=0.02),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    return model
# Turn model into one that can be used with TFF
def model_fn():
    """Wrap a freshly compiled Keras model as a TFF model.

    The module-level *sample_batch* is used only to infer input/output
    specs for the federated wrapper.
    """
    keras_model = create_compiled_keras_model()
    return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
# Initialize training
iterative_process = tff.learning.build_federated_averaging_process(model_fn)
state = iterative_process.initialize()
trained_clients=[]
def get_train_data(keep_it_stupid_simple=False):
    """Select NUM_CLIENTS clients for the next federated round.

    With keep_it_stupid_simple=True, always reuse the fixed
    *sample_clients* / *federated_train_data* pair.  Otherwise sample
    NUM_CLIENTS clients that have not been trained in any earlier round
    and return their preprocessed datasets.  Selected ids are recorded in
    the module-level *trained_clients* (a list of per-round id lists).
    """
    if keep_it_stupid_simple:
        if not trained_clients:
            trained_clients.append(sample_clients)
        return federated_train_data
    # BUG FIX: the original tested `c in trained_clients`, comparing a
    # client-id string against a list of *lists* of ids, so repeats were
    # never detected; it also mutated `sc` while iterating over it.
    # Flatten the history into a set of already-trained ids instead.
    already_trained = {cid for round_ids in trained_clients for cid in round_ids}
    available = [cid for cid in emnist_train.client_ids if cid not in already_trained]
    if len(available) < NUM_CLIENTS:
        # Pool exhausted: fall back to the full client list (the original
        # would have spun forever here).
        available = list(emnist_train.client_ids)
    sc = []
    while len(sc) < NUM_CLIENTS:
        cid = choices(available, k=1)[0]
        if cid not in sc:
            sc.append(cid)
    trained_clients.append(sc)
    new_federated_train_data = make_federated_data(emnist_train, sc)
    return new_federated_train_data
# Training process
for round_num in range(1, NUM_EPOCHS+1):
federated_train_data=get_train_data(True)
state, metrics = iterative_process.next(state, federated_train_data)
print('round {:2d}, metrics={}'.format(round_num, metrics))
print('Trained {:2d} clients'.format(len(trained_clients)*NUM_CLIENTS))
print(trained_clients)
# Evaluation
evaluation = tff.learning.build_federated_evaluation(model_fn)
train_metrics = evaluation(state.model, federated_train_data)
print('Train metrics', str(train_metrics))
test_metrics = evaluation(state.model, federated_test_data)
print('Test metrics', str(test_metrics))
| # -*- coding: utf-8 -*-
# Based on the original code example:
# https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification
# Simplified, added an example of random client sampling.
import collections
import numpy as np
np.random.seed(0)
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow_federated import python as tff
from random import choices
NUM_EPOCHS = 5
BATCH_SIZE = 20
SHUFFLE_BUFFER = 500
NUM_CLIENTS = 3
tf.compat.v1.enable_v2_behavior()
# Loading simulation data
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
def preprocess(dataset):
def element_fn(element):
return collections.OrderedDict([
('x', tf.reshape(element['pixels'], [-1])),
('y', tf.reshape(element['label'], [1])),
])
return dataset.repeat(NUM_EPOCHS).map(element_fn).shuffle(
SHUFFLE_BUFFER).batch(BATCH_SIZE)
def make_federated_data(client_data, client_ids):
return [preprocess(client_data.create_tf_dataset_for_client(x))
for x in client_ids]
sample_clients = emnist_train.client_ids[0: NUM_CLIENTS]
federated_train_data = make_federated_data(emnist_train, sample_clients)
sample_clients_test = emnist_test.client_ids[0: NUM_CLIENTS]
federated_test_data = make_federated_data(emnist_test, sample_clients_test)
# This is only needed to create the "federated" ver of the model
sample_batch = iter(federated_train_data[0]).next()
sample_batch = collections.OrderedDict([
('x', sample_batch['x'].numpy()),
('y', sample_batch['y'].numpy()),
])
# Training
# Create a new model
def create_compiled_keras_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(
10, activation=tf.nn.softmax, kernel_initializer='zeros', input_shape=(784,))])
def loss_fn(y_true, y_pred):
return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
y_true, y_pred))
model.compile(
loss=loss_fn,
optimizer=gradient_descent.SGD(learning_rate=0.02),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
# Turn model into one that can be used with TFF
def model_fn():
keras_model = create_compiled_keras_model()
return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
# Initialize training
iterative_process = tff.learning.build_federated_averaging_process(model_fn)
state = iterative_process.initialize()
trained_clients=[]
def get_train_data(keep_it_stupid_simple=False):
if keep_it_stupid_simple:
if not trained_clients:
trained_clients.append(sample_clients)
return federated_train_data
sc = choices(emnist_train.client_ids, k=NUM_CLIENTS)
for c in sc:
while True:
if c in trained_clients:
sc.remove(c)
newc=choices(emnist_train.client_ids, k=1)[0]
if newc not in trained_clients:
sc.append(newc)
break
else:
break
trained_clients.append(sc)
new_federated_train_data = make_federated_data(emnist_train, sc)
return new_federated_train_data
# Training process
for round_num in range(1, NUM_EPOCHS+1):
federated_train_data=get_train_data(True)
state, metrics = iterative_process.next(state, federated_train_data)
print('round {:2d}, metrics={}'.format(round_num, metrics))
print('Trained {:2d} clients'.format(len(trained_clients)*NUM_CLIENTS))
print(trained_clients)
# Evaluation
evaluation = tff.learning.build_federated_evaluation(model_fn)
train_metrics = evaluation(state.model, federated_train_data)
print('Train metrics', str(train_metrics))
test_metrics = evaluation(state.model, federated_test_data)
print('Test metrics', str(test_metrics))
| en | 0.845743 | # -*- coding: utf-8 -*- # Based on the original code example: # https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification # Simplified, added an example of random client sampling. # Loading simulation data # This is only needed to create the "federated" ver of the model # Training # Create a new model # Turn model into one that can be used with TFF # Initialize training # Training process # Evaluation | 3.05324 | 3 |
yproblem/__init__.py | DarioBojanjac/effective_2D | 1 | 6623982 | from .yproblem import Yproblem
from .utils import save_field_plots, save_pvd
| from .yproblem import Yproblem
from .utils import save_field_plots, save_pvd
| none | 1 | 0.989021 | 1 | |
tests/behavior/test.py | iblech/autopiper | 50 | 6623983 | <reponame>iblech/autopiper
#!/usr/bin/env python3
import os.path
import re
import sys
import tempfile
import subprocess
VERBOSE = 1
def run(exe, args):
    """Run *exe* with argv *args*; return (stdout, stderr, returncode).

    stdout/stderr are captured as bytes; stdin is left unconnected.
    """
    proc = subprocess.Popen(
        executable=exe, args=args,
        stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return (out, err, proc.wait())
class TestCmd(object):
    """One parsed '#test:' directive from an autopiper test source file."""

    # command types
    PORT = 1    # define a port on the DUT
    CYCLE = 2   # advance to a given cycle
    WRITE = 3   # write an input to the DUT
    EXPECT = 4  # expect a given value on a given output from the DUT

    num = '((\d+)|(0x[0-9a-fA-F]+)|(0b[01]+))'
    port_re = re.compile('^port (\w+) (\d+)$')
    cycle_re = re.compile('^cycle (\d+)$')
    write_re = re.compile('^write (\w+)\s* \s*' + num + '$')
    expect_re = re.compile('^expect (\w+)\s* \s*' + num + '$')

    def __init__(self, text):
        self.text = text
        self.cmdtype = 0
        self.cycle = 0
        self.port = 0
        self.data = 0
        self.width = 0
        if not self.parse():
            raise Exception("Could not parse text: " + text)

    def __str__(self):
        type_names = {TestCmd.PORT: "PORT", TestCmd.CYCLE: "CYCLE",
                      TestCmd.WRITE: "WRITE", TestCmd.EXPECT: "EXPECT"}
        type_str = type_names.get(self.cmdtype, '(none)')
        return ("TestCmd(type=%s,cycle=%d,port=%s,data=%d,width=%d)" %
                (type_str, self.cycle, self.port, self.data, self.width))

    def parse_num(self, t):
        """Parse a decimal, 0x-hex or 0b-binary literal."""
        for prefix, base in (('0x', 16), ('0b', 2)):
            if t.startswith(prefix):
                return int(t[2:], base)
        return int(t)

    def parse(self):
        """Match the stripped text against each command form; return True on
        success, falsy otherwise (the constructor then raises)."""
        self.text = self.text.strip()
        m = TestCmd.port_re.match(self.text)
        if m is not None:
            self.cmdtype = TestCmd.PORT
            self.port = m.group(1)
            self.width = int(m.group(2))
            return True
        m = TestCmd.cycle_re.match(self.text)
        if m is not None:
            self.cmdtype = TestCmd.CYCLE
            self.cycle = int(m.group(1))
            return True
        m = TestCmd.write_re.match(self.text)
        if m is not None:
            self.cmdtype = TestCmd.WRITE
            self.port = m.group(1)
            self.data = self.parse_num(m.group(2))
            return True
        m = TestCmd.expect_re.match(self.text)
        if m is not None:
            self.cmdtype = TestCmd.EXPECT
            self.port = m.group(1)
            self.data = self.parse_num(m.group(2))
            return True
class TestCase(object):
    """One autopiper behavioural test: compiles the DUT, generates a Verilog
    testbench from the '#test:' directives, and runs it under iverilog."""

    def __init__(self, filename):
        # Path of the autopiper source file containing '#test:' directives.
        self.filename = filename
        self.testcmds = []

    def load(self):
        """Parse every '#test:' line in the source file into a TestCmd."""
        with open(self.filename) as of:
            for line in of.readlines():
                if line.startswith('#test:'):
                    # Strip the '#test:' prefix (6 chars) before parsing.
                    self.testcmds.append(TestCmd(line.strip()[6:]))

    def write_tb(self, out_filename):
        """Emit a Verilog testbench driving module `main` per the commands.

        PORT commands declare DUT connections; WRITE/CYCLE/EXPECT commands
        become an `initial` block of assignments, delays and checks.  The
        bench prints PASSED./FAILED. and $finish-es.
        """
        with open(out_filename, 'w') as of:
            of.write("module tb;\n\n")
            # Clock/reset generation: 10-time-unit period, counter bumps on
            # each rising edge.
            of.write("reg clock;\nreg reset;\ninitial clock = 0;\ninitial reset = 0;\n\n")
            of.write("reg [63:0] cycle_counter;\ninitial cycle_counter = 0;\n")
            of.write("always begin #5; clock = 1; cycle_counter = cycle_counter + 1; #5; clock = 0; end\n\n")
            of.write("main dut(.clock(clock), .reset(reset)")
            # Collect port metadata while emitting the DUT port connections.
            portwidths = []
            portwidth_map = {}
            port_written = {}  # True if the test ever WRITEs the port
            for c in self.testcmds:
                if c.cmdtype == TestCmd.PORT:
                    of.write(",\n.%s(%s)" % (c.port, c.port))
                    portwidths.append( (c.port, c.width) )
                    portwidth_map[c.port] = c.width
                    port_written[c.port] = False
                if c.cmdtype == TestCmd.WRITE:
                    port_written[c.port] = True
            of.write(");\n\n")
            # Driven ports become regs, observed-only ports become wires.
            for (port, width) in portwidths:
                if port_written[port]:
                    of.write("reg [%d:0] %s;\n" % (width - 1, port))
                else:
                    of.write("wire [%d:0] %s;\n" % (width - 1, port))
            of.write("\n")
            if VERBOSE:
                # Per-cycle trace of every port value.
                of.write("always @(negedge clock) begin\n")
                of.write("$display(\"\\n====== cycle %d: ======\\n\", cycle_counter);\n")
                for (port, width) in portwidths:
                    of.write("$display(\"* %s = %%d\", %s);\n" % (port, port))
                of.write("end\n")
            cur_cycle = 0
            of.write("initial begin\n")
            # Zero all driven inputs, pulse reset for half a period.
            for (port, width) in portwidths:
                if port_written[port]:
                    of.write("  %s = %d'd0;\n" % (port, width))
            of.write("  reset = 1; #5; reset = 0; #5;\n")
            for c in self.testcmds:
                if c.cmdtype == TestCmd.CYCLE:
                    if c.cycle < cur_cycle:
                        print("Warning: trying to reverse time (cycle %d)" % c.cycle)
                        continue
                    # Delay by 10 time units per elapsed cycle.
                    of.write("  #%d;\n" % ((c.cycle - cur_cycle) * 10))
                    cur_cycle = c.cycle
                if c.cmdtype == TestCmd.WRITE:
                    of.write("  %s = %d'd%d;\n" % (c.port, portwidth_map[c.port], c.data))
                if c.cmdtype == TestCmd.EXPECT:
                    of.write("  if (%s != %d'd%d) begin\n" % (c.port, portwidth_map[c.port], c.data))
                    of.write("    $display(\"Data mismatch (cycle %%d): port %s should be %d but is %%d.\", cycle_counter, %s);\n" %
                            (c.port, c.data, c.port))
                    of.write("    $display(\"FAILED.\");\n")
                    of.write("    $finish;\n")
                    of.write("  end\n")
            of.write("  #10;\n")
            of.write("  $display(\"PASSED.\");\n")
            of.write("  $finish;\n")
            of.write("end\n\n")
            of.write("endmodule\n")

    def run(self, autopiper_bin):
        """Compile DUT + testbench with iverilog and run the simulation.

        Returns True when the simulation prints PASSED., False on any
        compile/run/expect failure.  Temp files are removed only on success.
        """
        tmppath = tempfile.mkdtemp()
        exe = tmppath + os.path.sep + os.path.basename(self.filename) + '_test'
        dut_v = tmppath + os.path.sep + os.path.basename(self.filename) + '_dut.v'
        tb_v = tmppath + os.path.sep + os.path.basename(self.filename) + '_tb.v'
        # 1) autopiper source -> Verilog DUT.
        stdout, stderr, ret = run(autopiper_bin, [autopiper_bin, '-o', dut_v, self.filename])
        if ret != 0:
            print("Error compiling DUT:")
            print(stderr.decode('utf-8'))
            return False
        self.write_tb(tb_v)
        # 2) DUT + testbench -> simulation executable.
        stdout, stderr, ret = run("iverilog", ["iverilog", '-o', exe, dut_v, tb_v])
        if ret != 0:
            print("Error compiling DUT and testbench Verilog to test executable:")
            print(stderr.decode('utf-8'))
            return False
        # 3) Run the simulation and check its verdict.
        stdout, stderr, ret = run(exe, [exe])
        if ret != 0:
            print("Error running test.")
            print(stderr.decode('utf-8'))
            return False
        if not stdout.endswith(b'PASSED.\n'):
            print("Test failed:")
            print(stdout.decode('utf-8'))
            return False
        os.system('rm -rf ' + tmppath)
        return True
t = TestCase(sys.argv[2])
t.load()
if t.run(sys.argv[1]):
sys.exit(0)
else:
sys.exit(1)
| #!/usr/bin/env python3
import os.path
import re
import sys
import tempfile
import subprocess
VERBOSE = 1
def run(exe, args):
sub = subprocess.Popen(executable = exe, args = args,
stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = sub.communicate()
retcode = sub.wait()
return (stdout, stderr, retcode)
class TestCmd(object):
# command types
PORT = 1 # define a port on the DUT
CYCLE = 2 # advance to a given cycle
WRITE = 3 # write an input to the DUT
EXPECT = 4 # expect a given value on a given output from the DUT
num = '((\d+)|(0x[0-9a-fA-F]+)|(0b[01]+))'
port_re = re.compile('^port (\w+) (\d+)$')
cycle_re = re.compile('^cycle (\d+)$')
write_re = re.compile('^write (\w+)\s* \s*' + num + '$')
expect_re = re.compile('^expect (\w+)\s* \s*' + num + '$')
def __init__(self, text):
self.text = text
self.cmdtype = 0
self.cycle = 0
self.port = 0
self.data = 0
self.width = 0
if not self.parse():
raise Exception("Could not parse text: " + text)
def __str__(self):
type_str = '(none)'
if self.cmdtype == TestCmd.PORT: type_str = "PORT"
elif self.cmdtype == TestCmd.CYCLE: type_str = "CYCLE"
elif self.cmdtype == TestCmd.WRITE: type_str = "WRITE"
elif self.cmdtype == TestCmd.EXPECT: type_str = "EXPECT"
return ("TestCmd(type=%s,cycle=%d,port=%s,data=%d,width=%d)" %
(type_str, self.cycle, self.port, self.data, self.width))
def parse_num(self, t):
if t.startswith('0x'):
return int(t[2:], 16)
elif t.startswith('0b'):
return int(t[2:], 2)
else:
return int(t)
def parse(self):
self.text = self.text.strip()
m = TestCmd.port_re.match(self.text)
if m is not None:
g = m.groups()
self.cmdtype = TestCmd.PORT
self.port = g[0]
self.width = int(g[1])
return True
m = TestCmd.cycle_re.match(self.text)
if m is not None:
g = m.groups()
self.cmdtype = TestCmd.CYCLE
self.cycle = int(g[0])
return True
m = TestCmd.write_re.match(self.text)
if m is not None:
g = m.groups()
self.cmdtype = TestCmd.WRITE
self.port = g[0]
self.data = self.parse_num(g[1])
return True
m = TestCmd.expect_re.match(self.text)
if m is not None:
g = m.groups()
self.cmdtype = TestCmd.EXPECT
self.port = g[0]
self.data = self.parse_num(g[1])
return True
class TestCase(object):
def __init__(self, filename):
self.filename = filename
self.testcmds = []
def load(self):
with open(self.filename) as of:
for line in of.readlines():
if line.startswith('#test:'):
self.testcmds.append(TestCmd(line.strip()[6:]))
def write_tb(self, out_filename):
with open(out_filename, 'w') as of:
of.write("module tb;\n\n")
of.write("reg clock;\nreg reset;\ninitial clock = 0;\ninitial reset = 0;\n\n")
of.write("reg [63:0] cycle_counter;\ninitial cycle_counter = 0;\n")
of.write("always begin #5; clock = 1; cycle_counter = cycle_counter + 1; #5; clock = 0; end\n\n")
of.write("main dut(.clock(clock), .reset(reset)")
portwidths = []
portwidth_map = {}
port_written = {}
for c in self.testcmds:
if c.cmdtype == TestCmd.PORT:
of.write(",\n.%s(%s)" % (c.port, c.port))
portwidths.append( (c.port, c.width) )
portwidth_map[c.port] = c.width
port_written[c.port] = False
if c.cmdtype == TestCmd.WRITE:
port_written[c.port] = True
of.write(");\n\n")
for (port, width) in portwidths:
if port_written[port]:
of.write("reg [%d:0] %s;\n" % (width - 1, port))
else:
of.write("wire [%d:0] %s;\n" % (width - 1, port))
of.write("\n")
if VERBOSE:
of.write("always @(negedge clock) begin\n")
of.write("$display(\"\\n====== cycle %d: ======\\n\", cycle_counter);\n")
for (port, width) in portwidths:
of.write("$display(\"* %s = %%d\", %s);\n" % (port, port))
of.write("end\n")
cur_cycle = 0
of.write("initial begin\n")
for (port, width) in portwidths:
if port_written[port]:
of.write(" %s = %d'd0;\n" % (port, width))
of.write(" reset = 1; #5; reset = 0; #5;\n")
for c in self.testcmds:
if c.cmdtype == TestCmd.CYCLE:
if c.cycle < cur_cycle:
print("Warning: trying to reverse time (cycle %d)" % c.cycle)
continue
of.write(" #%d;\n" % ((c.cycle - cur_cycle) * 10))
cur_cycle = c.cycle
if c.cmdtype == TestCmd.WRITE:
of.write(" %s = %d'd%d;\n" % (c.port, portwidth_map[c.port], c.data))
if c.cmdtype == TestCmd.EXPECT:
of.write(" if (%s != %d'd%d) begin\n" % (c.port, portwidth_map[c.port], c.data))
of.write(" $display(\"Data mismatch (cycle %%d): port %s should be %d but is %%d.\", cycle_counter, %s);\n" %
(c.port, c.data, c.port))
of.write(" $display(\"FAILED.\");\n")
of.write(" $finish;\n")
of.write(" end\n")
of.write(" #10;\n")
of.write(" $display(\"PASSED.\");\n")
of.write(" $finish;\n")
of.write("end\n\n")
of.write("endmodule\n")
def run(self, autopiper_bin):
tmppath = tempfile.mkdtemp()
exe = tmppath + os.path.sep + os.path.basename(self.filename) + '_test'
dut_v = tmppath + os.path.sep + os.path.basename(self.filename) + '_dut.v'
tb_v = tmppath + os.path.sep + os.path.basename(self.filename) + '_tb.v'
stdout, stderr, ret = run(autopiper_bin, [autopiper_bin, '-o', dut_v, self.filename])
if ret != 0:
print("Error compiling DUT:")
print(stderr.decode('utf-8'))
return False
self.write_tb(tb_v)
stdout, stderr, ret = run("iverilog", ["iverilog", '-o', exe, dut_v, tb_v])
if ret != 0:
print("Error compiling DUT and testbench Verilog to test executable:")
print(stderr.decode('utf-8'))
return False
stdout, stderr, ret = run(exe, [exe])
if ret != 0:
print("Error running test.")
print(stderr.decode('utf-8'))
return False
if not stdout.endswith(b'PASSED.\n'):
print("Test failed:")
print(stdout.decode('utf-8'))
return False
os.system('rm -rf ' + tmppath)
return True
t = TestCase(sys.argv[2])
t.load()
if t.run(sys.argv[1]):
sys.exit(0)
else:
sys.exit(1) | en | 0.52521 | #!/usr/bin/env python3 # command types # define a port on the DUT # advance to a given cycle # write an input to the DUT # expect a given value on a given output from the DUT #5; clock = 1; cycle_counter = cycle_counter + 1; #5; clock = 0; end\n\n") #5; reset = 0; #5;\n") #%d;\n" % ((c.cycle - cur_cycle) * 10)) #10;\n") | 2.880659 | 3 |
microci/web/ui.py | linkdd/microci | 4 | 6623984 | # -*- coding: utf-8 -*-
from flask import Blueprint, render_template, abort
from microci.web.jobs import fetch as fetch_jobs, serialize as serialize_job
from microci.model.job import JobStatus
from microci.web import db
blueprint = Blueprint('ui', __name__)
@blueprint.errorhandler(404)
def not_found(e):
    """Render the custom 404 error page.

    Flask passes the original exception as *e*; it is forwarded to the
    template as ``obj`` for display.
    """
    context = {'code': 404, 'message': 'Not Found'}
    body = render_template('error.html', error=context, obj=e)
    return body, 404
@blueprint.route('/', defaults={'status': 'all'})
@blueprint.route('/<status>')
def index(status):
    """List jobs, optionally filtered by status.

    Args:
        status: ``'all'`` or a :class:`JobStatus` member name
            (case-insensitive), taken from the URL path.

    Returns:
        The rendered job-list page; 404 for an unknown status name.
    """
    database = db.get()
    if status == 'all':
        # `query` instead of `filter`: the old name shadowed the builtin.
        query = database.jobs
    else:
        # Plain getattr on raw URL input raised AttributeError (HTTP 500)
        # for unknown statuses; answer with a clean 404 instead.
        job_status = getattr(JobStatus, status.upper(), None)
        if job_status is None:
            abort(404)
        query = database.jobs.status == job_status
    return render_template(
        'index.html',
        jobs=fetch_jobs(database, query),
        active=status
    )
@blueprint.route('/job/<int:jid>')
def detail(jid):
    """Render the detail page for a single job, or 404 if it does not exist."""
    job = db.get().jobs(jid)
    if job is None:
        abort(404)
    return render_template('detail.html', job=serialize_job(job))
| # -*- coding: utf-8 -*-
from flask import Blueprint, render_template, abort
from microci.web.jobs import fetch as fetch_jobs, serialize as serialize_job
from microci.model.job import JobStatus
from microci.web import db
blueprint = Blueprint('ui', __name__)
@blueprint.errorhandler(404)
def not_found(e):
return render_template(
'error.html',
error={'code': 404, 'message': 'Not Found'},
obj=e
), 404
@blueprint.route('/', defaults={'status': 'all'})
@blueprint.route('/<status>')
def index(status):
database = db.get()
if status == 'all':
filter = database.jobs
else:
filter = database.jobs.status == getattr(JobStatus, status.upper())
return render_template(
'index.html',
jobs=fetch_jobs(database, filter),
active=status
)
@blueprint.route('/job/<int:jid>')
def detail(jid):
database = db.get()
job = database.jobs(jid)
if job is None:
abort(404)
return render_template('detail.html', job=serialize_job(job))
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.250262 | 2 |
src/randomwalk.py | Hilbert1024/sim2nd | 5 | 6623985 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 13 22:29:50 2020
@author: Hilbert1024
"""
import numpy as np
import random
class RandomWalk(object):
    """
    Simulate random walk series over a graph from a given transition matrix.

    Parameters
    ----------
    graph : networkx.classes.graph.Graph
        A graph in networkx.
    transMat : dict
        Maps node -> transition probabilities from that node to its
        neighbors, and/or (prev_node, cur_node) -> transition
        probabilities from the current node (given the previous one)
        to the current node's neighbors.
    graphName : str
        Name of the graph; used to build the output path.
    walkNum : int
        Number of random-walk passes over all nodes. Default is 10.
    walkLen : int
        Length of the series generated each time. Default is 80.
    name : str
        Name of the output file; a random integer is used when empty.
    """
    def __init__(self, graph, transMat, graphName, walkNum = 10, walkLen = 80, name = ""):
        super(RandomWalk, self).__init__()
        self.graph = graph
        self.nodes = graph.nodes()
        self.transMat = transMat
        self.walkNum = walkNum
        self.walkLen = walkLen
        self.graphName = graphName
        if name == "":
            self.name = str(random.randint(0, 10000))
        else:
            self.name = name

    def _nodeChoice(self, probArr):
        """
        Draw a random index from np.arange(len(probArr)).

        probArr holds the (unnormalized) weights associated with each
        index; any array-like is accepted and it is NOT modified.
        """
        # Normalize a copy. The previous in-place `probArr /= sum(...)`
        # both mutated the caller's array (entries of self.transMat) and
        # raised TypeError when handed a plain Python list, as
        # edgeSeries does on the first step of every walk.
        p = np.asarray(probArr, dtype=float)
        p = p / p.sum()
        return np.random.choice(len(p), p = p)

    def _report(self, count):
        """Print simulation progress as a percentage on a single line."""
        total = self.walkNum * len(self.nodes)
        print('\r', "Simulating random walk series, process : {}%".format(round(100 * count / total, 2)), end='', flush=True)

    def _save(self, walks, method):
        """Persist walks to the data directory; return them on success, None otherwise."""
        try:
            np.save('../data/{}/{}/walkseries/walkseries_{}.npy'.format(self.graphName, method, self.name), walks)
        except FileNotFoundError:
            print("File can not found!")
        else:
            return walks

    def nodeSeries(self, method):
        """
        Simulate a random walk series where the next step depends only
        on the current node; applies to deepwalk.
        """
        walks = []
        count = 0
        for _ in np.arange(self.walkNum):
            nodes = list(self.nodes)
            random.shuffle(nodes)
            for node in nodes:
                walk = [node]
                while len(walk) < self.walkLen:
                    curNode = walk[-1]
                    curNbr = list(self.graph.neighbors(curNode))
                    if len(curNbr) > 0:
                        walk.append(curNbr[self._nodeChoice(self.transMat[curNode])])
                    else:
                        break
                count += 1
                walks.append(walk)
                self._report(count)
        return self._save(walks, method)

    def edgeSeries(self, method):
        """
        Simulate a random walk series where the next step depends on the
        previous and current nodes; applies to node2vec.
        """
        walks = []
        count = 0
        for _ in np.arange(self.walkNum):
            nodes = list(self.nodes)
            random.shuffle(nodes)
            for node in nodes:
                walk = [node]
                while len(walk) < self.walkLen:
                    curNode = walk[-1]
                    curNbr = list(self.graph.neighbors(curNode))
                    if len(curNbr) > 0:
                        if len(walk) == 1:  # First step walks to neighbors uniformly
                            nextNode = curNbr[self._nodeChoice([1 / len(curNbr)] * len(curNbr))]
                        else:
                            preNode = walk[-2]
                            nextNode = curNbr[self._nodeChoice(self.transMat[(preNode, curNode)])]
                        walk.append(nextNode)
                    else:
                        break
                count += 1
                walks.append(walk)
                self._report(count)
        return self._save(walks, method)

    def nodeVisitSeries(self, method, alpha = 1):
        """
        Simulate a random walk series biased away from frequently
        visited nodes; applies to visitgraph. `alpha` controls how
        strongly past visits penalize a neighbor.
        """
        walks = []
        count = 0
        # visit[i] counts how often node i has been stepped on; starting
        # at 1 keeps the 1 / visit**alpha penalty defined.
        visit = np.array([1] * len(self.nodes))
        for _ in np.arange(self.walkNum):
            nodes = list(self.nodes)
            random.shuffle(nodes)
            for node in nodes:
                walk = [node]
                while len(walk) < self.walkLen:
                    curNode = walk[-1]
                    curNbr = list(self.graph.neighbors(curNode))
                    if len(curNbr) > 0:
                        randomIndex = self._nodeChoice(self.transMat[curNode] * (1 / (visit[curNbr] ** alpha)))
                        nextNode = curNbr[randomIndex]
                        walk.append(nextNode)
                        visit[nextNode] += 1
                    else:
                        break
                count += 1
                walks.append(walk)
                self._report(count)
        return self._save(walks, method)
"""
Created on Thu Feb 13 22:29:50 2020
@author: Hilbert1024
"""
import numpy as np
import random
class RandomWalk(object):
"""
Simulate a random walk series by given transition matrix.
Parameters
----------
graph : networkx.classes.graph.Graph
A graph in networkx.
transMat : dict
The dictionary includes node:prob and edge:prob.
The node:prob dictionary gets transition probablities from current node to its neighbors.
The edge:prob dictionary gets transition probablities from previous node and current node to neighbors of current node.
walkNum : int
Numbers of random walks. Default is 10.
walkLen : int
Length of the series each time.
name : str
Name of file.
"""
def __init__(self, graph, transMat, graphName, walkNum = 10, walkLen = 80, name = ""):
super(RandomWalk, self).__init__()
self.graph = graph
self.nodes = graph.nodes()
self.transMat = transMat
self.walkNum = walkNum
self.walkLen = walkLen
self.graphName = graphName
if name == "":
self.name = str(random.randint(0,10000))
else:
self.name = name
def _nodeChoice(self, probArr):
"""
Generates a random sample from np.arange(len(probArr)).
ProbArr is the probabilities associated with each entry in np.arange(len(probArr)).
"""
probArr /= np.sum(probArr) #normalized
return np.random.choice(len(probArr), p = probArr)
def nodeSeries(self, method):
"""
Simulate a random walk series when next movement only depends on current node, apply to deepwalk.
"""
walks = []
count = 0
for _ in np.arange(self.walkNum):
nodes = list(self.nodes)
random.shuffle(nodes)
for node in nodes:
walk = [node]
while len(walk) < self.walkLen:
curNode = walk[-1]
curNbr = list(self.graph.neighbors(curNode))
if len(curNbr) > 0:
walk.append(curNbr[self._nodeChoice(self.transMat[curNode])])
else:
break
count += 1
walks.append(walk)
print('\r',"Simulating random walk series, process : {}%".format(round(100 * count / (self.walkNum * len(self.nodes)), 2)), end='', flush=True)
try:
np.save('../data/{}/{}/walkseries/walkseries_{}.npy'.format(self.graphName, method, self.name), walks)
except FileNotFoundError:
print("File can not found!")
else:
return walks
def edgeSeries(self, method):
"""
Simulate a random walk series when next movement only depends on current node, apply to node2vec.
"""
walks = []
count = 0
for _ in np.arange(self.walkNum):
nodes = list(self.nodes)
random.shuffle(nodes)
for node in nodes:
walk = [node]
while len(walk) < self.walkLen:
curNode = walk[-1]
curNbr = list(self.graph.neighbors(curNode))
if len(curNbr) > 0:
if len(walk) == 1: # First step walk to neighbors uniformly
nextNode = curNbr[self._nodeChoice([1 / len(curNbr)] * len(curNbr))]
else:
preNode = walk[-2]
nextNode = curNbr[self._nodeChoice(self.transMat[(preNode, curNode)])]
walk.append(nextNode)
else:
break
count += 1
walks.append(walk)
print('\r',"Simulating random walk series, process : {}%".format(round(100 * count / (self.walkNum * len(self.nodes)), 2)), end='', flush=True)
try:
np.save('../data/{}/{}/walkseries/walkseries_{}.npy'.format(self.graphName, method, self.name), walks)
except FileNotFoundError:
print("File can not found!")
else:
return walks
def nodeVisitSeries(self, method, alpha = 1):
"""
Simulate a random walk series when next movement only depends on current node, apply to visitgraph.
"""
walks = []
count = 0
visit = np.array([1] * len(self.nodes))
for _ in np.arange(self.walkNum):
nodes = list(self.nodes)
random.shuffle(nodes)
for node in nodes:
walk = [node]
while len(walk) < self.walkLen:
curNode = walk[-1]
curNbr = list(self.graph.neighbors(curNode))
if len(curNbr) > 0:
randomIndex = self._nodeChoice(self.transMat[curNode] * (1 / (visit[curNbr] ** alpha)))
nextNode = curNbr[randomIndex]
walk.append(nextNode)
visit[nextNode] += 1
else:
break
count += 1
walks.append(walk)
print('\r',"Simulating random walk series, process : {}%".format(round(100 * count / (self.walkNum * len(self.nodes)), 2)), end='', flush=True)
try:
np.save('../data/{}/{}/walkseries/walkseries_{}.npy'.format(self.graphName, method, self.name), walks)
except FileNotFoundError:
print("File can not found!")
else:
return walks | en | 0.778886 | # -*- coding: utf-8 -*- Created on Thu Feb 13 22:29:50 2020 @author: Hilbert1024 Simulate a random walk series by given transition matrix. Parameters ---------- graph : networkx.classes.graph.Graph A graph in networkx. transMat : dict The dictionary includes node:prob and edge:prob. The node:prob dictionary gets transition probablities from current node to its neighbors. The edge:prob dictionary gets transition probablities from previous node and current node to neighbors of current node. walkNum : int Numbers of random walks. Default is 10. walkLen : int Length of the series each time. name : str Name of file. Generates a random sample from np.arange(len(probArr)). ProbArr is the probabilities associated with each entry in np.arange(len(probArr)). #normalized Simulate a random walk series when next movement only depends on current node, apply to deepwalk. Simulate a random walk series when next movement only depends on current node, apply to node2vec. # First step walk to neighbors uniformly Simulate a random walk series when next movement only depends on current node, apply to visitgraph. | 3.573456 | 4 |
tests/test_okopf.py | naydyonov/allrucodes | 0 | 6623986 | <filename>tests/test_okopf.py
import unittest
from allrucodes import OKOPFCodes
class TestOKOPFCodes(unittest.TestCase):
    """Tests for OKOPFCodes value lookup.

    Renamed from TestOKSMCodes: the class exercises OKOPFCodes, not the
    OKSM classifier; unittest discovery still matches the ``Test*`` pattern.
    """

    def test_full_search(self):
        # Known OKOPF value -> code pair (value text must stay byte-identical).
        test_values = {'акционерного общества': '12200'}
        okopf = OKOPFCodes()
        for value, code in test_values.items():
            self.assertEqual(okopf.find_by_value(value), code, value)
import unittest
from allrucodes import OKOPFCodes
class TestOKSMCodes(unittest.TestCase):
def test_full_search(self):
test_values = {'акционерного общества': '12200'}
oksm = OKOPFCodes()
for value, code in test_values.items():
self.assertEqual(oksm.find_by_value(value), code, value) | none | 1 | 3.036717 | 3 | |
simulation/code/fd/sample.py | sungcheolkim78/FDclassifieR | 3 | 6623987 | <gh_stars>1-10
"""Sample rank data sets from Gaussian distributions.
This module implements Gustavo's prescription for generating synthetic
data. The data consists of a (M, N) ndarray, R, of N sample rank predictions
by M base classifiers and (N,) ndarray of true sample labels. The synthetic
rank predictions may be correlated by specifying a correlation coefficient.
Available Functions:
- data_set: generate a synthetic data set composed of sample
ranks and class labels
- multivariate_gauss: generate samples from the multivariate Gaussian
distribution
"""
import numpy as np
from scipy.special import ndtri # inverse standard normal cumulative
from scipy.stats import rankdata
def _construct_corr_matrix(M, rho):
"""Construct correlation matrix.
Construct a correlation matrix in which
C_{ij} = rho for all i \neq j.
Args:
M: (int) > 0, representing the number of rows and columns
rho: (float) on interval [0, 1) representing the correlation coefficient
Returns:
((M, M) ndarray) correlation matrix
"""
if rho < 0 or rho >= 1:
raise ValueError("The correlation coefficient (rho)"
" is defined on interval [0,1).")
elif M < 1:
raise ValueError("Required that M > 1.")
c = rho + np.zeros(shape=(M, M))
for i in range(M):
c[i, i] = 1
return c
def multivariate_gauss(m, cov, N, seed=None):
    """Sample from a multivariate Gaussian distribution.

    Algorithm designed by <NAME>

    Args:
        m: ((M,) ndarray) M > 0, of means
        cov: ((M, M) ndarray) M > 0, covariance matrix
        N: (int) > 1, number of samples to draw
        seed: seed value for np.random.default_rng; under the default
            value (None) a seed is produced by the OS

    Returns:
        ((M, N) ndarray) of sampled Gaussian scores
    """
    M = m.size
    if m.ndim != 1:
        raise ValueError("m must be a 1-d ndarray of means")
    elif cov.shape != (M, M):
        raise ValueError("cov must have shape (m.size, m.size)")
    elif N < 1:
        raise ValueError("Required that N >= 1.")
    elif (cov != cov.transpose()).any():
        raise ValueError("Covariance matrix must be symmetric")
    # Draw independent standard-normal scores, one row per dimension.
    rng = np.random.default_rng(seed)
    white = rng.normal(size=(M, N))
    # Color the white noise: with cov = V diag(w) V^T, the product
    # V sqrt(diag(w)) z has covariance cov.
    eigvals, eigvecs = np.linalg.eigh(cov)
    scale = np.diag(np.sqrt(eigvals))
    colored = np.dot(eigvecs, np.dot(scale, white))
    # Shift each row by its mean.
    offset = np.tile(m.reshape(M, 1), (1, N))
    return colored + offset
def _auc_2_delta(auc, v):
"""Compute the difference of class conditioned means (delta) from the AUC.
According to Marzban, reference below, delta is related to the AUC by
delta = \sqrt{\sigma_0^2 + \sigma_1^2} \Phi^{-1} (AUC)
with \sigma_y^2 begin the conditional variance given y and \Phi the standard
normal cumulative distribution.
Args:
auc: (float) [0, 1]
v: ((2) tuple) of (\sigma_0^2, \sigma_1^2)
Returns:
(float) E[s|y = 0] - E[s|y = 1]
Reference:
Marzban, "The ROC Curve and the Area under It as Performance Measures",
Weather and Forecasting, 2004.
"""
if auc < 0 or auc > 1:
raise ValueError("AUC is defined on interval [0,1].")
if len(v) != 2:
raise ValueError(("Must supply len 2 tuple with class conditioned "
"variances"))
if v[0] < 0 or v[1] < 0:
raise ValueError("By definition, variances must be greater than 0.")
return np.sqrt(v[0] + v[1]) * ndtri(auc)
def data_set(auc, corr_coef, prevalence, N, seed=None):
    """Sample rank data and sample class labels.

    Rank data are produced by rank ordering samples drawn from two Gaussian
    distributions — one per class — with unit variance, correlation given by
    corr_coef, and a mean separation computed from the requested AUC.
    Samples with identical scores are ordinally assigned ranks so that no
    two samples in a row share a rank.

    Args:
        auc: (float, int, or (M,) sequence) AUC values on the interval [0, 1]
        corr_coef: (float) correlation between classifier predictions [0, 1)
        prevalence: (float) number of positive class / number samples (0, 1)
        N: (int) > 1, total number of samples
        seed: any seed compatible with np.random.default_rng

    Returns:
        R: ((M, N) ndarray) independent rows of sample ranks, no ties in row
        y: ((N,) ndarray) binary [0, 1] sample class labels
    """
    # Accept a bare scalar AUC (previously only `float` was recognized, so
    # an int scalar crashed on len() below).
    if isinstance(auc, (int, float)):
        auc = [auc]
    if prevalence <= 0 or prevalence >= 1:
        # Typo fixed: "must by" -> "must be".
        raise ValueError("Prevalence must be in interval (0,1).")
    # Stats for sampling from the multivariate Gaussians.
    M = len(auc)
    N1 = int(N * prevalence)  # number of positive-class samples
    c = _construct_corr_matrix(M, corr_coef)
    delta = np.zeros(M)
    for i, auc_val in enumerate(auc):
        delta[i] = _auc_2_delta(auc_val, (c[i, i], c[i, i]))
    # One generator shared by both draws keeps the result reproducible.
    rng = np.random.default_rng(seed)
    # Positive class ~ N(0, c); negative class ~ N(delta, c).
    s = np.hstack([multivariate_gauss(np.zeros(M), c, N1, seed=rng),
                   multivariate_gauss(delta, c, N - N1, seed=rng)])
    # Label array: the first N1 samples are the positive class.
    y = np.zeros(N)
    y[:N1] = 1
    # Rank each classifier's scores; "ordinal" breaks ties.
    R = np.zeros(shape=(M, N))
    for i in range(M):
        R[i, :] = rankdata(s[i, :], method="ordinal")
    return R, y
| """Sample rank data sets from Gaussian distributions.
This module implements Gustavo's prescription for generating synthetic
data. The data consists of a (M, N) ndarray, R, of N sample rank predictions
by M base classifiers and (N,) ndarray of true sample labels. The synthetic
rank predictions may be correlated by specifying a correlation coefficient.
Available Functions:
- data_set: generate a synthetic data set composed of sample
ranks and class labels
- multivariate_gauss: generate samples from the multivariate Gaussian
distribution
"""
import numpy as np
from scipy.special import ndtri # inverse standard normal cumulative
from scipy.stats import rankdata
def _construct_corr_matrix(M, rho):
"""Construct correlation matrix.
Construct a correlation matrix in which
C_{ij} = rho for all i \neq j.
Args:
M: (int) > 0, representing the number of rows and columns
rho: (float) on interval [0, 1) representing the correlation coefficient
Returns:
((M, M) ndarray) correlation matrix
"""
if rho < 0 or rho >= 1:
raise ValueError("The correlation coefficient (rho)"
" is defined on interval [0,1).")
elif M < 1:
raise ValueError("Required that M > 1.")
c = rho + np.zeros(shape=(M, M))
for i in range(M):
c[i, i] = 1
return c
def multivariate_gauss(m, cov, N, seed=None):
"""Sample from multivariate Gaussian distribution.
Algorithm designed by <NAME>
Args:
m: ((M,) ndarray) M > 0, of means
cov: ((M,M) ndarray) M > 0, covariance matrix
N: (int) > 1, number of samples draw
seed: seed value for np.random.default_rng, default is None,
under default value (None) a seed is produced by the OS
Returns:
X: ((M, N) ndarray) of sampled Gaussian scores
"""
M = m.size
if m.ndim != 1:
raise ValueError("m must be a 1-d ndarray of means")
elif cov.shape != (M, M):
raise ValueError("cov must have shape (m.size, m.size)")
elif N < 1:
raise ValueError("Required that N >= 1.")
elif (cov != cov.transpose()).any():
raise ValueError("Covariance matrix must be symmetric")
# sample from N(0, 1)
rng = np.random.default_rng(seed)
x = rng.normal(size=(M, N))
# l (M,) ndarray of eigenvalues,
# v ((M,M) ndarray) of column eigenvectors where v[:, i] corresponds to l[i]
l, v = np.linalg.eigh(cov)
l = np.diag(np.sqrt(l))
m = np.tile(m.reshape(M,1), (1, N))
y = np.dot(v, np.dot(l, x))
return y + m
def _auc_2_delta(auc, v):
"""Compute the difference of class conditioned means (delta) from the AUC.
According to Marzban, reference below, delta is related to the AUC by
delta = \sqrt{\sigma_0^2 + \sigma_1^2} \Phi^{-1} (AUC)
with \sigma_y^2 begin the conditional variance given y and \Phi the standard
normal cumulative distribution.
Args:
auc: (float) [0, 1]
v: ((2) tuple) of (\sigma_0^2, \sigma_1^2)
Returns:
(float) E[s|y = 0] - E[s|y = 1]
Reference:
Marzban, "The ROC Curve and the Area under It as Performance Measures",
Weather and Forecasting, 2004.
"""
if auc < 0 or auc > 1:
raise ValueError("AUC is defined on interval [0,1].")
if len(v) != 2:
raise ValueError(("Must supply len 2 tuple with class conditioned "
"variances"))
if v[0] < 0 or v[1] < 0:
raise ValueError("By definition, variances must be greater than 0.")
return np.sqrt(v[0] + v[1]) * ndtri(auc)
def data_set(auc, corr_coef, prevalence, N, seed=None):
"""Sample rank data and sample class labels.
Rank data are produced by rank ordering samples drawn from two Gaussian
distributions. Each Gaussian is representative of samples drawn from one
of the two sample classes, and have unit variance and correlation specified by
corr_coef. The distance between Gaussians are determined by their respective
means, which are computed from the specified AUC.
Two samples with identical scores are ordinally assigned a rank value so that
no two samples have identical rank.
Args:
auc: ((M,) ndarray) of auc values on the interval [0, 1]
corr_coef: (float) correlation between classifier predictions [0, 1)
prevalence: (float) number of positive class / number samples (0, 1)
N: (int) > 1
seed: any seed compatible with np.random.default_rng
Returns:
R: ((M, N) ndarray) independent rows of sample ranks, no ties in row
y: ((N,) ndarray) binary [0,1] sample class labels
"""
if isinstance(auc, float):
auc = [auc]
if prevalence <= 0 or prevalence >= 1:
raise ValueError("Prevalence must by in interval (0,1).")
# stats for sampling from multivariate Gaussian
M = len(auc)
N1 = int(N * prevalence)
c = _construct_corr_matrix(M, corr_coef)
delta = np.zeros(M)
for i, auc_val in enumerate(auc):
delta[i] = _auc_2_delta(auc_val, (c[i, i], c[i, i]))
# create random number generator object accoring to seed
rng = np.random.default_rng(seed)
# sample from multivariate Gaussians
s = np.hstack([multivariate_gauss(np.zeros(M), c, N1, seed=rng),
multivariate_gauss(delta, c, N-N1, seed=rng)])
# Construct the label array
y = np.zeros(N)
y[:N1] = 1
# Construct the rank data array
R = np.zeros(shape=(M, N))
for i in range(M):
R[i, :] = rankdata(s[i, :], method="ordinal")
return R, y | en | 0.790962 | Sample rank data sets from Gaussian distributions. This module implements Gustavo's prescription for generating synthetic data. The data consists of a (M, N) ndarray, R, of N sample rank predictions by M base classifiers and (N,) ndarray of true sample labels. The synthetic rank predictions may be correlated by specifying a correlation coefficient. Available Functions: - data_set: generate a synthetic data set composed of sample ranks and class labels - multivariate_gauss: generate samples from the multivariate Gaussian distribution # inverse standard normal cumulative Construct correlation matrix. Construct a correlation matrix in which C_{ij} = rho for all i \neq j. Args: M: (int) > 0, representing the number of rows and columns rho: (float) on interval [0, 1) representing the correlation coefficient Returns: ((M, M) ndarray) correlation matrix Sample from multivariate Gaussian distribution. Algorithm designed by <NAME> Args: m: ((M,) ndarray) M > 0, of means cov: ((M,M) ndarray) M > 0, covariance matrix N: (int) > 1, number of samples draw seed: seed value for np.random.default_rng, default is None, under default value (None) a seed is produced by the OS Returns: X: ((M, N) ndarray) of sampled Gaussian scores # sample from N(0, 1) # l (M,) ndarray of eigenvalues, # v ((M,M) ndarray) of column eigenvectors where v[:, i] corresponds to l[i] Compute the difference of class conditioned means (delta) from the AUC. According to Marzban, reference below, delta is related to the AUC by delta = \sqrt{\sigma_0^2 + \sigma_1^2} \Phi^{-1} (AUC) with \sigma_y^2 begin the conditional variance given y and \Phi the standard normal cumulative distribution. Args: auc: (float) [0, 1] v: ((2) tuple) of (\sigma_0^2, \sigma_1^2) Returns: (float) E[s|y = 0] - E[s|y = 1] Reference: Marzban, "The ROC Curve and the Area under It as Performance Measures", Weather and Forecasting, 2004. Sample rank data and sample class labels. 
Rank data are produced by rank ordering samples drawn from two Gaussian distributions. Each Gaussian is representative of samples drawn from one of the two sample classes, and have unit variance and correlation specified by corr_coef. The distance between Gaussians are determined by their respective means, which are computed from the specified AUC. Two samples with identical scores are ordinally assigned a rank value so that no two samples have identical rank. Args: auc: ((M,) ndarray) of auc values on the interval [0, 1] corr_coef: (float) correlation between classifier predictions [0, 1) prevalence: (float) number of positive class / number samples (0, 1) N: (int) > 1 seed: any seed compatible with np.random.default_rng Returns: R: ((M, N) ndarray) independent rows of sample ranks, no ties in row y: ((N,) ndarray) binary [0,1] sample class labels # stats for sampling from multivariate Gaussian # create random number generator object accoring to seed # sample from multivariate Gaussians # Construct the label array # Construct the rank data array | 3.32125 | 3 |
GUI/Basic-train/MatplotWidget/MatplotlibWidget.py | muyuuuu/PyQt-learn | 12 | 6623988 | <filename>GUI/Basic-train/MatplotWidget/MatplotlibWidget.py
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sys
import numpy as np
import PyQt5
# All the basic widgets come from QtWidgets
from PyQt5.QtWidgets import (QApplication, QMainWindow, QDesktopWidget, QStyleFactory, QWidget,
QSizePolicy, QPushButton, QGridLayout)
from PyQt5.QtGui import QPalette, QColor
from PyQt5.QtCore import Qt, QTimer
from mainwidget import Ui_Form
from mainwindow import Ui_MainWindow
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
# Blank matplotlib canvas embedded as a Qt widget
class MymplCanvas(FigureCanvas):
    """A matplotlib canvas embedded as a Qt widget.

    Hosts a single-axes figure and offers a static demo plot and a
    timer-driven dynamic demo plot.
    """

    def __init__(self, parent=None, width=5, height=4, dpi=100):
        """Create the figure/axes and wire the canvas into Qt's layout system."""
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = self.fig.add_subplot(111)  # single subplot for all demos
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        # Let the canvas grow with its container in both directions.
        FigureCanvas.setSizePolicy(self,
                                   QSizePolicy.Expanding, QSizePolicy.Expanding
                                   )
        FigureCanvas.updateGeometry(self)

    def static_plot(self):
        """Draw a fixed sine curve."""
        self.axes.clear()
        self.fig.suptitle("static FIG")
        t = np.linspace(1, 10, 10)
        s = np.sin(np.pi * t)
        self.axes.plot(t, s)
        self.axes.grid(True)
        self.draw()

    def dynamic_plot(self, *args, **kwargs):
        """Start a 1-second timer that redraws the canvas with random data.

        *args/**kwargs are accepted (and ignored) so this slot can be
        connected directly to Qt signals that pass arguments.
        """
        # Store the timer on self: an explicit reference makes its
        # lifetime obvious (it also has `self` as Qt parent).
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.update_fig)
        self._timer.start(1000)

    def update_fig(self):
        """Redraw the canvas with four fresh random integer values."""
        self.axes.clear()
        self.fig.suptitle("dynamic FIG")
        values = np.random.randint(1, 10, 4)
        self.axes.plot([0, 1, 2, 3], values, 'r')
        self.axes.grid(True)
        self.draw()
# Composite plotting widget (was: 实现绘图类): canvas + navigation toolbar.
class MatplotlibWidget(QWidget):
    """QWidget bundling a MymplCanvas with its Matplotlib navigation toolbar."""
    def __init__(self, parent=None):
        super(MatplotlibWidget, self).__init__(parent)
        # Stack the plotting canvas and its toolbar in a grid layout.
        self.gridLayout = QGridLayout()
        self.mpl = MymplCanvas(self)
        self.mpl_tool = NavigationToolbar(self.mpl, self)
        # self.quit_btn_2 = QPushButton()
        # self.quit_btn_3 = QPushButton()
        # self.quit_btn_2.clicked.connect(self.static)
        # self.quit_btn_3.clicked.connect(self.dynamic)
        self.setLayout(self.gridLayout)
        self.gridLayout.addWidget(self.mpl)
        self.gridLayout.addWidget(self.mpl_tool)
        # self.gridLayout.addWidget(self.quit_btn_2)
        # self.gridLayout.addWidget(self.quit_btn_3)
    def static(self):
        """Slot: draw the static demo plot on the embedded canvas."""
        self.mpl.static_plot()
def dynamic(self):
self.mpl.dynamic_plot() | <filename>GUI/Basic-train/MatplotWidget/MatplotlibWidget.py
#!/bin/bash
# -*- coding: UTF-8 -*-
import sys
import numpy as np
import PyQt5
# 基本控件都在这里面
from PyQt5.QtWidgets import (QApplication, QMainWindow, QDesktopWidget, QStyleFactory, QWidget,
QSizePolicy, QPushButton, QGridLayout)
from PyQt5.QtGui import QPalette, QColor
from PyQt5.QtCore import Qt, QTimer
from mainwidget import Ui_Form
from mainwindow import Ui_MainWindow
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
# Blank plotting canvas widget (was: 绘图的空白界面).
class MymplCanvas(FigureCanvas):
    """Matplotlib canvas embedded as a Qt widget.

    Owns one Figure with a single Axes and offers a static demo plot plus a
    timer-driven dynamic demo plot.
    """
    def __init__(self, parent=None, width=5, height=4, dpi=100):
        # The Figure must exist before FigureCanvas.__init__ runs, because
        # the base class is constructed around it.
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = self.fig.add_subplot(111) # single Axes used for all drawing
        FigureCanvas.__init__(self, self.fig)
        self.setParent(parent)
        # Let the canvas grow with its container in both directions.
        FigureCanvas.setSizePolicy(self,
                                   QSizePolicy.Expanding, QSizePolicy.Expanding
                                   )
        FigureCanvas.updateGeometry(self)
    def static_plot(self):
        """Clear the axes and draw a fixed sine-sample demo plot."""
        self.axes.clear()
        self.fig.suptitle("static FIG")
        t = np.linspace(1, 10, 10)
        s = np.sin(np.pi * t)
        self.axes.plot(t, s)
        self.axes.grid(True)
        self.draw()
    # *args/**kwargs are unused; presumably kept so this can be connected
    # directly as a Qt slot — TODO confirm (was: 为何要加参数 / "why the parameters?").
    def dynamic_plot(self, *args, **kwargs):
        """Start a 1 s repeating timer that redraws via update_fig."""
        timer = QTimer(self)
        timer.timeout.connect(self.update_fig)
        timer.start(1000)
    def update_fig(self):
        """Timer callback: redraw the axes with 4 fresh random values."""
        self.axes.clear()
        self.fig.suptitle("dynamic FIG")
        l = np.random.randint(1, 10, 4)
        self.axes.plot([0, 1, 2, 3], l, 'r')
        self.axes.grid(True)
        self.draw()
# Composite plotting widget (was: 实现绘图类): canvas + navigation toolbar.
class MatplotlibWidget(QWidget):
    """QWidget bundling a MymplCanvas with its Matplotlib navigation toolbar."""
    def __init__(self, parent=None):
        super(MatplotlibWidget, self).__init__(parent)
        # Stack the plotting canvas and its toolbar in a grid layout.
        self.gridLayout = QGridLayout()
        self.mpl = MymplCanvas(self)
        self.mpl_tool = NavigationToolbar(self.mpl, self)
        # self.quit_btn_2 = QPushButton()
        # self.quit_btn_3 = QPushButton()
        # self.quit_btn_2.clicked.connect(self.static)
        # self.quit_btn_3.clicked.connect(self.dynamic)
        self.setLayout(self.gridLayout)
        self.gridLayout.addWidget(self.mpl)
        self.gridLayout.addWidget(self.mpl_tool)
        # self.gridLayout.addWidget(self.quit_btn_2)
        # self.gridLayout.addWidget(self.quit_btn_3)
    def static(self):
        """Slot: draw the static demo plot on the embedded canvas."""
        self.mpl.static_plot()
def dynamic(self):
self.mpl.dynamic_plot() | zh | 0.215633 | #!/bin/bash # -*- coding: UTF-8 -*- # 基本控件都在这里面 # 绘图的空白界面 # 多界面绘图 # 为何要加参数 # 实现绘图类 # 封装绘图类 # self.quit_btn_2 = QPushButton() # self.quit_btn_3 = QPushButton() # self.quit_btn_2.clicked.connect(self.static) # self.quit_btn_3.clicked.connect(self.dynamic) # self.gridLayout.addWidget(self.quit_btn_2) # self.gridLayout.addWidget(self.quit_btn_3) | 2.345041 | 2 |
nhs_plot.py | palfrey/autocovid | 0 | 6623989 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Heirarchical map plot for local Covid data - England
Created on Sun Oct 4 14:16:24 2020
@author: jah-photoshop
"""
print("________________________________________________________________________________")
print("Covid Local Data Map Plotter - version 1.1 - @jah-photoshop Oct 2020")
print("________________________________________________________________________________")
import os,csv, numpy as np,geopandas as gpd,pandas as pd,scipy.stats as ss, matplotlib.pyplot as plt, random, sys, time, pickle, shutil, mapclassify as mc
from datetime import datetime, timedelta
from math import log
# NOTE(review): ss, random, time, pickle, shutil, mc and log are imported but
# never used below — presumably leftovers from related scripts; confirm before pruning.
#Populations: [githubcom/russss/covidtracker]
#London 8908081
#South East 8852361
#South West 5605997
#East of England 6493188
#Midlands 10537679
#North East and Yorkshire 8566925
#North West 7012947
# Region populations in millions; assumed to be ordered to match the region
# order read from the shapefile (nhs_regions below) — TODO confirm.
reg_pops = [8.908081,8.852361,5.605997,6.493188,10.537679,8.566925,7.012947]
def_days = 0 #Plot since 20th March
debug = False # NOTE(review): never read below
overwrite_mode = True #If set to false, program will halt if output folder exists
data_path = "data"
output_path = "nhs"
# NOTE(review): archive_path is computed but never used in this script.
archive_path = datetime.now().strftime("/home/robotlab/jah-photoshop-googledrive/output-%Y%m%d/")
#ltla_vmax=200
if(os.path.isdir(output_path)):
    if not overwrite_mode:
        print("Output path %s already exists; aborting" % (output_path))
        sys.exit()
nhs_map_filename = "zip://" + data_path + os.path.sep + "NHS_England_Regions__April_2020__Boundaries_EN_BUC-shp.zip"
admissions_filename = data_path + os.path.sep + "r_admissions.csv"
print("________________________________________________________________________________")
print("LOADING MAP DATA")
#Load map data for England [and Wales] from shape file
print("Loading NHS region map data from " + nhs_map_filename)
nhs=gpd.read_file(nhs_map_filename)
nhs_regions = nhs.nhser20nm.to_list()
print("________________________________________________________________________________")
print("LOADING ADMISSON DATA")
print("Loading admission data from " + admissions_filename)
# Read the CSV, dropping the header row.
with open(admissions_filename) as csv_file: ad_data = [row for row in csv.reader(csv_file, delimiter=',')][1:]
# Sentinel extremes; shrunk/grown to the real data range in the loop below.
start_date = datetime(2021,12,30)
end_date = datetime(2020,1,1)
#regions = []
for data_line in ad_data:
    # Each row appears to be (region name, ISO date, admissions count)
    # — TODO confirm against the CSV header.
    if data_line[0] not in nhs_regions:
        print("Error: region mismatch")
    #regions.append(data_line[0])
    l_date = datetime.strptime(data_line[1],"%Y-%m-%d")
    if l_date > end_date: end_date=l_date
    if l_date < start_date: start_date = l_date
print(start_date.strftime('Start date: %d %m %Y'))
print(end_date.strftime('End date: %d %m %Y'))
number_of_days = (end_date-start_date).days + 1
number_of_regions = len(nhs_regions)
print("Number of days: %d" % number_of_days)
print("Number of regions: %d" % number_of_regions)
# (The commented list-multiply version below would have aliased every row to
# the same inner list; np.zeros avoids that.)
#admissions = [[0] * number_of_regions] * number_of_days
admissions = np.zeros((number_of_days,number_of_regions))
for data_line in ad_data:
    ix = nhs_regions.index(data_line[0])
    day_ix = (datetime.strptime(data_line[1],"%Y-%m-%d") - start_date).days
    val = int(data_line[2])
    admissions[day_ix][ix]=val
# Trailing mean over (up to) the last 7 days, per region.
# NOTE(review): av_admissions is computed but never used below.
av_admissions = np.zeros((number_of_days,number_of_regions))
for day in range(number_of_days):
    start_day = day-6
    if day<6: start_day = 0
    n_days = day - start_day + 1
    for r in range(number_of_regions):
        sumt = 0
        for n in range(n_days):
            sumt += admissions[start_day + n][r]
        sumt /= n_days
        av_admissions[day][r] = sumt
# Admissions per million population (reg_pops is in millions, per the
# population comment block above).
ad_rate = np.zeros((number_of_days,number_of_regions))
for day in range(number_of_days):
    for r in range(number_of_regions):
        ad_rate[day][r]=admissions[day][r]/reg_pops[r]
max_admissions = np.max(admissions) # only used by the commented-out plot below
max_ad_rate = np.max(ad_rate)
# NOTE(review): the computed maximum above is immediately overridden with a
# fixed colour-scale cap of 400.
max_ad_rate = 400
print("Building map data")
# Attach one admissions column and one rate column per day to the GeoDataFrame.
for day in range(number_of_days):
    c_date = start_date + timedelta(days=day)
    admissions_series = pd.Series(admissions[day])
    admissions_title = c_date.strftime('admissions_%m%d')
    nhs[admissions_title]=admissions_series
    ad_rate_series = pd.Series(ad_rate[day])
    ad_rate_title = c_date.strftime('rate_%m%d')
    nhs[ad_rate_title]=ad_rate_series
#
print("________________________________________________________________________________")
print("PRODUCING PLOTS")
fig=plt.figure(figsize=(24.77,24.77),frameon=False)
if not os.path.exists(output_path): os.makedirs(output_path)
# One PNG per day: choropleth of the per-million rate, region boundaries on
# top, and the date as a text label.
for day in range(def_days,number_of_days):
    c_date = start_date + timedelta(days=day)
    f_string = output_path+os.path.sep+c_date.strftime("map-%Y%m%d.png")
    print("Creating file %s" % (f_string))
    ax=plt.gca()
    ax.set_aspect('equal')
    ax.axis([132000, 659000, 9600, 675000])
    plt.axis('off')
    #nhs.plot(column=c_date.strftime('admissions_%m%d'),ax=ax,cmap='jet',vmin=0,vmax=max_admissions,zorder=0)
    nhs.plot(column=c_date.strftime('rate_%m%d'),ax=ax,cmap='jet',vmin=0,vmax=max_ad_rate,zorder=0)
    nhs.boundary.plot(ax=ax,zorder=1,linewidth=2,color='#22222288')
    plt.text(546000,590000,c_date.strftime("%B %d"), horizontalalignment='center', style='italic',fontsize=50)
    #plt.text(541000,655000,"Hospital Cases by Region",horizontalalignment='center',fontsize=42)
    plt.savefig(f_string, bbox_inches='tight')
    fig.clf()
print("________________________________________________________________________________")
print("Operation complete.")
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Heirarchical map plot for local Covid data - England
Created on Sun Oct 4 14:16:24 2020
@author: jah-photoshop
"""
print("________________________________________________________________________________")
print("Covid Local Data Map Plotter - version 1.1 - @jah-photoshop Oct 2020")
print("________________________________________________________________________________")
import os,csv, numpy as np,geopandas as gpd,pandas as pd,scipy.stats as ss, matplotlib.pyplot as plt, random, sys, time, pickle, shutil, mapclassify as mc
from datetime import datetime, timedelta
from math import log
# NOTE(review): ss, random, time, pickle, shutil, mc and log are imported but
# never used below — presumably leftovers from related scripts; confirm before pruning.
#Populations: [githubcom/russss/covidtracker]
#London 8908081
#South East 8852361
#South West 5605997
#East of England 6493188
#Midlands 10537679
#North East and Yorkshire 8566925
#North West 7012947
# Region populations in millions; assumed to be ordered to match the region
# order read from the shapefile (nhs_regions below) — TODO confirm.
reg_pops = [8.908081,8.852361,5.605997,6.493188,10.537679,8.566925,7.012947]
def_days = 0 #Plot since 20th March
debug = False # NOTE(review): never read below
overwrite_mode = True #If set to false, program will halt if output folder exists
data_path = "data"
output_path = "nhs"
# NOTE(review): archive_path is computed but never used in this script.
archive_path = datetime.now().strftime("/home/robotlab/jah-photoshop-googledrive/output-%Y%m%d/")
#ltla_vmax=200
if(os.path.isdir(output_path)):
    if not overwrite_mode:
        print("Output path %s already exists; aborting" % (output_path))
        sys.exit()
nhs_map_filename = "zip://" + data_path + os.path.sep + "NHS_England_Regions__April_2020__Boundaries_EN_BUC-shp.zip"
admissions_filename = data_path + os.path.sep + "r_admissions.csv"
print("________________________________________________________________________________")
print("LOADING MAP DATA")
#Load map data for England [and Wales] from shape file
print("Loading NHS region map data from " + nhs_map_filename)
nhs=gpd.read_file(nhs_map_filename)
nhs_regions = nhs.nhser20nm.to_list()
print("________________________________________________________________________________")
print("LOADING ADMISSON DATA")
print("Loading admission data from " + admissions_filename)
# Read the CSV, dropping the header row.
with open(admissions_filename) as csv_file: ad_data = [row for row in csv.reader(csv_file, delimiter=',')][1:]
# Sentinel extremes; shrunk/grown to the real data range in the loop below.
start_date = datetime(2021,12,30)
end_date = datetime(2020,1,1)
#regions = []
for data_line in ad_data:
    # Each row appears to be (region name, ISO date, admissions count)
    # — TODO confirm against the CSV header.
    if data_line[0] not in nhs_regions:
        print("Error: region mismatch")
    #regions.append(data_line[0])
    l_date = datetime.strptime(data_line[1],"%Y-%m-%d")
    if l_date > end_date: end_date=l_date
    if l_date < start_date: start_date = l_date
print(start_date.strftime('Start date: %d %m %Y'))
print(end_date.strftime('End date: %d %m %Y'))
number_of_days = (end_date-start_date).days + 1
number_of_regions = len(nhs_regions)
print("Number of days: %d" % number_of_days)
print("Number of regions: %d" % number_of_regions)
# (The commented list-multiply version below would have aliased every row to
# the same inner list; np.zeros avoids that.)
#admissions = [[0] * number_of_regions] * number_of_days
admissions = np.zeros((number_of_days,number_of_regions))
for data_line in ad_data:
    ix = nhs_regions.index(data_line[0])
    day_ix = (datetime.strptime(data_line[1],"%Y-%m-%d") - start_date).days
    val = int(data_line[2])
    admissions[day_ix][ix]=val
# Trailing mean over (up to) the last 7 days, per region.
# NOTE(review): av_admissions is computed but never used below.
av_admissions = np.zeros((number_of_days,number_of_regions))
for day in range(number_of_days):
    start_day = day-6
    if day<6: start_day = 0
    n_days = day - start_day + 1
    for r in range(number_of_regions):
        sumt = 0
        for n in range(n_days):
            sumt += admissions[start_day + n][r]
        sumt /= n_days
        av_admissions[day][r] = sumt
# Admissions per million population (reg_pops is in millions, per the
# population comment block above).
ad_rate = np.zeros((number_of_days,number_of_regions))
for day in range(number_of_days):
    for r in range(number_of_regions):
        ad_rate[day][r]=admissions[day][r]/reg_pops[r]
max_admissions = np.max(admissions) # only used by the commented-out plot below
max_ad_rate = np.max(ad_rate)
# NOTE(review): the computed maximum above is immediately overridden with a
# fixed colour-scale cap of 400.
max_ad_rate = 400
print("Building map data")
# Attach one admissions column and one rate column per day to the GeoDataFrame.
for day in range(number_of_days):
    c_date = start_date + timedelta(days=day)
    admissions_series = pd.Series(admissions[day])
    admissions_title = c_date.strftime('admissions_%m%d')
    nhs[admissions_title]=admissions_series
    ad_rate_series = pd.Series(ad_rate[day])
    ad_rate_title = c_date.strftime('rate_%m%d')
    nhs[ad_rate_title]=ad_rate_series
#
print("________________________________________________________________________________")
print("PRODUCING PLOTS")
fig=plt.figure(figsize=(24.77,24.77),frameon=False)
if not os.path.exists(output_path): os.makedirs(output_path)
# One PNG per day: choropleth of the per-million rate, region boundaries on
# top, and the date as a text label.
for day in range(def_days,number_of_days):
    c_date = start_date + timedelta(days=day)
    f_string = output_path+os.path.sep+c_date.strftime("map-%Y%m%d.png")
    print("Creating file %s" % (f_string))
    ax=plt.gca()
    ax.set_aspect('equal')
    ax.axis([132000, 659000, 9600, 675000])
    plt.axis('off')
    #nhs.plot(column=c_date.strftime('admissions_%m%d'),ax=ax,cmap='jet',vmin=0,vmax=max_admissions,zorder=0)
    nhs.plot(column=c_date.strftime('rate_%m%d'),ax=ax,cmap='jet',vmin=0,vmax=max_ad_rate,zorder=0)
    nhs.boundary.plot(ax=ax,zorder=1,linewidth=2,color='#22222288')
    plt.text(546000,590000,c_date.strftime("%B %d"), horizontalalignment='center', style='italic',fontsize=50)
    #plt.text(541000,655000,"Hospital Cases by Region",horizontalalignment='center',fontsize=42)
    plt.savefig(f_string, bbox_inches='tight')
    fig.clf()
print("________________________________________________________________________________")
print("Operation complete.")
| en | 0.62422 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Heirarchical map plot for local Covid data - England Created on Sun Oct 4 14:16:24 2020 @author: jah-photoshop #Populations: [githubcom/russss/covidtracker] #London 8908081 #South East 8852361 #South West 5605997 #East of England 6493188 #Midlands 10537679 #North East and Yorkshire 8566925 #North West 7012947 #Plot since 20th March #If set to false, program will halt if output folder exists #ltla_vmax=200 #Load map data for England [and Wales] from shape file #regions = [] #regions.append(data_line[0]) #admissions = [[0] * number_of_regions] * number_of_days # #nhs.plot(column=c_date.strftime('admissions_%m%d'),ax=ax,cmap='jet',vmin=0,vmax=max_admissions,zorder=0) #plt.text(541000,655000,"Hospital Cases by Region",horizontalalignment='center',fontsize=42) | 2.485391 | 2 |
pb_link/pb_client.py | NabazOwner/micropython-iot | 1 | 6623990 | # pb_client.py Run on Pyboard/STM device. Communicate with IOT server via an
# ESP8266 running esp_link.py
# Copyright (c) <NAME> 2018
# Released under the MIT licence. Full text in root of this repository.
# Communication uses I2C slave mode.
import uasyncio as asyncio
import ujson
from . import app_base
from . import config as cfg
# Server-side connection ID: any newline-terminated string not containing an
# internal newline.
CONN_ID = '1\n'
# User application: must be class subclassed from AppBase
class App(app_base.AppBase):
    """Demo IOT client app: sends an incrementing 3-list as JSON every 5 s
    and prints every newline-terminated JSON line received."""
    def __init__(self, loop, conn_id, config, hardware, verbose):
        super().__init__(loop, conn_id, config, hardware, verbose)
    def start(self):  # Apps must implement a synchronous start method
        # Launch the two long-running coroutines on the shared event loop.
        self.loop.create_task(self.receiver())
        self.loop.create_task(self.sender())
    # If server is running s_app_cp.py it sends
    # [approx app uptime in secs/5, echoed count, echoed 99]
    async def receiver(self):
        """Forever read newline-terminated JSON lines and (if verbose) print them."""
        self.verbose and print('Starting receiver.')
        while True:
            line = await self.readline()
            data = ujson.loads(line)
            self.verbose and print('Received', data)
    async def sender(self):
        """Forever send [42, counter, 99] as JSON, one message every 5 s."""
        self.verbose and print('Starting sender.')
        data = [42, 0, 99] # s_app_cp.py expects a 3-list
        while True:
            await asyncio.sleep(5)
            data[1] += 1
            await self.write(ujson.dumps(data))
            self.verbose and print('Sent', data)
# Instantiate the app (verbose=True) and run the event loop until interrupted.
loop = asyncio.get_event_loop()
app = App(loop, CONN_ID, cfg.config, cfg.hardware, True)
try:
    loop.run_forever()
finally:
    app.close()  # for subsequent runs
| # pb_client.py Run on Pyboard/STM device. Communicate with IOT server via an
# ESP8266 running esp_link.py
# Copyright (c) <NAME> 2018
# Released under the MIT licence. Full text in root of this repository.
# Communication uses I2C slave mode.
import uasyncio as asyncio
import ujson
from . import app_base
from . import config as cfg
# Server-side connection ID: any newline-terminated string not containing an
# internal newline.
CONN_ID = '1\n'
# User application: must be class subclassed from AppBase
class App(app_base.AppBase):
    """Demo IOT client app: sends an incrementing 3-list as JSON every 5 s
    and prints every newline-terminated JSON line received."""
    def __init__(self, loop, conn_id, config, hardware, verbose):
        super().__init__(loop, conn_id, config, hardware, verbose)
    def start(self): # Apps must implement a synchronous start method
        # Launch the two long-running coroutines on the shared event loop.
        self.loop.create_task(self.receiver())
        self.loop.create_task(self.sender())
    # If server is running s_app_cp.py it sends
    # [approx app uptime in secs/5, echoed count, echoed 99]
    async def receiver(self):
        """Forever read newline-terminated JSON lines and (if verbose) print them."""
        self.verbose and print('Starting receiver.')
        while True:
            line = await self.readline()
            data = ujson.loads(line)
            self.verbose and print('Received', data)
    async def sender(self):
        """Forever send [42, counter, 99] as JSON, one message every 5 s."""
        self.verbose and print('Starting sender.')
        data = [42, 0, 99] # s_app_cp.py expects a 3-list
        while True:
            await asyncio.sleep(5)
            data[1] += 1
            await self.write(ujson.dumps(data))
            self.verbose and print('Sent', data)
# Instantiate the app (verbose=True) and run the event loop until interrupted.
loop = asyncio.get_event_loop()
app = App(loop, CONN_ID, cfg.config, cfg.hardware, True)
try:
    loop.run_forever()
finally:
    app.close() # for subsequent runs
| en | 0.761389 | # pb_client.py Run on Pyboard/STM device. Communicate with IOT server via an # ESP8266 running esp_link.py # Copyright (c) <NAME> 2018 # Released under the MIT licence. Full text in root of this repository. # Communication uses I2C slave mode. # Server-side connection ID: any newline-terminated string not containing an # internal newline. # User application: must be class subclassed from AppBase # Apps must implement a synchronous start method # If server is running s_app_cp.py it sends # [approx app uptime in secs/5, echoed count, echoed 99] # s_app_cp.py expects a 3-list # for subsequent runs | 2.598488 | 3 |
djangoFiles/jeklog/urls.py | silvrwolfboy/theJekyllProject | 20 | 6623991 | from django.views.generic import TemplateView
# No routed URLs: this URLconf only overrides Django's error views.
urlpatterns = []
# Custom HTTP-error handlers.  Django resolves these names from the root
# URLconf and renders the matching static template for each error status.
handler404 = TemplateView.as_view(template_name='jeklog/404.html')
handler500 = TemplateView.as_view(template_name='jeklog/500.html')
handler403 = TemplateView.as_view(template_name='jeklog/403.html')
handler400 = TemplateView.as_view(template_name='jeklog/400.html')
| from django.views.generic import TemplateView
# No routed URLs: this URLconf only overrides Django's error views.
urlpatterns = []
# Custom HTTP-error handlers.  Django resolves these names from the root
# URLconf and renders the matching static template for each error status.
handler404 = TemplateView.as_view(template_name='jeklog/404.html')
handler500 = TemplateView.as_view(template_name='jeklog/500.html')
handler403 = TemplateView.as_view(template_name='jeklog/403.html')
handler400 = TemplateView.as_view(template_name='jeklog/400.html')
| none | 1 | 1.59685 | 2 | |
TenBasicAlgorithm.py | LikeSnooker/TenBasicAlgorithm-Python- | 0 | 6623992 | <gh_stars>0
from collections import deque
# Bubble sort (冒泡排序): each pass bubbles the largest remaining value to
# the right end, so pass s only needs to scan the unsorted prefix.
def bubbleSort(Q):
    """Sort the list Q in place (ascending) and return it.

    O(n^2) worst case; stops early when a full pass makes no swaps
    (the list is already sorted).
    """
    for s in range(len(Q) - 1):
        swapped = False
        # the last s elements are already in their final positions
        for m in range(len(Q) - 1 - s):
            if Q[m] > Q[m + 1]:
                Q[m], Q[m + 1] = Q[m + 1], Q[m]
                swapped = True
        if not swapped:  # no swaps -> already sorted, stop early
            break
    return Q
print("bubbleSort:")
print(bubbleSort([8,2,7,3,9,1,4,5,6]))
#
# Quicksort (快速排序): pick the last element as pivot, partition the rest
# into <= pivot and > pivot, recurse on both sides.
# BUGFIX: the original partition used strict < and > against Q[-1], so any
# duplicates of the pivot were silently dropped from the result.
#
def quickSort(Q):
    """Return a new, ascending-sorted copy of the list Q."""
    if len(Q) <= 1:
        return Q
    pivot = Q[-1]
    # Q[:-1] excludes the pivot itself; "<=" keeps duplicate pivot values.
    left = [x for x in Q[:-1] if x <= pivot]
    right = [x for x in Q[:-1] if x > pivot]
    return quickSort(left) + [pivot] + quickSort(right)
S = [8,2,7,3,9,1,4,5,6]
# quickSort is not in-place: rebind S to the sorted result before printing
# (the original printed the still-unsorted list).
S = quickSort(S)
print("quicksort:")
print(S)
#
# Merge sort (归并排序).  Two already-sorted lists, e.g. [1,3,5,7] and
# [2,4,6], are easy to merge: repeatedly move the smaller of the two front
# elements into a new list until one input is empty, then append the
# remainder of the other input:
# [1,3,5,7]
# [2,4,6]
# []
#    ↓
# [3,5,7]
# [2,4,6]
# [1]
#    ↓
# [3,5,7]
# [4,6]
# [1,2]
#    :
#
def mergeSort(Q):
    """Return a new, ascending-sorted copy of the list Q.

    The merge walks both halves with index cursors (O(1) per element);
    the original used list.pop(0), which is O(n) per pop.
    """
    if len(Q) <= 1:
        return Q
    middle = len(Q) >> 1
    left = mergeSort(Q[:middle])
    right = mergeSort(Q[middle:])
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # Exactly one of the two halves still has elements; append the rest.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
S1 = [8,2,7,3,9,1,4,5,6]
# (the original also called mergeSort(S1) once and discarded the result)
print("mergesort")
print(mergeSort(S1))
#
# Heap sort (堆排序), using a binary heap laid out in the list:
# children of node i live at indices 2*i+1 and 2*i+2.
#
def leftI(index):
    return (index << 1) + 1
def rightI(index):
    return (index + 1) << 1
# Partial sift-down.  `size` is the *last valid index* (inclusive) of the
# heap region.  NOTE(review): the recursion only descends into a child
# after a swap occurred there, so a single call does not fully restore the
# max-heap property; buildmaxheap below compensates by calling this `size`
# times from the root — verify correctness/complexity before reusing.
def maxheapify(Q,index,size):
    if leftI(index) <= size:
        if Q[leftI(index)] > Q[index]:
            Q[leftI(index)],Q[index] = Q[index],Q[leftI(index)]
            maxheapify(Q,leftI(index),size)
    if rightI(index) <= size:
        if Q[rightI(index)] > Q[index]:
            Q[rightI(index)],Q[index] = Q[index],Q[rightI(index)]
            maxheapify(Q,rightI(index),size)
# Repeatedly sift from the root so the maximum of Q[0..size] ends at Q[0].
# (The loop variable m is unused; only the repetition count matters.)
def buildmaxheap(Q,size):
    for m in range(size):
        maxheapify(Q,0,size)
# In-place ascending heap sort: move the current maximum of the shrinking
# prefix Q[0..m] to position m.  Sorts Q in place; returns None.
def heapsort(Q):
    for m in range(len(Q)-1,0,-1):
        buildmaxheap(Q,m)
        Q[0],Q[m] = Q[m],Q[0]
S2 = [8,2,7,3,9,1,4,5,6]
# buildmaxheap(S2,8)
heapsort(S2)
print("heapsort")
print(S2)
###################################################################
# Directed test graph: vertices a..h are the integers 0..7 and N[v] is the
# collection of successors of v.
a,b,c,d,e,f,g,h = range(8)
N = [
    {b,d},
    {c},
    {f},
    {e},
    {f},
    {g,h},
    {},
    {}
]
# NOTE(review): the last two entries are empty *dicts*, not sets ({} builds
# a dict).  Iterating an empty dict yields nothing, so the traversals below
# still work, but set() would be the correct literal.
# Depth-first search (深度优先搜索): lazily yields vertices in DFS visit order.
def dfs(graph, node):
    """Generate the vertices of ``graph`` reachable from ``node``, one per
    first visit, using an explicit stack (iterative depth-first order)."""
    visited = set()
    stack = [node]
    while stack:
        current = stack.pop()
        if current in visited:
            continue
        visited.add(current)
        stack.extend(graph[current])
        yield current
# Breadth-first search (广度优先搜索): returns the BFS tree as a
# child -> parent mapping.
def bfs(graph, node):
    """Return a dict mapping each vertex reachable from ``node`` to its
    BFS-tree parent; the start ``node`` itself maps to None."""
    parents = {node: None}
    frontier = deque([node])
    while frontier:
        current = frontier.popleft()
        for nxt in graph[current]:
            if nxt not in parents:
                parents[nxt] = current
                frontier.append(nxt)
    return parents
# Demo runs over graph N starting from vertex a.
print("dfs search")
for dfs_node in dfs(N,a):
    print(dfs_node)
# bfs returns a child->parent dict; iterating it prints the keys
# (insertion = discovery order).
print("bfs search")
for bfs_node in bfs(N,a):
    print(bfs_node)
def mybfs(graph, node):
    """Breadth-first traversal; return the set of vertices reachable from
    ``node`` (including ``node`` itself)."""
    seen = {node}
    pending = deque([node])
    while pending:
        current = pending.popleft()
        # enqueue each successor exactly once, at discovery time
        fresh = [nxt for nxt in graph[current] if nxt not in seen]
        seen.update(fresh)
        pending.extend(fresh)
    return seen
# Print the set of vertices mybfs found (set iteration order).
for my_node in mybfs(N,a):
    print (my_node)
print ("mydfs")
def mydfs(graph, node):
    """Iterative depth-first traversal that prints each vertex as it is
    expanded; unlike dfs() above, vertices are marked seen when first
    discovered, so nothing is ever pushed twice."""
    seen = {node}
    stack = [node]
    while stack:
        current = stack.pop()
        for nxt in graph[current]:
            if nxt not in seen:
                seen.add(nxt)
                stack.append(nxt)
        print(current)
mydfs(N,a)
# Recursive depth-first search (递归版): prints each neighbor the first time
# it is discovered.  Note the start vertex itself is neither printed nor
# added to ``history``.
def rec_dfs(graph, node, history):
    for nxt in graph[node]:
        if nxt not in history:
            print(nxt)
            history.add(nxt)
            rec_dfs(graph, nxt, history)
print("rec_dfs")
rec_dfs(N,a,set())
#
# Dijkstra's shortest-path algorithm (迪杰斯特拉算法).
#
# Fixes over the original:
#  * copy.copy made a *shallow* copy, so relaxations mutated ``graph`` itself
#    (rows were shared); use copy.deepcopy.
#  * sorted(openList, ...) returned a new list that was discarded, so the
#    open list was never actually ordered by distance.
#  * a neighbour was only enqueued when its distance improved, but the
#    distance row starts as a copy of the direct edges, so no relaxation
#    ever happened and the raw edge weights were printed unchanged.  Now
#    every vertex is visited in order of tentative distance.
import copy
INI = 999  # "infinity": marks a missing edge
graph = [[0, 10, 4, 8, INI],
        [INI, 0, INI, INI, 5],
        [INI, INI, 0, 2, 11],
        [INI, INI, INI, 0, 3],
        [INI, INI, INI, INI, 0]]
def Dijkstra(graph, s, e):
    """Compute single-source shortest paths from vertex ``s``.

    ``graph`` is an adjacency matrix that uses INI for "no edge".  Row ``s``
    of the result holds the shortest distance from ``s`` to every vertex.
    The matrix is printed (as before) and also returned; ``e`` is kept only
    for interface compatibility — all distances are computed.
    """
    n = len(graph)
    dists = copy.deepcopy(graph)  # deep copy: never mutate the caller's graph
    visited = {s}
    while len(visited) < n:
        # pick the unvisited vertex with the smallest tentative distance
        v = min((i for i in range(n) if i not in visited),
                key=lambda i: dists[s][i])
        visited.add(v)
        for i in range(n):
            if graph[v][i] == INI or i in visited:
                continue
            # relax edge v -> i
            if dists[s][v] + graph[v][i] < dists[s][i]:
                dists[s][i] = dists[s][v] + graph[v][i]
    print(dists)
    return dists
Dijkstra(graph,0,4)
| from collections import deque
# Bubble sort (冒泡排序): each pass bubbles the largest remaining value to
# the right end, so pass s only needs to scan the unsorted prefix.
def bubbleSort(Q):
    """Sort the list Q in place (ascending) and return it.

    O(n^2) worst case; stops early when a full pass makes no swaps
    (the list is already sorted).
    """
    for s in range(len(Q) - 1):
        swapped = False
        # the last s elements are already in their final positions
        for m in range(len(Q) - 1 - s):
            if Q[m] > Q[m + 1]:
                Q[m], Q[m + 1] = Q[m + 1], Q[m]
                swapped = True
        if not swapped:  # no swaps -> already sorted, stop early
            break
    return Q
print("bubbleSort:")
print(bubbleSort([8,2,7,3,9,1,4,5,6]))
#
# Quicksort (快速排序): pick the last element as pivot, partition the rest
# into <= pivot and > pivot, recurse on both sides.
# BUGFIX: the original partition used strict < and > against Q[-1], so any
# duplicates of the pivot were silently dropped from the result.
#
def quickSort(Q):
    """Return a new, ascending-sorted copy of the list Q."""
    if len(Q) <= 1:
        return Q
    pivot = Q[-1]
    # Q[:-1] excludes the pivot itself; "<=" keeps duplicate pivot values.
    left = [x for x in Q[:-1] if x <= pivot]
    right = [x for x in Q[:-1] if x > pivot]
    return quickSort(left) + [pivot] + quickSort(right)
S = [8,2,7,3,9,1,4,5,6]
# quickSort is not in-place: rebind S to the sorted result before printing
# (the original printed the still-unsorted list).
S = quickSort(S)
print("quicksort:")
print(S)
#
# Merge sort (归并排序).  Two already-sorted lists, e.g. [1,3,5,7] and
# [2,4,6], are easy to merge: repeatedly move the smaller of the two front
# elements into a new list until one input is empty, then append the
# remainder of the other input:
# [1,3,5,7]
# [2,4,6]
# []
#    ↓
# [3,5,7]
# [2,4,6]
# [1]
#    ↓
# [3,5,7]
# [4,6]
# [1,2]
#    :
#
def mergeSort(Q):
    """Return a new, ascending-sorted copy of the list Q.

    The merge walks both halves with index cursors (O(1) per element);
    the original used list.pop(0), which is O(n) per pop.
    """
    if len(Q) <= 1:
        return Q
    middle = len(Q) >> 1
    left = mergeSort(Q[:middle])
    right = mergeSort(Q[middle:])
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # Exactly one of the two halves still has elements; append the rest.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
S1 = [8,2,7,3,9,1,4,5,6]
# (the original also called mergeSort(S1) once and discarded the result)
print("mergesort")
print(mergeSort(S1))
#
# Heap sort (堆排序), using a binary heap laid out in the list:
# children of node i live at indices 2*i+1 and 2*i+2.
#
def leftI(index):
    return (index << 1) + 1
def rightI(index):
    return (index + 1) << 1
# Partial sift-down.  `size` is the *last valid index* (inclusive) of the
# heap region.  NOTE(review): the recursion only descends into a child
# after a swap occurred there, so a single call does not fully restore the
# max-heap property; buildmaxheap below compensates by calling this `size`
# times from the root — verify correctness/complexity before reusing.
def maxheapify(Q,index,size):
    if leftI(index) <= size:
        if Q[leftI(index)] > Q[index]:
            Q[leftI(index)],Q[index] = Q[index],Q[leftI(index)]
            maxheapify(Q,leftI(index),size)
    if rightI(index) <= size:
        if Q[rightI(index)] > Q[index]:
            Q[rightI(index)],Q[index] = Q[index],Q[rightI(index)]
            maxheapify(Q,rightI(index),size)
# Repeatedly sift from the root so the maximum of Q[0..size] ends at Q[0].
# (The loop variable m is unused; only the repetition count matters.)
def buildmaxheap(Q,size):
    for m in range(size):
        maxheapify(Q,0,size)
# In-place ascending heap sort: move the current maximum of the shrinking
# prefix Q[0..m] to position m.  Sorts Q in place; returns None.
def heapsort(Q):
    for m in range(len(Q)-1,0,-1):
        buildmaxheap(Q,m)
        Q[0],Q[m] = Q[m],Q[0]
S2 = [8,2,7,3,9,1,4,5,6]
# buildmaxheap(S2,8)
heapsort(S2)
print("heapsort")
print(S2)
###################################################################
# Directed test graph: vertices a..h are the integers 0..7 and N[v] is the
# collection of successors of v.
a,b,c,d,e,f,g,h = range(8)
N = [
    {b,d},
    {c},
    {f},
    {e},
    {f},
    {g,h},
    {},
    {}
]
# NOTE(review): the last two entries are empty *dicts*, not sets ({} builds
# a dict).  Iterating an empty dict yields nothing, so the traversals below
# still work, but set() would be the correct literal.
# Depth-first search (深度优先搜索): generator yielding vertices in DFS
# visit order, using an explicit stack.
def dfs(graph,node):
    searched,query_queue = set(),[]
    query_queue.append(node)
    while query_queue:
        q_node = query_queue.pop()
        if q_node in searched:
            continue
        searched.add(q_node)
        for neighbor in graph[q_node]:
            query_queue.append(neighbor)
        yield q_node
# Breadth-first search (广度优先搜索): returns the BFS tree as a
# child -> parent dict (the start node maps to None).
def bfs(graph,node):
    parents,query_queue = {node:None},deque([node])
    while query_queue:
        q_node = query_queue.popleft()
        for neighbor in graph[q_node]:
            if neighbor in parents:
                continue
            parents[neighbor] = q_node
            query_queue.append(neighbor)
    return parents
# Demo runs over graph N from vertex a; iterating the bfs() result prints
# its keys (insertion = discovery order).
print("dfs search")
for dfs_node in dfs(N,a):
    print(dfs_node)
print("bfs search")
for bfs_node in bfs(N,a):
    print(bfs_node)
# BFS variant: returns the set of vertices reachable from `node`
# (vertices are marked seen at discovery time).
def mybfs(graph,node):
    explore_queue ,history = deque([node]),set()
    history.add(node)
    while explore_queue:
        wait_explore_node = explore_queue.popleft()
        for neighbor in graph[wait_explore_node]:
            if neighbor in history:
                continue
            history.add(neighbor)
            explore_queue.append(neighbor)
    return history
# Print the reachable set (set iteration order).
for my_node in mybfs(N,a):
    print (my_node)
print ("mydfs")
# Iterative DFS that prints each vertex as it is expanded; unlike dfs()
# above, vertices are marked seen when first discovered.
def mydfs(graph,node):
    explore_queue,history = [],set()
    history.add(node)
    explore_queue.append(node)
    while explore_queue:
        cur_node = explore_queue.pop()
        for neighbor in graph[cur_node]:
            if neighbor in history:
                continue
            history.add(neighbor)
            explore_queue.append(neighbor)
        print(cur_node)
mydfs(N,a)
# Recursive depth-first search (递归版): prints each neighbor the first
# time it is discovered.  Note the start vertex itself is neither printed
# nor added to `history`.
def rec_dfs(graph,node,history):
    for neighbor in graph[node]:
        if neighbor in history:
            continue
        print(neighbor)
        history.add(neighbor)
        rec_dfs(graph,neighbor,history)
print("rec_dfs")
rec_dfs(N,a,set())
#
# Dijkstra's shortest-path algorithm (迪杰斯特拉算法).
#
# Fixes over the original:
#  * copy.copy made a *shallow* copy, so relaxations mutated ``graph`` itself
#    (rows were shared); use copy.deepcopy.
#  * sorted(openList, ...) returned a new list that was discarded, so the
#    open list was never actually ordered by distance.
#  * a neighbour was only enqueued when its distance improved, but the
#    distance row starts as a copy of the direct edges, so no relaxation
#    ever happened and the raw edge weights were printed unchanged.  Now
#    every vertex is visited in order of tentative distance.
import copy
INI = 999  # "infinity": marks a missing edge
graph = [[0, 10, 4, 8, INI],
        [INI, 0, INI, INI, 5],
        [INI, INI, 0, 2, 11],
        [INI, INI, INI, 0, 3],
        [INI, INI, INI, INI, 0]]
def Dijkstra(graph, s, e):
    """Compute single-source shortest paths from vertex ``s``.

    ``graph`` is an adjacency matrix that uses INI for "no edge".  Row ``s``
    of the result holds the shortest distance from ``s`` to every vertex.
    The matrix is printed (as before) and also returned; ``e`` is kept only
    for interface compatibility — all distances are computed.
    """
    n = len(graph)
    dists = copy.deepcopy(graph)  # deep copy: never mutate the caller's graph
    visited = {s}
    while len(visited) < n:
        # pick the unvisited vertex with the smallest tentative distance
        v = min((i for i in range(n) if i not in visited),
                key=lambda i: dists[s][i])
        visited.add(v)
        for i in range(n):
            if graph[v][i] == INI or i in visited:
                continue
            # relax edge v -> i
            if dists[s][v] + graph[v][i] < dists[s][i]:
                dists[s][i] = dists[s][v] + graph[v][i]
    print(dists)
    return dists
Dijkstra(graph,0,4) | zh | 0.906896 | #冒泡排序 #思路很简单 每一趟循环将 最大的数 冒泡到最右端 # # 快速排序 # 核心 思路 选一个基准 小的放左边 大的放右边,并对左右分别递归 # # # 归并排序 # 假设有两个已经排序好的数组 [1,3,5,7] [2,4,6] 我们很容易将这两个数组排序 步骤为 # 选取两个数组中最小的元素,将两者之间的更小者放入新数组,直到某个数组为空, # 然后将另一个数组中的剩余元素全部放入新数组 # [1,3,5,7] # [3,4,6] # [] # ↓ # [3,5,7] # [2,4,6] # [1] # ↓ # [3,5,7] # [4,6] # [1,3] # : # # # 堆排序 利用了 堆结构 # # buildmaxheap(S2,8) ################################################################### #深度优先搜索 #广度优先搜索 #递归版的深度优先搜索 # # 迪杰斯特拉(dijkstra)算法 # | 3.885751 | 4 |
writeToExcel.py | kkkelicheng/PythonExcelDemo | 0 | 6623993 | <reponame>kkkelicheng/PythonExcelDemo
import openpyxl
from openpyxl.utils import get_column_letter
# Create a new workbook (was: 创建一个新的工作簿)
wb = openpyxl.Workbook()
# A freshly created workbook comes with one active worksheet
sheet = wb.active
# Sheet names used throughout this demo
sheetName_happy2020 = "happy2020"
sheetName_first = "first"
sheetName_middle = "middle"
# change the name of sheet
print(sheet.title)
sheet.title = sheetName_happy2020
# NOTE(review): get_sheet_names/get_sheet_by_name/remove_sheet are legacy
# openpyxl APIs (superseded by wb.sheetnames, wb[name], del wb[name]) —
# confirm the installed openpyxl version still provides them.
print(wb.get_sheet_names())
# Nothing reaches disk until save() is called
# wb.save("pyCreatedExcel.xlsx")
# Editing rule: do not overwrite the source file — save under a new name
# (saving under an existing name silently overwrites it).
# Create additional worksheets:
# index=0 inserts "first" before happy2020; without index it would be appended
wb.create_sheet(index=0,title=sheetName_first)
wb.create_sheet(index=1,title=sheetName_middle)
wb.create_sheet(index=2,title="willRemove")
print(wb.get_sheet_names())
# Delete a worksheet:
# first fetch the sheet to delete (two ways to fetch it; see readExcel)
wb.remove_sheet(wb.get_sheet_by_name("willRemove"))
# Again: nothing is written to disk without save()
# wb.save("pyCreatedExcel.xlsx")
"""
==========================向cells中写数据==========================
"""
# Writing data into cells — three assignment styles below
sh_2020 = wb.get_sheet_by_name(sheetName_happy2020)
# Style 1: assign cell by cell
sh_2020["A1"] = "Hello Python"
print(sheet["A1"].value)
# Style 2: assign a whole row at a time
sh_list = wb.get_sheet_by_name(sheetName_first)
rowsData = [
    ['Number','Batch 1','Batch 2'],
    [2,30,35],
    [4,40,35],
    [6,50,35],
    [9,60,35],
    [10,70,35],
    [12,80,35]
]
for rowData in rowsData:
    # each append() fills the next row in order
    sh_list.append(rowData)
# Style 3: the cell() helper — style 1 in a single call
sh_m = wb.get_sheet_by_name(sheetName_middle)
for row in range(5,30): #rows 5..29
    for col in range(15,30): #columns 15..29
        sh_m.cell(column=col,row=row,value=get_column_letter(col))
print('sh_m[aa10] = {}'.format(sh_m['AA10'].value))
wb.save(filename = "pyCreatedExcel.xlsx")
| import openpyxl
from openpyxl.utils import get_column_letter
# Create a new workbook (was: 创建一个新的工作簿)
wb = openpyxl.Workbook()
# A freshly created workbook comes with one active worksheet
sheet = wb.active
# Sheet names used throughout this demo
sheetName_happy2020 = "happy2020"
sheetName_first = "first"
sheetName_middle = "middle"
# change the name of sheet
print(sheet.title)
sheet.title = sheetName_happy2020
# NOTE(review): get_sheet_names/get_sheet_by_name/remove_sheet are legacy
# openpyxl APIs (superseded by wb.sheetnames, wb[name], del wb[name]) —
# confirm the installed openpyxl version still provides them.
print(wb.get_sheet_names())
# Nothing reaches disk until save() is called
# wb.save("pyCreatedExcel.xlsx")
# Editing rule: do not overwrite the source file — save under a new name
# (saving under an existing name silently overwrites it).
# Create additional worksheets:
# index=0 inserts "first" before happy2020; without index it would be appended
wb.create_sheet(index=0,title=sheetName_first)
wb.create_sheet(index=1,title=sheetName_middle)
wb.create_sheet(index=2,title="willRemove")
print(wb.get_sheet_names())
# Delete a worksheet:
# first fetch the sheet to delete (two ways to fetch it; see readExcel)
wb.remove_sheet(wb.get_sheet_by_name("willRemove"))
# Again: nothing is written to disk without save()
# wb.save("pyCreatedExcel.xlsx")
"""
==========================向cells中写数据==========================
"""
# Writing data into cells — three assignment styles below
sh_2020 = wb.get_sheet_by_name(sheetName_happy2020)
# Style 1: assign cell by cell
sh_2020["A1"] = "Hello Python"
print(sheet["A1"].value)
# Style 2: assign a whole row at a time
sh_list = wb.get_sheet_by_name(sheetName_first)
rowsData = [
    ['Number','Batch 1','Batch 2'],
    [2,30,35],
    [4,40,35],
    [6,50,35],
    [9,60,35],
    [10,70,35],
    [12,80,35]
]
for rowData in rowsData:
    # each append() fills the next row in order
    sh_list.append(rowData)
# Style 3: the cell() helper — style 1 in a single call
sh_m = wb.get_sheet_by_name(sheetName_middle)
for row in range(5,30): #rows 5..29
    for col in range(15,30): #columns 15..29
        sh_m.cell(column=col,row=row,value=get_column_letter(col))
print('sh_m[aa10] = {}'.format(sh_m['AA10'].value))
wb.save(filename = "pyCreatedExcel.xlsx") | zh | 0.942378 | # 创建一个新的工作簿 # 活跃的,创建wb,应该自带一个active的表单 # 先取3个变量 # change the name of sheet # 如果你不调用save ,就不会写到硬盘上面的 # wb.save("pyCreatedExcel.xlsx") # 修改xlsx 的原则: 不改变源文件,重新取一个名字去保存。防止出错,取同名会覆盖。 # 创建其他的表单 # 创建一个name为first sheet的,插在happy2020的前面,假如不指定index,会放在happy2020的后面 # 删除一个表单 # 首先获取到要删除的表单,2种方式获取,随便用一个,在readExcel中有写 # 如果你不调用save ,就不会写到硬盘上面的 # wb.save("pyCreatedExcel.xlsx") ==========================向cells中写数据========================== # 向cells中写数据 # 「赋值形式1」 以cell为单位赋值 # 「赋值形式2」 以row为单位赋值 # 就是依次赋值 # 「赋值形式3」 用cell的自带函数赋值,方式1的简写,一句话搞定 #5行到29行 #15列到29列 | 3.426986 | 3 |
python_scripts/setup.py | webanpick/webanpick-master | 1 | 6623994 | <filename>python_scripts/setup.py
from setuptools import setup
setup( name='webanpickdebugnode',
version='0.1',
description='A wrapper for launching and interacting with a Webanpick Debug Node',
url='http://github.com/webanpickit/webanpick',
author='<NAME>.',
author_email='<EMAIL>',
license='See LICENSE.md',
packages=['webanpickdebugnode'],
#install_requires=['webanpickapi'],
zip_safe=False ) | <filename>python_scripts/setup.py
from setuptools import setup
setup( name='webanpickdebugnode',
version='0.1',
description='A wrapper for launching and interacting with a Webanpick Debug Node',
url='http://github.com/webanpickit/webanpick',
author='<NAME>.',
author_email='<EMAIL>',
license='See LICENSE.md',
packages=['webanpickdebugnode'],
#install_requires=['webanpickapi'],
zip_safe=False ) | en | 0.127112 | #install_requires=['webanpickapi'], | 1.319907 | 1 |
py_tdlib/constructors/search_messages.py | Mr-TelegramBot/python-tdlib | 24 | 6623995 | <reponame>Mr-TelegramBot/python-tdlib
from ..factory import Method
class searchMessages(Method):
    """TDLib ``searchMessages`` request descriptor.

    Each class attribute mirrors one field of the TDLib call and stays
    ``None`` until the caller fills it in; serialization is handled by the
    ``Method`` base class from ``..factory``.
    """
    query = None              # type: "string"
    offset_date = None        # type: "int32"
    offset_chat_id = None     # type: "int53"
    offset_message_id = None  # type: "int53"
    limit = None              # type: "int32"
| from ..factory import Method
class searchMessages(Method):
query = None # type: "string"
offset_date = None # type: "int32"
offset_chat_id = None # type: "int53"
offset_message_id = None # type: "int53"
limit = None # type: "int32" | en | 0.605481 | # type: "string" # type: "int32" # type: "int53" # type: "int53" # type: "int32" | 1.857358 | 2 |
src/service/encrypted/encrypted/views.py | cs5331-group12/rest-api-development | 0 | 6623996 | <reponame>cs5331-group12/rest-api-development
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework.response import Response
from rest_framework.decorators import api_view
@api_view(["GET"])
def root(request):
    """List every API route implemented by this service.

    Responds to GET with a JSON body of the form
    ``{"status": true, "result": [<route>, ...]}``.
    """
    implemented_routes = [
        "/",
        "/meta/heartbeat",
        "/meta/members",
        "/users/",
        "/users/register",
        "/users/authenticate",
        "/users/expire",
        "/diary/",
        "/diary/create",
        "/diary/delete",
        "/diary/permission",
    ]
    return Response({"status": True, "result": implemented_routes})
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework.response import Response
from rest_framework.decorators import api_view
@api_view(["GET"])
def root(request):
"""
Retrieve all endpoints that are implemented
"""
data = {
"status": True,
"result": [
"/",
"/meta/heartbeat",
"/meta/members",
"/users/",
"/users/register",
"/users/authenticate",
"/users/expire",
"/diary/",
"/diary/create",
"/diary/delete",
"/diary/permission",
]
}
return Response(data) | en | 0.879936 | # -*- coding: utf-8 -*- Retrieve all endpoints that are implemented | 2.34671 | 2 |
pybpod_soundcard_module/module_api.py | pybpod/pybpod-gui-plugin-soundcard | 0 | 6623997 | <filename>pybpod_soundcard_module/module_api.py
import array
import math
import time
import numpy as np
from enum import Enum, IntEnum
from aenum import auto
import os
import collections
import usb.core
import usb.util
from usb.backend import libusb1 as libusb
class SampleRate(IntEnum):
    """
    Enumeration for the sample rate of the sounds in the Sound Card.

    Values are the rates in Hz, so members can be used directly as integers.
    """
    #: 96KHz sample rate
    _96000HZ = 96000
    #: 192KHz sample rate
    _192000HZ = 192000
class DataType(IntEnum):
    """
    Type of the sample data to be sent to the Sound Card.
    """
    #: Integer 32 bits
    # NOTE(review): the trailing comma makes this a 1-tuple; IntEnum unpacks
    # it back into int(0), so the resulting value is 0 and this is harmless.
    INT32 = 0,
    #: Single precision float (not yet supported by the hardware)
    FLOAT32 = 1
class SoundCardErrorCode(Enum):
    """Status/error codes used by the sound-card protocol helpers.

    NOTE(review): the trailing commas make most explicit values 1-tuples
    (e.g. ``OK.value`` is ``(0,)``, not ``0``), and ``auto()`` is imported
    from ``aenum`` while this class derives from the stdlib ``Enum`` — those
    members keep the ``aenum.auto`` instance as their value instead of an
    int.  The rest of this module only compares members by identity
    (``is`` / ``is not``), so behaviour is unaffected, but do not rely on
    ``.value`` without confirming these quirks are intended.
    """
    OK = 0,
    BAD_USER_INPUT = -1,
    HARP_SOUND_CARD_NOT_DETECTED = -1000,
    NOT_ABLE_TO_SEND_METADATA = auto(),
    NOT_ABLE_TO_READ_METADATA_COMMAND_REPLY = auto(),
    METADATA_COMMAND_REPLY_NOT_CORRECT = auto(),
    NOT_ABLE_TO_SEND_DATA = auto(),
    NOT_ABLE_TO_READ_DATA_COMMAND_REPLY = auto(),
    DATA_COMMAND_REPLY_NOT_CORRECT = auto(),
    NOT_ABLE_TO_SEND_READ_METADATA = auto(),
    NOT_ABLE_TO_READ_READ_METADATA_COMMAND_REPLY = auto(),
    READ_METADATA_COMMAND_REPLY_NOT_CORRECT = auto(),
    BAD_SOUND_INDEX = -1020,
    BAD_SOUND_LENGTH = auto(),
    BAD_SAMPLE_RATE = auto(),
    BAD_DATA_TYPE = auto(),
    DATA_TYPE_DO_NOT_MATCH = auto(),
    BAD_DATA_INDEX = auto(),
    PRODUCING_SOUND = -1030,
    STARTED_PRODUCING_SOUND = auto(),
    NOT_ABLE_TO_OPEN_FILE = -1040
class SoundMetadata(object):
    """Header describing one sound upload to the Harp Sound Card.

    Holds the destination index, the length in samples, the sample rate and
    the sample data type; can validate itself and serialize to the 4-int32
    layout the hardware expects.
    """

    def __init__(self, sound_index, sound_length, sample_rate, data_type):
        """
        :param sound_index: Sound index in the soundcard (2 -> 31 since 0 and 1 are reserved)
        :param sound_length: Sound length in number of samples
        :param sample_rate: Sample rate
        :param data_type: 0 for Int32 and 1 for Float32 (not available right now)
        """
        self._sound_index = sound_index
        self._sound_length = sound_length
        self._sample_rate = sample_rate
        self._data_type = data_type

    def check_data(self):
        """Return the first failing SoundCardErrorCode, or OK when all fields are valid."""
        if not (2 <= self._sound_index <= 32):
            return SoundCardErrorCode.BAD_SOUND_INDEX
        if self._sound_length < 16:
            return SoundCardErrorCode.BAD_SOUND_LENGTH
        # Identity checks on purpose: a plain int that merely equals the enum
        # value is rejected, exactly as in the original implementation.
        if not any(self._sample_rate is rate
                   for rate in (SampleRate._96000HZ, SampleRate._192000HZ)):
            return SoundCardErrorCode.BAD_SAMPLE_RATE
        if not any(self._data_type is kind
                   for kind in (DataType.INT32, DataType.FLOAT32)):
            return SoundCardErrorCode.BAD_DATA_TYPE
        # Indexes 0 and 1 are reserved for Float32 data; user slots take Int32.
        required = DataType.FLOAT32 if self._sound_index in (0, 1) else DataType.INT32
        if self._data_type is not required:
            return SoundCardErrorCode.DATA_TYPE_DO_NOT_MATCH
        return SoundCardErrorCode.OK

    def as_array(self):
        """Serialize the metadata as a 4-element int32 NumPy array."""
        fields = (self._sound_index, self._sound_length,
                  self._sample_rate, self._data_type)
        return np.array(fields, dtype=np.int32)
class SoundCardModule(object):
    """
    Provides access to the Harp Sound Card. It allows to send and read the sounds in the Sound Card, through a normal
    USB connection.
    """

    def __init__(self, device=None):
        """
        If a libUSB's device is given, it will try to open it. If none is given it will try to connect to the first Sound Card that is connected to the computer.

        :param device: (Optional) libUSB device to use. If nothing is passed, it will try to connect automatically.
        """
        self._backend = libusb.get_backend()
        # FIX: define the list before the try block. The original only assigned
        # self._devices inside the try, so a swallowed OSError from usb.core.find
        # made the next statement crash with AttributeError.
        self._devices = []
        try:
            self._devices = list(usb.core.find(backend=self._backend, idVendor=0x04d8, idProduct=0xee6a, find_all=True))
        except OSError:
            pass
        self._dev = self._devices[0] if self._devices else None
        self._cfg = None
        self._port = None
        self._connected = False
        self.open(self._dev if device is None else device)

    def __enter__(self):
        # Context-manager support: ``with SoundCardModule() as card: ...``
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the USB handle when leaving the ``with`` block.
        self.close()

    def open(self, device=None):
        """
        Opens the connection to the Sound Card. If no device is given, it will try to connect to the first Sound Card that is connected to the computer.

        :param device: (Optional) Already initialized libUSB's device to use.
        """
        if device is None:
            # No handle supplied: scan the bus for the first card (VID 0x04d8 / PID 0xee6a).
            self._backend = libusb.get_backend()
            try:
                self._dev = usb.core.find(backend=self._backend, idVendor=0x04d8, idProduct=0xee6a)
            except OSError:
                self._dev = None
        else:
            self._dev = device
        if self._dev is None:
            print(
                "Unable to connect to the Sound Card through the USB port. You will be unable to send and receive sounds.")
        else:
            # set the active configuration. With no arguments, the first configuration will be the active one
            # note: some devices reset when setting an already selected configuration so we should check for it before
            self._cfg = self._dev.get_active_configuration()
            if self._cfg is None or self._cfg.bConfigurationValue != 1:
                self._dev.set_configuration(1)
        self._connected = True if self._dev else False

    @property
    def devices(self):
        """All Harp Sound Cards found while scanning the USB bus in ``__init__``."""
        return self._devices

    @property
    def connected(self):
        """True when ``open`` obtained a usable device handle."""
        return self._connected

    def close(self):
        """
        Closes the connection with the Sound Card. It will close USB connection (to read and save sounds)
        """
        if self._dev:
            usb.util.dispose_resources(self._dev)

    def reset(self):
        """
        Resets the device, waits 700ms and tries to connect again so that the current instance of the SoundCard object can still be used.
        .. note:: Necessary at the moment after sending a sound.
        """
        if not self._dev:
            raise Exception("Sound card might not be connected. Please connect it before any operation.")
        # Reset command length: 'c' 'm' 'd' '0x88' + 'f'
        reset_cmd = [ord('c'), ord('m'), ord('d'), 0x88, ord('f')]
        wrt = self._dev.write(1, reset_cmd, 100)
        assert wrt == len(reset_cmd)
        # Give the firmware time to reboot before reconnecting.
        time.sleep(700.0 / 1000.0)
        self.open()

    def read_sounds(self, output_folder=None, sound_index=None, clean_dst_folder=True):
        """
        Reads sounds from the sound card.
        .. note:: by default, it will clear the destination folder of all data. It will also write by default to a
        "from_soundcard" folder in the working directory if none is given.
        :param output_folder: Destination folder's path.
        :param sound_index: If a sound_index is given, it will get only that sound, if nothing is passed it will gather all sounds from all indexes.
        :param clean_dst_folder: Flag that defines if the method should clean the destination folder or not
        """
        if not self._dev:
            raise Exception("Sound card might not be connected. Please connect it before any operation.")
        # If output_folder is None, write inside a 'from_soundcard' folder in the current directory.
        if not output_folder:
            output_folder = os.path.join(os.getcwd(), 'from_soundcard')
            if not os.path.isdir(output_folder):
                os.makedirs(output_folder)
        else:
            # create folder if it doesn't exist
            if not os.path.exists(output_folder):
                os.makedirs(output_folder)
        if clean_dst_folder:
            for file in os.listdir(output_folder):
                file_path = os.path.join(output_folder, file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except Exception:
                    # probably a permissions error while deleting, ignore and try the next one
                    print("Error occurred when deleting file '{file_path}'. Ignoring error and continuing.".format(file_path=file_path))
                    continue
        if sound_index is None:
            # Slots 0 and 1 are reserved, so user sounds live in 2..31.
            for i in range(2, 32):
                self._from_soundcard(output_folder, i)
        else:
            self._from_soundcard(output_folder, sound_index)
        print("All files read!")

    def send_sound(self, wave_int, sound_index, sample_rate, data_type,
                   sound_filename=None, metadata_filename=None,
                   description_filename=None):
        """
        This method will send the sound to the Harp Sound Card as a byte array (int8)

        :param wave_int: NumPy array as int32 that represents the sound data
        :param sound_index: The destination index in the Sound Card (>=2 and <= 32)
        :param sample_rate: The SampleRate enum value for either 96KHz or 192KHz
        :param data_type: The DataType enum value for either Int32 or Float32 (not implemented yet in the hardware)
        :param sound_filename: The name of the sound filename to be saved with the sound in the board (str)
        :param metadata_filename: The name of the metadata filename to be saved with the sound in the board (str)
        :param description_filename: The name of the description filename to be saved with the sound in the board (str)
        """
        self._to_soundcard(wave_int, sound_index, sample_rate, data_type, sound_filename, metadata_filename,
                           description_filename)

    def _from_soundcard(self, output_folder=None, sound_index=None):
        """
        Reads one sound slot from the sound card and writes its metadata files to disk.

        :param output_folder: Destination folder's path.
        :param sound_index: Index of the slot to read (must be 2..31).
        """
        if not self._dev:
            raise Exception("Sound card might not be connected. Please connect it before any operation.")
        if sound_index is None or sound_index < 2 or sound_index > 31:
            raise Exception("sound_index must have a value between 2 and 31")
        metadata = self.__get_metadata_from_device(sound_index)
        if metadata is None:
            raise Exception('SoundCardModule: Error while getting metadata from device')
        # Filename prefix encodes the slot index, e.g. 'i07_'.
        prefix = 'i'
        if sound_index < 9:
            prefix += '0' + str(sound_index) + '_'
        else:
            prefix += str(sound_index) + '_'
        sound_filename = metadata.sound_filename.decode('utf-8')
        metadata_filename = metadata.metadata_filename.decode('utf-8') if metadata.metadata_filename else None
        description_filename = metadata.description_filename.decode(
            'utf-8') if metadata.description_filename else None
        if prefix not in sound_filename:
            sound_filename = prefix + sound_filename
        if metadata_filename and prefix not in metadata_filename:
            metadata_filename = prefix + metadata_filename
        if description_filename and prefix not in description_filename:
            description_filename = prefix + description_filename
        if metadata.has_sound:
            with open(os.path.join(output_folder, sound_filename), 'w', encoding='utf8') as f:
                # TODO: read the sound so we can write it here
                f.write('TODO')
        if metadata.has_metadata:
            with open(os.path.join(output_folder, metadata_filename), 'wb') as f:
                # clean the zeros at the end
                f.write(metadata.metadata_array.tobytes().strip(b'\0'))
        if metadata.has_description:
            with open(os.path.join(output_folder, description_filename), 'wb') as f:
                f.write(metadata.description.tobytes().strip(b'\0'))
        # create summary info file
        if metadata.has_sound:
            with open(os.path.join(output_folder, sound_filename + '.metadata.txt'), 'w') as f:
                f.write('SOUND_INDEX = ' + str(sound_index))
                # A long sound spans several consecutive slots; list them all.
                used_pos = math.ceil(metadata.sound_length / (33554432.0 * 2.0 / 32.0)) - 1
                if used_pos > 0:
                    f.write(", ")
                    f.write(", ".join(str(sound_index + idx + 1) for idx in range(used_pos)))
                f.write("\n")
                f.write("TOTAL_SAMPLES = " + str(metadata.sound_length) + "\n")
                f.write(
                    "TOTAL_LENGTH_MS = " + str(int(metadata.sound_length / 2 / metadata.sample_rate * 1000)) + "\n")
                f.write("SAMPLE_RATE = " + str(metadata.sample_rate) + "\n")
                if metadata.data_type == 0:
                    f.write("DATA_TYPE = Int32\n")
                else:
                    f.write("DATA_TYPE = Float32\n")
                f.write("SOUND_FILENAME = " + sound_filename + "\n")
                if metadata.has_metadata:
                    f.write("USER_METADATA_FILENAME = " + metadata_filename + "\n")
                if metadata.has_description:
                    f.write("USER_DESCRIPTION_FILENAME = " + description_filename + "\n")

    def _to_soundcard(self, wave_int, sound_index, sample_rate, data_type,
                      sound_filename=None, metadata_filename=None,
                      description_filename=None):
        """
        Sends a sound to the Harp Sound Card as a sequence of USB commands.

        :param wave_int: NumPy array as int32 that represents the sound data
        :param sound_index: The destination index in the Sound Card (>=2 and <= 32)
        :param sample_rate: The SampleRate enum value for either 96KHz or 192KHz
        :param data_type: The DataType enum value for either Int32 or Float32 (not implemented yet in the hardware)
        :param sound_filename: The name of the sound filename to be saved with the sound in the board (str)
        :param metadata_filename: The name of the metadata filename to be saved with the sound in the board (str)
        :param description_filename: The name of the description filename to be saved with the sound in the board (str)
        """
        # confirm that the dev exists and is ready
        if not self._dev:
            raise EnvironmentError(
                'Sound card not initialized. Please call the initialize method before any operation.')
        int32_size = np.dtype(np.int32).itemsize
        # work with an int8 view of the wave_int (which is int32)
        wave_int8 = wave_int.view(np.int8)
        # get number of commands to send (32768-byte payload per command)
        sound_file_size_in_samples = len(wave_int8) // 4
        # FIX: the original used ``is not 0`` (identity comparison with an int
        # literal, a SyntaxWarning on modern Python); ``!= 0`` is the correct test.
        commands_to_send = int(sound_file_size_in_samples * 4 // 32768 + (
            1 if ((sound_file_size_in_samples * 4) % 32768) != 0 else 0))
        # Metadata command length: 'c' 'm' 'd' '0x80' + random + metadata + 32768 + 2048 + 'f'
        metadata_cmd_header_size = 4 + int32_size + (4 * int32_size)
        metadata_cmd = np.zeros(metadata_cmd_header_size + 32768 + 2048 + 1, dtype=np.int8)
        metadata_cmd[0] = ord('c')
        metadata_cmd[1] = ord('m')
        metadata_cmd[2] = ord('d')
        metadata_cmd[3] = 0x80
        metadata_cmd[-1] = ord('f')
        # Random token echoed back by the board so replies can be matched to commands.
        rand_val = np.random.randint(-32768, 32768, size=1, dtype=np.int32)
        metadata_cmd[4: 4 + int32_size] = rand_val.view(np.int8)
        # create metadata info and add it to the metadata_cmd
        metadata = SoundMetadata(sound_index, sound_file_size_in_samples, sample_rate, data_type)
        if metadata.check_data() is not SoundCardErrorCode.OK:
            print("Input data incorrect, please correct it before proceeding.")
            return
        metadata_cmd[8: 8 + (4 * int32_size)] = metadata.as_array().view(np.int8)
        # add first data block of data to the metadata_cmd
        metadata_cmd_data_index = metadata_cmd_header_size
        metadata_cmd[metadata_cmd_data_index: metadata_cmd_data_index + 32768] = wave_int8[0: 32768]
        # prepare user_metadata
        # [0:169]      sound_filename
        # [170:339]    metadata_filename
        # [340:511]    description_filename
        # [512:1535]   metadata_filename content
        # [1536:2047]  description_filename content
        user_metadata = np.zeros(2048, dtype=np.int8)
        user_metadata_index = metadata_cmd_data_index + 32768
        if sound_filename:
            tmp = bytearray()
            tmp.extend(map(ord, os.path.basename(sound_filename)))
            tmp_size = len(tmp) if len(tmp) < 169 else 169
            user_metadata[0:tmp_size] = tmp[0:tmp_size]
        if metadata_filename:
            tmp = bytearray()
            tmp.extend(map(ord, os.path.basename(metadata_filename)))
            tmp_size = len(tmp) if len(tmp) < 169 else 169
            user_metadata[170: 170 + tmp_size] = tmp[0:tmp_size]
            # get file contents, truncate data if required
            try:
                with open(metadata_filename, 'r', encoding='utf8') as f:
                    text = f.read()
                    text_tmp = bytearray()
                    text_tmp.extend(map(ord, text))
                    data_tmp = np.array(text_tmp)
                    data = data_tmp.view(np.int8)
                    data_size = len(data) if len(data) < 1023 else 1023
                    user_metadata[512: 512 + data_size] = data[0: data_size]
            except OSError:
                # TODO: should be a stronger error
                print("Error opening metadata file.")
        if description_filename:
            tmp = bytearray()
            tmp.extend(map(ord, os.path.basename(description_filename)))
            tmp_size = len(tmp) if len(tmp) < 169 else 169
            user_metadata[340: 340 + tmp_size] = tmp[0: tmp_size]
            # get file contents, truncate data if required
            try:
                with open(description_filename, 'r', encoding='utf8') as f:
                    text = f.read()
                    text_tmp = bytearray()
                    text_tmp.extend(map(ord, text))
                    data_tmp = np.array(text_tmp)
                    data = data_tmp.view(np.int8)
                    data_size = len(data) if len(data) < 511 else 511
                    user_metadata[1536: 1536 + data_size] = data[0: data_size]
            except OSError as e:
                print(e)
                # TODO: should be a stronger error
                print("Error opening description file.")
        # add user metadata (2048 bytes) to metadata_cmd
        metadata_cmd[user_metadata_index: user_metadata_index + 2048] = user_metadata
        # Metadata command reply: 'c' 'm' 'd' '0x80' + random + error
        metadata_cmd_reply = array.array('b', [0] * (4 + int32_size + int32_size))
        # send metadata_cmd and get its reply
        try:
            res_write = self._dev.write(0x01, metadata_cmd.tobytes(), 100)
        except usb.core.USBError:
            # TODO: we probably should try again
            print("something went wrong while writing to the device")
            return
        assert res_write == len(metadata_cmd)
        try:
            ret = self._dev.read(0x81, metadata_cmd_reply, 1000)
        except usb.core.USBError:
            # TODO: we probably should try again
            print("something went wrong while reading from the device")
            return
        # get the random received and the error received from the reply command
        rand_val_received = int.from_bytes(metadata_cmd_reply[4: 4 + int32_size], byteorder='little', signed=True)
        error_received = int.from_bytes(metadata_cmd_reply[8: 8 + int32_size], byteorder='little', signed=False)
        assert rand_val_received == rand_val[0]
        assert error_received == 0
        # prepare command to send and to receive
        # Data command length: 'c' 'm' 'd' '0x81' + random + dataIndex + 32768 + 'f'
        data_cmd = np.zeros(4 + int32_size + int32_size + 32768 + 1, dtype=np.int8)
        data_cmd_data_index = 4 + int32_size + int32_size
        data_cmd[0] = ord('c')
        data_cmd[1] = ord('m')
        data_cmd[2] = ord('d')
        data_cmd[3] = 0x81
        data_cmd[-1] = ord('f')
        # Data command reply: 'c' 'm' 'd' '0x81' + random + error
        data_cmd_reply = array.array('b', [0] * (4 + int32_size + int32_size))
        # loop to send the rest of the commands, checking the reply for each one
        for i in range(1, commands_to_send):
            # it has to be an np.array of int32 so that we can get a view as int8s
            rand_val = np.random.randint(-32768, 32768, size=1, dtype=np.int32)
            data_cmd[4: 4 + int32_size] = rand_val.view(np.int8)
            # write dataIndex to the data_cmd
            data_cmd[8: 8 + int32_size] = np.array([i], dtype=np.int32).view(np.int8)
            # write data from wave_int to cmd
            wave_idx = i * 32768
            data_block = wave_int8[wave_idx: wave_idx + 32768]
            data_cmd[data_cmd_data_index: data_cmd_data_index + len(data_block)] = data_block
            # send data to device
            try:
                res_write = self._dev.write(0x01, data_cmd.tobytes(), 100)
            except usb.core.USBError:
                # TODO: we probably should try again
                print("something went wrong while writing to the device")
                return
            assert res_write == len(data_cmd)
            try:
                ret = self._dev.read(0x81, data_cmd_reply, 400)
            except usb.core.USBError:
                # TODO: we probably should try again
                print("something went wrong while reading from the device")
                return
            # get the random received and the error received from the reply command
            rand_val_received = int.from_bytes(data_cmd_reply[4: 4 + int32_size], byteorder='little', signed=True)
            error_received = int.from_bytes(data_cmd_reply[8: 8 + int32_size], byteorder='little', signed=False)
            assert rand_val_received == rand_val[0]
            assert error_received == 0

    def __get_metadata_from_device(self, sound_index):
        """Query the board for the metadata of one sound slot; returns None on USB error."""
        int32_size = np.dtype(np.int32).itemsize
        # Read metadata command length: 'c' 'm' 'd' '0x84' + random + soundIndex + 'f'
        read_metadata_cmd = np.zeros(4 + int32_size + int32_size + 1, dtype=np.int8)
        read_metadata_cmd[0] = ord('c')
        read_metadata_cmd[1] = ord('m')
        read_metadata_cmd[2] = ord('d')
        read_metadata_cmd[3] = 0x84
        read_metadata_cmd[-1] = ord('f')
        rand_val = np.random.randint(-32768, 32768, size=1, dtype=np.int32)
        read_metadata_cmd[4: 4 + int32_size] = rand_val.view(np.int8)
        read_metadata_cmd[8: 8 + int32_size] = np.array([sound_index], dtype=np.int32).view(np.int8)
        # prepare to send command and receive the reply
        read_reply_cmd = array.array('b', [0] * (4 + 6 * int32_size + 2048))
        try:
            res_write = self._dev.write(0x01, read_metadata_cmd.tobytes(), 100)
        except usb.core.USBError:
            # TODO: we probably should try again
            print("something went wrong while writing to the device")
            return
        assert res_write == len(read_metadata_cmd)
        try:
            ret = self._dev.read(0x81, read_reply_cmd, 100)
        except usb.core.USBError:
            # TODO: we probably should try again
            print("something went wrong while reading from the device")
            return
        metadata = collections.namedtuple('Metadata',
                                          ['metadata_array', 'description', 'bit_mask', 'sound_length', 'data_type',
                                           'sample_rate', 'sound_filename', 'metadata_filename',
                                           'description_filename',
                                           'has_sound', 'has_metadata', 'has_description'])
        # NOTE(review): the code below sets attributes on the namedtuple *class*
        # itself instead of instantiating it.  It works because callers only read
        # attributes, but a real instance (or types.SimpleNamespace) would avoid
        # the shared class-level state — confirm before changing.
        metadata.metadata_array = array.array('b', [0] * 1024)
        metadata.description = array.array('b', [0] * 512)
        # get the random received and the error received from the reply command
        rand_val_received = int.from_bytes(read_reply_cmd[4: 4 + int32_size], byteorder='little', signed=True)
        error_received = int.from_bytes(read_reply_cmd[8: 8 + int32_size], byteorder='little', signed=False)
        assert rand_val_received == rand_val[0]
        assert error_received == 0
        # bitmask: one bit per slot, set when the slot holds a sound
        metadata.bit_mask = int.from_bytes(read_reply_cmd[12:12 + int32_size + int32_size], byteorder='little',
                                           signed=True)
        metadata.has_sound = metadata.bit_mask & (1 << sound_index) == (1 << sound_index)
        metadata.sound_length = int.from_bytes(read_reply_cmd[16:16 + int32_size], byteorder='little', signed=True)
        metadata.sample_rate = int.from_bytes(read_reply_cmd[20:20 + int32_size], byteorder='little', signed=True)
        metadata.data_type = int.from_bytes(read_reply_cmd[24:24 + int32_size], byteorder='little', signed=True)
        metadata.sound_filename = read_reply_cmd[28:170].tobytes().strip(b'\0')
        metadata.has_metadata = False
        metadata.metadata_filename = ''
        if read_reply_cmd[28 + 170]:
            metadata.has_metadata = True
            metadata.metadata_array[0:1024] = read_reply_cmd[28 + 512:28 + 512 + 1024]
            metadata.metadata_filename = read_reply_cmd[28 + 170: 28 + 170 + 170].tobytes().strip(b'\0')
        metadata.has_description = False
        metadata.description_filename = ''
        if read_reply_cmd[28 + 170 + 170]:
            metadata.has_description = True
            metadata.description[0:512] = read_reply_cmd[28 + 512 + 1024:28 + 512 + 1024 + 512]
            metadata.description_filename = read_reply_cmd[28 + 170 + 170: 28 + 170 + 170 + 170].tobytes().strip(b'\0')
        return metadata
| <filename>pybpod_soundcard_module/module_api.py
import array
import math
import time
import numpy as np
from enum import Enum, IntEnum
from aenum import auto
import os
import collections
import usb.core
import usb.util
from usb.backend import libusb1 as libusb
class SampleRate(IntEnum):
    """
    Enumeration for the sample rate of the sounds in the Sound Card.

    Values are the rates in Hz, so members can be used directly as integers.
    """
    #: 96KHz sample rate
    _96000HZ = 96000
    #: 192KHz sample rate
    _192000HZ = 192000
class DataType(IntEnum):
    """
    Type of the sample data to be sent to the Sound Card.
    """
    #: Integer 32 bits
    # NOTE(review): the trailing comma makes this a 1-tuple; IntEnum unpacks
    # it back into int(0), so the resulting value is 0 and this is harmless.
    INT32 = 0,
    #: Single precision float (not yet supported by the hardware)
    FLOAT32 = 1
class SoundCardErrorCode(Enum):
    """Status/error codes used by the sound-card protocol helpers.

    NOTE(review): the trailing commas make most explicit values 1-tuples
    (e.g. ``OK.value`` is ``(0,)``), and ``auto()`` is imported from ``aenum``
    while this class derives from the stdlib ``Enum`` — those members keep the
    ``aenum.auto`` instance as their value.  This module only compares members
    by identity, so behaviour is unaffected, but ``.value`` is unreliable.
    """
    OK = 0,
    BAD_USER_INPUT = -1,
    HARP_SOUND_CARD_NOT_DETECTED = -1000,
    NOT_ABLE_TO_SEND_METADATA = auto(),
    NOT_ABLE_TO_READ_METADATA_COMMAND_REPLY = auto(),
    METADATA_COMMAND_REPLY_NOT_CORRECT = auto(),
    NOT_ABLE_TO_SEND_DATA = auto(),
    NOT_ABLE_TO_READ_DATA_COMMAND_REPLY = auto(),
    DATA_COMMAND_REPLY_NOT_CORRECT = auto(),
    NOT_ABLE_TO_SEND_READ_METADATA = auto(),
    NOT_ABLE_TO_READ_READ_METADATA_COMMAND_REPLY = auto(),
    READ_METADATA_COMMAND_REPLY_NOT_CORRECT = auto(),
    BAD_SOUND_INDEX = -1020,
    BAD_SOUND_LENGTH = auto(),
    BAD_SAMPLE_RATE = auto(),
    BAD_DATA_TYPE = auto(),
    DATA_TYPE_DO_NOT_MATCH = auto(),
    BAD_DATA_INDEX = auto(),
    PRODUCING_SOUND = -1030,
    STARTED_PRODUCING_SOUND = auto(),
    NOT_ABLE_TO_OPEN_FILE = -1040
class SoundMetadata(object):
    """Header describing one sound upload to the Harp Sound Card.

    Holds the destination index, the length in samples, the sample rate and
    the sample data type; can validate itself and serialize to the 4-int32
    layout the hardware expects.
    """

    def __init__(self, sound_index, sound_length, sample_rate, data_type):
        """
        :param sound_index: Sound index in the soundcard (2 -> 31 since 0 and 1 are reserved)
        :param sound_length: Sound length in number of samples
        :param sample_rate: Sample rate
        :param data_type: 0 for Int32 and 1 for Float32 (not available right now)
        """
        self._sound_index = sound_index
        self._sound_length = sound_length
        self._sample_rate = sample_rate
        self._data_type = data_type

    def check_data(self):
        """Return the first failing SoundCardErrorCode, or OK when all fields are valid."""
        if not (2 <= self._sound_index <= 32):
            return SoundCardErrorCode.BAD_SOUND_INDEX
        if self._sound_length < 16:
            return SoundCardErrorCode.BAD_SOUND_LENGTH
        # Identity checks on purpose: a plain int that merely equals the enum
        # value is rejected, exactly as in the original implementation.
        if not any(self._sample_rate is rate
                   for rate in (SampleRate._96000HZ, SampleRate._192000HZ)):
            return SoundCardErrorCode.BAD_SAMPLE_RATE
        if not any(self._data_type is kind
                   for kind in (DataType.INT32, DataType.FLOAT32)):
            return SoundCardErrorCode.BAD_DATA_TYPE
        # Indexes 0 and 1 are reserved for Float32 data; user slots take Int32.
        required = DataType.FLOAT32 if self._sound_index in (0, 1) else DataType.INT32
        if self._data_type is not required:
            return SoundCardErrorCode.DATA_TYPE_DO_NOT_MATCH
        return SoundCardErrorCode.OK

    def as_array(self):
        """Serialize the metadata as a 4-element int32 NumPy array."""
        fields = (self._sound_index, self._sound_length,
                  self._sample_rate, self._data_type)
        return np.array(fields, dtype=np.int32)
class SoundCardModule(object):
"""
Provides access to the Harp Sound Card. It allows to send and read the sounds in the Sound Card, through a normal
USB connection.
"""
def __init__(self, device=None):
"""
If a libUSB's device is given, it will try to open it. If none is given it will try to connect to the first Sound Card that is connected to the computer.
:param device: (Optional) libUSB device to use. If nothing is passed, it will try to connect automatically.
"""
self._backend = libusb.get_backend()
try:
self._devices = list(usb.core.find(backend=self._backend, idVendor=0x04d8, idProduct=0xee6a, find_all=True))
except OSError as e:
pass
self._dev = self._devices[0] if self._devices else None
self._cfg = None
self._port = None
self._connected = False
self.open(self._dev if device is None else device)
    def __enter__(self):
        # Context-manager support: ``with SoundCardModule() as card: ...``
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the USB handle when leaving the ``with`` block.
        self.close()
    def open(self, device=None):
        """
        Opens the connection to the Sound Card. If no device is given, it will try to connect to the first Sound Card that is connected to the computer.
        :param device: (Optional) Already initialized libUSB's device to use.
        """
        if device is None:
            # No handle supplied: scan the bus for the first card (VID 0x04d8 / PID 0xee6a).
            self._backend = libusb.get_backend()
            try:
                self._dev = usb.core.find(backend=self._backend, idVendor=0x04d8, idProduct=0xee6a)
            except OSError as e:
                self._dev = None
                pass
        else:
            self._dev = device
        if self._dev is None:
            print(
                "Unable to connect to the Sound Card through the USB port. You will be unable to send and receive sounds.")
        else:
            # set the active configuration. With no arguments, the first configuration will be the active one
            # note: some devices reset when setting an already selected configuration so we should check for it before
            self._cfg = self._dev.get_active_configuration()
            if self._cfg is None or self._cfg.bConfigurationValue != 1:
                self._dev.set_configuration(1)
        self._connected = True if self._dev else False
    @property
    def devices(self):
        """All Harp Sound Cards found while scanning the USB bus in ``__init__``."""
        return self._devices
    @property
    def connected(self):
        """True when ``open`` obtained a usable device handle."""
        return self._connected
    def close(self):
        """
        Closes the connection with the Sound Card. It will close USB connection (to read and save sounds)
        """
        # dispose_resources releases the interface and frees the device handle.
        if self._dev:
            usb.util.dispose_resources(self._dev)
    def reset(self):
        """
        Resets the device, waits 700ms and tries to connect again so that the current instance of the SoundCard object can still be used.
        .. note:: Necessary at the moment after sending a sound.
        """
        if not self._dev:
            raise Exception("Sound card might not be connected. Please connect it before any operation.")
        # Reset command length: 'c' 'm' 'd' '0x88' + 'f'
        reset_cmd = [ord('c'), ord('m'), ord('d'), 0x88, ord('f')]
        # cmd = 'cmd' + chr(0x88) + 'f'
        wrt = self._dev.write(1, reset_cmd, 100)
        assert wrt == len(reset_cmd)
        # give the firmware time to reboot before reconnecting
        time.sleep(700.0 / 1000.0)
        self.open()
    def read_sounds(self, output_folder=None, sound_index=None, clean_dst_folder=True):
        """
        Reads sounds from the sound card.
        .. note:: by default, it will clear the destination folder of all data. It will also write by default to a
        "from_soundcard" folder in the working directory if none is given.
        :param output_folder: Destination folder's path.
        :param sound_index: If a sound_index is given, it will get only that sound, if nothing is passed it will gather all sounds from all indexes.
        :param clean_dst_folder: Flag that defines if the method should clean the destination folder or not
        """
        if not self._dev:
            raise Exception("Sound card might not be connected. Please connect it before any operation.")
        # admit that if the output_folder is None, write inside a 'from_soundcard' folder in the current directory
        if not output_folder:
            output_folder = os.path.join(os.getcwd(), 'from_soundcard')
            if not os.path.isdir(output_folder):
                os.makedirs(output_folder)
        else:
            # create folder if it doesn't exists
            if not os.path.exists(output_folder):
                os.makedirs(output_folder)
        if clean_dst_folder:
            for file in os.listdir(output_folder):
                file_path = os.path.join(output_folder, file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except Exception as e:
                    # probably a permissions error while deleting, ignore and try the next one
                    print("Error occurred when deleting file '{file_path}'. Ignoring error and continuing.".format(file_path=file_path))
                    continue
        if sound_index is None:
            # slots 0 and 1 are reserved, so user sounds live in 2..31
            for i in range(2, 32):
                self._from_soundcard(output_folder, i)
        else:
            self._from_soundcard(output_folder, sound_index)
        print("All files read!")
    def send_sound(self, wave_int, sound_index, sample_rate, data_type,
                   sound_filename=None, metadata_filename=None,
                   description_filename=None):
        """
        This method will send the sound to the Harp Sound Card as a byte array (int8)
        :param wave_int: NumPy array as int32 that represents the sound data
        :param sound_index: The destination index in the Sound Card (>=2 and <= 32)
        :param sample_rate: The SampleRate enum value for either 96KHz or 192KHz
        :param data_type: The DataType enum value for either Int32 or Float32 (not implemented yet in the hardware)
        :param sound_filename: The name of the sound filename to be saved with the sound in the board (str)
        :param metadata_filename: The name of the metadata filename to be saved with the sound in the board (str)
        :param description_filename: The name of the description filename to be saved with the sound in the board (str)
        """
        # Thin public wrapper around the private USB upload routine.
        self._to_soundcard(wave_int, sound_index, sample_rate, data_type, sound_filename, metadata_filename,
                           description_filename)
def _from_soundcard(self, output_folder=None, sound_index=None):
"""
Reads sounds from the sound card.
:param output_folder: Destination folder's path.
:param sound_index: If a sound_index is given, it will get only that sound, if nothing is passed it will gather all sounds from all indexes.
"""
if not self._dev:
raise Exception("Sound card might not be connected. Please connect it before any operation.")
if sound_index is None or sound_index < 2 or sound_index > 31:
raise Exception("sound_index must have a value between 2 and 31")
metadata = self.__get_metadata_from_device(sound_index)
if metadata is None:
raise Exception('SoundCardModule: Error while getting metadata from device')
# define prefix
prefix = 'i'
if sound_index < 9:
prefix += '0' + str(sound_index) + '_'
else:
prefix += str(sound_index) + '_'
sound_filename = metadata.sound_filename.decode('utf-8')
metadata_filename = metadata.metadata_filename.decode('utf-8') if metadata.metadata_filename else None
description_filename = metadata.description_filename.decode(
'utf-8') if metadata.description_filename else None
if prefix not in sound_filename:
sound_filename = prefix + sound_filename
if metadata_filename and prefix not in metadata_filename:
metadata_filename = prefix + metadata_filename
if description_filename and prefix not in description_filename:
description_filename = prefix + description_filename
if metadata.has_sound:
with open(os.path.join(output_folder, sound_filename), 'w', encoding='utf8') as f:
# TODO: read the sound so we can write it here
f.write('TODO')
if metadata.has_metadata:
with open(os.path.join(output_folder, metadata_filename), 'wb') as f:
# clean the zeros at the end
f.write(metadata.metadata_array.tobytes().strip(b'\0'))
if metadata.has_description:
with open(os.path.join(output_folder, description_filename), 'wb') as f:
f.write(metadata.description.tobytes().strip(b'\0'))
# create summary info file
if metadata.has_sound:
with open(os.path.join(output_folder, sound_filename + '.metadata.txt'), 'w') as f:
f.write('SOUND_INDEX = ' + str(sound_index))
used_pos = math.ceil(metadata.sound_length / (33554432.0 * 2.0 / 32.0)) - 1
if used_pos > 0:
f.write(", ")
f.write(", ".join(str(sound_index + idx + 1) for idx in range(used_pos)))
f.write("\n")
f.write("TOTAL_SAMPLES = " + str(metadata.sound_length) + "\n")
f.write(
"TOTAL_LENGTH_MS = " + str(int(metadata.sound_length / 2 / metadata.sample_rate * 1000)) + "\n")
f.write("SAMPLE_RATE = " + str(metadata.sample_rate) + "\n")
if metadata.data_type == 0:
f.write("DATA_TYPE = Int32\n")
else:
f.write("DATA_TYPE = Float32\n")
f.write("SOUND_FILENAME = " + sound_filename + "\n")
if metadata.has_metadata:
f.write("USER_METADATA_FILENAME = " + metadata_filename + "\n")
if metadata.has_description:
f.write("USER_DESCRIPTION_FILENAME = " + description_filename + "\n")
def _to_soundcard(self, wave_int, sound_index, sample_rate, data_type,
sound_filename=None, metadata_filename=None,
description_filename=None):
"""
This method will send the sound to the Harp Sound Card as a byte array (int8)
:param wave_int: NumPy array as int32 that represents the sound data
:param sound_index: The destination index in the Sound Card (>=2 and <= 32)
:param sample_rate: The SampleRate enum value for either 96KHz or 192KHz
:param data_type: The DataType enum value for either Int32 or Float32 (not implemented yet in the hardware)
:param sound_filename: The name of the sound filename to be saved with the sound in the board (str)
:param metadata_filename: The name of the metadata filename to be saved with the sound in the board (str)
:param description_filename: The name of the description filename to be saved with the sound in the board (str)
"""
# confirm that the dev exists and is ready
if not self._dev:
raise EnvironmentError(
'Sound card not initialized. Please call the initialize method before any operation.')
int32_size = np.dtype(np.int32).itemsize
# work with a int8 view of the wave_int (which is int32)
wave_int8 = wave_int.view(np.int8)
# get number of commands to send
sound_file_size_in_samples = len(wave_int8) // 4
commands_to_send = int(sound_file_size_in_samples * 4 // 32768 + (
1 if ((sound_file_size_in_samples * 4) % 32768) is not 0 else 0))
# Metadata command length: 'c' 'm' 'd' '0x80' + random + metadata + 32768 + 2048 + 'f'
metadata_cmd_header_size = 4 + int32_size + (4 * int32_size)
metadata_cmd = np.zeros(metadata_cmd_header_size + 32768 + 2048 + 1, dtype=np.int8)
metadata_cmd[0] = ord('c')
metadata_cmd[1] = ord('m')
metadata_cmd[2] = ord('d')
metadata_cmd[3] = 0x80
metadata_cmd[-1] = ord('f')
rand_val = np.random.randint(-32768, 32768, size=1, dtype=np.int32)
# copy that random data
metadata_cmd[4: 4 + int32_size] = rand_val.view(np.int8)
# create metadata info and add it to the metadata_cmd
metadata = SoundMetadata(sound_index, sound_file_size_in_samples, sample_rate, data_type)
if metadata.check_data() is not SoundCardErrorCode.OK:
print("Input data incorrect, please correct it before proceeding.")
return
metadata_cmd[8: 8 + (4 * int32_size)] = metadata.as_array().view(np.int8)
# add first data block of data to the metadata_cmd
metadata_cmd_data_index = metadata_cmd_header_size
metadata_cmd[metadata_cmd_data_index: metadata_cmd_data_index + 32768] = wave_int8[0: 32768]
# prepare user_metadata
# [0:169] sound_filename
# [170:339] metadata_filename
# [340:511] description_filename
# [512:1535] metadata_filename content
# [1536:2047] description_filename content
user_metadata = np.zeros(2048, dtype=np.int8)
user_metadata_index = metadata_cmd_data_index + 32768
if sound_filename:
tmp = bytearray()
tmp.extend(map(ord, os.path.basename(sound_filename)))
tmp_size = len(tmp) if len(tmp) < 169 else 169
user_metadata[0:tmp_size] = tmp[0:tmp_size]
if metadata_filename:
tmp = bytearray()
tmp.extend(map(ord, os.path.basename(metadata_filename)))
tmp_size = len(tmp) if len(tmp) < 169 else 169
user_metadata[170: 170 + tmp_size] = tmp[0:tmp_size]
# get file contents, truncate data if required
try:
with open(metadata_filename, 'r', encoding='utf8') as f:
text = f.read()
text_tmp = bytearray()
text_tmp.extend(map(ord, text))
data_tmp = np.array(text_tmp)
data = data_tmp.view(np.int8)
data_size = len(data) if len(data) < 1023 else 1023
user_metadata[512: 512 + data_size] = data[0: data_size]
except OSError as e:
# TODO: should be a stronger error
print("Error opening metadata file.")
if description_filename:
tmp = bytearray()
tmp.extend(map(ord, os.path.basename(description_filename)))
tmp_size = len(tmp) if len(tmp) < 169 else 169
user_metadata[340: 340 + tmp_size] = tmp[0: tmp_size]
# get file contents, truncate data if required
try:
with open(description_filename, 'r', encoding='utf8') as f:
text = f.read()
text_tmp = bytearray()
text_tmp.extend(map(ord, text))
data_tmp = np.array(text_tmp)
data = data_tmp.view(np.int8)
data_size = len(data) if len(data) < 511 else 511
user_metadata[1536: 1536 + data_size] = data[0: data_size]
except OSError as e:
print(e)
# TODO: should be a stronger error
print("Error opening description file.")
# add user metadata (2048 bytes) to metadata_cmd
metadata_cmd[user_metadata_index: user_metadata_index + 2048] = user_metadata
# Metadata command reply: 'c' 'm' 'd' '0x80' + random + error
metadata_cmd_reply = array.array('b', [0] * (4 + int32_size + int32_size))
# send metadata_cmd and get it's reply
try:
res_write = self._dev.write(0x01, metadata_cmd.tobytes(), 100)
except usb.core.USBError as e:
# TODO: we probably should try again
print("something went wrong while writing to the device")
return
assert res_write == len(metadata_cmd)
try:
ret = self._dev.read(0x81, metadata_cmd_reply, 1000)
except usb.core.USBError as e:
# TODO: we probably should try again
print("something went wrong while reading from the device")
return
# get the random received and the error received from the reply command
rand_val_received = int.from_bytes(metadata_cmd_reply[4: 4 + int32_size], byteorder='little', signed=True)
error_received = int.from_bytes(metadata_cmd_reply[8: 8 + int32_size], byteorder='little', signed=False)
assert rand_val_received == rand_val[0]
assert error_received == 0
# prepare command to send and to receive
# Data command length: 'c' 'm' 'd' '0x81' + random + dataIndex + 32768 + 'f'
data_cmd = np.zeros(4 + int32_size + int32_size + 32768 + 1, dtype=np.int8)
data_cmd_data_index = 4 + int32_size + int32_size
data_cmd[0] = ord('c')
data_cmd[1] = ord('m')
data_cmd[2] = ord('d')
data_cmd[3] = 0x81
data_cmd[-1] = ord('f')
# Data command reply: 'c' 'm' 'd' '0x81' + random + error
data_cmd_reply = array.array('b', [0] * (4 + int32_size + int32_size))
# loop to send the rest of the commands
# check reply for each command sent
for i in range(1, commands_to_send):
# it has to be as an np.array of int32 so that we can get a view as int8s
rand_val = np.random.randint(-32768, 32768, size=1, dtype=np.int32)
# copy that random data
data_cmd[4: 4 + int32_size] = rand_val.view(np.int8)
# write dataIndex to the data_cmd (2 ints size)
data_cmd[8: 8 + int32_size] = np.array([i], dtype=np.int32).view(np.int8)
# write data from wave_int to cmd
wave_idx = i * 32768
data_block = wave_int8[wave_idx: wave_idx + 32768]
data_cmd[data_cmd_data_index: data_cmd_data_index + len(data_block)] = data_block
# send data to device
try:
res_write = self._dev.write(0x01, data_cmd.tobytes(), 100)
except usb.core.USBError as e:
# TODO: we probably should try again
print("something went wrong while writing to the device")
return
# TODO: we probably should try again
assert res_write == len(data_cmd)
try:
ret = self._dev.read(0x81, data_cmd_reply, 400)
except usb.core.USBError as e:
# TODO: we probably should try again
print("something went wrong while reading from the device")
return
# get the random received and the error received from the reply command
rand_val_received = int.from_bytes(data_cmd_reply[4: 4 + int32_size], byteorder='little', signed=True)
error_received = int.from_bytes(data_cmd_reply[8: 8 + int32_size], byteorder='little', signed=False)
assert rand_val_received == rand_val[0]
assert error_received == 0
def __get_metadata_from_device(self, sound_index):
int32_size = np.dtype(np.int32).itemsize
# Read metadata command length: 'c' 'm' 'd' '0x84' + random + soundIndex + 'f'
read_metadata_cmd = np.zeros(4 + int32_size + int32_size + 1, dtype=np.int8)
read_metadata_cmd[0] = ord('c')
read_metadata_cmd[1] = ord('m')
read_metadata_cmd[2] = ord('d')
read_metadata_cmd[3] = 0x84
read_metadata_cmd[-1] = ord('f')
rand_val = np.random.randint(-32768, 32768, size=1, dtype=np.int32)
# copy that random data
read_metadata_cmd[4: 4 + int32_size] = rand_val.view(np.int8)
read_metadata_cmd[8: 8 + int32_size] = np.array([sound_index], dtype=np.int32).view(np.int8)
# prepare to send command and receive the reply
read_reply_cmd = array.array('b', [0] * (4 + 6 * int32_size + 2048))
try:
res_write = self._dev.write(0x01, read_metadata_cmd.tobytes(), 100)
except usb.core.USBError as e:
# TODO: we probably should try again
print("something went wrong while writing to the device")
return
assert res_write == len(read_metadata_cmd)
try:
ret = self._dev.read(0x81, read_reply_cmd, 100)
except usb.core.USBError as e:
# TODO: we probably should try again
print("something went wrong while reading from the device")
return
metadata = collections.namedtuple('Metadata',
['metadata_array', 'description', 'bit_mask', 'sound_length', 'data_type',
'sample_rate', 'sound_filename', 'metadata_filename',
'description_filename',
'has_sound', 'has_metadata', 'has_description'])
# get data from the reply array
metadata.metadata_array = array.array('b', [0] * 1024)
metadata.description = array.array('b', [0] * 512)
# get the random received and the error received from the reply command
rand_val_received = int.from_bytes(read_reply_cmd[4: 4 + int32_size], byteorder='little', signed=True)
error_received = int.from_bytes(read_reply_cmd[8: 8 + int32_size], byteorder='little', signed=False)
assert rand_val_received == rand_val[0]
assert error_received == 0
# bitmask
metadata.bit_mask = int.from_bytes(read_reply_cmd[12:12 + int32_size + int32_size], byteorder='little',
signed=True)
metadata.has_sound = metadata.bit_mask & (1 << sound_index) == (1 << sound_index)
metadata.sound_length = int.from_bytes(read_reply_cmd[16:16 + int32_size], byteorder='little', signed=True)
metadata.sample_rate = int.from_bytes(read_reply_cmd[20:20 + int32_size], byteorder='little', signed=True)
metadata.data_type = int.from_bytes(read_reply_cmd[24:24 + int32_size], byteorder='little', signed=True)
metadata.sound_filename = read_reply_cmd[28:170].tobytes().strip(b'\0')
metadata.has_metadata = False
metadata.metadata_filename = ''
if read_reply_cmd[28 + 170]:
metadata.has_metadata = True
metadata.metadata_array[0:1024] = read_reply_cmd[28 + 512:28 + 512 + 1024]
metadata.metadata_filename = read_reply_cmd[28 + 170: 28 + 170 + 170].tobytes().strip(b'\0')
metadata.has_description = False
metadata.description_filename = ''
if read_reply_cmd[28 + 170 + 170]:
metadata.has_description = True
metadata.description[0:512] = read_reply_cmd[28 + 512 + 1024:28 + 512 + 1024 + 512]
metadata.description_filename = read_reply_cmd[28 + 170 + 170: 28 + 170 + 170 + 170].tobytes().strip(b'\0')
return metadata
| en | 0.820666 | Enumeration for the Sample rate of the sounds in the Sound Card #: 96KHz sample rate #: 192KHz sample rate Type of the data to be send to the Sound Card #: Integer 32 bits #: Single precision float :param self: :param sound_index: Sound index in the soundcard (2 -> 31 since 0 and 1 are reserved) :param sound_length: Sound length in number of samples :param sample_rate: Sample rate :param data_type: 0 for Int32 and 1 for Float32 (not available right now) Provides access to the Harp Sound Card. It allows to send and read the sounds in the Sound Card, through a normal USB connection. If a libUSB's device is given, it will try to open it. If none is given it will try to connect to the first Sound Card that is connected to the computer. :param device: (Optional) libUSB device to use. If nothing is passed, it will try to connect automatically. Opens the connection to the Sound Card. If no device is given, it will try to connect to the first Sound Card that is connected to the computer. :param device: (Optional) Already initialized libUSB's device to use. # set the active configuration. With no arguments, the first configuration will be the active one # note: some devices reset when setting an already selected configuration so we should check for it before Closes the connection with the Sound Card. It will close USB connection (to read and save sounds) Resets the device, waits 700ms and tries to connect again so that the current instance of the SoundCard object can still be used. .. note:: Necessary at the moment after sending a sound. # Reset command length: 'c' 'm' 'd' '0x88' + 'f' # cmd = 'cmd' + chr(0x88) + 'f' Reads sounds from the sound card. .. note:: by default, it will clear the destination folder of all data. It will also write by default to a "from_soundcard" folder in the working directory if none is given. :param output_folder: Destination folder's path. 
:param sound_index: If a sound_index is given, it will get only that sound, if nothing is passed it will gather all sounds from all indexes. :param clean_dst_folder: Flag that defines if the method should clean the destination folder or not # admit that if the output_folder is None, write inside a 'from_soundcard' folder in the current directory # create folder if it doesn't exists # probably a permissions error while deleting, ignore and try the next one This method will send the sound to the Harp Sound Card as a byte array (int8) :param wave_int: NumPy array as int32 that represents the sound data :param sound_index: The destination index in the Sound Card (>=2 and <= 32) :param sample_rate: The SampleRate enum value for either 96KHz or 192KHz :param data_type: The DataType enum value for either Int32 or Float32 (not implemented yet in the hardware) :param sound_filename: The name of the sound filename to be saved with the sound in the board (str) :param metadata_filename: The name of the metadata filename to be saved with the sound in the board (str) :param description_filename: The name of the description filename to be saved with the sound in the board (str) Reads sounds from the sound card. :param output_folder: Destination folder's path. :param sound_index: If a sound_index is given, it will get only that sound, if nothing is passed it will gather all sounds from all indexes. 
# define prefix # TODO: read the sound so we can write it here # clean the zeros at the end # create summary info file This method will send the sound to the Harp Sound Card as a byte array (int8) :param wave_int: NumPy array as int32 that represents the sound data :param sound_index: The destination index in the Sound Card (>=2 and <= 32) :param sample_rate: The SampleRate enum value for either 96KHz or 192KHz :param data_type: The DataType enum value for either Int32 or Float32 (not implemented yet in the hardware) :param sound_filename: The name of the sound filename to be saved with the sound in the board (str) :param metadata_filename: The name of the metadata filename to be saved with the sound in the board (str) :param description_filename: The name of the description filename to be saved with the sound in the board (str) # confirm that the dev exists and is ready # work with a int8 view of the wave_int (which is int32) # get number of commands to send # Metadata command length: 'c' 'm' 'd' '0x80' + random + metadata + 32768 + 2048 + 'f' # copy that random data # create metadata info and add it to the metadata_cmd # add first data block of data to the metadata_cmd # prepare user_metadata # [0:169] sound_filename # [170:339] metadata_filename # [340:511] description_filename # [512:1535] metadata_filename content # [1536:2047] description_filename content # get file contents, truncate data if required # TODO: should be a stronger error # get file contents, truncate data if required # TODO: should be a stronger error # add user metadata (2048 bytes) to metadata_cmd # Metadata command reply: 'c' 'm' 'd' '0x80' + random + error # send metadata_cmd and get it's reply # TODO: we probably should try again # TODO: we probably should try again # get the random received and the error received from the reply command # prepare command to send and to receive # Data command length: 'c' 'm' 'd' '0x81' + random + dataIndex + 32768 + 'f' # Data command reply: 'c' 'm' 'd' 
'0x81' + random + error # loop to send the rest of the commands # check reply for each command sent # it has to be as an np.array of int32 so that we can get a view as int8s # copy that random data # write dataIndex to the data_cmd (2 ints size) # write data from wave_int to cmd # send data to device # TODO: we probably should try again # TODO: we probably should try again # TODO: we probably should try again # get the random received and the error received from the reply command # Read metadata command length: 'c' 'm' 'd' '0x84' + random + soundIndex + 'f' # copy that random data # prepare to send command and receive the reply # TODO: we probably should try again # TODO: we probably should try again # get data from the reply array # get the random received and the error received from the reply command # bitmask | 2.826815 | 3 |
fpga-rfnoc/testbenches/noc_block_channelizer_tb/shared_tools/python/fp_utils.py | pjvalla/theseus-cores | 9 | 6623998 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: phil
"""
import numpy as np
import binascii
# from cStringIO import StringIO
from io import StringIO
import copy
from mpmath import mp
"""
Quantization vector is of the formed fixed(N, F). Where the first value indicates the
total number of bits and the second number indicates the location of the fractional point.
"""
__version__ = "1.1"
def bit_count(val):
    """
    Fast way to count 1's in a 64 bit integer. Based on Hamming weight.

    Valid for 0 <= val < 2**64.
    """
    val = val - ((val >> 1) & 0x5555555555555555)
    val = (val & 0x3333333333333333) + ((val >> 2) & 0x3333333333333333)
    val = (val + (val >> 4)) & 0xF0F0F0F0F0F0F0F
    # Python ints do not wrap at 64 bits the way the classic C trick assumes,
    # so the multiply's high bytes must be masked off before the shift;
    # without the mask the result is wrong for values >= 2**57.
    return ((val * 0x101010101010101) & 0xFFFFFFFFFFFFFFFF) >> 56
def r_shift(bin_str, new_val):
    """
    Performs a right shift of a binary string, placing the new
    value into the MSB position. The '0b' prefix is preserved when present.
    """
    offset = bin_str.find('b') + 1
    shifted = str(new_val) + bin_str[offset:-1]
    # find() returns -1 when there is no 'b', so offset == 0 means the input
    # carried no '0b' prefix; only re-add the prefix when it was present.
    # (The original `offset != -1` test was always true.)
    if offset != 0:
        shifted = '0b' + shifted
    return shifted
def l_shift(bin_str, new_val):
    """
    Performs a left shift of a binary string, placing the new
    value into the LSB position. The '0b' prefix is preserved when present.
    """
    offset = bin_str.find('b') + 1
    shifted = bin_str[offset + 1:] + str(new_val)
    # find() returns -1 when there is no 'b', so offset == 0 means the input
    # carried no '0b' prefix; only re-add the prefix when it was present.
    # (The original `offset != -1` test was always true.)
    if offset != 0:
        shifted = '0b' + shifted
    return shifted
def lappend(bin_str, str_append):
    """
    Left-appends a binary string with the string specified by str_append.
    The '0b' prefix is re-added when either input carried one.
    """
    offset_a = bin_str.find('b') + 1
    offset_b = str_append.find('b') + 1
    joined = str_append[offset_b:] + bin_str[offset_a:]
    # find() returns -1 when there is no 'b', making each offset 0 in that
    # case; prefix only when at least one input had a prefix.
    # (The original `!= -1` tests were always true.)
    if offset_a != 0 or offset_b != 0:
        joined = '0b' + joined
    return joined
def lappend_udec(int_val, bit_val, num_bits):
    """
    Left-appends int_val with bit_val. bit_val is assumed
    to be one bit. num_bits is the number of bits representing
    the unsigned integer int_val.
    """
    temp = np.floor(int_val / 2) + ((1 << (num_bits - 1)) * bit_val)
    # np.int alias was removed in NumPy 1.24 -- the builtin int is equivalent
    return temp.astype(int)
def collapse_byte(values):
    """
    Pack a bit stream (MSB first) into a list of unsigned byte values.

    Trailing bits that do not fill a complete byte are discarded.
    """
    packed = []
    accum = 0
    for pos, bit in enumerate(values):
        shift = 7 - (pos % 8)
        accum += bit << shift
        if shift == 0:
            # a full byte has been accumulated
            packed.append(accum)
            accum = 0
    return packed
def uint_to_fp(vec, qvec=(16, 15), signed=0, overflow='wrap'):
    """
    Reinterpret raw unsigned integer words as fixed-point values.

    Words above the signed maximum are wrapped to their negative
    two's-complement value, everything is scaled by 2**-frac, and the result
    is handed to ret_fi (defined elsewhere in this module).
    """
    max_int = int(comp_max_value(qvec, signed) * 2 ** qvec[1])
    wrap_mod = max_int + 1
    scaled = []
    for word in vec:
        if word > max_int and signed == 1:
            # wrap into the negative two's-complement range
            word = -1 * (wrap_mod - (word % wrap_mod))
        scaled.append(word * (2 ** -qvec[1]))
    return ret_fi(scaled, qvec=qvec, overflow=overflow, signed=signed)
class range_fi(object):
    """Container for the (min, max, step) of a fixed-point representation."""

    def __init__(self, min_int, max_int, step):
        self.min = min_int
        self.max = max_int
        self.step = step
class Fi(object):
    """
    Simple fixed-point container.

    `vec` holds the already-quantized integer values, `qvec` is the
    (word_width, frac_width) pair, and `overflow` / `signed` record how the
    values were (or should be) quantized.
    """

    def __init__(self, vec, qvec=(16, 15), overflow='wrap', signed=1):
        """
        Simple fixed integer object to hold parameters related to a
        fixed point object.
        """
        self.vec = vec
        self.qvec = qvec
        self.overflow = overflow
        self.signed = signed
        # complex flag derived once from the stored vector
        self.comp = True if np.iscomplexobj(vec) else False

    @property
    def bin(self):
        """
        Converts vector to 2's complement binary strings (via dec_to_bin,
        defined elsewhere in this module).
        """
        num_chars = self.qvec[0]
        if self.comp:
            # np.int alias was removed in NumPy 1.24 -- use builtin int
            real_vals = [dec_to_bin(int(np.real(value)), num_chars) for value in self.vec]
            imag_vals = [dec_to_bin(int(np.imag(value)), num_chars) for value in self.vec]
            return [real_val + (",j" + imag_val) for (real_val, imag_val) in zip(real_vals, imag_vals)]
        else:
            return [dec_to_bin(value, num_chars) for value in self.vec]

    @property
    def udec(self):
        """
        Unsigned decimal view: negative components are wrapped modulo
        2**word_width.
        """
        values = copy.deepcopy(self.vec)
        max_int = int(comp_max_value(self.qvec, 0) * 2 ** self.qvec[1])
        real_vals = np.real(values)
        neg_idx = (real_vals < 0)
        real_vals[neg_idx] += (max_int + 1)
        if self.comp:
            imag_vals = np.imag(values)
            neg_idx = (imag_vals < 0)
            imag_vals[neg_idx] += (max_int + 1)
            return (real_vals + 1j * imag_vals)
        return real_vals

    @property
    def hex(self):
        """
        Converts vector to 2's complement hexadecimal strings (via
        dec_to_hex, defined elsewhere in this module).
        """
        num_chars = int(np.ceil(self.qvec[0] / 4.))
        if self.comp:
            # np.int alias was removed in NumPy 1.24 -- use builtin int
            real_vals = dec_to_hex(np.real(self.vec).astype(int), num_chars)
            imag_vals = dec_to_hex(np.imag(self.vec).astype(int), num_chars)
            return [real_val + (",j" + imag_val) for (real_val, imag_val) in zip(real_vals, imag_vals)]
        else:
            return dec_to_hex(self.vec, num_chars)

    @property
    def len(self):
        """Number of elements (kept for backward compatibility with callers)."""
        return (len(self.vec))

    def __len__(self):
        return (len(self.vec))

    @property
    def float(self):
        """Floating-point view of the stored integers: vec * 2**-frac."""
        return (self.vec * 2. ** (-self.qvec[1]))

    @property
    def max_float(self):
        return np.max(self.float)

    @property
    def max_udec(self):
        return np.max(self.udec)

    @property
    def min_udec(self):
        return np.min(self.udec)

    @property
    def min_float(self):
        return np.min(self.float)

    @property
    def max(self):
        return np.max(self.vec)

    @property
    def min(self):
        return np.min(self.vec)

    @property
    def range(self):
        """range_fi object describing the representable (min, max, step)."""
        min_int = comp_min_value(self.qvec, self.signed)
        max_int = comp_max_value(self.qvec, self.signed)
        step = comp_slope_value(self.qvec)
        return range_fi(min_int, max_int, step)

    def __getslice__(self, i, j):
        # Legacy Python 2 slicing hook; never invoked on Python 3.
        # Kept only for backward compatibility.
        return self.vec[i:j]

    def gen_full_data(self):
        """Replace vec with every representable value of the current format."""
        range_obj = self.range
        vec = np.arange(range_obj.min, range_obj.max, range_obj.step)
        # np.int alias was removed in NumPy 1.24 -- use builtin int
        self.vec = (vec * (2 ** self.qvec[1])).astype(int)

    def __repr__(self):
        c_str = StringIO()
        c_str.write('    qvec : {}\n'.format(self.qvec))
        c_str.write('overflow : {}\n'.format(self.overflow))
        c_str.write('  signed : {}\n'.format(self.signed))
        c_str.seek(0)
        return c_str.getvalue()
def coe_write(fi_obj, radix=16, file_name=None, filter_type=False):
    """
    Function takes a fixed point vector as input and generates a Xilinx
    compatibily .coe file for ROM/RAM initialization.

    ==========
    Parameters
    ==========

        * fi_obj : fixed integer object
            Fixed Point object generated by fixed point toolbox.
        * radix : int (16)
            Radix used for formatting .coe file (2, 8, 10, or 16).
        * file_name : str
            File name used for outputting file to correct location
            and name; '.coe' is appended when no extension is present.
        * filter_type : bool (False)
            When True, emit the FIR-coefficient header instead of the
            memory-initialization header.

    =======
    Returns
    =======

        Correctly formatted .coe file for use by Xilinx coregenerator
        modules.
    """
    fi_vec = fi_obj.vec
    signed = fi_obj.signed
    word_length = fi_obj.qvec[0]
    fraction_length = fi_obj.qvec[1]

    assert (file_name is not None), 'User must specify File Name'
    # find last forward slash so the extension check ignores directory names
    idx = str(file_name[::-1]).find('/')
    idx = 0 if idx == -1 else len(file_name) - 1 - idx
    if (str(file_name).find('.', idx) == -1):
        file_name = file_name + '.coe'

    assert (radix in (2, 8, 10, 16)), 'Radix must of the following: 2, 8, 10, 16'

    # modulus used to wrap negative values into two's complement;
    # one padded format spec per radix (radix 10 is plain decimal)
    mod_fac = (1 << word_length)
    if radix == 16:
        format_str = '0{}X'.format(int(np.ceil(word_length / 4.)))
    elif radix == 8:
        format_str = '0{}o'.format(int(np.ceil(word_length / 3.)))
    elif radix == 2:
        format_str = '0{}b'.format(word_length)
    else:
        format_str = None

    with open(file_name, 'w') as f:
        f.write('; Initialization File : \n')
        if signed:
            f.write('; Signed Fixed Point\n')
        else:
            f.write('; Unsigned Fixed Point\n')

        f.write('; Word Length : %d\n' % word_length)
        f.write('; Fraction Length : %d\n' % fraction_length)
        f.write('; Number of Entries : %d\n\n' % len(fi_vec))
        if (filter_type is False):
            f.write('memory_initialization_radix = ' + str(radix) + ';\n')
            f.write('memory_initialization_vector = ' + '\n')
        else:
            f.write('Radix = ' + str(radix) + ';\n')
            f.write('Coefficient_Width = %d;\n' % word_length)
            f.write('CoefData = \n')

        for (ii, val) in enumerate(fi_vec):
            if format_str is None:
                temp = str(val)
            else:
                temp = format((val + mod_fac) % mod_fac, format_str)
            f.write(temp)
            # comma between entries, semicolon terminates the final entry
            f.write(';' if ii == (len(fi_vec) - 1) else ',\n')
def comp_frac_width(value, word_width, signed=0):
    """
    Computes the optimal fractional width for `value` given a total
    word width of `word_width` bits.
    """
    shift_val = -1
    scaled = value
    msb_pos = ret_num_bitsU(np.max(np.abs(scaled)))
    # scale purely fractional values up until they reach the integer range
    while msb_pos < 0:
        scaled = scaled * 2
        shift_val += 1
        msb_pos = ret_num_bitsU(np.max(np.abs(scaled)))
    if msb_pos >= shift_val:
        shift_val = -msb_pos
    return word_width - signed + shift_val
def comp_min_value(qvec, signed=0):
    """
    Computes the minimum real value for the given fixed-point
    representation (0 for unsigned formats).
    """
    if signed == 0:
        return 0
    word_width = qvec[0]
    frac_width = qvec[1]
    return -1 * 2.**(word_width - signed) / (2.**frac_width)
def comp_max_value(qvec, signed=0):
    """
    Computes the maximum real value for the given fixed-point
    representation, qvec.
    """
    word_width = qvec[0]
    frac_width = qvec[1]
    # full-scale positive value minus one LSB
    return 2.**(word_width - signed) / (2.**frac_width) - 2.**(-frac_width)
def comp_slope_value(qvec):
    """
    Returns the real-value increment per unit increase of the underlying
    binary number (one LSB).
    """
    return 2. ** (-qvec[1])
def comp_range_vec(qvec, signed=0):
    """
    Returns every representable real value for the given fixed-point
    representation, in increasing order.
    """
    step = comp_slope_value(qvec)
    lo = comp_min_value(qvec, signed)
    hi = comp_max_value(qvec, signed)
    return np.arange(lo, hi + step, step)
def hex_to_ascii(hex_val):
    """
    Converts a hex string (optionally '0x'-prefixed) to its raw byte string.
    """
    start = hex_val.find('x') + 1
    return binascii.unhexlify(hex_val[start:])
def str_to_dec(str_val, base=2, signed_val=True):
    """
    Converts numerical string(s) to decimal representation.

    Can take a single value or a vector; complex or real.  base 2 : binary,
    base 8 : octal, base 16 : hexadecimal.  When signed_val is True the
    strings are interpreted as two's complement.
    """
    if (not isinstance(str_val, np.ndarray)):
        val_int = np.atleast_1d(str_val)
    else:
        val_int = str_val.copy()

    # coordinates of the first element; its formatting is assumed to be
    # representative of the whole array
    fl = val_int.flat
    sub_idx = fl.coords
    complex_vals = (val_int[sub_idx][-1] == 'j')
    # np.complex / np.int aliases were removed in NumPy 1.24 -- use builtins
    if complex_vals:
        ret_vals = np.zeros(val_int.shape, dtype=complex)
    else:
        ret_vals = np.zeros(val_int.shape, dtype=int)

    num_chars = len(val_int[sub_idx])
    if complex_vals:
        # integer division: these values are used as slice bounds, and float
        # bounds raise TypeError on Python 3
        num_chars = (len(val_int[sub_idx]) - 4) // 2
        imag_lidx = num_chars + 3
        imag_ridx = len(val_int[sub_idx]) - 1

    if signed_val is False:
        if complex_vals:
            for (sub_idx, value) in np.ndenumerate(val_int):
                ret_vals[sub_idx] = int(value[0:num_chars], base)
                ret_vals[sub_idx] += 1j * int(value[imag_lidx:imag_ridx], base)
        else:
            for (sub_idx, value) in np.ndenumerate(val_int):
                ret_vals[sub_idx] = int(value, base)
    else:
        # position of the first bit (just past any '0b' prefix); the sign bit
        # of an N-bit field carries weight -2**N in two's complement
        offset = val_int[sub_idx].find('b') + 1
        corr_fac = 2 ** (num_chars - offset)
        if complex_vals:
            offsetI = imag_lidx + 2
        for (sub_idx, value) in np.ndenumerate(val_int):
            ret_vals[sub_idx] = int(value[0:num_chars], base)
            if (value[offset] == '1'):
                ret_vals[sub_idx] -= corr_fac
            if complex_vals:
                temp = int(value[imag_lidx:imag_ridx], base)
                if (value[offsetI] == '1'):
                    temp -= corr_fac
                ret_vals[sub_idx] += 1j * temp

    return ret_vals[0] if (ret_vals.size == 1) else ret_vals
def dec_to_list(dec_val, num_bits):
    """
    Converts a decimal value to a list of 1's and 0's, zero-padded on the
    left to num_bits entries.
    """
    bits = '{0:b}'.format(dec_val).zfill(num_bits)
    return [int(ch) for ch in bits]
def bin_array_to_uint(data_vec):
    """
    Converts a 1 / 0 array to unsigned integer(s) representing
    constellation indices.

    Each binary vector that is to be converted to an unsigned number
    lies on each row of the input.  A single row returns a scalar.
    """
    data_int = np.atleast_2d(data_vec)
    ret_val = []
    for vec in data_int:
        sum_value = 0
        for idx, bin_bit in enumerate(reversed(vec)):
            if bin_bit == 1:
                # Python ints are exact at any width, so a plain shift
                # replaces the previous mpmath (mp.power) round-trip and
                # avoids mutating global mp.prec state.
                sum_value += 1 << idx
        ret_val.append(sum_value)

    if len(ret_val) == 1:
        ret_val = ret_val[0]
    return ret_val
def bin_to_udec(bin_vec):
    """
    Vectorized conversion of binary strings to unsigned decimal integers.
    """
    # a named function instead of an assigned lambda (PEP 8 / E731)
    def conv(bin_str):
        return int(bin_str, 2)
    return np.vectorize(conv)(bin_vec)
def nextpow2(i):
    """
    Returns the smallest n such that 2**n is equal to or greater than i.
    """
    exponent = 0
    while (2 ** exponent) < i:
        exponent += 1
    return exponent
def ret_bits_comb(value):
    """
    Returns the number of bits needed to index `value` distinct combinations.
    """
    return int(np.ceil(np.log2(value)))
def ret_num_bitsU(value):
    """
    Returns the required number of bits for an unsigned binary
    representation of value.
    """
    val_new = np.floor(value)
    if value == 0:
        return 1
    # +.5 nudges exact powers of two up to the next integer bit count
    temp = np.ceil(np.log2(np.abs(val_new + .5)))
    # np.int alias was removed in NumPy 1.24 -- use builtin int
    return temp.astype(int)
def ret_num_bitsS(value):
    """Bits required for the 2's-complement representation of *value*."""
    if value < 0:
        return ret_num_bitsU(np.abs(value) - 1)
    return ret_num_bitsU(value) + 1
def bin_to_bool(string):
    """
    Convert a binary string of '1'/'0' characters into a boolean
    ndarray (True for each '1').

    Fix: the dtype was ``np.bool``, an alias removed in NumPy 1.24;
    the built-in ``bool`` is the supported spelling.
    """
    bool_array = np.zeros((len(string),), dtype=bool)
    for (ii, val) in enumerate(string):
        bool_array[ii] = (val == '1')
    return bool_array
def init_str_array(num_chars, array_shape, compType=False):
    """Return an ndarray of blank strings, each *num_chars* wide."""
    blank = ' ' * num_chars
    if len(array_shape) == 1:
        rows = [blank for _ in range(array_shape[0])]
    else:
        rows = [[blank for _ in range(array_shape[1])]
                for _ in range(array_shape[0])]
    return np.array(rows)
def flip_bin_vec(bin_str):
    """Reverse the bit order of a binary string, keeping any '0b' prefix."""
    body_start = bin_str.find('b') + 1
    return bin_str[:body_start] + bin_str[body_start:][::-1]
def xor_vec(in_val, mask_vec):
    """Parity (XOR reduction) of the bits of ``in_val & mask_vec``."""
    masked = in_val & mask_vec
    return bin(masked).count('1') & 1
def xor_list(prim_list, sec_list):
    """Element-wise XOR of two bit lists (truncates to the shorter)."""
    return [a ^ b for a, b in zip(prim_list, sec_list)]
def parity_list(list_val, init_value=0):
    """XOR-fold a list of 1/0 values, seeded with *init_value*."""
    result = init_value
    for bit in list_val:
        result ^= bit
    return result
def list_to_bin(list_val):
    """Convert row(s) of 1/0 values to '0b...' strings, one per row."""
    rows = np.atleast_2d(np.array(list_val))
    return ['0b' + ''.join(bin(v)[2] for v in row) for row in rows]
def list_to_oct(list_val, num_chars=None):
    """
    Convert a list of 1's and 0's to an unsigned octal string.

    Fixes:
      * Python 2 leftover: ``oct(dec_val)[1:]`` keeps the 'o' of
        Python 3's '0o' prefix, corrupting the output.  Digits are now
        produced with ``format(..., 'o')``.
      * Each 3-bit group is parsed base-2 directly instead of round-
        tripping through list_to_uint.
    """
    num_base_chars = int(np.ceil(len(list_val) / 3.))
    num_bits = 3 * num_base_chars
    if num_chars is not None:
        num_bits = num_chars * 3
    remain = len(list_val) % num_bits
    # left-pad with zeros so the length is a multiple of the group size
    pad = int(np.sign(remain) * num_bits - remain)
    padded = [0] * pad + list(list_val)
    ret_str = ''
    for vec in np.reshape(padded, (-1, 3)):
        group = int(''.join(str(int(b)) for b in vec), 2)
        ret_str += format(group, 'o')
    return ret_str[-num_base_chars:]
def list_to_hex(list_val, num_chars=None):
    """Convert a list of 1's and 0's to an unsigned hex string ('0x...')."""
    num_base_chars = int(np.ceil(len(list_val) / 4.))
    num_bits = 4 * num_base_chars
    if num_chars is not None:
        num_bits = num_chars * 4
    remain = len(list_val) % num_bits
    # left-pad with zeros to a whole number of nibbles
    pad = int(np.sign(remain) * num_bits - remain)
    padded = [0] * pad + list(list_val)
    digits = ''
    for nibble in np.reshape(padded, (-1, 4)):
        value = int(''.join(str(int(b)) for b in nibble), 2)
        digits += hex(value)[2:]
    return '0x' + digits[-num_base_chars:]
def list_to_uint(list_val):
    """Convert row(s) of 1's and 0's to unsigned integer(s)."""
    rows = np.atleast_2d(np.array(list_val))
    results = [int(''.join(str(int(b)) for b in row), 2) for row in rows]
    return results if len(results) > 1 else results[0]
def hex_to_list_vec(hex_str, num_bits=None):
    """
    Convert a hex string (or list of hex strings) to list(s) of 1's and
    0's, optionally left-padded with zeros to *num_bits*.
    """
    def convert_one(text):
        text = text[text.find('x') + 1:]
        bits = []
        for ch in text:
            nibble = format(int(ch, 16), '04b')
            bits.extend(int(b) for b in nibble)
        if num_bits is None:
            return bits
        return [0] * (num_bits - len(bits)) + bits

    if isinstance(hex_str, str):
        return convert_one(hex_str)
    return [convert_one(item) for item in hex_str]
def uint_to_list(dec_val, num_bits=8):
    """Convert an unsigned integer to a list of 1's and 0's, *num_bits* wide."""
    bit_str = format(dec_val, '0{}b'.format(num_bits))
    return [int(ch) for ch in bit_str]
def dec_to_ubin(dec_val, num_bits):
    """Unsigned binary string of *dec_val*, zero-padded to *num_bits*."""
    return format(dec_val, 'b').zfill(num_bits)
def dec_to_bin(dec_val, num_bits):
    """Signed 2's-complement binary string of *dec_val*, *num_bits* wide."""
    wrap = 1 << num_bits
    return format((dec_val + wrap) % wrap, '0{}b'.format(num_bits))
def dec_to_hex(dec_vals, num_chars):
    """Uppercase 2's-complement hex strings for scalar or sequence input."""
    if type(dec_vals) is not list and type(dec_vals) is not np.ndarray:
        dec_vals = [dec_vals]
    wrap = 1 << (num_chars * 4)
    fmt = '0{}X'.format(num_chars)
    return [format((value + wrap) % wrap, fmt) for value in dec_vals]
def oct_to_udec(oct_str):
    """Decimal value of an octal string."""
    return int(oct_str, base=8)
def hex_to_ubin(hex_str, num_bits):
    """Unsigned binary string of a hex string, zero-padded to *num_bits*."""
    value = int(hex_str, 16)
    return format(value, '0{}b'.format(num_bits))
def oct_to_ubin(oct_str, num_bits):
    """Unsigned binary string of an octal string, zero-padded to *num_bits*."""
    value = int(oct_str, 8)
    return format(value, '0{}b'.format(num_bits))
def oct_to_list(oct_str, num_bits):
    """Octal string -> list of 1's and 0's, *num_bits* wide."""
    bits = format(int(oct_str, 8), '0{}b'.format(num_bits))
    return [int(ch) for ch in bits]
def hex_to_udec(hex_str):
    """Decimal value of a hexadecimal string."""
    return int(hex_str, base=16)
def hex_to_dec(hex_str):
    """Signed (2's complement) decimal value of a hexadecimal string."""
    base = 16
    return str_to_dec(hex_str, base, signed_val=True)
# def comp_frac_width(value, word_width, signed=0):
#
# shift_val = -1
# temp_val = value
# bit_shift = ret_num_bitsU(np.max(np.abs(temp_val)))
# while bit_shift < 0:
# temp_val = temp_val * 2
# shift_val += 1
# bit_shift = ret_num_bitsU(np.max(np.abs(temp_val)))
# if (bit_shift >= shift_val):
# shift_val = -bit_shift
# frac_width = word_width - signed + shift_val
# return frac_width
def ret_fi(vec, qvec=(16, 15), overflow='wrap', signed=1):
    """
    Build a fixed-point (Fi) object from *vec*.  Complex input is
    quantized component-wise and recombined into one complex Fi.
    """
    if not np.iscomplexobj(vec):
        return ret_dec_fi(vec, qvec, overflow, signed)
    re_fi = ret_dec_fi(vec.real, qvec, overflow, signed)
    im_fi = ret_dec_fi(vec.imag, qvec, overflow, signed)
    return Fi(re_fi.vec + 1j * im_fi.vec, qvec, overflow, signed)
def ret_flat_fi(vec, qvec=(16, 15), overflow='wrap', signed=1):
    """
    Pack a complex vector into a single unsigned Fi of width 2*qvec[0]
    (real part in the high word, imaginary in the low word); real input
    falls through to ret_dec_fi unchanged.
    """
    if not np.iscomplexobj(vec):
        return ret_dec_fi(vec, qvec, overflow, signed)
    re_fi = ret_dec_fi(vec.real, qvec, overflow, signed)
    im_fi = ret_dec_fi(vec.imag, qvec, overflow, signed)
    packed = (re_fi.udec << qvec[0]) + im_fi.udec
    return Fi(packed, (qvec[0] * 2, 0), overflow, signed=0)
def ret_dec_fi(vec, qvec=(16, 15), overflow='wrap', signed=1):
    """
    Quantize a real vector into a fixed-point Fi object.

    *qvec* is (word_width, frac_width): values are scaled by
    2**frac_width, rounded, then saturated or wrapped into the
    representable integer range.

    Fix: the final cast used ``astype(np.int)``; the ``np.int`` alias
    was removed in NumPy 1.24, crashing on modern NumPy.  ``astype(int)``
    is the supported equivalent.
    """
    fraction_width = qvec[1]
    temp = np.around(np.array(vec) * 2.**fraction_width, decimals=0)
    temp = np.atleast_1d(temp)
    min_int = comp_min_value(qvec, signed) * 2. ** fraction_width
    max_int = comp_max_value(qvec, signed) * 2. ** fraction_width
    if signed == 0 and str.lower(overflow) == 'wrap':
        # unsigned wrap: let negative values wrap on the asymmetric
        # positive number line
        min_int = max_int + 1
    if str.lower(overflow) == 'saturate':
        idx = (temp >= max_int)
        if np.any(idx):
            temp[idx] = max_int
        idx = (temp <= min_int)
        if np.any(idx):
            temp[idx] = min_int
    if str.lower(overflow) == 'wrap':
        idx = (temp > max_int)
        if np.any(idx):
            temp[idx] = temp[idx] % max_int
        idx = (temp < min_int)
        if np.any(idx):
            temp[idx] = temp[idx] % min_int
    temp = temp.flatten()
    return Fi(temp.astype(int), qvec, overflow, signed)
def concat_fi(first_fi, second_fi):
    """
    Bitwise concatenation of two fi objects (first operand occupies the
    high bits).  Both are treated as unsigned; the result is unsigned
    with quantization (total_bits, 0) -- the only format that makes
    sense for raw bit concatenation.

    Fix: the high word was shifted by its OWN width (nbits0); to make
    room for the low word it must be shifted by the width of the SECOND
    operand, otherwise the fields overlap whenever the widths differ.
    """
    nbits0 = first_fi.qvec[0]
    nbits1 = second_fi.qvec[0]
    total_bits = nbits0 + nbits1
    new_dec = (first_fi.udec << nbits1) + second_fi.udec
    return ret_dec_fi(new_dec, (total_bits, 0), signed=0)
def stack_fi(first_fi, second_fi):
    """
    Stack the unsigned values of two equal-width fi objects into one
    unsigned fi object with quantization (word_length, 0).
    """
    width_a = first_fi.qvec[0]
    width_b = second_fi.qvec[0]
    assert (width_a == width_b), 'Both fi objects must have the same word length'
    combined = np.concatenate((first_fi.udec, second_fi.udec))
    return ret_dec_fi(combined, (width_a, 0), signed=0)
# def add_fi(first_term, sec_term):
# """
# Method is used to perform a trial addition of two fi objects.
# Simply uses the fi_math and numeric_types to generate a new fi object
# with 0 as its data.
#
# Commonly used to determine Integer and Fractional bit widths at the
# output of a fixed point multiplier.
#
# ==========
# Parameters
# ==========
#
# * first_term : (fi Object):
# First fi object used in the multiplication check.
# * sec_term : (fi Object)
# Second fi object used in the multiplication check
#
# =======
# Returns
# =======
#
# * out : (fi Object):
# Returns new fi object -- output of multiplying first and
# second input terms.
# """
# if (not isinstance(sec_term, Fi)):
# sec_term = ret_dec_fi(sec_term)
# if (not isinstance(first_term, Fi)):
# first_term = ret_dec_fi(first_term)
#
# num_type_first = first_term.numeric_type
# num_type_sec = sec_term.numeric_type
#
# first_term = fi(0, numeric_type=num_type_first, sign_val=0)
# sec_term = fi(0, numeric_type=num_type_sec, sign_val=0)
#
# new_obj = first_term + sec_term
# return new_obj
#
#
def mult_fi(first_term, sec_term, use_data=False):
    """
    Perform a trial multiplication of two fi objects and return a new fi
    object (with zeroed data) whose quantization describes the product.

    Commonly used to determine integer and fractional bit widths at the
    output of a fixed point multiplier.

    ==========
    Parameters
    ==========

    * first_term : (fi Object):
        First fi object used in the multiplication check.  Non-Fi
        inputs are converted via ret_dec_fi.
    * sec_term : (fi Object)
        Second fi object used in the multiplication check.
    * use_data : bool (False)
        When True, size the integer bits from the actual min/max data
        held by the operands; when False, use full-precision widths
        (sum of the operand word lengths).

    =======
    Returns
    =======

    * out : (fi Object):
        New fi object -- the quantization of first * second.
    """
    if (not isinstance(sec_term, Fi)):
        sec_term = ret_dec_fi(sec_term)
    if (not isinstance(first_term, Fi)):
        first_term = ret_dec_fi(first_term)
    # product fractional bits add; result is signed if either input is
    frac_length = first_term.qvec[1] + sec_term.qvec[1]
    signed = first_term.signed or sec_term.signed
    vec = 0.
    if first_term.comp or sec_term.comp:
        vec = 0. + 0.*1j
    if use_data:
        # NOTE(review): fp_step is computed but never used.
        fp_step = first_term.range.step * sec_term.range.step
        # extreme products of the operands' float extrema
        mat = (first_term.max_float * sec_term.max_float, first_term.min_float * sec_term.max_float,
               first_term.max_float * sec_term.min_float, first_term.min_float * sec_term.min_float)
        if first_term.comp or sec_term.comp:
            # complex data: bound symmetrically by the largest magnitude
            mat = (np.max(np.abs(mat)), -np.max(np.abs(mat)))
        max_data = np.max(mat)
        min_data = np.min(mat)
        if signed:
            whole_bits = np.max((ret_num_bitsS(max_data), ret_num_bitsS(min_data)))
        else:
            whole_bits = np.max((ret_num_bitsU(max_data), ret_num_bitsU(min_data)))
        word_length = whole_bits + frac_length
    else:
        # full precision: word lengths add; complex*complex needs one
        # extra bit for the add inside the complex multiply
        word_length = first_term.qvec[0] + sec_term.qvec[0]
        if first_term.comp and sec_term.comp:
            word_length += 1
    qvec_new = (word_length, frac_length)
    return ret_fi(vec, qvec=qvec_new, overflow='wrap', signed=signed)
if __name__ == "__main__":
    # smoke test: four ones == 0b1111 == 15
    print(list_to_uint([1, 1, 1, 1]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: phil
"""
import numpy as np
import binascii
# from cStringIO import StringIO
from io import StringIO
import copy
from mpmath import mp
"""
Quantization vector is of the formed fixed(N, F). Where the first value indicates the
total number of bits and the second number indicates the location of the fractional point.
"""
__version__ = "1.1"
def bit_count(val):
"""
Fast way to count 1's in a 64 bit integer. Based on Hamming weight
"""
val = val - ((val >> 1) & 0x5555555555555555)
val = (val & 0x3333333333333333) + ((val >> 2) & 0x3333333333333333)
return (((val + (val >> 4)) & 0xF0F0F0F0F0F0F0F) * 0x101010101010101) >> 56
def r_shift(bin_str, new_val):
    """
    Right-shift a binary string one position, placing *new_val* in the
    MSB position.  The result always carries a '0b' prefix.
    """
    body_start = bin_str.find('b') + 1
    shifted = str(new_val) + bin_str[body_start:-1]
    # the original guarded on (offset != -1), but offset is find()+1 and
    # therefore never -1 -- the prefix is attached unconditionally
    return '0b' + shifted
def l_shift(bin_str, new_val):
    """
    Left-shift a binary string one position, placing *new_val* in the
    LSB position.  The result always carries a '0b' prefix.
    """
    body_start = bin_str.find('b') + 1
    shifted = bin_str[body_start + 1:] + str(new_val)
    # offset check in the original was vacuous (find()+1 is never -1)
    return '0b' + shifted
def lappend(bin_str, str_append):
    """
    Left-append *str_append* to *bin_str* (the appended bits become the
    MSBs).  The result always carries a '0b' prefix.
    """
    body_a = bin_str[bin_str.find('b') + 1:]
    body_b = str_append[str_append.find('b') + 1:]
    # the original ORed two always-true offset checks, so the prefix is
    # unconditional
    return '0b' + body_b + body_a
def lappend_udec(int_val, bit_val, num_bits):
    """
    Left-append the single bit *bit_val* to unsigned integer *int_val*
    (previous value is shifted right by one).  *num_bits* is the width
    of the unsigned representation.

    Fix: the cast used ``astype(np.int)``; the ``np.int`` alias was
    removed in NumPy 1.24.  ``astype(int)`` is equivalent and supported.
    """
    temp = np.floor(int_val / 2) + ((1 << (num_bits - 1)) * bit_val)
    return temp.astype(int)
def collapse_byte(values):
    """Pack a bit stream (MSB first) into a list of unsigned byte values."""
    byte_val = []
    acc = 0
    for pos, bit in enumerate(values):
        acc = (acc << 1) | bit
        if pos % 8 == 7:       # a full byte has been collected
            byte_val.append(acc)
            acc = 0
    return byte_val
def uint_to_fp(vec, qvec=(16, 15), signed=0, overflow='wrap'):
    """
    Reinterpret raw unsigned integers as fixed-point values.

    When *signed* is 1, values above the signed maximum are mapped back
    to their negative 2's-complement equivalents; everything is then
    scaled by 2**-frac_width and requantized through ret_fi.
    """
    max_int = int(comp_max_value(qvec, signed) * 2 ** qvec[1])
    wrap_point = max_int + 1
    scale = 2 ** -qvec[1]
    vec_fp = []
    for raw in vec:
        if raw > max_int and signed == 1:
            # fold back into the negative half of the range
            raw = -1 * (wrap_point - (raw % wrap_point))
        vec_fp.append(raw * scale)
    return ret_fi(vec_fp, qvec=qvec, overflow=overflow, signed=signed)
class range_fi(object):
    """Simple container describing a fixed-point range: min, max, step."""

    def __init__(self, min_int, max_int, step):
        self.min = min_int
        self.max = max_int
        self.step = step
class Fi(object):
    """
    Simple fixed-point integer container.

    Stores the already-scaled integer values (``vec``), the quantization
    vector ``qvec = (word_width, frac_width)``, the overflow mode and
    signedness, and exposes conversion views (binary / hex / unsigned
    decimal / float).

    Fix: the ``np.int`` alias (removed in NumPy 1.24) is replaced with
    the built-in ``int`` in the bin/hex/gen_full_data conversions.
    """
    def __init__(self, vec, qvec=(16, 15), overflow='wrap', signed=1):
        self.vec = vec
        self.qvec = qvec
        self.overflow = overflow
        self.signed = signed
        # complex content: conversions handle real/imag parts separately
        self.comp = bool(np.iscomplexobj(vec))

    @property
    def bin(self):
        """2's-complement binary strings, one per element."""
        num_chars = self.qvec[0]
        if self.comp:
            real_vals = [dec_to_bin(np.real(value).astype(int), num_chars) for value in self.vec]
            imag_vals = [dec_to_bin(np.imag(value).astype(int), num_chars) for value in self.vec]
            return [real_val + (",j" + imag_val) for (real_val, imag_val) in zip(real_vals, imag_vals)]
        else:
            return [dec_to_bin(value, num_chars) for value in self.vec]

    @property
    def udec(self):
        """Unsigned decimal view: negatives mapped to 2's-complement."""
        values = copy.deepcopy(self.vec)
        max_int = int(comp_max_value(self.qvec, 0) * 2 ** self.qvec[1])
        if self.comp:
            real_vals = np.real(values)
            real_vals[real_vals < 0] += (max_int + 1)
            imag_vals = np.imag(values)
            imag_vals[imag_vals < 0] += (max_int + 1)
            return (real_vals + 1j * imag_vals)
        else:
            real_vals = np.real(values)
            real_vals[real_vals < 0] += (max_int + 1)
            return real_vals

    @property
    def hex(self):
        """2's-complement hexadecimal strings, one per element."""
        num_chars = int(np.ceil(self.qvec[0] / 4.))
        if self.comp:
            real_vals = dec_to_hex(np.real(self.vec).astype(int), num_chars)
            imag_vals = dec_to_hex(np.imag(self.vec).astype(int), num_chars)
            return [real_val + (",j" + imag_val) for (real_val, imag_val) in zip(real_vals, imag_vals)]
        else:
            return dec_to_hex(self.vec, num_chars)

    @property
    def len(self):
        return (len(self.vec))

    # overriding built in len term.
    def __len__(self):
        return (len(self.vec))

    @property
    def float(self):
        """Floating-point interpretation (vec scaled by 2**-frac_width)."""
        return (self.vec * 2. ** (-self.qvec[1]))

    @property
    def max_float(self):
        return np.max(self.float)

    @property
    def max_udec(self):
        return np.max(self.udec)

    @property
    def min_udec(self):
        return np.min(self.udec)

    @property
    def min_float(self):
        return np.min(self.float)

    @property
    def max(self):
        return np.max(self.vec)

    @property
    def min(self):
        return np.min(self.vec)

    @property
    def range(self):
        """range_fi with the representable (min, max, step) of this format."""
        min_int = comp_min_value(self.qvec, self.signed)
        max_int = comp_max_value(self.qvec, self.signed)
        step = comp_slope_value(self.qvec)
        return range_fi(min_int, max_int, step)

    def __getslice__(self, i, j):
        # NOTE(review): __getslice__ is never invoked implicitly on
        # Python 3; retained only for callers that invoke it directly.
        return self.vec[i:j]

    def gen_full_data(self):
        """Replace vec with every representable value of this format."""
        range_obj = self.range
        vec = np.arange(range_obj.min, range_obj.max, range_obj.step)
        self.vec = (vec * (2 ** self.qvec[1])).astype(int)

    def __repr__(self):
        c_str = StringIO()
        c_str.write(' qvec : {}\n'.format(self.qvec))
        c_str.write('overflow : {}\n'.format(self.overflow))
        c_str.write(' signed : {}\n'.format(self.signed))
        c_str.seek(0)
        return c_str.getvalue()
def coe_write(fi_obj, radix=16, file_name=None, filter_type=False):
    """
    Function takes a fixed point vector as input and generates a Xilinx
    compatibily .coe file for ROM/RAM initialization.
    ==========
    Parameters
    ==========
    * fi_obj : fixed integer object
        Fixed Point object generated by fixed point toolbox.
    * radix : int (16)
        Radix used for formatting .coe file (2, 8, 10 or 16).
    * file_name : str
        File name used for outputting file to correct location
        and name.  Required; '.coe' is appended when the final path
        component has no extension.
    * filter_type : bool (False)
        When True, FIR-coefficient style headers (Radix /
        Coefficient_Width / CoefData) are written instead of the memory
        initialization headers.
    =======
    Returns
    =======
    Correctly formatted .coe file for use by Xilinx coregenerator
    modules.
    """
    fi_vec = fi_obj.vec
    signed = fi_obj.signed
    word_length = fi_obj.qvec[0]
    fraction_length = fi_obj.qvec[1]
    assert(file_name is not None), 'User must specify File Name'
    # find last forward slash (search the reversed string)
    idx = str(file_name[::-1]).find('/')
    if (idx == -1):
        idx = 0
    else:
        idx = len(file_name) - 1 - idx
    # append '.coe' when no '.' appears after the final path component
    if (str(file_name).find('.', idx) == -1):
        file_name = file_name + '.coe'
    str_val = 'Radix must of the following: 2, 8, 10, 16'
    assert(radix == 16 or radix == 10 or radix == 8 or radix == 2), str_val
    with open(file_name, 'w') as f:
        f.write('; Initialization File : \n')
        if signed:
            f.write('; Signed Fixed Point\n')
        else:
            f.write('; Unsigned Fixed Point\n')
        # skip = 2
        # header comments record the fixed-point format for the reader
        f.write('; Word Length : %d\n' % word_length)
        f.write('; Fraction Length : %d\n' % fraction_length)
        f.write('; Number of Entries : %d\n\n' % len(fi_vec))
        if (filter_type is False):
            f.write('memory_initialization_radix = ' + str(radix) + ';\n')
            f.write('memory_initialization_vector = ' + '\n')
        else:
            f.write('Radix = ' + str(radix) + ';\n')
            f.write('Coefficient_Width = %d;\n' % word_length)
            f.write('CoefData = \n')
        # 2's-complement wrap factor used by the binary/octal/hex paths
        mod_fac = (1 << word_length)
        if radix == 16:
            num_chars = int(np.ceil(word_length / 4.))
            format_str = '0{}X'.format(num_chars)
        elif radix == 8:
            num_chars = int(np.ceil(word_length / 3.))
            format_str = '0{}o'.format(num_chars)
        elif radix == 2:
            format_str = '0{}b'.format(word_length)
        # NOTE(review): radix == 10 leaves format_str undefined; safe
        # because the decimal branch below uses str(val) instead.
        for (ii, val) in enumerate(fi_vec):
            if radix == 16:
                temp = (val + mod_fac) % mod_fac
                temp = format(temp, format_str)
            elif radix == 8:
                temp = (val + mod_fac) % mod_fac
                temp = format(temp, format_str)
            elif radix == 10:
                temp = str(val)
            elif radix == 2:
                temp = (val + mod_fac) % mod_fac
                temp = format(temp, format_str)
            f.write(temp)
            # final entry terminated with ';', all others with ',\n'
            if ii == (len(fi_vec) - 1):
                f.write(';')
            else:
                f.write(',\n')
def comp_frac_width(value, word_width, signed=0):
    """
    Function computes the optimal fractional width given the vector and the word_width
    """
    # NOTE(review): ret_num_bitsU returns >= 1 for any |value| >= 0.5, so
    # the while loop below only runs for magnitudes below 0.5 (where the
    # log2 goes negative) -- confirm this is the intended domain.
    shift_val = -1
    temp_val = value
    bit_shift = ret_num_bitsU(np.max(np.abs(temp_val)))
    # double sub-0.5 values until at least one whole bit is required,
    # counting the number of doublings applied
    while bit_shift < 0:
        temp_val = temp_val * 2
        shift_val += 1
        bit_shift = ret_num_bitsU(np.max(np.abs(temp_val)))
    if (bit_shift >= shift_val):
        shift_val = -bit_shift
    # fractional bits = word bits minus sign bit plus the (negative)
    # whole-bit shift
    frac_width = word_width - signed + shift_val
    return frac_width
def comp_min_value(qvec, signed=0):
    """Minimum representable real value for fixed-point format *qvec*."""
    if signed == 0:
        return 0
    word_width, frac_width = qvec
    return -1 * 2.**(word_width - signed) / (2.**frac_width)
def comp_max_value(qvec, signed=0):
    """Maximum representable real value for fixed-point format *qvec*."""
    word_width, frac_width = qvec
    lsb = 2.**(-frac_width)
    return 2.**(word_width - signed) / (2.**frac_width) - lsb
def comp_slope_value(qvec):
    """Real-valued increment per LSB for fixed-point format *qvec*."""
    return 2.**(-qvec[1])
def comp_range_vec(qvec, signed=0):
    """All representable real values for fixed-point format *qvec*."""
    slope = comp_slope_value(qvec)
    low = comp_min_value(qvec, signed)
    high = comp_max_value(qvec, signed)
    return np.arange(low, high + slope, slope)
def hex_to_ascii(hex_val):
    """Decode a hex string (optionally '0x'-prefixed) to its raw bytes."""
    digits = hex_val[hex_val.find('x') + 1:]
    return binascii.unhexlify(digits)
def str_to_dec(str_val, base=2, signed_val=True):
    """
    Convert numerical string(s) to decimal values.

    Accepts a single string or an ndarray of strings, real or complex
    (trailing 'j'); base 2 (binary), 8 (octal) or 16 (hexadecimal).
    When *signed_val* is True the digits are interpreted as 2's
    complement.

    Fixes for Python 3 / NumPy >= 1.24:
      * ``np.int(...)`` / ``np.complex`` (removed aliases) replaced with
        the built-ins.
      * the complex character count used true division, yielding a float
        slice index (TypeError on Python 3); integer division now.
      * complex bookkeeping indexed the raw *str_val* with an ndarray
        coordinate even when a plain string was passed; it now uses the
        normalized ``val_int`` array.
    """
    if not isinstance(str_val, np.ndarray):
        val_int = np.atleast_1d(str_val)
    else:
        val_int = str_val.copy()
    first = val_int.flat.coords
    complex_vals = (val_int[first][-1] == 'j')
    ret_vals = np.zeros(val_int.shape, dtype=complex if complex_vals else int)
    num_chars = len(val_int[first])
    if complex_vals:
        # layout: <real chars> separator chars <imag chars> 'j'
        num_chars = (len(val_int[first]) - 4) // 2
        imag_lidx = num_chars + 3
        imag_ridx = len(val_int[first]) - 1
    if signed_val is False:
        for sub_idx, value in np.ndenumerate(val_int):
            if complex_vals:
                ret_vals[sub_idx] = int(value[0:num_chars], base)
                ret_vals[sub_idx] += 1j * int(value[imag_lidx:imag_ridx], base)
            else:
                ret_vals[sub_idx] = int(value, base)
    else:
        # MSB position (past any '0b'/'0x' prefix) and 2's-complement
        # correction factor
        offset = str.find(val_int[first], 'b') + 1
        corr_fac = 2 ** (num_chars - offset)
        if complex_vals:
            offsetI = imag_lidx + 2
        for sub_idx, value in np.ndenumerate(val_int):
            ret_vals[sub_idx] = int(value[0:num_chars], base)
            if value[offset] == '1':
                ret_vals[sub_idx] -= corr_fac
            if complex_vals:
                temp = int(value[imag_lidx:imag_ridx], base)
                if value[offsetI] == '1':
                    temp -= corr_fac
                ret_vals[sub_idx] += 1j * temp
    return ret_vals[0] if (ret_vals.size == 1) else ret_vals
def dec_to_list(dec_val, num_bits):
    """Return *dec_val* as a list of 1/0 ints, zero-padded to *num_bits*."""
    bits = format(dec_val, 'b').zfill(num_bits)
    return [int(ch) for ch in bits]
def bin_array_to_uint(data_vec):
    """
    Convert a 1/0 array to unsigned integer(s) representing
    constellation indices.  Each row of *data_vec* is one binary
    vector, MSB first; returns a list of ints, or a single int when
    only one row is supplied.

    Fix: powers of two are now computed with native integer shifts
    instead of mpmath.  ``int(mp.power(2, idx))`` at ``mp.prec =
    num_bits`` was both a needless dependency and a mutation of global
    mpmath state; Python ints are exact at any width.
    """
    data_int = np.atleast_2d(data_vec)
    ret_val = []
    for vec in data_int:
        sum_value = 0
        for idx, bin_bit in enumerate(reversed(vec)):
            if bin_bit == 1:
                sum_value += 1 << idx  # exact for arbitrary bit widths
        ret_val.append(sum_value)
    if len(ret_val) == 1:
        ret_val = ret_val[0]
    return ret_val
def bin_to_udec(bin_vec):
    """Vectorized conversion of binary strings to unsigned integers."""
    return np.vectorize(lambda s: int(s, 2))(bin_vec)
def nextpow2(i):
    """Return the smallest n such that 2**n >= i."""
    exponent = 0
    while (1 << exponent) < i:
        exponent += 1
    return exponent
def ret_bits_comb(value):
    """Number of bits needed to index *value* combinations."""
    bits = np.ceil(np.log2(value))
    return int(bits)
def ret_num_bitsU(value):
    """
    Return the number of bits required for the unsigned binary
    representation of *value*.

    Fix: the result was cast with ``temp.astype(np.int)``; the
    ``np.int`` alias was removed in NumPy 1.24, so this raised
    AttributeError on modern NumPy.  A built-in ``int`` is returned.
    """
    if value == 0:
        return 1
    val_new = np.floor(value)
    # +.5 keeps exact powers of two off an integer log2 boundary
    temp = np.ceil(np.log2(np.abs(val_new + .5)))
    return int(temp)
def ret_num_bitsS(value):
    """Bits required for the 2's-complement representation of *value*."""
    if value < 0:
        return ret_num_bitsU(np.abs(value) - 1)
    return ret_num_bitsU(value) + 1
def bin_to_bool(string):
    """
    Convert a binary string of '1'/'0' characters into a boolean
    ndarray (True for each '1').

    Fix: the dtype was ``np.bool``, an alias removed in NumPy 1.24;
    the built-in ``bool`` is the supported spelling.
    """
    bool_array = np.zeros((len(string),), dtype=bool)
    for (ii, val) in enumerate(string):
        bool_array[ii] = (val == '1')
    return bool_array
def init_str_array(num_chars, array_shape, compType=False):
    """Return an ndarray of blank strings, each *num_chars* wide."""
    blank = ' ' * num_chars
    if len(array_shape) == 1:
        rows = [blank for _ in range(array_shape[0])]
    else:
        rows = [[blank for _ in range(array_shape[1])]
                for _ in range(array_shape[0])]
    return np.array(rows)
def flip_bin_vec(bin_str):
    """Reverse the bit order of a binary string, keeping any '0b' prefix."""
    body_start = bin_str.find('b') + 1
    return bin_str[:body_start] + bin_str[body_start:][::-1]
def xor_vec(in_val, mask_vec):
    """Parity (XOR reduction) of the bits of ``in_val & mask_vec``."""
    masked = in_val & mask_vec
    return bin(masked).count('1') & 1
def xor_list(prim_list, sec_list):
    """Element-wise XOR of two bit lists (truncates to the shorter)."""
    return [a ^ b for a, b in zip(prim_list, sec_list)]
def parity_list(list_val, init_value=0):
    """XOR-fold a list of 1/0 values, seeded with *init_value*."""
    result = init_value
    for bit in list_val:
        result ^= bit
    return result
def list_to_bin(list_val):
    """Convert row(s) of 1/0 values to '0b...' strings, one per row."""
    rows = np.atleast_2d(np.array(list_val))
    return ['0b' + ''.join(bin(v)[2] for v in row) for row in rows]
def list_to_oct(list_val, num_chars=None):
    """
    Convert a list of 1's and 0's to an unsigned octal string.

    Fixes:
      * Python 2 leftover: ``oct(dec_val)[1:]`` keeps the 'o' of
        Python 3's '0o' prefix, corrupting the output.  Digits are now
        produced with ``format(..., 'o')``.
      * Each 3-bit group is parsed base-2 directly instead of round-
        tripping through list_to_uint.
    """
    num_base_chars = int(np.ceil(len(list_val) / 3.))
    num_bits = 3 * num_base_chars
    if num_chars is not None:
        num_bits = num_chars * 3
    remain = len(list_val) % num_bits
    # left-pad with zeros so the length is a multiple of the group size
    pad = int(np.sign(remain) * num_bits - remain)
    padded = [0] * pad + list(list_val)
    ret_str = ''
    for vec in np.reshape(padded, (-1, 3)):
        group = int(''.join(str(int(b)) for b in vec), 2)
        ret_str += format(group, 'o')
    return ret_str[-num_base_chars:]
def list_to_hex(list_val, num_chars=None):
    """Convert a list of 1's and 0's to an unsigned hex string ('0x...')."""
    num_base_chars = int(np.ceil(len(list_val) / 4.))
    num_bits = 4 * num_base_chars
    if num_chars is not None:
        num_bits = num_chars * 4
    remain = len(list_val) % num_bits
    # left-pad with zeros to a whole number of nibbles
    pad = int(np.sign(remain) * num_bits - remain)
    padded = [0] * pad + list(list_val)
    digits = ''
    for nibble in np.reshape(padded, (-1, 4)):
        value = int(''.join(str(int(b)) for b in nibble), 2)
        digits += hex(value)[2:]
    return '0x' + digits[-num_base_chars:]
def list_to_uint(list_val):
    """Convert row(s) of 1's and 0's to unsigned integer(s)."""
    rows = np.atleast_2d(np.array(list_val))
    results = [int(''.join(str(int(b)) for b in row), 2) for row in rows]
    return results if len(results) > 1 else results[0]
def hex_to_list_vec(hex_str, num_bits=None):
    """
    Convert a hex string (or list of hex strings) to list(s) of 1's and
    0's, optionally left-padded with zeros to *num_bits*.
    """
    def convert_one(text):
        text = text[text.find('x') + 1:]
        bits = []
        for ch in text:
            nibble = format(int(ch, 16), '04b')
            bits.extend(int(b) for b in nibble)
        if num_bits is None:
            return bits
        return [0] * (num_bits - len(bits)) + bits

    if isinstance(hex_str, str):
        return convert_one(hex_str)
    return [convert_one(item) for item in hex_str]
def uint_to_list(dec_val, num_bits=8):
    """Convert an unsigned integer to a list of 1's and 0's, *num_bits* wide."""
    bit_str = format(dec_val, '0{}b'.format(num_bits))
    return [int(ch) for ch in bit_str]
def dec_to_ubin(dec_val, num_bits):
    """Unsigned binary string of *dec_val*, zero-padded to *num_bits*."""
    return format(dec_val, 'b').zfill(num_bits)
def dec_to_bin(dec_val, num_bits):
    """Signed 2's-complement binary string of *dec_val*, *num_bits* wide."""
    wrap = 1 << num_bits
    return format((dec_val + wrap) % wrap, '0{}b'.format(num_bits))
def dec_to_hex(dec_vals, num_chars):
    """Uppercase 2's-complement hex strings for scalar or sequence input."""
    if type(dec_vals) is not list and type(dec_vals) is not np.ndarray:
        dec_vals = [dec_vals]
    wrap = 1 << (num_chars * 4)
    fmt = '0{}X'.format(num_chars)
    return [format((value + wrap) % wrap, fmt) for value in dec_vals]
def oct_to_udec(oct_str):
    """Decimal value of an octal string."""
    return int(oct_str, base=8)
def hex_to_ubin(hex_str, num_bits):
    """Unsigned binary string of a hex string, zero-padded to *num_bits*."""
    value = int(hex_str, 16)
    return format(value, '0{}b'.format(num_bits))
def oct_to_ubin(oct_str, num_bits):
    """Unsigned binary string of an octal string, zero-padded to *num_bits*."""
    value = int(oct_str, 8)
    return format(value, '0{}b'.format(num_bits))
def oct_to_list(oct_str, num_bits):
    """Octal string -> list of 1's and 0's, *num_bits* wide."""
    bits = format(int(oct_str, 8), '0{}b'.format(num_bits))
    return [int(ch) for ch in bits]
def hex_to_udec(hex_str):
    """Decimal value of a hexadecimal string."""
    return int(hex_str, base=16)
def hex_to_dec(hex_str):
    """Signed (2's complement) decimal value of a hexadecimal string."""
    base = 16
    return str_to_dec(hex_str, base, signed_val=True)
# def comp_frac_width(value, word_width, signed=0):
#
# shift_val = -1
# temp_val = value
# bit_shift = ret_num_bitsU(np.max(np.abs(temp_val)))
# while bit_shift < 0:
# temp_val = temp_val * 2
# shift_val += 1
# bit_shift = ret_num_bitsU(np.max(np.abs(temp_val)))
# if (bit_shift >= shift_val):
# shift_val = -bit_shift
# frac_width = word_width - signed + shift_val
# return frac_width
def ret_fi(vec, qvec=(16, 15), overflow='wrap', signed=1):
    """
    Build a fixed-point (Fi) object from *vec*.  Complex input is
    quantized component-wise and recombined into one complex Fi.
    """
    if not np.iscomplexobj(vec):
        return ret_dec_fi(vec, qvec, overflow, signed)
    re_fi = ret_dec_fi(vec.real, qvec, overflow, signed)
    im_fi = ret_dec_fi(vec.imag, qvec, overflow, signed)
    return Fi(re_fi.vec + 1j * im_fi.vec, qvec, overflow, signed)
def ret_flat_fi(vec, qvec=(16, 15), overflow='wrap', signed=1):
    """
    Pack a complex vector into a single unsigned Fi of width 2*qvec[0]
    (real part in the high word, imaginary in the low word); real input
    falls through to ret_dec_fi unchanged.
    """
    if not np.iscomplexobj(vec):
        return ret_dec_fi(vec, qvec, overflow, signed)
    re_fi = ret_dec_fi(vec.real, qvec, overflow, signed)
    im_fi = ret_dec_fi(vec.imag, qvec, overflow, signed)
    packed = (re_fi.udec << qvec[0]) + im_fi.udec
    return Fi(packed, (qvec[0] * 2, 0), overflow, signed=0)
def ret_dec_fi(vec, qvec=(16, 15), overflow='wrap', signed=1):
    """
    Helper function returns a fixed integer vector to the user.

    Parameters
    ----------
    vec : scalar or array-like
        Floating-point value(s) to quantize.
    qvec : tuple (word_width, frac_width)
        Fixed-point format descriptor.
    overflow : str
        'wrap' or 'saturate' overflow handling (case-insensitive).
    signed : int
        1 for signed (two's complement) representation, 0 for unsigned.

    Returns
    -------
    Fi object holding the quantized integer values.
    """
    fraction_width = qvec[1]
    # scale onto the integer grid and round to the nearest integer
    temp = np.around(np.array(vec) * 2.**fraction_width, decimals=0)
    temp = np.atleast_1d(temp)
    min_int = comp_min_value(qvec, signed)
    min_int *= 2. ** fraction_width
    max_int = comp_max_value(qvec, signed)
    max_int *= 2. ** fraction_width
    mode = overflow.lower()
    if signed == 0 and mode == 'wrap':
        # this is so negative values can wrap appropriately on the
        # asymmetric positive number line.
        min_int = max_int + 1
    if mode == 'saturate':
        # clamp anything outside [min_int, max_int] to the boundary
        idx = (temp >= max_int)
        if np.any(idx):
            temp[idx] = max_int
        idx = (temp <= min_int)
        if np.any(idx):
            temp[idx] = min_int
    if mode == 'wrap':
        # modulo-wrap values that fall outside the representable range
        idx = (temp > max_int)
        if np.any(idx):
            temp[idx] = temp[idx] % max_int
        idx = (temp < min_int)
        if np.any(idx):
            temp[idx] = temp[idx] % min_int
    temp = temp.flatten()
    # np.int was deprecated in NumPy 1.20 and removed in 1.24 --
    # use the builtin int dtype instead (same behavior).
    fi_obj = Fi(temp.astype(int), qvec, overflow, signed)
    return fi_obj
def concat_fi(first_fi, second_fi):
    """
    Function does a bitwise concatenation of 2 fi objects.
    Treats both of them as unsigned -- returns an unsigned object that
    is of the quantization type (total_bits, 0), with first_fi occupying
    the upper bits and second_fi the lower bits.

    Bug fix: the first operand must be shifted left by the *second*
    operand's word width (nbits1) to make room for its bits.  Shifting
    by nbits0 (the previous behavior) is only correct when both word
    widths happen to be equal.
    """
    nbits0 = first_fi.qvec[0]
    nbits1 = second_fi.qvec[0]
    total_bits = nbits0 + nbits1
    new_dec = (first_fi.udec << nbits1) + second_fi.udec
    return ret_dec_fi(new_dec, (total_bits, 0), signed=0)
def stack_fi(first_fi, second_fi):
    """
    Stack (concatenate element-wise) two fi objects into a single one.
    Both inputs are treated as unsigned and must share the same word
    length; the result is an unsigned fi of quantization (word_len, 0).
    """
    word_len = first_fi.qvec[0]
    assert (word_len == second_fi.qvec[0]), 'Both fi objects must have the same word length'
    stacked = np.concatenate((first_fi.udec, second_fi.udec))
    return ret_dec_fi(stacked, (word_len, 0), signed=0)
# def add_fi(first_term, sec_term):
# """
# Method is used to perform a trial addition of two fi objects.
# Simply uses the fi_math and numeric_types to generate a new fi object
# with 0 as its data.
#
# Commonly used to determine Integer and Fractional bit widths at the
# output of a fixed point multiplier.
#
# ==========
# Parameters
# ==========
#
# * first_term : (fi Object):
# First fi object used in the multiplication check.
# * sec_term : (fi Object)
# Second fi object used in the multiplication check
#
# =======
# Returns
# =======
#
# * out : (fi Object):
# Returns new fi object -- output of multiplying first and
# second input terms.
# """
# if (not isinstance(sec_term, Fi)):
# sec_term = ret_dec_fi(sec_term)
# if (not isinstance(first_term, Fi)):
# first_term = ret_dec_fi(first_term)
#
# num_type_first = first_term.numeric_type
# num_type_sec = sec_term.numeric_type
#
# first_term = fi(0, numeric_type=num_type_first, sign_val=0)
# sec_term = fi(0, numeric_type=num_type_sec, sign_val=0)
#
# new_obj = first_term + sec_term
# return new_obj
#
#
def mult_fi(first_term, sec_term, use_data=False):
    """
    Method is used to perform a trial multiplication of two fi objects.
    Simply uses the fi_math and numeric_types to generate a new fi object
    with 0 as its data.
    Commonly used to determine Integer and Fractional bit widths at the
    output of a fixed point multiplier.
    ==========
    Parameters
    ==========
    * first_term : (fi Object):
        First fi object used in the multiplication check.
    * sec_term : (fi Object)
        Second fi object used in the multiplication check
    * use_data : bool
        When True, size the output word to the actual data extremes of
        the inputs; when False, use full-precision sizing (sum of the
        input word widths).
    =======
    Returns
    =======
    * out : (fi Object):
        Returns new fi object -- output of multiplying first and
        second input terms.
    """
    # coerce plain numeric inputs into Fi objects first
    if (not isinstance(sec_term, Fi)):
        sec_term = ret_dec_fi(sec_term)
    if (not isinstance(first_term, Fi)):
        first_term = ret_dec_fi(first_term)
    # fractional widths add under multiplication
    frac_length = first_term.qvec[1] + sec_term.qvec[1]
    signed = first_term.signed or sec_term.signed
    vec = 0.
    if first_term.comp or sec_term.comp:
        # complex result placeholder (0 + 0j)
        vec = 0. + 0.*1j
    if use_data:
        # size the word to the worst-case products of the actual data extremes
        # NOTE(review): fp_step is computed but never used
        fp_step = first_term.range.step * sec_term.range.step
        mat = (first_term.max_float * sec_term.max_float, first_term.min_float * sec_term.max_float,
               first_term.max_float * sec_term.min_float, first_term.min_float * sec_term.min_float)
        if first_term.comp or sec_term.comp:
            # for complex data, use the symmetric magnitude bound
            mat = (np.max(np.abs(mat)), -np.max(np.abs(mat)))
        max_data = np.max(mat)
        min_data = np.min(mat)
        if signed:
            whole_bits = np.max((ret_num_bitsS(max_data), ret_num_bitsS(min_data)))
        else:
            whole_bits = np.max((ret_num_bitsU(max_data), ret_num_bitsU(min_data)))
        word_length = whole_bits + frac_length
    else:
        # full-precision product: word widths add
        word_length = first_term.qvec[0] + sec_term.qvec[0]
        if first_term.comp and sec_term.comp:
            # complex x complex needs one extra bit for the additions
            word_length += 1
    qvec_new = (word_length, frac_length)
    return ret_fi(vec, qvec=qvec_new, overflow='wrap', signed=signed)
if __name__ == "__main__":
    # quick smoke test: pack a list of bits into an unsigned integer
    list_val = [1, 1, 1, 1]
    print(list_to_uint(list_val))
| en | 0.675372 | #!/usr/bin/env python # -*- coding: utf-8 -*- @author: phil # from cStringIO import StringIO Quantization vector is of the formed fixed(N, F). Where the first value indicates the total number of bits and the second number indicates the location of the fractional point. Fast way to count 1's in a 64 bit integer. Based on Hamming weight Function performs a right shift of a binary string. Placing the new value into the MSB position. Function performs a left shift of a binary string. Placing the new value into the LSB position. Function left appends a binary string with string specified by string append. Function left appends int_val with bit_val. bit_val is assumed to be one bit. num_bits is the number of bits to represent unsigned integer int_val Function collapses a bit stream into unsigned integer representing bytes. # value = float(value) # negative value Simple fixed integer object to hold parameters related to a \ fixed point object. Converts vector to 2's complement binary values. Returns unsigned decimal integer of the vector # min_int = int(comp_min_value(self.qvec, 0) * 2 ** self.qvec[1]) Converts vector to 2's complement hexadecimal values. # overriding built in len term. # def __getslice__(self, lidx, ridx): # """ # Overloaded getslice method. # """ # self.vec = self.vec[lidx, ridx] # # return self # # # def __getitem__(self, index) # , self.__class__.__name__, self.block_name Function takes a fixed point vector as input and generates a Xilinx compatibily .coe file for ROM/RAM initialization. ========== Parameters ========== * fi_obj : fixed integer object Fixed Point object generated by fixed point toolbox. * radix : int (16) Radix used for formatting .coe file. * file_name : str File name used for outputting file to correct location and name. ======= Returns ======= Correctly formatted .coe file for use by Xilinx coregenerator modules. 
# find last forward slash # skip = 2 Function computes the optimal fractional width given the vector and the word_width Computes the mimimum real value given the fixed point representation Computes maximum real value given the fixed point representation, qvec. Returns the fixed point increment per unit increase in binary number. Computes range of real values for a given fixed point representation. Converts hex value to ascii string. # .decode('hex') Method converts numerical string to unsigned decimal representation Can take single value or vector; complex or real. Base 2 : binary base 8 : octal, base 16 : hexadecimal Converts decimal value to list of 1's and 0's. Converts 1 / 0 array to unsigned integer array representing constellation indices. Each binary vector that is to be converted to an unsigned number lies on each row of the vector. Find 2^n that is equal to or greater than. Helper function returns number of bits to represent number of combinations, value. Function returns required number of bits for unsigned binary representation. Function returns required number of bits for 2's complement representation. Helper function converts a binary string into a boolean array # return map(lambda x: x**2, range(10) Initializes a string array. Function flip bit order of binary string. Assumed to Returns the XOR of bits from the result of masking bin_vec with the mask vector mask_vec. Returns the XOR of bits from the primary and secondary lists. Helper function computes parity on list of 1's and 0's Converts a 1,0 list and or ndarray to a binary string. Converts list of 1's and 0's to unsigned hex string. # ipdb.set_trace() Converts list of 1's and 0's to unsigned hex string. Converts list of 1's and 0's to unsigned integer. Converts hex string to list of 1's and 0's. # pdb.set_trace() # if single hex string # if list of hex strings Converts hex string to list of 1's and 0's. 
# str_val in ret_val for bit in str_val] Helper function convert decimal value to signed 2's complement binary value. # for value in dec_vals] Function returns decimal equivalent to octal value. Method converts hex string (ndarray) to binary string. Method converts hex string (ndarray) to binary string. Function returns decimal equivalent to hexadecimal value Function returns decimal equivalent to hexadecimal value # def comp_frac_width(value, word_width, signed=0): # # shift_val = -1 # temp_val = value # bit_shift = ret_num_bitsU(np.max(np.abs(temp_val))) # while bit_shift < 0: # temp_val = temp_val * 2 # shift_val += 1 # bit_shift = ret_num_bitsU(np.max(np.abs(temp_val))) # if (bit_shift >= shift_val): # shift_val = -bit_shift # frac_width = word_width - signed + shift_val # return frac_width Helper function returns a fixed integer vector to the user. If input is complex it will automatically convert real and imaginary components separately. Helper function returns a fixed integer vector to the user. Assumes signed input. # word_width = qvec[0] # this is so negative values and wrap appropriately on the # asymmetric positive number line. # check for wrapping here. # check for wrapping here. # create fi_obj and return it to the user Function does a bitwise concatenation of 2 fi objects. Treats both of the them as unsigned -- returns unsigned object that is of the quantization type [total_ bits 0]. Only format that makes sense. Uses fi_math of first_fi object. Function does a stacking of 2 fi objects.. Treats both of the them as unsigned -- returns unsigned object that Both fi object must have the same word lengths. Only format that makes sense. Uses fi_math of first_fi object. # def add_fi(first_term, sec_term): # """ # Method is used to perform a trial addition of two fi objects. # Simply uses the fi_math and numeric_types to generate a new fi object # with 0 as its data. 
# # Commonly used to determine Integer and Fractional bit widths at the # output of a fixed point multiplier. # # ========== # Parameters # ========== # # * first_term : (fi Object): # First fi object used in the multiplication check. # * sec_term : (fi Object) # Second fi object used in the multiplication check # # ======= # Returns # ======= # # * out : (fi Object): # Returns new fi object -- output of multiplying first and # second input terms. # """ # if (not isinstance(sec_term, Fi)): # sec_term = ret_dec_fi(sec_term) # if (not isinstance(first_term, Fi)): # first_term = ret_dec_fi(first_term) # # num_type_first = first_term.numeric_type # num_type_sec = sec_term.numeric_type # # first_term = fi(0, numeric_type=num_type_first, sign_val=0) # sec_term = fi(0, numeric_type=num_type_sec, sign_val=0) # # new_obj = first_term + sec_term # return new_obj # # Method is used to perform a trial multiplication of two fi objects. Simply uses the fi_math and numeric_types to generate a new fi object with 0 as its data. Commonly used to determine Integer and Fractional bit widths at the output of a fixed point multiplier. ========== Parameters ========== * first_term : (fi Object): First fi object used in the multiplication check. * sec_term : (fi Object) Second fi object used in the multiplication check ======= Returns ======= * out : (fi Object): Returns new fi object -- output of multiplying first and second input terms. | 3.527373 | 4 |
pKaTool/stab_fit/myfitter.py | shambo001/peat | 3 | 6623999 | #!/usr/bin/env python
import numpy as np
import math, random
import operator, os, sys, csv
import pickle
import pylab as plt
import scipy.optimize
"""Prototype for newer fit class that allows user created
models to be added dynamically and can do multivariate fitting"""
class testdata(object):
    """Generators for noisy synthetic x/y data sets used to exercise the fitter."""

    def line(self, noise=2.0):
        """Straight line y = x with gaussian noise added to y."""
        x = np.random.normal(1, 10, 500)
        y = [val + np.random.normal(0, noise) for val in x]
        return x, y

    def simpleHH(self, noise=.01):
        """Single-pKa Henderson-Hasselbalch titration curve with noise."""
        x = np.arange(1, 10, 0.2)
        pKa, span, offset = 6, 5, 0.2
        y = []
        for ph in x:
            val = span / (1 + 10**(-ph + pKa)) + offset
            y.append(val + np.random.normal(0, 9*noise))
        return x, y

    def complexHH(self, noise=.02):
        """Double-pKa Henderson-Hasselbalch titration curve with noise."""
        x = np.arange(1, 10, 0.2)
        pKa1, span1, pKa2, span2, offset = 3, 5, 7, 5, 0.6
        y = []
        for ph in x:
            val = span1 / (1 + 10**(pKa1 - ph)) + span2 / (1 + 10**(-ph + pKa2)) + offset
            y.append(val + np.random.normal(0, 9*noise))
        return x, y
class fitter(object):
    """Least-squares curve fitter for a model function func(x, params).

    NOTE: calling .fit() stores the fitted y values in self.fit, which
    shadows the method of the same name on the instance; a second .fit()
    call on the same object will fail.  Kept for backward compatibility.
    """

    def __init__(self, func, params, x, y):
        self.params = params
        self.func = func
        self.x = x; self.y = y
        return

    def lstsq(self, x, y):
        """DIY lsq.

        NOTE(review): incomplete prototype -- references 'fit' before
        assignment and will raise NameError if called.  Left unchanged.
        """
        p = self.params
        rounds = range(60)
        for r in rounds:
            r = self.evaluate(y, fit)
        self.fit = fit
        return fit

    def residuals(self, p, args=None):
        """Return the list of per-point squared residuals for parameters p."""
        x = self.x; y = self.y
        fit = [self.func(i, p) for i in x]
        r = [math.pow(i[0] - i[1], 2) for i in zip(fit, y)]
        return r

    def evaluate(self, p, args=None):
        """Return the sum of squared residuals for parameters p."""
        x = self.x; y = self.y
        fit = [self.func(i, p) for i in x]
        r = 0
        for i in zip(fit, y):
            r += math.pow(i[0] - i[1], 2)
        return r

    def minimize(self):
        # placeholder, not implemented
        return

    def fit(self, method='simplex'):
        """Fit by minimizing the sum of squared residuals.

        method : 'simplex' (Nelder-Mead downhill simplex) or 'lm'
                 (scipy Levenberg-Marquardt leastsq).
        Stores fitted parameters in self.params, the fitted y values in
        self.fit, and returns the latter.
        """
        if method == 'simplex':
            # downhill simplex algorithm
            p = scipy.optimize.fmin(self.evaluate, self.params)
        elif method == 'lm':
            # scipy version of the Levenberg-Marquardt algorithm
            p, ier = scipy.optimize.leastsq(self.residuals, self.params)
        self.params = p
        fit = [self.func(i, p) for i in self.x]
        self.fit = fit
        return fit

    def plot(self, ax=None):
        """Plot data points and the fitted curve on ax (new figure if None)."""
        x = self.x; y = self.y
        fit = self.fit
        if ax == None:
            fig = plt.figure(figsize=(6, 6))
            ax = fig.add_subplot(111)
            self.fig = fig
        ax.plot(x, y, 'o', alpha=0.6)
        # sample the fitted curve slightly beyond the data range
        inc = abs(max(x) - min(x)) / 30
        fitx = np.arange(min(x) - inc, max(x) + inc, inc)
        fity = [self.func(i, self.params) for i in fitx]
        ax.plot(fitx, fity, lw=3, alpha=0.7)
        ax.text(0.1, 0.8, self.params, fontsize=0.8)
        return ax

    def estimateUncertainty(self, x, y, p, xerr=0.1, yerr=0.1, runs=10):
        """Generic version of monte carlo parameter uncert, returns
        st dev for each parameter over repeated runs."""
        plist = []
        for r in range(runs):
            mutx = []; muty = []
            for i in range(len(x)):
                mutx.append(x[i] + random.uniform(-xerr, xerr))
                # bug fix: perturb the y data -- previously this appended
                # a second noisy copy of x[i] instead of y[i]
                muty.append(y[i] + random.uniform(-yerr, yerr))
            F = fitter(self.func, p, mutx, muty)
            F.fit()
            plist.append(F.params)
        result = []
        for i in range(len(p)):
            result.append(np.std([v[i] for v in plist]))
        return result
class fitModel(object):
    """Models created dynamically should use this to inherit from"""
    def __init__(self):
        # placeholder -- dynamically created subclasses supply their own state
        return
    def guessStart(self):
        # placeholder -- subclasses should estimate starting parameters here
        return
def linear(x, p):
    """Straight line y = m*x + b, with p = (m, b)."""
    slope, intercept = p
    return slope * x + intercept
def hh1pka(x, p):
    """Single-pKa Henderson-Hasselbalch curve; p = (pKa, span, offset)."""
    pKa, span, offset = p
    return offset + span / (1 + 10 ** (pKa - x))
def hh2pka(x, p):
    """Double-pKa Henderson-Hasselbalch curve; p = (pKa1, span1, pKa2, span2, offset)."""
    pKa1, span1, pKa2, span2, offset = p
    term1 = span1 / (1 + 10 ** (pKa1 - x))
    term2 = span2 / (1 + 10 ** (pKa2 - x))
    return term1 + term2 + offset
def sigmoid(x, p):
    """Logistic sigmoid with midpoint t; p = (t, bottom, top, slope)."""
    midpoint, bottom, top, slope = p
    return bottom + (top - bottom) / (1 + math.exp((midpoint - x) / slope))
def depletion(x, p):
    """Exponential substrate-depletion curve; p = (M, D, x0)."""
    amplitude, rate, x0 = p
    return amplitude * (1 - math.exp(-rate * (x - x0)))
def michaelismenten(x, p):
    """Michaelis-Menten rate expression; p = (s0, vmax, km).

    Bug fix: the parameters were unpacked into a variable named 'so'
    while the body used the undefined name 's0', so every call raised
    NameError.  The unpacked name now matches the one used.
    """
    s0, vmax, km = p
    y = vmax * (s0 / (km + x))
    return y
def test():
    """Smoke test: fit synthetic data sets with the fitter class."""
    T=testdata()
    # NOTE(review): this x,y is immediately overwritten below
    x,y=T.line()
    #F=fitter(linear,[0.5,1],x,y)
    x,y=T.simpleHH()
    #x,y=T.complexHH()
    # fit a single-pKa Henderson-Hasselbalch model to the noisy curve
    F=fitter(hh1pka,[1,1,1],x,y)
    #F=fitter(sigmoi[1,1,1]d,[6,0,1,1],x,y)
    F.fit()
    F.plot()
    F.estimateUncertainty(x,y,[1,1,1])
def test10R():
    """pKa fitting from kcats using substr depletion.

    For each data folder: reads every per-pH .csv (pH parsed from the
    file name), fits a substrate-depletion curve to each replicate to
    extract the rate D, then fits a single-pKa model to the (pH, D)
    pairs and pickles the data.  Python 2 code (print statements,
    csv reader .next()).
    """
    path = 'fergal_10R'
    folders = ['fergal_10R/10RWT','fergal_10R/U33W1']
    pkas=[]
    for path in folders:
        fig=plt.figure(figsize=(8,8))
        i=1
        data = []
        ax1=None
        for f in os.listdir(path):
            if os.path.splitext(f)[1] != '.csv': continue
            cr = csv.reader(open(os.path.join(path,f),'r'))
            # pH is encoded as the second space-separated token of the file name
            ph=float(f.split(' ')[1])
            cols = len(cr.next())-1
            print path, f, ph, '%s cols' %cols
            vals = [r for r in cr]
            #may be several replicates
            for c in range(0,cols,2):
                x = [float(r[c]) for r in vals]
                y = [float(r[c+1]) for r in vals]
                #fit
                M = max(y)
                F=fitter(depletion,[M,1,1],x,y)
                F.fit()
                D=F.params[1]
                print 'D',D
                # discard an apparent outlier at pH 9
                if ph==9.0 and D>6: continue
                data.append((ph,D))
                if c==0:
                    # plot only the first replicate per file
                    ax=fig.add_subplot(4,4,i,sharey=ax1)
                    i+=1
                    if ax1==None: ax1=ax
                    F.plot(ax)
                    ax.set_title(ph)
        #fit pKa
        fig.subplots_adjust(wspace=0.4,hspace=0.4)
        x,y=zip(*data)
        F=fitter(hh1pka,[5,2,0],x,y)
        F.fit()
        pkas.append(F.params[0])
        F.plot()
        #res = F.estimateUncertainty(x,y,[5,2,0],xerr=0.1,yerr=0.2,runs=10)
        pickle.dump(data,open(os.path.basename(path)+'.pickle','w'))
    print pkas
    return
def parametersTest():
    """Load previously fitted (pH, D) data and cross-validate the pKa fit."""
    data = pickle.load(open('10RWT.pickle','r'))
    x,y=zip(*data)
    crossValidate(x,y)
    return
def crossValidate(x,y, frac=0.2, num=None):
"""Random sub-sampling removal of points to test effects on
fit parameters"""
l=len(x)
if num==None:
num = int(l*(1-frac))
print 'using %s out of %s points..' %(num,l)
fig=plt.figure(figsize=(8,8))
c=0
pkas=[]
for n in range(20):
n1 = random.sample(range(l), num)
x1 = [x[i] for i in range(l) if i in n1]
y1 = [y[i] for i in range(l) if i in n1]
F=fitter(hh1pka,[5,2,0],x1,y1)
F.fit()
pka = round(F.params[0],3); pkas.append(pka)
ax=fig.add_subplot(4,5,c)
F.plot(ax)
ax.set_title(pka)
c+=1
print 'stdev:', np.std(pkas)
return
def pltconf():
    """Configure global matplotlib defaults (font sizes, savefig dpi)."""
    #plt.rc('font',family='serif')
    plt.rc('font',size=10)
    plt.rc('legend',fontsize=10)
    #plt.rc('text',usetex=True)
    plt.rc('savefig',dpi=300)
if __name__ == '__main__':
    #test()
    # apply plot defaults, run the 10R pKa fitting workflow, show figures
    pltconf()
    test10R()
    #parametersTest()
    plt.show()
| #!/usr/bin/env python
import numpy as np
import math, random
import operator, os, sys, csv
import pickle
import pylab as plt
import scipy.optimize
"""Prototype for newer fit class that allows user created
models to be added dynamically and can do multivariate fitting"""
class testdata(object):
def line(self, noise=2.0):
x=np.random.normal(1,10,500)
y=[i+np.random.normal(0,noise) for i in x]
return x,y
def simpleHH(self, noise=.01):
x=np.arange(1,10,0.2)
pKa=6;span=5;offset=0.2
y=[]
for i in x:
val = span / (1 + 10**(- i + pKa)) + offset
val += np.random.normal(0,9*noise)
y.append(val)
return x,y
def complexHH(self, noise=.02):
x=np.arange(1,10,0.2)
pKa1=3;span1=5;pKa2=7;span2=5;offset=0.6
y=[]
for i in x:
val = span1/ (1+10**(pKa1-i)) + span2/ (1+10**(-i+pKa2)) + offset
val += np.random.normal(0,9*noise)
y.append(val)
return x,y
class fitter(object):
def __init__(self, func, params, x, y):
self.params = params
self.func = func
self.x = x; self.y = y
return
def lstsq(self, x, y):
"""DIY lsq"""
p=self.params
rounds=range(60)
for r in rounds:
r = self.evaluate(y,fit)
self.fit = fit
return fit
def residuals(self, p, args=None):
"""Evaluate the func residuals given parameters"""
r=[]
x=self.x; y=self.y
fit=[self.func(i,p) for i in x]
r = [math.pow(i[0]-i[1],2) for i in zip(fit,y)]
return r
def evaluate(self, p, args=None):
"""Evaluate func and get sum sq res for given params"""
x=self.x; y=self.y
fit=[self.func(i,p) for i in x]
r=0
for i in zip(fit,y):
r += math.pow(i[0]-i[1],2)
return r
def minimize(self):
return
def fit(self, method='simplex'):
"""Fit by minimizing r-squared using various algorithms"""
#downhill simplex algorithm
if method == 'simplex':
p = scipy.optimize.fmin(self.evaluate, self.params)
#using scipy version of levenberg-Marquardt algorithm
elif method == 'lm':
p,ier = scipy.optimize.leastsq(self.residuals, self.params)
self.params = p
fit=[self.func(i,p) for i in self.x]
self.fit = fit
return fit
def plot(self, ax=None):
x=self.x; y=self.y
fit = self.fit
if ax==None:
fig=plt.figure(figsize=(6,6))
ax=fig.add_subplot(111)
self.fig = fig
ax.plot(x, y,'o',alpha=0.6)
inc = abs(max(x)-min(x))/30
fitx = np.arange(min(x)-inc,max(x)+inc,inc)
fity = [self.func(i,self.params) for i in fitx]
ax.plot(fitx, fity,lw=3,alpha=0.7)
#ax.set_title(self.params)
ax.text(0.1,0.8,self.params,fontsize=0.8)
return ax
def estimateUncertainty(self,x,y,p,xerr=0.1,yerr=0.1,runs=10):
"""Generic version of monte carlo parameter uncert, returns
st dev for each parameter over repeated runs"""
plist=[]
for r in range(runs):
mutx=[];muty=[]
for i in range(len(x)):
mutx.append(x[i] + random.uniform(-xerr, xerr))
muty.append(x[i] + random.uniform(-yerr, yerr))
F=fitter(self.func,p,mutx,muty)
F.fit()
plist.append(F.params)
result = []
for i in range(len(p)):
result.append(np.std([v[i] for v in plist]))
return result
class fitModel(object):
"""Models created dynamically should use this to inherit from"""
def __init__(self):
return
def guessStart(self):
return
def linear(x,p):
m,b=p
y = m * x + b
return y
def hh1pka(x,p):
pKa,span,offset=p
y = span / (1 + 10**(- x + pKa)) + offset
return y
def hh2pka(x,p):
pKa1,span1,pKa2,span2,offset=p
y = span1/ (1+10**(pKa1-x)) + span2/ (1+10**(-x+pKa2)) + offset
return y
def sigmoid(x,p):
t,bottom,top,slope=p
y = bottom + (top - bottom) / (1 + math.exp((t-x)/slope))
return y
def depletion(x, p):
M,D,x0=p
y=M * (1 - math.exp(-D*(x-x0)))
return y
def michaelismenten(x,p):
so,vmax,km=p
y = vmax*(s0/(km+x))
return y
def test():
T=testdata()
x,y=T.line()
#F=fitter(linear,[0.5,1],x,y)
x,y=T.simpleHH()
#x,y=T.complexHH()
F=fitter(hh1pka,[1,1,1],x,y)
#F=fitter(sigmoi[1,1,1]d,[6,0,1,1],x,y)
F.fit()
F.plot()
F.estimateUncertainty(x,y,[1,1,1])
def test10R():
"""pKa fitting from kcats using substr depletion"""
path = 'fergal_10R'
folders = ['fergal_10R/10RWT','fergal_10R/U33W1']
pkas=[]
for path in folders:
fig=plt.figure(figsize=(8,8))
i=1
data = []
ax1=None
for f in os.listdir(path):
if os.path.splitext(f)[1] != '.csv': continue
cr = csv.reader(open(os.path.join(path,f),'r'))
ph=float(f.split(' ')[1])
cols = len(cr.next())-1
print path, f, ph, '%s cols' %cols
vals = [r for r in cr]
#may be several replicates
for c in range(0,cols,2):
x = [float(r[c]) for r in vals]
y = [float(r[c+1]) for r in vals]
#fit
M = max(y)
F=fitter(depletion,[M,1,1],x,y)
F.fit()
D=F.params[1]
print 'D',D
if ph==9.0 and D>6: continue
data.append((ph,D))
if c==0:
ax=fig.add_subplot(4,4,i,sharey=ax1)
i+=1
if ax1==None: ax1=ax
F.plot(ax)
ax.set_title(ph)
#fit pKa
fig.subplots_adjust(wspace=0.4,hspace=0.4)
x,y=zip(*data)
F=fitter(hh1pka,[5,2,0],x,y)
F.fit()
pkas.append(F.params[0])
F.plot()
#res = F.estimateUncertainty(x,y,[5,2,0],xerr=0.1,yerr=0.2,runs=10)
pickle.dump(data,open(os.path.basename(path)+'.pickle','w'))
print pkas
return
def parametersTest():
data = pickle.load(open('10RWT.pickle','r'))
x,y=zip(*data)
crossValidate(x,y)
return
def crossValidate(x,y, frac=0.2, num=None):
"""Random sub-sampling removal of points to test effects on
fit parameters"""
l=len(x)
if num==None:
num = int(l*(1-frac))
print 'using %s out of %s points..' %(num,l)
fig=plt.figure(figsize=(8,8))
c=0
pkas=[]
for n in range(20):
n1 = random.sample(range(l), num)
x1 = [x[i] for i in range(l) if i in n1]
y1 = [y[i] for i in range(l) if i in n1]
F=fitter(hh1pka,[5,2,0],x1,y1)
F.fit()
pka = round(F.params[0],3); pkas.append(pka)
ax=fig.add_subplot(4,5,c)
F.plot(ax)
ax.set_title(pka)
c+=1
print 'stdev:', np.std(pkas)
return
def pltconf():
#plt.rc('font',family='serif')
plt.rc('font',size=10)
plt.rc('legend',fontsize=10)
#plt.rc('text',usetex=True)
plt.rc('savefig',dpi=300)
if __name__ == '__main__':
#test()
pltconf()
test10R()
#parametersTest()
plt.show()
| en | 0.516948 | #!/usr/bin/env python Prototype for newer fit class that allows user created models to be added dynamically and can do multivariate fitting DIY lsq Evaluate the func residuals given parameters Evaluate func and get sum sq res for given params Fit by minimizing r-squared using various algorithms #downhill simplex algorithm #using scipy version of levenberg-Marquardt algorithm #ax.set_title(self.params) Generic version of monte carlo parameter uncert, returns st dev for each parameter over repeated runs Models created dynamically should use this to inherit from #F=fitter(linear,[0.5,1],x,y) #x,y=T.complexHH() #F=fitter(sigmoi[1,1,1]d,[6,0,1,1],x,y) pKa fitting from kcats using substr depletion #may be several replicates #fit #fit pKa #res = F.estimateUncertainty(x,y,[5,2,0],xerr=0.1,yerr=0.2,runs=10) Random sub-sampling removal of points to test effects on fit parameters #plt.rc('font',family='serif') #plt.rc('text',usetex=True) #test() #parametersTest() | 3.121907 | 3 |
assemblyline/al_ui/error.py | dendisuhubdy/grokmachine | 46 | 6624000 |
from flask import Blueprint, render_template, request, redirect
from sys import exc_info
from traceback import format_tb
from urllib import quote
from al_ui.apiv3.core import make_api_response
from al_ui.config import AUDIT, AUDIT_LOG, LOGGER, config
from al_ui.helper.views import redirect_helper
from al_ui.http_exceptions import AccessDeniedException, QuotaExceededException
from al_ui.logger import log_with_traceback
errors = Blueprint("errors", __name__)
######################################
# Custom Error page
@errors.app_errorhandler(401)
def handle_401(_):
    """Return a 401 API response, or bounce a browser to the login page
    with the original URL preserved in the 'next' parameter."""
    if not request.path.startswith("/api/"):
        return redirect(redirect_helper("/login.html?next=%s" % quote(request.full_path)))
    return make_api_response("", "Authentication required", 401)
@errors.app_errorhandler(404)
def handle_404(_):
    """Return a 404 API response, or render the 404 page for browsers."""
    if not request.path.startswith("/api/"):
        return render_template('404.html', url=request.path), 404
    return make_api_response("", "Api does not exist (%s)" % request.path, 404)
@errors.app_errorhandler(403)
def handle_403(e):
    """Access denied: audit-log the traceback when auditing is enabled,
    then return a 403 API response or render the matching 403 template.

    NOTE: e.message is a Python 2 idiom (removed in Python 3).
    """
    trace = exc_info()[2]
    if AUDIT:
        log_with_traceback(AUDIT_LOG, trace, "Access Denied")
    if request.path.startswith("/api/"):
        return make_api_response("", "Access Denied (%s) [%s]" % (request.path, e.message), 403)
    else:
        # disabled accounts get a dedicated page with a contact email
        if e.message.startswith("User") and e.message.endswith("is disabled"):
            return render_template('403e.html', exception=e.message,
                                   email=config.ui.get("email", "")), 403
        else:
            return render_template('403.html', exception=e.message), 403
@errors.app_errorhandler(500)
def handle_500(e):
    """Server error: route access/quota exceptions to their own handlers,
    otherwise log the traceback and return a formatted 500 response."""
    if isinstance(e, AccessDeniedException):
        return handle_403(e)
    if isinstance(e, QuotaExceededException):
        # quota exhaustion is reported as 503 Service Unavailable
        return make_api_response("", e.message, 503)
    trace = exc_info()[2]
    log_with_traceback(LOGGER, trace, "Exception", is_exception=True)
    # full traceback plus the exception repr, used as the response body
    message = ''.join(['\n'] + format_tb(exc_info()[2]) + ['%s: %s\n' % (e.__class__.__name__, str(e))]).rstrip('\n')
    if request.path.startswith("/api/"):
        return make_api_response("", message, 500)
    else:
        return render_template('500.html', exception=message), 500
|
from flask import Blueprint, render_template, request, redirect
from sys import exc_info
from traceback import format_tb
from urllib import quote
from al_ui.apiv3.core import make_api_response
from al_ui.config import AUDIT, AUDIT_LOG, LOGGER, config
from al_ui.helper.views import redirect_helper
from al_ui.http_exceptions import AccessDeniedException, QuotaExceededException
from al_ui.logger import log_with_traceback
errors = Blueprint("errors", __name__)
######################################
# Custom Error page
@errors.app_errorhandler(401)
def handle_401(_):
if request.path.startswith("/api/"):
return make_api_response("", "Authentication required", 401)
else:
return redirect(redirect_helper("/login.html?next=%s" % quote(request.full_path)))
@errors.app_errorhandler(404)
def handle_404(_):
if request.path.startswith("/api/"):
return make_api_response("", "Api does not exist (%s)" % request.path, 404)
else:
return render_template('404.html', url=request.path), 404
@errors.app_errorhandler(403)
def handle_403(e):
trace = exc_info()[2]
if AUDIT:
log_with_traceback(AUDIT_LOG, trace, "Access Denied")
if request.path.startswith("/api/"):
return make_api_response("", "Access Denied (%s) [%s]" % (request.path, e.message), 403)
else:
if e.message.startswith("User") and e.message.endswith("is disabled"):
return render_template('403e.html', exception=e.message,
email=config.ui.get("email", "")), 403
else:
return render_template('403.html', exception=e.message), 403
@errors.app_errorhandler(500)
def handle_500(e):
if isinstance(e, AccessDeniedException):
return handle_403(e)
if isinstance(e, QuotaExceededException):
return make_api_response("", e.message, 503)
trace = exc_info()[2]
log_with_traceback(LOGGER, trace, "Exception", is_exception=True)
message = ''.join(['\n'] + format_tb(exc_info()[2]) + ['%s: %s\n' % (e.__class__.__name__, str(e))]).rstrip('\n')
if request.path.startswith("/api/"):
return make_api_response("", message, 500)
else:
return render_template('500.html', exception=message), 500
| de | 0.730281 | ###################################### # Custom Error page | 2.188625 | 2 |
UCIQE.py | TongJiayan/UCIQE-python | 1 | 6624001 | <reponame>TongJiayan/UCIQE-python<gh_stars>1-10
import numpy as np
import cv2
def getUCIQE(img):
    """Compute the UCIQE underwater colour image quality metric for an image file.

    Parameters
    ----------
    img : str
        Path to the image file (read with OpenCV in BGR order).

    Returns
    -------
    float
        UCIQE score: weighted sum of chroma standard deviation, luminance
        contrast and mean saturation, computed in CIELAB space.
    """
    img_BGR = cv2.imread(img)
    img_LAB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2LAB)
    img_LAB = np.array(img_LAB,dtype=np.float64)
    # Trained coefficients are c1=0.4680, c2=0.2745, c3=0.2576 according to paper.
    # (presumably Yang & Sowmya's UCIQE paper, IEEE TIP 2015 -- TODO confirm)
    coe_Metric = [0.4680, 0.2745, 0.2576]
    # normalise the L, a, b channels to [0, 1]
    img_lum = img_LAB[:,:,0]/255.0
    img_a = img_LAB[:,:,1]/255.0
    img_b = img_LAB[:,:,2]/255.0
    # item-1: standard deviation of chroma sqrt(a^2 + b^2)
    chroma = np.sqrt(np.square(img_a)+np.square(img_b))
    sigma_c = np.std(chroma)
    # item-2: luminance contrast between the 99th and 1st percentile pixels
    img_lum = img_lum.flatten()
    sorted_index = np.argsort(img_lum)
    top_index = sorted_index[int(len(img_lum)*0.99)]
    bottom_index = sorted_index[int(len(img_lum)*0.01)]
    con_lum = img_lum[top_index] - img_lum[bottom_index]
    # item-3: mean saturation (chroma / luminance, 0 where luminance is 0)
    chroma = chroma.flatten()
    sat = np.divide(chroma, img_lum, out=np.zeros_like(chroma, dtype=np.float64), where=img_lum!=0)
    avg_sat = np.mean(sat)
    uciqe = sigma_c*coe_Metric[0] + con_lum*coe_Metric[1] + avg_sat*coe_Metric[2]
    return uciqe
if __name__ == '__main__':
    # example usage on a sample image
    img = '906_img_.png'
    uciqe = getUCIQE(img)
    print("UCIQE of image '{0}' = {1}".format(img,uciqe))
import cv2
def getUCIQE(img):
img_BGR = cv2.imread(img)
img_LAB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2LAB)
img_LAB = np.array(img_LAB,dtype=np.float64)
# Trained coefficients are c1=0.4680, c2=0.2745, c3=0.2576 according to paper.
coe_Metric = [0.4680, 0.2745, 0.2576]
img_lum = img_LAB[:,:,0]/255.0
img_a = img_LAB[:,:,1]/255.0
img_b = img_LAB[:,:,2]/255.0
# item-1
chroma = np.sqrt(np.square(img_a)+np.square(img_b))
sigma_c = np.std(chroma)
# item-2
img_lum = img_lum.flatten()
sorted_index = np.argsort(img_lum)
top_index = sorted_index[int(len(img_lum)*0.99)]
bottom_index = sorted_index[int(len(img_lum)*0.01)]
con_lum = img_lum[top_index] - img_lum[bottom_index]
# item-3
chroma = chroma.flatten()
sat = np.divide(chroma, img_lum, out=np.zeros_like(chroma, dtype=np.float64), where=img_lum!=0)
avg_sat = np.mean(sat)
uciqe = sigma_c*coe_Metric[0] + con_lum*coe_Metric[1] + avg_sat*coe_Metric[2]
return uciqe
if __name__ == '__main__':
img = '906_img_.png'
uciqe = getUCIQE(img)
print("UCIQE of image '{0}' = {1}".format(img,uciqe)) | en | 0.824602 | # Trained coefficients are c1=0.4680, c2=0.2745, c3=0.2576 according to paper. # item-1 # item-2 # item-3 | 2.521069 | 3 |
.github/workflows/find_changed_files.py | yut23/Microphysics | 1 | 6624002 | <filename>.github/workflows/find_changed_files.py
import subprocess
import sys
import argparse
from contextlib import contextmanager
import os
@contextmanager
def cd(newdir):
    """Context manager: chdir into newdir (with ~ expansion), restoring
    the previous working directory on exit, even on error."""
    saved = os.getcwd()
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        os.chdir(saved)
def find_files(SHAs=None):
    """Return the set of network directories touched by a git diff.

    Runs ``git diff --name-only`` (optionally between the given SHAs),
    keeps files under ``networks/``, and walks each file's parent
    directories looking for a NETWORK_PROPERTIES marker file to find
    the containing network.  Returns the set of network paths relative
    to ``networks/``.
    """
    diff_command = ['git', 'diff', '--name-only']
    if SHAs is not None:
        diff_command += SHAs
    stdout, stderr = subprocess.Popen(diff_command,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT).communicate()
    # NOTE(review): with stderr=STDOUT, communicate() always returns
    # stderr=None, so this check can never fire; checking the process
    # return code would be more reliable.
    if stderr is not None:
        raise Exception('git diff encountered an error')
    files = [f for f in stdout.decode('utf-8').strip().split('\n')
             if f.startswith('networks/')]
    print(files)
    # see which directories contain changed files
    changed_networks = set()
    for f in files:
        # check for the NETWORK_PROPERTIES file in each parent directory
        parts = f.split('/')
        while parts:
            if os.path.exists(os.path.join(*parts, 'NETWORK_PROPERTIES')):
                # remove networks/
                changed_networks.add(os.path.join(*parts[1:]))
                break
            parts.pop(-1)
    print(changed_networks)
    return changed_networks
def _make_in(directory, make_command):
    """Run ``make clean`` then *make_command* inside *directory*, echoing output.

    Raises ``Exception`` when the build exits with a non-zero status.
    """
    with cd(directory):
        print('::group::making %s' % directory)
        subprocess.run('make clean'.split(), stdout=subprocess.DEVNULL, check=True)
        process = subprocess.run(make_command,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 shell=True)
        print(process.stdout.decode('utf-8'))
        print('::endgroup::')
        # BUG FIX: stderr is always None here (it is redirected to stdout),
        # so the old ``process.stderr is not None`` test was dead code; the
        # return code is the only reliable failure signal.
        if process.returncode != 0:
            raise Exception('make encountered an error')


def run(SHAs=None, make_options=''):
    """Build burn_cell for every changed network, plus test_eos once.

    Parameters
    ----------
    SHAs : list of str, optional
        Forwarded to :func:`find_files` to determine changed networks.
    make_options : str, optional
        Extra flags spliced into each ``make`` invocation (e.g. ``-j 2``).
    """
    networks = find_files(SHAs)
    if len(networks) == 0:
        # Nothing network-related changed: still smoke-test one network.
        networks = ['aprox13']
    for network in networks:
        make_command = f'make {make_options} USE_MPI=FALSE USE_OMP=FALSE USE_CUDA=FALSE NETWORK_DIR={network}'
        print(f'make command = {make_command}')
        _make_in('unit_test/burn_cell', make_command)
    # compile test_eos as well
    make_command = f'make {make_options} USE_MPI=FALSE USE_OMP=FALSE USE_CUDA=FALSE'
    _make_in('unit_test/test_eos', make_command)
if __name__ == '__main__':
    # CLI entry point: optional make flags plus any number of SHAs to diff.
    arg_parser = argparse.ArgumentParser(description='')
    arg_parser.add_argument('-make-options',
                            default='-j 2',
                            help='make options')
    arg_parser.add_argument('SHAs', nargs='*', default=None,
                            help='SHAs to be compared')
    parsed = arg_parser.parse_args()
    run(SHAs=parsed.SHAs, make_options=parsed.make_options)
import subprocess
import sys
import argparse
from contextlib import contextmanager
import os
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def find_files(SHAs=None):
diff_command = ['git', 'diff', '--name-only']
if SHAs is not None:
diff_command += SHAs
stdout, stderr = subprocess.Popen(diff_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()
if stderr is not None:
raise Exception('git diff encountered an error')
files = [f for f in stdout.decode('utf-8').strip().split('\n')
if f.startswith('networks/')]
print(files)
# see which directories contain changed files
changed_networks = set()
for f in files:
# check for the NETWORK_PROPERTIES file in each parent directory
parts = f.split('/')
while parts:
if os.path.exists(os.path.join(*parts, 'NETWORK_PROPERTIES')):
# remove networks/
changed_networks.add(os.path.join(*parts[1:]))
break
parts.pop(-1)
print(changed_networks)
return changed_networks
def run(SHAs=None, make_options=''):
networks = find_files(SHAs)
if len(networks) == 0:
networks = ['aprox13']
GITHUB_WORKSPACE = os.environ.get('GITHUB_WORKSPACE')
for network in networks:
make_command = f'make {make_options} USE_MPI=FALSE USE_OMP=FALSE USE_CUDA=FALSE NETWORK_DIR={network}'
print(f'make command = {make_command}')
with cd(f'unit_test/burn_cell'):
print('::group::making unit_test/burn_cell')
subprocess.run('make clean'.split(), stdout=subprocess.DEVNULL, check=True)
process = subprocess.run(make_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
print(process.stdout.decode('utf-8'))
print('::endgroup::')
if process.stderr is not None or process.returncode != 0:
raise Exception('make encountered an error')
# compile test_eos as well
make_command = f'make {make_options} USE_MPI=FALSE USE_OMP=FALSE USE_CUDA=FALSE'
with cd(f'unit_test/test_eos'):
print('::group::making unit_test/test_eos')
subprocess.run('make clean'.split(), stdout=subprocess.DEVNULL, check=True)
process = subprocess.run(make_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
print(process.stdout.decode('utf-8'))
print('::endgroup::')
if process.stderr is not None or process.returncode != 0:
raise Exception('make encountered an error')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-make-options',
default='-j 2',
help='make options')
parser.add_argument('SHAs', nargs='*', default=None,
help='SHAs to be compared')
args = parser.parse_args()
run(SHAs=args.SHAs, make_options=args.make_options)
| en | 0.856699 | # see which directories contain changed files # check for the NETWORK_PROPERTIES file in each parent directory # remove networks/ # compile test_eos as well | 2.566774 | 3 |
mab/gd/schw/schwhelper.py | maartenbreddels/mab | 1 | 6624003 | from numpy import *
import numpy
import os
class SchwHelper(object):
    """Helper mixin for Schwarzschild-model grids.

    NOTE(review): the class attributes read below (``logE1``, ``logE2``,
    ``nI1``) and the globals ``nI2``/``nI3`` used by ``index_to_orbitnr``
    are not defined in this file's visible portion — presumably supplied by
    subclasses / module configuration.  Confirm before refactoring.
    """
    def __init__(self):
        pass
    @classmethod
    def getrs(cls, galaxy, dither=1, dE=False, physical=False):
        """Return ``nI1*dither`` radii, log-spaced between cls.logE1 and cls.logE2.

        When ``physical`` is true the bounds are shifted from arcsec to kpc
        using the galaxy's conversion factor.
        NOTE(review): the ``dE`` argument is accepted but unused here.
        """
        logr1, logr2 = cls.logE1, cls.logE2
        kpc_to_arcsec = galaxy.kpc_to_arcsec(1.)
        if physical:
            logrmin, logrmax = logr1-log10(kpc_to_arcsec), logr2-log10(kpc_to_arcsec)
        else:
            logrmin, logrmax = logr1, logr2
        nE = cls.nI1
        logrs = arange(nE*dither, dtype=float) / (nE*dither-1) * (logrmax-logrmin) + logrmin
        rs = 10**logrs
        return rs
    @classmethod
    def getrborders(cls, galaxy, dither=1, dE=False, physical=False):
        """Return the ``nI1*dither + 1`` bin borders bracketing :meth:`getrs`.

        The half-step offset (-0.5) centres each :meth:`getrs` radius inside
        its border pair.  NOTE(review): ``dE`` is accepted but unused.
        """
        logr1, logr2 = cls.logE1, cls.logE2
        kpc_to_arcsec = galaxy.kpc_to_arcsec(1.)
        if physical:
            logrmin, logrmax = logr1-log10(kpc_to_arcsec), logr2-log10(kpc_to_arcsec)
        else:
            logrmin, logrmax = logr1, logr2
        nE = cls.nI1
        logrs = (arange(nE*dither+1, dtype=float) -0.5) / (nE*dither-1) * (logrmax-logrmin) + logrmin
        rs = 10**logrs
        return rs
    @classmethod
    def getEs(cls, galaxy, dither=1, dE=False):
        """Return the grid energies: the potential evaluated at the physical radii.

        With ``dE`` true, also return central-difference energy spacings
        (one-sided at both ends).
        """
        logr1, logr2 = cls.logE1, cls.logE2
        kpc_to_arcsec = galaxy.kpc_to_arcsec(1.)
        logrmin, logrmax = logr1-log10(kpc_to_arcsec), logr2-log10(kpc_to_arcsec)
        nE = cls.nI1
        logrs = arange(nE*dither, dtype=float) / (nE*dither-1) * (logrmax-logrmin) + logrmin
        rs = 10**logrs
        Es = galaxy.potentialr(rs)
        if dE:
            # Central differences in the interior, forward/backward at the edges.
            dEs = concatenate( ([Es[1] - Es[0]], (Es[2:] - Es[0:-2])/2, [Es[-1] - Es[-2]]) )
            return Es, dEs
        else:
            return Es
    @staticmethod
    def index_to_orbitnr(i1, i2, i3):
        """Flatten a 3-d integral-of-motion index to a linear orbit number.

        NOTE(review): relies on module globals ``nI2``/``nI3`` that are not
        defined in the visible scope — raises NameError if they are missing.
        """
        return i1*nI2*nI3 + i2*nI3 + i3
class SchwSolution(object):
    """Load a Schwarzschild solution: orbit weights plus projected moments.

    Reads ``orbitweights<weightname>.npy`` (from *modelpath* when not fitted,
    otherwise from *dirname*) and ``projectedmoments.npy``, then combines
    them into the weighted projected moments and the 2-D density profile.
    """
    def __init__(self, dirname, n_moments, n_constraints, modelpath, weightname="", fitted=True, addLz=0):
        self.dirname = dirname
        if not fitted:
            filename = os.path.join(modelpath, "orbitweights" + weightname + ".npy")
            orbitweights = array(numpy.load(filename).flat)
            if addLz:
                # Replicate the non-rotating weights over addLz angular-momentum
                # columns (mirrored halves around the middle column).
                # BUG FIX: use floor division for the index arithmetic; under
                # Python 3 ``addLz/2`` is a float and indexing would raise.
                half = addLz // 2
                allorbitweights = zeros((len(orbitweights), addLz))
                for i in range(half):
                    allorbitweights[:, i] = orbitweights
                    allorbitweights[:, i + half + 1] = orbitweights
                allorbitweights[:, half] = 1 * orbitweights
                orbitweights = ravel(allorbitweights) / addLz
        else:
            filename = os.path.join(dirname, "orbitweights" + weightname + ".npy")
            orbitweights = array(numpy.load(filename).flat)
        filename = os.path.join(dirname, "projectedmoments.npy")
        # Copy the memmap into memory: shape (n_orbits, n_moments, n_constraints).
        projectedmoments = array(memmap(filename, dtype='float64', mode='readonly',
                                        shape=(len(orbitweights), n_moments, n_constraints)))
        self.orblibmoments = projectedmoments
        # Normalisation constant for the stored moments.
        # NOTE(review): 10000*5*5*25 presumably encodes the orbit-library
        # sampling (points per orbit x dithering) — confirm against the
        # library generator before changing.
        f = (10000 * 5 * 5 * 25)
        projectedmoments /= f
        # Weighted sum over orbits -> solution moments per constraint.
        self.projectedmoments = tensordot(projectedmoments, orbitweights, axes=([0], [0]))
        densities = array(projectedmoments[:, 0, :])
        # Projected (2-D) density: weight each orbit's density contribution.
        self.rho2d = sum(orbitweights * transpose(densities), axis=1)
| from numpy import *
import numpy
import os
class SchwHelper(object):
def __init__(self):
pass
@classmethod
def getrs(cls, galaxy, dither=1, dE=False, physical=False):
logr1, logr2 = cls.logE1, cls.logE2
kpc_to_arcsec = galaxy.kpc_to_arcsec(1.)
if physical:
logrmin, logrmax = logr1-log10(kpc_to_arcsec), logr2-log10(kpc_to_arcsec)
else:
logrmin, logrmax = logr1, logr2
nE = cls.nI1
logrs = arange(nE*dither, dtype=float) / (nE*dither-1) * (logrmax-logrmin) + logrmin
rs = 10**logrs
return rs
@classmethod
def getrborders(cls, galaxy, dither=1, dE=False, physical=False):
logr1, logr2 = cls.logE1, cls.logE2
kpc_to_arcsec = galaxy.kpc_to_arcsec(1.)
if physical:
logrmin, logrmax = logr1-log10(kpc_to_arcsec), logr2-log10(kpc_to_arcsec)
else:
logrmin, logrmax = logr1, logr2
nE = cls.nI1
logrs = (arange(nE*dither+1, dtype=float) -0.5) / (nE*dither-1) * (logrmax-logrmin) + logrmin
rs = 10**logrs
return rs
@classmethod
def getEs(cls, galaxy, dither=1, dE=False):
logr1, logr2 = cls.logE1, cls.logE2
kpc_to_arcsec = galaxy.kpc_to_arcsec(1.)
logrmin, logrmax = logr1-log10(kpc_to_arcsec), logr2-log10(kpc_to_arcsec)
nE = cls.nI1
logrs = arange(nE*dither, dtype=float) / (nE*dither-1) * (logrmax-logrmin) + logrmin
rs = 10**logrs
Es = galaxy.potentialr(rs)
if dE:
dEs = concatenate( ([Es[1] - Es[0]], (Es[2:] - Es[0:-2])/2, [Es[-1] - Es[-2]]) )
return Es, dEs
else:
return Es
@staticmethod
def index_to_orbitnr(i1, i2, i3):
return i1*nI2*nI3 + i2*nI3 + i3
class SchwSolution(object):
def __init__(self, dirname, n_moments, n_constraints, modelpath, weightname="", fitted=True, addLz=0):
self.dirname = dirname
if not fitted:
filename = os.path.join(modelpath, "orbitweights" +weightname +".npy")
#orbitweights = ravel(numpy.load(filename))
orbitweights = array(numpy.load(filename).flat)
if addLz:
allorbitweights = zeros((len(orbitweights), addLz))
for i in range(addLz/2):
allorbitweights[:,i] = orbitweights
allorbitweights[:,i+addLz/2+1] = orbitweights
allorbitweights[:,addLz/2] = 1*orbitweights
orbitweights = ravel(allorbitweights)/(addLz)
else:
filename = os.path.join(dirname, "orbitweights" +weightname +".npy")
#orbitweights = ravel(numpy.load(filename))
orbitweights = array(numpy.load(filename).flat)
filename = os.path.join(dirname, "projectedmoments.npy")
projectedmoments = array(memmap(filename, dtype='float64', mode='readonly', shape=(len(orbitweights), n_moments, n_constraints)))
self.orblibmoments = projectedmoments
#projectedmoments = load()
mask = projectedmoments[:,0,:] > 0
#print projectedmoments.shape
#print mask.shape
#mask = mask[:,newaxis,:]
#print mask.shape
f = (10000*5*5*25)
projectedmoments /= f
#for i in range(1, projectedmoments.shape[1]):
# projectedmoments[:,i,:][mask] /= projectedmoments[:,0,:][mask]
self.projectedmoments = tensordot(projectedmoments, orbitweights, axes=([0], [0]))
densities = array(projectedmoments[:,0,:])
self.rho2d = sum(orbitweights * transpose(densities), axis=1)
| en | 0.300815 | #orbitweights = ravel(numpy.load(filename)) #orbitweights = ravel(numpy.load(filename)) #projectedmoments = load() #print projectedmoments.shape #print mask.shape #mask = mask[:,newaxis,:] #print mask.shape #for i in range(1, projectedmoments.shape[1]): # projectedmoments[:,i,:][mask] /= projectedmoments[:,0,:][mask] | 2.470378 | 2 |
download-avocado.py | flekschas/peax-avocado | 1 | 6624004 | #!/usr/bin/env python
import argparse
import os
import sys

# Make the shared experiment utilities importable when run from this folder.
module_path = os.path.abspath(os.path.join("../experiments"))
if module_path not in sys.path:
    sys.path.append(module_path)

from utils import download_file

parser = argparse.ArgumentParser(description="Peax-Avocado")
parser.add_argument("chrom", help="chromosome name")

try:
    args = parser.parse_args()
except SystemExit as err:
    # argparse exits itself on --help (code 0) and usage errors (code 2);
    # treat both as a clean exit after showing help, re-raise anything else.
    if err.code == 0:
        sys.exit(0)
    if err.code == 2:
        parser.print_help()
        sys.exit(0)
    raise

download_dir = "models"
base_url = "https://noble.gs.washington.edu/proj/avocado/model/"

# Fetch the pre-trained Avocado model definition (.json) and weights (.h5)
# for the requested chromosome.
# FIX: route both downloads through ``download_dir`` — it was previously
# assigned but never used, leaving the target directory hard-coded twice.
for ext in ("json", "h5"):
    fname = f"avocado-{args.chrom}.{ext}"
    download_file(base_url + fname, fname, dir=download_dir)
| #!/usr/bin/env python
import argparse
import os
import sys
module_path = os.path.abspath(os.path.join("../experiments"))
if module_path not in sys.path:
sys.path.append(module_path)
from utils import download_file
parser = argparse.ArgumentParser(description="Peax-Avocado")
parser.add_argument("chrom", help="chromosome name")
try:
args = parser.parse_args()
except SystemExit as err:
if err.code == 0:
sys.exit(0)
if err.code == 2:
parser.print_help()
sys.exit(0)
raise
download_dir = "models"
base_url = "https://noble.gs.washington.edu/proj/avocado/model/"
download_file(
f"{base_url}avocado-{args.chrom}.json",
f"avocado-{args.chrom}.json",
dir="models"
)
download_file(
f"{base_url}avocado-{args.chrom}.h5",
f"avocado-{args.chrom}.h5",
dir="models"
)
| ru | 0.26433 | #!/usr/bin/env python | 2.678633 | 3 |
labs/03_neural_recsys/movielens_paramsearch.py | soufiomario/labs-Deep-learning | 1,398 | 6624005 | from math import floor, ceil
from time import time
from pathlib import Path
from zipfile import ZipFile
from urllib.request import urlretrieve
from contextlib import contextmanager
import random
from pprint import pprint
import json
import numpy as np
import pandas as pd
import joblib
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import tensorflow as tf
from keras.layers import Input, Embedding, Flatten, merge, Dense, Dropout
from keras.layers import BatchNormalization
from keras.models import Model
from dask import delayed, compute
# Training objective used when a configuration does not specify one.
DEFAULT_LOSS = 'cross_entropy'
# MovieLens 100k dataset location and the local paths derived from it.
ML_100K_URL = "http://files.grouplens.org/datasets/movielens/ml-100k.zip"
ML_100K_FILENAME = Path(ML_100K_URL.rsplit('/', 1)[1])
ML_100K_FOLDER = Path('ml-100k')
# Per-configuration artifact names written under results/<digest>/<epochs>/.
RESULTS_FILENAME = 'results.json'
MODEL_FILENAME = 'model.h5'
# Download and unpack the dataset once, at import time.
if not ML_100K_FILENAME.exists():
    print('Downloading %s to %s...' % (ML_100K_URL, ML_100K_FILENAME))
    urlretrieve(ML_100K_URL, ML_100K_FILENAME.name)
if not ML_100K_FOLDER.exists():
    print('Extracting %s to %s...' % (ML_100K_FILENAME, ML_100K_FOLDER))
    ZipFile(ML_100K_FILENAME.name).extractall('.')
# Full ratings table: one (user, item, rating, timestamp) row per event.
all_ratings = pd.read_csv(ML_100K_FOLDER / 'u.data', sep='\t',
                          names=["user_id", "item_id", "rating", "timestamp"])
# Baseline hyper-parameters; individual search points override a subset.
DEFAULT_PARAMS = dict(
    embedding_size=16,
    hidden_size=64,
    n_hidden=4,
    dropout_embedding=0.3,
    dropout_hidden=0.3,
    use_batchnorm=True,
    loss=DEFAULT_LOSS,
    optimizer='adam',
    batch_size=64,
)
# Hyper-parameters shared by every model family in the search.
COMMON_SEARCH_SPACE = dict(
    embedding_size=[16, 32, 64, 128],
    dropout_embedding=[0, 0.2, 0.5],
    dropout_hidden=[0, 0.2, 0.5],
    use_batchnorm=[True, False],
    loss=['mse', 'mae', 'cross_entropy'],
    batch_size=[16, 32, 64, 128],
)
# Two families: a pure embedding model (n_hidden=0) and MLPs of varying depth.
SEARCH_SPACE = [
    dict(n_hidden=[0], **COMMON_SEARCH_SPACE),
    dict(n_hidden=[1, 2, 3, 4, 5],
         hidden_size=[32, 64, 128, 256, 512],
         **COMMON_SEARCH_SPACE),
]
def bootstrap_ci(func, data_args, ci_range=(0.025, 0.975), n_iter=10000,
                 random_state=0):
    """Percentile-bootstrap confidence interval for ``func(*data_args)``.

    Parameters
    ----------
    func : callable
        Statistic to evaluate on each resample (e.g. a sklearn metric).
    data_args : sequence of array-like
        Arrays resampled jointly along their first axis; all must share the
        first dimension of ``data_args[0]``.
    ci_range : (float, float)
        Lower/upper percentiles of the bootstrap distribution to return.
    n_iter : int
        Number of bootstrap resamples.
    random_state : int
        Seed for the resampling RNG (results are deterministic per seed).

    Returns
    -------
    (low, high) : tuple
        The requested percentile bounds of the statistic.
    """
    rng = np.random.RandomState(random_state)
    n_samples = data_args[0].shape[0]
    results = []
    for i in range(n_iter):
        # sample n_samples out of n_samples with replacement
        # BUG FIX: randint's upper bound is exclusive, so the previous
        # ``randint(0, n_samples - 1, ...)`` could never draw the last
        # sample, biasing every bootstrap resample.
        idx = rng.randint(0, n_samples, n_samples)
        resampled_args = [np.asarray(arg)[idx] for arg in data_args]
        results.append(func(*resampled_args))
    results = np.sort(results)
    # Clamp the upper index: ceil() can reach n_iter when ci_range[1] == 1.0.
    hi_idx = min(ceil(ci_range[1] * n_iter), n_iter - 1)
    return (results[floor(ci_range[0] * n_iter)],
            results[hi_idx])
def make_model(user_input_dim, item_input_dim,
               embedding_size=16, hidden_size=64, n_hidden=4,
               dropout_embedding=0.3, dropout_hidden=0.3,
               optimizer='adam', loss=DEFAULT_LOSS, use_batchnorm=True,
               **ignored_args):
    """Build a Keras (1.x API) recommender: user/item embeddings + MLP head.

    Parameters
    ----------
    user_input_dim, item_input_dim : int
        Vocabulary sizes for the user and item embedding tables.
    embedding_size, hidden_size, n_hidden : int
        Embedding width, hidden-layer width, number of hidden layers.
    dropout_embedding, dropout_hidden : float
        Dropout rates after the concatenated embeddings / between layers.
    optimizer : str or keras optimizer
        Passed to ``model.compile``.
    loss : str
        'cross_entropy' -> 5-way softmax over rating classes (targets 0-4);
        anything else ('mse', 'mae', ...) -> scalar regression head.
    use_batchnorm : bool
        Insert BatchNormalization between hidden layers.
    **ignored_args
        Extra search-space keys (batch_size, split_idx, ...) are ignored so
        the whole params dict can be splatted in.
    """
    user_id_input = Input(shape=[1], name='user')
    item_id_input = Input(shape=[1], name='item')
    user_embedding = Embedding(output_dim=embedding_size,
                               input_dim=user_input_dim,
                               input_length=1,
                               name='user_embedding')(user_id_input)
    item_embedding = Embedding(output_dim=embedding_size,
                               input_dim=item_input_dim,
                               input_length=1,
                               name='item_embedding')(item_id_input)
    user_vecs = Flatten()(user_embedding)
    item_vecs = Flatten()(item_embedding)
    input_vecs = merge([user_vecs, item_vecs], mode='concat')
    x = Dropout(dropout_embedding)(input_vecs)
    for i in range(n_hidden):
        x = Dense(hidden_size, activation='relu')(x)
        if i < n_hidden - 1:
            # No dropout/batchnorm after the last hidden layer.
            x = Dropout(dropout_hidden)(x)
            if use_batchnorm:
                x = BatchNormalization()(x)
    if loss == 'cross_entropy':
        y = Dense(output_dim=5, activation='softmax')(x)
        model = Model(input=[user_id_input, item_id_input], output=y)
        # BUG FIX: honour the ``optimizer`` argument — it was accepted but
        # silently ignored ('adam' was hard-coded in compile()).
        model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy')
    else:
        y = Dense(output_dim=1)(x)
        model = Model(input=[user_id_input, item_id_input], output=y)
        model.compile(optimizer=optimizer, loss=loss)
    return model
@contextmanager
def transactional_open(path, mode='wb'):
    """Open ``<path>.tmp`` for writing and rename it onto *path* on success.

    The rename happens only if the caller's block completes without raising,
    so readers never observe a partially-written *path*.
    IMPROVEMENT: on failure the partial ``.tmp`` file is now removed instead
    of being left behind; the original exception is re-raised.
    """
    tmp_path = path.with_name(path.name + '.tmp')
    try:
        with tmp_path.open(mode=mode) as f:
            yield f
    except BaseException:
        # Drop the partial temp file, then let the caller see the error.
        try:
            tmp_path.unlink()
        except OSError:
            pass
        raise
    tmp_path.rename(path)
@contextmanager
def transactional_fname(path):
    """Yield a ``<path>.tmp`` file name; rename it onto *path* when the block exits.

    For APIs (e.g. ``model.save``) that want a file *name* rather than an
    open file object.
    """
    tmp = path.with_name(path.name + '.tmp')
    yield str(tmp)
    tmp.rename(path)
def _compute_scores(model, prefix, user_id, item_id, rating, loss):
    """Score *model* on one split; return ({prefix}_mse/mae/mae_ci_*, predictions).

    For the classification head the argmax class index is shifted back to
    the 1-5 rating scale before computing the regression-style metrics.
    """
    predictions = model.predict([user_id, item_id])
    if loss == 'cross_entropy':
        predictions = predictions.argmax(axis=1) + 1
    ci_low, ci_high = bootstrap_ci(mean_absolute_error, [predictions, rating])
    results = {
        prefix + '_mse': mean_squared_error(predictions, rating),
        prefix + '_mae': mean_absolute_error(predictions, rating),
        prefix + '_mae_ci_min': ci_low,
        prefix + '_mae_ci_max': ci_high,
    }
    return results, predictions
def evaluate_one(**kwargs):
    """Run one hyper-parameter evaluation inside a fresh, single-threaded
    TensorFlow (1.x API) session.

    Thin wrapper around :func:`_evaluate_one`; keyword arguments are the
    search-point overrides plus ``split_idx``.
    """
    # Create a single threaded TF session for this Python thread:
    # parallelism is leveraged at a coarser level with dask
    session = tf.Session(
        # graph=tf.Graph(),
        config=tf.ConfigProto(intra_op_parallelism_threads=1))
    with session.as_default():
        # graph-level deterministic weights init
        tf.set_random_seed(0)
        _evaluate_one(**kwargs)
def _evaluate_one(**kwargs):
    """Train and evaluate one configuration on one train/test split.

    Merges *kwargs* over ``DEFAULT_PARAMS``, trains the model in 4 rounds of
    5 epochs, and after each round writes results.json, the model, and the
    prediction arrays under ``results/<digest>/<epochs>/``.

    Returns
    -------
    str
        joblib hash of the full parameter dict (the results directory name).
    """
    params = DEFAULT_PARAMS.copy()
    params.update(kwargs)
    params_digest = joblib.hash(params)
    results = params.copy()
    results['digest'] = params_digest
    results_folder = Path('results')
    results_folder.mkdir(exist_ok=True)
    folder = results_folder.joinpath(params_digest)
    folder.mkdir(exist_ok=True)
    if len(list(folder.glob("*/results.json"))) == 4:
        # BUG FIX: the old code printed 'Skipping' but fell through and
        # retrained anyway; actually skip already-completed configurations.
        print('Skipping')
        return params_digest
    split_idx = params.get('split_idx', 0)
    print("Evaluating model on split #%d:" % split_idx)
    pprint(params)
    ratings_train, ratings_test = train_test_split(
        all_ratings, test_size=0.2, random_state=split_idx)
    max_user_id = all_ratings['user_id'].max()
    max_item_id = all_ratings['item_id'].max()
    user_id_train = ratings_train['user_id']
    item_id_train = ratings_train['item_id']
    rating_train = ratings_train['rating']
    user_id_test = ratings_test['user_id']
    item_id_test = ratings_test['item_id']
    rating_test = ratings_test['rating']
    loss = params.get('loss', DEFAULT_LOSS)
    if loss == 'cross_entropy':
        # Softmax head expects class indices 0-4, not ratings 1-5.
        target_train = rating_train - 1
    else:
        target_train = rating_train
    model = make_model(max_user_id + 1, max_item_id + 1, **params)
    results['model_size'] = sum(w.size for w in model.get_weights())
    nb_epoch = 5
    epochs = 0
    for i in range(4):
        epochs += nb_epoch
        t0 = time()
        model.fit([user_id_train, item_id_train], target_train,
                  batch_size=params['batch_size'],
                  nb_epoch=nb_epoch, shuffle=True, verbose=False)
        epoch_duration = (time() - t0) / nb_epoch
        train_scores, train_preds = _compute_scores(
            model, 'train', user_id_train, item_id_train, rating_train, loss)
        results.update(train_scores)
        test_scores, test_preds = _compute_scores(
            model, 'test', user_id_test, item_id_test, rating_test, loss)
        results.update(test_scores)
        results['epoch_duration'] = epoch_duration
        results['epochs'] = epochs
        subfolder = folder.joinpath("%03d" % epochs)
        subfolder.mkdir(exist_ok=True)
        # Transactional results saving to avoid file corruption on ctrl-c
        results_filepath = subfolder.joinpath(RESULTS_FILENAME)
        with transactional_open(results_filepath, mode='w') as f:
            json.dump(results, f)
        model_filepath = subfolder.joinpath(MODEL_FILENAME)
        with transactional_fname(model_filepath) as fname:
            model.save(fname)
        # Save predictions and true labels to be able to recompute new scores
        # later
        with transactional_open(subfolder / 'test_preds.npy', mode='wb') as f:
            np.save(f, test_preds)
        with transactional_open(subfolder / 'train_preds.npy', mode='wb') as f:
            # BUG FIX: the train-predictions file previously saved
            # ``test_preds`` by mistake.
            np.save(f, train_preds)
        with transactional_open(subfolder / 'ratings.npy', mode='wb') as f:
            np.save(f, rating_test)
    return params_digest
def _model_complexity_proxy(params):
    """Rough tunable-parameter count, used only to sort models by size.

    Counts the dense-layer weights: the embedding-free model contributes
    two embedding vectors' worth; an MLP contributes the two embedding-to-
    hidden matrices plus the hidden-to-hidden matrices between layers.
    """
    emb = params['embedding_size']
    depth = params['n_hidden']
    if depth == 0:
        return 2 * emb
    width = params['hidden_size']
    return 2 * emb * width + (depth - 1) * width ** 2
if __name__ == "__main__":
    # Randomly sample 500 points from the grid (seeded for reproducibility),
    # evaluate each on 3 CV splits, and run everything lazily through dask.
    seed = 0
    n_params = 500
    all_combinations = list(ParameterGrid(SEARCH_SPACE))
    random.Random(seed).shuffle(all_combinations)
    sampled_params = all_combinations[:n_params]
    # Cheapest models first, so early results arrive quickly.
    sampled_params.sort(key=_model_complexity_proxy)
    evaluations = []
    for params in sampled_params:
        for split_idx in range(3):
            evaluations.append(delayed(evaluate_one)(
                split_idx=split_idx, **params))
    compute(*evaluations)
| from math import floor, ceil
from time import time
from pathlib import Path
from zipfile import ZipFile
from urllib.request import urlretrieve
from contextlib import contextmanager
import random
from pprint import pprint
import json
import numpy as np
import pandas as pd
import joblib
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import tensorflow as tf
from keras.layers import Input, Embedding, Flatten, merge, Dense, Dropout
from keras.layers import BatchNormalization
from keras.models import Model
from dask import delayed, compute
DEFAULT_LOSS = 'cross_entropy'
ML_100K_URL = "http://files.grouplens.org/datasets/movielens/ml-100k.zip"
ML_100K_FILENAME = Path(ML_100K_URL.rsplit('/', 1)[1])
ML_100K_FOLDER = Path('ml-100k')
RESULTS_FILENAME = 'results.json'
MODEL_FILENAME = 'model.h5'
if not ML_100K_FILENAME.exists():
print('Downloading %s to %s...' % (ML_100K_URL, ML_100K_FILENAME))
urlretrieve(ML_100K_URL, ML_100K_FILENAME.name)
if not ML_100K_FOLDER.exists():
print('Extracting %s to %s...' % (ML_100K_FILENAME, ML_100K_FOLDER))
ZipFile(ML_100K_FILENAME.name).extractall('.')
all_ratings = pd.read_csv(ML_100K_FOLDER / 'u.data', sep='\t',
names=["user_id", "item_id", "rating", "timestamp"])
DEFAULT_PARAMS = dict(
embedding_size=16,
hidden_size=64,
n_hidden=4,
dropout_embedding=0.3,
dropout_hidden=0.3,
use_batchnorm=True,
loss=DEFAULT_LOSS,
optimizer='adam',
batch_size=64,
)
COMMON_SEARCH_SPACE = dict(
embedding_size=[16, 32, 64, 128],
dropout_embedding=[0, 0.2, 0.5],
dropout_hidden=[0, 0.2, 0.5],
use_batchnorm=[True, False],
loss=['mse', 'mae', 'cross_entropy'],
batch_size=[16, 32, 64, 128],
)
SEARCH_SPACE = [
dict(n_hidden=[0], **COMMON_SEARCH_SPACE),
dict(n_hidden=[1, 2, 3, 4, 5],
hidden_size=[32, 64, 128, 256, 512],
**COMMON_SEARCH_SPACE),
]
def bootstrap_ci(func, data_args, ci_range=(0.025, 0.975), n_iter=10000,
random_state=0):
rng = np.random.RandomState(random_state)
n_samples = data_args[0].shape[0]
results = []
for i in range(n_iter):
# sample n_samples out of n_samples with replacement
idx = rng.randint(0, n_samples - 1, n_samples)
resampled_args = [np.asarray(arg)[idx] for arg in data_args]
results.append(func(*resampled_args))
results = np.sort(results)
return (results[floor(ci_range[0] * n_iter)],
results[ceil(ci_range[1] * n_iter)])
def make_model(user_input_dim, item_input_dim,
embedding_size=16, hidden_size=64, n_hidden=4,
dropout_embedding=0.3, dropout_hidden=0.3,
optimizer='adam', loss=DEFAULT_LOSS, use_batchnorm=True,
**ignored_args):
user_id_input = Input(shape=[1], name='user')
item_id_input = Input(shape=[1], name='item')
user_embedding = Embedding(output_dim=embedding_size,
input_dim=user_input_dim,
input_length=1,
name='user_embedding')(user_id_input)
item_embedding = Embedding(output_dim=embedding_size,
input_dim=item_input_dim,
input_length=1,
name='item_embedding')(item_id_input)
user_vecs = Flatten()(user_embedding)
item_vecs = Flatten()(item_embedding)
input_vecs = merge([user_vecs, item_vecs], mode='concat')
x = Dropout(dropout_embedding)(input_vecs)
for i in range(n_hidden):
x = Dense(hidden_size, activation='relu')(x)
if i < n_hidden - 1:
x = Dropout(dropout_hidden)(x)
if use_batchnorm:
x = BatchNormalization()(x)
if loss == 'cross_entropy':
y = Dense(output_dim=5, activation='softmax')(x)
model = Model(input=[user_id_input, item_id_input], output=y)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
else:
y = Dense(output_dim=1)(x)
model = Model(input=[user_id_input, item_id_input], output=y)
model.compile(optimizer='adam', loss=loss)
return model
@contextmanager
def transactional_open(path, mode='wb'):
tmp_path = path.with_name(path.name + '.tmp')
with tmp_path.open(mode=mode) as f:
yield f
tmp_path.rename(path)
@contextmanager
def transactional_fname(path):
tmp_path = path.with_name(path.name + '.tmp')
yield str(tmp_path)
tmp_path.rename(path)
def _compute_scores(model, prefix, user_id, item_id, rating, loss):
preds = model.predict([user_id, item_id])
preds = preds.argmax(axis=1) + 1 if loss == 'cross_entropy' else preds
mse = mean_squared_error(preds, rating)
mae = mean_absolute_error(preds, rating)
mae_ci_min, mae_ci_max = bootstrap_ci(mean_absolute_error, [preds, rating])
results = {}
results[prefix + '_mse'] = mse
results[prefix + '_mae'] = mae
results[prefix + '_mae_ci_min'] = mae_ci_min
results[prefix + '_mae_ci_max'] = mae_ci_max
return results, preds
def evaluate_one(**kwargs):
# Create a single threaded TF session for this Python thread:
# parallelism is leveraged at a coarser level with dask
session = tf.Session(
# graph=tf.Graph(),
config=tf.ConfigProto(intra_op_parallelism_threads=1))
with session.as_default():
# graph-level deterministic weights init
tf.set_random_seed(0)
_evaluate_one(**kwargs)
def _evaluate_one(**kwargs):
params = DEFAULT_PARAMS.copy()
params.update(kwargs)
params_digest = joblib.hash(params)
results = params.copy()
results['digest'] = params_digest
results_folder = Path('results')
results_folder.mkdir(exist_ok=True)
folder = results_folder.joinpath(params_digest)
folder.mkdir(exist_ok=True)
if len(list(folder.glob("*/results.json"))) == 4:
print('Skipping')
split_idx = params.get('split_idx', 0)
print("Evaluating model on split #%d:" % split_idx)
pprint(params)
ratings_train, ratings_test = train_test_split(
all_ratings, test_size=0.2, random_state=split_idx)
max_user_id = all_ratings['user_id'].max()
max_item_id = all_ratings['item_id'].max()
user_id_train = ratings_train['user_id']
item_id_train = ratings_train['item_id']
rating_train = ratings_train['rating']
user_id_test = ratings_test['user_id']
item_id_test = ratings_test['item_id']
rating_test = ratings_test['rating']
loss = params.get('loss', DEFAULT_LOSS)
if loss == 'cross_entropy':
target_train = rating_train - 1
else:
target_train = rating_train
model = make_model(max_user_id + 1, max_item_id + 1, **params)
results['model_size'] = sum(w.size for w in model.get_weights())
nb_epoch = 5
epochs = 0
for i in range(4):
epochs += nb_epoch
t0 = time()
model.fit([user_id_train, item_id_train], target_train,
batch_size=params['batch_size'],
nb_epoch=nb_epoch, shuffle=True, verbose=False)
epoch_duration = (time() - t0) / nb_epoch
train_scores, train_preds = _compute_scores(
model, 'train', user_id_train, item_id_train, rating_train, loss)
results.update(train_scores)
test_scores, test_preds = _compute_scores(
model, 'test', user_id_test, item_id_test, rating_test, loss)
results.update(test_scores)
results['epoch_duration'] = epoch_duration
results['epochs'] = epochs
subfolder = folder.joinpath("%03d" % epochs)
subfolder.mkdir(exist_ok=True)
# Transactional results saving to avoid file corruption on ctrl-c
results_filepath = subfolder.joinpath(RESULTS_FILENAME)
with transactional_open(results_filepath, mode='w') as f:
json.dump(results, f)
model_filepath = subfolder.joinpath(MODEL_FILENAME)
with transactional_fname(model_filepath) as fname:
model.save(fname)
# Save predictions and true labels to be able to recompute new scores
# later
with transactional_open(subfolder / 'test_preds.npy', mode='wb') as f:
np.save(f, test_preds)
with transactional_open(subfolder / 'train_preds.npy', mode='wb') as f:
np.save(f, test_preds)
with transactional_open(subfolder / 'ratings.npy', mode='wb') as f:
np.save(f, rating_test)
return params_digest
def _model_complexity_proxy(params):
# Quick approximation of the number of tunable parameter to rank models
# by increasing complexity
embedding_size = params['embedding_size']
n_hidden = params['n_hidden']
if n_hidden == 0:
return embedding_size * 2
else:
hidden_size = params['hidden_size']
return (2 * embedding_size * hidden_size +
(n_hidden - 1) * hidden_size ** 2)
if __name__ == "__main__":
seed = 0
n_params = 500
all_combinations = list(ParameterGrid(SEARCH_SPACE))
random.Random(seed).shuffle(all_combinations)
sampled_params = all_combinations[:n_params]
sampled_params.sort(key=_model_complexity_proxy)
evaluations = []
for params in sampled_params:
for split_idx in range(3):
evaluations.append(delayed(evaluate_one)(
split_idx=split_idx, **params))
compute(*evaluations)
| en | 0.835132 | # sample n_samples out of n_samples with replacement # Create a single threaded TF session for this Python thread: # parallelism is leveraged at a coarser level with dask # graph=tf.Graph(), # graph-level deterministic weights init #%d:" % split_idx) # Transactional results saving to avoid file corruption on ctrl-c # Save predictions and true labels to be able to recompute new scores # later # Quick approximation of the number of tunable parameter to rank models # by increasing complexity | 2.220467 | 2 |
wsm/backend/asyncwhois/cache.py | Rayologist/windows-sshd-manager | 9 | 6624006 | <filename>wsm/backend/asyncwhois/cache.py
from .base import BaseCacheHandler, Action, Kind
import json
from ..services import (
get_whois,
create_whois,
get_whois_by_ip,
update_whois_by_ip,
get_cache_by_ip,
)
class IPWhoisCacheHandler(BaseCacheHandler):
    """CRUD cache handler that dispatches whois Actions to the DB services.

    Each method inspects ``action.kind`` and forwards the payload to the
    matching async service call; an unrecognised kind falls through and the
    method returns None.
    """
    async def create(self, action: Action):
        """Create a whois record for the payload IP (CREATE_WHOIS only)."""
        if action.kind == Kind.CREATE_WHOIS:
            return await create_whois(action.payload.ip)
    async def read(self, action: Action):
        """Read whois/cache data depending on the action kind."""
        if action.kind == Kind.GET_WHOIS_BY_IP:
            return await get_whois_by_ip(action.payload.ip)
        elif action.kind == Kind.GET_WHOIS:
            return await get_whois()
        elif action.kind == Kind.GET_CACHE_BY_IP:
            return await get_cache_by_ip(action.payload.ip)
    async def update(self, action: Action):
        """Update the stored whois record for an IP (UPDATE_WHOIS_BY_IP only)."""
        if action.kind == Kind.UPDATE_WHOIS_BY_IP:
            return await update_whois_by_ip(
                action.payload.ip,
                action.payload.country,
                # The whois payload is stored as a JSON string.
                json.dumps(action.payload.whois),
            )
    async def delete(self, action: Action):
        # NOTE(review): super().delete() is returned without ``await`` —
        # if the base implementation is a coroutine this hands back an
        # un-awaited coroutine object; confirm BaseCacheHandler's contract.
        return super().delete(action)
| <filename>wsm/backend/asyncwhois/cache.py
from .base import BaseCacheHandler, Action, Kind
import json
from ..services import (
get_whois,
create_whois,
get_whois_by_ip,
update_whois_by_ip,
get_cache_by_ip,
)
class IPWhoisCacheHandler(BaseCacheHandler):
async def create(self, action: Action):
if action.kind == Kind.CREATE_WHOIS:
return await create_whois(action.payload.ip)
async def read(self, action: Action):
if action.kind == Kind.GET_WHOIS_BY_IP:
return await get_whois_by_ip(action.payload.ip)
elif action.kind == Kind.GET_WHOIS:
return await get_whois()
elif action.kind == Kind.GET_CACHE_BY_IP:
return await get_cache_by_ip(action.payload.ip)
async def update(self, action: Action):
if action.kind == Kind.UPDATE_WHOIS_BY_IP:
return await update_whois_by_ip(
action.payload.ip,
action.payload.country,
json.dumps(action.payload.whois),
)
async def delete(self, action: Action):
return super().delete(action)
| none | 1 | 2.147803 | 2 | |
db_folder/sqldatabase.py | TheXer/Skaut-discord-bot | 16 | 6624007 | <filename>db_folder/sqldatabase.py<gh_stars>10-100
from os import getenv
import mysql.connector
from dotenv import load_dotenv
load_dotenv("password.env")
USER = getenv("USER_DATABASE")
PASSWORD = getenv("PASSWORD")
HOST = getenv("HOST")
DATABASE = getenv("DATABASE")
class SQLDatabase:
    """
    Small wrapper for mysql.connector so it can be used as a context manager.

    Credentials default to the values loaded from ``password.env``; pass
    keyword arguments (user/password/host/database) to override them.
    """
    def __init__(self, **credentials):
        if not credentials:
            self.credentials = {"user": USER, "password": PASSWORD, "host": HOST, "database": DATABASE}
        else:
            self.credentials = credentials
        # Populated by __enter__; None until the connection is opened.
        self.database = None
        self.cursor = None
    def __enter__(self):
        self.database = mysql.connector.connect(**self.credentials)
        self.cursor = self.database.cursor()
        return self
    def __exit__(self, exception_type, exception_val, trace):
        try:
            self.cursor.close()
            self.database.close()
        except AttributeError:
            # Connection was never opened (or already torn down).
            print('Not closable.')
        # BUG FIX: this method previously returned True unconditionally,
        # which silently suppressed *every* exception raised inside the
        # ``with`` block.  Return False so errors propagate to the caller.
        return False
    def query(self, query: str, val=None):
        """
        Run a SELECT-style statement and return all rows.

        :param query: SQL text, optionally with placeholders
        :param val: parameters bound to the placeholders (optional)
        :return: list of tuples fetched from the database
        """
        self.cursor.execute(query, val or ())
        return self.cursor.fetchall()
    def execute(self, query, val=None, commit=False):
        """
        Run a statement; optionally commit the transaction.

        :param query: SQL text, optionally with placeholders
        :param val: parameters bound to the placeholders (optional)
        :param commit: commit after executing when True
        :return: None
        """
        self.cursor.execute(query, val or ())
        if commit:
            self.database.commit()
| <filename>db_folder/sqldatabase.py<gh_stars>10-100
from os import getenv
import mysql.connector
from dotenv import load_dotenv
load_dotenv("password.env")
USER = getenv("USER_DATABASE")
PASSWORD = getenv("PASSWORD")
HOST = getenv("HOST")
DATABASE = getenv("DATABASE")
class SQLDatabase:
"""
Small wrapper for mysql.connector, so I can use magic with statement. Because readibility counts!
"""
def __init__(self, **credentials):
if not credentials:
self.credentials = {"user": USER, "password": PASSWORD, "host": HOST, "database": DATABASE}
else:
self.credentials = credentials
self.database = None
self.cursor = None
def __enter__(self):
self.database = mysql.connector.connect(**self.credentials)
self.cursor = self.database.cursor()
return self
def __exit__(self, exception_type, exception_val, trace):
try:
self.cursor.close()
self.database.close()
except AttributeError:
print('Not closable.')
return True
def query(self, query: str, val=None):
"""
Query of database. Returns list tuples from database.
:param query: str
:param val: Optional
:return: list of tuples
"""
self.cursor.execute(query, val or ())
return self.cursor.fetchall()
def execute(self, query, val=None, commit=False):
"""
Execute your values and commit them. Or not. Your decision.
:param query: str
:param val: Optional
:param commit: bool
:return: None
"""
self.cursor.execute(query, val or ())
if commit:
self.database.commit()
| en | 0.655866 | Small wrapper for mysql.connector, so I can use magic with statement. Because readibility counts! Query of database. Returns list tuples from database. :param query: str :param val: Optional :return: list of tuples Execute your values and commit them. Or not. Your decision. :param query: str :param val: Optional :param commit: bool :return: None | 2.839571 | 3 |
vnpy_spreadtrading/backtesting.py | noranhe/vnpy_spreadtrading | 0 | 6624008 | from collections import defaultdict
from datetime import date, datetime
from typing import Callable, Type, Dict, List, Optional
from functools import partial
import numpy as np
from pandas import DataFrame
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
Interval,
Status
)
from vnpy.trader.object import TradeData, BarData, TickData
from vnpy.trader.optimize import (
OptimizationSetting,
check_optimization_setting,
run_bf_optimization,
run_ga_optimization
)
from .template import SpreadStrategyTemplate, SpreadAlgoTemplate
from .base import SpreadData, BacktestingMode, load_bar_data, load_tick_data
class BacktestingEngine:
""""""
gateway_name: str = "BACKTESTING"
    def __init__(self) -> None:
        """Create an empty engine; call set_parameters/add_strategy before use."""
        # Backtest configuration (filled by set_parameters).
        self.spread: SpreadData = None
        self.start: datetime = None
        self.end: datetime = None
        self.rate: float = 0
        self.slippage: float = 0
        self.size: float = 1
        self.pricetick: float = 0
        self.capital: int = 1_000_000
        self.mode: BacktestingMode = BacktestingMode.BAR

        # Strategy under test (filled by add_strategy).
        self.strategy_class: Type[SpreadStrategyTemplate] = None
        self.strategy: SpreadStrategyTemplate = None

        # Current replay state: last tick/bar seen and the simulated clock.
        self.tick: TickData = None
        self.bar: BarData = None
        self.datetime: datetime = None

        # History replay settings (days/callback are set via load_bar/load_tick).
        self.interval: Interval = None
        self.days: int = 0
        self.callback: Callable = None
        self.history_data: list = []

        # Simulated algo book: all algos ever started, and those still live.
        self.algo_count: int = 0
        self.algos: Dict[str, SpreadAlgoTemplate] = {}
        self.active_algos: Dict[str, SpreadAlgoTemplate] = {}

        # Accumulated results.
        self.trade_count: int = 0
        self.trades: Dict[str, TradeData] = {}

        self.logs: list = []

        self.daily_results: Dict[date, DailyResult] = {}
        self.daily_df: DataFrame = None
def output(self, msg) -> None:
"""
Output message of backtesting engine.
"""
print(f"{datetime.now()}\t{msg}")
def clear_data(self) -> None:
"""
Clear all data of last backtesting.
"""
self.strategy = None
self.tick = None
self.bar = None
self.datetime = None
self.algo_count = 0
self.algos.clear()
self.active_algos.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
    def set_parameters(
        self,
        spread: SpreadData,
        interval: Interval,
        start: datetime,
        rate: float,
        slippage: float,
        size: float,
        pricetick: float,
        capital: int = 0,
        end: datetime = None,
        mode: BacktestingMode = BacktestingMode.BAR
    ) -> None:
        """
        Configure the engine before running a backtest.

        :param spread: spread instrument to trade
        :param interval: bar interval (normalized through Interval below)
        :param start: history start datetime
        :param rate: commission rate applied to turnover
        :param slippage: slippage cost per unit traded
        :param size: contract multiplier
        :param pricetick: minimum price increment of the spread
        :param capital: starting capital used for statistics
        :param end: history end datetime (defaults to "now" inside load_data)
        :param mode: bar-driven or tick-driven backtesting
        """
        self.spread = spread
        # Accept either an Interval member or its raw value.
        self.interval = Interval(interval)
        self.rate = rate
        self.slippage = slippage
        self.size = size
        self.pricetick = pricetick
        self.start = start
        self.capital = capital
        self.end = end
        self.mode = mode
def add_strategy(self, strategy_class: type, setting: dict) -> None:
""""""
self.strategy_class = strategy_class
self.strategy = strategy_class(
self,
strategy_class.__name__,
self.spread,
setting
)
    def load_data(self) -> None:
        """
        Load spread bar/tick history between start and end into memory.
        """
        self.output("开始加载历史数据")

        # Default the end of the window to "now" when the caller gave none.
        if not self.end:
            self.end = datetime.now()

        if self.start >= self.end:
            self.output("起始日期必须小于结束日期")
            return

        # Bar mode loads synthesized spread bars; tick mode loads raw ticks.
        if self.mode == BacktestingMode.BAR:
            self.history_data = load_bar_data(
                self.spread,
                self.interval,
                self.start,
                self.end,
                self.pricetick
            )
        else:
            self.history_data = load_tick_data(
                self.spread,
                self.start,
                self.end
            )

        self.output(f"历史数据加载完成,数据量:{len(self.history_data)}")
    def run_backtesting(self) -> None:
        """
        Replay the loaded history: warm up the strategy for self.days
        calendar days, then trade through the remaining data.
        """
        if self.mode == BacktestingMode.BAR:
            func = self.new_bar
        else:
            func = self.new_tick

        self.strategy.on_init()

        # Use the first [days] of history data for initializing strategy
        day_count: int = 0
        ix: int = 0

        for ix, data in enumerate(self.history_data):
            # A change in calendar day marks one warm-up day completed.
            if self.datetime and data.datetime.day != self.datetime.day:
                day_count += 1
                if day_count >= self.days:
                    break

            self.datetime = data.datetime
            self.callback(data)

        self.strategy.inited = True
        self.output("策略初始化完成")

        self.strategy.on_start()
        self.strategy.trading = True
        self.output("开始回放历史数据")

        # Use the rest of history data for running backtesting.
        # NOTE(review): if the warm-up loop exhausts the data without
        # breaking, the final element is fed to both the init callback and
        # the replay below — confirm this is acceptable for short datasets.
        for data in self.history_data[ix:]:
            func(data)

        self.output("历史数据回放结束")
    def calculate_result(self) -> DataFrame:
        """
        Aggregate recorded trades into per-day mark-to-market results.

        :return: daily DataFrame indexed by date, or None when no trades
            were generated by the backtest.
        """
        self.output("开始计算逐日盯市盈亏")

        if not self.trades:
            self.output("成交记录为空,无法计算")
            return

        # Add trade data into daily result.
        for trade in self.trades.values():
            d: date = trade.datetime.date()
            daily_result = self.daily_results[d]
            daily_result.add_trade(trade)

        # Calculate daily result by iteration: each day's close price and
        # end position seed the next day's holding PnL.
        pre_close = 0
        start_pos = 0

        for daily_result in self.daily_results.values():
            daily_result.calculate_pnl(
                pre_close,
                start_pos,
                self.size,
                self.rate,
                self.slippage
            )

            pre_close = daily_result.close_price
            start_pos = daily_result.end_pos

        # Generate dataframe from every DailyResult attribute.
        results: defaultdict = defaultdict(list)

        for daily_result in self.daily_results.values():
            for key, value in daily_result.__dict__.items():
                results[key].append(value)

        self.daily_df: DataFrame = DataFrame.from_dict(results).set_index("date")

        self.output("逐日盯市盈亏计算完成")
        return self.daily_df
def calculate_statistics(self, df: DataFrame = None, output=True) -> dict:
""""""
self.output("开始计算策略统计指标")
# Check DataFrame input exterior
if df is None:
df: DataFrame = self.daily_df
# Check for init DataFrame
if df is None:
# Set all statistics to 0 if no trade.
start_date: str = ""
end_date: str = ""
total_days: int = 0
profit_days: int = 0
loss_days: int = 0
end_balance: float = 0
max_drawdown: float = 0
max_ddpercent: float = 0
max_drawdown_duration: int = 0
total_net_pnl: float = 0
daily_net_pnl: float = 0
total_commission: float = 0
daily_commission: float = 0
total_slippage: float = 0
daily_slippage: float = 0
total_turnover: float = 0
daily_turnover: float = 0
total_trade_count: int = 0
daily_trade_count: int = 0
total_return: float = 0
annual_return: float = 0
daily_return: float = 0
return_std: float = 0
sharpe_ratio: float = 0
return_drawdown_ratio: float = 0
else:
# Calculate balance related time series data
df["balance"] = df["net_pnl"].cumsum() + self.capital
df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
df["highlevel"] = (
df["balance"].rolling(
min_periods=1, window=len(df), center=False).max()
)
df["drawdown"] = df["balance"] - df["highlevel"]
df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100
# Calculate statistics value
start_date = df.index[0]
end_date = df.index[-1]
total_days: int = len(df)
profit_days: int = len(df[df["net_pnl"] > 0])
loss_days: int = len(df[df["net_pnl"] < 0])
end_balance: float = df["balance"].iloc[-1]
max_drawdown: float = df["drawdown"].min()
max_ddpercent: float = df["ddpercent"].min()
max_drawdown_end: float = df["drawdown"].idxmin()
max_drawdown_start: float = df["balance"][:max_drawdown_end].idxmax()
max_drawdown_duration: int = (max_drawdown_end - max_drawdown_start).days
total_net_pnl: float = df["net_pnl"].sum()
daily_net_pnl: float = total_net_pnl / total_days
total_commission: float = df["commission"].sum()
daily_commission: float = total_commission / total_days
total_slippage: float = df["slippage"].sum()
daily_slippage: float = total_slippage / total_days
total_turnover: float = df["turnover"].sum()
daily_turnover: float = total_turnover / total_days
total_trade_count: int = df["trade_count"].sum()
daily_trade_count: int = total_trade_count / total_days
total_return: float = (end_balance / self.capital - 1) * 100
annual_return: float = total_return / total_days * 240
daily_return: float = df["return"].mean() * 100
return_std: float = df["return"].std() * 100
if return_std:
sharpe_ratio: float = daily_return / return_std * np.sqrt(240)
else:
sharpe_ratio: float = 0
return_drawdown_ratio: float = -total_return / max_ddpercent
# Output
if output:
self.output("-" * 30)
self.output(f"首个交易日:\t{start_date}")
self.output(f"最后交易日:\t{end_date}")
self.output(f"总交易日:\t{total_days}")
self.output(f"盈利交易日:\t{profit_days}")
self.output(f"亏损交易日:\t{loss_days}")
self.output(f"起始资金:\t{self.capital:,.2f}")
self.output(f"结束资金:\t{end_balance:,.2f}")
self.output(f"总收益率:\t{total_return:,.2f}%")
self.output(f"年化收益:\t{annual_return:,.2f}%")
self.output(f"最大回撤: \t{max_drawdown:,.2f}")
self.output(f"百分比最大回撤: {max_ddpercent:,.2f}%")
self.output(f"最长回撤天数: \t{max_drawdown_duration}")
self.output(f"总盈亏:\t{total_net_pnl:,.2f}")
self.output(f"总手续费:\t{total_commission:,.2f}")
self.output(f"总滑点:\t{total_slippage:,.2f}")
self.output(f"总成交金额:\t{total_turnover:,.2f}")
self.output(f"总成交笔数:\t{total_trade_count}")
self.output(f"日均盈亏:\t{daily_net_pnl:,.2f}")
self.output(f"日均手续费:\t{daily_commission:,.2f}")
self.output(f"日均滑点:\t{daily_slippage:,.2f}")
self.output(f"日均成交金额:\t{daily_turnover:,.2f}")
self.output(f"日均成交笔数:\t{daily_trade_count}")
self.output(f"日均收益率:\t{daily_return:,.2f}%")
self.output(f"收益标准差:\t{return_std:,.2f}%")
self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
self.output(f"收益回撤比:\t{return_drawdown_ratio:,.2f}")
statistics: dict = {
"start_date": start_date,
"end_date": end_date,
"total_days": total_days,
"profit_days": profit_days,
"loss_days": loss_days,
"capital": self.capital,
"end_balance": end_balance,
"max_drawdown": max_drawdown,
"max_ddpercent": max_ddpercent,
"max_drawdown_duration": max_drawdown_duration,
"total_net_pnl": total_net_pnl,
"daily_net_pnl": daily_net_pnl,
"total_commission": total_commission,
"daily_commission": daily_commission,
"total_slippage": total_slippage,
"daily_slippage": daily_slippage,
"total_turnover": total_turnover,
"daily_turnover": daily_turnover,
"total_trade_count": total_trade_count,
"daily_trade_count": daily_trade_count,
"total_return": total_return,
"annual_return": annual_return,
"daily_return": daily_return,
"return_std": return_std,
"sharpe_ratio": sharpe_ratio,
"return_drawdown_ratio": return_drawdown_ratio,
}
return statistics
    def show_chart(self, df: DataFrame = None) -> None:
        """
        Render balance / drawdown / daily-PnL / PnL-distribution charts.

        :param df: daily DataFrame; must contain the columns added by
            calculate_statistics ("balance", "drawdown", "net_pnl").
            Defaults to self.daily_df.
        """
        # Check DataFrame input exterior
        if df is None:
            df: DataFrame = self.daily_df

        # Check for init DataFrame
        if df is None:
            return

        fig = make_subplots(
            rows=4,
            cols=1,
            subplot_titles=["Balance", "Drawdown", "Daily Pnl", "Pnl Distribution"],
            vertical_spacing=0.06
        )

        balance_line = go.Scatter(
            x=df.index,
            y=df["balance"],
            mode="lines",
            name="Balance"
        )
        # Drawdown is filled down to zero to make underwater periods visible.
        drawdown_scatter = go.Scatter(
            x=df.index,
            y=df["drawdown"],
            fillcolor="red",
            fill='tozeroy',
            mode="lines",
            name="Drawdown"
        )
        pnl_bar = go.Bar(y=df["net_pnl"], name="Daily Pnl")
        pnl_histogram = go.Histogram(x=df["net_pnl"], nbinsx=100, name="Days")

        fig.add_trace(balance_line, row=1, col=1)
        fig.add_trace(drawdown_scatter, row=2, col=1)
        fig.add_trace(pnl_bar, row=3, col=1)
        fig.add_trace(pnl_histogram, row=4, col=1)

        fig.update_layout(height=1000, width=1000)
        fig.show()
    def run_bf_optimization(self, optimization_setting: OptimizationSetting, output=True) -> list:
        """
        Brute-force parameter optimization over the given setting grid.

        :return: list of (setting, target_value, statistics) tuples, or
            None when the setting fails validation.
        """
        if not check_optimization_setting(optimization_setting):
            return

        evaluate_func: callable = wrap_evaluate(self, optimization_setting.target_name)
        # This calls the module-level run_bf_optimization imported from
        # vnpy.trader.optimize — the method name shadows it only as a class
        # attribute, not in this function's scope.
        results: list = run_bf_optimization(
            evaluate_func,
            optimization_setting,
            get_target_value,
            output=self.output,
        )

        if output:
            for result in results:
                msg: str = f"参数:{result[0]}, 目标:{result[1]}"
                self.output(msg)

        return results

    # Backward-compatible alias for the historical method name.
    run_optimization = run_bf_optimization
    def run_ga_optimization(self, optimization_setting: OptimizationSetting, output=True) -> list:
        """
        Genetic-algorithm parameter optimization over the given setting.

        :return: list of (setting, target_value, statistics) tuples, or
            None when the setting fails validation.
        """
        if not check_optimization_setting(optimization_setting):
            return

        evaluate_func: callable = wrap_evaluate(self, optimization_setting.target_name)
        # Delegates to the module-level run_ga_optimization imported from
        # vnpy.trader.optimize (the method name shadows it only as attribute).
        results: list = run_ga_optimization(
            evaluate_func,
            optimization_setting,
            get_target_value,
            output=self.output
        )

        if output:
            for result in results:
                msg: str = f"参数:{result[0]}, 目标:{result[1]}"
                self.output(msg)

        return results
def update_daily_close(self, price: float) -> None:
""""""
d: date = self.datetime.date()
daily_result: Optional[DailyResult] = self.daily_results.get(d, None)
if daily_result:
daily_result.close_price = price
else:
self.daily_results[d] = DailyResult(d, price)
    def new_bar(self, bar: BarData) -> None:
        """
        Advance the simulation by one bar.
        """
        self.bar = bar
        self.datetime = bar.datetime

        # Match pending algos against this bar *before* the strategy sees
        # it, so algos started in on_spread_bar are only crossed from the
        # next bar onward.
        self.cross_algo()
        self.strategy.on_spread_bar(bar)

        self.update_daily_close(bar.close_price)
    def new_tick(self, tick: TickData) -> None:
        """
        Advance the simulation by one tick.
        """
        self.tick = tick
        self.datetime = tick.datetime

        # Match pending algos before the strategy reacts, so algos started
        # on this tick are only crossed from the next one.
        self.cross_algo()

        # Mirror the top-of-book quote onto the spread object.
        self.spread.bid_price = tick.bid_price_1
        self.spread.bid_volume = tick.bid_volume_1
        self.spread.ask_price = tick.ask_price_1
        self.spread.ask_volume = tick.ask_volume_1
        self.spread.datetime = tick.datetime

        self.strategy.on_spread_data()
        self.update_daily_close(tick.last_price)
    def cross_algo(self) -> None:
        """
        Cross limit order with last bar/tick data.

        Every active algo whose limit price crosses the current market is
        filled in full; partial fills are not simulated.
        """
        # Bar mode fills at the close; tick mode fills at the touch.
        if self.mode == BacktestingMode.BAR:
            long_cross_price = self.bar.close_price
            short_cross_price = self.bar.close_price
        else:
            long_cross_price = self.tick.ask_price_1
            short_cross_price = self.tick.bid_price_1

        # Iterate over a copy: filled algos are popped from active_algos.
        for algo in list(self.active_algos.values()):
            # Check whether limit orders can be filled.
            long_cross: bool = (
                algo.direction == Direction.LONG
                and algo.price >= long_cross_price
            )

            short_cross: bool = (
                algo.direction == Direction.SHORT
                and algo.price <= short_cross_price
            )

            if not long_cross and not short_cross:
                continue

            # Push order update with status "all traded" (filled).
            # NOTE(review): the algo reports its fill at algo.price, while
            # the TradeData below records the cross price — confirm this
            # discrepancy is intended.
            algo.traded = algo.target
            algo.traded_volume = algo.volume
            algo.traded_price = algo.price
            algo.status = Status.ALLTRADED
            self.strategy.update_spread_algo(algo)

            self.active_algos.pop(algo.algoid)

            # Push trade update
            self.trade_count += 1

            if long_cross:
                trade_price = long_cross_price
                pos_change = algo.volume
            else:
                trade_price = short_cross_price
                pos_change = -algo.volume

            trade: TradeData = TradeData(
                symbol=self.spread.name,
                exchange=Exchange.LOCAL,
                orderid=algo.algoid,
                tradeid=str(self.trade_count),
                direction=algo.direction,
                price=trade_price,
                volume=algo.volume,
                datetime=self.datetime,
                gateway_name=self.gateway_name,
            )

            # trade.value is later used by DailyResult for turnover.
            if self.mode == BacktestingMode.BAR:
                trade.value = self.bar.value
            else:
                trade.value = trade_price

            self.spread.net_pos += pos_change
            self.strategy.on_spread_pos()

            self.trades[trade.vt_tradeid] = trade
    def load_bar(
        self, spread: SpreadData, days: int, interval: Interval, callback: Callable
    ) -> None:
        """
        Strategy API hook: record the warm-up day count and the callback to
        feed during initialization. The actual data is replayed from the
        already-loaded history by run_backtesting.
        """
        self.days = days
        self.callback = callback
    def load_tick(self, spread: SpreadData, days: int, callback: Callable) -> None:
        """
        Tick-mode counterpart of load_bar: record warm-up days and callback
        for run_backtesting's initialization phase.
        """
        self.days = days
        self.callback = callback
    def start_algo(
        self,
        strategy: SpreadStrategyTemplate,
        spread_name: str,
        direction: Direction,
        price: float,
        volume: float,
        payup: int,
        interval: int,
        lock: bool,
        extra: dict
    ) -> str:
        """
        Create a simulated spread algo and register it as active; it will
        be matched against subsequent bars/ticks in cross_algo.

        :return: the generated algoid
        """
        self.algo_count += 1
        algoid: str = str(self.algo_count)

        # NOTE(review): spread_name is ignored — the engine always trades
        # self.spread. Confirm single-spread backtesting is intended.
        algo: SpreadAlgoTemplate = SpreadAlgoTemplate(
            self,
            algoid,
            self.spread,
            direction,
            price,
            volume,
            payup,
            interval,
            lock,
            extra
        )

        self.algos[algoid] = algo
        self.active_algos[algoid] = algo

        return algoid
def stop_algo(
self,
strategy: SpreadStrategyTemplate,
algoid: str
) -> None:
""""""
if algoid not in self.active_algos:
return
algo: SpreadAlgoTemplate = self.active_algos.pop(algoid)
algo.status = Status.CANCELLED
self.strategy.update_spread_algo(algo)
    def send_order(
        self,
        strategy: SpreadStrategyTemplate,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float,
        stop: bool,
        lock: bool
    ) -> None:
        """
        No-op in backtesting: raw leg orders are never sent; fills are
        simulated at the algo level in cross_algo.
        """
        pass
    def cancel_order(self, strategy: SpreadStrategyTemplate, vt_orderid: str) -> None:
        """
        Cancel order by vt_orderid.

        No-op in backtesting; algo cancellation goes through stop_algo.
        """
        pass
def write_strategy_log(self, strategy: SpreadStrategyTemplate, msg: str) -> None:
"""
Write log message.
"""
msg: str = f"{self.datetime}\t{msg}"
self.logs.append(msg)
    def send_email(self, msg: str, strategy: SpreadStrategyTemplate = None) -> None:
        """
        Send email to default receiver.

        No-op in backtesting.
        """
        pass
    def put_strategy_event(self, strategy: SpreadStrategyTemplate) -> None:
        """
        Put an event to update strategy status.

        No-op in backtesting (no event engine is attached).
        """
        pass
    def write_algo_log(self, algo: SpreadAlgoTemplate, msg: str) -> None:
        """Algo-level logging hook; ignored in backtesting."""
        pass
class DailyResult:
    """One trading day's mark-to-market result for the spread."""

    def __init__(self, date: date, close_price: float) -> None:
        """
        :param date: the trading date
        :param close_price: day's closing price of the spread
        """
        self.date: date = date
        self.close_price: float = close_price
        self.pre_close: float = 0

        self.trades: List[TradeData] = []
        self.trade_count: int = 0

        # Net spread position at day start / day end.
        self.start_pos = 0
        self.end_pos = 0

        self.turnover: float = 0
        self.commission: float = 0
        self.slippage: float = 0

        self.trading_pnl: float = 0
        self.holding_pnl: float = 0
        self.total_pnl: float = 0
        self.net_pnl: float = 0

    def add_trade(self, trade: TradeData) -> None:
        """Register one trade executed on this date."""
        self.trades.append(trade)

    def calculate_pnl(
        self,
        pre_close: float,
        start_pos: float,
        size: int,
        rate: float,
        slippage: float
    ) -> None:
        """
        Compute holding/trading PnL and costs for this day.

        :param pre_close: previous day's close (0 on the first day)
        :param start_pos: net position carried into the day
        :param size: contract multiplier
        :param rate: commission rate applied to turnover
        :param slippage: slippage cost per unit traded
        """
        # If no pre_close provided on the first day,
        # use value 1 to avoid zero division error
        if pre_close:
            self.pre_close = pre_close
        else:
            self.pre_close = 1

        # Holding pnl is the pnl from holding position at day start
        self.start_pos = start_pos
        self.end_pos = start_pos
        self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size

        # Trading pnl is the pnl from new trade during the day
        self.trade_count = len(self.trades)

        for trade in self.trades:
            if trade.direction == Direction.LONG:
                pos_change = trade.volume
            else:
                pos_change = -trade.volume

            self.end_pos += pos_change

            # trade.value is set by the engine when the fill is simulated.
            turnover: float = trade.volume * size * trade.value

            self.trading_pnl += pos_change * \
                (self.close_price - trade.price) * size
            self.slippage += trade.volume * size * slippage

            self.turnover += turnover
            self.commission += turnover * rate

        # Net pnl takes account of commission and slippage cost
        self.total_pnl = self.trading_pnl + self.holding_pnl
        self.net_pnl = self.total_pnl - self.commission - self.slippage
def evaluate(
    target_name: str,
    strategy_class: SpreadStrategyTemplate,
    spread: SpreadData,
    interval: Interval,
    start: datetime,
    rate: float,
    slippage: float,
    size: float,
    pricetick: float,
    capital: int,
    end: datetime,
    setting: dict
) -> tuple:
    """
    Function for running in multiprocessing.pool

    Runs one complete backtest with the given strategy setting and returns
    (setting repr, value of the target statistic, full statistics dict).
    """
    engine: BacktestingEngine = BacktestingEngine()

    engine.set_parameters(
        spread=spread,
        interval=interval,
        start=start,
        rate=rate,
        slippage=slippage,
        size=size,
        pricetick=pricetick,
        capital=capital,
        end=end,
    )

    engine.add_strategy(strategy_class, setting)
    engine.load_data()
    engine.run_backtesting()
    engine.calculate_result()
    statistics: dict = engine.calculate_statistics(output=False)

    target_value: float = statistics[target_name]
    return (str(setting), target_value, statistics)
def wrap_evaluate(engine: BacktestingEngine, target_name: str) -> callable:
    """
    Wrap evaluate function with given setting from backtesting engine.

    The result takes a single ``setting`` dict argument. functools.partial
    of the module-level ``evaluate`` keeps the callable picklable for
    process-pool optimization — presumably why partial is used here rather
    than a closure; confirm before refactoring.
    """
    func: callable = partial(
        evaluate,
        target_name,
        engine.strategy_class,
        engine.spread,
        engine.interval,
        engine.start,
        engine.rate,
        engine.slippage,
        engine.size,
        engine.pricetick,
        engine.capital,
        engine.end
    )
    return func
def get_target_value(result: list) -> float:
    """
    Sorting key for optimization results: the target value is the second
    element of each (setting, target, statistics) tuple.
    """
    _, target, *_rest = result
    return target
| from collections import defaultdict
from datetime import date, datetime
from typing import Callable, Type, Dict, List, Optional
from functools import partial
import numpy as np
from pandas import DataFrame
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
Interval,
Status
)
from vnpy.trader.object import TradeData, BarData, TickData
from vnpy.trader.optimize import (
OptimizationSetting,
check_optimization_setting,
run_bf_optimization,
run_ga_optimization
)
from .template import SpreadStrategyTemplate, SpreadAlgoTemplate
from .base import SpreadData, BacktestingMode, load_bar_data, load_tick_data
class BacktestingEngine:
""""""
gateway_name: str = "BACKTESTING"
def __init__(self) -> None:
""""""
self.spread: SpreadData = None
self.start: datetime = None
self.end: datetime = None
self.rate: float = 0
self.slippage: float = 0
self.size: float = 1
self.pricetick: float = 0
self.capital: int = 1_000_000
self.mode: BacktestingMode = BacktestingMode.BAR
self.strategy_class: Type[SpreadStrategyTemplate] = None
self.strategy: SpreadStrategyTemplate = None
self.tick: TickData = None
self.bar: BarData = None
self.datetime: datetime = None
self.interval: Interval = None
self.days: int = 0
self.callback: Callable = None
self.history_data: list = []
self.algo_count: int = 0
self.algos: Dict[str, SpreadAlgoTemplate] = {}
self.active_algos: Dict[str, SpreadAlgoTemplate] = {}
self.trade_count: int = 0
self.trades: Dict[str, TradeData] = {}
self.logs: list = []
self.daily_results: Dict[date, DailyResult] = {}
self.daily_df: DataFrame = None
def output(self, msg) -> None:
"""
Output message of backtesting engine.
"""
print(f"{datetime.now()}\t{msg}")
def clear_data(self) -> None:
"""
Clear all data of last backtesting.
"""
self.strategy = None
self.tick = None
self.bar = None
self.datetime = None
self.algo_count = 0
self.algos.clear()
self.active_algos.clear()
self.trade_count = 0
self.trades.clear()
self.logs.clear()
self.daily_results.clear()
def set_parameters(
self,
spread: SpreadData,
interval: Interval,
start: datetime,
rate: float,
slippage: float,
size: float,
pricetick: float,
capital: int = 0,
end: datetime = None,
mode: BacktestingMode = BacktestingMode.BAR
) -> None:
""""""
self.spread = spread
self.interval = Interval(interval)
self.rate = rate
self.slippage = slippage
self.size = size
self.pricetick = pricetick
self.start = start
self.capital = capital
self.end = end
self.mode = mode
def add_strategy(self, strategy_class: type, setting: dict) -> None:
""""""
self.strategy_class = strategy_class
self.strategy = strategy_class(
self,
strategy_class.__name__,
self.spread,
setting
)
def load_data(self) -> None:
""""""
self.output("开始加载历史数据")
if not self.end:
self.end = datetime.now()
if self.start >= self.end:
self.output("起始日期必须小于结束日期")
return
if self.mode == BacktestingMode.BAR:
self.history_data = load_bar_data(
self.spread,
self.interval,
self.start,
self.end,
self.pricetick
)
else:
self.history_data = load_tick_data(
self.spread,
self.start,
self.end
)
self.output(f"历史数据加载完成,数据量:{len(self.history_data)}")
def run_backtesting(self) -> None:
""""""
if self.mode == BacktestingMode.BAR:
func = self.new_bar
else:
func = self.new_tick
self.strategy.on_init()
# Use the first [days] of history data for initializing strategy
day_count: int = 0
ix: int = 0
for ix, data in enumerate(self.history_data):
if self.datetime and data.datetime.day != self.datetime.day:
day_count += 1
if day_count >= self.days:
break
self.datetime = data.datetime
self.callback(data)
self.strategy.inited = True
self.output("策略初始化完成")
self.strategy.on_start()
self.strategy.trading = True
self.output("开始回放历史数据")
# Use the rest of history data for running backtesting
for data in self.history_data[ix:]:
func(data)
self.output("历史数据回放结束")
def calculate_result(self) -> DataFrame:
""""""
self.output("开始计算逐日盯市盈亏")
if not self.trades:
self.output("成交记录为空,无法计算")
return
# Add trade data into daily reuslt.
for trade in self.trades.values():
d: date = trade.datetime.date()
daily_result = self.daily_results[d]
daily_result.add_trade(trade)
# Calculate daily result by iteration.
pre_close = 0
start_pos = 0
for daily_result in self.daily_results.values():
daily_result.calculate_pnl(
pre_close,
start_pos,
self.size,
self.rate,
self.slippage
)
pre_close = daily_result.close_price
start_pos = daily_result.end_pos
# Generate dataframe
results: defaultdict = defaultdict(list)
for daily_result in self.daily_results.values():
for key, value in daily_result.__dict__.items():
results[key].append(value)
self.daily_df: DataFrame = DataFrame.from_dict(results).set_index("date")
self.output("逐日盯市盈亏计算完成")
return self.daily_df
def calculate_statistics(self, df: DataFrame = None, output=True) -> dict:
""""""
self.output("开始计算策略统计指标")
# Check DataFrame input exterior
if df is None:
df: DataFrame = self.daily_df
# Check for init DataFrame
if df is None:
# Set all statistics to 0 if no trade.
start_date: str = ""
end_date: str = ""
total_days: int = 0
profit_days: int = 0
loss_days: int = 0
end_balance: float = 0
max_drawdown: float = 0
max_ddpercent: float = 0
max_drawdown_duration: int = 0
total_net_pnl: float = 0
daily_net_pnl: float = 0
total_commission: float = 0
daily_commission: float = 0
total_slippage: float = 0
daily_slippage: float = 0
total_turnover: float = 0
daily_turnover: float = 0
total_trade_count: int = 0
daily_trade_count: int = 0
total_return: float = 0
annual_return: float = 0
daily_return: float = 0
return_std: float = 0
sharpe_ratio: float = 0
return_drawdown_ratio: float = 0
else:
# Calculate balance related time series data
df["balance"] = df["net_pnl"].cumsum() + self.capital
df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
df["highlevel"] = (
df["balance"].rolling(
min_periods=1, window=len(df), center=False).max()
)
df["drawdown"] = df["balance"] - df["highlevel"]
df["ddpercent"] = df["drawdown"] / df["highlevel"] * 100
# Calculate statistics value
start_date = df.index[0]
end_date = df.index[-1]
total_days: int = len(df)
profit_days: int = len(df[df["net_pnl"] > 0])
loss_days: int = len(df[df["net_pnl"] < 0])
end_balance: float = df["balance"].iloc[-1]
max_drawdown: float = df["drawdown"].min()
max_ddpercent: float = df["ddpercent"].min()
max_drawdown_end: float = df["drawdown"].idxmin()
max_drawdown_start: float = df["balance"][:max_drawdown_end].idxmax()
max_drawdown_duration: int = (max_drawdown_end - max_drawdown_start).days
total_net_pnl: float = df["net_pnl"].sum()
daily_net_pnl: float = total_net_pnl / total_days
total_commission: float = df["commission"].sum()
daily_commission: float = total_commission / total_days
total_slippage: float = df["slippage"].sum()
daily_slippage: float = total_slippage / total_days
total_turnover: float = df["turnover"].sum()
daily_turnover: float = total_turnover / total_days
total_trade_count: int = df["trade_count"].sum()
daily_trade_count: int = total_trade_count / total_days
total_return: float = (end_balance / self.capital - 1) * 100
annual_return: float = total_return / total_days * 240
daily_return: float = df["return"].mean() * 100
return_std: float = df["return"].std() * 100
if return_std:
sharpe_ratio: float = daily_return / return_std * np.sqrt(240)
else:
sharpe_ratio: float = 0
return_drawdown_ratio: float = -total_return / max_ddpercent
# Output
if output:
self.output("-" * 30)
self.output(f"首个交易日:\t{start_date}")
self.output(f"最后交易日:\t{end_date}")
self.output(f"总交易日:\t{total_days}")
self.output(f"盈利交易日:\t{profit_days}")
self.output(f"亏损交易日:\t{loss_days}")
self.output(f"起始资金:\t{self.capital:,.2f}")
self.output(f"结束资金:\t{end_balance:,.2f}")
self.output(f"总收益率:\t{total_return:,.2f}%")
self.output(f"年化收益:\t{annual_return:,.2f}%")
self.output(f"最大回撤: \t{max_drawdown:,.2f}")
self.output(f"百分比最大回撤: {max_ddpercent:,.2f}%")
self.output(f"最长回撤天数: \t{max_drawdown_duration}")
self.output(f"总盈亏:\t{total_net_pnl:,.2f}")
self.output(f"总手续费:\t{total_commission:,.2f}")
self.output(f"总滑点:\t{total_slippage:,.2f}")
self.output(f"总成交金额:\t{total_turnover:,.2f}")
self.output(f"总成交笔数:\t{total_trade_count}")
self.output(f"日均盈亏:\t{daily_net_pnl:,.2f}")
self.output(f"日均手续费:\t{daily_commission:,.2f}")
self.output(f"日均滑点:\t{daily_slippage:,.2f}")
self.output(f"日均成交金额:\t{daily_turnover:,.2f}")
self.output(f"日均成交笔数:\t{daily_trade_count}")
self.output(f"日均收益率:\t{daily_return:,.2f}%")
self.output(f"收益标准差:\t{return_std:,.2f}%")
self.output(f"Sharpe Ratio:\t{sharpe_ratio:,.2f}")
self.output(f"收益回撤比:\t{return_drawdown_ratio:,.2f}")
statistics: dict = {
"start_date": start_date,
"end_date": end_date,
"total_days": total_days,
"profit_days": profit_days,
"loss_days": loss_days,
"capital": self.capital,
"end_balance": end_balance,
"max_drawdown": max_drawdown,
"max_ddpercent": max_ddpercent,
"max_drawdown_duration": max_drawdown_duration,
"total_net_pnl": total_net_pnl,
"daily_net_pnl": daily_net_pnl,
"total_commission": total_commission,
"daily_commission": daily_commission,
"total_slippage": total_slippage,
"daily_slippage": daily_slippage,
"total_turnover": total_turnover,
"daily_turnover": daily_turnover,
"total_trade_count": total_trade_count,
"daily_trade_count": daily_trade_count,
"total_return": total_return,
"annual_return": annual_return,
"daily_return": daily_return,
"return_std": return_std,
"sharpe_ratio": sharpe_ratio,
"return_drawdown_ratio": return_drawdown_ratio,
}
return statistics
def show_chart(self, df: DataFrame = None) -> None:
""""""
# Check DataFrame input exterior
if df is None:
df: DataFrame = self.daily_df
# Check for init DataFrame
if df is None:
return
fig = make_subplots(
rows=4,
cols=1,
subplot_titles=["Balance", "Drawdown", "Daily Pnl", "Pnl Distribution"],
vertical_spacing=0.06
)
balance_line = go.Scatter(
x=df.index,
y=df["balance"],
mode="lines",
name="Balance"
)
drawdown_scatter = go.Scatter(
x=df.index,
y=df["drawdown"],
fillcolor="red",
fill='tozeroy',
mode="lines",
name="Drawdown"
)
pnl_bar = go.Bar(y=df["net_pnl"], name="Daily Pnl")
pnl_histogram = go.Histogram(x=df["net_pnl"], nbinsx=100, name="Days")
fig.add_trace(balance_line, row=1, col=1)
fig.add_trace(drawdown_scatter, row=2, col=1)
fig.add_trace(pnl_bar, row=3, col=1)
fig.add_trace(pnl_histogram, row=4, col=1)
fig.update_layout(height=1000, width=1000)
fig.show()
def run_bf_optimization(self, optimization_setting: OptimizationSetting, output=True) -> list:
""""""
if not check_optimization_setting(optimization_setting):
return
evaluate_func: callable = wrap_evaluate(self, optimization_setting.target_name)
results: list = run_bf_optimization(
evaluate_func,
optimization_setting,
get_target_value,
output=self.output,
)
if output:
for result in results:
msg: str = f"参数:{result[0]}, 目标:{result[1]}"
self.output(msg)
return results
run_optimization = run_bf_optimization
def run_ga_optimization(self, optimization_setting: OptimizationSetting, output=True) -> list:
""""""
if not check_optimization_setting(optimization_setting):
return
evaluate_func: callable = wrap_evaluate(self, optimization_setting.target_name)
results: list = run_ga_optimization(
evaluate_func,
optimization_setting,
get_target_value,
output=self.output
)
if output:
for result in results:
msg: str = f"参数:{result[0]}, 目标:{result[1]}"
self.output(msg)
return results
def update_daily_close(self, price: float) -> None:
""""""
d: date = self.datetime.date()
daily_result: Optional[DailyResult] = self.daily_results.get(d, None)
if daily_result:
daily_result.close_price = price
else:
self.daily_results[d] = DailyResult(d, price)
def new_bar(self, bar: BarData) -> None:
""""""
self.bar = bar
self.datetime = bar.datetime
self.cross_algo()
self.strategy.on_spread_bar(bar)
self.update_daily_close(bar.close_price)
def new_tick(self, tick: TickData) -> None:
""""""
self.tick = tick
self.datetime = tick.datetime
self.cross_algo()
self.spread.bid_price = tick.bid_price_1
self.spread.bid_volume = tick.bid_volume_1
self.spread.ask_price = tick.ask_price_1
self.spread.ask_volume = tick.ask_volume_1
self.spread.datetime = tick.datetime
self.strategy.on_spread_data()
self.update_daily_close(tick.last_price)
def cross_algo(self) -> None:
"""
Cross limit order with last bar/tick data.
"""
if self.mode == BacktestingMode.BAR:
long_cross_price = self.bar.close_price
short_cross_price = self.bar.close_price
else:
long_cross_price = self.tick.ask_price_1
short_cross_price = self.tick.bid_price_1
for algo in list(self.active_algos.values()):
# Check whether limit orders can be filled.
long_cross: bool = (
algo.direction == Direction.LONG
and algo.price >= long_cross_price
)
short_cross: bool = (
algo.direction == Direction.SHORT
and algo.price <= short_cross_price
)
if not long_cross and not short_cross:
continue
# Push order udpate with status "all traded" (filled).
algo.traded = algo.target
algo.traded_volume = algo.volume
algo.traded_price = algo.price
algo.status = Status.ALLTRADED
self.strategy.update_spread_algo(algo)
self.active_algos.pop(algo.algoid)
# Push trade update
self.trade_count += 1
if long_cross:
trade_price = long_cross_price
pos_change = algo.volume
else:
trade_price = short_cross_price
pos_change = -algo.volume
trade: TradeData = TradeData(
symbol=self.spread.name,
exchange=Exchange.LOCAL,
orderid=algo.algoid,
tradeid=str(self.trade_count),
direction=algo.direction,
price=trade_price,
volume=algo.volume,
datetime=self.datetime,
gateway_name=self.gateway_name,
)
if self.mode == BacktestingMode.BAR:
trade.value = self.bar.value
else:
trade.value = trade_price
self.spread.net_pos += pos_change
self.strategy.on_spread_pos()
self.trades[trade.vt_tradeid] = trade
def load_bar(
self, spread: SpreadData, days: int, interval: Interval, callback: Callable
) -> None:
""""""
self.days = days
self.callback = callback
def load_tick(self, spread: SpreadData, days: int, callback: Callable) -> None:
""""""
self.days = days
self.callback = callback
def start_algo(
self,
strategy: SpreadStrategyTemplate,
spread_name: str,
direction: Direction,
price: float,
volume: float,
payup: int,
interval: int,
lock: bool,
extra: dict
) -> str:
""""""
self.algo_count += 1
algoid: str = str(self.algo_count)
algo: SpreadAlgoTemplate = SpreadAlgoTemplate(
self,
algoid,
self.spread,
direction,
price,
volume,
payup,
interval,
lock,
extra
)
self.algos[algoid] = algo
self.active_algos[algoid] = algo
return algoid
def stop_algo(
self,
strategy: SpreadStrategyTemplate,
algoid: str
) -> None:
""""""
if algoid not in self.active_algos:
return
algo: SpreadAlgoTemplate = self.active_algos.pop(algoid)
algo.status = Status.CANCELLED
self.strategy.update_spread_algo(algo)
def send_order(
self,
strategy: SpreadStrategyTemplate,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool,
lock: bool
) -> None:
""""""
pass
def cancel_order(self, strategy: SpreadStrategyTemplate, vt_orderid: str) -> None:
"""
Cancel order by vt_orderid.
"""
pass
def write_strategy_log(self, strategy: SpreadStrategyTemplate, msg: str) -> None:
"""
Write log message.
"""
msg: str = f"{self.datetime}\t{msg}"
self.logs.append(msg)
def send_email(self, msg: str, strategy: SpreadStrategyTemplate = None) -> None:
"""
Send email to default receiver.
"""
pass
def put_strategy_event(self, strategy: SpreadStrategyTemplate) -> None:
"""
Put an event to update strategy status.
"""
pass
def write_algo_log(self, algo: SpreadAlgoTemplate, msg: str) -> None:
""""""
pass
class DailyResult:
""""""
def __init__(self, date: date, close_price: float) -> None:
""""""
self.date: date = date
self.close_price: float = close_price
self.pre_close: float = 0
self.trades: List[TradeData] = []
self.trade_count: int = 0
self.start_pos = 0
self.end_pos = 0
self.turnover: float = 0
self.commission: float = 0
self.slippage: float = 0
self.trading_pnl: float = 0
self.holding_pnl: float = 0
self.total_pnl: float = 0
self.net_pnl: float = 0
def add_trade(self, trade: TradeData) -> None:
""""""
self.trades.append(trade)
def calculate_pnl(
self,
pre_close: float,
start_pos: float,
size: int,
rate: float,
slippage: float
) -> None:
""""""
# If no pre_close provided on the first day,
# use value 1 to avoid zero division error
if pre_close:
self.pre_close = pre_close
else:
self.pre_close = 1
# Holding pnl is the pnl from holding position at day start
self.start_pos = start_pos
self.end_pos = start_pos
self.holding_pnl = self.start_pos * (self.close_price - self.pre_close) * size
# Trading pnl is the pnl from new trade during the day
self.trade_count = len(self.trades)
for trade in self.trades:
if trade.direction == Direction.LONG:
pos_change = trade.volume
else:
pos_change = -trade.volume
self.end_pos += pos_change
turnover: float = trade.volume * size * trade.value
self.trading_pnl += pos_change * \
(self.close_price - trade.price) * size
self.slippage += trade.volume * size * slippage
self.turnover += turnover
self.commission += turnover * rate
# Net pnl takes account of commission and slippage cost
self.total_pnl = self.trading_pnl + self.holding_pnl
self.net_pnl = self.total_pnl - self.commission - self.slippage
def evaluate(
target_name: str,
strategy_class: SpreadStrategyTemplate,
spread: SpreadData,
interval: Interval,
start: datetime,
rate: float,
slippage: float,
size: float,
pricetick: float,
capital: int,
end: datetime,
setting: dict
) -> tuple:
"""
Function for running in multiprocessing.pool
"""
engine: BacktestingEngine = BacktestingEngine()
engine.set_parameters(
spread=spread,
interval=interval,
start=start,
rate=rate,
slippage=slippage,
size=size,
pricetick=pricetick,
capital=capital,
end=end,
)
engine.add_strategy(strategy_class, setting)
engine.load_data()
engine.run_backtesting()
engine.calculate_result()
statistics: dict = engine.calculate_statistics(output=False)
target_value: float = statistics[target_name]
return (str(setting), target_value, statistics)
def wrap_evaluate(engine: BacktestingEngine, target_name: str) -> callable:
"""
Wrap evaluate function with given setting from backtesting engine.
"""
func: callable = partial(
evaluate,
target_name,
engine.strategy_class,
engine.spread,
engine.interval,
engine.start,
engine.rate,
engine.slippage,
engine.size,
engine.pricetick,
engine.capital,
engine.end
)
return func
def get_target_value(result: list) -> float:
"""
Get target value for sorting optimization results.
"""
return result[1]
| en | 0.705554 | Output message of backtesting engine. Clear all data of last backtesting. # Use the first [days] of history data for initializing strategy # Use the rest of history data for running backtesting # Add trade data into daily reuslt. # Calculate daily result by iteration. # Generate dataframe # Check DataFrame input exterior # Check for init DataFrame # Set all statistics to 0 if no trade. # Calculate balance related time series data # Calculate statistics value # Output # Check DataFrame input exterior # Check for init DataFrame Cross limit order with last bar/tick data. # Check whether limit orders can be filled. # Push order udpate with status "all traded" (filled). # Push trade update Cancel order by vt_orderid. Write log message. Send email to default receiver. Put an event to update strategy status. # If no pre_close provided on the first day, # use value 1 to avoid zero division error # Holding pnl is the pnl from holding position at day start # Trading pnl is the pnl from new trade during the day # Net pnl takes account of commission and slippage cost Function for running in multiprocessing.pool Wrap evaluate function with given setting from backtesting engine. Get target value for sorting optimization results. | 2.281944 | 2 |
src/pyastroapi/api/urls.py | rjfarmer/pyAstroApi | 0 | 6624009 | # SPDX-License-Identifier: BSD-3-Clause
import typing as t
# https://ui.adsabs.harvard.edu/help/api/api-docs.html
base_url = "https://api.adsabs.harvard.edu/v1"
urls = {
"search": {
"search": "/search/query",
"bigquery": "/search/bigquery",
},
# Stored search
"stored": {
"search": "/vault/query",
"query2svg": "/vault/query2svg",
"execute_query": "/vault/execute_query",
},
# Libraries
"libraries": {
"change": "/biblib/documents", # Add, remove, delete, update
"view": "/biblib/libraries", # New, view
"permission": "/biblib/permissions",
"operate": "/biblib/libraries/operations/",
"transfer": "/biblib/transfer",
},
# Export
"export": {
"ads": "/export/ads",
"bibtextads": "/export/bibtexabs",
"bibtex": "/export/bibtex",
"endnote": "/export/endnote",
"medlars": "/export/medlars",
"procite": "/export/procite",
"refworks": "/export/refworks",
"ris": "/export/ris",
"aastex": "/export/aastex",
"icarus": "/export/icarus",
"mnras": "/export/mnras",
"soph": "/export/soph",
"dcxml": "/export/dcxml",
"refxml": "/export/refxml",
"refabsxml": "/export/refabsxml",
"rss": "/export/rss",
"votable": "/export/votable",
"csl": "/export/csl",
"custom": "/export/custom",
"ieee": "/export/ieee",
},
# Metrics
"metrics": {
"detail": "/metrics/detail",
"metrics": "/metrics",
},
# Author
"authors": {
"search": "/author-affiliation/search",
"export": "/author-affiliation/export",
},
# Citations
"citations": {
"helper": "/citation_helper",
},
# Classic
"classic": {
"mirrors": "/harbour/mirrors",
"user": "/harbour/user",
"auth": "/harbour/auth/classic",
},
# Objects
"objects": {
"solr": "/objects/query",
"objects": "/objects",
},
# Oracle
"oracle": {
"match": "/oracle/matchdoc",
"read": "/oracle/readhist",
},
# Reference
"ref": {"text": "/reference/text", "xml": "/reference/xml"},
# Resolver
"resolve": {
"search": "/resolver",
},
# Notifications
"notification": {
"edit": "/vault/notifications",
"get": "/vault/notification_query",
},
# Visualtions
"visual": {
"author": "/vis/author-network",
"paper": "/vis/paper-network",
"word-cloud": "/vis/word-cloud",
},
}
def make_url(endpoint: str, *args: str) -> str:
u = [base_url, endpoint]
u.extend(args)
return "/".join(u)
| # SPDX-License-Identifier: BSD-3-Clause
import typing as t
# https://ui.adsabs.harvard.edu/help/api/api-docs.html
base_url = "https://api.adsabs.harvard.edu/v1"
urls = {
"search": {
"search": "/search/query",
"bigquery": "/search/bigquery",
},
# Stored search
"stored": {
"search": "/vault/query",
"query2svg": "/vault/query2svg",
"execute_query": "/vault/execute_query",
},
# Libraries
"libraries": {
"change": "/biblib/documents", # Add, remove, delete, update
"view": "/biblib/libraries", # New, view
"permission": "/biblib/permissions",
"operate": "/biblib/libraries/operations/",
"transfer": "/biblib/transfer",
},
# Export
"export": {
"ads": "/export/ads",
"bibtextads": "/export/bibtexabs",
"bibtex": "/export/bibtex",
"endnote": "/export/endnote",
"medlars": "/export/medlars",
"procite": "/export/procite",
"refworks": "/export/refworks",
"ris": "/export/ris",
"aastex": "/export/aastex",
"icarus": "/export/icarus",
"mnras": "/export/mnras",
"soph": "/export/soph",
"dcxml": "/export/dcxml",
"refxml": "/export/refxml",
"refabsxml": "/export/refabsxml",
"rss": "/export/rss",
"votable": "/export/votable",
"csl": "/export/csl",
"custom": "/export/custom",
"ieee": "/export/ieee",
},
# Metrics
"metrics": {
"detail": "/metrics/detail",
"metrics": "/metrics",
},
# Author
"authors": {
"search": "/author-affiliation/search",
"export": "/author-affiliation/export",
},
# Citations
"citations": {
"helper": "/citation_helper",
},
# Classic
"classic": {
"mirrors": "/harbour/mirrors",
"user": "/harbour/user",
"auth": "/harbour/auth/classic",
},
# Objects
"objects": {
"solr": "/objects/query",
"objects": "/objects",
},
# Oracle
"oracle": {
"match": "/oracle/matchdoc",
"read": "/oracle/readhist",
},
# Reference
"ref": {"text": "/reference/text", "xml": "/reference/xml"},
# Resolver
"resolve": {
"search": "/resolver",
},
# Notifications
"notification": {
"edit": "/vault/notifications",
"get": "/vault/notification_query",
},
# Visualtions
"visual": {
"author": "/vis/author-network",
"paper": "/vis/paper-network",
"word-cloud": "/vis/word-cloud",
},
}
def make_url(endpoint: str, *args: str) -> str:
u = [base_url, endpoint]
u.extend(args)
return "/".join(u)
| en | 0.519995 | # SPDX-License-Identifier: BSD-3-Clause # https://ui.adsabs.harvard.edu/help/api/api-docs.html # Stored search # Libraries # Add, remove, delete, update # New, view # Export # Metrics # Author # Citations # Classic # Objects # Oracle # Reference # Resolver # Notifications # Visualtions | 1.741044 | 2 |
setup.py | mwalpole/baywheels-py-demo | 0 | 6624010 | <gh_stars>0
from glob import glob
from os.path import basename
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
setup(
name="baywheels",
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
) | from glob import glob
from os.path import basename
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
setup(
name="baywheels",
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
) | none | 1 | 1.876842 | 2 | |
Python/Basic Data Types/nested_lists.py | abivilion/Hackerank-Solutions- | 0 | 6624011 | lis = [] # main list
n = int(input()) # no number of students
# sub list into main list
for i in range(n):
sl = []
name = input()
sl.append(name)
marks = float(input())
sl.append(marks)
lis.append(sl)
# number list
num_l = []
for x in range(n):
num_l.append(lis[x][1])
# print(num_l)
# applying min algorithm from here
min_num = min(num_l)
c = num_l.count(min_num)
for p in range(c):
num_l.remove(min_num)
# second min value name get
min_num = min(num_l)
c = num_l.count(min_num)
name_end = []
for p in range(n):
if lis[p][1] == min_num:
name_end.append(lis[p][0])
alpha = sorted(name_end)
mystr= '\n'.join(alpha)
print(mystr)
| lis = [] # main list
n = int(input()) # no number of students
# sub list into main list
for i in range(n):
sl = []
name = input()
sl.append(name)
marks = float(input())
sl.append(marks)
lis.append(sl)
# number list
num_l = []
for x in range(n):
num_l.append(lis[x][1])
# print(num_l)
# applying min algorithm from here
min_num = min(num_l)
c = num_l.count(min_num)
for p in range(c):
num_l.remove(min_num)
# second min value name get
min_num = min(num_l)
c = num_l.count(min_num)
name_end = []
for p in range(n):
if lis[p][1] == min_num:
name_end.append(lis[p][0])
alpha = sorted(name_end)
mystr= '\n'.join(alpha)
print(mystr)
| en | 0.727856 | # main list # no number of students # sub list into main list # number list # print(num_l) # applying min algorithm from here # second min value name get | 3.466274 | 3 |
Calcul Numeric (CN)/Laborator/Laborator 12/lab12.py | DLarisa/FMI-Materials-BachelorDegree | 4 | 6624012 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 09:58:31 2021
@author: Larisa
"""
import numpy as np
import sympy as sym
import matplotlib.pyplot as plt
import math
### Proceduri -> Ex1
def difFinProg(X, Y):
"""
x oarecare -> f'(x) = (f(x+h) - f(x)) / h
pt discretizare xi -> f'(xi) = (f(xi+1) - f(xi)) / (xi+1 - xi), unde
xi + 1 => nodul i + 1 al vectorului x
"""
n = len(X)
df = np.zeros((n - 1, 1))
for i in range(n - 1):
df[i] = (Y[i+1] - Y[i]) / (X[i+1] - X[i])
return df
def difFinReg(X, Y):
"""
x oarecare -> f'(x) = (f(x) - f(x-h)) / h
pt discretizare xi -> f'(xi) = (f(xi) - f(xi-1)) / (xi - xi-1), unde
xi-1 => nodul i-1 al vectorului x
"""
n = len(X)
df = np.zeros((n, 1))
for i in range(1, n):
df[i] = (Y[i] - Y[i - 1]) / (X[i] - X[i - 1])
return df
def difFinCen(X, Y):
"""
x oarecare -> f'(x) = (f(x+h) - f(x-h)) / (2*h)
pt discretizare xi -> f'(xi) = (f(xi+1) - f(xi-1)) / (xi+1 - xi-1), unde
xi-1 => nodul i-1 al vectorului x
"""
n = len(X)
df = np.zeros((n - 1, 1))
for i in range(1, n - 1):
df[i] = (Y[i + 1] - Y[i - 1]) / (X[i + 1] - X[i - 1])
return df
### Exercițiul 1
def f(x):
return np.sin(x)
a = 0
b = np.pi
n = 100
x_graf = np.linspace(a, b, n)
y_graf = f(x_graf)
x = sym.symbols('x')
f_expr = sym.sin(x)
df = sym.diff(f_expr, x)
dfFunc = sym.lambdify(x, df)
plt.plot(x_graf, dfFunc(x_graf), linewidth = 2)
plt.grid(True)
dfaprox = difFinProg(x_graf, y_graf)
plt.plot(x_graf[0:n-1], dfaprox, linewidth = 2)
plt.show()
err = np.zeros((n - 1, 1))
for i in range(n - 1):
err[i] = abs(dfFunc(x_graf[i]) - dfaprox[i])
plt.plot(x_graf[0:n-1], err, linewidth = 2)
plt.grid(True)
plt.show()
# Pasul
print(x_graf[1] - x_graf[0])
# Metoda Reg
dfaprox2 = difFinReg(x_graf, y_graf)
plt.plot(x_graf[1:n], dfaprox2[1:n], linewidth = 2)
plt.grid(True)
plt.show()
err = np.zeros((n, 1))
for i in range(1, n):
err[i] = abs(dfFunc(x_graf[i]) - dfaprox2[i])
plt.plot(x_graf[1:n], err[1:n], linewidth = 2)
plt.grid(True)
plt.show()
# Metoda Cen
dfaprox3 = difFinCen(x_graf, y_graf)
plt.plot(x_graf[1:n-1], dfaprox3[1:n-1], linewidth = 2)
plt.grid(True)
plt.show()
err = np.zeros((n-1, 1))
for i in range(1, n-1):
err[i] = abs(dfFunc(x_graf[i]) - dfaprox3[i])
plt.plot(x_graf[1:n-1], err[1:n-1], linewidth = 2)
plt.grid(True)
plt.show()
### Proceduri -> Ex2
def MetRichardson(phi, x, h, n):
"""
Parameters
----------
phi : formula de aproximare a derivatei cu un ordin inferior.
x : punctul în care calculez derivata.
h : pasul.
n : ordinul de aproximare al derivatei (superior).
Returns
-------
df = derivata aproximativă
"""
Q = np.zeros((n, n))
for i in range(n):
Q[i, 0] = phi(x, h / 2 ** i)
for i in range(1, n):
for j in range(1, i + 1):
Q[i, j] = Q[i, j - 1] + 1 / (2 ** j - 1) * (Q[i, j - 1] - Q[i - 1, j - 1])
return Q[n - 1 , n - 1]
# Exercițiul 2
def phi(x, h):
return (f(x + h) - f(x)) / h
df_richardson = np.zeros((n, 1))
N = 3 # ordinul de aproximare la care dorim să ajungem cu met Richardson
for i in range(len(x_graf)):
# pas echidistant
df_richardson[i] = MetRichardson(phi, x_graf[i], x_graf[1] - x_graf[0], N)
plt.plot(x_graf, df_richardson, linewidth = 2)
plt.show()
err = np.zeros((n, 1))
for i in range(n):
err[i] = abs(dfFunc(x_graf[i]) - df_richardson[i])
plt.plot(x_graf, err, linewidth = 2)
plt.show()
# d.
# Aproximeaza a doua derivata si are ordinul de aproximare h^2
def phi2(x, h):
return (f(x + h) - 2 * f(x) + f(x - h)) / h ** 2
N = 5 # eroarea creste din cauza rotunjirilor făcute de pc (erori interne)
d2f_richardson = np.zeros((n, 1))
for i in range(len(x_graf)):
d2f_richardson[i] = MetRichardson(phi2, x_graf[i], (x_graf[1] - x_graf[0]), N - 1)
plt.figure(9)
plt.plot(x_graf, d2f_richardson, linewidth=3)
plt.show()
d2f = sym.diff(df, x)
d2f_func = sym.lambdify(x, d2f)
err2 = np.zeros((n, 1))
for i in range(n):
err2[i] = np.abs(d2f_func(x_graf[i]) - d2f_richardson[i])
plt.figure(10)
plt.plot(x_graf, err2, linewidth=3)
plt.show()
| # -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 09:58:31 2021
@author: Larisa
"""
import numpy as np
import sympy as sym
import matplotlib.pyplot as plt
import math
### Proceduri -> Ex1
def difFinProg(X, Y):
"""
x oarecare -> f'(x) = (f(x+h) - f(x)) / h
pt discretizare xi -> f'(xi) = (f(xi+1) - f(xi)) / (xi+1 - xi), unde
xi + 1 => nodul i + 1 al vectorului x
"""
n = len(X)
df = np.zeros((n - 1, 1))
for i in range(n - 1):
df[i] = (Y[i+1] - Y[i]) / (X[i+1] - X[i])
return df
def difFinReg(X, Y):
"""
x oarecare -> f'(x) = (f(x) - f(x-h)) / h
pt discretizare xi -> f'(xi) = (f(xi) - f(xi-1)) / (xi - xi-1), unde
xi-1 => nodul i-1 al vectorului x
"""
n = len(X)
df = np.zeros((n, 1))
for i in range(1, n):
df[i] = (Y[i] - Y[i - 1]) / (X[i] - X[i - 1])
return df
def difFinCen(X, Y):
"""
x oarecare -> f'(x) = (f(x+h) - f(x-h)) / (2*h)
pt discretizare xi -> f'(xi) = (f(xi+1) - f(xi-1)) / (xi+1 - xi-1), unde
xi-1 => nodul i-1 al vectorului x
"""
n = len(X)
df = np.zeros((n - 1, 1))
for i in range(1, n - 1):
df[i] = (Y[i + 1] - Y[i - 1]) / (X[i + 1] - X[i - 1])
return df
### Exercițiul 1
def f(x):
return np.sin(x)
a = 0
b = np.pi
n = 100
x_graf = np.linspace(a, b, n)
y_graf = f(x_graf)
x = sym.symbols('x')
f_expr = sym.sin(x)
df = sym.diff(f_expr, x)
dfFunc = sym.lambdify(x, df)
plt.plot(x_graf, dfFunc(x_graf), linewidth = 2)
plt.grid(True)
dfaprox = difFinProg(x_graf, y_graf)
plt.plot(x_graf[0:n-1], dfaprox, linewidth = 2)
plt.show()
err = np.zeros((n - 1, 1))
for i in range(n - 1):
err[i] = abs(dfFunc(x_graf[i]) - dfaprox[i])
plt.plot(x_graf[0:n-1], err, linewidth = 2)
plt.grid(True)
plt.show()
# Pasul
print(x_graf[1] - x_graf[0])
# Metoda Reg
dfaprox2 = difFinReg(x_graf, y_graf)
plt.plot(x_graf[1:n], dfaprox2[1:n], linewidth = 2)
plt.grid(True)
plt.show()
err = np.zeros((n, 1))
for i in range(1, n):
err[i] = abs(dfFunc(x_graf[i]) - dfaprox2[i])
plt.plot(x_graf[1:n], err[1:n], linewidth = 2)
plt.grid(True)
plt.show()
# Metoda Cen
dfaprox3 = difFinCen(x_graf, y_graf)
plt.plot(x_graf[1:n-1], dfaprox3[1:n-1], linewidth = 2)
plt.grid(True)
plt.show()
err = np.zeros((n-1, 1))
for i in range(1, n-1):
err[i] = abs(dfFunc(x_graf[i]) - dfaprox3[i])
plt.plot(x_graf[1:n-1], err[1:n-1], linewidth = 2)
plt.grid(True)
plt.show()
### Proceduri -> Ex2
def MetRichardson(phi, x, h, n):
"""
Parameters
----------
phi : formula de aproximare a derivatei cu un ordin inferior.
x : punctul în care calculez derivata.
h : pasul.
n : ordinul de aproximare al derivatei (superior).
Returns
-------
df = derivata aproximativă
"""
Q = np.zeros((n, n))
for i in range(n):
Q[i, 0] = phi(x, h / 2 ** i)
for i in range(1, n):
for j in range(1, i + 1):
Q[i, j] = Q[i, j - 1] + 1 / (2 ** j - 1) * (Q[i, j - 1] - Q[i - 1, j - 1])
return Q[n - 1 , n - 1]
# Exercițiul 2
def phi(x, h):
return (f(x + h) - f(x)) / h
df_richardson = np.zeros((n, 1))
N = 3 # ordinul de aproximare la care dorim să ajungem cu met Richardson
for i in range(len(x_graf)):
# pas echidistant
df_richardson[i] = MetRichardson(phi, x_graf[i], x_graf[1] - x_graf[0], N)
plt.plot(x_graf, df_richardson, linewidth = 2)
plt.show()
err = np.zeros((n, 1))
for i in range(n):
err[i] = abs(dfFunc(x_graf[i]) - df_richardson[i])
plt.plot(x_graf, err, linewidth = 2)
plt.show()
# d.
# Aproximeaza a doua derivata si are ordinul de aproximare h^2
def phi2(x, h):
return (f(x + h) - 2 * f(x) + f(x - h)) / h ** 2
N = 5 # eroarea creste din cauza rotunjirilor făcute de pc (erori interne)
d2f_richardson = np.zeros((n, 1))
for i in range(len(x_graf)):
d2f_richardson[i] = MetRichardson(phi2, x_graf[i], (x_graf[1] - x_graf[0]), N - 1)
plt.figure(9)
plt.plot(x_graf, d2f_richardson, linewidth=3)
plt.show()
d2f = sym.diff(df, x)
d2f_func = sym.lambdify(x, d2f)
err2 = np.zeros((n, 1))
for i in range(n):
err2[i] = np.abs(d2f_func(x_graf[i]) - d2f_richardson[i])
plt.figure(10)
plt.plot(x_graf, err2, linewidth=3)
plt.show()
| ro | 0.612302 | # -*- coding: utf-8 -*- Created on Mon Jan 4 09:58:31 2021
@author: Larisa ### Proceduri -> Ex1 x oarecare -> f'(x) = (f(x+h) - f(x)) / h
pt discretizare xi -> f'(xi) = (f(xi+1) - f(xi)) / (xi+1 - xi), unde
xi + 1 => nodul i + 1 al vectorului x x oarecare -> f'(x) = (f(x) - f(x-h)) / h
pt discretizare xi -> f'(xi) = (f(xi) - f(xi-1)) / (xi - xi-1), unde
xi-1 => nodul i-1 al vectorului x x oarecare -> f'(x) = (f(x+h) - f(x-h)) / (2*h)
pt discretizare xi -> f'(xi) = (f(xi+1) - f(xi-1)) / (xi+1 - xi-1), unde
xi-1 => nodul i-1 al vectorului x ### Exercițiul 1 # Pasul # Metoda Reg # Metoda Cen ### Proceduri -> Ex2 Parameters
----------
phi : formula de aproximare a derivatei cu un ordin inferior.
x : punctul în care calculez derivata.
h : pasul.
n : ordinul de aproximare al derivatei (superior).
Returns
-------
df = derivata aproximativă # Exercițiul 2 # ordinul de aproximare la care dorim să ajungem cu met Richardson # pas echidistant # d. # Aproximeaza a doua derivata si are ordinul de aproximare h^2 # eroarea creste din cauza rotunjirilor făcute de pc (erori interne) | 2.997828 | 3 |
setup.py | dskprt/botnolib | 3 | 6624013 | import setuptools
setuptools.setup(name="fastcord",
version="0.3.1",
description="another discord api wrapper for writing bots",
author="dskprt",
url="https://github.com/dskprt/fastcord",
packages=[ "fastcord", "fastcord.utils", "fastcord.objects", "fastcord.command" ],
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.6",
"Operating System :: OS Independent"
],
install_requires=[ "websocket-client" ],
python_requires=">=3.6")
| import setuptools
setuptools.setup(name="fastcord",
version="0.3.1",
description="another discord api wrapper for writing bots",
author="dskprt",
url="https://github.com/dskprt/fastcord",
packages=[ "fastcord", "fastcord.utils", "fastcord.objects", "fastcord.command" ],
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.6",
"Operating System :: OS Independent"
],
install_requires=[ "websocket-client" ],
python_requires=">=3.6")
| none | 1 | 1.245092 | 1 | |
yt_shared/yt_shared/models/yt_dlp.py | tropicoo/yt-dlp-bot | 2 | 6624014 | import datetime
import sqlalchemy as sa
from sqlalchemy import func
from yt_shared.db import Base
class YTDLP(Base):
__tablename__ = 'yt_dlp'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True, nullable=False)
current_version = sa.Column(sa.String, nullable=False)
updated_at = sa.Column(sa.DateTime, nullable=False, default=datetime.datetime.utcnow, onupdate=func.now())
| import datetime
import sqlalchemy as sa
from sqlalchemy import func
from yt_shared.db import Base
class YTDLP(Base):
__tablename__ = 'yt_dlp'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True, nullable=False)
current_version = sa.Column(sa.String, nullable=False)
updated_at = sa.Column(sa.DateTime, nullable=False, default=datetime.datetime.utcnow, onupdate=func.now())
| none | 1 | 2.375127 | 2 | |
integration/examples/python/rkt-control/main.py | gbuzogany/rockette | 4 | 6624015 | <filename>integration/examples/python/rkt-control/main.py<gh_stars>1-10
import json
import rkt_pb2
import socket
from RocketteClient import RocketteClient
config_file = 'config.json'
if __name__ == '__main__':
with open(config_file) as data_file:
config = json.load(data_file)
rkt = RocketteClient(config)
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname)
stringData = rkt_pb2.StringValue(
value="My IP is "+ip_address,
identifier='message',
)
rkt.UpdateStringData(stringData)
| <filename>integration/examples/python/rkt-control/main.py<gh_stars>1-10
import json
import rkt_pb2
import socket
from RocketteClient import RocketteClient
config_file = 'config.json'
if __name__ == '__main__':
with open(config_file) as data_file:
config = json.load(data_file)
rkt = RocketteClient(config)
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname)
stringData = rkt_pb2.StringValue(
value="My IP is "+ip_address,
identifier='message',
)
rkt.UpdateStringData(stringData)
| none | 1 | 2.663062 | 3 | |
pytokapi/__init__.py | cryptosbyte/PyTokAPI | 0 | 6624016 | import requests
"""
More information at https://pypi.org/project/pytokapi
"""
__version__ = "1.0.0"
class TikTok:
def __init__(self):
""" TikTok API Wrapper """
pass
def getInfo(self, url : str):
req = requests.get(f"https://www.tiktok.com/oembed?url={url}").json()
if ("status_msg" in req):
raise SystemExit("Invalid URL | TikTok API Response Error")
else:
return {
"version": req["version"],
# Basic Video Information
"title": req["title"],
"author": {
"url": req["author_url"],
"name": req["author_name"],
},
# These would be the average key of the object in a response
"provider": {
"url": "https://www.tiktok.com",
"name": "TikTok",
},
# Video Information
"video": {
# Usage for websites
"html": {
"embed": req["html"],
"width": req["width"],
"height": req["height"],
},
# Video Size & URL
"height": req["thumbnail_height"],
"url": req["thumbnail_url"],
"width": req["thumbnail_width"],
}
} | import requests
"""
More information at https://pypi.org/project/pytokapi
"""
__version__ = "1.0.0"
class TikTok:
def __init__(self):
""" TikTok API Wrapper """
pass
def getInfo(self, url : str):
req = requests.get(f"https://www.tiktok.com/oembed?url={url}").json()
if ("status_msg" in req):
raise SystemExit("Invalid URL | TikTok API Response Error")
else:
return {
"version": req["version"],
# Basic Video Information
"title": req["title"],
"author": {
"url": req["author_url"],
"name": req["author_name"],
},
# These would be the average key of the object in a response
"provider": {
"url": "https://www.tiktok.com",
"name": "TikTok",
},
# Video Information
"video": {
# Usage for websites
"html": {
"embed": req["html"],
"width": req["width"],
"height": req["height"],
},
# Video Size & URL
"height": req["thumbnail_height"],
"url": req["thumbnail_url"],
"width": req["thumbnail_width"],
}
} | en | 0.69477 | More information at https://pypi.org/project/pytokapi TikTok API Wrapper # Basic Video Information # These would be the average key of the object in a response # Video Information # Usage for websites # Video Size & URL | 3.509059 | 4 |
ROBOT_MOTOMAN.py | BrendonVaz/MotoManRobotTCPUDPCommands | 0 | 6624017 | <reponame>BrendonVaz/MotoManRobotTCPUDPCommands
import os
import sys
import time
import socket
import threading
import math
import struct
class rob():
def __init__(self, PARENT=0, dbg = 0):
self.PAR = PARENT
self.dbg = dbg
self.com1 = 'CONNECT Robot_access\r' #host control request
self.com2 = 'HOSTCTRL_REQUEST ' #command header
self.IP_ADD = '192.168.1.31' #robot IP
self.TCP_PT = 80 #robot tcp port number
self.UDP_PT = 10040 #robot udp port number
self.rob_chkout = False #socket lock flag to make sure only one message at one time
self.sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);self.sock_udp.settimeout(1.0)
self.sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
return
#~ -----------------------------------------------------------------------------------------------------------------------------------------
#~ TCP COMMANDS
#~ -----------------------------------------------------------------------------------------------------------------------------------------
def runchk(self): #check if robot online
if not (not os.system('ping -c 1 192.168.1.31') or not os.system('ping 192.168.1.31 -n 1')): print ("ERROR! Robot Server Off Line!");sys.exit()
self.wrgpio() #write all gpio 0
stt = self.redstt();
saf = self.redsaf();
col = self.colsaf(); col = col[0] or col[1]
if saf[4] != 0: print("ERROR! Robot Battery Low!"); sys.exit()
if int(stt[0])!=1: print("ERROR! Robot Not in Command Mode"); sys.exit()
if sum(saf[0:3])!=3:print("ERROR! E Stop Triggered!"); sys.exit()
if col: print("ERROR! Collaborative Mode Triggered!"); sys.exit()
print "-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
print "ROBOT CHECK"
print "-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
print "Robot Server Online..."
print "Robot Mode Check Complete..."
print "Robot Safety Check Complete..."
print "-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
return
def senreq(self): #host control request
try:self.sock_tcp.connect((self.IP_ADD,self.TCP_PT))
except: print ("Error! Cannot Connect Socket to Robot"); sys.exit();
self.sock.send(self.com1)
resp = self.sock.recv(256)
if self.dbg:
print ("Sent: ", self.com1.strip());
print ("Recd: ", resp.strip())
return resp
def sencom(self, comm, data, movecom = False, posf = None, resend = False): #send command
commm = comm #incase move wait recovery
dataa = data #incase move wait recovery
size = len(data) #if data get data size
comm = self.com2 + comm + ' ' + str(size) + '\r' #parse command n data
senrq = self.senreq() #send host control request
while self.rob_chkout:pass #if robot busy, wait
self.rob_chkout = True; #set robot busy
self.sock_tcp.send(comm);resp = self.sock.recv(256) #read 256 byte comm resp
self.sock_tcp.send(data);resp += self.sock.recv(256) #read 256 byte data resp
if "closing control connection" in resp: #if robot closes port
print("Robot Forcefully Disconnected") #if error resp exit
sys.exit()
self.rob_chkout = False #set robot not busy
if self.dbg:
print ("Sent: ", comm);
print ("Data: ", data);
print ("Recd: ", resp.split('\r\n')[0]+":", resp.split('\r\n')[1].strip(), "\n")
if movecom == True: self.mvwait(commm, dataa, posf); #loop while robot moving
return resp
    def mvwait(self, comm, data, pos ,check_estop=0, check_safety_gate=0, check_collab=0): #wait for motion command to complete
        """Block until a previously issued motion command has completed.

        Polls safety/status/position every loop until: distance to ``pos``
        (if given) is <= 25, the running bit is clear, the servo bit is on and
        the e-stop byte sum equals 3. Attempts automatic recovery (alarm read,
        reset, servo on, resend of ``comm``/``data``) after a safety trip.

        comm/data: the motion command that was sent (resent verbatim on recovery).
        pos:       target [x, y, z, rx, ry, rz] or None to skip the distance check.
        check_*:   0 disables the corresponding safety reaction by forcing the
                   flag to its "clear" value each poll.
        """
        dim = 100; saf = 4; run = 1; srv = 0; tog = 0; col = 1; ylo = False #distance;safety sum;running;servo;toggle(unused);collab;gate-light latch
        while dim > 25 or run == 1 or srv == 0 or saf != 3: #loop until at target, not running, servo on, safety clear
            if 1: #progress print each poll
                print ("-------------------------------------------------------------")
                print ("WAITING FOR...", comm)
                print ("-------------------------------------------------------------")
                print ("TARGET REACHED :", dim)
                print ("RUNNING BIT ON :", run)
                print ("SERVO BIT ON  :", srv)
                print ("SAFETY BIT SUM:", saf)
                print ("COLLABORATIVE :", col)
                print ("-------------------------------------------------------------")
            if 1: #read and calculate data
                #read safety, status, position
                saf=self.redsaf();
                stt=self.redstt();
                pt1=self.redpos();
                col=self.colsaf();
                msg = "";
                mod = int(stt[0]); gat = int(saf[3]); saf = sum(saf[0:3]) #mode bit, area-scan bit; NOTE: saf is reused as the e-stop sum from here on
                srv = int(stt[9]); run = int(stt[4]); slo = int(stt[3]) #servo bit, running bit; slo read but unused
                col = col[0] or col[1]; pt1 = map(float, pt1.split('\n')[1].split(',')[0:6]) #any collab trigger; current [x,y,z,rx,ry,rz]
                if not pos == None: #if check target flag is on
                    dim = [pt1[0]-pos[0], pt1[1]-pos[1], pt1[2]-pos[2], pt1[3]-pos[3], pt1[4]-pos[4], pt1[5]-pos[5]] #per-axis delta to target
                    dim = (dim[0]**2 + dim[1]**2 + dim[2]**2)**0.5 #euclidean norm of the xyz delta only
                else: dim = 0
                if not check_estop: srv = 1; #NOTE(review): this gates the SERVO check, not e-stop — parameter naming looks off, confirm
                if not check_safety_gate: gat = 3; #force gate "clear" (truthy)
                if not check_collab: col = 0; #force collaborative "clear"
            if 1: #print warnings & prompts
                if mod!=1: print ("Error! Robot Not in Command Mode");sys.exit() #if not in remote mode, exit code
                if col: print ("Error! Collaborative Safety Triggered!"); self.servof() #collab trip: warn and drop servo power
                if not srv: #servo is off: report cause, recover when clear
                    if 1: print ("Error! Servo Off.")
                    if col: print ("Error! Collaborative Safety Triggered")
                    if saf != 3: print ("Error! E Stop Triggered.")
                    elif saf == 3 and not col: #no active safety trigger -> recover
                        print ("Safety Clear. Restoring Servo Power.") #read alarm, reset alarm, restore servo
                        self.redalm();
                        self.resets();
                        self.servon();
                        print ("Resuming Motion, Please Stay Back")
                        self.sencom(comm,data,movecom = True, posf = pos, resend = True) #resend last motion command (recurses into mvwait)
                if not gat and srv: print("Safety Gate Triggered");ylo = 1; #latch the gate message so it prints once
                elif gat and srv and ylo: print ("Safety Gate Clear."); ylo = 0;
        return 1
def redpos(self): #read cartesian position of robot
comm = 'RPOSC'
data = '0,0\r'
return self.sencom(comm,data)
def redpls(self): #read pulse position of robot
comm = 'RPOSJ'
data = ''
return self.sencom(comm,data)
def redalm(self): #read alarms
comm = 'RALARM'
data = ''
return self.sencom(comm,data)
def redstt(self): #read status bits
comm = 'RSTATS'
data = ''
stt = self.sencom(comm,data).split('\n')[1].split(',')
st1 = int(stt[0])
st2 = int(stt[1])
stt = '{0:08b}'.format(st1) + '{0:08b}'.format(st2)
return stt
def redsaf(self): #read safety bytes
comm = 'IOREAD'
data = '80020,8\r';stop = self.sencom(comm,data)
data = '80400,8\r';safe = self.sencom(comm,data)
data = '50010,8\r';batt = self.sencom(comm,data)
stop = format(int(stop.split('\n')[1].strip()),'08b')
safe = format(int(safe.split('\n')[1].strip()),'08b')
batt = format(int(batt.split('\n')[1].strip()),'08b')
if batt[5] == '1' or batt[6] == '1': print "Battery Response:\t", batt
batt = int(batt[5]) or int(batt[6])
pstp = int(stop[1])
estp = int(stop[2])
astp = int(stop[4])
asaf = int(safe[7])
return [pstp, estp, astp, asaf, 0]
def colsaf(self): #check collaborative hard/soft bump
comm = 'IOREAD'
data = '81382,1\r'
hard = self.sencom(comm,data)
data = '81383,1\r'
soft = self.sencom(comm,data)
hard = format(int(hard.split('\n')[1].strip()),'08b')[5]
soft = format(int(soft.split('\n')[1].strip()),'08b')[5]
return [int(hard), int(soft)]
def resets(self): #reset alarms
comm = 'RESET'
data = ''
return self.sencom(comm,data)
def cancel(self): #cancel request... useless never used
comm = 'CANCEL'
data = ''
return self.sencom(comm,data)
def holdon(self): #external hold... useless never used
comm = 'HOLD'
data = '1\r'
return self.sencom(comm,data)
def holdof(self): #hold off... useless never used
comm = 'HOLD'
data = '0\r'
return self.sencom(comm,data)
def setmod(self, m): #useless... cannot switch to command mode without key anyway, hardware safety
if m == 1:data = '1\r'
if m == 2:data = '2\r'
comm = 'MODE'
return self.sencom(comm,data)
def servon(self): #servo on
comm = 'SVON'
data = '1\r'
return self.sencom(comm,data)
def servof(self): #servo off
comm = 'SVON'
data = '0\r'
return self.sencom(comm,data)
def msgdis(self, msg): #display pendant message
comm = 'MDSP'
data = msg + '\r'
return self.sencom(comm,data)
def rdgpio(self, stt_add=30050, byt_num=1, p=1): #read byt_num of gpio starting at stt_add
if not (isinstance(byt_num,int) and byt_num >0): return
byt_num = byt_num*8
comm = 'IOREAD'
data = str(stt_add)+','+str(byt_num)+'\r'
return self.sencom(comm,data)
def wrgpio(self, stt_add=27010, bit_num=8, bit_val=[[0,0,0,0,0,0,0,0]], p=1): #write bit_nums starting from stt_add
flag = 0
comm = 'IOWRITE'
data = str(stt_add) + "," + str(bit_num)
if 1: #check input
if not isinstance(bit_val,list): flag = 1;print "Error", 1
elif len(bit_val) != bit_num/8: flag = 1;print "Error", 2
elif bit_num % 8 != 0: flag = 1;print "Error", 3
else:
for byte in bit_val:
if flag: break
if len(byte) != 8:
flag = 1;print "Error", 4
break
for bit in byte:
if bit != 0 and bit != 1:
flag = 1;print "Error", 5
break
if flag: return "INPUT ERROR"
if 1: #parse data
bytedata = []
for bitlist in bit_val:
out = 0
for bit in bitlist:
out = (out<<1) | bit
bytedata.append(out)
for byte_val in bytedata: data = data + ',' + str(byte_val)
data = data + '\r'
return self.sencom(comm,data)
    def runjob(self,n='HOME',o=30050): #run job name n, and read complete flag o
        """
        NOTES:
        -> starts job ``n`` on the robot controller, then polls the job-complete
           output flag at register ``o`` until it clears
        -> waits a minimum of one second before the first poll
        -> n = string name of the job (must exist on the pendant)
        -> o = register of the job-complete output bit (must be wired on the pendant)
        Returns 0 once the complete flag reads clear.
        """
        comm = 'START';data = n+'\r';a = 1
        print self.sencom(comm,data);time.sleep(1) #kick the job, give it a second to latch
        #NOTE(review): ``self.fxn.rob.rdgpio`` looks like it should be ``self.rdgpio`` — confirm; reads bit 4 of the flag byte each poll
        while a: a = int(format(int(self.fxn.rob.rdgpio(o).split('\n')[1].strip()),'08b')[4]);
        return a
def gohome(self): #move robot home position pulse = 0
comm = 'PMOVJ'
data = '5,0,0,0,0,0,0,0,0,0,0,0,0,0\r'
return self.sencom(comm,data, movecom = True)
def movjnt(self, v, px, py, pz, rx, ry, rz, tp=6): #move joint to absolute position
"""
v = velocity (in % Speed)
px = position x
py = position y
pz = position z
rx = rotation x
ry = rotation y
rz = rotation z
tp = orientation type -> please see documentation (default to type 6)
frame is defaulted to "0" which is world frame
"""
comm = 'MOVJ'
data = str(v) + ',0,' + str(px) + ',' + str(py) + ',' + str(pz) + ',' + str(rx) + ',' + str(ry) + ',' + str(rz) + ',' + str(tp) + ',0,0,0,0,0,0,0\r'
fpos = [px,py,pz,rx,ry,rz] #final position, used to confirm motion complete using read position
return self.sencom(comm,data, movecom = True, posf = fpos)
def movlin(self, v, px, py, pz, rx, ry, rz, tp=6): #linear move to absolute position
"""
v = velocity (in mm/s)
px = position x
py = position y
pz = position z
rx = rotation x
ry = rotation y
rz = rotation z
tp = orientation type -> please see documentation (default to type 6)
frame is defaulted to "0" which is world frame
"""
comm = 'MOVL'
data = '0, ' + str(v) + ',0,' + str(px) + ',' + str(py) + ',' + str(pz) + ',' + str(rx) + ',' + str(ry) + ',' + str(rz) + ',' + str(tp) + ',0,0,0,0,0,0,0\r'
fpos = [px,py,pz,rx,ry,rz] #final position, used to confirm motion complete using read position
return self.sencom(comm,data, movecom = True, posf = fpos)
    def movinc(self,v,dx,dy,dz,da,db,dc, rv=0, lv=0): #incremental move (IMOV)
        """ Use increment move command with increment data
        v = velocity; interpreted as rotate speed (<=100) when the linear
            increment is zero, otherwise linear speed (<=500)
        dx = incremental position x (mm)
        dy = incremental position y (mm)
        dz = incremental position z (mm)
        da = incremental rotation x (deg)
        db = incremental rotation y (deg)
        dc = incremental rotation z (deg)
        rv = if nonzero, force rotational speed mode with this value
        lv = if nonzero, force linear speed mode with this value (wins over rv)
        """
        comm = 'IMOV'
        if dx+dy+dz == 0: data = '1,';v = min(v, 100); #no linear component: rotate-speed mode, cap 100
        else: data = '0,';v = min(v, 500); #linear component present: linear-speed mode, cap 500
        if rv: data = '1,';v = min(rv,100); #explicit rv: force rotational speed (comment was previously swapped)
        if lv: data = '0,';v = min(lv,500); #explicit lv: force linear speed (overrides rv if both given)
        data = data + str(v) + ',' + '0' + ',' + str(dx) + ',' + str(dy) + ',' + str(dz) + ',' + str(da) + ',' + str(db) + ',' + str(dc) + ',0,0,0,0,0,0,0,0\r'
        posi = [float(i) for i in self.redpos().split('\n')[1].split(',')[0:6]] #current pose, read before the move
        posm = [float(i) for i in [dx, dy, dz, da, db, dc]] #requested increment
        fpos = map(sum,zip(posi,posm)) #expected final pose for mvwait's distance check
        return self.sencom(comm,data, movecom = True, posf = fpos)
def movijt(self,v,dx,dy,dz,da,db,dc,p=1): #joint incremental move with current position read
""" Use joint move command with increment data
v = velocity, see lv/rv flag
dx = incremental position x
dy = incremental position y
dz = incremental position z
da = incremental rotation x
db = incremental rotation y
dc = incremental rotation z
"""
posr = self.redpos().split('\n')[1].split(','); #read current position...
posi = [float(i) for i in posr[0:6]] #get position & rotation
posm = [float(i) for i in [dx, dy, dz, da, db, dc]]; #parse input vector
fpos = map(sum,zip(posi,posm)) #add input vector to current positon...
comm = 'MOVJ'
data = str(v)+',0,'+str(fpos[0])+','+str(fpos[1])+','+str(fpos[2])+','+str(fpos[3])+','+str(fpos[4])+','+str(fpos[5])+','+posr[6]+',0,0,0,0,0,0,0\r'
return self.sencom(comm,data, movecom = True, posf = fpos)
def moviln(self,v,dx,dy,dz,da,db,dc,p=1): #linear incremental move with current position read
""" Use Linear move command with increment data
v = velocity, see lv/rv flag
dx = incremental position x
dy = incremental position y
dz = incremental position z
da = incremental rotation x
db = incremental rotation y
dc = incremental rotation z
"""
posr = self.redpos().split('\n')[1].split(','); #read current position...
posi = [float(i) for i in posr[0:6]] #get position & rotation
posm = [float(i) for i in [dx, dy, dz, da, db, dc]]; #parse input vector
fpos = map(sum,zip(posi,posm)) #add input vector to current positon...
comm = 'MOVL'
data = '0, ' + str(v) + ',0,'+str(fpos[0])+','+str(fpos[1])+','+str(fpos[2])+','+str(fpos[3])+','+str(fpos[4])+','+str(fpos[5])+','+posr[6]+',0,0,0,0,0,0,0\r'
return self.sencom(comm,data, movecom = True, posf = fpos)
    def mvpath(pts=[], inc=0, pls=0, xyz=0, jnt=0, lin=0, ind=0): #multipoint move
        """ Send Continuous fire points
        pts = list of each point with v,px,py,pz,rx,ry,rz,type for absolute or pulse motion
        pts = list of each point with v,dx,dy,dz,da,db,dc, for incremental motion
        ind = flag to set if motion settings are set individually
            if 1,
                inc = inc[i] = 1 if pts[i] is incremenetal else 0
                pls = pls[i] = 1 if pts[i] is pulse motion else 0
                xyz = xyz[i] = 1 if pts[i] is absolute move else 0
                jnt = jnt[i] = 1 if pts[i] is joint motion else 0
                lin = lin[i] = 1 if pts[i] is linear motion else 0
                length of point and motion definition must be length of points
            if 0,
                all point definitions are set to either incremental = if inc = 1
                                                    or pulse = if pls = 1
                                                    or absolute = if xyz = 1
                all motion types are set to joint = if jnt = 1
                                        or linear = if lin = 1
                either jnt or lin must be set to 1
                either inc/pls/xyz must be set to 1

        NOTE(review): this method appears unfinished/untested; defects spotted
        but not fixed here because the intended protocol framing is ambiguous:
          - the def is missing ``self`` although ``self.IP_ADD`` / ``self.sock``
            are used below, so calling it as a method binds the instance to pts
          - ``pts=[]`` is a shared mutable default argument
          - ``pt[0]`` and ``pts[1:6]`` inside the loop should presumably be
            ``pts[i][0]`` / ``pts[i][1:7]`` (NameError / wrong slice as written)
          - ``path[i][0]`` / ``path[i][1]`` writes conflict with the
            ``path = [[],[]]`` comm-list/data-list layout (IndexError for i>1),
            and ``len(path[1][i])`` measures the not-yet-assigned data string
          - the xyz branch tests the list ``jnt``/``lin`` instead of ``jnt[i]``/
            ``lin[i]``, and ``len(pts)<8`` looks inverted
          - sends go to ``self.sock`` although the local ``sock`` was connected
          - the point-count check (len 7) contradicts the xyz check (len 8)
        """
        if not len(pts) > 0: return 1 #atleast one point required #error 1 not enough points
        if not all(len(a) == 7 for a in pts): return 2 #atleast v + 6axis required #error 2 points incompletely defined
        if xyz and not all(len(a) == 8 for a in pts): return 3 #orientation types required #error 3 type variable not sent for absolute motion
        if not ind: #broadcast the single flags to every point
            inc = [inc]*len(pts);
            pls = [pls]*len(pts);
            xyz = [xyz]*len(pts);
            jnt = [jnt]*len(pts);
            lin = [lin]*len(pts);
        else: #ensure individual motion for each point in path
            if not all(len(a) == len(pts) for a in [inc,pls,xyz,jnt,lin]): return 4 #error 4 motion types for each point not specified
        path = [[],[]] #create path point list
        path[0] = ['']*len(pts) #comm list
        path[1] = ['']*len(pts) #data list
        com1 = 'CONNECT Robot_access Keep-Alive:-1\r' #host control request -> infinite continuous fire
        com2 = 'HOSTCTRL_REQUEST ' #command header
        for i in range(0,len(pts)): #parse each command and data in path
            v = str(pt[0])+',' #NOTE(review): ``pt`` is undefined — presumably pts[i]
            p = ', '.join(map(str,pts[1:6])) + ', ' #NOTE(review): presumably pts[i][1:7]
            if inc[i]:
                if jnt[i]:
                    path[i][1] = '1,' + v + '0,' + p + '0,0,0,0,0,0,0,0\r'
                    path[i][0] = com2 + 'IMOV ' + str(len(path[1][i])) + '\r'
                elif lin[i]:
                    path[i][1] = '1,' + v + '0,' + p + '0,0,0,0,0,0,0,0\r'
                    path[i][0] = com2 + 'IMOV ' + str(len(path[1][i])) + '\r'
            elif pls[i]:
                if jnt[i]:
                    path[i][1] = v + p + '0,0,0,0,0,0,0\r'
                    path[i][0] = com2 + 'PMOVJ ' + str(len(path[1][i])) + '\r'
                elif lin[i]:
                    path[i][1] = '0, ' + v + p + '0,0,0,0,0,0,0\r'
                    path[i][0] = com2 + 'PMOVL ' + str(len(path[1][i])) + '\r'
            elif xyz[i]:
                if jnt: #NOTE(review): should likely test jnt[i]
                    t = str(pts[7]) + ',' if len(pts)<8 else '6,' #NOTE(review): condition looks inverted; pts[7] likely pts[i][7]
                    path[i][1] = v + '0,' + p + t + '0,0,0,0,0,0,0\r'
                    path[i][0] = com2 + 'PMOVL ' + str(len(path[1][i])) + '\r'
                elif lin: #NOTE(review): should likely test lin[i]
                    t = str(pts[7]) + ','
                    path[i][1] = '0, ' + v + '0,' + p + t + '0,0,0,0,0,0,0\r'
                    path[i][0] = com2 + 'PMOVL ' + str(len(path[1][i])) + '\r'
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #open socket to robot for continuous fire
        try: sock.connect((self.IP_ADD,self.TCP_PT))
        except: print("Error! Cannot Connect Socket to Robot"); sys.exit()
        self.sock.send(com1); resp = self.sock.recv(256); #NOTE(review): uses self.sock, not the ``sock`` just connected
        if not 'Keep-Alive:-1' in resp: print("Error! Cannot Connect Socket to Robot");sys.exit();
        i=0;
        while i < len(path): #send each command
            j=1; #Monitor Running Bit Status
            while j:
                self.sock.send(com1 + 'RSTATS 0');resp = self.sock.recv(256);resp += self.sock.recv(256)
                j = int(''.join(['{0:08b}'.format(int(q)) for q in resp.split('\n')[1].split(',')])[4])
            self.sock.send(path[i][0]);resp = self.sock.recv(256) #Send Next Path Command
            self.sock.send(path[i][1]);resp += self.sock.recv(256) #Send Next Path Command Data
            print(resp)
            i+=1;
        return 0
#~ -----------------------------------------------------------------------------------------------------------------------------------------
#~ UDP COMMANDS
#~ -----------------------------------------------------------------------------------------------------------------------------------------
def udp_rtrq(self): #udp read joint torque
"""Doc
#~ ----------------------------
#~ Note: Read Joint Torques
#~ ----------------------------
"""
comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x77\x00\x01\x00\x00\x01\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
nib = []
axs = []
if len(data) > 32:
reqdat = data[32:]
for i in xrange(0,len(reqdat),4): nib.append(reqdat[i:i+4])
for i in range(5,11): axs.append(struct.unpack('<i',nib[i])[0])
if not ord(data[25]) + ord(data[26]): return float(ax[0]),float(ax[1]),float(ax[2]),float(ax[3]),float(ax[4]),float(ax[5])
else: print("Error with Torque Read Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
return -1
    def udp_iorw(self, addr=27010, wrfl = 0, bits=[0,0,0,0,0,0,0,0]): #udp i.o. readwrite
        """doc
        # ~ wrfl = read or write flag,
            #~ 0 = Read
            #~ 1 = Write
        # ~ addr = io register address; divided by 10 to fit the 2-byte field
        # ~ bits = bit values to write, MSB first; must write 8 bits at a time
        Returns the 8 read bits (read), the ``bits`` echoed back (write),
        or -1 on a protocol error.
        NOTE(review): ``bits`` is a shared mutable default argument — callers
        must not mutate it in place; confirm and consider a None sentinel.
        """
        # ~ ------------------------------------------------------------------------------------------------------------------------------------
        # ~ Parse Command: pack the register (addr/10, little-endian u16) and,
        # ~ for writes, the 8 bits folded MSB-first into one byte
        # ~ ------------------------------------------------------------------------------------------------------------------------------------
        if wrfl:
            a = 0
            for bit in bits: a = (a<<1) | bit
            comm = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x78\x00' + struct.pack('<H',addr/10) + '\x01\x10\x00\x00' + struct.pack('<B',a) + '\x00\x00\x00'
        else:
            comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x78\x00' + struct.pack('<H',addr/10) + '\x01\x0e\x00\x00'
        data = ''
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Send Command Receive Data (one in-flight message at a time)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        while self.rob_chkout: pass
        self.rob_chkout = True;
        self.sock_udp.sendto(comm,("192.168.1.31",10040))
        data,addr = self.sock_udp.recvfrom(512)
        self.rob_chkout = False;
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Parse Received Data (bytes 25/26 are the reply status; 0 = OK)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        if not wrfl: #if not write, return data recv
            bit = [-1,-1,-1,-1,-1,-1,-1,-1] #sentinel when no payload came back
            if len(data) > 32: #parse if response
                dt = struct.unpack('B',data[32]) #unpack response byte
                bit = [int(x) for x in '{0:08b}'.format(dt[0])] #split into MSB-first bits
            if not ord(data[25]) + ord(data[26]):return bit #return result if no errror
            else: print("Error with IO Write Command") #NOTE(review): message says Write on the READ path — labels look swapped
        else: #if write, return data sent
            if not ord(data[25]) + ord(data[26]): return bits
            else: print("Error with IO Read Command") #NOTE(review): message says Read on the WRITE path — labels look swapped
        if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
        return -1
def get_word(self, w,o): #get 32-bit int, 16
""" Doc
#~ Notes:
#~ w = number to create word packet (32 bit signed integer)
#~ o = order multiplier to number to create integer 10e^o
"""
a = w
b = math.modf(a);
c = b[1]*10**o;
d = b[0]*10**o;
e = int(c+d);
f = struct.pack('<i',e)
return f
def udp_rpos(self, p=0): #udp read position
"""doc
# ~ read robot position using udp server
command hard coded to return cartesian data
possible to request pulse data with flag p = 1
if 0: #debug.print Parsed Data
print "----------------------------------------------------------------------------"
print "Parsed Data..."
print "----------------------------------------------------------------------------"
if not p:
print " PX: ", axs[0]
print " PY: ", axs[1]
print " PZ: ", axs[2]
print " AX: ", axs[3]
print " AY: ", axs[4]
print " AZ: ", axs[5]
print " TP: ", t
print " ET: ", e
else:
print " PS: ", axs[0]
print " PL: ", axs[1]
print " PU: ", axs[2]
print " PR: ", axs[3]
print " PB: ", axs[4]
print " PT: ", axs[5]
print "----------------------------------------------------------------------------"
"""
if not p: comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x75\x00\x65\x00\x00\x01\x00\x00'
else: comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x75\x00\x01\x00\x00\x01\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
nib = [] #list of 4byte chunks
axs = [] #list of axis coordinates
if len(data) > 32:
reqdat = data[32:] #get data part of packet
for i in xrange(0,len(reqdat),4): nib.append(reqdat[i:i+4]) #separate data words and extract requested data
for i in range(5,11): axs.append(struct.unpack('<i',nib[i])[0]) #unpack 4 byte packets as signed 32 bit integer
if not p: #Parse cartesian data
for i in range(0,3): axs[i] = axs[i]/1000. #10e-3 for position
for i in range(3,6): axs[i] = axs[i]/10000. #10e-4 for orientation
t = [hex(ord(x))[2:].zfill(2) for x in nib[1]] #get pose type for cartesian
e = [hex(ord(x))[2:].zfill(2) for x in nib[4]] #extended type for cartesian
if not ord(data[25]) + ord(data[26]):
if not p: return [axs[0],axs[1],axs[2],axs[3],axs[4],axs[5],t,e]
else: return [axs[0],axs[1],axs[2],axs[3],axs[4],axs[5]]
else: print(msg="Error with Position Read Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
return -1
    def udp_rstt(self): #-> read status
        """doc
        # ~ Read Robot Status Byte 1 & 2 via the high-speed UDP server.
        Returns a 16-element list of ints (bits of byte 1 then byte 2,
        MSB first), or -1 on a protocol error.
            #~ byte 1:
                #~ bit 0: Mode Step
                #~ bit 1: Mode Cycle
                #~ bit 2: Mode Continuous
                #~ bit 3: Is Running
                #~ bit 4: Is Safety
                #~ bit 5: Mode Teach
                #~ bit 6: Mode Play
                #~ bit 7: Mode Remote
            #~ byte 2:
                #~ bit 0: Unused
                #~ bit 1: Hold Pendant
                #~ bit 2: Hold External
                #~ bit 3: Hold Remote
                #~ bit 4: Alarm Flag
                #~ bit 5: Error Flag
                #~ bit 6: Servo Status
                #~ bit 7: Unused
        """
        comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x72\x00\x01\x00\x00\x01\x00\x00'
        data = ''
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Send Command Receive Data (one in-flight message at a time)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        while self.rob_chkout: pass
        self.rob_chkout = True;
        self.sock_udp.sendto(comm,("192.168.1.31",10040))
        data,addr = self.sock_udp.recvfrom(512)
        self.rob_chkout = False;
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Parse Received Data: payload bytes 32 and 36 hold the two status
        # ~ bytes; reply bytes 25/26 are the status code (0 = OK)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        if len(data) > 32:
            dt1 = struct.unpack('B',data[32])
            dt2 = struct.unpack('B',data[36])
            stt = [int(x) for x in '{0:08b}'.format(dt1[0])] + [int(x) for x in '{0:08b}'.format(dt2[0])]
        #NOTE(review): if the reply is <=32 bytes but the status code reads OK, ``stt`` is unbound here — confirm short replies always carry an error code
        if not ord(data[25]) + ord(data[26]): return stt
        else: print("Error with Status Command")
        if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
        return -1
    def udp_ralm(self): #-> read alarm
        """ Doc
        ----------------------------------------
        Notes:
        ----------------------------------------
        Read the most recent alarm via the high-speed UDP server.
        Returns the four alarm-code bytes as two-digit hex strings
        ([-1,-1,-1,-1] placeholder if the reply carried no payload),
        or -1 on a protocol error.
        ----------------------------------------
        """
        # ~ ------------------------------------------------------------------------------------------------------------------------------------
        # ~ Parse Command (pre-built alarm-read request datagram)
        # ~ ------------------------------------------------------------------------------------------------------------------------------------
        comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x70\x00\x01\x00\x01\x0e\x00\x00'
        data = ''
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Send Command Receive Data (one in-flight message at a time)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        while self.rob_chkout: pass
        self.rob_chkout = True;
        self.sock_udp.sendto(comm,("192.168.1.31",10040))
        data,addr = self.sock_udp.recvfrom(512)
        self.rob_chkout = False;
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Parse Received Data: payload bytes 32-35 are the alarm code;
        # ~ reply bytes 25/26 are the status code (0 = OK)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        a = [-1,-1,-1,-1]
        if len(data) > 32: a = [hex(ord(x))[2:].zfill(2) for x in data[32:36]]
        if not ord(data[25]) + ord(data[26]): return a
        else: print("Error with Alarm Read Command")
        if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
        return -1
def udp_rset(self): #-> reset alarm & error
""" Doc
----------------------------------------
Notes:
----------------------------------------
Function: Cancel Alarm & Error Status
Required to Resume Servo On
----------------------------------------
"""
# ~ ------------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Command Comm1 = Cancel Alarm, Comm2 = Cancel Error
# ~ ------------------------------------------------------------------------------------------------------------------------------------
comm1 = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x82\x00\x01\x00\x01\x10\x00\x00\x01\x00\x00\x00'
comm2 = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x82\x00\x02\x00\x01\x10\x00\x00\x01\x00\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm1,("192.168.1.31",10040))
data1,addr = self.sock_udp.recvfrom(512)
self.sock_udp.sendto(comm2,("192.168.1.31",10040))
data2,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
recStatusByte1 = ord(data1[25]) + ord(data1[26])
recStatusByte2 = ord(data2[25]) + ord(data2[26])
if not recStatusByte1 and not recStatusByte2: return 1
else: print("Error with Reset Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm1,data1)
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm2,data2)
return -1
def udp_serv(self,on=1): #-> servo on off
if on: comm = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x83\x00\x02\x00\x01\x10\x00\x00\x01\x00\x00\x00'
else: comm = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x83\x00\x02\x00\x01\x10\x00\x00\x02\x00\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
if not ord(data[25]) + ord(data[26]):return 1
else: print("Error with Servo Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
return -1
def udp_rsaf(self,s=1): #read Safety Bits implementation of iorw
""" Doc
Read the Safety IO Bits
Note the Registers May Be Dependent on Wiring & Logical Setup
For All Robots:
E-stop Status at Reg 80020
Area Scanner Status at Reg 80400
For Collaborative Robots Only:
Bump Status at Reg 81380
Hard Bump Status at Reg 81382
Soft Bump Status at Reg 81383
Input s: s=0 non collaborative robot, s=1 collaborative safe robot
"""
a = self.udp_iorw(addr = 80020)
b = self.udp_iorw(addr = 80400)
if s: c = self.udp_iorw(addr = 81380)
pstp = a[1]
estp = a[2]
astp = a[4]
asaf = b[7]
if s: hard=c[5];soft=c[6];
else: hard= -1 ;soft= -1 ;
return [pstp,estp,astp,asaf,hard,soft]
def udp_movj(self,args): #udp move cartesian
""" Doc
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ Notes
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ this function uses the yaskawa hi-speed udp server to move robot
# ~ inputs:
# ~ m = motion Type,
# ~ 1 = joint,
# ~ 2 = linear,
# ~ 3 = linear increment
# ~ s = speed Type,
# ~ 1 = Percentage of Max Speed, for m = 1 only
# ~ 2 = Linear speed in 0.1 mm/s, for m = 2,3 only
# ~ 3 = Rotation speed in 0.1 deg/s, for m = 2,3 only
# ~ v = Speed Value, must be specified in the type specified by s, no checks performed
# ~ px= X Coordinate, specified in milimeters and converted to micro meters (10e-6)
# ~ py= Y Coordinate, specified in milimeters and converted to micro meters (10e-6)
# ~ py= Z Coordinate, specified in milimeters and converted to micro meters (10e-6)
# ~ rx= X Rotation, specified in degrees and converted to 0.1 mili deg (10e-4)
# ~ ry= Y Rotation, specified in degrees and converted to 0.1 mili deg (10e-4)
# ~ rz= Z Rotation, specified in degrees and converted to 0.1 mili deg (10e-4)
# ~ t = Orientation Type, axis coordinate and flip conditions (Hard Coded)
"""
m, s, v, px, py, pz, rx, ry, rz, t, e = args;
# ~ ------------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Header
# ~ ------------------------------------------------------------------------------------------------------------------------------------
if 1:
comm = '\x59\x45\x52\x43\x20\x00\x68\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39'
# ~ ------------------------------------------------------------------------------------------------------------------------------------
comm = comm + '\x8a\x00' #-> Command ID Number for Move Command
# ~ ------------------------------------------------------------------------------------------------------------------------------------
if m == 1: comm = comm + '\x01\x00' #-> Command Instance: Motion Type 1: Joint
elif m == 2: comm = comm + '\x02\x00' #-> Command Instance: Motion Type 2: Linear Absolute
elif m == 3: comm = comm + '\x03\x00' #-> Command Instance: Motion Type 2: Linear Increment
comm = comm + '\x01\x02\x00\x00'
# ~ ------------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Data
# ~ ------------------------------------------------------------------------------------------------------------------------------------
if 1:
#Robot & Station ID-----------------------------------------------------------------------------------------------------------------
comm = comm + '\x01\x00\x00\x00' #-> Data word 1: Robot Number (Hard Coded to 1)
comm = comm + '\x00\x00\x00\x00' #-> Data word 2: Station Number (Hard Coded to 0)
#speed type-------------------------------------------------------------------------------------------------------------------------
if s == 1: comm = comm + '\x00\x00\x00\x00' #-> Data word 3: Speed Type 1: % Max speed in 0.01 %
elif s == 2: comm = comm + '\x01\x00\x00\x00' #-> Data word 3: Speed Type 2: Linear Speed in 0.1 mm/s
elif s == 3: comm = comm + '\x02\x00\x00\x00' #-> Data word 3: Speed Type 3: Rotate Speed in 0.1 deg/s
#speed for speed type---------------------------------------------------------------------------------------------------------------
if s == 1: comm = comm + self.get_word(max(min(v,100),0.01),2) #-> Data word 4: Robot Motion Speed in 0.01%
elif s == 2: comm = comm + self.get_word(max(min(v,999),0.10),1) #-> Data word 4: Robot Motion Speed in 0.1mm/s
elif s == 3: comm = comm + self.get_word(max(min(v,499),0.10),1) #-> Data word 4: Robot Motion Speed in 0.1deg/s
#Co-ordinate Frame------------------------------------------------------------------------------------------------------------------
comm = comm + self.get_word(16,0) #-> Data word 5: Coordinate Frame Hard Coded to Base Frame
#Robot Position & Tool Orientation--------------------------------------------------------------------------------------------------
comm = comm + self.get_word(px,3) #-> Data word 6: Robot X position in 1e-3 mm
comm = comm + self.get_word(py,3) #-> Data word 7: Robot Y position in 1e-3 mm
comm = comm + self.get_word(pz,3) #-> Data word 8: Robot Z position in 1e-3 mm
comm = comm + self.get_word(rx,4) #-> Data word 9: Robot X rotation in 1e-4 deg
comm = comm + self.get_word(ry,4) #-> Data word 10: Robot Y rotation in 1e-4 deg
comm = comm + self.get_word(rz,4) #-> Data word 11: Robot Z rotation in 1e-4 deg
#0 padding for words 12 to 13 (reserve)---------------------------------------------------------------------------------------------
comm = comm + self.get_word(0,0) #-> Data word 12: Pad Reserve with 0s
comm = comm + self.get_word(0,0) #-> Data word 13: Pad Reserve with 0s
#0 padding for words 12 to 13 (unused)----------------------------------------------------------------------------------------------
comm = comm + self.get_word(3,0) #-> Data word 14: Hard coded Orientation Type to \x03
comm = comm + self.get_word(0,0) #-> Data word 15: Hard coded Extended Type to \x00
#0 padding for words 15 to 22 (unused)----------------------------------------------------------------------------------------------
for i in range(16,27): comm = comm + self.get_word(0,0) #-> Data word 16-26: Pad Unused with 0s
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
data = '';
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
#~ if not ord(data[25]) + ord(data[26]):
if m == 3: # do not re-send increment move because of move wait
m = 2; cur_pos = self.udp_rpos()[0:6];
px = px + cur_pos[0];py = py + cur_pos[1];pz = pz + cur_pos[2];
rx = rx + cur_pos[3];ry = ry + cur_pos[4];rz = rz + cur_pos[5];
args = (m, s, v, px, py, pz, rx, ry, rz, t, e);
pos = [px, py, pz, rx, ry, rz]
self.udp_wait(self.udp_movj,args,pos);
if self.dbg or not(not ord(data[25]) + ord(data[26])): print("Error with Joint Move Command");self.udp_dbug(comm,data);return -1;
return 1
    def udp_movp(self,args): #udp move pulse
        # Build and send one 0x8b "pulse move" command to the Yaskawa
        # high-speed UDP server (port 10040), then block in udp_wait until the
        # motion completes. Returns 1 on success, -1 on a non-zero status byte.
        # NOTE(review): the unpack below executes BEFORE the triple-quoted
        # string, so that string is a plain no-op expression statement, not a
        # real docstring.
        m, s, v, ps, pl, pu, pr, pb, pt, pos = args
        """ Doc
        # ~ --------------------------------------------------------------------------------------------------------------------
        # ~ Notes
        # ~ --------------------------------------------------------------------------------------------------------------------
        # ~ this function uses the yaskawa hi-speed udp server to move robot using pulse
        # ~ inputs:
        # ~ m = motion Type,
        # ~     1 = joint,
        # ~     2 = linear,
        # ~ s = speed Type,
        # ~     1 = Percentage of Max Speed, for m = 1 only
        # ~     2 = Linear speed in 0.1 mm/s, for m = 2,3 only
        # ~     3 = Rotation speed in 0.1 deg/s, for m = 2,3 only
        # ~ v = Speed Value, must be specified in the type specified by s, no checks performed
        # ~ ps= S Rotation, specified in pulse
        # ~ pl= L Rotation, specified in pulse
        # ~ pu= U Rotation, specified in pulse
        # ~ pr= R Rotation, specified in pulse
        # ~ pb= B Rotation, specified in pulse
        # ~ pt= T Rotation, specified in pulse
        #~ pos = List of cartesian Position Equivalent of Pulse Rotations
        """
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Parse Header (fixed 24-byte HSES header, then command id / instance / attribute words)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        if 1:
            #~ # ~ -------------------------------------------------------------------------------------------------------------------------
            comm = '\x59\x45\x52\x43\x20\x00\x58\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39'
            # ~ ----------------------------------------------------------------------------------------------------------------------------
            comm = comm + '\x8b\x00' #-> Command ID Number for Pulse Move Command
            # ~ ----------------------------------------------------------------------------------------------------------------------------
            if m == 1: comm = comm + '\x01\x00' #-> Command Instance: Motion Type 1: Joint
            elif m == 2: comm = comm + '\x02\x00' #-> Command Instance: Motion Type 2: Linear
            # ~ ----------------------------------------------------------------------------------------------------------------------------
            comm = comm + '\x01\x02\x00\x00'
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Parse Data (little-endian 32-bit words appended via self.get_word)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        if 1:
            #Robot & Station ID-------------------------------------------------------------------------------------------------------------
            comm = comm + '\x01\x00\x00\x00' #-> Data word 1: Robot Number (Hard Coded to 1)
            comm = comm + '\x00\x00\x00\x00' #-> Data word 2: Station Number (Hard Coded to 0)
            #speed type---------------------------------------------------------------------------------------------------------------------
            if s == 1: comm = comm + '\x00\x00\x00\x00' #-> Data word 3: Speed Type 1: % Max speed in 0.01 %
            elif s == 2: comm = comm + '\x01\x00\x00\x00' #-> Data word 3: Speed Type 2: Linear Speed in 0.1 mm/s
            elif s == 3: comm = comm + '\x02\x00\x00\x00' #-> Data word 3: Speed Type 3: Rotate Speed in 0.1 deg/s
            #speed for speed type (value clamped to the legal range for each type)----------------------------------------------------------
            if s == 1: comm = comm + self.get_word(max(min(v,100),0.01),2) #-> Data word 4: Robot Motion Speed in 0.01%
            elif s == 2: comm = comm + self.get_word(max(min(v,999),0.10),1) #-> Data word 4: Robot Motion Speed in 0.1mm/s
            elif s == 3: comm = comm + self.get_word(max(min(v,499),0.10),1) #-> Data word 4: Robot Motion Speed in 0.1deg/s
            #Axis pulse targets (NOTE: unlike udp_movj these are raw pulse counts, not mm/deg)----------------------------------------------
            comm = comm + self.get_word(ps,0) #-> Data word 5: S axis target in pulses
            comm = comm + self.get_word(pl,0) #-> Data word 6: L axis target in pulses
            comm = comm + self.get_word(pu,0) #-> Data word 7: U axis target in pulses
            comm = comm + self.get_word(pr,0) #-> Data word 8: R axis target in pulses
            comm = comm + self.get_word(pb,0) #-> Data word 9: B axis target in pulses
            comm = comm + self.get_word(pt,0) #-> Data word 10: T axis target in pulses
            #0 padding for words 11 to 22 (unused)------------------------------------------------------------------------------------------
            for i in range(11,23): comm = comm + self.get_word(0,0) #-> Data word 11-22: Pad with 0s
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Send Command Receive Data (busy-wait on rob_chkout so only one request is in flight)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        data = ''
        while self.rob_chkout: pass
        self.rob_chkout = True;
        self.sock_udp.sendto(comm,("192.168.1.31",10040))
        data,addr = self.sock_udp.recvfrom(512)
        self.rob_chkout = False;
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        # ~ Parse Received Data (bytes 25/26 are the HSES status / added-status bytes; both 0 means success)
        # ~ --------------------------------------------------------------------------------------------------------------------------------
        #~ if not ord(data[25]) + ord(data[26]):
        self.udp_wait(self.udp_movp,args,pos);
        if self.dbg or not(not ord(data[25]) + ord(data[26])): print("Error with Pulse Move Command");self.udp_dbug(comm,data);return -1;
        return 1
    def udp_wait(self,command, args, pos): #wait for motion command
        """Poll safety/status/position over UDP until a motion command completes.

        command: the bound method that issued the move (used to re-issue it
                 after a servo-off recovery).
        args:    the argument tuple to pass to `command` on re-issue.
        pos:     target cartesian position [x,y,z,rx,ry,rz] to converge on,
                 or None to skip the target-distance check.
        Loops until position error <= 10, running bit clear, servo on and all
        three e-stop bits set; handles collaborative-stop / e-stop recovery by
        resetting alarms, re-enabling servo power and resending the command.
        """
        #~ print "-----------------------------------------------------------------------------------------------------------------------------"
        #~ print "STARTING MOVE WAIT"
        #~ print "-----------------------------------------------------------------------------------------------------------------------------"
        dim = 100; saf = 4; run = 1; srv = 0; tog = 0; col = 1; ylo = False; ang = 100 #target;safety;runing;servof;toggle;light
        while dim > 10 or ang > 5 or run == 1 or srv == 0 or saf != 3: #while command not complete
            if self.dbg:
                print "Position Error: \t", dim
                print "Orientation Error:\t", ang, "(Discarded)"
                print "Running Bit:    \t", run
                print "Servo Bit:      \t", srv
                print "Safe Bit:       \t", saf
                pass
            if 1: #read and calculate data
                msg = "";
                a = self.udp_rsaf();
                b = self.udp_rstt();
                c = self.udp_rpos(p=0)[0:6];
                # Each reader returns -1 on failure; stale values are kept then.
                if a != -1:
                    saf = a;
                    col = saf[4] or saf[5];
                    gat = saf[3];
                    saf = sum(saf[0:3]);
                if b != -1:
                    stt = b;
                    mod = stt[0];
                    srv = stt[9];
                    run = stt[4];
                    slo = stt[3];
                if c != -1:
                    pt1 = c;
                # NOTE(review): if the very first udp_rstt()/udp_rsaf() call
                # fails, `mod`/`gat` below are referenced before assignment.
                if not pos == None: #if check target flag is on
                    dim = [pt1[0]-pos[0], pt1[1]-pos[1], pt1[2]-pos[2]] #check if robot reached target
                    dim = (dim[0]**2 + dim[1]**2 + dim[2]**2)**0.5 #calculate delta position norm
                    #~ ang = [pt1[3]-pos[3], pt1[4]-pos[4], pt1[5]-pos[5]] #check if robot reached target
                    #~ ang = (ang[0]**2 + ang[1]**2 + ang[2]**2)**0.5 #calculate delta position norm
                    ang = 0; #didnt work as well as i thought...
                else: dim = 0;ang = 0 #if not target check set to 0
            if 1: #parse warnings if warning
                if mod!=1: print(" Error! Robot Not in Command Mode");sys.exit()
                if col: print("Error! Collaborative Safety Triggered.");self.udp_serv(on=0);srv = 0;
                if not srv: #if servo off = trigger
                    if 1: print("Error! Servo Off.") #send message servo off
                    if col: print("Error! Collaborative Safety Triggered") #if collaborative trigger
                    if saf != 3: print("Error! E Stop Triggered.") #if emergency stop trigger
                    elif saf == 3 and not col: #if off and safe
                        print ("Safety Clear. Restoring Servo Power.") #read alarm,reset alarm, restore servo
                        self.udp_ralm();
                        self.udp_rset();
                        self.udp_serv();
                        print ("Resuming Motion, Please Stay Back")
                        command(args); return 1;
                if not gat and srv: print("Safety Gate Triggered"); ylo = 1;
                elif gat and srv and ylo: print("Safety Gate Clear"); ylo = 0;
        #~ print "-----------------------------------------------------------------------------------------------------------------------------"
        #~ print "ENDING MOVE WAIT"; time.sleep(0.025);
        #~ print "-----------------------------------------------------------------------------------------------------------------------------"
        return 1
    def udp_dbug(self,comm,data): #print udp command and response
        """Hex-dump one HSES request/response pair for debugging.

        comm: the raw request string that was sent (32-byte header + payload).
        data: the raw response string received (32-byte header + payload).
        Slices both buffers into the named HSES header fields and prints every
        field, then the payloads 4 bytes per row, as zero-padded hex. Always
        returns 0; purely a console printer with no side effects on state.
        """
        if 1:#split header & data (payload starts after the fixed 32-byte header)
            senReqestData = comm[32:len(comm)]
            recReqestData = data[32:len(data)]
            datasize = len(data)
            commsize = len(comm)
        if 1: #comm head
            senIdentifier = comm[0:4]   #bytes 0,1,2,3             4 bytes
            senHeaderSize = comm[4:6]   #bytes 4,5                 2 bytes
            senDataPartsz = comm[6:8]   #bytes 6,7                 2 bytes
            senReserveBt1 = comm[8]     #bytes 8                   1 bytes
            senPricessDiv = comm[9]     #bytes 9                   1 bytes
            senAcknowledg = comm[10]    #bytes 10                  1 bytes
            senRequest_ID = comm[11]    #bytes 11                  1 bytes
            senBlock_numb = comm[12:16] #bytes 12,13,14,15         4 bytes
            senReservebt2 = comm[16:24] #bytes 16..23              8 bytes
            senCommandnum = comm[24:26] #bytes 24,25               2 bytes
            senInstanceID = comm[26:28] #bytes 26,27               2 bytes
            senAttributes = comm[28]    #bytes 28                  1 bytes
            senServicsreq = comm[29]    #bytes 29                  1 bytes
            senPaddingbyt = comm[30:32] #bytes 30,31               2 bytes
        if 1: #resp head
            recIdentifier = data[0:4]   #bytes 0,1,2,3             4 bytes
            recHeaderSize = data[4:6]   #bytes 4,5                 2 bytes
            recDataPartsz = data[6:8]   #bytes 6,7                 2 bytes
            recReserveBt1 = data[8]     #bytes 8                   1 bytes
            recPricessDiv = data[9]     #bytes 9                   1 bytes
            recAcknowledg = data[10]    #bytes 10                  1 bytes
            recRequest_ID = data[11]    #bytes 11                  1 bytes
            recBlock_numb = data[12:16] #bytes 12,13,14,15         4 bytes
            recReservebt2 = data[16:24] #bytes 16..23              8 bytes
            recServiceByt = data[24]    #bytes 24                  1 bytes
            recStatusByte = data[25]    #bytes 25 (0 = success)    1 bytes
            recAddStatbyt = data[26]    #bytes 26 (0 = success)    1 bytes
            recPaddingbyt = data[27]    #bytes 27                  1 bytes
            recAddStatsiz = data[28:30] #bytes 28,29               2 bytes
            recPaddingsiz = data[30:32] #bytes 30,31               2 bytes
        if 1: #comm sent
            print "----------------------------------------------------------------------------"
            print "Total Bytes Sent: ", commsize
            print "----------------------------------------------------------------------------"
            print "Identifier: ", [hex(ord(x))[2:].zfill(2) for x in senIdentifier]
            print "HeaderSize: ", [hex(ord(x))[2:].zfill(2) for x in senHeaderSize]
            print "DataPartsz: ", [hex(ord(x))[2:].zfill(2) for x in senDataPartsz]
            print "Reservebt1: ", [hex(ord(x))[2:].zfill(2) for x in senReserveBt1]
            print "ProcessDiv: ", [hex(ord(x))[2:].zfill(2) for x in senPricessDiv]
            print "Acknowledg: ", [hex(ord(x))[2:].zfill(2) for x in senAcknowledg]
            print "Request_ID: ", [hex(ord(x))[2:].zfill(2) for x in senRequest_ID]
            print "Block_numb: ", [hex(ord(x))[2:].zfill(2) for x in senBlock_numb]
            print "Reservebt2: ", [hex(ord(x))[2:].zfill(2) for x in senReservebt2]
            print "Commandnum: ", [hex(ord(x))[2:].zfill(2) for x in senCommandnum]
            print "InstanceID: ", [hex(ord(x))[2:].zfill(2) for x in senInstanceID]
            print "Attributes: ", [hex(ord(x))[2:].zfill(2) for x in senAttributes]
            print "Servicsreq: ", [hex(ord(x))[2:].zfill(2) for x in senServicsreq]
            print "Paddingsiz: ", [hex(ord(x))[2:].zfill(2) for x in senPaddingbyt]
        if 1: #data sent
            print "----------------------------------------------------------------------------"
            print "SENT DATA: ", len(comm)-32, " bytes"
            print "----------------------------------------------------------------------------"
            if len(comm) > 32:
                comdat = [hex(ord(x))[2:].zfill(2) for x in senReqestData]
                for i in xrange(0,len(comdat),4):
                    print comdat[i:i+4]
        if 1: #resp recd
            print "----------------------------------------------------------------------------"
            print "Total Bytes Recd: ", datasize
            print "----------------------------------------------------------------------------"
            print "Identifier: ", [hex(ord(x))[2:].zfill(2) for x in recIdentifier]
            print "HeaderSize: ", [hex(ord(x))[2:].zfill(2) for x in recHeaderSize]
            print "DataPartsz: ", [hex(ord(x))[2:].zfill(2) for x in recDataPartsz]
            print "Reservebt1: ", [hex(ord(x))[2:].zfill(2) for x in recReserveBt1]
            print "ProcessDiv: ", [hex(ord(x))[2:].zfill(2) for x in recPricessDiv]
            print "Acknowledg: ", [hex(ord(x))[2:].zfill(2) for x in recAcknowledg]
            print "Request_ID: ", [hex(ord(x))[2:].zfill(2) for x in recRequest_ID]
            print "Block_numb: ", [hex(ord(x))[2:].zfill(2) for x in recBlock_numb]
            print "Reservebt2: ", [hex(ord(x))[2:].zfill(2) for x in recReservebt2]
            print "ServiceByt: ", [hex(ord(x))[2:].zfill(2) for x in recServiceByt]
            print "StatusByte: ", [hex(ord(x))[2:].zfill(2) for x in recStatusByte]
            print "AddStatbyt: ", [hex(ord(x))[2:].zfill(2) for x in recAddStatbyt]
            print "Paddingbyt: ", [hex(ord(x))[2:].zfill(2) for x in recPaddingbyt]
            print "AddStatsiz: ", [hex(ord(x))[2:].zfill(2) for x in recAddStatsiz]
            print "Paddingsiz: ", [hex(ord(x))[2:].zfill(2) for x in recPaddingsiz]
        if 1: #data recd
            print "----------------------------------------------------------------------------"
            print "RECD DATA: ", len(data)-32, " bytes"
            print "----------------------------------------------------------------------------"
            if len(data) > 32:
                reqdat = [hex(ord(x))[2:].zfill(2) for x in recReqestData]
                for i in xrange(0,len(reqdat),4):
                    print reqdat[i:i+4]
        return 0
#~ -----------------------------------------------------------------------------------------------------------------------------------------
#VAR READ WRITE FOR ON THE FLY JOB ***INCOMPLETE***
#~ -----------------------------------------------------------------------------------------------------------------------------------------
def udp_pvar(self): #get set point
""" Doc
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ Notes
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ this function uses the yaskawa hi-speed udp server to set or get Point Variable Data
"""
comm = ''
data = ''
if not ord(data[25]) + ord(data[26]):return 1
return -1
def udp_dvar(self): #get set double
""" Doc
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ Notes
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ this function uses the yaskawa hi-speed udp server to set or get Double Variable Data
"""
comm = ''
data = ''
if not ord(data[25]) + ord(data[26]):return 1
return -1
def udp_ivar(self): #get set integer
""" Doc
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ Notes
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ this function uses the yaskawa hi-speed udp server to set or get Integer Variable Data
"""
comm = ''
data = ''
if not ord(data[25]) + ord(data[26]):return 1
return -1
def udp_bvar(self): #get set byte
""" Doc
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ Notes
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ this function uses the yaskawa hi-speed udp server to set or get Byte Variable Data
"""
comm = ''
data = ''
if not ord(data[25]) + ord(data[26]):return 1
return -1
import os
import sys
import time
import socket
import threading
import math
import struct
class rob():
def __init__(self, PARENT=0, dbg = 0):
self.PAR = PARENT
self.dbg = dbg
self.com1 = 'CONNECT Robot_access\r' #host control request
self.com2 = 'HOSTCTRL_REQUEST ' #command header
self.IP_ADD = '192.168.1.31' #robot IP
self.TCP_PT = 80 #robot tcp port number
self.UDP_PT = 10040 #robot udp port number
self.rob_chkout = False #socket lock flag to make sure only one message at one time
self.sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);self.sock_udp.settimeout(1.0)
self.sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
return
#~ -----------------------------------------------------------------------------------------------------------------------------------------
#~ TCP COMMANDS
#~ -----------------------------------------------------------------------------------------------------------------------------------------
    def runchk(self): #check if robot online
        """Startup sanity check: ping the controller, zero the GPIO outputs,
        then verify battery, command mode, e-stop and collaborative state.
        Calls sys.exit() on any failure; prints a banner on success.
        """
        # Ping with both the Unix (-c 1) and Windows (-n 1) flag spellings;
        # fail only if neither form succeeds.
        if not (not os.system('ping -c 1 192.168.1.31') or not os.system('ping 192.168.1.31 -n 1')): print ("ERROR! Robot Server Off Line!");sys.exit()
        self.wrgpio() #write all gpio 0
        stt = self.redstt();
        saf = self.redsaf();
        col = self.colsaf(); col = col[0] or col[1]
        # NOTE(review): redsaf() currently hard-codes element 4 to 0, so this
        # battery check can never trigger — confirm intent.
        if saf[4] != 0: print("ERROR! Robot Battery Low!"); sys.exit()
        if int(stt[0])!=1: print("ERROR! Robot Not in Command Mode"); sys.exit()
        if sum(saf[0:3])!=3:print("ERROR! E Stop Triggered!"); sys.exit()
        if col: print("ERROR! Collaborative Mode Triggered!"); sys.exit()
        print "-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
        print "ROBOT CHECK"
        print "-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
        print "Robot Server Online..."
        print "Robot Mode Check Complete..."
        print "Robot Safety Check Complete..."
        print "-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
        return
def senreq(self): #host control request
try:self.sock_tcp.connect((self.IP_ADD,self.TCP_PT))
except: print ("Error! Cannot Connect Socket to Robot"); sys.exit();
self.sock.send(self.com1)
resp = self.sock.recv(256)
if self.dbg:
print ("Sent: ", self.com1.strip());
print ("Recd: ", resp.strip())
return resp
def sencom(self, comm, data, movecom = False, posf = None, resend = False): #send command
commm = comm #incase move wait recovery
dataa = data #incase move wait recovery
size = len(data) #if data get data size
comm = self.com2 + comm + ' ' + str(size) + '\r' #parse command n data
senrq = self.senreq() #send host control request
while self.rob_chkout:pass #if robot busy, wait
self.rob_chkout = True; #set robot busy
self.sock_tcp.send(comm);resp = self.sock.recv(256) #read 256 byte comm resp
self.sock_tcp.send(data);resp += self.sock.recv(256) #read 256 byte data resp
if "closing control connection" in resp: #if robot closes port
print("Robot Forcefully Disconnected") #if error resp exit
sys.exit()
self.rob_chkout = False #set robot not busy
if self.dbg:
print ("Sent: ", comm);
print ("Data: ", data);
print ("Recd: ", resp.split('\r\n')[0]+":", resp.split('\r\n')[1].strip(), "\n")
if movecom == True: self.mvwait(commm, dataa, posf); #loop while robot moving
return resp
    def mvwait(self, comm, data, pos ,check_estop=0, check_safety_gate=0, check_collab=0): #wait for motion command to complete
        """Poll safety/status/position over TCP until a motion completes.

        comm/data: the original command + payload, kept so the move can be
                   re-sent after a servo-off recovery.
        pos:       target [x,y,z,rx,ry,rz] to converge on, or None to skip the
                   distance check.
        check_estop / check_safety_gate / check_collab: 0 disables the
                   corresponding safety condition by forcing it "OK".
        Loops until distance <= 25, running bit clear, servo on and all three
        e-stop bits set; recovers from a safe servo-off by resetting alarms,
        re-enabling servo power and resending the command.
        """
        dim = 100; saf = 4; run = 1; srv = 0; tog = 0; col = 1; ylo = False #target;safety;runing;servof;toggle;safety gate light
        while dim > 25 or run == 1 or srv == 0 or saf != 3: #while command not complete
            if 1: #debug print
                print ("-------------------------------------------------------------")
                print ("WAITING FOR...", comm)
                print ("-------------------------------------------------------------")
                print ("TARGET REACHED :", dim)
                print ("RUNNING BIT ON :", run)
                print ("SERVO  BIT ON  :", srv)
                print ("SAFETY BIT SUM:", saf)
                print ("COLLABORATIVE :", col)
                print ("-------------------------------------------------------------")
            if 1: #read and calculate data
                #read safety, status, position
                saf=self.redsaf();
                stt=self.redstt();
                pt1=self.redpos();
                col=self.colsaf();
                msg = "";
                mod = int(stt[0]); gat = int(saf[3]); saf = sum(saf[0:3]) #pase mode, area scan, estop
                srv = int(stt[9]); run = int(stt[4]); slo = int(stt[3]) #parse servo, run, safegate bit
                col = col[0] or col[1]; pt1 = map(float, pt1.split('\n')[1].split(',')[0:6]) #parse colaborative safety trigger, position
                if not pos == None: #if check target flag is on
                    dim = [pt1[0]-pos[0], pt1[1]-pos[1], pt1[2]-pos[2], pt1[3]-pos[3], pt1[4]-pos[4], pt1[5]-pos[5]] #check if robot reached target
                    dim = (dim[0]**2 + dim[1]**2 + dim[2]**2)**0.5 #calculate delta position norm
                else: dim = 0
                # Disabled checks are forced to their "all clear" values.
                if not check_estop: srv = 1;
                if not check_safety_gate: gat = 3;
                if not check_collab: col = 0;
            if 1: #print warnings & prompts
                if mod!=1: print ("Error! Robot Not in Command Mode");sys.exit() #if not in remote mode, exit code
                if col: print ("Error! Collaborative Safety Triggered!"); self.servof() #if collaborative trigger, warning, servo off
                if not srv: #if servo off = trigger
                    if 1: print ("Error! Servo Off.") #send message servo off
                    if col: print ("Error! Collaborative Safety Triggered") #send message reset collaborative safety trigger
                    if saf != 3: print ("Error! E Stop Triggered.") #send message estop trigger
                    elif saf == 3 and not col: #if no safety trigger, recover
                        print ("Safety Clear. Restoring Servo Power.") #read alarm,reset alarm, restore servo
                        self.redalm();
                        self.resets();
                        self.servon();
                        print ("Resuming Motion, Please Stay Back")
                        self.sencom(comm,data,movecom = True, posf = pos, resend = True) #resend last motion command
                if not gat and srv: print("Safety Gate Triggered");ylo = 1; #display message safety gate triggered
                elif gat and srv and ylo: print ("Safety Gate Clear."); ylo = 0; #display message safety gate clear
        return 1
def redpos(self): #read cartesian position of robot
comm = 'RPOSC'
data = '0,0\r'
return self.sencom(comm,data)
def redpls(self): #read pulse position of robot
comm = 'RPOSJ'
data = ''
return self.sencom(comm,data)
def redalm(self): #read alarms
comm = 'RALARM'
data = ''
return self.sencom(comm,data)
def redstt(self): #read status bits
comm = 'RSTATS'
data = ''
stt = self.sencom(comm,data).split('\n')[1].split(',')
st1 = int(stt[0])
st2 = int(stt[1])
stt = '{0:08b}'.format(st1) + '{0:08b}'.format(st2)
return stt
    def redsaf(self): #read safety bytes
        """Read the safety-related IO bytes and return
        [pstp, estp, astp, asaf, 0]: pendant-stop, external e-stop, area-stop
        and area-safe bits. The last element is battery status, currently
        hard-coded to 0 even though `batt` is computed below.
        """
        comm = 'IOREAD'
        data = '80020,8\r';stop = self.sencom(comm,data)
        data = '80400,8\r';safe = self.sencom(comm,data)
        data = '50010,8\r';batt = self.sencom(comm,data)
        # Each reply carries one integer on its second line; render as 8 bits.
        stop = format(int(stop.split('\n')[1].strip()),'08b')
        safe = format(int(safe.split('\n')[1].strip()),'08b')
        batt = format(int(batt.split('\n')[1].strip()),'08b')
        if batt[5] == '1' or batt[6] == '1': print "Battery Response:\t", batt
        batt = int(batt[5]) or int(batt[6])
        pstp = int(stop[1])
        estp = int(stop[2])
        astp = int(stop[4])
        asaf = int(safe[7])
        # NOTE(review): returning 0 instead of `batt` disables the battery-low
        # check in runchk()/mvwait() — confirm whether this is intentional.
        return [pstp, estp, astp, asaf, 0]
def colsaf(self): #check collaborative hard/soft bump
comm = 'IOREAD'
data = '81382,1\r'
hard = self.sencom(comm,data)
data = '81383,1\r'
soft = self.sencom(comm,data)
hard = format(int(hard.split('\n')[1].strip()),'08b')[5]
soft = format(int(soft.split('\n')[1].strip()),'08b')[5]
return [int(hard), int(soft)]
def resets(self): #reset alarms
comm = 'RESET'
data = ''
return self.sencom(comm,data)
def cancel(self): #cancel request... useless never used
comm = 'CANCEL'
data = ''
return self.sencom(comm,data)
def holdon(self): #external hold... useless never used
comm = 'HOLD'
data = '1\r'
return self.sencom(comm,data)
def holdof(self): #hold off... useless never used
comm = 'HOLD'
data = '0\r'
return self.sencom(comm,data)
def setmod(self, m): #useless... cannot switch to command mode without key anyway, hardware safety
if m == 1:data = '1\r'
if m == 2:data = '2\r'
comm = 'MODE'
return self.sencom(comm,data)
def servon(self): #servo on
comm = 'SVON'
data = '1\r'
return self.sencom(comm,data)
def servof(self): #servo off
comm = 'SVON'
data = '0\r'
return self.sencom(comm,data)
def msgdis(self, msg): #display pendant message
comm = 'MDSP'
data = msg + '\r'
return self.sencom(comm,data)
def rdgpio(self, stt_add=30050, byt_num=1, p=1): #read byt_num of gpio starting at stt_add
if not (isinstance(byt_num,int) and byt_num >0): return
byt_num = byt_num*8
comm = 'IOREAD'
data = str(stt_add)+','+str(byt_num)+'\r'
return self.sencom(comm,data)
    def wrgpio(self, stt_add=27010, bit_num=8, bit_val=[[0,0,0,0,0,0,0,0]], p=1): #write bit_nums starting from stt_add
        """Write bit_num GPIO bits starting at address stt_add via IOWRITE.

        bit_val is a list of 8-element 0/1 lists, one per byte, MSB first.
        Returns the controller response, or the string "INPUT ERROR" when
        validation fails (each failure prints a numbered "Error" code).
        NOTE(review): bit_val uses a mutable default argument — safe only
        because it is never mutated here; do not append to it.
        """
        flag = 0
        comm = 'IOWRITE'
        data = str(stt_add) + "," + str(bit_num)
        if 1: #check input: list of bit_num/8 bytes, each 8 entries of 0/1
            if not isinstance(bit_val,list): flag = 1;print "Error", 1
            elif len(bit_val) != bit_num/8: flag = 1;print "Error", 2
            elif bit_num % 8 != 0: flag = 1;print "Error", 3
            else:
                for byte in bit_val:
                    if flag: break
                    if len(byte) != 8:
                        flag = 1;print "Error", 4
                        break
                    for bit in byte:
                        if bit != 0 and bit != 1:
                            flag = 1;print "Error", 5
                            break
        if flag: return "INPUT ERROR"
        if 1: #parse data: pack each 8-bit list into one integer, MSB first
            bytedata = []
            for bitlist in bit_val:
                out = 0
                for bit in bitlist:
                    out = (out<<1) | bit
                bytedata.append(out)
            for byte_val in bytedata: data = data + ',' + str(byte_val)
            data = data + '\r'
        return self.sencom(comm,data)
    def runjob(self,n='HOME',o=30050): #run job name n, and read complete flag o
        """
        NOTES:
        -> this function will run a job n on the robot controller and poll an
           output flag until it clears
        -> the function waits a minimum of one second before polling
        -> n = string name of job
        -> o = job complete flag output bit address (must be set on pendant)
        """
        comm = 'START';data = n+'\r';a = 1
        print self.sencom(comm,data);time.sleep(1)
        # Poll bit 4 of the byte at address o until it reads 0 (job done).
        # NOTE(review): self.fxn.rob.rdgpio reaches through an attribute `fxn`
        # that is never assigned in this class — presumably should be
        # self.rdgpio; confirm against the caller before relying on this.
        while a: a = int(format(int(self.fxn.rob.rdgpio(o).split('\n')[1].strip()),'08b')[4]);
        return a
def gohome(self): #move robot home position pulse = 0
comm = 'PMOVJ'
data = '5,0,0,0,0,0,0,0,0,0,0,0,0,0\r'
return self.sencom(comm,data, movecom = True)
def movjnt(self, v, px, py, pz, rx, ry, rz, tp=6): #move joint to absolute position
"""
v = velocity (in % Speed)
px = position x
py = position y
pz = position z
rx = rotation x
ry = rotation y
rz = rotation z
tp = orientation type -> please see documentation (default to type 6)
frame is defaulted to "0" which is world frame
"""
comm = 'MOVJ'
data = str(v) + ',0,' + str(px) + ',' + str(py) + ',' + str(pz) + ',' + str(rx) + ',' + str(ry) + ',' + str(rz) + ',' + str(tp) + ',0,0,0,0,0,0,0\r'
fpos = [px,py,pz,rx,ry,rz] #final position, used to confirm motion complete using read position
return self.sencom(comm,data, movecom = True, posf = fpos)
def movlin(self, v, px, py, pz, rx, ry, rz, tp=6): #linear move to absolute position
"""
v = velocity (in mm/s)
px = position x
py = position y
pz = position z
rx = rotation x
ry = rotation y
rz = rotation z
tp = orientation type -> please see documentation (default to type 6)
frame is defaulted to "0" which is world frame
"""
comm = 'MOVL'
data = '0, ' + str(v) + ',0,' + str(px) + ',' + str(py) + ',' + str(pz) + ',' + str(rx) + ',' + str(ry) + ',' + str(rz) + ',' + str(tp) + ',0,0,0,0,0,0,0\r'
fpos = [px,py,pz,rx,ry,rz] #final position, used to confirm motion complete using read position
return self.sencom(comm,data, movecom = True, posf = fpos)
    def movinc(self,v,dx,dy,dz,da,db,dc, rv=0, lv=0): #incremental move
        """ Use increment move command with increment data
        v  = velocity, see lv/rv flag
        dx = incremental position x
        dy = incremental position y
        dz = incremental position z
        da = incremental rotation x
        db = incremental rotation y
        dc = incremental rotation z
        rv = force rotate-speed mode with this speed value
        lv = force linear-speed mode with this speed value
        """
        comm = 'IMOV'
        # Speed-mode field: '1,' = rotate speed (cap 100), '0,' = linear
        # speed (cap 500); rv/lv override the automatic choice.
        if dx+dy+dz == 0: data = '1,';v = min(v, 100); #if no linear distance, use rotate speed
        else: data = '0,';v = min(v, 500); #else use linear speed
        if rv: data = '1,';v = min(rv,100); #if optional rv provided force rotate speed
        if lv: data = '0,';v = min(lv,500); #if optional lv provided force linear speed
        data = data + str(v) + ',' + '0' + ',' + str(dx) + ',' + str(dy) + ',' + str(dz) + ',' + str(da) + ',' + str(db) + ',' + str(dc) + ',0,0,0,0,0,0,0,0\r'
        posi = [float(i) for i in self.redpos().split('\n')[1].split(',')[0:6]] #get initial position of robot
        posm = [float(i) for i in [dx, dy, dz, da, db, dc]] #calculate final position of robot
        fpos = map(sum,zip(posi,posm))
        return self.sencom(comm,data, movecom = True, posf = fpos)
def movijt(self,v,dx,dy,dz,da,db,dc,p=1): #joint incremental move with current position read
""" Use joint move command with increment data
v = velocity, see lv/rv flag
dx = incremental position x
dy = incremental position y
dz = incremental position z
da = incremental rotation x
db = incremental rotation y
dc = incremental rotation z
"""
posr = self.redpos().split('\n')[1].split(','); #read current position...
posi = [float(i) for i in posr[0:6]] #get position & rotation
posm = [float(i) for i in [dx, dy, dz, da, db, dc]]; #parse input vector
fpos = map(sum,zip(posi,posm)) #add input vector to current positon...
comm = 'MOVJ'
data = str(v)+',0,'+str(fpos[0])+','+str(fpos[1])+','+str(fpos[2])+','+str(fpos[3])+','+str(fpos[4])+','+str(fpos[5])+','+posr[6]+',0,0,0,0,0,0,0\r'
return self.sencom(comm,data, movecom = True, posf = fpos)
def moviln(self,v,dx,dy,dz,da,db,dc,p=1): #linear incremental move with current position read
""" Use Linear move command with increment data
v = velocity, see lv/rv flag
dx = incremental position x
dy = incremental position y
dz = incremental position z
da = incremental rotation x
db = incremental rotation y
dc = incremental rotation z
"""
posr = self.redpos().split('\n')[1].split(','); #read current position...
posi = [float(i) for i in posr[0:6]] #get position & rotation
posm = [float(i) for i in [dx, dy, dz, da, db, dc]]; #parse input vector
fpos = map(sum,zip(posi,posm)) #add input vector to current positon...
comm = 'MOVL'
data = '0, ' + str(v) + ',0,'+str(fpos[0])+','+str(fpos[1])+','+str(fpos[2])+','+str(fpos[3])+','+str(fpos[4])+','+str(fpos[5])+','+posr[6]+',0,0,0,0,0,0,0\r'
return self.sencom(comm,data, movecom = True, posf = fpos)
def mvpath(pts=[], inc=0, pls=0, xyz=0, jnt=0, lin=0, ind=0): #multipoint move
""" Send Continuous fire points
pts = list of each point with v,px,py,pz,rx,ry,rz,type for absolute or pulse motion
pts = list of each point with v,dx,dy,dz,da,db,dc, for incremental motion
ind = flag to set if motion settings are set individually
if 1,
inc = inc[i] = 1 if pts[i] is incremenetal else 0
pls = pls[i] = 1 if pts[i] is pulse motion else 0
xyz = xyz[i] = 1 if pts[i] is absolute move else 0
jnt = jnt[i] = 1 if pts[i] is joint motion else 0
lin = lin[i] = 1 if pts[i] is linear motion else 0
length of point and motion definition must be length of points
if 0,
all point definitions are set to either incremental = if inc = 1
or pulse = if pls = 1
or absolute = if xyz = 1
all motion types are set to joint = if jnt = 1
or linear = if lin = 1
either jnt or lin must be set to 1
either inc/pls/xyz must be set to 1
"""
if not len(pts) > 0: return 1 #atleast one point required #error 1 not enough points
if not all(len(a) == 7 for a in pts): return 2 #atleast v + 6axis required #error 2 points incompletely defined
if xyz and not all(len(a) == 8 for a in pts): return 3 #orientation types required #error 3 type variable not sent for absolute motion
if not ind: #if individual motion not specified
inc = [inc]*len(pts);
pls = [pls]*len(pts);
xyz = [xyz]*len(pts);
jnt = [jnt]*len(pts);
lin = [lin]*len(pts);
else: #ensure individual motion for each point in path
if not all(len(a) == len(pts) for a in [inc,pls,xyz,jnt,lin]): return 4 #error 4 motion types for each point not specified
path = [[],[]] #create path point list
path[0] = ['']*len(pts) #comm list
path[1] = ['']*len(pts) #data list
com1 = 'CONNECT Robot_access Keep-Alive:-1\r' #host control request -> infinite continuous fire
com2 = 'HOSTCTRL_REQUEST ' #command header
for i in range(0,len(pts)): #parse each command and data in path
v = str(pt[0])+','
p = ', '.join(map(str,pts[1:6])) + ', '
if inc[i]:
if jnt[i]:
path[i][1] = '1,' + v + '0,' + p + '0,0,0,0,0,0,0,0\r'
path[i][0] = com2 + 'IMOV ' + str(len(path[1][i])) + '\r'
elif lin[i]:
path[i][1] = '1,' + v + '0,' + p + '0,0,0,0,0,0,0,0\r'
path[i][0] = com2 + 'IMOV ' + str(len(path[1][i])) + '\r'
elif pls[i]:
if jnt[i]:
path[i][1] = v + p + '0,0,0,0,0,0,0\r'
path[i][0] = com2 + 'PMOVJ ' + str(len(path[1][i])) + '\r'
elif lin[i]:
path[i][1] = '0, ' + v + p + '0,0,0,0,0,0,0\r'
path[i][0] = com2 + 'PMOVL ' + str(len(path[1][i])) + '\r'
elif xyz[i]:
if jnt:
t = str(pts[7]) + ',' if len(pts)<8 else '6,'
path[i][1] = v + '0,' + p + t + '0,0,0,0,0,0,0\r'
path[i][0] = com2 + 'PMOVL ' + str(len(path[1][i])) + '\r'
elif lin:
t = str(pts[7]) + ','
path[i][1] = '0, ' + v + '0,' + p + t + '0,0,0,0,0,0,0\r'
path[i][0] = com2 + 'PMOVL ' + str(len(path[1][i])) + '\r'
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #open socket to robot for continuous fire
try: sock.connect((self.IP_ADD,self.TCP_PT))
except: print("Error! Cannot Connect Socket to Robot"); sys.exit()
self.sock.send(com1); resp = self.sock.recv(256);
if not 'Keep-Alive:-1' in resp: print("Error! Cannot Connect Socket to Robot");sys.exit();
i=0;
while i < len(path): #send each command
j=1; #Monitor Running Bit Status
while j:
self.sock.send(com1 + 'RSTATS 0');resp = self.sock.recv(256);resp += self.sock.recv(256)
j = int(''.join(['{0:08b}'.format(int(q)) for q in resp.split('\n')[1].split(',')])[4])
self.sock.send(path[i][0]);resp = self.sock.recv(256) #Send Next Path Command
self.sock.send(path[i][1]);resp += self.sock.recv(256) #Send Next Path Command Data
print(resp)
i+=1;
return 0
#~ -----------------------------------------------------------------------------------------------------------------------------------------
#~ UDP COMMANDS
#~ -----------------------------------------------------------------------------------------------------------------------------------------
def udp_rtrq(self): #udp read joint torque
"""Doc
#~ ----------------------------
#~ Note: Read Joint Torques
#~ ----------------------------
"""
comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x77\x00\x01\x00\x00\x01\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
nib = []
axs = []
if len(data) > 32:
reqdat = data[32:]
for i in xrange(0,len(reqdat),4): nib.append(reqdat[i:i+4])
for i in range(5,11): axs.append(struct.unpack('<i',nib[i])[0])
if not ord(data[25]) + ord(data[26]): return float(ax[0]),float(ax[1]),float(ax[2]),float(ax[3]),float(ax[4]),float(ax[5])
else: print("Error with Torque Read Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
return -1
def udp_iorw(self, addr=27010, wrfl = 0, bits=[0,0,0,0,0,0,0,0]): #udp i.o. readwrite
"""doc
# ~ wrfl = read or write flag,
#~ 0 = Read
#~ 1 = Write
# ~ addr = io register specified as addr, divied by 10 to fit 2 bytes
# ~ bits = set values, must write 8 bits at a time.
"""
# ~ ------------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Command
# ~ ------------------------------------------------------------------------------------------------------------------------------------
if wrfl:
a = 0
for bit in bits: a = (a<<1) | bit
comm = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x78\x00' + struct.pack('<H',addr/10) + '\x01\x10\x00\x00' + struct.pack('<B',a) + '\x00\x00\x00'
else:
comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x78\x00' + struct.pack('<H',addr/10) + '\x01\x0e\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
if not wrfl: #if not write, return data recv
bit = [-1,-1,-1,-1,-1,-1,-1,-1] #No response
if len(data) > 32: #parse if response
dt = struct.unpack('B',data[32]) #unpack response byte
bit = [int(x) for x in '{0:08b}'.format(dt[0])] #parse bits
if not ord(data[25]) + ord(data[26]):return bit #return result if no errror
else: print("Error with IO Write Command")
else: #if write, return data sent
if not ord(data[25]) + ord(data[26]): return bits
else: print("Error with IO Read Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
return -1
def get_word(self, w,o): #get 32-bit int, 16
""" Doc
#~ Notes:
#~ w = number to create word packet (32 bit signed integer)
#~ o = order multiplier to number to create integer 10e^o
"""
a = w
b = math.modf(a);
c = b[1]*10**o;
d = b[0]*10**o;
e = int(c+d);
f = struct.pack('<i',e)
return f
def udp_rpos(self, p=0): #udp read position
"""doc
# ~ read robot position using udp server
command hard coded to return cartesian data
possible to request pulse data with flag p = 1
if 0: #debug.print Parsed Data
print "----------------------------------------------------------------------------"
print "Parsed Data..."
print "----------------------------------------------------------------------------"
if not p:
print " PX: ", axs[0]
print " PY: ", axs[1]
print " PZ: ", axs[2]
print " AX: ", axs[3]
print " AY: ", axs[4]
print " AZ: ", axs[5]
print " TP: ", t
print " ET: ", e
else:
print " PS: ", axs[0]
print " PL: ", axs[1]
print " PU: ", axs[2]
print " PR: ", axs[3]
print " PB: ", axs[4]
print " PT: ", axs[5]
print "----------------------------------------------------------------------------"
"""
if not p: comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x75\x00\x65\x00\x00\x01\x00\x00'
else: comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x75\x00\x01\x00\x00\x01\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
nib = [] #list of 4byte chunks
axs = [] #list of axis coordinates
if len(data) > 32:
reqdat = data[32:] #get data part of packet
for i in xrange(0,len(reqdat),4): nib.append(reqdat[i:i+4]) #separate data words and extract requested data
for i in range(5,11): axs.append(struct.unpack('<i',nib[i])[0]) #unpack 4 byte packets as signed 32 bit integer
if not p: #Parse cartesian data
for i in range(0,3): axs[i] = axs[i]/1000. #10e-3 for position
for i in range(3,6): axs[i] = axs[i]/10000. #10e-4 for orientation
t = [hex(ord(x))[2:].zfill(2) for x in nib[1]] #get pose type for cartesian
e = [hex(ord(x))[2:].zfill(2) for x in nib[4]] #extended type for cartesian
if not ord(data[25]) + ord(data[26]):
if not p: return [axs[0],axs[1],axs[2],axs[3],axs[4],axs[5],t,e]
else: return [axs[0],axs[1],axs[2],axs[3],axs[4],axs[5]]
else: print(msg="Error with Position Read Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
return -1
def udp_rstt(self): #-> read status
"""doc
# ~ Read Robot Status Byte 1 & 2
#~ byte 1:
#~ bit 0: Mode Step
#~ bit 1: Mode Cycle
#~ bit 2: Mode Continuous
#~ bit 3: Is Running
#~ bit 4: Is Safety
#~ bit 5: Mode Teach
#~ bit 6: Mode Play
#~ bit 7: Mode Remote
#~ byte 2:
#~ bit 0: Unused
#~ bit 1: Hold Pendant
#~ bit 2: Hold External
#~ bit 3: Hold Remote
#~ bit 4: Alarm Flag
#~ bit 5: Error Flag
#~ bit 6: Servo Status
#~ bit 7: Unused
"""
comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x72\x00\x01\x00\x00\x01\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
if len(data) > 32:
dt1 = struct.unpack('B',data[32])
dt2 = struct.unpack('B',data[36])
stt = [int(x) for x in '{0:08b}'.format(dt1[0])] + [int(x) for x in '{0:08b}'.format(dt2[0])]
if not ord(data[25]) + ord(data[26]): return stt
else: print("Error with Status Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
return -1
def udp_ralm(self): #-> read alarm
""" Doc
----------------------------------------
Notes:
----------------------------------------
Function to Read Last Alarm
----------------------------------------
"""
# ~ ------------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Command
# ~ ------------------------------------------------------------------------------------------------------------------------------------
comm = '\x59\x45\x52\x43\x20\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x70\x00\x01\x00\x01\x0e\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
a = [-1,-1,-1,-1]
if len(data) > 32: a = [hex(ord(x))[2:].zfill(2) for x in data[32:36]]
if not ord(data[25]) + ord(data[26]): return a
else: print("Error with Alarm Read Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
return -1
def udp_rset(self): #-> reset alarm & error
""" Doc
----------------------------------------
Notes:
----------------------------------------
Function: Cancel Alarm & Error Status
Required to Resume Servo On
----------------------------------------
"""
# ~ ------------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Command Comm1 = Cancel Alarm, Comm2 = Cancel Error
# ~ ------------------------------------------------------------------------------------------------------------------------------------
comm1 = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x82\x00\x01\x00\x01\x10\x00\x00\x01\x00\x00\x00'
comm2 = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x82\x00\x02\x00\x01\x10\x00\x00\x01\x00\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm1,("192.168.1.31",10040))
data1,addr = self.sock_udp.recvfrom(512)
self.sock_udp.sendto(comm2,("192.168.1.31",10040))
data2,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
recStatusByte1 = ord(data1[25]) + ord(data1[26])
recStatusByte2 = ord(data2[25]) + ord(data2[26])
if not recStatusByte1 and not recStatusByte2: return 1
else: print("Error with Reset Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm1,data1)
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm2,data2)
return -1
def udp_serv(self,on=1): #-> servo on off
if on: comm = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x83\x00\x02\x00\x01\x10\x00\x00\x01\x00\x00\x00'
else: comm = '\x59\x45\x52\x43\x20\x00\x04\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39\x83\x00\x02\x00\x01\x10\x00\x00\x02\x00\x00\x00'
data = ''
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
if not ord(data[25]) + ord(data[26]):return 1
else: print("Error with Servo Command")
if self.dbg or not(not ord(data[25]) + ord(data[26])): self.udp_dbug(comm,data)
return -1
def udp_rsaf(self,s=1): #read Safety Bits implementation of iorw
""" Doc
Read the Safety IO Bits
Note the Registers May Be Dependent on Wiring & Logical Setup
For All Robots:
E-stop Status at Reg 80020
Area Scanner Status at Reg 80400
For Collaborative Robots Only:
Bump Status at Reg 81380
Hard Bump Status at Reg 81382
Soft Bump Status at Reg 81383
Input s: s=0 non collaborative robot, s=1 collaborative safe robot
"""
a = self.udp_iorw(addr = 80020)
b = self.udp_iorw(addr = 80400)
if s: c = self.udp_iorw(addr = 81380)
pstp = a[1]
estp = a[2]
astp = a[4]
asaf = b[7]
if s: hard=c[5];soft=c[6];
else: hard= -1 ;soft= -1 ;
return [pstp,estp,astp,asaf,hard,soft]
	def udp_movj(self,args): #udp move cartesian
		""" Doc
		# ~ --------------------------------------------------------------------------------------------------------------------
		# ~ Notes
		# ~ --------------------------------------------------------------------------------------------------------------------
		# ~ this function uses the yaskawa hi-speed udp server to move robot
		# ~ inputs (packed in the args tuple):
		# ~ m = motion Type,
		# ~ 	1 = joint,
		# ~ 	2 = linear,
		# ~ 	3 = linear increment
		# ~ s = speed Type,
		# ~ 	1 = Percentage of Max Speed, for m = 1 only
		# ~ 	2 = Linear speed in 0.1 mm/s, for m = 2,3 only
		# ~ 	3 = Rotation speed in 0.1 deg/s, for m = 2,3 only
		# ~ v = Speed Value, must be specified in the type specified by s, no checks performed
		# ~ px= X Coordinate, specified in milimeters and converted to micro meters (10e-6)
		# ~ py= Y Coordinate, specified in milimeters and converted to micro meters (10e-6)
		# ~ pz= Z Coordinate, specified in milimeters and converted to micro meters (10e-6)
		# ~ rx= X Rotation, specified in degrees and converted to 0.1 mili deg (10e-4)
		# ~ ry= Y Rotation, specified in degrees and converted to 0.1 mili deg (10e-4)
		# ~ rz= Z Rotation, specified in degrees and converted to 0.1 mili deg (10e-4)
		# ~ t = Orientation Type, axis coordinate and flip conditions (Hard Coded to 3 below)
		# ~ e = Extended Type (accepted but unused; hard coded to 0 below)
		# ~ returns 1 on success, -1 on a command error
		"""
		m, s, v, px, py, pz, rx, ry, rz, t, e = args;
		# ~ ------------------------------------------------------------------------------------------------------------------------------------
		# ~ Parse Header
		# ~ ------------------------------------------------------------------------------------------------------------------------------------
		if 1: #always true: visual grouping of the header bytes
			comm = '\x59\x45\x52\x43\x20\x00\x68\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39'
			# ~ ------------------------------------------------------------------------------------------------------------------------------------
			comm = comm + '\x8a\x00' #-> Command ID Number for Move Command
			# ~ ------------------------------------------------------------------------------------------------------------------------------------
			if m == 1: comm = comm + '\x01\x00' #-> Command Instance: Motion Type 1: Joint
			elif m == 2: comm = comm + '\x02\x00' #-> Command Instance: Motion Type 2: Linear Absolute
			elif m == 3: comm = comm + '\x03\x00' #-> Command Instance: Motion Type 3: Linear Increment
			comm = comm + '\x01\x02\x00\x00'
		# ~ ------------------------------------------------------------------------------------------------------------------------------------
		# ~ Parse Data
		# ~ ------------------------------------------------------------------------------------------------------------------------------------
		if 1: #always true: visual grouping of the data words
			#Robot & Station ID-----------------------------------------------------------------------------------------------------------------
			comm = comm + '\x01\x00\x00\x00' #-> Data word 1: Robot Number (Hard Coded to 1)
			comm = comm + '\x00\x00\x00\x00' #-> Data word 2: Station Number (Hard Coded to 0)
			#speed type-------------------------------------------------------------------------------------------------------------------------
			if s == 1: comm = comm + '\x00\x00\x00\x00' #-> Data word 3: Speed Type 1: % Max speed in 0.01 %
			elif s == 2: comm = comm + '\x01\x00\x00\x00' #-> Data word 3: Speed Type 2: Linear Speed in 0.1 mm/s
			elif s == 3: comm = comm + '\x02\x00\x00\x00' #-> Data word 3: Speed Type 3: Rotate Speed in 0.1 deg/s
			#speed for speed type (clamped to the valid range for each type)--------------------------------------------------------------------
			if s == 1: comm = comm + self.get_word(max(min(v,100),0.01),2) #-> Data word 4: Robot Motion Speed in 0.01%
			elif s == 2: comm = comm + self.get_word(max(min(v,999),0.10),1) #-> Data word 4: Robot Motion Speed in 0.1mm/s
			elif s == 3: comm = comm + self.get_word(max(min(v,499),0.10),1) #-> Data word 4: Robot Motion Speed in 0.1deg/s
			#Co-ordinate Frame------------------------------------------------------------------------------------------------------------------
			comm = comm + self.get_word(16,0) #-> Data word 5: Coordinate Frame Hard Coded to Base Frame
			#Robot Position & Tool Orientation--------------------------------------------------------------------------------------------------
			comm = comm + self.get_word(px,3) #-> Data word 6: Robot X position in 1e-3 mm
			comm = comm + self.get_word(py,3) #-> Data word 7: Robot Y position in 1e-3 mm
			comm = comm + self.get_word(pz,3) #-> Data word 8: Robot Z position in 1e-3 mm
			comm = comm + self.get_word(rx,4) #-> Data word 9: Robot X rotation in 1e-4 deg
			comm = comm + self.get_word(ry,4) #-> Data word 10: Robot Y rotation in 1e-4 deg
			comm = comm + self.get_word(rz,4) #-> Data word 11: Robot Z rotation in 1e-4 deg
			#0 padding for words 12 to 13 (reserve)---------------------------------------------------------------------------------------------
			comm = comm + self.get_word(0,0) #-> Data word 12: Pad Reserve with 0s
			comm = comm + self.get_word(0,0) #-> Data word 13: Pad Reserve with 0s
			#orientation & extended type (the t and e arguments are ignored here)---------------------------------------------------------------
			comm = comm + self.get_word(3,0) #-> Data word 14: Hard coded Orientation Type to \x03
			comm = comm + self.get_word(0,0) #-> Data word 15: Hard coded Extended Type to \x00
			#0 padding for words 16 to 26 (unused)----------------------------------------------------------------------------------------------
			for i in range(16,27): comm = comm + self.get_word(0,0) #-> Data word 16-26: Pad Unused with 0s
		# ~ --------------------------------------------------------------------------------------------------------------------------------
		# ~ Send Command Receive Data
		# ~ --------------------------------------------------------------------------------------------------------------------------------
		data = '';
		while self.rob_chkout: pass #busy-wait for exclusive use of the shared UDP socket
		self.rob_chkout = True;
		self.sock_udp.sendto(comm,("192.168.1.31",10040))
		data,addr = self.sock_udp.recvfrom(512)
		self.rob_chkout = False;
		# ~ --------------------------------------------------------------------------------------------------------------------------------
		# ~ Parse Received Data
		# ~ --------------------------------------------------------------------------------------------------------------------------------
		#~ if not ord(data[25]) + ord(data[26]):
		if m == 3: # do not re-send increment move because of move wait
			# convert the increment into an absolute target -- presumably so that
			# udp_wait's command/args can be re-issued safely; TODO confirm
			m = 2; cur_pos = self.udp_rpos()[0:6];
			px = px + cur_pos[0];py = py + cur_pos[1];pz = pz + cur_pos[2];
			rx = rx + cur_pos[3];ry = ry + cur_pos[4];rz = rz + cur_pos[5];
			args = (m, s, v, px, py, pz, rx, ry, rz, t, e);
		pos = [px, py, pz, rx, ry, rz] #cartesian target used by udp_wait to confirm arrival
		self.udp_wait(self.udp_movj,args,pos); #block until motion complete
		#NOTE(review): this also prints the error text whenever dbg is set, even on success
		if self.dbg or not(not ord(data[25]) + ord(data[26])): print("Error with Joint Move Command");self.udp_dbug(comm,data);return -1;
		return 1
def udp_movp(self,args): #udp move pulse
m, s, v, ps, pl, pu, pr, pb, pt, pos = args
""" Doc
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ Notes
# ~ --------------------------------------------------------------------------------------------------------------------
# ~ this function uses the yaskawa hi-speed udp server to move robot using pulse
# ~ inputs:
# ~ m = motion Type,
# ~ 1 = joint,
# ~ 2 = linear,
# ~ s = speed Type,
# ~ 1 = Percentage of Max Speed, for m = 1 only
# ~ 2 = Linear speed in 0.1 mm/s, for m = 2,3 only
# ~ 3 = Rotation speed in 0.1 deg/s, for m = 2,3 only
# ~ v = Speed Value, must be specified in the type specified by s, no checks performed
# ~ ps= S Rotation, specified in pulse
# ~ pl= L Rotation, specified in pulse
# ~ pu= U Rotation, specified in pulse
# ~ pr= R Rotation, specified in pulse
# ~ pb= B Rotation, specified in pulse
# ~ pt= T Rotation, specified in pulse
#~ pos = List of cartesian Position Equivalent of Pulse Rotations
"""
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Header
# ~ --------------------------------------------------------------------------------------------------------------------------------
if 1:
#~ # ~ -------------------------------------------------------------------------------------------------------------------------
comm = '\x59\x45\x52\x43\x20\x00\x58\x00\x03\x01\x00\x00\x00\x00\x00\x00\x39\x39\x39\x39\x39\x39\x39\x39'
# ~ ----------------------------------------------------------------------------------------------------------------------------
comm = comm + '\x8b\x00' #-> Command ID Number for Move Command
# ~ ----------------------------------------------------------------------------------------------------------------------------
if m == 1: comm = comm + '\x01\x00' #-> Command Instance: Motion Type 1: Joint
elif m == 2: comm = comm + '\x02\x00' #-> Command Instance: Motion Type 2: Linear
# ~ ----------------------------------------------------------------------------------------------------------------------------
comm = comm + '\x01\x02\x00\x00'
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
if 1:
#Robot & Station ID-------------------------------------------------------------------------------------------------------------
comm = comm + '\x01\x00\x00\x00' #-> Data word 1: Robot Number (Hard Coded to 1)
comm = comm + '\x00\x00\x00\x00' #-> Data word 2: Station Number (Hard Coded to 0)
#speed type---------------------------------------------------------------------------------------------------------------------
if s == 1: comm = comm + '\x00\x00\x00\x00' #-> Data word 3: Speed Type 1: % Max speed in 0.01 %
elif s == 2: comm = comm + '\x01\x00\x00\x00' #-> Data word 3: Speed Type 2: Linear Speed in 0.1 mm/s
elif s == 3: comm = comm + '\x02\x00\x00\x00' #-> Data word 3: Speed Type 3: Rotate Speed in 0.1 deg/s
#speed for speed type-----------------------------------------------------------------------------------------------------------
if s == 1: comm = comm + self.get_word(max(min(v,100),0.01),2) #-> Data word 4: Robot Motion Speed in 0.01%
elif s == 2: comm = comm + self.get_word(max(min(v,999),0.10),1) #-> Data word 4: Robot Motion Speed in 0.1mm/s
elif s == 3: comm = comm + self.get_word(max(min(v,499),0.10),1) #-> Data word 4: Robot Motion Speed in 0.1deg/s
#Robot Position & Tool Orientation----------------------------------------------------------------------------------------------
comm = comm + self.get_word(ps,0) #-> Data word 5: Robot X position in 1e-3 mm
comm = comm + self.get_word(pl,0) #-> Data word 6: Robot Y position in 1e-3 mm
comm = comm + self.get_word(pu,0) #-> Data word 7: Robot Z position in 1e-3 mm
comm = comm + self.get_word(pr,0) #-> Data word 8: Robot X rotation in 1e-4 deg
comm = comm + self.get_word(pb,0) #-> Data word 9: Robot Y rotation in 1e-4 deg
comm = comm + self.get_word(pt,0) #-> Data word 10: Robot Z rotation in 1e-4 deg
#0 padding for words 11 to 22 (unused)------------------------------------------------------------------------------------------
for i in range(11,23): comm = comm + self.get_word(0,0) #-> Data word 11-22: Pad with 0s
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Send Command Receive Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
data = ''
while self.rob_chkout: pass
self.rob_chkout = True;
self.sock_udp.sendto(comm,("192.168.1.31",10040))
data,addr = self.sock_udp.recvfrom(512)
self.rob_chkout = False;
# ~ --------------------------------------------------------------------------------------------------------------------------------
# ~ Parse Received Data
# ~ --------------------------------------------------------------------------------------------------------------------------------
#~ if not ord(data[25]) + ord(data[26]):
self.udp_wait(self.udp_movp,args,pos);
if self.dbg or not(not ord(data[25]) + ord(data[26])): print("Error with Pulse Move Command");self.udp_dbug(comm,data);return -1;
return 1
def udp_wait(self,command, args, pos): #wait for motion command
#~ print "-----------------------------------------------------------------------------------------------------------------------------"
#~ print "STARTING MOVE WAIT"
#~ print "-----------------------------------------------------------------------------------------------------------------------------"
dim = 100; saf = 4; run = 1; srv = 0; tog = 0; col = 1; ylo = False; ang = 100 #target;safety;runing;servof;toggle;light
while dim > 10 or ang > 5 or run == 1 or srv == 0 or saf != 3: #while command not complete
if self.dbg:
print "Position Error: \t", dim
print "Orientation Error:\t", ang, "(Discarded)"
print "Running Bit: \t", run
print "Servo Bit: \t", srv
print "Safe Bit: \t", saf
pass
if 1: #read and calculate data
msg = "";
a = self.udp_rsaf();
b = self.udp_rstt();
c = self.udp_rpos(p=0)[0:6];
if a != -1:
saf = a;
col = saf[4] or saf[5];
gat = saf[3];
saf = sum(saf[0:3]);
if b != -1:
stt = b;
mod = stt[0];
srv = stt[9];
run = stt[4];
slo = stt[3];
if c != -1:
pt1 = c;
if not pos == None: #if check target flag is on
dim = [pt1[0]-pos[0], pt1[1]-pos[1], pt1[2]-pos[2]] #check if robot reached target
dim = (dim[0]**2 + dim[1]**2 + dim[2]**2)**0.5 #calculate delta position norm
#~ ang = [pt1[3]-pos[3], pt1[4]-pos[4], pt1[5]-pos[5]] #check if robot reached target
#~ ang = (ang[0]**2 + ang[1]**2 + ang[2]**2)**0.5 #calculate delta position norm
ang = 0; #didnt work as well as i thought...
else: dim = 0;ang = 0 #if not target check set to 0
if 1: #parse warnings if warning
if mod!=1: print(" Error! Robot Not in Command Mode");sys.exit()
if col: print("Error! Collaborative Safety Triggered.");self.udp_serv(on=0);srv = 0;
if not srv: #if servo off = trigger
if 1: print("Error! Servo Off.") #send message servo off
if col: print("Error! Collaborative Safety Triggered") #if collaborative trigger
if saf != 3: print("Error! E Stop Triggered.") #if emergency stop trigger
elif saf == 3 and not col: #if off and safe
print ("Safety Clear. Restoring Servo Power.") #read alarm,reset alarm, restore servo
self.udp_ralm();
self.udp_rset();
self.udp_serv();
print ("Resuming Motion, Please Stay Back")
command(args); return 1;
if not gat and srv: print("Safety Gate Triggered"); ylo = 1;
elif gat and srv and ylo: print("Safety Gate Clear"); ylo = 0;
#~ print "-----------------------------------------------------------------------------------------------------------------------------"
#~ print "ENDING MOVE WAIT"; time.sleep(0.025);
#~ print "-----------------------------------------------------------------------------------------------------------------------------"
return 1
def udp_dbug(self,comm,data): #print udp command and response
if 1:#split header & data
senReqestData = comm[32:len(comm)]
recReqestData = data[32:len(data)]
datasize = len(data)
commsize = len(comm)
if 1: #comm head
senIdentifier = comm[0:4] #bytes 0,1,2,3 4 bytes
senHeaderSize = comm[4:6] #bytes 4,5 2 bytes
senDataPartsz = comm[6:8] #bytes 6,7 2 bytes
senReserveBt1 = comm[8] #bytes 8 1 bytes
senPricessDiv = comm[9] #bytes 9 1 bytes
senAcknowledg = comm[10] #bytes 10 1 bytes
senRequest_ID = comm[11] #bytes 11 1 bytes
senBlock_numb = comm[12:16] #bytes 12,13,14,15 4 bytes
senReservebt2 = comm[16:24] #bytes 16,17,18,19,20,21,22,23 8 bytes
senCommandnum = comm[24:26] #bytes 24,25 2 bytes
senInstanceID = comm[26:28] #bytes 26,27 2 bytes
senAttributes = comm[28] #bytes 28 1 bytes
senServicsreq = comm[29] #bytes 29 1 bytes
senPaddingbyt = comm[30:32] #bytes 30,31 2 bytes
if 1: #resp head
recIdentifier = data[0:4] #bytes 0,1,2,3 4 bytes
recHeaderSize = data[4:6] #bytes 4,5 2 bytes
recDataPartsz = data[6:8] #bytes 6,7 2 bytes
recReserveBt1 = data[8] #bytes 8 1 bytes
recPricessDiv = data[9] #bytes 9 1 bytes
recAcknowledg = data[10] #bytes 10 1 bytes
recRequest_ID = data[11] #bytes 11 1 bytes
recBlock_numb = data[12:16] #bytes 12,13,14,15 4 bytes
recReservebt2 = data[16:24] #bytes 16,17,18,19,20,21,22,23 8 bytes
recServiceByt = data[24] #bytes 24 1 bytes
recStatusByte = data[25] #bytes 25 1 bytes
recAddStatbyt = data[26] #bytes 26 1 bytes
recPaddingbyt = data[27] #bytes 27 1 bytes
recAddStatsiz = data[28:30] #bytes 28,29 1 bytes
recPaddingsiz = data[30:32] #bytes 30,31 1 bytes
if 1: #comm sent
print "----------------------------------------------------------------------------"
print "Total Bytes Sent: ", commsize
print "----------------------------------------------------------------------------"
print "Identifier: ", [hex(ord(x))[2:].zfill(2) for x in senIdentifier]
print "HeaderSize: ", [hex(ord(x))[2:].zfill(2) for x in senHeaderSize]
print "DataPartsz: ", [hex(ord(x))[2:].zfill(2) for x in senDataPartsz]
print "Reservebt1: ", [hex(ord(x))[2:].zfill(2) for x in senReserveBt1]
print "ProcessDiv: ", [hex(ord(x))[2:].zfill(2) for x in senPricessDiv]
print "Acknowledg: ", [hex(ord(x))[2:].zfill(2) for x in senAcknowledg]
print "Request_ID: ", [hex(ord(x))[2:].zfill(2) for x in senRequest_ID]
print "Block_numb: ", [hex(ord(x))[2:].zfill(2) for x in senBlock_numb]
print "Reservebt2: ", [hex(ord(x))[2:].zfill(2) for x in senReservebt2]
print "Commandnum: ", [hex(ord(x))[2:].zfill(2) for x in senCommandnum]
print "InstanceID: ", [hex(ord(x))[2:].zfill(2) for x in senInstanceID]
print "Attributes: ", [hex(ord(x))[2:].zfill(2) for x in senAttributes]
print "Servicsreq: ", [hex(ord(x))[2:].zfill(2) for x in senServicsreq]
print "Paddingsiz: ", [hex(ord(x))[2:].zfill(2) for x in senPaddingbyt]
if 1: #data sent
print "----------------------------------------------------------------------------"
print "SENT DATA: ", len(comm)-32, " bytes"
print "----------------------------------------------------------------------------"
if len(comm) > 32:
comdat = [hex(ord(x))[2:].zfill(2) for x in senReqestData]
for i in xrange(0,len(comdat),4):
print comdat[i:i+4]
if 1: #resp recd
print "----------------------------------------------------------------------------"
print "Total Bytes Recd: ", datasize
print "----------------------------------------------------------------------------"
print "Identifier: ", [hex(ord(x))[2:].zfill(2) for x in recIdentifier]
print "HeaderSize: ", [hex(ord(x))[2:].zfill(2) for x in recHeaderSize]
print "DataPartsz: ", [hex(ord(x))[2:].zfill(2) for x in recDataPartsz]
print "Reservebt1: ", [hex(ord(x))[2:].zfill(2) for x in recReserveBt1]
print "ProcessDiv: ", [hex(ord(x))[2:].zfill(2) for x in recPricessDiv]
print "Acknowledg: ", [hex(ord(x))[2:].zfill(2) for x in recAcknowledg]
print "Request_ID: ", [hex(ord(x))[2:].zfill(2) for x in recRequest_ID]
print "Block_numb: ", [hex(ord(x))[2:].zfill(2) for x in recBlock_numb]
print "Reservebt2: ", [hex(ord(x))[2:].zfill(2) for x in recReservebt2]
print "ServiceByt: ", [hex(ord(x))[2:].zfill(2) for x in recServiceByt]
print "StatusByte: ", [hex(ord(x))[2:].zfill(2) for x in recStatusByte]
print "AddStatbyt: ", [hex(ord(x))[2:].zfill(2) for x in recAddStatbyt]
print "Paddingbyt: ", [hex(ord(x))[2:].zfill(2) for x in recPaddingbyt]
print "AddStatsiz: ", [hex(ord(x))[2:].zfill(2) for x in recAddStatsiz]
print "Paddingsiz: ", [hex(ord(x))[2:].zfill(2) for x in recPaddingsiz]
if 1: #data recd
print "----------------------------------------------------------------------------"
print "RECD DATA: ", len(data)-32, " bytes"
print "----------------------------------------------------------------------------"
if len(data) > 32:
reqdat = [hex(ord(x))[2:].zfill(2) for x in recReqestData]
for i in xrange(0,len(reqdat),4):
print reqdat[i:i+4]
return 0
#~ -----------------------------------------------------------------------------------------------------------------------------------------
#VAR READ WRITE FOR ON THE FLY JOB ***INCOMPLETE***
#~ -----------------------------------------------------------------------------------------------------------------------------------------
def udp_pvar(self): #get set point
    """Get/set Point (P) variable data via the hi-speed UDP server.

    ***INCOMPLETE*** stub: the command packet is never built or sent, so
    no exchange actually happens yet.  Intended contract: return 1 when
    the response status bytes 25/26 report success, otherwise -1.
    """
    comm = ''   # TODO: build the command packet (see other udp_* methods for header layout)
    data = ''   # TODO: send comm via self.sock_udp and read the response
    # guard the length: the original indexed data[25] on an empty string,
    # raising IndexError every time this stub was called
    if len(data) > 26 and not ord(data[25]) + ord(data[26]): return 1
    return -1
def udp_dvar(self): #get set double
    """Get/set Double (D) variable data via the hi-speed UDP server.

    ***INCOMPLETE*** stub: the command packet is never built or sent, so
    no exchange actually happens yet.  Intended contract: return 1 when
    the response status bytes 25/26 report success, otherwise -1.
    """
    comm = ''   # TODO: build the command packet (see other udp_* methods for header layout)
    data = ''   # TODO: send comm via self.sock_udp and read the response
    # guard the length: the original indexed data[25] on an empty string,
    # raising IndexError every time this stub was called
    if len(data) > 26 and not ord(data[25]) + ord(data[26]): return 1
    return -1
def udp_ivar(self): #get set integer
    """Get/set Integer (I) variable data via the hi-speed UDP server.

    ***INCOMPLETE*** stub: the command packet is never built or sent, so
    no exchange actually happens yet.  Intended contract: return 1 when
    the response status bytes 25/26 report success, otherwise -1.
    """
    comm = ''   # TODO: build the command packet (see other udp_* methods for header layout)
    data = ''   # TODO: send comm via self.sock_udp and read the response
    # guard the length: the original indexed data[25] on an empty string,
    # raising IndexError every time this stub was called
    if len(data) > 26 and not ord(data[25]) + ord(data[26]): return 1
    return -1
def udp_bvar(self): #get set byte
    """Get/set Byte (B) variable data via the hi-speed UDP server.

    ***INCOMPLETE*** stub: the command packet is never built or sent, so
    no exchange actually happens yet.  Intended contract: return 1 when
    the response status bytes 25/26 report success, otherwise -1.
    """
    comm = ''   # TODO: build the command packet (see other udp_* methods for header layout)
    data = ''   # TODO: send comm via self.sock_udp and read the response
    # guard the length: the original indexed data[25] on an empty string,
    # raising IndexError every time this stub was called
    if len(data) > 26 and not ord(data[25]) + ord(data[26]): return 1
    return -1
cannot switch to command mode without key anyway, hardware safety #servo on #servo off #display pendant message #read byt_num of gpio starting at stt_add #write bit_nums starting from stt_add #check input #parse data #run job name n, and read complete flag o NOTES: -> this function will run a job n on robot controller and wait for an output flag to be set if 0 != 0 -> the function will wait a minimum of one second until the function is complete -> n = string name of job -> o = job complete flag output bit (Need to set on pendant) #move robot home position pulse = 0 #move joint to absolute position v = velocity (in % Speed) px = position x py = position y pz = position z rx = rotation x ry = rotation y rz = rotation z tp = orientation type -> please see documentation (default to type 6) frame is defaulted to "0" which is world frame #final position, used to confirm motion complete using read position #linear move to absolute position v = velocity (in mm/s) px = position x py = position y pz = position z rx = rotation x ry = rotation y rz = rotation z tp = orientation type -> please see documentation (default to type 6) frame is defaulted to "0" which is world frame #final position, used to confirm motion complete using read position #incremental move Use increment move command with increment data v = velocity, see lv/rv flag dx = incremental position x dy = incremental position y dz = incremental position z da = incremental rotation x db = incremental rotation y dc = incremental rotation z rv = force speed rotational lv = force speed linear #if no linear distance, use rotate speed #else use linear speed #if optional rv provided use linear speed #if optional lv provided use rotate speed #get initial position of robot #calculate final position of robot #joint incremental move with current position read Use joint move command with increment data v = velocity, see lv/rv flag dx = incremental position x dy = incremental position y dz = incremental position z da = 
incremental rotation x db = incremental rotation y dc = incremental rotation z #read current position... #get position & rotation #parse input vector #add input vector to current positon... #linear incremental move with current position read Use Linear move command with increment data v = velocity, see lv/rv flag dx = incremental position x dy = incremental position y dz = incremental position z da = incremental rotation x db = incremental rotation y dc = incremental rotation z #read current position... #get position & rotation #parse input vector #add input vector to current positon... #multipoint move Send Continuous fire points pts = list of each point with v,px,py,pz,rx,ry,rz,type for absolute or pulse motion pts = list of each point with v,dx,dy,dz,da,db,dc, for incremental motion ind = flag to set if motion settings are set individually if 1, inc = inc[i] = 1 if pts[i] is incremenetal else 0 pls = pls[i] = 1 if pts[i] is pulse motion else 0 xyz = xyz[i] = 1 if pts[i] is absolute move else 0 jnt = jnt[i] = 1 if pts[i] is joint motion else 0 lin = lin[i] = 1 if pts[i] is linear motion else 0 length of point and motion definition must be length of points if 0, all point definitions are set to either incremental = if inc = 1 or pulse = if pls = 1 or absolute = if xyz = 1 all motion types are set to joint = if jnt = 1 or linear = if lin = 1 either jnt or lin must be set to 1 either inc/pls/xyz must be set to 1 #atleast one point required #error 1 not enough points #atleast v + 6axis required #error 2 points incompletely defined #orientation types required #error 3 type variable not sent for absolute motion #if individual motion not specified #ensure individual motion for each point in path #error 4 motion types for each point not specified #create path point list #comm list #data list #host control request -> infinite continuous fire #command header #parse each command and data in path #open socket to robot for continuous fire #send each command #Monitor Running 
Bit Status #Send Next Path Command #Send Next Path Command Data #~ ----------------------------------------------------------------------------------------------------------------------------------------- #~ UDP COMMANDS #~ ----------------------------------------------------------------------------------------------------------------------------------------- #udp read joint torque Doc #~ ---------------------------- #~ Note: Read Joint Torques #~ ---------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Send Command Receive Data # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Received Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #udp i.o. readwrite doc # ~ wrfl = read or write flag, #~ 0 = Read #~ 1 = Write # ~ addr = io register specified as addr, divied by 10 to fit 2 bytes # ~ bits = set values, must write 8 bits at a time. 
# ~ ------------------------------------------------------------------------------------------------------------------------------------ # ~ Parse Command # ~ ------------------------------------------------------------------------------------------------------------------------------------ # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Send Command Receive Data # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Received Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #if not write, return data recv #No response #parse if response #unpack response byte #parse bits #return result if no errror #if write, return data sent #get 32-bit int, 16 Doc #~ Notes: #~ w = number to create word packet (32 bit signed integer) #~ o = order multiplier to number to create integer 10e^o #udp read position doc # ~ read robot position using udp server command hard coded to return cartesian data possible to request pulse data with flag p = 1 if 0: #debug.print Parsed Data print "----------------------------------------------------------------------------" print "Parsed Data..." 
print "----------------------------------------------------------------------------" if not p: print " PX: ", axs[0] print " PY: ", axs[1] print " PZ: ", axs[2] print " AX: ", axs[3] print " AY: ", axs[4] print " AZ: ", axs[5] print " TP: ", t print " ET: ", e else: print " PS: ", axs[0] print " PL: ", axs[1] print " PU: ", axs[2] print " PR: ", axs[3] print " PB: ", axs[4] print " PT: ", axs[5] print "----------------------------------------------------------------------------" # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Send Command Receive Data # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Received Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #list of 4byte chunks #list of axis coordinates #get data part of packet #separate data words and extract requested data #unpack 4 byte packets as signed 32 bit integer #Parse cartesian data #10e-3 for position #10e-4 for orientation #get pose type for cartesian #extended type for cartesian #-> read status doc # ~ Read Robot Status Byte 1 & 2 #~ byte 1: #~ bit 0: Mode Step #~ bit 1: Mode Cycle #~ bit 2: Mode Continuous #~ bit 3: Is Running #~ bit 4: Is Safety #~ bit 5: Mode Teach #~ bit 6: Mode Play #~ bit 7: Mode Remote #~ byte 2: #~ bit 0: Unused #~ bit 1: Hold Pendant #~ bit 2: Hold External #~ bit 3: Hold Remote #~ bit 4: Alarm Flag #~ bit 5: Error Flag #~ bit 6: Servo Status #~ bit 7: Unused # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Send Command Receive Data # ~ 
-------------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Received Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #-> read alarm Doc ---------------------------------------- Notes: ---------------------------------------- Function to Read Last Alarm ---------------------------------------- # ~ ------------------------------------------------------------------------------------------------------------------------------------ # ~ Parse Command # ~ ------------------------------------------------------------------------------------------------------------------------------------ # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Send Command Receive Data # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Received Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #-> reset alarm & error Doc ---------------------------------------- Notes: ---------------------------------------- Function: Cancel Alarm & Error Status Required to Resume Servo On ---------------------------------------- # ~ ------------------------------------------------------------------------------------------------------------------------------------ # ~ Parse Command Comm1 = Cancel Alarm, Comm2 = Cancel Error # ~ 
------------------------------------------------------------------------------------------------------------------------------------ # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Send Command Receive Data # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Received Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #-> servo on off # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Send Command Receive Data # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Received Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #read Safety Bits implementation of iorw Doc Read the Safety IO Bits Note the Registers May Be Dependent on Wiring & Logical Setup For All Robots: E-stop Status at Reg 80020 Area Scanner Status at Reg 80400 For Collaborative Robots Only: Bump Status at Reg 81380 Hard Bump Status at Reg 81382 Soft Bump Status at Reg 81383 Input s: s=0 non collaborative robot, s=1 collaborative safe robot #udp move cartesian Doc # ~ -------------------------------------------------------------------------------------------------------------------- # ~ Notes # ~ -------------------------------------------------------------------------------------------------------------------- # ~ this 
function uses the yaskawa hi-speed udp server to move robot # ~ inputs: # ~ m = motion Type, # ~ 1 = joint, # ~ 2 = linear, # ~ 3 = linear increment # ~ s = speed Type, # ~ 1 = Percentage of Max Speed, for m = 1 only # ~ 2 = Linear speed in 0.1 mm/s, for m = 2,3 only # ~ 3 = Rotation speed in 0.1 deg/s, for m = 2,3 only # ~ v = Speed Value, must be specified in the type specified by s, no checks performed # ~ px= X Coordinate, specified in milimeters and converted to micro meters (10e-6) # ~ py= Y Coordinate, specified in milimeters and converted to micro meters (10e-6) # ~ py= Z Coordinate, specified in milimeters and converted to micro meters (10e-6) # ~ rx= X Rotation, specified in degrees and converted to 0.1 mili deg (10e-4) # ~ ry= Y Rotation, specified in degrees and converted to 0.1 mili deg (10e-4) # ~ rz= Z Rotation, specified in degrees and converted to 0.1 mili deg (10e-4) # ~ t = Orientation Type, axis coordinate and flip conditions (Hard Coded) # ~ ------------------------------------------------------------------------------------------------------------------------------------ # ~ Parse Header # ~ ------------------------------------------------------------------------------------------------------------------------------------ # ~ ------------------------------------------------------------------------------------------------------------------------------------ #-> Command ID Number for Move Command # ~ ------------------------------------------------------------------------------------------------------------------------------------ #-> Command Instance: Motion Type 1: Joint #-> Command Instance: Motion Type 2: Linear Absolute #-> Command Instance: Motion Type 2: Linear Increment # ~ ------------------------------------------------------------------------------------------------------------------------------------ # ~ Parse Data # ~ 
------------------------------------------------------------------------------------------------------------------------------------ #Robot & Station ID----------------------------------------------------------------------------------------------------------------- #-> Data word 1: Robot Number (Hard Coded to 1) #-> Data word 2: Station Number (Hard Coded to 0) #speed type------------------------------------------------------------------------------------------------------------------------- #-> Data word 3: Speed Type 1: % Max speed in 0.01 % #-> Data word 3: Speed Type 2: Linear Speed in 0.1 mm/s #-> Data word 3: Speed Type 3: Rotate Speed in 0.1 deg/s #speed for speed type--------------------------------------------------------------------------------------------------------------- #-> Data word 4: Robot Motion Speed in 0.01% #-> Data word 4: Robot Motion Speed in 0.1mm/s #-> Data word 4: Robot Motion Speed in 0.1deg/s #Co-ordinate Frame------------------------------------------------------------------------------------------------------------------ #-> Data word 5: Coordinate Frame Hard Coded to Base Frame #Robot Position & Tool Orientation-------------------------------------------------------------------------------------------------- #-> Data word 6: Robot X position in 1e-3 mm #-> Data word 7: Robot Y position in 1e-3 mm #-> Data word 8: Robot Z position in 1e-3 mm #-> Data word 9: Robot X rotation in 1e-4 deg #-> Data word 10: Robot Y rotation in 1e-4 deg #-> Data word 11: Robot Z rotation in 1e-4 deg #0 padding for words 12 to 13 (reserve)--------------------------------------------------------------------------------------------- #-> Data word 12: Pad Reserve with 0s #-> Data word 13: Pad Reserve with 0s #0 padding for words 12 to 13 (unused)---------------------------------------------------------------------------------------------- #-> Data word 14: Hard coded Orientation Type to \x03 #-> Data word 15: Hard coded Extended Type to \x00 #0 padding for 
words 15 to 22 (unused)---------------------------------------------------------------------------------------------- #-> Data word 16-26: Pad Unused with 0s # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Send Command Receive Data # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Received Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #~ if not ord(data[25]) + ord(data[26]): # do not re-send increment move because of move wait #udp move pulse Doc # ~ -------------------------------------------------------------------------------------------------------------------- # ~ Notes # ~ -------------------------------------------------------------------------------------------------------------------- # ~ this function uses the yaskawa hi-speed udp server to move robot using pulse # ~ inputs: # ~ m = motion Type, # ~ 1 = joint, # ~ 2 = linear, # ~ s = speed Type, # ~ 1 = Percentage of Max Speed, for m = 1 only # ~ 2 = Linear speed in 0.1 mm/s, for m = 2,3 only # ~ 3 = Rotation speed in 0.1 deg/s, for m = 2,3 only # ~ v = Speed Value, must be specified in the type specified by s, no checks performed # ~ ps= S Rotation, specified in pulse # ~ pl= L Rotation, specified in pulse # ~ pu= U Rotation, specified in pulse # ~ pr= R Rotation, specified in pulse # ~ pb= B Rotation, specified in pulse # ~ pt= T Rotation, specified in pulse #~ pos = List of cartesian Position Equivalent of Pulse Rotations # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Header # ~ 
-------------------------------------------------------------------------------------------------------------------------------- #~ # ~ ------------------------------------------------------------------------------------------------------------------------- # ~ ---------------------------------------------------------------------------------------------------------------------------- #-> Command ID Number for Move Command # ~ ---------------------------------------------------------------------------------------------------------------------------- #-> Command Instance: Motion Type 1: Joint #-> Command Instance: Motion Type 2: Linear # ~ ---------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #Robot & Station ID------------------------------------------------------------------------------------------------------------- #-> Data word 1: Robot Number (Hard Coded to 1) #-> Data word 2: Station Number (Hard Coded to 0) #speed type--------------------------------------------------------------------------------------------------------------------- #-> Data word 3: Speed Type 1: % Max speed in 0.01 % #-> Data word 3: Speed Type 2: Linear Speed in 0.1 mm/s #-> Data word 3: Speed Type 3: Rotate Speed in 0.1 deg/s #speed for speed type----------------------------------------------------------------------------------------------------------- #-> Data word 4: Robot Motion Speed in 0.01% #-> Data word 4: Robot Motion Speed in 0.1mm/s #-> Data word 4: Robot Motion Speed in 0.1deg/s #Robot Position & Tool Orientation---------------------------------------------------------------------------------------------- #-> Data word 5: Robot X 
position in 1e-3 mm #-> Data word 6: Robot Y position in 1e-3 mm #-> Data word 7: Robot Z position in 1e-3 mm #-> Data word 8: Robot X rotation in 1e-4 deg #-> Data word 9: Robot Y rotation in 1e-4 deg #-> Data word 10: Robot Z rotation in 1e-4 deg #0 padding for words 11 to 22 (unused)------------------------------------------------------------------------------------------ #-> Data word 11-22: Pad with 0s # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Send Command Receive Data # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ -------------------------------------------------------------------------------------------------------------------------------- # ~ Parse Received Data # ~ -------------------------------------------------------------------------------------------------------------------------------- #~ if not ord(data[25]) + ord(data[26]): #wait for motion command #~ print "-----------------------------------------------------------------------------------------------------------------------------" #~ print "STARTING MOVE WAIT" #~ print "-----------------------------------------------------------------------------------------------------------------------------" #target;safety;runing;servof;toggle;light #while command not complete #read and calculate data #if check target flag is on #check if robot reached target #calculate delta position norm #~ ang = [pt1[3]-pos[3], pt1[4]-pos[4], pt1[5]-pos[5]] #check if robot reached target #~ ang = (ang[0]**2 + ang[1]**2 + ang[2]**2)**0.5 #calculate delta position norm #didnt work as well as i thought... 
#if not target check set to 0 #parse warnings if warning #if servo off = trigger #send message servo off #if collaborative trigger #if emergency stop trigger #if off and safe #read alarm,reset alarm, restore servo #~ print "-----------------------------------------------------------------------------------------------------------------------------" #~ print "ENDING MOVE WAIT"; time.sleep(0.025); #~ print "-----------------------------------------------------------------------------------------------------------------------------" #print udp command and response #split header & data #comm head #bytes 0,1,2,3 4 bytes #bytes 4,5 2 bytes #bytes 6,7 2 bytes #bytes 8 1 bytes #bytes 9 1 bytes #bytes 10 1 bytes #bytes 11 1 bytes #bytes 12,13,14,15 4 bytes #bytes 16,17,18,19,20,21,22,23 8 bytes #bytes 24,25 2 bytes #bytes 26,27 2 bytes #bytes 28 1 bytes #bytes 29 1 bytes #bytes 30,31 2 bytes #resp head #bytes 0,1,2,3 4 bytes #bytes 4,5 2 bytes #bytes 6,7 2 bytes #bytes 8 1 bytes #bytes 9 1 bytes #bytes 10 1 bytes #bytes 11 1 bytes #bytes 12,13,14,15 4 bytes #bytes 16,17,18,19,20,21,22,23 8 bytes #bytes 24 1 bytes #bytes 25 1 bytes #bytes 26 1 bytes #bytes 27 1 bytes #bytes 28,29 1 bytes #bytes 30,31 1 bytes #comm sent #data sent #resp recd #data recd #~ ----------------------------------------------------------------------------------------------------------------------------------------- #VAR READ WRITE FOR ON THE FLY JOB ***INCOMPLETE*** #~ ----------------------------------------------------------------------------------------------------------------------------------------- #get set point Doc # ~ -------------------------------------------------------------------------------------------------------------------- # ~ Notes # ~ -------------------------------------------------------------------------------------------------------------------- # ~ this function uses the yaskawa hi-speed udp server to set or get Point Variable Data #get set double Doc # ~ 
-------------------------------------------------------------------------------------------------------------------- # ~ Notes # ~ -------------------------------------------------------------------------------------------------------------------- # ~ this function uses the yaskawa hi-speed udp server to set or get Double Variable Data #get set integer Doc # ~ -------------------------------------------------------------------------------------------------------------------- # ~ Notes # ~ -------------------------------------------------------------------------------------------------------------------- # ~ this function uses the yaskawa hi-speed udp server to set or get Integer Variable Data #get set byte Doc # ~ -------------------------------------------------------------------------------------------------------------------- # ~ Notes # ~ -------------------------------------------------------------------------------------------------------------------- # ~ this function uses the yaskawa hi-speed udp server to set or get Byte Variable Data | 2.63083 | 3 |
script/Run_KLEE.py | kupl/HOMI_public | 6 | 6624018 | from multiprocessing import Process
import signal
import os
import sys
import random
import json
import argparse
import datetime
# Wall-clock start of this script (recorded but not used elsewhere in this file)
start_time = datetime.datetime.now()
# Resolved once at import time.  All relative paths assume the script is
# launched from the script/ directory of the repository checkout.
configs = {
    'script_path': os.path.abspath(os.getcwd()),
    'top_dir': os.path.abspath('../experiments/'),
    'build_dir': os.path.abspath('../klee/build/')
}
def load_pgm_config(config_file):
    """Read *config_file* (a JSON document) and return the parsed object."""
    with open(config_file, 'r') as handle:
        return json.load(handle)
def gen_run_cmd(pgm, stgy, mem, small_time, iters, tool, ith_trial, result_dir):
    """Assemble the full shell command line that launches one KLEE run.

    pgm        -- benchmark name; <pgm>.bc is the bitcode file executed
    stgy       -- search heuristic ('roundrobin' expands to a two-heuristic mix)
    mem        -- memory cap string for --max-memory
    small_time -- per-run time budget (seconds, as string)
    iters      -- iteration counter, forwarded as -trial
    tool       -- 'homi' enables the Homi-specific switches after iteration 0
    ith_trial  -- trial index, forwarded as -parallel for Homi runs
    result_dir -- directory name (under configs['top_dir']) for run output
    """
    flags = [
        configs['build_dir'] + "/bin/klee",
        "-trial=" + str(iters),
        "--max-memory=" + mem,
        "--watchdog -max-time=" + small_time,
        "-dirname=" + configs['top_dir'] + "/" + result_dir,
        "-write-kqueries",
        "-only-output-states-covering-new",
        "--simplify-sym-indices",
        "--output-module=false",
        "--output-source=false",
        "--output-stats=false",
        "--disable-inlining",
        "--use-forked-solver",
        "--use-cex-cache",
        "--libc=uclibc",
        "--posix-runtime",
        "-env-file=" + configs['build_dir'] + "/../test.env",
        "--max-sym-array-size=4096",
        "--max-instruction-time=30",
        "--switch-type=internal",
        "--use-batching-search",
        "--batch-instructions=10000",
        "-ignore-solver-failures",
    ]
    # 'roundrobin' is shorthand for KLEE's two-heuristic round-robin mode.
    if stgy == "roundrobin":
        stgy = "random-path --search=nurs:covnew"
    # These two programs are run without --optimize; every other benchmark
    # gets the optimized build.
    if pgm not in ("gawk", "trueprint"):
        flags.append("--optimize")
    # Homi-specific switches are only added after the first iteration.
    if tool == "homi" and iters != 0:
        flags.extend(["-homi", "-parallel=" + str(ith_trial)])
    # Symbolic argument setup follows the KLEE coreutils experiments.
    # (https://klee.github.io/docs/coreutils-experiments/)
    if pgm == "dd":
        argv = "--sym-args 0 3 10 --sym-files 1 8 --sym-stdin 8 --sym-stdout"
    else:
        argv = "--sym-args 0 1 10 --sym-args 0 2 2 --sym-files 1 8 --sym-stdin 8 --sym-stdout"
    flags.extend(["--search=" + stgy, pgm + ".bc", argv])
    return " ".join(flags)
def run_all(l_config, pgm, stgy, mem, small_time, ith_trial, iters, tool, d_name):
    """Copy the benchmark into a private working directory, run KLEE once via
    gen_run_cmd, archive the klee-out-0 results, and delete the working copy.

    l_config   -- parsed program config; must provide 'pgm_dir' and 'exec_dir'
    pgm        -- benchmark program name (expects <pgm>.bc inside exec_dir)
    stgy       -- KLEE search heuristic name
    mem        -- memory cap string passed to KLEE
    small_time -- per-run time budget (seconds, as string)
    ith_trial  -- trial index; keeps parallel trials in separate directories
    iters      -- iteration counter of the surrounding Homi loop
    tool       -- 'homi' or 'pureklee'; selects the archive directory naming
    d_name     -- date tag used in the shared result directory name
    """
    top_dir = "/".join([configs['top_dir'], tool+"__"+stgy+str(iters), pgm])
    if not os.path.exists(top_dir):
        os.makedirs(top_dir)
    # Private copy of the program tree so concurrent trials do not collide.
    group_dir = top_dir + "/" + str(ith_trial)
    os.system(" ".join(["cp -r", l_config['pgm_dir'], group_dir]))
    os.chdir(group_dir+l_config['exec_dir'])
    result_dir="result_"+d_name
    top_tc_dir="/".join([configs['top_dir'], result_dir])
    print top_tc_dir
    if not os.path.exists(top_tc_dir):
        os.mkdir(top_tc_dir)
    # Per-trial archive directory, named after the tool that produced it.
    if tool=="homi":
        tc_dir="/".join([configs['top_dir'], result_dir, str(ith_trial)+"homi_"+pgm+"_"+stgy+"_tc_dir"])
    else:
        tc_dir="/".join([configs['top_dir'], result_dir, str(ith_trial)+"pureklee_"+pgm+"_"+stgy+"_tc_dir"])
    if not os.path.exists(tc_dir):
        os.mkdir(tc_dir)
    os.chdir(group_dir+l_config['exec_dir'])
    run_cmd = gen_run_cmd(pgm, stgy, mem, small_time, iters, tool, ith_trial, result_dir)
    # NOTE(review): devnull is opened but never wired to the child process;
    # os.system output still goes to the terminal — confirm intended.
    with open(os.devnull, 'wb') as devnull:
        os.system(run_cmd)
    klee_dir = "klee-out-0"
    # Drop the bulky artifacts before archiving the run directory.
    rm_cmd=" ".join(["rm", klee_dir+"/assembly.ll", klee_dir+"/run.istats"])
    os.system(rm_cmd)
    cp_cmd = " ".join(["cp", "-r", klee_dir, tc_dir+"/"+str(iters)+"__tc_dirs"])
    print cp_cmd
    os.system(cp_cmd)
    cp2_cmd = " ".join(["cp", "time_result state_data", tc_dir+"/"+str(iters)+"__tc_dirs/"])
    os.system(cp2_cmd)
    # Remove the private working copy now that results are archived.
    rm_cmd=" ".join(["rm -rf", group_dir])
    os.system(rm_cmd)
# CLI entry point: one KLEE run per invocation.  All arguments are
# positional; see the help strings for the expected value formats.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("pgm_config")
    parser.add_argument("pgm")
    parser.add_argument("search_heuristic",help='[nurs:covnew, random-path, ..]')
    parser.add_argument("memory")
    parser.add_argument("small_time",help='[200(s),800(s)]')
    parser.add_argument("ith_trial",help='[1,2,3,..]')
    parser.add_argument("iters")
    parser.add_argument("tool",help='[homi, pureklee]')
    parser.add_argument("d_name", help='0314')
    args = parser.parse_args()
    pgm_config = args.pgm_config
    load_config = load_pgm_config(args.pgm_config)
    pgm = args.pgm
    stgy = args.search_heuristic
    mem = args.memory
    small_time = args.small_time
    # trial index and iteration counter arrive as strings; cast once here
    ith_trial = int(args.ith_trial)
    iters = int(args.iters)
    tool = args.tool
    d_name=args.d_name
    run_all(load_config, pgm, stgy, mem, small_time, ith_trial, iters, tool, d_name)
| from multiprocessing import Process
import signal
import os
import sys
import random
import json
import argparse
import datetime
start_time = datetime.datetime.now()
configs = {
'script_path': os.path.abspath(os.getcwd()),
'top_dir': os.path.abspath('../experiments/'),
'build_dir': os.path.abspath('../klee/build/')
}
def load_pgm_config(config_file):
with open(config_file, 'r') as f:
parsed = json.load(f)
return parsed
def gen_run_cmd(pgm, stgy, mem, small_time, iters, tool, ith_trial, result_dir):
base_command=" ".join([configs['build_dir']+"/bin/klee", "-trial="+str(iters), "--max-memory="+mem, "--watchdog -max-time="+small_time,
"-dirname="+configs['top_dir']+"/"+result_dir, "-write-kqueries", "-only-output-states-covering-new",
"--simplify-sym-indices", "--output-module=false", "--output-source=false", "--output-stats=false",
"--disable-inlining", "--use-forked-solver", "--use-cex-cache", "--libc=uclibc", "--posix-runtime",
"-env-file="+configs['build_dir']+"/../test.env",
"--max-sym-array-size=4096", "--max-instruction-time=30", "--switch-type=internal",
"--use-batching-search", "--batch-instructions=10000", "-ignore-solver-failures"])
opt_flag=1
no_opt_pgms=["gawk", "trueprint"]
if pgm in no_opt_pgms:
opt_flag=0
if stgy=="roundrobin":
stgy="random-path --search=nurs:covnew"
if opt_flag==1:
base_command=" ".join([base_command, "--optimize"])
if (tool=="homi") and (iters!=0):
base_command=" ".join([base_command, "-homi", "-parallel="+str(ith_trial)])
# Follow the symbolic arguments in KLEE paper. (https://klee.github.io/docs/coreutils-experiments/)
if pgm=="dd":
argv = "--sym-args 0 3 10 --sym-files 1 8 --sym-stdin 8 --sym-stdout"
else:
argv = "--sym-args 0 1 10 --sym-args 0 2 2 --sym-files 1 8 --sym-stdin 8 --sym-stdout"
run_cmd = " ".join([base_command, "--search="+stgy, pgm+".bc", argv])
return run_cmd
def run_all(l_config, pgm, stgy, mem, small_time, ith_trial, iters, tool, d_name):
top_dir = "/".join([configs['top_dir'], tool+"__"+stgy+str(iters), pgm])
if not os.path.exists(top_dir):
os.makedirs(top_dir)
group_dir = top_dir + "/" + str(ith_trial)
os.system(" ".join(["cp -r", l_config['pgm_dir'], group_dir]))
os.chdir(group_dir+l_config['exec_dir'])
result_dir="result_"+d_name
top_tc_dir="/".join([configs['top_dir'], result_dir])
print top_tc_dir
if not os.path.exists(top_tc_dir):
os.mkdir(top_tc_dir)
if tool=="homi":
tc_dir="/".join([configs['top_dir'], result_dir, str(ith_trial)+"homi_"+pgm+"_"+stgy+"_tc_dir"])
else:
tc_dir="/".join([configs['top_dir'], result_dir, str(ith_trial)+"pureklee_"+pgm+"_"+stgy+"_tc_dir"])
if not os.path.exists(tc_dir):
os.mkdir(tc_dir)
os.chdir(group_dir+l_config['exec_dir'])
run_cmd = gen_run_cmd(pgm, stgy, mem, small_time, iters, tool, ith_trial, result_dir)
with open(os.devnull, 'wb') as devnull:
os.system(run_cmd)
klee_dir = "klee-out-0"
rm_cmd=" ".join(["rm", klee_dir+"/assembly.ll", klee_dir+"/run.istats"])
os.system(rm_cmd)
cp_cmd = " ".join(["cp", "-r", klee_dir, tc_dir+"/"+str(iters)+"__tc_dirs"])
print cp_cmd
os.system(cp_cmd)
cp2_cmd = " ".join(["cp", "time_result state_data", tc_dir+"/"+str(iters)+"__tc_dirs/"])
os.system(cp2_cmd)
rm_cmd=" ".join(["rm -rf", group_dir])
os.system(rm_cmd)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("pgm_config")
parser.add_argument("pgm")
parser.add_argument("search_heuristic",help='[nurs:covnew, random-path, ..]')
parser.add_argument("memory")
parser.add_argument("small_time",help='[200(s),800(s)]')
parser.add_argument("ith_trial",help='[1,2,3,..]')
parser.add_argument("iters")
parser.add_argument("tool",help='[homi, pureklee]')
parser.add_argument("d_name", help='0314')
args = parser.parse_args()
pgm_config = args.pgm_config
load_config = load_pgm_config(args.pgm_config)
pgm = args.pgm
stgy = args.search_heuristic
mem = args.memory
small_time = args.small_time
ith_trial = int(args.ith_trial)
iters = int(args.iters)
tool = args.tool
d_name=args.d_name
run_all(load_config, pgm, stgy, mem, small_time, ith_trial, iters, tool, d_name)
| en | 0.469283 | # Follow the symbolic arguments in KLEE paper. (https://klee.github.io/docs/coreutils-experiments/) | 1.867001 | 2 |
weather.py | wangwanglulu/pythonlecture12 | 0 | 6624019 | import requests
# Sojson free weather API; the trailing number is a city code.
# NOTE(review): 101020100 appears to be Shanghai — confirm against the API docs.
url = "http://t.weather.sojson.com/api/weather/city/101020100"
r = requests.get(url)
print(r.status_code)
response_dict = r.json()
f = response_dict['data']       # payload wrapper object
ff = f['forecast']              # list of per-day forecast dicts
ff_today = ff[0]                # today's forecast
ff_1 = ff[1]                    # tomorrow
ff_2 = ff[2]                    # day after tomorrow
def show(day):
    """Print each key of *day* as "key: value", one per line, followed by a
    blank separator gap."""
    for key, value in day.items():
        print(key + ': ' + str(value))
    print('\n')
# Print today's and tomorrow's forecasts
show(ff_today)
show(ff_1)
show(ff_2) | import requests
url = "http://t.weather.sojson.com/api/weather/city/101020100"
r = requests.get(url)
print(r.status_code)
response_dict = r.json()
f = response_dict['data']
ff = f['forecast']
ff_today = ff[0]
ff_1 = ff[1]
ff_2 = ff[2]
def show(day):
for x in day:
print(x +': ' + str(day[x]))
print('\n')
show(ff_today)
show(ff_1)
show(ff_2) | none | 1 | 3.246659 | 3 | |
save_weight_in_mat.py | ifgovh/loss-landscape | 1 | 6624020 | """
Calculate and visualize the loss surface.
Usage example:
>> python plot_surface.py --x=-1:1:101 --y=-1:1:101 --model resnet56 --cuda
"""
import argparse
import copy
import h5py
import torch
import time
import socket
import os
import sys
import numpy as np
import torchvision
import torch.nn as nn
import dataloader
import evaluation
import projection as proj
import net_plotter
import plot_2D
import plot_1D
import model_loader
import scheduler
import mpi4pytorch as mpi
import scipy.io as sio
###############################################################
# MAIN
###############################################################
# Entry point: walk the saved checkpoints of one training run and dump the
# flattened weight history to a single MATLAB .mat file.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='plotting loss surface')
    # data parameters
    parser.add_argument('--dataset', default='cifar10', help='cifar10 | imagenet')
    # model parameters
    parser.add_argument('--model', default='resnet56_noshort', help='model name')
    parser.add_argument('--max_epoch', type=int, default=500, help='maximum epoch')
    parser.add_argument('--step', type=int, default=1, help='epoch step')
    args = parser.parse_args()
    #--------------------------------------------------------------------------
    # Load models and extract parameters
    #--------------------------------------------------------------------------
    all_weights = []
    # Checkpoints are named model_0.t7, model_<step>.t7, ... model_<max_epoch>.t7
    for i in range(0,args.max_epoch+1,args.step):
        model_file = 'model_' + str(i) + '.t7'
        net = model_loader.load(args.dataset, args.model, model_file)
        w = net_plotter.get_weights(net) # initial parameters
        #s = copy.deepcopy(net.state_dict()) # deepcopy since state_dict are references
        #import pdb; pdb.set_trace()
        # Convert each weight tensor to a numpy array so scipy.io can serialize it
        for j in range(len(w)):
            w[j] = w[j].numpy()
        all_weights.append(w)
    # One .mat file holding the per-epoch weight snapshots under key 'weight'
    sio.savemat(args.model + 'all_weights.mat',
                mdict={'weight': all_weights},
                )
| """
Calculate and visualize the loss surface.
Usage example:
>> python plot_surface.py --x=-1:1:101 --y=-1:1:101 --model resnet56 --cuda
"""
import argparse
import copy
import h5py
import torch
import time
import socket
import os
import sys
import numpy as np
import torchvision
import torch.nn as nn
import dataloader
import evaluation
import projection as proj
import net_plotter
import plot_2D
import plot_1D
import model_loader
import scheduler
import mpi4pytorch as mpi
import scipy.io as sio
###############################################################
# MAIN
###############################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='plotting loss surface')
# data parameters
parser.add_argument('--dataset', default='cifar10', help='cifar10 | imagenet')
# model parameters
parser.add_argument('--model', default='resnet56_noshort', help='model name')
parser.add_argument('--max_epoch', type=int, default=500, help='maximum epoch')
parser.add_argument('--step', type=int, default=1, help='epoch step')
args = parser.parse_args()
#--------------------------------------------------------------------------
# Load models and extract parameters
#--------------------------------------------------------------------------
all_weights = []
for i in range(0,args.max_epoch+1,args.step):
model_file = 'model_' + str(i) + '.t7'
net = model_loader.load(args.dataset, args.model, model_file)
w = net_plotter.get_weights(net) # initial parameters
#s = copy.deepcopy(net.state_dict()) # deepcopy since state_dict are references
#import pdb; pdb.set_trace()
for j in range(len(w)):
w[j] = w[j].numpy()
all_weights.append(w)
sio.savemat(args.model + 'all_weights.mat',
mdict={'weight': all_weights},
)
| en | 0.156347 | Calculate and visualize the loss surface. Usage example: >> python plot_surface.py --x=-1:1:101 --y=-1:1:101 --model resnet56 --cuda ############################################################### # MAIN ############################################################### # data parameters # model parameters #-------------------------------------------------------------------------- # Load models and extract parameters #-------------------------------------------------------------------------- # initial parameters #s = copy.deepcopy(net.state_dict()) # deepcopy since state_dict are references #import pdb; pdb.set_trace() | 2.853926 | 3 |
salescleanup.py | jlat07/PandasDataTypes | 1 | 6624021 | import pandas as pd
import numpy as np
def convert_currency(val):
    """
    $125,000.00 -> 125000.00
    Convert a currency string to a float by stripping every '$' and ','
    before parsing.
    """
    cleaned = val.translate({ord("$"): None, ord(","): None})
    return float(cleaned)
def convert_percent(val):
    """
    Convert a percentage string (e.g. "30.00%") to its floating point
    fraction (0.30) by removing every '%' and dividing by 100.
    """
    return float(val.translate({ord("%"): None})) / 100
# Pull the demo sales sheet straight from GitHub, converting the messy text
# columns to proper dtypes while reading.
df_2 = pd.read_csv("https://github.com/chris1610/pbpython/blob/master/data/sales_data_types.csv?raw=True",
                   dtype={'Customer Number':'int'},
                   converters={'2016':convert_currency,
                               '2017': convert_currency,
                               'Percent Growth': convert_percent,
                               # coerce unparseable unit counts to NaN instead of raising
                               'Jan Units': lambda x: pd.to_numeric(x, errors='coerce'),
                               # 'Y' -> True, anything else -> False
                               'Active': lambda x: np.where(x == "Y", True, False)
                               })
# Combine the separate Month/Day/Year columns into one datetime column.
df_2["Start_Date"] = pd.to_datetime(df_2[['Month', 'Day', 'Year']])
print(df_2)
# Should output something like:
# (base) Aeneid:notebooks kristofer$ python3 ./salescleanup.py
#    Customer Number     Customer Name      2016       2017  Percent Growth  Jan Units  Month  Day  Year  Active Start_Date
# 0            10002  Quest Industries  125000.0   162500.0            0.30      500.0      1   10  2015    True 2015-01-10
# 1           552278    Smith Plumbing  920000.0  1012000.0            0.10      700.0      6   15  2014    True 2014-06-15
# 2            23477   ACME Industrial   50000.0    62500.0            0.25      125.0      3   29  2016    True 2016-03-29
# 3            24900        Brekke LTD  350000.0   490000.0            0.04       75.0     10   27  2015    True 2015-10-27
# 4           651029         Harbor Co   15000.0    12750.0           -0.15        NaN      2    2  2014   False 2014-02-02
| import pandas as pd
import numpy as np
def convert_currency(val):
"""
$125,000.00 -> 125000.00
Convert the string number value to a float
- Remove $
- Remove commas
- Convert to float type
"""
new_val = val.replace(',','').replace('$', '')
return float(new_val)
def convert_percent(val):
"""
Convert the percentage string to an actual floating point percent
"""
new_val = val.replace('%', '')
return float(new_val) / 100
df_2 = pd.read_csv("https://github.com/chris1610/pbpython/blob/master/data/sales_data_types.csv?raw=True",
dtype={'Customer Number':'int'},
converters={'2016':convert_currency,
'2017': convert_currency,
'Percent Growth': convert_percent,
'Jan Units': lambda x: pd.to_numeric(x, errors='coerce'),
'Active': lambda x: np.where(x == "Y", True, False)
})
df_2["Start_Date"] = pd.to_datetime(df_2[['Month', 'Day', 'Year']])
print(df_2)
# Should output something like:
# (base) Aeneid:notebooks kristofer$ python3 ./salescleanup.py
# Customer Number Customer Name 2016 2017 Percent Growth Jan Units Month Day Year Active Start_Date
# 0 10002 Quest Industries 125000.0 162500.0 0.30 500.0 1 10 2015 True 2015-01-10
# 1 552278 Smith Plumbing 920000.0 1012000.0 0.10 700.0 6 15 2014 True 2014-06-15
# 2 23477 ACME Industrial 50000.0 62500.0 0.25 125.0 3 29 2016 True 2016-03-29
# 3 24900 Brekke LTD 350000.0 490000.0 0.04 75.0 10 27 2015 True 2015-10-27
# 4 651029 Harbor Co 15000.0 12750.0 -0.15 NaN 2 2 2014 False 2014-02-02
| en | 0.351606 | $125,000.00 -> 125000.00 Convert the string number value to a float - Remove $ - Remove commas - Convert to float type Convert the percentage string to an actual floating point percent # Should output something like: # (base) Aeneid:notebooks kristofer$ python3 ./salescleanup.py # Customer Number Customer Name 2016 2017 Percent Growth Jan Units Month Day Year Active Start_Date # 0 10002 Quest Industries 125000.0 162500.0 0.30 500.0 1 10 2015 True 2015-01-10 # 1 552278 Smith Plumbing 920000.0 1012000.0 0.10 700.0 6 15 2014 True 2014-06-15 # 2 23477 ACME Industrial 50000.0 62500.0 0.25 125.0 3 29 2016 True 2016-03-29 # 3 24900 Brekke LTD 350000.0 490000.0 0.04 75.0 10 27 2015 True 2015-10-27 # 4 651029 Harbor Co 15000.0 12750.0 -0.15 NaN 2 2 2014 False 2014-02-02 | 3.777267 | 4 |
MoleculeACE/benchmark/models/__init__.py | molML/MoleculeACE | 9 | 6624022 | from MoleculeACE.benchmark.models.model import Model
from MoleculeACE.benchmark.models.load_model import load_model
from MoleculeACE.benchmark.models.train_model import train_model
| from MoleculeACE.benchmark.models.model import Model
from MoleculeACE.benchmark.models.load_model import load_model
from MoleculeACE.benchmark.models.train_model import train_model
| none | 1 | 1.000663 | 1 | |
AutoMouse/automouse.py | yyFFans/DemoPractises | 0 | 6624023 | <gh_stars>0
# -*- coding: utf-8 -*-
import pyautogui
import time
pyautogui.FAILSAFE = False
screenshot = pyautogui.screenshot
pngLocate = pyautogui.locateOnScreen
def click(x,y):
    """Move the mouse pointer to screen coordinates (x, y) and left-click."""
    pyautogui.moveTo(x,y)
    pyautogui.click()
def get_button_center_from_screen(button_png, png_path='pics'):
    """Locate the template image *button_png* on the current screen.

    button_png -- file name of the template image, searched under *png_path*
    png_path   -- directory holding the template images (default 'pics')

    Returns the (x, y) center of the first on-screen match, or (0, 0) when
    the template is not found.  As a side effect a fresh screenshot is
    written to screen.png in the working directory.
    """
    import os.path  # local import: module top only pulls in pyautogui/time
    screenshot("screen.png")
    # os.path.join keeps the lookup portable; the original hard-coded a
    # Windows '\\' separator, which breaks template paths on POSIX systems.
    button_png = os.path.join(png_path, button_png)
    start_pos = pngLocate(button_png)
    if start_pos is None:  # idiomatic identity check instead of '== None'
        # template not found on the current screen (typo "exsit" fixed)
        print("{} not exist on current screen".format(button_png))
        return 0, 0
    return pyautogui.center(start_pos)
def AutoMouse():
    """Drive up to 89 in-game challenge runs by template-matching buttons on
    screen: start a run, wait through the loading screen, skip cutscenes,
    and click 'retry' when the run ends.  Relies on the PNG templates
    resolved by get_button_center_from_screen."""
    print("Start")
    n = 1
    while(n<90):
        print("{now} 第{n}次\n".format(now=time.strftime("%m-%d %H:%M:%S"), n=n))
        # keep clicking the "start challenge" button until it is found
        while(1):
            x, y = get_button_center_from_screen('开始闯关.PNG')
            if (x,y) == (0,0):
                time.sleep(2)
                continue
            click(x,y)
            time.sleep(5)
            break
        loading = False
        # wait for the loading screen to appear
        while(1):
            x,y = get_button_center_from_screen('加载中.PNG')
            time.sleep(3)
            if (x,y) != (0,0):
                break
        loading = False
        print("加载中\n")
        # then wait for the loading screen to disappear again
        while(1):
            x,y = get_button_center_from_screen('加载中.PNG')
            if (x,y) == (0,0):
                break
        print("加载完成\n")
        # check whether the intro screen offers a "skip" button
        x,y = get_button_center_from_screen('跳过.PNG')
        if (x,y) == (0,0):
            print("no need Jump over")
        else:
            print("need Jump over")
            click(x,y)
        if 0:
            # dead code (guarded by `if 0`): check whether auto-battle is
            # already enabled and enable it otherwise
            x,y = get_button_center_from_screen("未启用自动.PNG")
            if (x,y) != (0,0):
                print("not auto run")
                click(x,y)
            else:
                print("already auto run")
        time.sleep(80)
        # monitor the run: skip the two mid-run cutscenes when they appear
        # and detect the end-of-run screen, then start the next attempt
        JumpOver_1 = False
        JumpOver_2 = False
        Game_END = False
        while(1):
            if JumpOver_1 == False:
                x,y = get_button_center_from_screen('秦始皇1跳过.PNG')
                if (x,y) != (0,0):
                    print("need Jump over 1")
                    JumpOver_1 = True
                    click(x,y)
            if JumpOver_2 == False:
                x, y = get_button_center_from_screen('秦始皇2跳过.PNG')
                if (x, y) != (0, 0):
                    print("need Jump over 2")
                    JumpOver_2 = True
                    click(x, y)
            if JumpOver_1 == True or JumpOver_2 == True:
                x,y = get_button_center_from_screen("结束后继续.PNG")
                if (x,y) != (0,0):
                    print("all over.\n")
                    Game_END = True
                    click(x,y)
            # start the next challenge run
            if Game_END == True:
                x, y = get_button_center_from_screen('再次挑战.PNG')
                if (x, y) != (0, 0):
                    n = n+1
                    print("Start again")
                    click(x,y)
                    time.sleep(2)
                    break
# Script entry point: run the automation loop (~90 rounds) until it finishes
if __name__ == '__main__':
    AutoMouse()
| # -*- coding: utf-8 -*-
import pyautogui
import time
pyautogui.FAILSAFE = False
screenshot = pyautogui.screenshot
pngLocate = pyautogui.locateOnScreen
def click(x,y):
pyautogui.moveTo(x,y)
pyautogui.click()
def get_button_center_from_screen(button_png,png_path='pics'):
screen = screenshot("screen.png")
button_png = png_path + '\\' + button_png
start_pos = pngLocate(button_png)
if start_pos == None:
#找不到button
print("{} not exsit on current screen".format(button_png))
return 0,0
return pyautogui.center(start_pos)
def AutoMouse():
print("Start")
n = 1
while(n<90):
print("{now} 第{n}次\n".format(now=time.strftime("%m-%d %H:%M:%S"), n=n))
while(1):
x, y = get_button_center_from_screen('开始闯关.PNG')
if (x,y) == (0,0):
time.sleep(2)
continue
click(x,y)
time.sleep(5)
break
loading = False
#是否正在加载中
while(1):
x,y = get_button_center_from_screen('加载中.PNG')
time.sleep(3)
if (x,y) != (0,0):
break
loading = False
print("加载中\n")
while(1):
x,y = get_button_center_from_screen('加载中.PNG')
if (x,y) == (0,0):
break
print("加载完成\n")
#检查是否初始画面需要跳过
x,y = get_button_center_from_screen('跳过.PNG')
if (x,y) == (0,0):
print("no need Jump over")
else:
print("need Jump over")
click(x,y)
if 0:
#检查是否已经启用自动
x,y = get_button_center_from_screen("未启用自动.PNG")
if (x,y) != (0,0):
print("not auto run")
click(x,y)
else:
print("already auto run")
time.sleep(80)
#运行监测,是否结束,以及中间存在需要跳过,结束则开启下一次 每5s检测一次
JumpOver_1 = False
JumpOver_2 = False
Game_END = False
while(1):
if JumpOver_1 == False:
x,y = get_button_center_from_screen('秦始皇1跳过.PNG')
if (x,y) != (0,0):
print("need Jump over 1")
JumpOver_1 = True
click(x,y)
if JumpOver_2 == False:
x, y = get_button_center_from_screen('秦始皇2跳过.PNG')
if (x, y) != (0, 0):
print("need Jump over 2")
JumpOver_2 = True
click(x, y)
if JumpOver_1 == True or JumpOver_2 == True:
x,y = get_button_center_from_screen("结束后继续.PNG")
if (x,y) != (0,0):
print("all over.\n")
Game_END = True
click(x,y)
#start 闯关
if Game_END == True:
x, y = get_button_center_from_screen('再次挑战.PNG')
if (x, y) != (0, 0):
n = n+1
print("Start again")
click(x,y)
time.sleep(2)
break
if __name__ == '__main__':
AutoMouse() | zh | 0.970613 | # -*- coding: utf-8 -*- #找不到button #是否正在加载中 #检查是否初始画面需要跳过 #检查是否已经启用自动 #运行监测,是否结束,以及中间存在需要跳过,结束则开启下一次 每5s检测一次 #start 闯关 | 3.109181 | 3 |
cryptkeeper/quarry/node/icodrops.py | CMoncur/cryptkeeper | 0 | 6624024 | """ ICODrops Excavator """
# Core Dependencies
from datetime import datetime
# External Dependencies
from bs4 import BeautifulSoup
# Internal Dependencies
from cryptkeeper.quarry.excavator import Excavator
from cryptkeeper.db.librarian import Librarian
import cryptkeeper.db.schema.icodrops as Schema
import cryptkeeper.util.util as Util
# Sanitization Functions
def containsAllData(entry):
    """ Ensures ICODrops entry contains all data needed to be stored """
    expected_types = (
        ("name", str),
        ("start", datetime),
        ("end", datetime),
        ("description", str),
        ("price", float),
        ("raised", int),
        ("presale_start", datetime),
        ("presale_end", datetime),
        ("token_symbol", str),
    )
    return all(isinstance(entry[field], kind) for field, kind in expected_types)
# Scraping Functions
def scrapeDescription(soup):
    """ Scrapes ICO description from ICODrops listing """
    info = soup.find("div", attrs={"class": "ico-main-info"})
    text = info.text.replace("\n", " ")
    for junk in ("\r", "\t"):
        text = text.replace(junk, "")
    # Drop the first whitespace-delimited token (the ICO name heading).
    return text.strip().split(" ", 1)[-1]
def scrapeEnd(soup):
    """ Scrapes ICO end date from ICODrops listing.

    The heading reads "Token Sale: <start> – <end>"; this returns the END
    half of the range (the original body mirrored scrapeStart and took
    element [0], so the scraped end date always equalled the start date).
    Returns None when the heading is absent or the date does not parse.
    """
    year = str(datetime.now().year)
    token_sale = list(filter(lambda x: "Sale:" in x.text, soup.findAll("h4")))
    if token_sale:
        date_string = token_sale[0] \
            .text \
            .translate({ ord(x): "" for x in [ "\n", "\r", "\t" ] }) \
            .replace("Token Sale: ", "") \
            .split(" – ")[-1]
        try:
            return datetime.strptime(date_string + " " + year, "%d %b %Y")
        except ValueError:
            # Date not in the expected "<day> <month>" form
            return None
    # No "Token Sale:" heading present on the page
    return None
def scrapeName(soup):
    """ Scrapes ICO name from ICODrops listing """
    main_info = soup.find("div", attrs={"class": "ico-main-info"})
    heading = main_info.find("h3")
    return heading.text
def scrapePrice(soup):
    """ Scrapes ICO price from ICODrops listing """
    for item in soup.findAll("li"):
        label = item.find("span", attrs={"class": "grey"})
        if not (label and "Token Price" in label.text):
            continue
        # Row looks like "1 ETH = <price>\xa0<UNIT> (<note>)"
        raw = item.text.split(" = ")[-1]
        raw = raw.split(" (")[0]
        raw = raw.replace("\xa0", " ")
        token = raw.split(" ")[0]
        try:
            return float(token)  # first matching row wins
        except ValueError:
            return None  # price token was not numeric
    # No "Token Price" row present on the page
    return None
def scrapeRaised(soup):
    """ Scrapes ICO amount raised from ICODrops listing """
    goal = soup.find("div", attrs={"class": "money-goal"})
    digits = goal.text
    for junk in ("$", ",", "\n", "\r", "\t"):
        digits = digits.replace(junk, "")
    try:
        return int(digits)
    except ValueError:
        # Amount missing or not purely numeric
        return None
def scrapeSite(soup):
    """ Scrapes ICO website URL from ICODrops listing """
    right_col = soup.find("div", attrs={"class": "ico-right-col"})
    return right_col.find("a")["href"]
def scrapeStart(soup):
    """ Scrapes ICO start date from ICODrops listing """
    current_year = str(datetime.now().year)
    headings = [h for h in soup.findAll("h4") if "Sale:" in h.text]
    if not headings:
        # No "Token Sale:" heading present on the page
        return None
    raw = headings[0].text
    for junk in ("\n", "\r", "\t"):
        raw = raw.replace(junk, "")
    raw = raw.replace("Token Sale: ", "")
    # "Token Sale: <start> – <end>"; the start date is the left half
    date_string = raw.split(" – ")[0]
    try:
        return datetime.strptime("{0} {1}".format(date_string, current_year), "%d %b %Y")
    except ValueError:
        # Date not in the expected "<day> <month>" form
        return None
def scrapeSymbol(soup):
    """ Scrapes ICO symbol from ICODrops listing """
    for item in soup.findAll("li"):
        label = item.find("span", attrs={"class": "grey"})
        if label and "Ticker:" in label.text:
            # First matching row holds the ticker symbol
            return item.text.replace("Ticker: ", "")
    # No "Ticker:" row present on the page
    return None
# Public Entities
class IcoDrops(Excavator, Librarian):
    """ ICODrops Excavator Class: discovers every ICO detail page on
    icodrops.com, scrapes its metadata, and upserts complete records. """
    # Root of the ICODrops site; all list/detail URLs are discovered from here
    URL = "https://icodrops.com"
    def __init__(self):
        # Excavator fetches the detail pages discovered by __fetchIcoUrls;
        # Librarian binds this instance to the IcoDrops table schema.
        Excavator.__init__(self, self.__fetchIcoUrls(), True, True)
        Librarian.__init__(self, Schema.IcoDrops)
        self.raw_ico_data = []        # scraped entries, possibly incomplete
        self.sanitized_ico_data = []  # entries that passed containsAllData
        # NOTE(review): self.urls is presumably populated by
        # Excavator.__init__ from the URL list — confirm in Excavator.
        if not self.urls:
            print("IcoDrops: No URLs to mine...")
        else:
            self.__fetchIcoData()
            self.__sanitizeAndStoreIcoData()
    # Private Methods
    def __fetchIcoData(self):
        """ Fetch metadata specific to each ICO """
        # Filter out non-HTML responses
        self.data = list(filter(Util.isHtml, self.data))
        for data in self.data:
            soup = BeautifulSoup(data["content"], "html.parser")
            # One raw record per ICO page; scrapers return None for
            # fields they cannot find.
            self.raw_ico_data.append({
                "name" : scrapeName(soup),
                "start" : scrapeStart(soup),
                "end" : scrapeEnd(soup),
                "description" : scrapeDescription(soup),
                "price" : scrapePrice(soup),
                "raised" : scrapeRaised(soup),
                # NOTE(review): presale dates reuse the main-sale scrapers,
                # so they duplicate start/end — confirm this is intended.
                "presale_start" : scrapeStart(soup),
                "presale_end" : scrapeEnd(soup),
                "token_symbol" : scrapeSymbol(soup)
            })
    def __fetchIcoUrls(self):
        """
        Within IcoDrops, there are three main columns -- 1) Active ICO,
        2) Upcoming ICO, 3) Ended ICO. Each column has a "View All" anchor at
        the bottom of the list. This function will grab the URLs for each of
        those "View All" links and append them to a list.
        Utilizing each of the gathered ICO List URLs, fetch the URLS of each
        individual ICO, and append them to a list.
        """
        icodrops_home = Excavator([ self.URL ], True, True)
        ico_list_urls = []
        ico_urls = []
        if Util.isHtml(icodrops_home.data[0]):
            soup = BeautifulSoup(icodrops_home.data[0]["content"], "html.parser")
            # "View All" anchors live inside divs with id="view_all"
            for s in soup.findAll("div", attrs = { "id" : "view_all" }):
                ico_list_urls.append(self.URL + s.find("a")["href"])
        ico_lists = Excavator(ico_list_urls, True, True)
        for data in ico_lists.data:
            if Util.isHtml(data):
                soup = BeautifulSoup(data["content"], "html.parser")
                # Each ICO card links out via an anchor with id="ccc"
                for a in soup.findAll("a", attrs = { "id" : "ccc" }):
                    ico_urls.append(a["href"])
        return ico_urls
    def __sanitizeAndStoreIcoData(self):
        """
        Ensures only values with all essential information are included, then
        upserts data to Postgres.
        """
        self.sanitized_ico_data = list(filter(containsAllData, self.raw_ico_data))
        # Inherited from Librarian class
        self.bulkUpsert(
            self.sanitized_ico_data,
            [ Schema.IcoDrops.name.name ]
        )
| """ ICODrops Excavator """
# Core Dependencies
from datetime import datetime
# External Dependencies
from bs4 import BeautifulSoup
# Internal Dependencies
from cryptkeeper.quarry.excavator import Excavator
from cryptkeeper.db.librarian import Librarian
import cryptkeeper.db.schema.icodrops as Schema
import cryptkeeper.util.util as Util
# Sanitization Functions
def containsAllData(entry):
""" Ensures ICODrops entry contains all data needed to be stored """
return isinstance(entry["name"], str) \
and isinstance(entry["start"], datetime) \
and isinstance(entry["end"], datetime) \
and isinstance(entry["description"], str) \
and isinstance(entry["price"], float) \
and isinstance(entry["raised"], int) \
and isinstance(entry["presale_start"], datetime) \
and isinstance(entry["presale_end"], datetime) \
and isinstance(entry["token_symbol"], str)
# Scraping Functions
def scrapeDescription(soup):
    """ Scrapes ICO description from ICODrops listing """
    main_info = soup.find("div", attrs = { "class" : "ico-main-info" })
    # Flatten newlines to spaces, strip carriage returns and tabs.
    text = main_info.text.replace("\n", " ")
    text = text.translate({ ord(ch): "" for ch in ("\r", "\t") })
    # Drop the leading token (the ICO name) and keep the remainder.
    return text.strip().split(" ", 1)[-1]
def scrapeEnd(soup):
    """ Scrapes ICO end date from ICODrops listing.

    The "Token Sale" header reads "<start> – <end>". Fix: take the last
    element of the range split; the previous implementation took element 0,
    which is the start date (it was byte-identical to scrapeStart).
    """
    year = str(datetime.now().year)
    token_sale = list(filter(lambda x: "Sale:" in x.text, soup.findAll("h4")))
    if token_sale:
        date_string = token_sale[0] \
            .text \
            .translate({ ord(x): "" for x in [ "\n", "\r", "\t" ] }) \
            .replace("Token Sale: ", "") \
            .split(" – ")[-1]
        # NOTE(review): assumes the page omits the year and the sale ends in
        # the current year — TODO confirm against live listings.
        try:
            return datetime.strptime(date_string + " " + year, "%d %b %Y")
        except ValueError:
            # Return nothing in event string is still not formatted properly
            return None
    # Catchall in the event this entity was not scraped
    return None
def scrapeName(soup):
    """ Scrapes ICO name from ICODrops listing """
    main_info = soup.find("div", attrs = { "class" : "ico-main-info" })
    return main_info.find("h3").text
def scrapePrice(soup):
    """ Scrapes ICO price from ICODrops listing """
    for item in soup.findAll("li"):
        label = item.find("span", attrs = { "class" : "grey" })
        if not (label and "Token Price" in label.text):
            continue
        # Only the first matching list item is considered.
        raw_price = item \
            .text \
            .split(" = ")[-1] \
            .split(" (")[0] \
            .replace("\xa0", " ") \
            .split(" ")[0]
        try:
            return float(raw_price)
        except ValueError:
            # Type cast failed; treat the price as missing.
            return None
    # No list item matched; price was not scraped.
    return None
def scrapeRaised(soup):
    """ Scrapes ICO amount raised from ICODrops listing """
    goal_div = soup.find("div", attrs = { "class" : "money-goal" })
    # Strip currency symbol, thousands separators and whitespace control chars.
    cleaned = goal_div.text.translate({ ord(ch): "" for ch in ("$", ",", "\n", "\r", "\t") })
    try:
        return int(cleaned)
    except ValueError:
        # Type cast failed; treat the amount as missing.
        return None
def scrapeSite(soup):
    """ Scrapes ICO website URL from ICODrops listing """
    right_column = soup.find("div", attrs = { "class" : "ico-right-col" })
    return right_column.find("a")["href"]
def scrapeStart(soup):
    """ Scrapes ICO start date from ICODrops listing """
    current_year = str(datetime.now().year)
    sale_headers = [h for h in soup.findAll("h4") if "Sale:" in h.text]
    if not sale_headers:
        # Entity was not present on the page.
        return None
    cleaned = sale_headers[0].text.translate({ ord(ch): "" for ch in ("\n", "\r", "\t") })
    # The header reads "Token Sale: <start> – <end>"; keep the start date.
    date_string = cleaned.replace("Token Sale: ", "").split(" – ")[0]
    try:
        return datetime.strptime(date_string + " " + current_year, "%d %b %Y")
    except ValueError:
        # String is still not formatted properly.
        return None
def scrapeSymbol(soup):
    """ Scrapes ICO symbol from ICODrops listing """
    for item in soup.findAll("li"):
        label = item.find("span", attrs = { "class" : "grey" })
        if label and "Ticker:" in label.text:
            # Only the first match is returned.
            return item \
                .text \
                .replace("Ticker: ", "")
    return None
# Public Entities
class IcoDrops(Excavator, Librarian):
    """ ICODrops Excavator Class

    Crawls icodrops.com for individual ICO listing URLs, scrapes each
    listing's metadata, and upserts the sanitized rows to Postgres via the
    inherited Librarian interface. All work happens in __init__.
    """
    # Base URL of the site being scraped.
    URL = "https://icodrops.com"
    def __init__(self):
        # Excavator fetches the listing pages for the URLs discovered below;
        # Librarian binds this instance to the IcoDrops schema/table.
        Excavator.__init__(self, self.__fetchIcoUrls(), True, True)
        Librarian.__init__(self, Schema.IcoDrops)
        self.raw_ico_data = []
        self.sanitized_ico_data = []
        # NOTE(review): assumes Excavator.__init__ sets self.urls and
        # self.data — TODO confirm base-class contract.
        if not self.urls:
            print("IcoDrops: No URLs to mine...")
        else:
            self.__fetchIcoData()
            self.__sanitizeAndStoreIcoData()
    # Private Methods
    def __fetchIcoData(self):
        """ Fetch metadata specific to each ICO """
        # Filter out non-HTML responses
        self.data = list(filter(Util.isHtml, self.data))
        for data in self.data:
            soup = BeautifulSoup(data["content"], "html.parser")
            # NOTE(review): presale_start/presale_end reuse the main-sale
            # scrapers — looks like a copy-paste placeholder; no dedicated
            # presale scrapers exist in this module. Confirm intent.
            self.raw_ico_data.append({
                "name" : scrapeName(soup),
                "start" : scrapeStart(soup),
                "end" : scrapeEnd(soup),
                "description" : scrapeDescription(soup),
                "price" : scrapePrice(soup),
                "raised" : scrapeRaised(soup),
                "presale_start" : scrapeStart(soup),
                "presale_end" : scrapeEnd(soup),
                "token_symbol" : scrapeSymbol(soup)
            })
    def __fetchIcoUrls(self):
        """
        Within IcoDrops, there are three main columns -- 1) Active ICO,
        2) Upcoming ICO, 3) Ended ICO. Each column has a "View All" anchor at
        the bottom of the list. This function will grab the URLs for each of
        those "View All" links and append them to a list.
        Utilizing each of the gathered ICO List URLs, fetch the URLS of each
        individual ICO, and append them to a list.
        """
        # Fetch the home page once; assumes at least one response is
        # returned (data[0]) — TODO confirm Excavator never yields [].
        icodrops_home = Excavator([ self.URL ], True, True)
        ico_list_urls = []
        ico_urls = []
        if Util.isHtml(icodrops_home.data[0]):
            soup = BeautifulSoup(icodrops_home.data[0]["content"], "html.parser")
            # "View All" anchors live inside div#view_all containers.
            for s in soup.findAll("div", attrs = { "id" : "view_all" }):
                ico_list_urls.append(self.URL + s.find("a")["href"])
        # Fetch each list page and collect the per-ICO anchors (a#ccc).
        ico_lists = Excavator(ico_list_urls, True, True)
        for data in ico_lists.data:
            if Util.isHtml(data):
                soup = BeautifulSoup(data["content"], "html.parser")
                for a in soup.findAll("a", attrs = { "id" : "ccc" }):
                    ico_urls.append(a["href"])
        return ico_urls
    def __sanitizeAndStoreIcoData(self):
        """
        Ensures only values with all essential information are included, then
        upserts data to Postgres.
        """
        # Drop entries where any scraper returned None (see containsAllData).
        self.sanitized_ico_data = list(filter(containsAllData, self.raw_ico_data))
        # Inherited from Librarian class; the ICO name column is the upsert key.
        self.bulkUpsert(
            self.sanitized_ico_data,
            [ Schema.IcoDrops.name.name ]
        )
| en | 0.820157 | ICODrops Excavator # Core Dependencies # External Dependencies # Internal Dependencies # Sanitization Functions Ensures ICODrops entry contains all data needed to be stored # Scraping Functions Scrapes ICO description from ICODrops listing Scrapes ICO end date from ICODrops listing # Return nothing in event string is still not formatted properly # Catchall in the event this entity was not scraped Scrapes ICO name from ICODrops listing Scrapes ICO price from ICODrops listing # Return only first match # Return nothing in the event type casting fails # Catchall in the event no matches are found Scrapes ICO amount raised from ICODrops listing # Return nothing in the event type casting fails Scrapes ICO website URL from ICODrops listing Scrapes ICO start date from ICODrops listing # Return nothing in event string is still not formatted properly # Catchall in the event this entity was not scraped Scrapes ICO symbol from ICODrops listing # Return only first match # Public Entities ICODrops Excavator Class # Private Methods Fetch metadata specific to each ICO # Filter out non-HTML responses Within IcoDrops, there are three main columns -- 1) Active ICO, 2) Upcoming ICO, 3) Ended ICO. Each column has a "View All" anchor at the bottom of the list. This function will grab the URLs for each of those "View All" links and append them to a list. Utilizing each of the gathered ICO List URLs, fetch the URLS of each individual ICO, and append them to a list. Ensures only values with all essential information are included, then upserts data to Postgres. # Inherited from Librarian class | 2.765731 | 3 |
pulling-repos/filters.py | IliadisVictor/deep-learning-applications-research | 0 | 6624025 | from bs4 import BeautifulSoup
import requests
# Input the full name of the repository with the slash and the amount of contributors
# you want to see if it exceeds , True if it does False if it doesn't None if something went wrong
# with the scraping
# WARNING , these tools do not use the official API that is safer but much slower.
def contributors_check(repo_name, contributors_threshold):
    """Return True/False for whether the repo's contributor count exceeds
    the threshold, or None if the count could not be scraped."""
    url = 'https://github.com/' + repo_name
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    for anchor in soup.find_all('a', class_="Link--primary no-underline"):
        text = anchor.get_text()
        if 'Contributors' in text:
            # First matching anchor wins; strip the label to get the number.
            amount = text.replace('Contributors', '').strip()
            return int(amount) > contributors_threshold
    # No contributors anchor found on the page.
    return None
def above_stars_threshold(repo_name, stars_barrier):
    """Return True/False for whether the repo's star count exceeds
    stars_barrier, or None if the count could not be scraped.

    Fixes: the previous loop fell through without a match and then parsed
    the *last unrelated* anchor (or raised if there were none), and an
    unparseable count crashed with an uncaught ValueError. Now non-matching
    anchors are skipped and failures return None, matching the None
    contract of contributors_check.
    """
    url = 'https://github.com/' + repo_name
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    # Find the anchor whose text mentions stars, e.g. "123 stars" / "1.2k stars".
    for cell in soup.find_all('a', class_="Link--muted"):
        text = cell.get_text()
        if 'star' not in text:
            continue
        # Strip the "stars"/"star" label to leave the numeral.
        stars = text.replace('stars', '').replace('star', '').strip()
        # 'k' means thousands, so it surely exceeds the 3-digit thresholds
        # this tool is used with.
        if 'k' in stars:
            return True
        try:
            return int(stars) > stars_barrier
        except ValueError:
            # Count not parseable as an integer.
            return None
    # No star anchor found on the page.
    return None
import requests
# Input the full name of the repository with the slash and the amount of contributors
# you want to see if it exceeds , True if it does False if it doesn't None if something went wrong
# with the scraping
# WARNING , these tools do not use the official API that is safer but much slower.
def contributors_check(repo_name,contributors_threshold):
url = 'https://github.com/'+repo_name
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
cells=soup.find_all('a', class_="Link--primary no-underline")
for a in cells:
if 'Contributors' in a.get_text():
contributors_amount = a.get_text().replace('Contributors','').strip()
if int(contributors_amount)>contributors_threshold:
return True
else:
return False
return None
def above_stars_threshold(repo_name,stars_barrier):
url = 'https://github.com/'+repo_name
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
cells=soup.find_all('a', class_="Link--muted")
# looking for the link that corresponds to the stars by searching for the string star in it
for i in range(0,len(cells)):
if 'star' in cells[i].get_text():
break
# here we clean the string to get only the numeral so we can convert to an int
stars = cells[i].get_text().replace('stars', '').strip()
if 'star' in stars:
stars = cells[i].get_text().replace('star', '').strip()
# K means a thousand so it will surely be bigger than the 3 numeral threshold we give as input
if 'k' in stars:
return True
else:
if int(stars)>stars_barrier:
return True
else:
return False
return None | en | 0.909496 | # Input the full name of the repository with the slash and the amount of contributors # you want to see if it exceeds , True if it does False if it doesn't None if something went wrong # with the scraping # WARNING , these tools do not use the official API that is safer but much slower. # looking for the link that corresponds to the stars by searching for the string star in it # here we clean the string to get only the numeral so we can convert to an int # K means a thousand so it will surely be bigger than the 3 numeral threshold we give as input | 3.512286 | 4 |
aoc11.py | juestr/aoc-2021 | 0 | 6624026 | <gh_stars>0
#!/usr/bin/env python3
import numpy as np
from scipy.ndimage import correlate
# Parse the puzzle input: one digit per grid cell, as a 2-D int array.
with open('aoc11_input.txt') as f:
    a = np.genfromtxt(f, delimiter=1, dtype=np.int_)
# 8-neighbourhood kernel: each cell's correlation sums its neighbours only.
NBKERNEL = np.array(
    [[1, 1, 1],
     [1, 0, 1],
     [1, 1, 1]])
def step(a):
    """Advance the octopus grid one step in place; return the flash count."""
    a += 1
    not_yet_flashed = np.ones_like(a, dtype=np.bool_)
    while True:
        flashing = (a > 9) & not_yet_flashed
        if not flashing.any():
            break
        # Each neighbour of a newly-flashing cell gains 1 energy; cells that
        # already flashed this step are frozen (masked out).
        boost = correlate(flashing.astype(np.int_), NBKERNEL,
                          mode='constant', cval=False) * not_yet_flashed
        a += boost
        not_yet_flashed &= ~flashing
    # Flashed cells reset to 0; the rest keep their energy.
    a *= not_yet_flashed
    return a.size - np.sum(not_yet_flashed)
# Part 1: total flashes over the first 100 steps (step mutates the grid).
flashes = sum(step(a) for _ in range(100))
print(f'Part 1: {flashes=}')
# Part 2: keep stepping until every octopus flashes in the same step.
# The grid has already advanced 100 steps above, so counting starts at 101.
at_step = 101
while step(a) != a.size:
    at_step += 1
print(f'Part 2: {at_step=}')
| #!/usr/bin/env python3
import numpy as np
from scipy.ndimage import correlate
with open('aoc11_input.txt') as f:
a = np.genfromtxt(f, delimiter=1, dtype=np.int_)
NBKERNEL = np.array(
[[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
def step(a):
a += 1
active = np.ones_like(a, dtype=np.bool_)
while np.any(new_flashes:=(a > 9) & active):
nb_increases = correlate(new_flashes.astype(np.int_), NBKERNEL,
mode='constant', cval=False) * active
a += nb_increases
active &= ~new_flashes
a *= active
return a.size - np.sum(active)
flashes = sum(step(a) for _ in range(100))
print(f'Part 1: {flashes=}')
at_step = 101
while step(a) != a.size:
at_step += 1
print(f'Part 2: {at_step=}') | fr | 0.221828 | #!/usr/bin/env python3 | 2.739568 | 3 |
kaifa/select_11.py | AluuLL/initial-exper_python | 0 | 6624027 | #/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import getopt
import re
from itertools import *
import time
import json
import csv
import codecs
import random as r
import time
import random
import pandas as pd
##This script is used to convert a CSV file into JSON format (此程序用来将csv文件转成json格式)
| #/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import getopt
import re
from itertools import *
import time
import json
import csv
import codecs
import random as r
import time
import random
import pandas as pd
##此程序用来将csv文件转成json格式
| zh | 0.59504 | #/usr/bin/python3 # -*- coding: utf-8 -*- ##此程序用来将csv文件转成json格式 | 1.881587 | 2 |
leetcode/1351-Count-Negative-Numbers-in-a-Sorted-Matrix/binary-search.py | cc13ny/all-in | 1 | 6624028 | <filename>leetcode/1351-Count-Negative-Numbers-in-a-Sorted-Matrix/binary-search.py
class Solution:
    def countNegatives(self, grid: List[List[int]]) -> int:
        """Count negatives in a matrix sorted non-increasing per row and column.

        Scans columns right-to-left, binary-searching each column for the
        first negative row. Because the matrix is sorted, that boundary can
        only move downward as we move left, so the lower bound carries over
        between columns.
        """
        total = 0
        nrows = len(grid)
        lo = 0
        for col in range(len(grid[0]) - 1, -1, -1):
            hi = nrows - 1
            while lo <= hi:
                mid = (lo + hi) // 2
                if grid[mid][col] >= 0:
                    lo = mid + 1
                else:
                    hi = mid - 1
            # Rows lo..nrows-1 are negative in this column.
            total += nrows - lo
        return total
| <filename>leetcode/1351-Count-Negative-Numbers-in-a-Sorted-Matrix/binary-search.py
class Solution:
def countNegatives(self, grid: List[List[int]]) -> int:
res = 0
nrows, ncols = len(grid), len(grid[0])
start_l, start_r = 0, nrows - 1
for i in range(ncols - 1, -1, -1):
l, r = start_l, start_r
while l <= r:
m = l + int((r - l) / 2)
if grid[m][i] >= 0:
l = m + 1
else:
r = m - 1
res += nrows - l
start_l = l
return res
| none | 1 | 3.397755 | 3 | |
egrul/worker.py | ServerHack-The-First-Law-Of-Robotics/data_engineering | 0 | 6624029 | <filename>egrul/worker.py
from aiohttp import ClientSession
from asyncio import sleep
from logging import getLogger
from os.path import join
from base.worker import Worker
from base.data_objects import INNTask
from .data_objects import EgrulResult
logger = getLogger(__name__)
class EgrulWorker(Worker):
    """Worker that downloads EGRUL company-extract PDFs from egrul.nalog.ru.

    For each INN task it drives the site's multi-step flow: search by INN,
    request extract generation, poll until the file is ready, then stream
    the PDF to disk under base_pdf_path.
    """
    def __init__(
        self,
        *args,
        base_pdf_path: str,
        **kwargs
    ):
        """base_pdf_path: directory where downloaded PDFs are written."""
        super().__init__(*args, **kwargs)
        self.base_pdf_path = base_pdf_path
    async def complete_task(self, session: ClientSession, task: INNTask) -> EgrulResult:
        """Download the EGRUL extract PDF for task.inn.

        Returns an EgrulResult with the saved path on success, or an
        error-flagged result (pdf_path=None) on any failure.
        """
        inn = task.inn
        resp = None
        pdf_path = join(self.base_pdf_path, f"egrul_{inn}.pdf")
        try:
            # Step 1: search by INN; the JSON response carries a token ("t")
            # used to fetch the search results.
            async with self.get_response(
                session,
                'https://egrul.nalog.ru/',
                kwargs={"data": {'query': inn}},
                method="post"
            ) as resp:
                inn_info = await resp.json()
                if resp.status != 200:
                    raise ValueError(f"Некорректный ответ от egrul.nalog.ru. {resp.status=}, {inn_info=}")
                inn_access_key = inn_info["t"]
            # Step 2: fetch the search results; the first row's token
            # identifies the downloadable extract.
            # NOTE(review): assumes 'rows' is non-empty for a valid INN —
            # an empty result falls into the generic except below.
            async with self.get_response(
                session,
                f'https://egrul.nalog.ru/search-result/{inn_access_key}',
            ) as resp:
                search_result_resp = await resp.json()
                file_access_code = search_result_resp['rows'][0]['t']
            # Step 3: tell the server to start generating the extract file
            # (response body is not needed).
            async with self.get_response(
                session,
                f'https://egrul.nalog.ru/vyp-request/{file_access_code}'
            ) as resp:
                ...
            # Step 4: poll until the file is ready; the reported status is
            # either "wait" or "ready".
            max_tries = 3
            time_to_sleep = 1
            await sleep(0.5)
            for _ in range(max_tries):
                async with self.get_response(
                    session,
                    f'https://egrul.nalog.ru/vyp-status/{file_access_code}',
                ) as resp:
                    data = await resp.json()
                    if data["status"] == "ready":
                        break
                    else:
                        logger.debug(f"Спим {time_to_sleep} секунд")
                        await sleep(time_to_sleep)
                        # Back off to a long wait after the first quick retry.
                        time_to_sleep = 60
            else:
                # for/else: the loop never hit `break`, i.e. the file never
                # became ready within max_tries polls.
                logger.error(f"После {max_tries} циклов ожидания, файл для ИНН {inn=} нам не доступен.")
                raise RuntimeError()
            # Step 5: stream the generated PDF to disk in 1 KiB chunks.
            async with self.get_response(
                session,
                f'https://egrul.nalog.ru/vyp-download/{file_access_code}',
            ) as resp:
                if resp.status != 200:
                    raise RuntimeError(f"Статус response для загрузки не 200: {resp.status=}")
                with open(pdf_path, 'wb') as pdf_file:
                    async for data in resp.content.iter_chunked(1024):
                        pdf_file.write(data)
            return EgrulResult(
                pdf_path=pdf_path,
                is_error=resp.status != 200,
                status_code=resp.status
            )
        except Exception:
            # Any failure in the flow above ends up here; the result records
            # the last observed HTTP status (-1 if no request completed).
            logger.error("Произошла ошибка при скачивании документа из ЕГРЮЛ", exc_info=True)
            return EgrulResult(
                pdf_path=None,
                is_error=True,
                status_code=-1 if resp is None else resp.status
            )
| <filename>egrul/worker.py
from aiohttp import ClientSession
from asyncio import sleep
from logging import getLogger
from os.path import join
from base.worker import Worker
from base.data_objects import INNTask
from .data_objects import EgrulResult
logger = getLogger(__name__)
class EgrulWorker(Worker):
def __init__(
self,
*args,
base_pdf_path: str,
**kwargs
):
super().__init__(*args, **kwargs)
self.base_pdf_path = base_pdf_path
async def complete_task(self, session: ClientSession, task: INNTask) -> EgrulResult:
inn = task.inn
resp = None
pdf_path = join(self.base_pdf_path, f"egrul_{inn}.pdf")
try:
async with self.get_response(
session,
'https://egrul.nalog.ru/',
kwargs={"data": {'query': inn}},
method="post"
) as resp:
inn_info = await resp.json()
if resp.status != 200:
raise ValueError(f"Некорректный ответ от egrul.nalog.ru. {resp.status=}, {inn_info=}")
inn_access_key = inn_info["t"]
async with self.get_response(
session,
f'https://egrul.nalog.ru/search-result/{inn_access_key}',
) as resp:
search_result_resp = await resp.json()
file_access_code = search_result_resp['rows'][0]['t']
# гвоорит серверу "дядя, начни готовить для меня вот этот файл"
async with self.get_response(
session,
f'https://egrul.nalog.ru/vyp-request/{file_access_code}'
) as resp:
...
# проверяет, готов ли файл. Статус может быть "wait" и "ready"
max_tries = 3
time_to_sleep = 1
await sleep(0.5)
for _ in range(max_tries):
async with self.get_response(
session,
f'https://egrul.nalog.ru/vyp-status/{file_access_code}',
) as resp:
data = await resp.json()
if data["status"] == "ready":
break
else:
logger.debug(f"Спим {time_to_sleep} секунд")
await sleep(time_to_sleep)
time_to_sleep = 60
else:
logger.error(f"После {max_tries} циклов ожидания, файл для ИНН {inn=} нам не доступен.")
raise RuntimeError()
async with self.get_response(
session,
f'https://egrul.nalog.ru/vyp-download/{file_access_code}',
) as resp:
if resp.status != 200:
raise RuntimeError(f"Статус response для загрузки не 200: {resp.status=}")
with open(pdf_path, 'wb') as pdf_file:
async for data in resp.content.iter_chunked(1024):
pdf_file.write(data)
return EgrulResult(
pdf_path=pdf_path,
is_error=resp.status != 200,
status_code=resp.status
)
except Exception:
logger.error("Произошла ошибка при скачивании документа из ЕГРЮЛ", exc_info=True)
return EgrulResult(
pdf_path=None,
is_error=True,
status_code=-1 if resp is None else resp.status
)
| ru | 0.996376 | # гвоорит серверу "дядя, начни готовить для меня вот этот файл" # проверяет, готов ли файл. Статус может быть "wait" и "ready" | 2.395889 | 2 |
UpstreamTracker/ParseData.py | mcgov/Linux-CommA | 2 | 6624030 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
from datetime import datetime
from typing import List, Optional, Set
import Util.Config
from DatabaseDriver.DatabaseDriver import DatabaseDriver
from DatabaseDriver.SqlClasses import PatchData
from Util.Tracking import get_filenames, get_linux_repo, get_tracked_paths
def should_keep_line(line: str):
    """Return True for commit-message lines worth keeping in the description."""
    # Boilerplate tag lines that add no descriptive value.
    ignore_phrases = (
        "reported-by:",
        "signed-off-by:",
        "reviewed-by:",
        "acked-by:",
        "cc:",
    )
    lowered = line.lower()
    # An empty string never "startswith" a non-empty prefix, so the blank
    # check must stay explicit (answering the old TODO: a bare
    # `not lowered.startswith(...)` would wrongly keep blank lines).
    return bool(lowered) and not lowered.startswith(ignore_phrases)
def process_commits(
    commit_ids: Optional[Set[str]] = None,
    revision: str = "origin/master",
    add_to_database: bool = False,
    since: str = Util.Config.since,
) -> List[PatchData]:
    """
    Parse commits from the tracked Linux repo into PatchData objects.

    commit_ids: optional set of commit SHAs to process; when None, walk the
        history of `revision` restricted to the tracked paths
    revision: revision whose history is walked when commit_ids is None
    add_to_database: when True, insert new patches into the database
        (side effect) and return an empty list; when False, return the
        parsed PatchData objects without touching the database
    since: only process commits after this cutoff (used only when
        commit_ids is None)
    """
    all_patches = []
    num_patches = 0
    num_patches_added = 0
    repo = get_linux_repo()
    if commit_ids is None:
        # We use `--min-parents=1 --max-parents=1` to avoid both
        # merges and graft commits.
        commits = repo.iter_commits(
            rev=revision,
            paths=get_tracked_paths(),
            min_parents=1,
            max_parents=1,
            since=since,
        )
    else:
        # If given a list of commit SHAs, get the commit objects.
        commits = list()
        for c in commit_ids:
            try:
                commits.append(repo.commit(c))
            except ValueError:
                logging.warning(f"Commit '{c}' does not exist in the repo! Skipping...")
    logging.info("Starting commit processing...")
    for commit in commits:
        logging.debug(f"Parsing commit {commit.hexsha}")
        patch = PatchData(
            commitID=commit.hexsha,
            author=commit.author.name,
            authorEmail=commit.author.email,
            authorTime=datetime.utcfromtimestamp(commit.authored_date),
            commitTime=datetime.utcfromtimestamp(commit.committed_date),
        )
        # TODO abstract parsing description to another function to simplify and optimize
        # Especially with the checking of phrases starting in lines, we don't have to do separately.
        # Remove extra whitespace while splitting commit message
        split_message = [line.strip() for line in commit.message.split("\n")]
        patch.subject = split_message[0]
        description_lines = []
        # Check for blank description
        if len(split_message) > 1:
            # Keep only lines that pass should_keep_line (drops blanks and
            # boilerplate tags such as Signed-off-by).
            description_lines = list(filter(should_keep_line, split_message[1:]))
            patch.description = "\n".join(description_lines)
        else:
            patch.description = ""
        # Check if this patch fixes other patches. This will fill
        # fixed_patches with a string of space-separated fixed patches
        # e.g. "SHA1 SHA2 SHA3"
        if patch.description != "":
            fixed_patches_lines = filter(
                lambda x: x.strip().lower().startswith("fixes:"),
                list(description_lines),
            )
            fixed_patches = []
            for line in fixed_patches_lines:
                # "Fixes: <sha> (...)": the SHA is the second whitespace token.
                words = line.split(" ")
                if len(words) > 1:
                    fixed_patches.append(words[1])
            patch.fixedPatches = " ".join(fixed_patches)
        patch.affectedFilenames = " ".join(get_filenames(commit))
        # Parse diff to only keep lines with changes (+ or - at start)
        # diff is passed in as bytes
        def parse_diff(diff):
            diff_lines = diff.decode("utf-8").splitlines()
            return "\n".join(
                filter(lambda line: line.startswith(("+", "-")), diff_lines)
            )
        if len(commit.parents) == 0:
            # First ever commit, we don't need to store this as
            # it'll be present in any distro as it's needed
            # TODO revisit, maybe check against set hash of first commit?
            # Get code some other way? Unsure if first commit matters or not.
            continue
        else:
            # We are ignoring merges so all commits should have a single parent
            commit_diffs = commit.tree.diff(
                commit.parents[0], paths=get_tracked_paths(), create_patch=True
            )
            # The patch commit diffs are stored as "(filename1)\n(diff1)\n(filename2)\n(diff2)..."
            patch.commitDiffs = "\n".join(
                [
                    "%s\n%s" % (diff.a_path, parse_diff(diff.diff))
                    for diff in commit_diffs
                    if diff.a_path is not None
                ]
            )
        if add_to_database:
            # TODO is this check needed if we start on only patches we haven't processed before?
            # If we DO want to keep this check, let's move before parsing everything
            with DatabaseDriver.get_session() as s:
                if (
                    s.query(PatchData.commitID)
                    .filter_by(commitID=patch.commitID)
                    .one_or_none()
                    is None
                ):
                    s.add(patch)
                    num_patches_added += 1
        else:
            all_patches.append(patch)
        num_patches += 1
        # Log progress
        if num_patches % 250 == 0:
            logging.debug("  %d commits processed..." % num_patches)
    if add_to_database:
        logging.info("%s patches added to database." % num_patches_added)
    return all_patches
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
from datetime import datetime
from typing import List, Optional, Set
import Util.Config
from DatabaseDriver.DatabaseDriver import DatabaseDriver
from DatabaseDriver.SqlClasses import PatchData
from Util.Tracking import get_filenames, get_linux_repo, get_tracked_paths
def should_keep_line(line: str):
# Filter description by removing blank and unwanted lines
ignore_phrases = (
"reported-by:",
"signed-off-by:",
"reviewed-by:",
"acked-by:",
"cc:",
)
# TODO: Maybe just `return not line.lower().startswith(ignore_phrases)`?
simplified_line = line.lower()
if not simplified_line:
return False
if simplified_line.startswith(ignore_phrases):
return False
return True
def process_commits(
commit_ids: Optional[Set[str]] = None,
revision: str = "origin/master",
add_to_database: bool = False,
since: str = Util.Config.since,
) -> List[PatchData]:
"""
Look at all commits in the given repo and handle based on distro.
repo: Git.Repo object of the repository where we want to parse commits
rev: revision we want to see the commits of, or None
paths: list of filenames to check commits for
add_to_database: whether or not to add to database (side-effect)
since: if provided, will only process commits after this commit
"""
all_patches = []
num_patches = 0
num_patches_added = 0
repo = get_linux_repo()
if commit_ids is None:
# We use `--min-parents=1 --max-parents=1` to avoid both
# merges and graft commits.
commits = repo.iter_commits(
rev=revision,
paths=get_tracked_paths(),
min_parents=1,
max_parents=1,
since=since,
)
else:
# If given a list of commit SHAs, get the commit objects.
commits = list()
for c in commit_ids:
try:
commits.append(repo.commit(c))
except ValueError:
logging.warning(f"Commit '{c}' does not exist in the repo! Skipping...")
logging.info("Starting commit processing...")
for commit in commits:
logging.debug(f"Parsing commit {commit.hexsha}")
patch = PatchData(
commitID=commit.hexsha,
author=commit.author.name,
authorEmail=commit.author.email,
authorTime=datetime.utcfromtimestamp(commit.authored_date),
commitTime=datetime.utcfromtimestamp(commit.committed_date),
)
# TODO abstract parsing description to another function to simplify and optimize
# Especially with the checking of phrases starting in lines, we don't have to do separately.
# Remove extra whitespace while splitting commit message
split_message = [line.strip() for line in commit.message.split("\n")]
patch.subject = split_message[0]
description_lines = []
# Check for blank description
if len(split_message) > 1:
description_lines = list(filter(should_keep_line, split_message[1:]))
patch.description = "\n".join(description_lines)
else:
patch.description = ""
# Check if this patch fixes other patches. This will fill
# fixed_patches with a string of space-separated fixed patches
# e.g. "SHA1 SHA2 SHA3"
if patch.description != "":
fixed_patches_lines = filter(
lambda x: x.strip().lower().startswith("fixes:"),
list(description_lines),
)
fixed_patches = []
for line in fixed_patches_lines:
words = line.split(" ")
if len(words) > 1:
fixed_patches.append(words[1])
patch.fixedPatches = " ".join(fixed_patches)
patch.affectedFilenames = " ".join(get_filenames(commit))
# Parse diff to only keep lines with changes (+ or - at start)
# diff is passed in as bytes
def parse_diff(diff):
diff_lines = diff.decode("utf-8").splitlines()
return "\n".join(
filter(lambda line: line.startswith(("+", "-")), diff_lines)
)
if len(commit.parents) == 0:
# First ever commit, we don't need to store this as
# it'll be present in any distro as it's needed
# TODO revisit, maybe check against set hash of first commit?
# Get code some other way? Unsure if first commit matters or not.
continue
else:
# We are ignoring merges so all commits should have a single parent
commit_diffs = commit.tree.diff(
commit.parents[0], paths=get_tracked_paths(), create_patch=True
)
# The patch commit diffs are stored as "(filename1)\n(diff1)\n(filename2)\n(diff2)..."
patch.commitDiffs = "\n".join(
[
"%s\n%s" % (diff.a_path, parse_diff(diff.diff))
for diff in commit_diffs
if diff.a_path is not None
]
)
if add_to_database:
# TODO is this check needed if we start on only patches we haven't processed before?
# If we DO want to keep this check, let's move before parsing everything
with DatabaseDriver.get_session() as s:
if (
s.query(PatchData.commitID)
.filter_by(commitID=patch.commitID)
.one_or_none()
is None
):
s.add(patch)
num_patches_added += 1
else:
all_patches.append(patch)
num_patches += 1
# Log progress
if num_patches % 250 == 0:
logging.debug(" %d commits processed..." % num_patches)
if add_to_database:
logging.info("%s patches added to database." % num_patches_added)
return all_patches
| en | 0.881341 | # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # Filter description by removing blank and unwanted lines # TODO: Maybe just `return not line.lower().startswith(ignore_phrases)`? Look at all commits in the given repo and handle based on distro. repo: Git.Repo object of the repository where we want to parse commits rev: revision we want to see the commits of, or None paths: list of filenames to check commits for add_to_database: whether or not to add to database (side-effect) since: if provided, will only process commits after this commit # We use `--min-parents=1 --max-parents=1` to avoid both # merges and graft commits. # If given a list of commit SHAs, get the commit objects. # TODO abstract parsing description to another function to simplify and optimize # Especially with the checking of phrases starting in lines, we don't have to do separately. # Remove extra whitespace while splitting commit message # Check for blank description # Check if this patch fixes other patches. This will fill # fixed_patches with a string of space-separated fixed patches # e.g. "SHA1 SHA2 SHA3" # Parse diff to only keep lines with changes (+ or - at start) # diff is passed in as bytes # First ever commit, we don't need to store this as # it'll be present in any distro as it's needed # TODO revisit, maybe check against set hash of first commit? # Get code some other way? Unsure if first commit matters or not. # We are ignoring merges so all commits should have a single parent # The patch commit diffs are stored as "(filename1)\n(diff1)\n(filename2)\n(diff2)..." # TODO is this check needed if we start on only patches we haven't processed before? # If we DO want to keep this check, let's move before parsing everything # Log progress | 2.378995 | 2 |
provider.py | AndrzejR/beeminder-integrations | 1 | 6624031 | """This is the main executable of the provider job.
Gets the data from the sources and upserts into the DCM.
"""
import logging
import toggl, db, habitica
from datetime import date, timedelta
from time import sleep
# Number of days (today inclusive, going backwards) to synchronize each run.
DATE_RANGE = 3
# Log file path pattern: ./logs/provider_YYYYMMDD.log
LOG_DIR = './logs/provider_'
LOG_DATE = str(date.today().isoformat().replace('-', ''))
LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(filename=LOG_DIR + LOG_DATE + '.log',
                    level=logging.DEBUG, format=LOG_FORMAT)
logging.info("****************** Starting a new provider run ******************")
# Upsert one day at a time: insert when the DCM has no datapoint for that
# day, update when the Toggl total differs from the stored value.
for DateDelta in range(DATE_RANGE):
    date_to_sync = date.today()-timedelta(days=DateDelta)
    currently_in_toggl = toggl.get_data(date_to_sync)
    currently_in_dcm = db.get_toggl_dcm_datapoint(date_to_sync)
    if not currently_in_dcm:
        db.insert_toggl_dcm(date_to_sync, currently_in_toggl)
    elif currently_in_toggl != currently_in_dcm[0]:
        db.update_toggl_dcm(date_to_sync, currently_in_toggl)
    sleep(2) # horrible hack; as it turns out I can't just get the data grouped per day for a date range
    # and there is a limit of 1 API call per second in Toggl
| """This is the main executable of the provider job.
Gets the data from the sources and upserts into the DCM.
"""
import logging
import toggl, db, habitica
from datetime import date, timedelta
from time import sleep
DATE_RANGE = 3
LOG_DIR = './logs/provider_'
LOG_DATE = str(date.today().isoformat().replace('-', ''))
LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(filename=LOG_DIR + LOG_DATE + '.log',
level=logging.DEBUG, format=LOG_FORMAT)
logging.info("****************** Starting a new provider run ******************")
for DateDelta in range(DATE_RANGE):
date_to_sync = date.today()-timedelta(days=DateDelta)
currently_in_toggl = toggl.get_data(date_to_sync)
currently_in_dcm = db.get_toggl_dcm_datapoint(date_to_sync)
if not currently_in_dcm:
db.insert_toggl_dcm(date_to_sync, currently_in_toggl)
elif currently_in_toggl != currently_in_dcm[0]:
db.update_toggl_dcm(date_to_sync, currently_in_toggl)
sleep(2) # horrible hack; as it turns out I can't just get the data grouped per day for a date range
# and there is a limit of 1 API call per second in Toggl
| en | 0.942041 | This is the main executable of the provider job. Gets the data from the sources and upserts into the DCM. # horrible hack; as it turns out I can't just get the data grouped per day for a date range # and there is a limit of 1 API call per second in Toggl | 2.349739 | 2 |
login_gui.py | Sourabh-12354/Login_Page-Design-Gui-By-Python | 0 | 6624032 | <filename>login_gui.py
from tkinter import *
from PIL import Image, ImageTk
import hashlib
import pymysql as mysql
from tkinter import messagebox
# Root window: fixed 800x500, placed 300px right / 100px down from the screen
# origin. min == max pins the size so the user cannot resize it.
window = Tk()
window.geometry("800x500+300+100")
window.minsize(800, 500)
window.maxsize(800, 500)
window.title("SOUHARDO")
window.iconbitmap("C:\Python\login_icons.ico")
# Full-window background image. `pic` must stay referenced at module level or
# Tk would drop the image when the Python object is garbage-collected.
image = Image.open("C:\Python\Computer.jpg")
pic = ImageTk.PhotoImage(image)
label0 = Label(image = pic)
label0.pack(fill = BOTH, expand = 'yes')
#global valu
def register_GUI():
    """Open the registration window with one labelled entry per field.

    Populates the module-level globals entry1..entry6 that register() reads.
    """
    win = Toplevel(window)
    win.geometry("700x500+0+0")
    win.title("Register")
    global entry1, entry2, entry3, entry4, entry5, entry6
    # BUG FIX: the original passed the StringVar *class* (not an instance) as
    # textvar, so all six entries shared one underlying Tk variable and typing
    # in any field mirrored into every other. Each entry now gets its own var.
    captions = ["User_Name:", "Password:", "Email:",
                "Gender:", "Age:", "Occupation:"]
    entries = []
    for row, caption in enumerate(captions):
        label = Label(win, text=caption, font=("arial", 16, "bold"))
        label.place(x=0, y=10 + row * 40)
        field_var = StringVar()
        entry = Entry(win, textvar=field_var, width=30,
                      font=("arial", 16, "bold"), bg="blue")
        entry._var = field_var  # keep a reference so the Tk var isn't GC'd
        entry.place(x=140, y=10 + row * 40)
        entries.append(entry)
    entry1, entry2, entry3, entry4, entry5, entry6 = entries
    register1 = Button(win, text="Register", bg="blue", relief="raised",
                       command=register, width=10, font=("arial", 16, "bold"))
    register1.place(x=230, y=250)
def reset_GUI():
    """Open the password-reset window that asks for the account's user name.

    Populates the module-level global ent1 that reset() reads.
    """
    win = Toplevel(window)
    win.geometry("500x500+0+0")
    win.title("Reset")
    global ent1, ent2
    lebel1 = Label(win, text="User_Name:", font=("arial", 16, "bold"))
    lebel1.place(x=0, y=10)
    # BUG FIX: the original passed the StringVar class itself as textvar,
    # sharing one Tk variable with every other widget doing the same; give
    # this entry its own instance instead.
    user_var = StringVar()
    ent1 = Entry(win, textvar=user_var, width=20,
                 font=("arial", 16, "bold"), bg="blue")
    ent1._var = user_var  # keep a reference so the Tk variable is not GC'd
    ent1.place(x=140, y=10)
    reset1 = Button(win, text="Reset", bg="blue", relief="raised",
                    command=reset, width=10, font=("arial", 16, "bold"))
    reset1.place(x=170, y=60)
def pass_change():
    """Persist a new SHA-256-hashed password for the user chosen in reset().

    Reads the new password from the module-level entry ``ent3`` and the
    target user name from the module-level ``name`` set by reset().
    """
    mydb = mysql.connect(host='localhost', user='root', passwd='', db='login')
    try:
        cur = mydb.cursor()
        password = ent3.get()
        hashed = hash_map(password)
        # Parameterized UPDATE: safe against SQL injection.
        sql = 'UPDATE user_details SET password = %s WHERE name = %s'
        cur.execute(sql, [hashed, name])
        mydb.commit()
        cur.close()
    finally:
        mydb.close()  # BUG FIX: the connection was never closed (leak)
    messagebox.showinfo("Success", "Password has Changed Successfully.")
def new_password_GUI():
    """Open the window that asks for the replacement password.

    Populates the module-level global ent3 that pass_change() reads.
    """
    win = Toplevel(window)
    win.geometry("500x500+0+0")
    win.title("New Password")
    lebel1 = Label(win, text="New Password:", font=("arial", 16, "bold"))
    lebel1.place(x=0, y=10)
    global ent3
    # BUG FIX: the original passed the StringVar class (shared Tk variable)
    # as textvar; use a dedicated instance.
    pass_var = StringVar()
    ent3 = Entry(win, textvar=pass_var, width=20,
                 font=("arial", 16, "bold"), bg="blue")
    ent3._var = pass_var  # keep a reference so the Tk variable is not GC'd
    ent3.place(x=160, y=10)
    submit = Button(win, text="Submit", bg="blue", relief="raised",
                    command=pass_change, width=10, font=("arial", 16, "bold"))
    submit.place(x=170, y=60)
def hash_map(password):
    """Return the hex-encoded SHA-256 digest of *password* (a str)."""
    return hashlib.sha256(password.encode()).hexdigest()
def login():
    """Check the main-window credentials against the user_details table.

    On success a blank session window is opened; on failure an error dialog
    is shown. Reads its input from the module-level textBox1/textBox2.
    """
    typed_name = textBox1.get()
    typed_password = textBox2.get()
    mydb = mysql.connect(host='localhost', user='root', passwd='', db='login')
    try:
        cur = mydb.cursor()
        # Parameterized query: safe against SQL injection.
        command = "Select name,password FROM user_details WHERE name=%s"
        cur.execute(command, (typed_name,))
        data = cur.fetchone()
        cur.close()
    finally:
        mydb.close()  # BUG FIX: the connection was never closed (leak)
    if data is None:  # no such user (idiom fix: `is None`, not `== None`)
        # BUG FIX: user-facing typo "Icorrect" corrected.
        messagebox.showinfo("Error", "User-Name Or Password Incorrect!!")
    elif hash_map(typed_password) == data[1]:
        # BUG FIX: user-facing typo "Succesfully" corrected.
        messagebox.showinfo("Success", "Login Successfully")
        win = Toplevel(window)
        win.geometry("500x500+0+0")
        win.title("login")
    else:
        messagebox.showinfo("Error", "User-Name Or Password is Incorrect !!")
def register():
    """Validate the registration form and insert a new user_details row.

    Reads the module-level entries entry1..entry6 created by register_GUI().
    """
    # Every field is mandatory; report the first empty one and bail out
    # (replaces six copy-pasted length checks; messages are unchanged).
    required = [(entry1, "User-name"), (entry2, "Password"),
                (entry3, "Email"), (entry4, "Gender"),
                (entry5, "Age"), (entry6, "Occupation")]
    for entry, caption in required:
        if not entry.get():
            messagebox.showinfo("Error", caption + " Field cann't be empty.")
            return
    mydb = mysql.connect(host='localhost', user='root', passwd='', db='login')
    try:
        cur = mydb.cursor()
        userName = entry1.get()
        # Parameterized lookup: safe against SQL injection.
        command = "Select name FROM user_details WHERE name=%s"
        cur.execute(command, (userName,))
        if cur.fetchone() is None:  # idiom fix: `is None`
            hashed = hash_map(entry2.get())
            val = [entry1.get(), hashed, entry3.get(),
                   entry4.get(), entry5.get(), entry6.get()]
            sql = ("Insert INTO user_details(name,password,email,gender,age,occupation)"
                   "VALUES(%s,%s,%s,%s,%s,%s)")
            cur.execute(sql, val)
            mydb.commit()
            messagebox.showinfo("Success", "Register Successfull")
        else:
            messagebox.showinfo("Error", "This Name Already Registered!!Use Another Name")
        cur.close()
    finally:
        mydb.close()  # BUG FIX: connection leaked in the original
def reset():
    """Look up the typed user name; if it exists, open the new-password window.

    Stores the user name in the module-level ``name`` for pass_change().
    """
    global name
    name = ent1.get()
    if not name:  # validate before touching the database (original connected first)
        messagebox.showinfo("Error", "Enter User-Name!!")
        return
    mydb = mysql.connect(host='localhost', user='root', passwd='', db='login')
    try:
        cur = mydb.cursor()
        # Parameterized lookup: safe against SQL injection.
        command = "Select name FROM user_details WHERE name=%s"
        cur.execute(command, (name,))
        data = cur.fetchone()
        cur.close()
    finally:
        mydb.close()  # BUG FIX: connection leaked in the original
    if data is None:  # idiom fix: `is None`
        messagebox.showinfo("Error", "User-Name is Incorrect")
    else:  # redundant `elif data != None` collapsed to else
        new_password_GUI()
# --- Main login screen: title, user-name / password fields, action buttons ---
label1 = Label(window, text = " Login System ",bg="black" ,fg = "blue", font = ("new times roman", 30, "bold"))
label1.place(x = 350, y = 70)
label2 = Label(window, text = "User Name :", font = ("arial", 16, "bold"),bg="red",width="9")
label2.place(x = 250, y = 200)
# Note: these two StringVar() instances are created correctly (per-widget),
# unlike the sub-windows; login() reads the widgets via .get().
userName = StringVar()
textBox1 = Entry(window, textvar = userName,width = 18, font = ("arial", 16, "bold"),bg="blue")
textBox1.place(x = 385, y = 200)
label3 = Label(window, text = "Password :", font = ("arial", 16, "bold"),width="9",bg="red")
label3.place(x = 250, y = 260)
password = StringVar()
textBox2 = Entry(window, textvar = password, width = 18, font = ("arial", 16, "bold"),bg="blue")
textBox2.place(x = 385, y = 260)
# Buttons wire straight to the module-level handlers defined above.
button1 = Button(window, text = " Login ", fg = "black", bg = "blue", relief = "raised", font = ("arial", 16, "bold"), command = login)
button1.place(x = 280, y = 300)
button2 = Button(window, text = " Register ", fg = "black", bg = "Yellow", relief = "raised", font = ("arial", 16, "bold"), command = register_GUI)
button2.place(x = 440, y = 300)
button3 = Button(window, text = " Reset Password ", width='15',fg = "black", bg = "red", relief = "raised", font = ("arial", 16, "bold"), command = reset_GUI)
button3.place(x = 330, y = 350)
#display window
window.mainloop() | <filename>login_gui.py
from tkinter import *
from PIL import Image, ImageTk
import hashlib
import pymysql as mysql
from tkinter import messagebox
window = Tk()
window.geometry("800x500+300+100")
window.minsize(800, 500)
window.maxsize(800, 500)
window.title("SOUHARDO")
window.iconbitmap("C:\Python\login_icons.ico")
image = Image.open("C:\Python\Computer.jpg")
pic = ImageTk.PhotoImage(image)
label0 = Label(image = pic)
label0.pack(fill = BOTH, expand = 'yes')
#global valu
def register_GUI():
win=Toplevel(window)
win.geometry("700x500+0+0")
win.title("Register")
lebel1=Label(win,text="User_Name:",font=("arial",16,"bold"))
lebel1.place(x=0,y=10)
userName=StringVar
global entry1,entry2,entry3,entry4,entry5,entry6
entry1=Entry(win, textvar = userName,width = 30, font = ("arial", 16, "bold"),bg="blue")
entry1.place(x=140,y=10)
lebel2=Label(win,text="Password:",font=("arial",16,"bold"))
lebel2.place(x=0,y=50)
password=StringVar
entry2=Entry(win, textvar = password,width = 30, font = ("arial", 16, "bold"),bg="blue")
entry2.place(x=140,y=50)
lebel3=Label(win,text="Email:",font=("arial",16,"bold"))
lebel3.place(x=0,y=90)
email=StringVar
entry3=Entry(win, textvar = email,width = 30, font = ("arial", 16, "bold"),bg="blue")
entry3.place(x=140,y=90)
lebel4=Label(win,text="Gender:",font=("arial",16,"bold"))
lebel4.place(x=0,y=130)
gender=StringVar
entry4=Entry(win, textvar = gender,width = 30, font = ("arial", 16, "bold"),bg="blue")
entry4.place(x=140,y=130)
lebel5=Label(win,text="Age:",font=("arial",16,"bold"))
lebel5.place(x=0,y=170)
age=StringVar
entry5=Entry(win, textvar = age,width = 30, font = ("arial", 16, "bold"),bg="blue")
entry5.place(x=140,y=170)
lebel6=Label(win,text="Occupation:",font=("arial",16,"bold"))
lebel6.place(x=0,y=210)
occupation=StringVar
entry6=Entry(win, textvar = occupation,width = 30, font = ("arial", 16, "bold"),bg="blue")
entry6.place(x=140,y=210)
register1=Button(win,text="Register",bg="blue",relief = "raised",command=register,width=10,font = ("arial", 16, "bold"))
register1.place(x=230,y=250)
return
def reset_GUI():
win=Toplevel(window)
win.geometry("500x500+0+0")
win.title("Reset")
global ent1,ent2
lebel1=Label(win,text="User_Name:",font=("arial",16,"bold"))
lebel1.place(x=0,y=10)
userName=StringVar
ent1=Entry(win, textvar = userName,width = 20, font = ("arial", 16, "bold"),bg="blue")
ent1.place(x=140,y=10)
reset1=Button(win,text="Reset",bg="blue",relief = "raised",command=reset,width=10,font = ("arial", 16, "bold"))
reset1.place(x=170,y=60)
return
def pass_change():
mydb = mysql.connect(host = 'localhost',user = 'root',passwd = '',db = 'login')
cur = mydb.cursor()
password=ent3.get()
has=hash_map(password)
val=[has,name]
sql = 'UPDATE user_details SET password = %s WHERE name = %s'
cur.execute(sql,val)
mydb.commit()
messagebox.showinfo("Success","Password has Changed Successfully.")
def new_password_GUI():
win=Toplevel(window)
win.geometry("500x500+0+0")
win.title("New Password")
lebel1=Label(win,text="New Password:",font=("arial",16,"bold"))
lebel1.place(x=0,y=10)
userName=StringVar
global ent3
ent3=Entry(win, textvar = userName,width = 20, font = ("arial", 16, "bold"),bg="blue")
ent3.place(x=160,y=10)
submit=Button(win,text="Submit",bg="blue",relief = "raised",command=pass_change,width=10,font = ("arial", 16, "bold"))
submit.place(x=170,y=60)
return
def hash_map(password):
hash_object=hashlib.sha256(password.encode())
hash_dig=hash_object.hexdigest()
return hash_dig
def login():
numme=textBox1.get()
password1=textBox2.get()
mydb = mysql.connect(host = 'localhost',user = 'root',passwd = '',db = 'login')
cur = mydb.cursor()
command = "Select name,password FROM user_details WHERE name=%s"
results=cur.execute(command,numme)
data=cur.fetchone()
if(data==None):
messagebox.showinfo("Error","User-Name Or Password Icorrect!!")
else:
has=hash_map(password1)
if has==data[1]:
messagebox.showinfo("Success","Login Succesfully")
win=Toplevel(window)
win.geometry("500x500+0+0")
win.title("login")
else:
messagebox.showinfo("Error","User-Name Or Password is Icorrect !!")
def register():
lnth0=len(entry1.get())
lnth1=len(entry2.get())
lnth2=len(entry3.get())
lnth3=len(entry4.get())
lnth4=len(entry5.get())
lnth5=len(entry6.get())
if lnth0==0:
messagebox.showinfo("Error","User-name Field cann't be empty.")
elif lnth1==0:
messagebox.showinfo("Error","Password Field cann't be empty.")
elif lnth2==0:
messagebox.showinfo("Error","Email Field cann't be empty.")
elif lnth3==0:
messagebox.showinfo("Error","Gender Field cann't be empty.")
elif lnth4==0:
messagebox.showinfo("Error","Age Field cann't be empty.")
elif lnth5==0:
messagebox.showinfo("Error","Occupation Field cann't be empty.")
else:
mydb = mysql.connect(host = 'localhost',user = 'root',passwd = '',db = 'login')
cur = mydb.cursor()
userName=entry1.get()
password=entry2.get()
command = "Select name FROM user_details WHERE name=%s"
results=cur.execute(command,userName)
data=cur.fetchone()
if data==None:
has=hash_map(password)
val=[entry1.get(),has,entry3.get(),entry4.get(),entry5.get(),entry6.get()]
sql = "Insert INTO user_details(name,password,email,gender,age,occupation)VALUES(%s,%s,%s,%s,%s,%s)"
cur.execute(sql,val)
mydb.commit()
size=cur.rowcount
messagebox.showinfo("Success","Register Successfull")
cur.close()
else:
messagebox.showinfo("Error","This Name Already Registered!!Use Another Name")
def reset():
mydb = mysql.connect(host = 'localhost',user = 'root',passwd = '',db = 'login')
cur = mydb.cursor()
global name
name=ent1.get()
lnth1=len(ent1.get())
if(lnth1==0):
messagebox.showinfo("Error","Enter User-Name!!")
else:
command = "Select name FROM user_details WHERE name=%s"
results=cur.execute(command,name)
data=cur.fetchone()
if data==None:
messagebox.showinfo("Error","User-Name is Incorrect")
elif data!=None:
new_password_GUI()
label1 = Label(window, text = " Login System ",bg="black" ,fg = "blue", font = ("new times roman", 30, "bold"))
label1.place(x = 350, y = 70)
label2 = Label(window, text = "User Name :", font = ("arial", 16, "bold"),bg="red",width="9")
label2.place(x = 250, y = 200)
userName = StringVar()
textBox1 = Entry(window, textvar = userName,width = 18, font = ("arial", 16, "bold"),bg="blue")
textBox1.place(x = 385, y = 200)
label3 = Label(window, text = "Password :", font = ("arial", 16, "bold"),width="9",bg="red")
label3.place(x = 250, y = 260)
password = StringVar()
textBox2 = Entry(window, textvar = password, width = 18, font = ("arial", 16, "bold"),bg="blue")
textBox2.place(x = 385, y = 260)
button1 = Button(window, text = " Login ", fg = "black", bg = "blue", relief = "raised", font = ("arial", 16, "bold"), command = login)
button1.place(x = 280, y = 300)
button2 = Button(window, text = " Register ", fg = "black", bg = "Yellow", relief = "raised", font = ("arial", 16, "bold"), command = register_GUI)
button2.place(x = 440, y = 300)
button3 = Button(window, text = " Reset Password ", width='15',fg = "black", bg = "red", relief = "raised", font = ("arial", 16, "bold"), command = reset_GUI)
button3.place(x = 330, y = 350)
#display window
window.mainloop() | en | 0.208555 | #global valu #display window | 3.066149 | 3 |
proxyapp/views.py | eugenechia95/whaleproxy | 1 | 6624033 | <reponame>eugenechia95/whaleproxy<filename>proxyapp/views.py
# Standard library
import ast
import datetime, re
import json

# Third-party
import requests

# Django
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.cache import cache_page
from django.core.cache import cache
from django import http
from django.conf import settings
from django.http import JsonResponse
# Shared secret that every request must present verbatim in the Authorization
# header ('<KEY>' is a redacted placeholder in this dump).
# NOTE(review): hard-coded credential in source control -- load it from
# Django settings or an environment variable instead.
whaletoken = '<KEY>'
# Create your views here.
@csrf_exempt
def getview(request):
    """Proxy endpoint for the whale collection at whalemarket.saleswhale.io.

    GET    -- list whales, served from the cache when a fresh copy exists.
    POST   -- create a whale upstream (request body forwarded as JSON).
    DELETE -- purge the whole cache.
    PUT    -- force a re-sync of the list and of every individual whale.
    Every request must carry the shared token in the Authorization header;
    anything else gets a 401.
    """
    authorization_token = request.META.get('HTTP_AUTHORIZATION')
    print(authorization_token)
    if authorization_token != whaletoken:
        return HttpResponse(status=401)
    cache_key = '<KEY>'  # needs to be unique (key for the whole list)
    cache_time = 86400   # time in seconds for cache to be valid
    if request.method == 'GET':
        data = cache.get(cache_key)  # returns None if no key-value pair
        # NOTE(review): a cached non-2xx requests.Response is falsy, so error
        # responses are re-fetched rather than served from cache.
        if data:
            print("Fetching from Cache")
            return HttpResponse(status=data.status_code, content=data.text)
        print("Not in Cache. Fetching from Server")
        url = 'https://whalemarket.saleswhale.io/whales'
        urlheaders = {'Authorization': authorization_token}
        response = requests.get(url, headers=urlheaders)
        cache.set(cache_key, response, cache_time)
        return HttpResponse(status=response.status_code, content=response.text)
    elif request.method == 'POST':
        url = 'https://whalemarket.saleswhale.io/whales'
        # BUG FIX: the body is JSON, so parse it with json.loads;
        # ast.literal_eval cannot handle JSON literals (true/false/null).
        datadict = json.loads(request.body.decode('utf-8'))
        urlheaders = {'Authorization': authorization_token}
        upstream = requests.post(url, json=datadict, headers=urlheaders)
        # BUG FIX: forward the upstream status/body like the other branches;
        # the original HttpResponse(upstream) always answered 200.
        return HttpResponse(status=upstream.status_code, content=upstream.text)
    elif request.method == 'DELETE':
        cache.clear()
        print('Cache Purged Successfully')
        return HttpResponse(status=204)  # BUG FIX: int status, not the string '204'
    elif request.method == 'PUT':
        print('Syncing all whales in cache')
        url = 'https://whalemarket.saleswhale.io/whales'
        urlheaders = {'Authorization': authorization_token}
        response = requests.get(url, headers=urlheaders)
        cache.set(cache_key, response, cache_time)
        whaleslist = json.loads(response.text).get('whales')
        for whale in whaleslist:
            whaleid = whale.get('id')
            print(whaleid)
            newurl = url + '/' + str(whaleid)
            print(newurl)
            whale_response = requests.get(newurl, headers=urlheaders)
            print(whale_response)
            # Cache each whale under its own id without clobbering cache_key
            # (the original reused the cache_key variable inside this loop).
            cache.set(whaleid, whale_response, cache_time)
        print('All Whales Synced!')
        return HttpResponse(status=200, content="All Whales Synced!")
#GET Request returns whale instance corresponding to id in argument.
@csrf_exempt
def getidview(request, id):
    """Return the single whale identified by *id*, serving GETs from cache."""
    auth_header = request.META.get('HTTP_AUTHORIZATION')
    print(auth_header)
    if auth_header != whaletoken:
        return HttpResponse(status=401)
    cache_key = id      # needs to be unique
    cache_time = 86400  # time in seconds for cache to be valid
    if request.method == 'GET':
        cached = cache.get(cache_key)  # None when the key is absent
        if cached:
            print("Fetching from Cache")
            return HttpResponse(status=cached.status_code, content=cached.text)
        print("Not in Cache. Fetching from Server")
        upstream = requests.get('https://whalemarket.saleswhale.io/whales/' + id,
                                headers={'Authorization': auth_header})
        cache.set(cache_key, upstream, cache_time)
        return HttpResponse(status=upstream.status_code, content=upstream.text)
@csrf_exempt
def hitratio(request):
    """Report memcached retrieval counters and the resulting cache hit ratio."""
    token = request.META.get('HTTP_AUTHORIZATION')
    print(token)
    if token != whaletoken:
        return HttpResponse(status=401)
    # First backend's raw memcached stats dict (private django API).
    stats = cache._cache.get_stats()[0][1]
    hits = int(stats.get('get_hits'))
    total_retrievals = int(stats.get('cmd_get'))
    ratio = (hits / total_retrievals * 100) if total_retrievals else 0
    outcome = ('Total Retrieval Request: %d \nTotal Cache Hits: %d \nCache Hit Ratio: %.2f' % (total_retrievals, hits, ratio) + '%')
    return HttpResponse(status=200, content=outcome)
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, HttpResponseRedirect
import requests
from django.views.decorators.cache import cache_page
from django.core.cache import cache
from django import http
from django.conf import settings
import datetime, re
from django.http import JsonResponse
import ast
whaletoken = '<KEY>'
# Create your views here.
@csrf_exempt
def getview(request):
authorization_token = request.META.get('HTTP_AUTHORIZATION')
print(authorization_token)
if authorization_token != whaletoken:
return HttpResponse(status = 401)
cache_key = '<KEY>' # needs to be unique
cache_time = 86400 # time in seconds for cache to be valid
#GET Request returns dictionary of all whale instances
if request.method == 'GET':
data = cache.get(cache_key) # returns None if no key-value pair
if data:
print("Fetching from Cache")
return HttpResponse(status = data.status_code, content = data.text)
if not data:
print("Not in Cache. Fetching from Server")
url = 'https://whalemarket.saleswhale.io/whales'
urlheaders = {}
urlheaders['Authorization'] = authorization_token
response = requests.get(url, headers = urlheaders)
cache.set(cache_key, response, cache_time)
return HttpResponse(status = response.status_code, content=response.text)
#url = 'https://whalemarket.saleswhale.io/whales'
#urlheaders = {}
#urlheaders['Authorization'] = '<KEY>'
#response = requests.get(url, headers = urlheaders)
#x = HttpResponse(status = response.status_code,content=response.text)
#return HttpResponse(x)
#POST Request creates new whale instance in whalemarket
elif request.method == 'POST':
url = 'https://whalemarket.saleswhale.io/whales'
datadict = ast.literal_eval(request.body.decode('utf-8'))
urlheaders = {}
urlheaders['Authorization'] = authorization_token
#return HttpResponse(status = 400)
y = requests.post(url, json = datadict, headers = urlheaders)
return HttpResponse(y)
#DELETE Request purges cache of service
elif request.method == 'DELETE':
cache.clear()
print('Cache Purged Successfully')
return HttpResponse(status = '204')
#PUT Request forces a sync of every whale in whalemarket
elif request.method == 'PUT':
print('Syncing all whales in cache')
data = cache.get(cache_key)
url = 'https://whalemarket.saleswhale.io/whales'
urlheaders = {}
urlheaders['Authorization'] = authorization_token
response = requests.get(url, headers = urlheaders)
cache.set(cache_key, response, cache_time)
responselist = ast.literal_eval(response.text)
whaleslist = responselist.get('whales')
for whale in whaleslist:
whaleid = whale.get('id')
print(whaleid)
newurl = url + '/' + str(whaleid)
print(newurl)
response = requests.get(newurl, headers = urlheaders)
print(response)
cache_key = whaleid
cache.set(cache_key, response, cache_time)
print('All Whales Synced!')
return HttpResponse(status = '200', content= "All Whales Synced!")
#GET Request returns whale instance corresponding to id in argument.
@csrf_exempt
def getidview(request, id):
authorization_token = request.META.get('HTTP_AUTHORIZATION')
print(authorization_token)
if authorization_token != whaletoken:
return HttpResponse(status = 401)
cache_key = id # needs to be unique
cache_time = 86400 # time in seconds for cache to be valid
if request.method == 'GET':
data = cache.get(cache_key) # returns None if no key-value pair
if data:
print("Fetching from Cache")
return HttpResponse(status = data.status_code, content = data.text)
if not data:
print("Not in Cache. Fetching from Server")
url = 'https://whalemarket.saleswhale.io/whales/' + id
urlheaders = {}
urlheaders['Authorization'] = authorization_token
response = requests.get(url, headers = urlheaders)
cache.set(cache_key, response, cache_time)
return HttpResponse(status = response.status_code, content=response.text)
@csrf_exempt
def hitratio(request):
authorization_token = request.META.get('HTTP_AUTHORIZATION')
print(authorization_token)
if authorization_token != whaletoken:
return HttpResponse(status = 401)
cachestats = cache._cache.get_stats()[0][1]
hits = int(cachestats.get('get_hits'))
total_retrievals = int(cachestats.get('cmd_get'))
if total_retrievals == 0:
hitratio = 0
else:
hitratio = hits/total_retrievals*100
outcome = ('Total Retrieval Request: %d \nTotal Cache Hits: %d \nCache Hit Ratio: %.2f' % (total_retrievals, hits, hitratio) + '%')
return HttpResponse(status = 200, content = outcome) | en | 0.568351 | # Create your views here. # needs to be unique # time in seconds for cache to be valid #GET Request returns dictionary of all whale instances # returns None if no key-value pair #url = 'https://whalemarket.saleswhale.io/whales' #urlheaders = {} #urlheaders['Authorization'] = '<KEY>' #response = requests.get(url, headers = urlheaders) #x = HttpResponse(status = response.status_code,content=response.text) #return HttpResponse(x) #POST Request creates new whale instance in whalemarket #return HttpResponse(status = 400) #DELETE Request purges cache of service #PUT Request forces a sync of every whale in whalemarket #GET Request returns whale instance corresponding to id in argument. # needs to be unique # time in seconds for cache to be valid # returns None if no key-value pair | 2.067898 | 2 |
Chal01/Chal01Part2.py | CasparovJR/AOC2021 | 0 | 6624034 | with open("./Chal01.txt", "r") as f:
l = []
s = 0
before = 0
after = 0
increased = 0
for i in f.readlines():
s = int(i.split()[0])
l.append(s)
if len(l) == 3:
after = sum(l)
l.pop(0)
if after > before and before != 0:
increased += 1
before = after
print(increased) | with open("./Chal01.txt", "r") as f:
l = []
s = 0
before = 0
after = 0
increased = 0
for i in f.readlines():
s = int(i.split()[0])
l.append(s)
if len(l) == 3:
after = sum(l)
l.pop(0)
if after > before and before != 0:
increased += 1
before = after
print(increased) | none | 1 | 3.272004 | 3 | |
tests/test_long_refresh_token_views.py | doordash/django-rest-framework-jwt-refresh-token | 0 | 6624035 | from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from refreshtoken.models import RefreshToken
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework_jwt import utils
from .urls import urlpatterns # noqa
User = get_user_model()
class RefreshTokenTestCase(APITestCase):
    """End-to-end tests for the refresh-token endpoints and JWT delegation."""

    urls = __name__  # this module doubles as the URLconf (see urlpatterns import)

    def setUp(self):
        """Create two ordinary users (each with a refresh token) and an admin."""
        self.email = '<EMAIL>'
        self.username = 'jpueblo'
        self.password = 'password'
        self.user = User.objects.create_user(
            self.username, self.email, self.password)
        self.token = RefreshToken.objects.create(user=self.user,
                                                 app='test-app')
        email1 = '<EMAIL>'
        username1 = 'jonnytestpants'
        password1 = 'password'
        self.user1 = User.objects.create_user(username1, email1, password1)
        self.token1 = RefreshToken.objects.create(user=self.user1,
                                                  app='another-app')
        self.list_url = reverse('refreshtoken-list')
        self.detail_url = reverse('refreshtoken-detail',
                                  kwargs={'key': self.token.key})
        self.detail_url1 = reverse('refreshtoken-detail',
                                   kwargs={'key': self.token1.key})
        self.delegate_url = reverse('delegate-tokens')
        self.user_admin = User.objects.create_user(
            'adminator', self.email, self.password,
        )
        self.user_admin.is_superuser = True
        self.user_admin.save()

    # -- helpers (factored out of the repeated per-test boilerplate) --------

    def _authenticate(self, user):
        """Attach a JWT Authorization header for *user* to the test client."""
        self.client.credentials(
            HTTP_AUTHORIZATION='JWT ' + utils.jwt_encode_handler(
                utils.jwt_payload_handler(user)))

    def _assert_status(self, response, expected):
        """Assert the response status, echoing status and body on failure."""
        self.assertEqual(response.status_code, expected,
                         (response.status_code, response.content))

    # -- tests --------------------------------------------------------------

    def test_repr_refresh_token(self):
        # Smoke test: rendering the token must not raise.
        print(self.token)

    def test_requires_auth(self):
        # Every endpoint must reject unauthenticated requests with 401.
        for call in (lambda: self.client.get(self.list_url),
                     lambda: self.client.get(self.detail_url),
                     lambda: self.client.delete(self.detail_url),
                     lambda: self.client.post(self.list_url)):
            self._assert_status(call(), status.HTTP_401_UNAUTHORIZED)

    def test_get_refresh_token_list_with_admin(self):
        # A superuser sees every token, not just their own.
        self._authenticate(self.user_admin)
        response = self.client.get(self.list_url)
        self.assertEqual(len(response.data), 2)

    def test_get_refresh_token_list(self):
        # Each user only sees their own token in the list.
        self._authenticate(self.user)
        response = self.client.get(self.list_url)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(self.token.key, response.data[0]['key'])
        self.client.force_authenticate(self.user1)
        response = self.client.get(self.list_url)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(self.token1.key, response.data[0]['key'])
        self.assertEqual(RefreshToken.objects.count(), 2)

    def test_get_refresh_token_detail(self):  # renamed: was "refresth" (typo)
        self._authenticate(self.user)
        self._assert_status(self.client.get(self.detail_url),
                            status.HTTP_200_OK)
        # Another user's token must look like it does not exist.
        self._assert_status(self.client.get(self.detail_url1),
                            status.HTTP_404_NOT_FOUND)

    def test_delete_refresh_token(self):  # renamed: was "refresth" (typo)
        self._authenticate(self.user)
        self._assert_status(self.client.delete(self.detail_url),
                            status.HTTP_204_NO_CONTENT)
        self._assert_status(self.client.delete(self.detail_url1),
                            status.HTTP_404_NOT_FOUND)

    def test_create_refresh_token(self):  # renamed: was "refresth" (typo)
        data = {
            'app': 'gandolf'
        }
        self._authenticate(self.user)
        response = self.client.post(self.list_url, data, format='json')
        self._assert_status(response, status.HTTP_201_CREATED)
        self.assertEqual(response.data['user'], self.user.pk)
        self.assertEqual(response.data['app'], data['app'])

    def test_delegate_jwt(self):
        data = {
            'client_id': 'gandolf',
            'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
            'refresh_token': self.token1.key,
            'api_type': 'app',
        }
        response = self.client.post(self.delegate_url,
                                    data=data,
                                    format='json')
        self._assert_status(response, status.HTTP_200_OK)
        self.assertIn('token', response.data)

    def test_invalid_body_delegate_jwt(self):
        # client_id is missing
        data = {
            'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
            'refresh_token': self.token1.key,
            'api_type': 'app',
        }
        response = self.client.post(self.delegate_url, data=data,
                                    format='json')
        self._assert_status(response, status.HTTP_400_BAD_REQUEST)

    def test_delegate_jwt_wrong_token(self):  # renamed: was "jwti" (typo)
        data = {
            'client_id': 'gandolf',
            'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
            'refresh_token': '<PASSWORD>',  # key that matches no RefreshToken
            'api_type': 'app',
        }
        response = self.client.post(self.delegate_url,
                                    data=data,
                                    format='json')
        self._assert_status(response, status.HTTP_401_UNAUTHORIZED)

    def test_delegate_jwt_inactive_user(self):  # renamed: was "jwti" (typo)
        data = {
            'client_id': 'gandolf',
            'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
            'refresh_token': self.token1.key,
            'api_type': 'app',
        }
        self.user1.is_active = False
        self.user1.save()
        response = self.client.post(self.delegate_url,
                                    data=data,
                                    format='json')
        self._assert_status(response, status.HTTP_401_UNAUTHORIZED)
| from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from refreshtoken.models import RefreshToken
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework_jwt import utils
from .urls import urlpatterns # noqa
User = get_user_model()
class RefreshTokenTestCase(APITestCase):
urls = __name__
def setUp(self):
self.email = '<EMAIL>'
self.username = 'jpueblo'
self.password = 'password'
self.user = User.objects.create_user(
self.username, self.email, self.password)
self.token = RefreshToken.objects.create(user=self.user,
app='test-app')
email1 = '<EMAIL>'
username1 = 'jonnytestpants'
password1 = 'password'
self.user1 = User.objects.create_user(username1, email1, password1)
self.token1 = RefreshToken.objects.create(user=self.user1,
app='another-app')
self.list_url = reverse('refreshtoken-list')
self.detail_url = reverse(
'refreshtoken-detail',
kwargs={'key': self.token.key}
)
self.detail_url1 = reverse(
'refreshtoken-detail',
kwargs={'key': self.token1.key}
)
self.delegate_url = reverse('delegate-tokens')
self.user_admin = User.objects.create_user(
'adminator', self.email, self.password,
)
self.user_admin.is_superuser = True
self.user_admin.save()
def test_repr_refresh_token(self):
print(self.token)
def test_requires_auth(self):
response = self.client.get(self.list_url)
self.assertEqual(
response.status_code,
status.HTTP_401_UNAUTHORIZED,
(response.status_code, response.content)
)
response = self.client.get(self.detail_url)
self.assertEqual(
response.status_code,
status.HTTP_401_UNAUTHORIZED,
(response.status_code, response.content)
)
response = self.client.delete(self.detail_url)
self.assertEqual(
response.status_code,
status.HTTP_401_UNAUTHORIZED,
(response.status_code, response.content)
)
response = self.client.post(self.list_url)
self.assertEqual(
response.status_code,
status.HTTP_401_UNAUTHORIZED,
(response.status_code, response.content)
)
def test_get_refresh_token_list_with_admin(self):
self.client.credentials(
HTTP_AUTHORIZATION='JWT ' + utils.jwt_encode_handler(
utils.jwt_payload_handler(self.user_admin)))
response = self.client.get(self.list_url)
self.assertEqual(len(response.data), 2)
def test_get_refresh_token_list(self):
self.client.credentials(
HTTP_AUTHORIZATION='JWT ' + utils.jwt_encode_handler(
utils.jwt_payload_handler(self.user)))
response = self.client.get(self.list_url)
self.assertEqual(len(response.data), 1)
resp0 = response.data[0]
self.assertEqual(self.token.key, resp0['key'])
self.client.force_authenticate(self.user1)
response = self.client.get(self.list_url)
self.assertEqual(len(response.data), 1)
resp0 = response.data[0]
self.assertEqual(self.token1.key, resp0['key'])
self.assertEqual(RefreshToken.objects.count(), 2)
def test_get_refresth_token_detail(self):
self.client.credentials(
HTTP_AUTHORIZATION='JWT ' + utils.jwt_encode_handler(
utils.jwt_payload_handler(self.user)))
response = self.client.get(self.detail_url)
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
(response.status_code, response.content)
)
response = self.client.get(self.detail_url1)
self.assertEqual(
response.status_code,
status.HTTP_404_NOT_FOUND,
(response.status_code, response.content)
)
def test_delete_refresth_token(self):
self.client.credentials(
HTTP_AUTHORIZATION='JWT ' + utils.jwt_encode_handler(
utils.jwt_payload_handler(self.user)))
response = self.client.delete(self.detail_url)
self.assertEqual(
response.status_code,
status.HTTP_204_NO_CONTENT,
(response.status_code, response.content)
)
response = self.client.delete(self.detail_url1)
self.assertEqual(
response.status_code,
status.HTTP_404_NOT_FOUND,
(response.status_code, response.content)
)
def test_create_refresth_token(self):
data = {
'app': 'gandolf'
}
self.client.credentials(
HTTP_AUTHORIZATION='JWT ' + utils.jwt_encode_handler(
utils.jwt_payload_handler(self.user)))
response = self.client.post(self.list_url, data, format='json')
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
(response.status_code, response.content)
)
self.assertEqual(response.data['user'], self.user.pk)
self.assertEqual(response.data['app'], data['app'])
def test_delegate_jwt(self):
data = {
'client_id': 'gandolf',
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'refresh_token': self.token1.key,
'api_type': 'app',
}
response = self.client.post(self.delegate_url,
data=data,
format='json')
self.assertEqual(
response.status_code,
status.HTTP_200_OK,
(response.status_code, response.content)
)
self.assertIn('token', response.data)
def test_invalid_body_delegate_jwt(self):
# client_id is missing
data = {
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'refresh_token': self.token1.key,
'api_type': 'app',
}
response = self.client.post(self.delegate_url, data=data,
format='json')
self.assertEqual(
response.status_code,
status.HTTP_400_BAD_REQUEST,
(response.status_code, response.content)
)
def test_delegate_jwti_wrong_token(self):
data = {
'client_id': 'gandolf',
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'refresh_token': '<PASSWORD>',
'api_type': 'app',
}
response = self.client.post(self.delegate_url,
data=data,
format='json')
self.assertEqual(
response.status_code,
status.HTTP_401_UNAUTHORIZED,
(response.status_code, response.content)
)
def test_delegate_jwti_inactive_user(self):
data = {
'client_id': 'gandolf',
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'refresh_token': self.token1.key,
'api_type': 'app',
}
self.user1.is_active = False
self.user1.save()
response = self.client.post(self.delegate_url,
data=data,
format='json')
self.assertEqual(
response.status_code,
status.HTTP_401_UNAUTHORIZED,
(response.status_code, response.content)
)
| en | 0.881924 | # noqa # client_id is missing | 2.303371 | 2 |
send-GTFS-rt-to-GeoEvent/GTFS-rt-to-GeoEvent.py | d-wasserman/public-transit-tools | 130 | 6624036 | <gh_stars>100-1000
# Copyright 2015 Esri
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
#
# distributed under the License is distributed on an "AS IS" BASIS,
#
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
#
# limitations under the License.
def main():
# need the GTFS Python bindings
from google.transit import gtfs_realtime_pb2
import urllib
import json
import socket
import time
# create socket connection to hostname/port on which a TCP GeoEvent input is running
tcpSocket = socket.create_connection(("<hostname>", 5565))
# polling model - run, wait 5 seconds, run, wait, run, wait, etc
while True:
feed = gtfs_realtime_pb2.FeedMessage()
# this particular feed is from CT Transit (http://www.cttransit.com/about/developers/gtfsdata/)
response = urllib.urlopen('http://172.16.31.10/realtimefeed/vehicle/vehiclepositions.pb')
# read the Protocal Buffers (.pb) file
feed.ParseFromString(response.read())
# loop through feed entities
for entity in feed.entity:
# check for a vehicle in feed entity
if entity.HasField('vehicle'):
# build a simple id,lon,lat message to send to GeoEvent.
msg = str(entity.vehicle.vehicle.label) + "," + \
str(entity.vehicle.position.longitude) + "," + \
str(entity.vehicle.position.latitude) + "\n"
# send message
tcpSocket.send(msg)
time.sleep(5)
if __name__ == '__main__':
main()
| # Copyright 2015 Esri
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
#
# distributed under the License is distributed on an "AS IS" BASIS,
#
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
#
# limitations under the License.
def main():
# need the GTFS Python bindings
from google.transit import gtfs_realtime_pb2
import urllib
import json
import socket
import time
# create socket connection to hostname/port on which a TCP GeoEvent input is running
tcpSocket = socket.create_connection(("<hostname>", 5565))
# polling model - run, wait 5 seconds, run, wait, run, wait, etc
while True:
feed = gtfs_realtime_pb2.FeedMessage()
# this particular feed is from CT Transit (http://www.cttransit.com/about/developers/gtfsdata/)
response = urllib.urlopen('http://172.16.31.10/realtimefeed/vehicle/vehiclepositions.pb')
# read the Protocal Buffers (.pb) file
feed.ParseFromString(response.read())
# loop through feed entities
for entity in feed.entity:
# check for a vehicle in feed entity
if entity.HasField('vehicle'):
# build a simple id,lon,lat message to send to GeoEvent.
msg = str(entity.vehicle.vehicle.label) + "," + \
str(entity.vehicle.position.longitude) + "," + \
str(entity.vehicle.position.latitude) + "\n"
# send message
tcpSocket.send(msg)
time.sleep(5)
if __name__ == '__main__':
main() | en | 0.823147 | # Copyright 2015 Esri # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # need the GTFS Python bindings # create socket connection to hostname/port on which a TCP GeoEvent input is running # polling model - run, wait 5 seconds, run, wait, run, wait, etc # this particular feed is from CT Transit (http://www.cttransit.com/about/developers/gtfsdata/) # read the Protocal Buffers (.pb) file # loop through feed entities # check for a vehicle in feed entity # build a simple id,lon,lat message to send to GeoEvent. # send message | 2.691779 | 3 |
extensions/games.py | xxori/PeepoBot | 0 | 6624037 | <filename>extensions/games.py
'''
MIT License
Copyright (c) 2020 <NAME> & <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import discord
from discord.ext import commands
import random
import utils
WORDS = open("words.txt").read().split("\n")
class Games(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.ongoing_games = {}
@commands.command()
async def hangman(self, ctx):
if ctx.channel.id in self.ongoing_games.keys():
return await ctx.send(":x: **There is already an ongoing game in the current channel**")
word = random.choice(WORDS)
disp = ["\_" for _ in range(len(word))]
disp[0] = ">\_<"
self.ongoing_games[ctx.channel.id] = {
"game": "hangman",
"user": ctx.author,
"word": word,
"current_letter": 0,
"turnNo": 0,
"damage": 0,
"guessed_letters": [],
"display_string": " ".join(disp)
}
print(self.ongoing_games[ctx.channel.id])
await ctx.send(f":white_check_mark: **Hangman Started**\n"+self.ongoing_games[ctx.channel.id]["display_string"])
@commands.command()
async def stopgame(self, ctx):
if ctx.channel.id not in self.ongoing_games.keys():
return await ctx.send(":x: **There is not ongoing game in the current channel**")
self.ongoing_games.pop(ctx.channel.id)
await ctx.send("**:white_check_mark: Game successfully ended**")
@commands.Cog.listener()
async def on_message(self, message):
ctx = await self.bot.get_context(message)
if ctx.valid:
return
if ctx.author.bot:
return
if ctx.channel.id not in self.ongoing_games.keys():
return
gameData = self.ongoing_games[ctx.channel.id]
if ctx.author != gameData["user"]:
return
if gameData["game"] == "hangman":
guess = message.content.split(" ")[0]
if len(guess) > 1:
return await ctx.send("Please send only a single letter to guess the next letter in the word")
if not guess.isalpha():
return await ctx.send("Letters only please")
gameData["turnNo"] += 1
if message.content[0].lower() == gameData["word"][gameData["current_letter"]]:
if (gameData["current_letter"]+1) >= len(gameData["word"]):
await ctx.send(f"**Congratulations! You won! The word was ``{gameData['word']}``**")
del self.ongoing_games[ctx.channel.id]
else:
disp = gameData["display_string"].split(" ")
disp[gameData["current_letter"]] = "__" + guess + "__"
gameData["current_letter"] += 1
disp[gameData["current_letter"]] = ">\_<"
gameData["display_string"] = " ".join(disp)
await ctx.send(gameData["display_string"])
else:
gameData["damage"] += 1
if gameData["damage"] >= 7:
await ctx.send(f"**You died! The word was ``{gameData['word']}``**")
del self.ongoing_games[ctx.channel.id]
else:
gameData["guessed_letters"].append(guess)
await ctx.send(f"**Incorrect: ``{7-gameData['damage']}`` lives left**\nGuessed letters: {' '.join(gameData['guessed_letters'])}\n{gameData['display_string']}")
def setup(bot):
bot.add_cog(Games(bot)) | <filename>extensions/games.py
'''
MIT License
Copyright (c) 2020 <NAME> & <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import discord
from discord.ext import commands
import random
import utils
WORDS = open("words.txt").read().split("\n")
class Games(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.ongoing_games = {}
@commands.command()
async def hangman(self, ctx):
if ctx.channel.id in self.ongoing_games.keys():
return await ctx.send(":x: **There is already an ongoing game in the current channel**")
word = random.choice(WORDS)
disp = ["\_" for _ in range(len(word))]
disp[0] = ">\_<"
self.ongoing_games[ctx.channel.id] = {
"game": "hangman",
"user": ctx.author,
"word": word,
"current_letter": 0,
"turnNo": 0,
"damage": 0,
"guessed_letters": [],
"display_string": " ".join(disp)
}
print(self.ongoing_games[ctx.channel.id])
await ctx.send(f":white_check_mark: **Hangman Started**\n"+self.ongoing_games[ctx.channel.id]["display_string"])
@commands.command()
async def stopgame(self, ctx):
if ctx.channel.id not in self.ongoing_games.keys():
return await ctx.send(":x: **There is not ongoing game in the current channel**")
self.ongoing_games.pop(ctx.channel.id)
await ctx.send("**:white_check_mark: Game successfully ended**")
@commands.Cog.listener()
async def on_message(self, message):
ctx = await self.bot.get_context(message)
if ctx.valid:
return
if ctx.author.bot:
return
if ctx.channel.id not in self.ongoing_games.keys():
return
gameData = self.ongoing_games[ctx.channel.id]
if ctx.author != gameData["user"]:
return
if gameData["game"] == "hangman":
guess = message.content.split(" ")[0]
if len(guess) > 1:
return await ctx.send("Please send only a single letter to guess the next letter in the word")
if not guess.isalpha():
return await ctx.send("Letters only please")
gameData["turnNo"] += 1
if message.content[0].lower() == gameData["word"][gameData["current_letter"]]:
if (gameData["current_letter"]+1) >= len(gameData["word"]):
await ctx.send(f"**Congratulations! You won! The word was ``{gameData['word']}``**")
del self.ongoing_games[ctx.channel.id]
else:
disp = gameData["display_string"].split(" ")
disp[gameData["current_letter"]] = "__" + guess + "__"
gameData["current_letter"] += 1
disp[gameData["current_letter"]] = ">\_<"
gameData["display_string"] = " ".join(disp)
await ctx.send(gameData["display_string"])
else:
gameData["damage"] += 1
if gameData["damage"] >= 7:
await ctx.send(f"**You died! The word was ``{gameData['word']}``**")
del self.ongoing_games[ctx.channel.id]
else:
gameData["guessed_letters"].append(guess)
await ctx.send(f"**Incorrect: ``{7-gameData['damage']}`` lives left**\nGuessed letters: {' '.join(gameData['guessed_letters'])}\n{gameData['display_string']}")
def setup(bot):
bot.add_cog(Games(bot)) | en | 0.766033 | MIT License Copyright (c) 2020 <NAME> & <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 3.018894 | 3 |
formation/xml.py | mardukbp/Formation | 71 | 6624038 | <filename>formation/xml.py
"""
XML utilities for handling formation design files
"""
# ======================================================================= #
# Copyright (c) 2020 Hoverset Group. #
# ======================================================================= #
import functools
import re
from collections import defaultdict
from lxml import etree
try:
import Tkinter as tk
import ttk
except ModuleNotFoundError:
import tkinter as tk
import tkinter.ttk as ttk
namespaces = {
"layout": "http://www.hoversetformationstudio.com/layouts/",
"attr": "http://www.hoversetformationstudio.com/styles/",
"menu": "http://www.hoversetformationstudio.com/menu",
}
_reversed_namespaces = dict(zip(namespaces.values(), namespaces.keys()))
tag_rgx = re.compile(r"(.+)\.([^.]+)")
_attr_rgx = re.compile(r"{(?P<namespace>.+)}(?P<attr>.+)")
_var_rgx = re.compile(r".+Var$")
def _register_namespaces():
for k in namespaces:
etree.register_namespace(k, namespaces[k])
_register_namespaces()
class BaseConverter:
"""
Base xml converter class. Contains utility methods useful in
dealing with xml used in formation design files.
"""
required_fields = []
@staticmethod
def _is_var(tag):
return _var_rgx.match(tag)
@staticmethod
def get_source_line_info(node: etree._Element):
"""
Returned a formatted message containing the line number in the
xml file where the node is found
:param node: Node whose source line is to be determined
:return: formatted string containing the source line
"""
return "" if node.sourceline is None else "Line {}: ".format(node.sourceline)
@classmethod
def _get_class(cls, node):
"""
Obtain the class represented by the node for the sake of object creation
:param node: Node whose class is to be determined
:return:
"""
raise NotImplementedError("get_class method needs to be implemented")
@staticmethod
def create_element(parent, tag):
"""
Create a :class:`lxml.etree._Element` node from a string tag
:param parent: parent node for the node to be created
:param tag: a string for the node. To obtain a node `<object></object>`
tag will be the string "object"
:return: a :class:`etree.SubElement` sub node if parent is provide else a :class:`lxml.etree.Element`
root node
"""
if parent is not None:
return etree.SubElement(parent, tag)
return etree.Element(tag)
@classmethod
def load_attributes(cls, attributes, node, namespace=None):
"""
Set namespaced attributes to a node. Given a node `<object></object>`
.. code-block:: python
node = lxml.etree.Element('object')
layout = {"width": "40", "height": "70"}
# Assuming layout is a registered namespace
BaseConverter.load_attributes(layout, node, namespace='layout')
print(lxml.etree.tostring(node))
This outputs the following xml
.. code-block:: xml
<object layout:width=40 layout:height=70></object>
:param attributes: a dictionary containing the attributes
:param node: node to be updated with attributes
:param namespace: namespace to be used if any
"""
for attribute in attributes:
node.attrib[cls.get_attr_name(namespace, attribute)] = str(
attributes[attribute]
)
@staticmethod
def get_attr_name(namespace, attr):
"""
Get the fully qualified namespaced attribute name. For instance, given xml node:
.. code-block:: xml
<object layout:width=40 layout:height=70></object>
.. code-block:: python
BaseConverter.get_attr_name("layout", "width")
# returns {http://www.hoversetformationstudio.com/layouts/}width
The fully qualified name can be used to directly set the node's attribute
:param namespace: the attribute namespace
:param attr: attribute to be determined
:return: A fully qualified namespaced attribute name
"""
if namespace is None:
return attr
return "{{{}}}{}".format(namespaces.get(namespace), attr)
@staticmethod
def extract_attr_name(attr):
"""
Get the attribute name in a fully qualified namespaced name. A fully qualified name like
``{http://www.hoversetformationstudio.com/layouts/}width`` will return ``width``
:param attr: namespaced attribute from which the attribute is to be extracted
:return: simple extracted attribute name
"""
match = _attr_rgx.search(attr)
if match:
return match.group("attr")
return attr
@classmethod
def drop_attr(cls, node, attr, namespace=None):
"""
Remove an attribute from a node.
:param node: Node in which to drop the attribute
:param attr: simple name of attribute to be dropped
:param namespace: attribute's namespace if any
"""
attr = cls.get_attr_name(namespace, attr)
if attr in node.attrib:
node.attrib.pop(attr)
@classmethod
@functools.lru_cache(maxsize=4)
def attrib(cls, node):
"""
Get all node attributes grouped by namespace. Given the following xml node:
.. code-block:: xml
<object
name=60
attr:color=red
attr:text=something
layout:width=50
layout:height=70
></object>
.. code-block:: python
>>> BaseConverter.attrib(node)
{"attr":{"color":"red", "text": "something"},
"layout":{"width": "50", "height": "70"}}
>>> BaseConverter.required_fields.append('color')
>>> BaseConverter.attrib(node)
{"attr":{"color":"red", "text": "something"},
"layout":{"width": "50", "height": "70"},
"color":{}}
To ensure that a namespace is always included in the grouped result
even if it is empty, add it to :py:attr:`BaseConverter.required_fields`
:param node: Node whose attributes are to be obtained
:return: a dictionary containing attributes grouped by namespace
"""
grouped = defaultdict(dict)
# add required fields
for field in cls.required_fields:
grouped[field] = {}
for attr in node.attrib:
match = _attr_rgx.search(attr)
if match:
group = _reversed_namespaces.get(match.group("namespace"))
grouped[group][match.group("attr")] = node.attrib.get(attr)
return grouped
@classmethod
def get_attr(cls, node, attr, namespace=None):
"""
Get an attribute (value) from a node given the attribute name and namespace (if any)
:param node: Node whose attribute is to be read
:param attr: simple name of attribute to be read
:param namespace: namespace of attribute if any
:return: attribute value
"""
return node.attrib.get(cls.get_attr_name(namespace, attr))
@classmethod
def is_equal(cls, node1, node2):
"""
Compare two lxml nodes for equality. It checks for attribute equality,
children and child order equality and tag name equality. Order of attributes
does not matter
:param node1: Node to be compared
:param node2: Node to be compared
:return: True if node1 is equal to node2
"""
# if items are not nodes use default behaviour
if not isinstance(node1, etree._Element) or not isinstance(
node2, etree._Element
):
return node1 == node2
tag_eq = node1.tag == node2.tag
attrib_eq = node1.attrib == node2.attrib
child_eq = len(list(node1)) == len(list(node2))
# if any of the above is false no need to even check further
if child_eq and tag_eq and attrib_eq:
for sub_node1, sub_node2 in zip(list(node1), list(node2)):
child_eq = cls.is_equal(sub_node1, sub_node2)
# if the equality check fails break immediately
if not child_eq:
break
return tag_eq and attrib_eq and child_eq
| <filename>formation/xml.py
"""
XML utilities for handling formation design files
"""
# ======================================================================= #
# Copyright (c) 2020 Hoverset Group. #
# ======================================================================= #
import functools
import re
from collections import defaultdict
from lxml import etree
try:
import Tkinter as tk
import ttk
except ModuleNotFoundError:
import tkinter as tk
import tkinter.ttk as ttk
namespaces = {
"layout": "http://www.hoversetformationstudio.com/layouts/",
"attr": "http://www.hoversetformationstudio.com/styles/",
"menu": "http://www.hoversetformationstudio.com/menu",
}
_reversed_namespaces = dict(zip(namespaces.values(), namespaces.keys()))
tag_rgx = re.compile(r"(.+)\.([^.]+)")
_attr_rgx = re.compile(r"{(?P<namespace>.+)}(?P<attr>.+)")
_var_rgx = re.compile(r".+Var$")
def _register_namespaces():
for k in namespaces:
etree.register_namespace(k, namespaces[k])
_register_namespaces()
class BaseConverter:
"""
Base xml converter class. Contains utility methods useful in
dealing with xml used in formation design files.
"""
required_fields = []
@staticmethod
def _is_var(tag):
return _var_rgx.match(tag)
@staticmethod
def get_source_line_info(node: etree._Element):
"""
Returned a formatted message containing the line number in the
xml file where the node is found
:param node: Node whose source line is to be determined
:return: formatted string containing the source line
"""
return "" if node.sourceline is None else "Line {}: ".format(node.sourceline)
@classmethod
def _get_class(cls, node):
"""
Obtain the class represented by the node for the sake of object creation
:param node: Node whose class is to be determined
:return:
"""
raise NotImplementedError("get_class method needs to be implemented")
@staticmethod
def create_element(parent, tag):
"""
Create a :class:`lxml.etree._Element` node from a string tag
:param parent: parent node for the node to be created
:param tag: a string for the node. To obtain a node `<object></object>`
tag will be the string "object"
:return: a :class:`etree.SubElement` sub node if parent is provide else a :class:`lxml.etree.Element`
root node
"""
if parent is not None:
return etree.SubElement(parent, tag)
return etree.Element(tag)
@classmethod
def load_attributes(cls, attributes, node, namespace=None):
"""
Set namespaced attributes to a node. Given a node `<object></object>`
.. code-block:: python
node = lxml.etree.Element('object')
layout = {"width": "40", "height": "70"}
# Assuming layout is a registered namespace
BaseConverter.load_attributes(layout, node, namespace='layout')
print(lxml.etree.tostring(node))
This outputs the following xml
.. code-block:: xml
<object layout:width=40 layout:height=70></object>
:param attributes: a dictionary containing the attributes
:param node: node to be updated with attributes
:param namespace: namespace to be used if any
"""
for attribute in attributes:
node.attrib[cls.get_attr_name(namespace, attribute)] = str(
attributes[attribute]
)
@staticmethod
def get_attr_name(namespace, attr):
"""
Get the fully qualified namespaced attribute name. For instance, given xml node:
.. code-block:: xml
<object layout:width=40 layout:height=70></object>
.. code-block:: python
BaseConverter.get_attr_name("layout", "width")
# returns {http://www.hoversetformationstudio.com/layouts/}width
The fully qualified name can be used to directly set the node's attribute
:param namespace: the attribute namespace
:param attr: attribute to be determined
:return: A fully qualified namespaced attribute name
"""
if namespace is None:
return attr
return "{{{}}}{}".format(namespaces.get(namespace), attr)
@staticmethod
def extract_attr_name(attr):
"""
Get the attribute name in a fully qualified namespaced name. A fully qualified name like
``{http://www.hoversetformationstudio.com/layouts/}width`` will return ``width``
:param attr: namespaced attribute from which the attribute is to be extracted
:return: simple extracted attribute name
"""
match = _attr_rgx.search(attr)
if match:
return match.group("attr")
return attr
@classmethod
def drop_attr(cls, node, attr, namespace=None):
"""
Remove an attribute from a node.
:param node: Node in which to drop the attribute
:param attr: simple name of attribute to be dropped
:param namespace: attribute's namespace if any
"""
attr = cls.get_attr_name(namespace, attr)
if attr in node.attrib:
node.attrib.pop(attr)
@classmethod
@functools.lru_cache(maxsize=4)
def attrib(cls, node):
"""
Get all node attributes grouped by namespace. Given the following xml node:
.. code-block:: xml
<object
name=60
attr:color=red
attr:text=something
layout:width=50
layout:height=70
></object>
.. code-block:: python
>>> BaseConverter.attrib(node)
{"attr":{"color":"red", "text": "something"},
"layout":{"width": "50", "height": "70"}}
>>> BaseConverter.required_fields.append('color')
>>> BaseConverter.attrib(node)
{"attr":{"color":"red", "text": "something"},
"layout":{"width": "50", "height": "70"},
"color":{}}
To ensure that a namespace is always included in the grouped result
even if it is empty, add it to :py:attr:`BaseConverter.required_fields`
:param node: Node whose attributes are to be obtained
:return: a dictionary containing attributes grouped by namespace
"""
grouped = defaultdict(dict)
# add required fields
for field in cls.required_fields:
grouped[field] = {}
for attr in node.attrib:
match = _attr_rgx.search(attr)
if match:
group = _reversed_namespaces.get(match.group("namespace"))
grouped[group][match.group("attr")] = node.attrib.get(attr)
return grouped
@classmethod
def get_attr(cls, node, attr, namespace=None):
"""
Get an attribute (value) from a node given the attribute name and namespace (if any)
:param node: Node whose attribute is to be read
:param attr: simple name of attribute to be read
:param namespace: namespace of attribute if any
:return: attribute value
"""
return node.attrib.get(cls.get_attr_name(namespace, attr))
@classmethod
def is_equal(cls, node1, node2):
"""
Compare two lxml nodes for equality. It checks for attribute equality,
children and child order equality and tag name equality. Order of attributes
does not matter
:param node1: Node to be compared
:param node2: Node to be compared
:return: True if node1 is equal to node2
"""
# if items are not nodes use default behaviour
if not isinstance(node1, etree._Element) or not isinstance(
node2, etree._Element
):
return node1 == node2
tag_eq = node1.tag == node2.tag
attrib_eq = node1.attrib == node2.attrib
child_eq = len(list(node1)) == len(list(node2))
# if any of the above is false no need to even check further
if child_eq and tag_eq and attrib_eq:
for sub_node1, sub_node2 in zip(list(node1), list(node2)):
child_eq = cls.is_equal(sub_node1, sub_node2)
# if the equality check fails break immediately
if not child_eq:
break
return tag_eq and attrib_eq and child_eq
| en | 0.628366 | XML utilities for handling formation design files # ======================================================================= # # Copyright (c) 2020 Hoverset Group. # # ======================================================================= # Base xml converter class. Contains utility methods useful in dealing with xml used in formation design files. Returned a formatted message containing the line number in the xml file where the node is found :param node: Node whose source line is to be determined :return: formatted string containing the source line Obtain the class represented by the node for the sake of object creation :param node: Node whose class is to be determined :return: Create a :class:`lxml.etree._Element` node from a string tag :param parent: parent node for the node to be created :param tag: a string for the node. To obtain a node `<object></object>` tag will be the string "object" :return: a :class:`etree.SubElement` sub node if parent is provide else a :class:`lxml.etree.Element` root node Set namespaced attributes to a node. Given a node `<object></object>` .. code-block:: python node = lxml.etree.Element('object') layout = {"width": "40", "height": "70"} # Assuming layout is a registered namespace BaseConverter.load_attributes(layout, node, namespace='layout') print(lxml.etree.tostring(node)) This outputs the following xml .. code-block:: xml <object layout:width=40 layout:height=70></object> :param attributes: a dictionary containing the attributes :param node: node to be updated with attributes :param namespace: namespace to be used if any Get the fully qualified namespaced attribute name. For instance, given xml node: .. code-block:: xml <object layout:width=40 layout:height=70></object> .. 
code-block:: python BaseConverter.get_attr_name("layout", "width") # returns {http://www.hoversetformationstudio.com/layouts/}width The fully qualified name can be used to directly set the node's attribute :param namespace: the attribute namespace :param attr: attribute to be determined :return: A fully qualified namespaced attribute name Get the attribute name in a fully qualified namespaced name. A fully qualified name like ``{http://www.hoversetformationstudio.com/layouts/}width`` will return ``width`` :param attr: namespaced attribute from which the attribute is to be extracted :return: simple extracted attribute name Remove an attribute from a node. :param node: Node in which to drop the attribute :param attr: simple name of attribute to be dropped :param namespace: attribute's namespace if any Get all node attributes grouped by namespace. Given the following xml node: .. code-block:: xml <object name=60 attr:color=red attr:text=something layout:width=50 layout:height=70 ></object> .. code-block:: python >>> BaseConverter.attrib(node) {"attr":{"color":"red", "text": "something"}, "layout":{"width": "50", "height": "70"}} >>> BaseConverter.required_fields.append('color') >>> BaseConverter.attrib(node) {"attr":{"color":"red", "text": "something"}, "layout":{"width": "50", "height": "70"}, "color":{}} To ensure that a namespace is always included in the grouped result even if it is empty, add it to :py:attr:`BaseConverter.required_fields` :param node: Node whose attributes are to be obtained :return: a dictionary containing attributes grouped by namespace # add required fields Get an attribute (value) from a node given the attribute name and namespace (if any) :param node: Node whose attribute is to be read :param attr: simple name of attribute to be read :param namespace: namespace of attribute if any :return: attribute value Compare two lxml nodes for equality. It checks for attribute equality, children and child order equality and tag name equality. 
Order of attributes does not matter :param node1: Node to be compared :param node2: Node to be compared :return: True if node1 is equal to node2 # if items are not nodes use default behaviour # if any of the above is false no need to even check further # if the equality check fails break immediately | 2.43202 | 2 |
multiplayer-rl/mprl/utility_services/worker/console.py | oslumbers/pipeline-psro | 26 | 6624039 | import json
import logging
import time
import grpc
from google.protobuf.empty_pb2 import Empty
from minio import Minio
from mprl.utility_services.cloud_storage import DEFAULT_LOCAL_SAVE_PATH
from mprl.utility_services.protobuf.population_server_pb2 import ManagerStats
from mprl.utility_services.worker.base_interface import BaseClientManagerInterface, WorkerType, \
_INFINITE_RETRY_INTERVAL_SECONDS
logger = logging.getLogger(__name__)
class ConsoleManagerInterface(BaseClientManagerInterface):
    """Manager-server interface for console (administrative) workers.

    Thin wrapper over BaseClientManagerInterface that registers with the
    population server as a CONSOLE worker and exposes manager statistics.
    """

    def __init__(self,
                 server_host: str,
                 port: int,
                 worker_id: str,
                 storage_client: Minio,
                 minio_bucket_name: str,
                 minio_local_dir: str = DEFAULT_LOCAL_SAVE_PATH
                 ):
        # All setup is delegated to the base class; the only specialization
        # here is fixing worker_type to WorkerType.CONSOLE.
        super(ConsoleManagerInterface, self).__init__(
            server_host=server_host,
            port=port,
            worker_type=WorkerType.CONSOLE,
            worker_id=worker_id,
            storage_client=storage_client,
            minio_bucket_name=minio_bucket_name,
            minio_local_dir=minio_local_dir)

    def get_manager_stats(self, infinite_retry_on_error: bool = True):
        """Fetch the manager's statistics over gRPC and return them as a dict.

        :param infinite_retry_on_error: if True, retry forever on gRPC errors,
            sleeping _INFINITE_RETRY_INTERVAL_SECONDS between attempts;
            if False, re-raise the first grpc.RpcError.
        :return: dict parsed from the server's manager_stats_json payload
        """
        while True:
            try:
                request = Empty()
                response: ManagerStats = self._stub.GetManagerStats(request)
                break
            except grpc.RpcError as err:
                if infinite_retry_on_error:
                    logger.warning(f"grpc.RPCError raised while getting manager stats:\n{err}\n"
                                   f"(retrying in {_INFINITE_RETRY_INTERVAL_SECONDS} seconds)")
                    time.sleep(_INFINITE_RETRY_INTERVAL_SECONDS)
                else:
                    raise
        stats_dict = json.loads(response.manager_stats_json)
        return stats_dict
| import json
import logging
import time
import grpc
from google.protobuf.empty_pb2 import Empty
from minio import Minio
from mprl.utility_services.cloud_storage import DEFAULT_LOCAL_SAVE_PATH
from mprl.utility_services.protobuf.population_server_pb2 import ManagerStats
from mprl.utility_services.worker.base_interface import BaseClientManagerInterface, WorkerType, \
_INFINITE_RETRY_INTERVAL_SECONDS
logger = logging.getLogger(__name__)
class ConsoleManagerInterface(BaseClientManagerInterface):
def __init__(self,
server_host: str,
port: int,
worker_id: str,
storage_client: Minio,
minio_bucket_name: str,
minio_local_dir: str = DEFAULT_LOCAL_SAVE_PATH
):
super(ConsoleManagerInterface, self).__init__(
server_host=server_host,
port=port,
worker_type=WorkerType.CONSOLE,
worker_id=worker_id,
storage_client=storage_client,
minio_bucket_name=minio_bucket_name,
minio_local_dir=minio_local_dir)
def get_manager_stats(self, infinite_retry_on_error: bool = True):
while True:
try:
request = Empty()
response: ManagerStats = self._stub.GetManagerStats(request)
break
except grpc.RpcError as err:
if infinite_retry_on_error:
logger.warning(f"grpc.RPCError raised while getting manager stats:\n{err}\n"
f"(retrying in {_INFINITE_RETRY_INTERVAL_SECONDS} seconds)")
time.sleep(_INFINITE_RETRY_INTERVAL_SECONDS)
else:
raise
stats_dict = json.loads(response.manager_stats_json)
return stats_dict
| none | 1 | 1.926658 | 2 | |
model/DHS_RCL.py | lartpang/DHSNet-PyTorch | 3 | 6624040 | import torch
import torch.nn as nn
class RCL_Module(nn.Module):
    """Recurrent Convolutional Layer (RCL) module from DHSNet.

    Refines a coarse saliency mask using features from the preceding
    convolutional network, sharing conv weights across recurrent steps.
    """

    def __init__(self, in_channels):
        super(RCL_Module, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 64, 1)
        self.sigmoid = nn.Sigmoid()
        # 65 input channels = 64 projected feature channels + 1 mask channel.
        self.conv2 = nn.Conv2d(65, 64, 3, padding=1)
        self.relu = nn.ReLU()
        self.bn = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv4 = nn.Conv2d(64, 1, 3, padding=1)

    def forward(self, x, smr):
        """
        Forward pass of the RCL module.

        :param x: feature map from the preceding conv network; its channel
            count varies per stage, hence the in_channels constructor arg
        :param smr: predicted saliency mask from the previous stage (1 channel)
        :return: refined predicted saliency mask (1 channel)
        """
        # Project the backbone features down to 64 channels with a 1x1 conv.
        out1 = self.conv1(x)
        out1 = self.sigmoid(out1)
        out2 = self.sigmoid(smr)
        # Concatenate the previous-stage mask with the projected features
        # (64 + 1 = 65 channels) and fuse them.
        out = torch.cat((out1, out2), 1)
        out = self.conv2(out)
        out = self.relu(out)
        out = self.bn(out)
        out_share = out
        for i in range(3):
            out = self.conv3(out)
            # RCL combines the shared features with each recurrent step's
            # output by element-wise summation.
            out = torch.add(out, out_share)
            out = self.relu(out)
            out = self.bn(out)
        out = self.sigmoid(self.conv4(out))
        return out
| import torch
import torch.nn as nn
class RCL_Module(nn.Module):
def __init__(self, in_channels):
super(RCL_Module, self).__init__()
self.conv1 = nn.Conv2d(in_channels, 64, 1)
self.sigmoid = nn.Sigmoid()
self.conv2 = nn.Conv2d(65, 64, 3, padding=1)
self.relu = nn.ReLU()
self.bn = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 64, 3, padding=1)
self.conv4 = nn.Conv2d(64, 1, 3, padding=1)
def forward(self, x, smr):
"""
RCL模块的正向传播
:param x: 来自前面卷积网络的特征图, 因为通道数不唯一, 所以使用额外参数in_channels制定
:param smr: 来自上一级得到的预测掩膜图, 通道数为1
:return: RCL模块输出的预测掩膜图
"""
# in_channelx1x1x64
out1 = self.conv1(x)
out1 = self.sigmoid(out1)
out2 = self.sigmoid(smr)
# 合并来自前一级的预测掩膜和对应前期卷积特征图, 并进行融合
out = torch.cat((out1, out2), 1)
out = self.conv2(out)
out = self.relu(out)
out = self.bn(out)
out_share = out
for i in range(3):
out = self.conv3(out)
# 在RCL中, 使用求和的方式对共享特征和输出不同的时间步的特征结合
out = torch.add(out, out_share)
out = self.relu(out)
out = self.bn(out)
out = self.sigmoid(self.conv4(out))
return out
| zh | 0.934939 | RCL模块的正向传播 :param x: 来自前面卷积网络的特征图, 因为通道数不唯一, 所以使用额外参数in_channels制定 :param smr: 来自上一级得到的预测掩膜图, 通道数为1 :return: RCL模块输出的预测掩膜图 # in_channelx1x1x64 # 合并来自前一级的预测掩膜和对应前期卷积特征图, 并进行融合 # 在RCL中, 使用求和的方式对共享特征和输出不同的时间步的特征结合 | 2.845731 | 3 |
workers/shutdown_worker.py | DRvader/Gcloud-preemtible-trainer | 0 | 6624041 | #!/usr/bin/python3
from google.cloud import firestore
import json
import os
import requests
def main():
    """Requeue this machine's current job and mark it PREEMPTED.

    Intended to run from a shutdown hook on a preemptible instance: if
    ~/job_id exists, the job id it contains is pushed back onto the job
    queue and its Firestore document is flagged as preempted.
    """
    # '~' must be expanded explicitly; os.path functions do not do it.
    # (The original also called os.isfile, which does not exist -- the
    # correct name is os.path.isfile.)
    job_id_path = os.path.expanduser('~/job_id')
    if os.path.isfile(job_id_path):
        config = json.load(open('../config.json'))
        redis_config = json.load(open('../jobServer/config.json'))
        with open(job_id_path) as file:
            job_id = file.readline().strip()
        # Requeue the job.  The original called the undefined name
        # `request`; the imported module is `requests`.
        r = requests.put('{}/job/{}/requeue'.format(config['job_queue_address'], job_id),
                         headers={'auth_key': redis_config['redis_auth_key']})
        db = firestore.Client()
        # NOTE(review): the original referenced an undefined `document_path`
        # (a guaranteed NameError).  'jobs/<job_id>' is assumed here --
        # confirm the actual Firestore collection/document layout.
        job_ref = db.document('jobs/{}'.format(job_id))
        job_ref.update({u'state': u'PREEMPTED'})


if __name__ == '__main__':
    main()
from google.cloud import firestore
import json
import os
import requests
def main():
if os.isfile(os.path.join('~/job_id')):
config = json.load(open('../config.json'))
redis_config = json.load(open('../jobServer/config.json'))
with open('~/job_id') as file:
job_id = file.readline().strip()
r = request.put('{}/job/{}/requeue'.format(config['job_queue_address'], job_id),
headers={'auth_key': redis_config['redis_auth_key']})
db = firestore.Client()
job_ref = db.document(document_path)
job_ref.update({u'state': u'PREEMPTED'})
if __name__ == '__main__':
main() | fr | 0.386793 | #!/usr/bin/python3 | 2.42063 | 2 |
cards/models.py | onerbs/treux | 0 | 6624042 | from django.db import models
from base.models import BaseModel
from boards.models import Board
from users.models import User
class List(BaseModel):
    """An ordered list (column) of cards on a board."""

    # Position of this list within its board (ordering key).
    index = models.PositiveIntegerField()
    title = models.CharField(max_length=100)
    # Deleting the board cascades to its lists; reverse accessor: board.lists.
    of_board = models.ForeignKey(Board, models.CASCADE, 'lists')
    # Fields exposed by the serialization layer (extends BaseModel.exports).
    exports = BaseModel.exports + ['index', 'title', 'of_board', 'cards']
class Card(BaseModel):
    """A single card (task item) belonging to a list."""

    # Position of this card within its list (ordering key).
    index = models.PositiveIntegerField()
    text = models.TextField()
    # Deleting the parent list cascades to its cards; reverse: list.cards.
    of_list = models.ForeignKey(List, models.CASCADE, 'cards')
    # Users this card is assigned to; reverse accessor: user.assigned_cards.
    assigned_to = models.ManyToManyField(User, 'assigned_cards')
    # Optional due date; None means the card never expires.
    expires_at = models.DateTimeField(null=True, default=None)
    exports = BaseModel.exports + [
        'index', 'text', 'of_list', 'assigned_to', 'expires_at'
    ]
| from django.db import models
from base.models import BaseModel
from boards.models import Board
from users.models import User
class List(BaseModel):
index = models.PositiveIntegerField()
title = models.CharField(max_length=100)
of_board = models.ForeignKey(Board, models.CASCADE, 'lists')
exports = BaseModel.exports + ['index', 'title', 'of_board', 'cards']
class Card(BaseModel):
index = models.PositiveIntegerField()
text = models.TextField()
of_list = models.ForeignKey(List, models.CASCADE, 'cards')
assigned_to = models.ManyToManyField(User, 'assigned_cards')
expires_at = models.DateTimeField(null=True, default=None)
exports = BaseModel.exports + [
'index', 'text', 'of_list', 'assigned_to', 'expires_at'
]
| none | 1 | 2.126134 | 2 | |
third_party/chromite/lib/workqueue/tasks.py | zipated/src | 2,151 | 6624043 | # -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Task manager classes for work queues."""
from __future__ import print_function
import abc
import multiprocessing
from chromite.lib import cros_logging as logging
def _ExecuteTask(handler, request_data):
  """Wrapper for the task handler function.

  Runs inside a pool worker process.  Strips all handlers from the root
  logger (presumably so the forked child does not duplicate the parent's
  log output -- TODO confirm) and returns any exception raised by the
  handler instead of letting it propagate, so the parent process can reap
  it as an ordinary result value.
  """
  root_logger = logging.getLogger()
  # Copy the handler list before mutating it during iteration.
  for h in list(root_logger.handlers):
    root_logger.removeHandler(h)
  try:
    return handler(request_data)
  except Exception as e:
    return e
class TaskManager(object):
  """Abstract base class for task management.

  `TaskManager` is responsible for managing individual work queue
  requests from the time that they're scheduled to run, until they
  complete or are aborted.
  """

  # Python 2-style ABC marker (this file targets py2/py3 compatibility via
  # print_function); on Python 3 this attribute alone is inert.
  __metaclass__ = abc.ABCMeta

  def __init__(self, handler, sample_interval):
    # handler: callable applied to each request's data to perform the work.
    # sample_interval: nominal polling period for the service request loop.
    self.sample_interval = sample_interval
    self._handler = handler

  @abc.abstractmethod
  def StartTick(self):
    """Start the polling cycle in `WorkQueueService.ProcessRequests()`.

    The work queue service's server polling loop will call this function
    once per loop iteration, to mark the nominal start of the polling
    cycle.
    """

  @abc.abstractmethod
  def HasCapacity(self):
    """Return whether there is capacity to start more tasks.

    Returns:
      A true value if there is enough capacity for at least one
      additional call to `StartTask()`.
    """
    return False

  @abc.abstractmethod
  def StartTask(self, request_id, request_data):
    """Start work on a new task request.

    Args:
      request_id: Identifier for the task, used by `TerminateTask()`
        and `Reap()`.
      request_data: Argument to be passed to the task handler.
    """

  @abc.abstractmethod
  def TerminateTask(self, request_id):
    """Terminate a running task.

    A terminated task will be forgotten, and will never be returned
    by `Reap()`.

    Args:
      request_id: Identifier of the task to be terminated.
    """

  @abc.abstractmethod
  def Reap(self):
    """Generator to return results of all completed tasks.

    Yields:
      A `(request_id, return_value)` tuple.
    """
    pass
class ProcessPoolTaskManager(TaskManager):
  """A task manager implemented with `multiprocessing.Pool`."""

  def __init__(self, max_tasks, handler, sample_interval):
    super(ProcessPoolTaskManager, self).__init__(handler, sample_interval)
    self._pool = multiprocessing.Pool(max_tasks)
    self._max_tasks = max_tasks
    # Maps request_id -> multiprocessing.pool.AsyncResult for in-flight
    # tasks; entries are removed by Reap() once their result is ready.
    self._pending_results = {}
    # request_ids whose results must be discarded rather than yielded.
    self._pending_aborts = set()

  def __len__(self):
    """Return the number of tasks still in flight."""
    return len(self._pending_results)

  def StartTick(self):
    # No per-tick bookkeeping is needed for the pool implementation.
    pass

  def HasCapacity(self):
    return len(self) < self._max_tasks

  def StartTask(self, request_id, request_data):
    self._pending_results[request_id] = (
        self._pool.apply_async(_ExecuteTask,
                               (self._handler, request_data)))

  def TerminateTask(self, request_id):
    # The worker process is not actually killed; the task's result is
    # simply dropped when it eventually completes (see Reap).
    self._pending_aborts.add(request_id)

  def Reap(self):
    # Iterate over a snapshot: the loop deletes entries from
    # _pending_results, and mutating a dict while iterating its live
    # items() view raises RuntimeError on Python 3.
    for request_id, result in list(self._pending_results.items()):
      if result.ready():
        del self._pending_results[request_id]
        if request_id in self._pending_aborts:
          self._pending_aborts.remove(request_id)
        else:
          yield request_id, result.get()

  def Close(self):
    """Shut down the pool, terminating workers and joining them."""
    self._pool.terminate()
    self._pool.join()
| # -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Task manager classes for work queues."""
from __future__ import print_function
import abc
import multiprocessing
from chromite.lib import cros_logging as logging
def _ExecuteTask(handler, request_data):
"""Wrapper for the task handler function."""
root_logger = logging.getLogger()
for h in list(root_logger.handlers):
root_logger.removeHandler(h)
try:
return handler(request_data)
except Exception as e:
return e
class TaskManager(object):
"""Abstract base class for task management.
`TaskManager` is responsible for managing individual work queue
requests from the time that they're scheduled to run, until they
complete or are aborted.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, handler, sample_interval):
self.sample_interval = sample_interval
self._handler = handler
@abc.abstractmethod
def StartTick(self):
"""Start the polling cycle in `WorkQueueService.ProcessRequests()`.
The work queue service's server polling loop will call this function
once per loop iteration, to mark the nominal start of the polling
cycle.
"""
@abc.abstractmethod
def HasCapacity(self):
"""Return whether there is capacity to start more tasks.
Returns:
A true value if there is enough capacity for at least one
additional call to `StartTask()`.
"""
return False
@abc.abstractmethod
def StartTask(self, request_id, request_data):
"""Start work on a new task request.
Args:
request_id: Identifier for the task, used by `TerminateTask()`
and `Reap()`.
request_data: Argument to be passed to the task handler.
"""
@abc.abstractmethod
def TerminateTask(self, request_id):
"""Terminate a running task.
A terminated task will be forgotten, and will never be returned
by `Reap()`.
Args:
request_id: Identifier of the task to be terminated.
"""
@abc.abstractmethod
def Reap(self):
"""Generator to return results of all completed tasks.
Yields:
A `(request_id, return_value)` tuple.
"""
pass
class ProcessPoolTaskManager(TaskManager):
"""A task manager implemented with `multiprocessing.Pool`."""
def __init__(self, max_tasks, handler, sample_interval):
super(ProcessPoolTaskManager, self).__init__(handler, sample_interval)
self._pool = multiprocessing.Pool(max_tasks)
self._max_tasks = max_tasks
self._pending_results = {}
self._pending_aborts = set()
def __len__(self):
return len(self._pending_results)
def StartTick(self):
pass
def HasCapacity(self):
return len(self) < self._max_tasks
def StartTask(self, request_id, request_data):
self._pending_results[request_id] = (
self._pool.apply_async(_ExecuteTask,
(self._handler, request_data)))
def TerminateTask(self, request_id):
self._pending_aborts.add(request_id)
def Reap(self):
for request_id, result in self._pending_results.items():
if result.ready():
del self._pending_results[request_id]
if request_id in self._pending_aborts:
self._pending_aborts.remove(request_id)
else:
yield request_id, result.get()
def Close(self):
self._pool.terminate()
self._pool.join()
| en | 0.84233 | # -*- coding: utf-8 -*- # Copyright 2017 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Task manager classes for work queues. Wrapper for the task handler function. Abstract base class for task management. `TaskManager` is responsible for managing individual work queue requests from the time that they're scheduled to run, until they complete or are aborted. Start the polling cycle in `WorkQueueService.ProcessRequests()`. The work queue service's server polling loop will call this function once per loop iteration, to mark the nominal start of the polling cycle. Return whether there is capacity to start more tasks. Returns: A true value if there is enough capacity for at least one additional call to `StartTask()`. Start work on a new task request. Args: request_id: Identifier for the task, used by `TerminateTask()` and `Reap()`. request_data: Argument to be passed to the task handler. Terminate a running task. A terminated task will be forgotten, and will never be returned by `Reap()`. Args: request_id: Identifier of the task to be terminated. Generator to return results of all completed tasks. Yields: A `(request_id, return_value)` tuple. A task manager implemented with `multiprocessing.Pool`. | 2.595003 | 3 |
ex06-string_lists-palindrome.py | lew18/practicepython.org-mysolutions | 0 | 6624044 | """
https://www.practicepython.org
Exercise 6: String Lists
2 chilis
Ask the user for a string and print out whether this string is a
palindrome or not. (A palindrome is a string that reads the same
forwards and backwards.)
"""
def palindrome_checker(s1):
    """Return True if s1 reads the same forwards and backwards."""
    # Comparing against the reversed string checks every character pair,
    # which is equivalent to walking the two halves toward the middle.
    return s1 == s1[::-1]
# Read the user's text, test it case-insensitively, and report the result.
s1 = input("Enter some text: ")
if palindrome_checker(s1.lower()):
    print("'" + s1 + "' is a palindrome")
else:
    print("'" + s1 + "' is not a palindrome")
| """
https://www.practicepython.org
Exercise 6: String Lists
2 chilis
Ask the user for a string and print out whether this string is a
palindrome or not. (A palindrome is a string that reads the same
forwards and backwards.)
"""
def palindrome_checker(s1):
for i in range(int(len(s1)/2)):
if s1[i] != s1[len(s1)-i-1] :
return(False)
return(True)
s1 = input("Enter some text: ")
if palindrome_checker(s1.lower()):
print("'" + s1 + "' is a palindrome")
else:
print("'" + s1 + "' is not a palindrome")
| en | 0.768001 | https://www.practicepython.org Exercise 6: String Lists 2 chilis Ask the user for a string and print out whether this string is a palindrome or not. (A palindrome is a string that reads the same forwards and backwards.) | 4.123694 | 4 |
images/examples/nodes/kdp-node-validate-directory/validatedirectory/s3utils.py | NASA-PDS/kdp | 6 | 6624045 | <filename>images/examples/nodes/kdp-node-validate-directory/validatedirectory/s3utils.py
import os
import boto3
import fnmatch
s3 = boto3.resource('s3')
client = boto3.client('s3')
def get_matching_objects_from_s3_prefix(bucket_name, s3_prefix='', regex='*'):
    """Given an S3 prefix and pattern, return a list of all matching object keys.

    Note: despite its name, `regex` is an fnmatch-style glob pattern
    (e.g. '*.csv'), not a regular expression.
    """
    bucket = s3.Bucket(sanitize_bucket_name(bucket_name))
    object_summaries = bucket.objects.filter(Prefix=s3_prefix)
    return fnmatch.filter(map(lambda object: object.key, object_summaries), regex)
def download_all_from_s3_prefix(bucket_name, working_directory='.', s3_prefix=''):
    """Given an S3 prefix and working directory, download all
    files under that prefix to the working directory"""
    # The default '*' pattern matches every key under the prefix.
    matching_objs = get_matching_objects_from_s3_prefix(bucket_name, s3_prefix)
    _download_object_list(bucket_name, working_directory, matching_objs)
def download_all_xml_from_s3_prefix(bucket_name, working_directory='.', s3_prefix=''):
    """Given an S3 prefix and working directory, download all XML
    files under that prefix to the working directory"""
    # Glob matching .xml in any capitalization (.XML, .Xml, ...).
    case_insensitive_xml_pattern = '*.[xX][mM][lL]'
    matching_objs = get_matching_objects_from_s3_prefix(bucket_name, s3_prefix, regex=case_insensitive_xml_pattern)
    _download_object_list(bucket_name, working_directory, matching_objs)
def push_file_to_s3(bucket_name, s3_prefix, local_filepath):
    """Push given file to s3 bucket and prefix"""
    # Destination key is <s3_prefix>/<basename of local_filepath>.
    s3_uri = _local_filepath_to_s3_uri(local_filepath, s3_prefix)
    client.upload_file(local_filepath, sanitize_bucket_name(bucket_name), s3_uri)
def _download_object_list(bucket_name, working_directory, objs):
    """Download list of s3 objects from given bucket into given directory"""
    # Each object lands under its basename; objects that share a basename
    # will overwrite one another in working_directory.
    for obj in objs:
        client.download_file(sanitize_bucket_name(bucket_name), obj, _s3_to_local_filepath(working_directory, obj))
def _s3_to_local_filepath(directory, s3_uri):
    """Return valid local filepath given a directory and s3 URI"""
    filename = os.path.basename(s3_uri)
    return os.path.join(directory, filename)
def _local_filepath_to_s3_uri(local_filepath, s3_prefix):
    """Return valid s3 URI given a local filepath and target bucket + prefix"""
    filename = os.path.basename(local_filepath)
    return os.path.join(s3_prefix, filename)
def sanitize_bucket_name(bucket_name):
    """removes s3:// if present"""
    scheme = 's3://'
    if not bucket_name.startswith(scheme):
        return bucket_name
    return bucket_name[len(scheme):]
import os
import boto3
import fnmatch
s3 = boto3.resource('s3')
client = boto3.client('s3')
def get_matching_objects_from_s3_prefix(bucket_name, s3_prefix='', regex='*'):
"""Given an S3 prefix and regex pattern, return a list of all matching objects."""
bucket = s3.Bucket(sanitize_bucket_name(bucket_name))
object_summaries = bucket.objects.filter(Prefix=s3_prefix)
return fnmatch.filter(map(lambda object: object.key, object_summaries), regex)
def download_all_from_s3_prefix(bucket_name, working_directory='.', s3_prefix=''):
"""Given an S3 prefix and working directory, download all
files under that prefix to the working directory"""
matching_objs = get_matching_objects_from_s3_prefix(bucket_name, s3_prefix)
_download_object_list(bucket_name, working_directory, matching_objs)
def download_all_xml_from_s3_prefix(bucket_name, working_directory='.', s3_prefix=''):
"""Given an S3 prefix and working directory, download all XML
files under that prefix to the working directory"""
case_insensitive_xml_pattern = '*.[xX][mM][lL]'
matching_objs = get_matching_objects_from_s3_prefix(bucket_name, s3_prefix, regex=case_insensitive_xml_pattern)
_download_object_list(bucket_name, working_directory, matching_objs)
def push_file_to_s3(bucket_name, s3_prefix, local_filepath):
"""Push given file to s3 bucket and prefix"""
s3_uri = _local_filepath_to_s3_uri(local_filepath, s3_prefix)
client.upload_file(local_filepath, sanitize_bucket_name(bucket_name), s3_uri)
def _download_object_list(bucket_name, working_directory, objs):
"""Download list of s3 objects from given bucket into given directory"""
for obj in objs:
client.download_file(sanitize_bucket_name(bucket_name), obj, _s3_to_local_filepath(working_directory, obj))
def _s3_to_local_filepath(directory, s3_uri):
"""Return valid local filepath given a directory and s3 URI"""
return os.path.join(directory, os.path.basename(s3_uri))
def _local_filepath_to_s3_uri(local_filepath, s3_prefix):
"""Return valid s3 URI given a local filepath and target bucket + prefix"""
return os.path.join(s3_prefix, os.path.basename(local_filepath))
def sanitize_bucket_name(bucket_name):
"""removes s3:// if present"""
if bucket_name.startswith('s3://'):
return bucket_name[5:]
else:
return bucket_name | en | 0.648316 | Given an S3 prefix and regex pattern, return a list of all matching objects. Given an S3 prefix and working directory, download all files under that prefix to the working directory Given an S3 prefix and working directory, download all XML files under that prefix to the working directory Push given file to s3 bucket and prefix Download list of s3 objects from given bucket into given directory Return valid local filepath given a directory and s3 URI Return valid s3 URI given a local filepath and target bucket + prefix removes s3:// if present | 2.779758 | 3 |
dungeon/utils.py | bdwheele/dungeon | 0 | 6624046 | <reponame>bdwheele/dungeon
from math import floor
from random import randint, choices, choice
import re
from copy import deepcopy
"""
Miscellaneous utility functions that are useful for many things
"""
# Per-table state: in counter mode maps table -> last issued int; in random
# mode maps table -> set of already-issued ints.
id_state = {}


def gen_id(table, seed=1, random=False, prefix=None, random_limit=4095, reserved=()):
    """Generate a per-program-run-unique ID.

    Either draws an unused random integer in [0, random_limit]
    (``random=True``) or increments a per-table counter starting at
    ``seed``.  Optionally, a prefix can be added to the ID.

    :param table: namespace key; each table has its own counter / used-id set
    :param seed: first counter value for a new table (counter mode only)
    :param random: choose ids randomly instead of sequentially
    :param prefix: optional string prepended to the returned id
    :param random_limit: inclusive upper bound for random ids
    :param reserved: ids that must never be returned (random mode only);
        defaults to an immutable tuple -- the original used a mutable
        default list, a classic Python pitfall (membership tests behave
        identically on either type)
    :return: an int, or a string when ``prefix`` is given
    """
    new_id = None
    if random:
        if table not in id_state:
            id_state[table] = set()
        new_id = randint(0, random_limit)
        while new_id in id_state[table] or new_id in reserved:
            new_id = randint(0, random_limit)
        id_state[table].add(new_id)
    else:
        if table not in id_state:
            id_state[table] = seed
        else:
            id_state[table] += 1
        new_id = id_state[table]
    if prefix is not None:
        new_id = prefix + str(new_id)
    return new_id
def generate_avg_list(average, count, min, max):
    """
    Generate a list of <count> integers between <min> and <max> with
    an average of <average>. Average itself need not be an integer.

    Note: the target is approached greedily -- each draw is biased high
    when the running average is below <average> and low otherwise -- so
    the final list's average is near, not exactly, <average>.
    """
    # Running average; initialized to max so the first draw is biased low.
    avg = max
    sum = 0  # NOTE: shadows builtin sum() inside this function
    numbers = []
    for n in range(0, count):
        # n is immediately re-bound to the drawn value; the loop variable
        # is only used as a counter.
        if avg < average:
            n = randint(floor(avg), max)
        else:
            n = randint(min, floor(avg))
        sum += n
        numbers.append(n)
        avg = sum / len(numbers)
    return numbers
def roll_dice(spec):
    """
    Given a D&D roll specification, generate a random number. The
    spec should look like: 1d6, 3d4-2, 2d8+1, 3d6+2x100
    """
    # groups: (count)d(sides)[+/-modifier][x multiplier]
    pattern = re.compile(r"^(\d+)d(\d+)([\+\-]\d+)?(x(\d+))?$")
    normalized = spec.replace(' ', '').lower()
    match = pattern.match(normalized)
    if match is None:
        raise ValueError(f"Roll spec '{normalized}' doesn't seem valid")
    count = int(match.group(1))
    sides = int(match.group(2))
    modifier = int(match.group(3)) if match.group(3) else 0
    multiplier = int(match.group(5)) if match.group(5) else 1
    total = sum(randint(1, sides) for _ in range(count))
    return (total + modifier) * multiplier
def array_random(array):
    """
    Select a random item from an array, based on the structure
    of the array.

    If the elements are lists, sets, or tuples, each element is a
    (weight, value) pair and selection is weighted accordingly.
    Otherwise every element is equally likely.  The chosen value is
    returned as a deep copy; an empty array yields None.
    """
    if not array:
        return None
    if isinstance(array[0], (list, set, tuple)):
        weights, values = list(zip(*array))
        picked = choices(values, weights=weights, k=1)[0]
    else:
        picked = choice(array)
    return deepcopy(picked)
#
# Templating 'system'
#
def template(string, values):
    """Apply a template: replace each '{name}' placeholder with values[name]."""
    result = string
    for name, replacement in values.items():
        placeholder = f"{{{name}}}"
        try:
            result = result.replace(placeholder, replacement)
        except Exception as err:
            # Non-string replacement values make str.replace raise; the
            # placeholder is left untouched in that case.
            print(f"Can't apply template for '{name}' with '{replacement}' -- {err}")
    return result
def get_template_vars(string):
    """Return the distinct placeholder names needed to complete the template."""
    placeholder_re = re.compile(r"\{(.+?)\}")
    try:
        found = placeholder_re.findall(string)
    except Exception as err:
        # findall raises on non-string input; report the offending value.
        print(f"Bad string: '{string}'")
        raise err
    # Deduplicate (order of the result is unspecified).
    return list(set(found))
def is_template(string):
    """Return whether or not the string contains any '{...}' placeholder."""
    return len(get_template_vars(string)) > 0
| from math import floor
from random import randint, choices, choice
import re
from copy import deepcopy
"""
Miscellaneous utility functions that are useful for many things
"""
id_state = {}
def gen_id(table, seed=1, random=False, prefix=None, random_limit=4095, reserved=[]):
"""
Generate a per-program-run-unique ID either by using a
random number or by incrementing a counter. optionally,
a prefix can be added to the ID. When choosing a random id,
a reserved list can be specified to avoid generating those ids.
"""
new_id = None
if random:
if table not in id_state:
id_state[table] = set()
new_id = randint(0, random_limit)
while new_id in id_state[table] or new_id in reserved:
new_id = randint(0, random_limit)
id_state[table].add(new_id)
else:
if table not in id_state:
id_state[table] = seed
else:
id_state[table] += 1
new_id = id_state[table]
if prefix is not None:
new_id = prefix + str(new_id)
return new_id
def generate_avg_list(average, count, min, max):
"""
Generate a list of <count> integers between <min> and <max> with
an average of <average>. Average itself need not be an integer.
"""
avg = max
sum = 0
numbers = []
for n in range(0, count):
if avg < average:
n = randint(floor(avg), max)
else:
n = randint(min, floor(avg))
sum += n
numbers.append(n)
avg = sum / len(numbers)
return numbers
def roll_dice(spec):
"""
Given a D&D roll specification, generate a random number. The
spec should look like: 1d6, 3d4-2, 2d8+1, 3d6+2x100
"""
spec_re = re.compile(r"^(\d+)d(\d+)([\+\-]\d+)?(x(\d+))?$")
spec = spec.replace(' ', '').lower()
m = spec_re.match(spec)
if not m:
raise ValueError(f"Roll spec '{spec}' doesn't seem valid")
count = int(m.group(1))
die = int(m.group(2))
modifier = int(m.group(3) if m.group(3) else 0)
multiplier = int(m.group(5) if m.group(5) else 1)
sum = 0
for _ in range(count):
sum += randint(1, die)
return (sum + modifier) * multiplier
def array_random(array):
"""
Select a random item from an array, based on the structure
of the array.
If the array elements are lists, sets, or tuples, then the first item of
the element is the relative weight of that element, and the second item
of the element is the data that will be returned if that element is chosen.
If the array elements are anything else, it's assumed to be an even
distribution and the elements are the data that will be returned when
chosen.
"""
if not array:
return None
if isinstance(array[0], (list, set, tuple)):
weights, values = list(zip(*array))
return deepcopy(choices(values, weights=weights, k=1)[0])
else:
return deepcopy(choice(array))
#
# Templating 'system'
#
def template(string, values):
"""Apply a template"""
for k, v in values.items():
try:
string = string.replace(f"{{{k}}}", v)
except Exception as e:
print(f"Can't apply template for '{k}' with '{v}' -- {e}")
return string
def get_template_vars(string):
"""get the variables needed to complete the template"""
var_re = re.compile(r"\{(.+?)\}")
try:
return list(set(var_re.findall(string)))
except Exception as e:
print(f"Bad string: '{string}'")
raise e
def is_template(string):
"""return whether or not the string is a template"""
return len(get_template_vars(string)) != 0 | en | 0.811467 | Miscellaneous utility functions that are useful for many things Generate a per-program-run-unique ID either by using a random number or by incrementing a counter. optionally, a prefix can be added to the ID. When choosing a random id, a reserved list can be specified to avoid generating those ids. Generate a list of <count> integers between <min> and <max> with an average of <average>. Average itself need not be an integer. Given a D&D roll specification, generate a random number. The spec should look like: 1d6, 3d4-2, 2d8+1, 3d6+2x100 Select a random item from an array, based on the structure of the array. If the array elements are lists, sets, or tuples, then the first item of the element is the relative weight of that element, and the second item of the element is the data that will be returned if that element is chosen. If the array elements are anything else, it's assumed to be an even distribution and the elements are the data that will be returned when chosen. # # Templating 'system' # Apply a template get the variables needed to complete the template return whether or not the string is a template | 3.868978 | 4 |
envs/tests/test_breakout_env.py | MonteyMontey/deep-reinforcement-learning-sandbox | 0 | 6624047 | <filename>envs/tests/test_breakout_env.py
import numpy as np
from copy import deepcopy
import unittest
from envs.breakout_env import BreakoutEnv, Action
class TestBreakoutEnv(unittest.TestCase):
    """Unit tests for BreakoutEnv.step()."""

    def test_step(self):
        """A single RIGHT step advances the ball by its velocity and shifts the paddle."""
        env = BreakoutEnv(15)
        env.reset()
        # Snapshot pre-step state (deepcopy guards against aliasing the
        # mutable lists that step() updates in place).
        ball_pos = deepcopy(env.ball.pos)
        ball_vel = deepcopy(env.ball.vel)
        paddle_y_pos = deepcopy(env.paddle.y_pos)
        paddle_x_start = env.paddle.x_start
        paddle_x_end = env.paddle.x_end
        action = Action.RIGHT
        env.step(action)
        # check ball pos: advanced by exactly one velocity increment
        self.assertTrue(env.ball.pos == [ball_pos[0] + ball_vel[0], ball_pos[1] + ball_vel[1]])
        # check paddle pos: x shifted by the action's x-delta, y unchanged
        self.assertTrue(
            env.paddle.x_start == paddle_x_start + action.value[0] and
            env.paddle.x_end == paddle_x_end + action.value[0] and
            env.paddle.y_pos == paddle_y_pos)
if __name__ == "__main__":
unittest.main()
| <filename>envs/tests/test_breakout_env.py
import numpy as np
from copy import deepcopy
import unittest
from envs.breakout_env import BreakoutEnv, Action
class TestBreakoutEnv(unittest.TestCase):
def test_step(self):
env = BreakoutEnv(15)
env.reset()
ball_pos = deepcopy(env.ball.pos)
ball_vel = deepcopy(env.ball.vel)
paddle_y_pos = deepcopy(env.paddle.y_pos)
paddle_x_start = env.paddle.x_start
paddle_x_end = env.paddle.x_end
action = Action.RIGHT
env.step(action)
# check ball pos
self.assertTrue(env.ball.pos == [ball_pos[0] + ball_vel[0], ball_pos[1] + ball_vel[1]])
# check paddle pos
self.assertTrue(
env.paddle.x_start == paddle_x_start + action.value[0] and
env.paddle.x_end == paddle_x_end + action.value[0] and
env.paddle.y_pos == paddle_y_pos)
if __name__ == "__main__":
unittest.main()
| en | 0.320933 | # check ball pos # check paddle pos | 3.032465 | 3 |
search/binary.py | ashleawalker29/algorithms_python | 0 | 6624048 | <gh_stars>0
from numbers import Number
def binary_search(numbers, value, start=0, end=None):
    """Search for *value* in *numbers* and report the outcome as a string.

    Validates the inputs and sorts the numbers exactly once, then runs an
    iterative binary search over sorted(numbers)[start:end].  (The original
    implementation recursed into itself, re-running the full validation pass
    and re-sorting the already-sorted list on every bisection step --
    O(n log n) extra work per level.)

    :param numbers: iterable of numbers to search through
    :param value: the number to search for
    :param start: low bound index into the sorted list (default 0)
    :param end: exclusive high bound; defaults to len(numbers)
    :return: a human-readable message describing the result
    """
    if not numbers:
        return 'Nothing to search through.'
    if not value and value != 0:
        return 'Nothing to search for.'
    if not isinstance(value, Number):
        return 'Can only search for numbers.'
    for number in numbers:
        if not isinstance(number, Number):
            return 'Can only search through lists of just numbers.'
    numbers = sorted(numbers)
    if end is None:
        end = len(numbers)
    # Classic half-interval search on the sorted list.
    while start < end:
        position = (end - start) // 2 + start
        if value < numbers[position]:
            end = position
        elif value > numbers[position]:
            start = position + 1
        else:
            return 'Value was found within the list.'
    return 'Value was not found within the list.'
| from numbers import Number
def binary_search(numbers, value, start=0, end=None):
if not numbers:
return 'Nothing to search through.'
if not value and value != 0:
return 'Nothing to search for.'
if not isinstance(value, Number):
return 'Can only search for numbers.'
for number in numbers:
if not isinstance(number, Number):
return 'Can only search through lists of just numbers.'
numbers = sorted(numbers)
if end is None:
end = len(numbers)
if start == end:
return 'Value was not found within the list.'
position = (end - start) // 2 + start
if value < numbers[position]:
return binary_search(numbers, value, start=start, end=position)
if value > numbers[position]:
return binary_search(numbers, value, start=position + 1, end=end)
return 'Value was found within the list.' | none | 1 | 4.119973 | 4 | |
pbootstrap.py | bnkr/pbundle | 3 | 6624049 | #!/usr/bin/python
"""Very quick bootstrapping script to avoid the need to manually make a
virtualenv."""
import subprocess
# When executed directly: create a local virtualenv named
# 'pbundle_modules' and install pbundle into it in editable mode,
# straight from its git repository.
if __name__ == "__main__":
    subprocess.call(['virtualenv', 'pbundle_modules'])
    # Use the freshly created environment's own pip so the install lands
    # inside the virtualenv rather than the system site-packages.
    subprocess.call(['pbundle_modules/bin/pip', 'install',
                     '-e', 'git://github.com/bnkr/pbundle.git#egg=pbundle'])
| #!/usr/bin/python
"""Very quick bootstrapping script to avoid the need to manually make a
virtualenv."""
import subprocess
if __name__ == "__main__":
subprocess.call(['virtualenv', 'pbundle_modules'])
subprocess.call(['pbundle_modules/bin/pip', 'install',
'-e', 'git://github.com/bnkr/pbundle.git#egg=pbundle'])
| en | 0.412592 | #!/usr/bin/python Very quick bootstrapping script to avoid the need to manually make a virtualenv. #egg=pbundle']) | 1.787611 | 2 |
spider/cluster/kss/kss.py | dvdmjohnson/d3m_michigan_primitives | 1 | 6624050 | import typing
from d3m.metadata import hyperparams, base as metadata_module, params
from d3m.primitive_interfaces import base, clustering
from d3m import container, utils
import numpy as np
from scipy.linalg import orth
import os
Inputs = container.ndarray
Outputs = container.ndarray
DistanceMatrixOutput = container.ndarray
class KSSParams(params.Params):
    """Learned state of the KSS primitive."""
    # Orthonormal bases of the learned subspaces, one per cluster;
    # shape (n_clusters, n_features, dim_subspaces) -- see KSS.fit.
    U: container.ndarray
class KSSHyperparams(hyperparams.Hyperparams):
    """Hyperparameters for the KSS (k-subspaces) clustering primitive."""
    # Number of subspaces (clusters) to fit; must be at least 2.
    n_clusters = hyperparams.Bounded[int](lower=2,
        upper=None,
        default=2,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="number of subspaces/clusters to learn")
    # Dimensionality of each learned subspace; must not exceed the
    # ambient feature dimension (checked in KSS.fit).
    dim_subspaces = hyperparams.Bounded[int](lower=1,
        upper=50,
        default=2,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
        description="dimensionality of learned subspaces")
class KSS(clustering.ClusteringDistanceMatrixMixin[Inputs, Outputs, KSSParams, KSSHyperparams, DistanceMatrixOutput],
          clustering.ClusteringLearnerPrimitiveBase[Inputs, Outputs, KSSParams, KSSHyperparams]):
    """K-subspaces (KSS) clustering primitive.

    Alternates between assigning each sample to its nearest subspace and
    re-estimating each subspace from the samples assigned to it (top
    eigenvectors of the per-cluster scatter matrix), in the spirit of
    k-means projective clustering (Agarwal & Mustafa, 2004).
    """

    metadata = metadata_module.PrimitiveMetadata({
        'id': '044e5c71-7507-4f58-a139-bc5481179d62',
        'version': "0.0.5",
        'name': 'KSS',
        'description': """Does clustering via the k-subspaces method.""",
        'keywords': ['clustering', 'k-subspaces', 'subspace'],
        'source': {
            'name': 'Michigan',
            'contact': 'mailto:<EMAIL>',
            'uris': [
                # link to file and repo
                'https://github.com/dvdmjohnson/d3m_michigan_primitives/blob/master/spider/cluster/kss/kss.py',
                'https://github.com/dvdmjohnson/d3m_michigan_primitives'],
            'citation': """@inproceedings{agarwal2004k, title={K-means projective clustering}, author={<NAME> and <NAME>}, booktitle={Proceedings of the twenty-third ACM SIGMOD-SIGACT-SIGART symposium on Principles of database systems}, pages={155--165}, year={2004}, organization={ACM}}"""
            },
        'installation': [
            {'type': metadata_module.PrimitiveInstallationType.PIP,
             'package_uri': 'git+https://github.com/dvdmjohnson/d3m_michigan_primitives.git@{git_commit}#egg=spider'.format(
                 git_commit=utils.current_git_commit(os.path.dirname(__file__)))
             },
            {'type': metadata_module.PrimitiveInstallationType.UBUNTU,
             'package': 'ffmpeg',
             'version': '7:2.8.11-0ubuntu0.16.04.1'}],
        'python_path': 'd3m.primitives.clustering.kss.Umich',
        'hyperparams_to_tune': ['n_clusters', 'dim_subspaces'],
        'algorithm_types': [
            metadata_module.PrimitiveAlgorithmType.SUBSPACE_CLUSTERING],
        'primitive_family': metadata_module.PrimitiveFamily.CLUSTERING
        })

    def __init__(self, *, hyperparams: KSSHyperparams, random_seed: int = 0, docker_containers: typing.Dict[str, base.DockerContainer] = None) -> None:
        super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
        self._dim_subspaces = hyperparams['dim_subspaces']
        self._k = hyperparams['n_clusters']
        self._X: Inputs = None  # training data, set by set_training_data
        self._U = None  # learned bases, shape (k, n_features, dim_subspaces)
        self._random_state = np.random.RandomState(random_seed)

    def set_training_data(self, *, inputs: Inputs) -> None:
        """Store training data (n_samples x n_features) and invalidate any previous fit."""
        self._X = inputs
        self._U = None

    def _residuals(self, U: np.ndarray, X: np.ndarray) -> np.ndarray:
        """Distance of each sample to each subspace.

        U: subspace bases, shape (k, n_features, dim_subspaces).
        X: data with samples as *columns*, shape (n_features, n_samples).
        Returns an (n_samples, k) array where entry (i, kk) is the norm of
        the residual of column i after projection onto subspace kk.
        """
        full_residuals = np.empty((X.shape[1], self._k))
        for kk in range(self._k):
            # Project onto the kk-th subspace and measure what is left over.
            projection = np.dot(U[kk], np.dot(U[kk].T, X))
            full_residuals[:, kk] = np.linalg.norm(X - projection, ord=2, axis=0)
        return full_residuals

    def fit(self, *, timeout: float = None, iterations: int = None) -> base.CallResult[None]:
        """Learn the k subspace bases by alternating assignment and estimation.

        Iterates until labels stop changing, or for at most ``iterations``
        rounds when given.  Stores the learned bases in ``self._U``.
        """
        assert self._X is not None, "No training data provided."
        assert self._X.ndim == 2, "Data is not in the right shape."
        assert self._dim_subspaces <= self._X.shape[1], "Dim_subspaces should be less than ambient dimension."

        _X = self._X.T  # work with samples as columns
        n_features, n_samples = _X.shape

        # Randomly initialize k orthonormal bases.  Initializing U here
        # (rather than in a separate U_init array) also fixes a latent
        # NameError: previously, passing iterations=0 skipped the loop and
        # left U unbound.
        U = np.zeros((self._k, n_features, self._dim_subspaces))
        for kk in range(self._k):
            U[kk] = orth(self._random_state.randn(n_features, self._dim_subspaces))

        # Initial assignment: label each sample by its nearest subspace.
        estimated_labels = np.argmin(self._residuals(U, _X), axis=1)

        # Alternate between subspace estimation and assignment until the
        # labeling stabilizes (or the iteration budget runs out).
        prev_labels = -1 * np.ones(estimated_labels.shape)
        it = 0
        while np.sum(estimated_labels != prev_labels) and (iterations is None or it < iterations):
            U = np.empty((self._k, n_features, self._dim_subspaces))
            for kk in range(self._k):
                # Best dim_subspaces-dimensional fit to cluster kk: top
                # eigenvectors of the cluster's scatter matrix Z Z^T.
                Z = _X[:, estimated_labels == kk]
                D, V = np.linalg.eig(np.dot(Z, Z.T))
                D_idx = np.argsort(-D)  # descending eigenvalue order
                U[kk] = V.real[:, D_idx[list(range(self._dim_subspaces))]]
            prev_labels = estimated_labels
            estimated_labels = np.argmin(self._residuals(U, _X), axis=1)
            it = it + 1

        self._U = U
        return base.CallResult(None)

    def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:
        """Assign each input sample (rows of ``inputs``) to its nearest learned subspace."""
        if self._U is None:
            raise ValueError("Calling produce before fitting.")
        labels = np.argmin(self._residuals(self._U, inputs.T), axis=1)
        return base.CallResult(Outputs(labels))

    def produce_distance_matrix(self, *, timeout: float = None, iterations: int = None, inputs: Inputs) -> base.CallResult[DistanceMatrixOutput]:
        """
        Returns a generic result representing the cluster assignment labels in distance matrix form (i.e. distance is 0
        if the two instances are in the same class, and 1 if they are not).
        """
        if self._U is None:
            raise ValueError("Calling produce before fitting.")
        labels = np.argmin(self._residuals(self._U, inputs.T), axis=1)
        # Pairwise label-disagreement matrix, vectorized (previously an
        # O(n^2) Python loop): entry (i, j) is 1.0 iff labels differ.
        labmat = (labels[:, None] != labels[None, :]).astype(float)
        return base.CallResult(DistanceMatrixOutput(labmat))

    def get_params(self) -> KSSParams:
        """Export the learned subspace bases."""
        return KSSParams(U = self._U)

    def set_params(self, *, params: KSSParams) -> None:
        """Restore previously learned subspace bases."""
        self._U = params['U']

    def __getstate__(self) -> dict:
        # Pickle support: capture constructor arguments, learned params,
        # and the RNG so a restored primitive behaves identically.
        return {
            'constructor': {
                'hyperparams': self.hyperparams,
                'random_seed': self.random_seed,
                'docker_containers': self.docker_containers,
            },
            'params': self.get_params(),
            'random_state': self._random_state,
        }

    def __setstate__(self, state: dict) -> None:
        self.__init__(**state['constructor'])  # type: ignore
        self.set_params(params=state['params'])
        self._random_state = state['random_state']

    # placeholder for now, just calls base version.
    @classmethod
    def can_accept(cls, *, method_name: str, arguments: typing.Dict[str, typing.Union[metadata_module.Metadata, type]], hyperparams: KSSHyperparams) -> typing.Optional[metadata_module.DataMetadata]:
        return super().can_accept(method_name=method_name, arguments=arguments, hyperparams=hyperparams)
| import typing
from d3m.metadata import hyperparams, base as metadata_module, params
from d3m.primitive_interfaces import base, clustering
from d3m import container, utils
import numpy as np
from scipy.linalg import orth
import os
Inputs = container.ndarray
Outputs = container.ndarray
DistanceMatrixOutput = container.ndarray
class KSSParams(params.Params):
U: container.ndarray
class KSSHyperparams(hyperparams.Hyperparams):
n_clusters = hyperparams.Bounded[int](lower=2,
upper=None,
default=2,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="number of subspaces/clusters to learn")
dim_subspaces = hyperparams.Bounded[int](lower=1,
upper=50,
default=2,
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description="dimensionality of learned subspaces")
class KSS(clustering.ClusteringDistanceMatrixMixin[Inputs, Outputs, KSSParams, KSSHyperparams, DistanceMatrixOutput],
clustering.ClusteringLearnerPrimitiveBase[Inputs, Outputs, KSSParams, KSSHyperparams]):
metadata = metadata_module.PrimitiveMetadata({
'id': '044e5c71-7507-4f58-a139-bc5481179d62',
'version': "0.0.5",
'name': 'KSS',
'description': """Does clustering via the k-subspaces method.""",
'keywords': ['clustering', 'k-subspaces', 'subspace'],
'source': {
'name': 'Michigan',
'contact': 'mailto:<EMAIL>',
'uris': [
#link to file and repo
'https://github.com/dvdmjohnson/d3m_michigan_primitives/blob/master/spider/cluster/kss/kss.py',
'https://github.com/dvdmjohnson/d3m_michigan_primitives'],
'citation': """@inproceedings{agarwal2004k, title={K-means projective clustering}, author={<NAME> and <NAME>}, booktitle={Proceedings of the twenty-third ACM SIGMOD-SIGACT-SIGART symposium on Principles of database systems}, pages={155--165}, year={2004}, organization={ACM}}"""
},
'installation': [
{'type': metadata_module.PrimitiveInstallationType.PIP,
'package_uri': 'git+https://github.com/dvdmjohnson/d3m_michigan_primitives.git@{git_commit}#egg=spider'.format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)))
},
{'type': metadata_module.PrimitiveInstallationType.UBUNTU,
'package': 'ffmpeg',
'version': '7:2.8.11-0ubuntu0.16.04.1'}],
'python_path': 'd3m.primitives.clustering.kss.Umich',
'hyperparams_to_tune': ['n_clusters', 'dim_subspaces'],
'algorithm_types': [
metadata_module.PrimitiveAlgorithmType.SUBSPACE_CLUSTERING],
'primitive_family': metadata_module.PrimitiveFamily.CLUSTERING
})
def __init__(self, *, hyperparams: KSSHyperparams, random_seed: int = 0, docker_containers: typing.Dict[str, base.DockerContainer] = None) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
self._dim_subspaces = hyperparams['dim_subspaces']
self._k = hyperparams['n_clusters']
self._X: Inputs = None
self._U = None
self._random_state = np.random.RandomState(random_seed)
def set_training_data(self, *, inputs: Inputs) -> None:
self._X = inputs
self._U = None
def fit(self, *, timeout: float = None, iterations: int = None) -> base.CallResult[None]:
assert self._X is not None, "No training data provided."
assert self._X.ndim == 2, "Data is not in the right shape."
assert self._dim_subspaces <= self._X.shape[1], "Dim_subspaces should be less than ambient dimension."
_X = self._X.T
n_features, n_samples = _X.shape
# randomly initialize subspaces
U_init = np.zeros((self._k, n_features, self._dim_subspaces))
for kk in range(self._k):
U_init[kk] = orth(self._random_state.randn(n_features, self._dim_subspaces))
# compute residuals
full_residuals = np.zeros((n_samples, self._k))
for kk in range(self._k):
tmp1 = np.dot(U_init[kk].T, _X)
tmp2 = np.dot(U_init[kk], tmp1)
full_residuals[:,kk] = np.linalg.norm(_X-tmp2, ord=2, axis=0)
# label by nearest subspace
estimated_labels = np.argmin(full_residuals, axis=1)
# alternate between subspace estimation and assignment
prev_labels = -1 * np.ones(estimated_labels.shape)
it = 0
while np.sum(estimated_labels != prev_labels) and (iterations is None or it < iterations):
# first update residuals after labels obtained
U = np.empty((self._k, n_features, self._dim_subspaces))
for kk in range(self._k):
Z = _X[:,estimated_labels == kk]
D, V = np.linalg.eig(np.dot(Z, Z.T))
D_idx = np.argsort(-D) # descending order
U[kk] = V.real[:,D_idx[list(range(self._dim_subspaces))]]
tmp1 = np.dot(U[kk,:].T, _X)
tmp2 = np.dot(U[kk,:], tmp1)
full_residuals[:,kk] = np.linalg.norm(_X-tmp2, ord=2, axis=0)
# update prev_labels
prev_labels = estimated_labels
# label by nearest subspace
estimated_labels = np.argmin(full_residuals, axis=1)
it = it + 1
self._U = U
return base.CallResult(None)
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:
if self._U is None:
raise ValueError("Calling produce before fitting.")
full_residuals = np.empty((inputs.shape[0], self._k))
for kk in range(self._k):
tmp1 = np.dot(self._U[kk,:].T, inputs.T)
tmp2 = np.dot(self._U[kk,:], tmp1)
full_residuals[:,kk] = np.linalg.norm(inputs.T-tmp2, ord=2, axis=0)
labels = np.argmin(full_residuals, axis=1)
return base.CallResult(Outputs(labels))
def produce_distance_matrix(self, *, timeout: float = None, iterations: int = None, inputs: Inputs) -> base.CallResult[DistanceMatrixOutput]:
"""
Returns a generic result representing the cluster assignment labels in distance matrix form (i.e. distance is 0
if the two instances are in the same class, and 1 if they are not).
"""
if self._U is None:
raise ValueError("Calling produce before fitting.")
full_residuals = np.empty((inputs.shape[0], self._k))
for kk in range(self._k):
tmp1 = np.dot(self._U[kk,:].T, inputs.T)
tmp2 = np.dot(self._U[kk,:], tmp1)
full_residuals[:,kk] = np.linalg.norm(inputs.T-tmp2, ord=2, axis=0)
labels = np.argmin(full_residuals, axis=1)
n = labels.shape[0]
labmat = np.empty((n,n))
for i in range(0,n):
labmat[i,:] = labels != labels[i]
return base.CallResult(DistanceMatrixOutput(labmat))
def get_params(self) -> KSSParams:
return KSSParams(U = self._U)
def set_params(self, *, params: KSSParams) -> None:
self._U = params['U']
def __getstate__(self) -> dict:
return {
'constructor': {
'hyperparams': self.hyperparams,
'random_seed': self.random_seed,
'docker_containers': self.docker_containers,
},
'params': self.get_params(),
'random_state': self._random_state,
}
def __setstate__(self, state: dict) -> None:
self.__init__(**state['constructor']) # type: ignore
self.set_params(params=state['params'])
self._random_state = state['random_state']
#placeholder for now, just calls base version.
@classmethod
def can_accept(cls, *, method_name: str, arguments: typing.Dict[str, typing.Union[metadata_module.Metadata, type]], hyperparams: KSSHyperparams) -> typing.Optional[metadata_module.DataMetadata]:
return super().can_accept(method_name=method_name, arguments=arguments, hyperparams=hyperparams)
| en | 0.753098 | Does clustering via the k-subspaces method. #link to file and repo @inproceedings{agarwal2004k, title={K-means projective clustering}, author={<NAME> and <NAME>}, booktitle={Proceedings of the twenty-third ACM SIGMOD-SIGACT-SIGART symposium on Principles of database systems}, pages={155--165}, year={2004}, organization={ACM}} #egg=spider'.format( # randomly initialize subspaces # compute residuals # label by nearest subspace # alternate between subspace estimation and assignment # first update residuals after labels obtained # descending order # update prev_labels # label by nearest subspace Returns a generic result representing the cluster assignment labels in distance matrix form (i.e. distance is 0 if the two instances are in the same class, and 1 if they are not). # type: ignore #placeholder for now, just calls base version. | 2.493456 | 2 |