id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3530755 | from unittest import TestCase
from bs4 import BeautifulSoup
from tools.lib.capitalization import check_capitalization, get_safe_text, is_capitalized
class GetSafeTextTestCase(TestCase):
    """Tests for get_safe_text, which lower-cases words the checker
    should ignore while leaving everything else intact."""

    def test_get_safe_text(self) -> None:
        # (input, expected safe text) pairs for plain strings.
        cases = [
            ("Zulip Zulip. Zulip some text!", "Zulip zulip. Zulip some text!"),
            ("Zulip Zulip? Zulip some text!", "Zulip zulip? Zulip some text!"),
            ("Zulip Zulip! Zulip some text!", "Zulip zulip! Zulip some text!"),
            ("Zulip Zulip, Zulip some text!", "Zulip zulip, zulip some text!"),
            ("Not Ignored Phrase", "Not Ignored Phrase"),
            ("Not ignored phrase", "Not ignored phrase"),
            ("", ""),
            ("Edited (__last_edit_timestr__)", "Edited (__last_edit_timestr__)"),
        ]
        for text, expected in cases:
            self.assertEqual(get_safe_text(text), expected)

        # HTML input: the safe text should equal its own rendered text,
        # i.e. get_safe_text must not introduce stray markup.
        html = """
<p>Please re-enter your password to confirm your identity.
(<a href="/accounts/password/reset/" target="_blank">Forgotten it?</a>)</p>
"""
        safe_text = get_safe_text(html)
        soup = BeautifulSoup(safe_text, "lxml")
        rendered_text = " ".join(soup.text.split())
        self.assertEqual(safe_text, rendered_text)
class IsCapitalizedTestCase(TestCase):
    """Tests for is_capitalized, the per-string capitalization predicate."""

    def test_process_text(self) -> None:
        # Strings that the checker should accept.
        correctly_capitalized = [
            "Zulip zulip. Zulip some text!",
            "Zulip zulip? Zulip some text!",
            "Zulip zulip! Zulip some text!",
            "Zulip zulip, Zulip some text!",
            "Some number 25mib",
            "Not ignored phrase",
            "",
            "Please re-enter your password to confirm your identity. (Forgotten it?)",
            "Edited (__last_edit_timestr__)",
            "Iphone application",
            "One two etc_ three",
        ]
        for text in correctly_capitalized:
            self.assertTrue(is_capitalized(text))

        # The one sample that violates sentence-case rules.
        self.assertFalse(is_capitalized("Not Ignored Phrase"))
class CheckCapitalizationTestCase(TestCase):
    """Tests for check_capitalization, which buckets input strings into
    errored / ignored / banned lists."""

    def test_check_capitalization(self) -> None:
        # Strings the checker ignores (safe-text transformation applies).
        ignorable = [
            "Zulip Zulip. Zulip some text!",
            "Zulip Zulip? Zulip some text!",
            "Zulip Zulip! Zulip some text!",
            "Zulip Zulip, Zulip some text!",
        ]
        # Strings containing the banned term "realm".
        with_banned_term = [
            "Some text with realm in it",
            "Realm in capital case",
        ]
        strings = [
            *ignorable,
            "Not Ignored Phrase",
            "Not ignored phrase",
            *with_banned_term,
        ]

        errored, ignored, banned = check_capitalization(strings)

        self.assertEqual(errored, ["Not Ignored Phrase"])
        self.assertEqual(ignored, sorted(ignorable))
        self.assertEqual(
            banned,
            sorted(
                f"realm found in '{text}'. The term realm should not appear "
                "in user-facing strings. Use organization instead."
                for text in with_banned_term
            ),
        )
| StarcoderdataPython |
1649318 | from openpyxl.reader.excel import load_workbook
from re import sub
from NLP5 import getDirectResourceUrl, databaseWrapper, getRedirectedResourceURL, isLocation, checkDisambiguates
__author__ = '<NAME>'
def isLocationBool(keyword):
    """Return True if *keyword* resolves to a location resource.

    Normalizes the keyword, looks up a direct resource URL (with a
    hyphen-case fallback), then a redirected one, and finally decides
    via isLocation or, failing that, the disambiguation count.
    """
    # Normalize: underscores to spaces, collapse space runs, trim, Title Case.
    name = sub(' +', ' ', keyword.replace("_", " ")).strip().title()

    url = getDirectResourceUrl(name, databaseWrapper)

    # Hyphenated names: retry with each post-hyphen component lower-cased
    # (e.g. "Foo-Bar" -> "Foo-bar") until a direct resource is found.
    if url is None and '-' in name:
        parts = name.split("-")
        for idx in range(1, len(parts)):
            candidate = parts[:]
            candidate[idx] = candidate[idx].lower()
            url = getDirectResourceUrl("-".join(candidate), databaseWrapper)
            if url is not None:
                break

    # Fall back to a redirect lookup on the underscored form. Note the
    # underscored name is intentionally kept for the disambiguation check.
    if url is None:
        name = name.replace(" ", "_")
        url = getRedirectedResourceURL(name, databaseWrapper)

    location_type = isLocation(url, databaseWrapper)
    if location_type is not None:
        return int(location_type) > 0
    # No location type known: treat any disambiguation hits as a location.
    return int(checkDisambiguates(name, databaseWrapper)) > 0
# Batch-classify keywords from an Excel sheet: read column A rows 2..474,
# write 1/0 into column C depending on isLocationBool, then save in place.
# NOTE(review): Python 2 syntax (`print` statement) -- this script will not
# run under Python 3 as written.
wb = load_workbook('Test data28092015.xlsx')
for i in range(2, 475):
    # Progress indicator: 1-based row counter out of 474 data rows.
    print str(i-1) + '/474'
    # Cell values may be non-strings (numbers, None); coerce to str.
    keyword = str(wb['Sheet1']['A' + str(i)].value)
    result = 0
    if isLocationBool(keyword):
        result = 1
    wb['Sheet1']['C' + str(i)] = result
# Overwrites the source workbook with the added classification column.
wb.save('Test data28092015.xlsx')
4856380 | # Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libidl_client/topofw/peripheral/idl/PeripheralDeviceSlot.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.event
import raritan.rpc.idl
import raritan.rpc.peripheral
import raritan.rpc.sensors
# enumeration
class PortType(Enumeration):
    """Generated enum for IDL type peripheral.PortType:1.0.0.

    Names suggest the kinds of 1-Wire attachment points a peripheral
    position element can refer to (onboard, device port, hub port,
    chain position).
    """

    idlType = "peripheral.PortType:1.0.0"
    values = [
        "ONEWIRE_ONBOARD",
        "ONEWIRE_DEV_PORT",
        "ONEWIRE_HUB_PORT",
        "ONEWIRE_CHAIN_POS",
    ]


# Enum members are attached after the class body; ordinals match `values`.
PortType.ONEWIRE_ONBOARD = PortType(0)
PortType.ONEWIRE_DEV_PORT = PortType(1)
PortType.ONEWIRE_HUB_PORT = PortType(2)
PortType.ONEWIRE_CHAIN_POS = PortType(3)
# structure
class PosElement(Structure):
    """One step of a peripheral device's position path
    (IDL struct peripheral.PosElement:1.0.0): a port type plus a
    port identifier string."""

    idlType = "peripheral.PosElement:1.0.0"
    elements = ["portType", "port"]

    def __init__(self, portType, port):
        # Validate argument types eagerly, as all generated bindings do.
        typecheck.is_enum(portType, raritan.rpc.peripheral.PortType, AssertionError)
        typecheck.is_string(port, AssertionError)
        self.portType = portType
        self.port = port

    @classmethod
    def decode(cls, json, agent):
        """Build a PosElement from its JSON wire representation."""
        return cls(
            portType=raritan.rpc.peripheral.PortType.decode(json["portType"]),
            port=json["port"],
        )

    def encode(self):
        """Serialize this PosElement to its JSON wire representation."""
        return {
            "portType": raritan.rpc.peripheral.PortType.encode(self.portType),
            "port": self.port,
        }
# structure
class DeviceID(Structure):
    """Identity of a peripheral sensor/actuator
    (IDL struct peripheral.DeviceID:2.0.0): serial number, sensor type
    spec, actuator flag, and channel number."""

    idlType = "peripheral.DeviceID:2.0.0"
    elements = ["serial", "type", "isActuator", "channel"]

    def __init__(self, serial, type, isActuator, channel):
        # Eager type validation, matching the generated binding style.
        typecheck.is_string(serial, AssertionError)
        typecheck.is_struct(type, raritan.rpc.sensors.Sensor.TypeSpec, AssertionError)
        typecheck.is_bool(isActuator, AssertionError)
        typecheck.is_int(channel, AssertionError)
        self.serial = serial
        self.type = type
        self.isActuator = isActuator
        self.channel = channel

    @classmethod
    def decode(cls, json, agent):
        """Build a DeviceID from its JSON wire representation."""
        return cls(
            serial=json["serial"],
            type=raritan.rpc.sensors.Sensor.TypeSpec.decode(json["type"], agent),
            isActuator=json["isActuator"],
            channel=json["channel"],
        )

    def encode(self):
        """Serialize this DeviceID to its JSON wire representation."""
        return {
            "serial": self.serial,
            "type": raritan.rpc.sensors.Sensor.TypeSpec.encode(self.type),
            "isActuator": self.isActuator,
            "channel": self.channel,
        }
# structure
class Address(Structure):
    """Positional address of a peripheral sensor/actuator
    (IDL struct peripheral.Address:2.0.0): a list of PosElement steps
    plus type spec, actuator flag, and channel."""

    idlType = "peripheral.Address:2.0.0"
    elements = ["position", "type", "isActuator", "channel"]

    def __init__(self, position, type, isActuator, channel):
        # Every element of the position path must be a PosElement.
        for step in position:
            typecheck.is_struct(step, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_struct(type, raritan.rpc.sensors.Sensor.TypeSpec, AssertionError)
        typecheck.is_bool(isActuator, AssertionError)
        typecheck.is_int(channel, AssertionError)
        self.position = position
        self.type = type
        self.isActuator = isActuator
        self.channel = channel

    @classmethod
    def decode(cls, json, agent):
        """Build an Address from its JSON wire representation."""
        return cls(
            position=[
                raritan.rpc.peripheral.PosElement.decode(step, agent)
                for step in json["position"]
            ],
            type=raritan.rpc.sensors.Sensor.TypeSpec.decode(json["type"], agent),
            isActuator=json["isActuator"],
            channel=json["channel"],
        )

    def encode(self):
        """Serialize this Address to its JSON wire representation."""
        return {
            "position": [
                raritan.rpc.peripheral.PosElement.encode(step)
                for step in self.position
            ],
            "type": raritan.rpc.sensors.Sensor.TypeSpec.encode(self.type),
            "isActuator": self.isActuator,
            "channel": self.channel,
        }
# value object
class Device(ValueObject):
    """A discovered peripheral device (IDL value object
    peripheral.Device:2.0.0): its DeviceID, position path, package class
    string, and a proxy to its Sensor interface."""

    idlType = "peripheral.Device:2.0.0"

    def __init__(self, deviceID, position, packageClass, device):
        typecheck.is_struct(deviceID, raritan.rpc.peripheral.DeviceID, AssertionError)
        for step in position:
            typecheck.is_struct(step, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_string(packageClass, AssertionError)
        typecheck.is_interface(device, raritan.rpc.sensors.Sensor, AssertionError)
        self.deviceID = deviceID
        self.position = position
        self.packageClass = packageClass
        self.device = device

    def encode(self):
        """Serialize this Device to its JSON wire representation."""
        return {
            "deviceID": raritan.rpc.peripheral.DeviceID.encode(self.deviceID),
            "position": [
                raritan.rpc.peripheral.PosElement.encode(step)
                for step in self.position
            ],
            "packageClass": self.packageClass,
            "device": Interface.encode(self.device),
        }

    @classmethod
    def decode(cls, json, agent):
        """Build a Device from its JSON wire representation."""
        return cls(
            deviceID=raritan.rpc.peripheral.DeviceID.decode(json["deviceID"], agent),
            position=[
                raritan.rpc.peripheral.PosElement.decode(step, agent)
                for step in json["position"]
            ],
            packageClass=json["packageClass"],
            device=Interface.decode(json["device"], agent),
        )

    def listElements(self):
        """Element names, in wire order."""
        return ["deviceID", "position", "packageClass", "device"]
# interface
class DeviceSlot(Interface):
    """Generated proxy for IDL interface peripheral.DeviceSlot:2.0.0.

    One logical slot to which a peripheral device can be assigned.
    Machine-generated by IdlC; keep structure regenerable.
    """

    idlType = "peripheral.DeviceSlot:2.0.0"

    # Integer status codes returned by the RPC methods below.
    ERR_INVALID_PARAMS = 1
    ERR_NOT_SUPPORTED = 2
    # Sentinel channel value meaning "no/invalid channel".
    CHANNEL_INVALID = -1

    # structure
    class Location(Structure):
        """Free-form x/y/z location strings of a slot."""

        idlType = "peripheral.DeviceSlot.Location:1.0.0"
        elements = ["x", "y", "z"]

        def __init__(self, x, y, z):
            typecheck.is_string(x, AssertionError)
            typecheck.is_string(y, AssertionError)
            typecheck.is_string(z, AssertionError)
            self.x = x
            self.y = y
            self.z = z

        @classmethod
        def decode(cls, json, agent):
            """Deserialize from the JSON-RPC wire format."""
            obj = cls(
                x=json["x"],
                y=json["y"],
                z=json["z"],
            )
            return obj

        def encode(self):
            """Serialize to the JSON-RPC wire format."""
            json = {}
            json["x"] = self.x
            json["y"] = self.y
            json["z"] = self.z
            return json

    # structure
    class Settings(Structure):
        """Per-slot user settings (name, description, location, threshold
        mode and a free-form key/value property map)."""

        idlType = "peripheral.DeviceSlot.Settings:1.0.0"
        elements = [
            "name",
            "description",
            "location",
            "useDefaultThresholds",
            "properties",
        ]

        def __init__(
            self, name, description, location, useDefaultThresholds, properties
        ):
            # NOTE: `properties` (a dict) is intentionally not type-checked here,
            # matching the generator's output.
            typecheck.is_string(name, AssertionError)
            typecheck.is_string(description, AssertionError)
            typecheck.is_struct(
                location, raritan.rpc.peripheral.DeviceSlot.Location, AssertionError
            )
            typecheck.is_bool(useDefaultThresholds, AssertionError)
            self.name = name
            self.description = description
            self.location = location
            self.useDefaultThresholds = useDefaultThresholds
            self.properties = properties

        @classmethod
        def decode(cls, json, agent):
            """Deserialize from the wire format; `properties` arrives as a
            list of {key, value} pairs and is folded into a dict."""
            obj = cls(
                name=json["name"],
                description=json["description"],
                location=raritan.rpc.peripheral.DeviceSlot.Location.decode(
                    json["location"], agent
                ),
                useDefaultThresholds=json["useDefaultThresholds"],
                properties=dict(
                    [(elem["key"], elem["value"]) for elem in json["properties"]]
                ),
            )
            return obj

        def encode(self):
            """Serialize to the wire format; the `properties` dict becomes a
            list of {key, value} pairs."""
            json = {}
            json["name"] = self.name
            json["description"] = self.description
            json["location"] = raritan.rpc.peripheral.DeviceSlot.Location.encode(
                self.location
            )
            json["useDefaultThresholds"] = self.useDefaultThresholds
            json["properties"] = [
                dict(key=k, value=v) for k, v in self.properties.items()
            ]
            return json

    # value object
    class DeviceChangedEvent(raritan.rpc.idl.Event):
        """Event carrying the previous and the new Device of a slot."""

        idlType = "peripheral.DeviceSlot.DeviceChangedEvent:1.0.0"

        def __init__(self, oldDevice, newDevice, source):
            super(raritan.rpc.peripheral.DeviceSlot.DeviceChangedEvent, self).__init__(
                source
            )
            typecheck.is_valobj(
                oldDevice, raritan.rpc.peripheral.Device, AssertionError
            )
            typecheck.is_valobj(
                newDevice, raritan.rpc.peripheral.Device, AssertionError
            )
            self.oldDevice = oldDevice
            self.newDevice = newDevice

        def encode(self):
            """Serialize, extending the base Event encoding."""
            json = super(
                raritan.rpc.peripheral.DeviceSlot.DeviceChangedEvent, self
            ).encode()
            json["oldDevice"] = ValueObject.encode(self.oldDevice)
            json["newDevice"] = ValueObject.encode(self.newDevice)
            return json

        @classmethod
        def decode(cls, json, agent):
            """Deserialize, including the inherited Event `source` field."""
            obj = cls(
                oldDevice=ValueObject.decode(json["oldDevice"], agent),
                newDevice=ValueObject.decode(json["newDevice"], agent),
                # for idl.Event
                source=Interface.decode(json["source"], agent),
            )
            return obj

        def listElements(self):
            """Own element names plus the base Event's."""
            elements = ["oldDevice", "newDevice"]
            elements = (
                elements
                + super(
                    raritan.rpc.peripheral.DeviceSlot.DeviceChangedEvent, self
                ).listElements()
            )
            return elements

    # value object
    class SettingsChangedEvent(raritan.rpc.event.UserEvent):
        """User-triggered event carrying old and new slot Settings."""

        idlType = "peripheral.DeviceSlot.SettingsChangedEvent:1.0.0"

        def __init__(self, oldSettings, newSettings, actUserName, actIpAddr, source):
            super(
                raritan.rpc.peripheral.DeviceSlot.SettingsChangedEvent, self
            ).__init__(actUserName, actIpAddr, source)
            typecheck.is_struct(
                oldSettings, raritan.rpc.peripheral.DeviceSlot.Settings, AssertionError
            )
            typecheck.is_struct(
                newSettings, raritan.rpc.peripheral.DeviceSlot.Settings, AssertionError
            )
            self.oldSettings = oldSettings
            self.newSettings = newSettings

        def encode(self):
            """Serialize, extending the base UserEvent encoding."""
            json = super(
                raritan.rpc.peripheral.DeviceSlot.SettingsChangedEvent, self
            ).encode()
            json["oldSettings"] = raritan.rpc.peripheral.DeviceSlot.Settings.encode(
                self.oldSettings
            )
            json["newSettings"] = raritan.rpc.peripheral.DeviceSlot.Settings.encode(
                self.newSettings
            )
            return json

        @classmethod
        def decode(cls, json, agent):
            """Deserialize, including inherited UserEvent/Event fields."""
            obj = cls(
                oldSettings=raritan.rpc.peripheral.DeviceSlot.Settings.decode(
                    json["oldSettings"], agent
                ),
                newSettings=raritan.rpc.peripheral.DeviceSlot.Settings.decode(
                    json["newSettings"], agent
                ),
                # for event.UserEvent
                actUserName=json["actUserName"],
                actIpAddr=json["actIpAddr"],
                # for idl.Event
                source=Interface.decode(json["source"], agent),
            )
            return obj

        def listElements(self):
            """Own element names plus the base UserEvent's."""
            elements = ["oldSettings", "newSettings"]
            elements = (
                elements
                + super(
                    raritan.rpc.peripheral.DeviceSlot.SettingsChangedEvent, self
                ).listElements()
            )
            return elements

    def getDevice(self):
        """RPC: return the Device value object currently in this slot."""
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getDevice", args)
        _ret_ = ValueObject.decode(rsp["_ret_"], agent)
        typecheck.is_valobj(_ret_, raritan.rpc.peripheral.Device, DecodeException)
        return _ret_

    def assign(self, devid):
        """RPC: assign a device (by DeviceID) to this slot; returns an int
        status code (see ERR_* constants)."""
        agent = self.agent
        typecheck.is_struct(devid, raritan.rpc.peripheral.DeviceID, AssertionError)
        args = {}
        args["devid"] = raritan.rpc.peripheral.DeviceID.encode(devid)
        rsp = agent.json_rpc(self.target, "assign", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def assignAddress(self, packageClass, address):
        """RPC: assign by package class and positional Address; returns an
        int status code."""
        agent = self.agent
        typecheck.is_string(packageClass, AssertionError)
        typecheck.is_struct(address, raritan.rpc.peripheral.Address, AssertionError)
        args = {}
        args["packageClass"] = packageClass
        args["address"] = raritan.rpc.peripheral.Address.encode(address)
        rsp = agent.json_rpc(self.target, "assignAddress", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def unassign(self):
        """RPC: clear this slot's assignment; returns an int status code."""
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "unassign", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def getSettings(self):
        """RPC: fetch this slot's Settings structure."""
        agent = self.agent
        args = {}
        rsp = agent.json_rpc(self.target, "getSettings", args)
        _ret_ = raritan.rpc.peripheral.DeviceSlot.Settings.decode(rsp["_ret_"], agent)
        typecheck.is_struct(
            _ret_, raritan.rpc.peripheral.DeviceSlot.Settings, DecodeException
        )
        return _ret_

    def setSettings(self, settings):
        """RPC: replace this slot's Settings; returns an int status code."""
        agent = self.agent
        typecheck.is_struct(
            settings, raritan.rpc.peripheral.DeviceSlot.Settings, AssertionError
        )
        args = {}
        args["settings"] = raritan.rpc.peripheral.DeviceSlot.Settings.encode(settings)
        rsp = agent.json_rpc(self.target, "setSettings", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libidl_client/topofw/peripheral/idl/PeripheralDevicePackage.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.peripheral
# structure
class PackageInfo(Structure):
    """Generated struct peripheral.PackageInfo:2.0.0: state, position
    path, and hardware/firmware info of a peripheral device package."""

    idlType = "peripheral.PackageInfo:2.0.0"
    elements = ["state", "position", "hwInfo", "fwInfo"]

    def __init__(self, state, position, hwInfo, fwInfo):
        typecheck.is_enum(
            state, raritan.rpc.peripheral.PackageInfo.State, AssertionError
        )
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_struct(
            hwInfo, raritan.rpc.peripheral.PackageInfo.HardwareInfo, AssertionError
        )
        typecheck.is_struct(
            fwInfo, raritan.rpc.peripheral.PackageInfo.FirmwareInfo, AssertionError
        )
        self.state = state
        self.position = position
        self.hwInfo = hwInfo
        self.fwInfo = fwInfo

    @classmethod
    def decode(cls, json, agent):
        """Deserialize from the JSON-RPC wire format."""
        obj = cls(
            state=raritan.rpc.peripheral.PackageInfo.State.decode(json["state"]),
            position=[
                raritan.rpc.peripheral.PosElement.decode(x0, agent)
                for x0 in json["position"]
            ],
            hwInfo=raritan.rpc.peripheral.PackageInfo.HardwareInfo.decode(
                json["hwInfo"], agent
            ),
            fwInfo=raritan.rpc.peripheral.PackageInfo.FirmwareInfo.decode(
                json["fwInfo"], agent
            ),
        )
        return obj

    def encode(self):
        """Serialize to the JSON-RPC wire format."""
        json = {}
        json["state"] = raritan.rpc.peripheral.PackageInfo.State.encode(self.state)
        json["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in self.position
        ]
        json["hwInfo"] = raritan.rpc.peripheral.PackageInfo.HardwareInfo.encode(
            self.hwInfo
        )
        json["fwInfo"] = raritan.rpc.peripheral.PackageInfo.FirmwareInfo.encode(
            self.fwInfo
        )
        return json

    # enumeration
    class State(Enumeration):
        """Package state (normal / firmware update / error variants)."""

        idlType = "peripheral.PackageInfo.State:1.0.0"
        values = ["NORMAL", "FW_UPDATE", "INTERNAL_ERROR", "CONFIG_ERROR"]

    # Enum members attached after the nested class body; ordinals match `values`.
    State.NORMAL = State(0)
    State.FW_UPDATE = State(1)
    State.INTERNAL_ERROR = State(2)
    State.CONFIG_ERROR = State(3)

    # structure
    class HardwareInfo(Structure):
        """Hardware identity of a package: serial, class, model,
        minimum downgradable firmware version, hardware revision."""

        idlType = "peripheral.PackageInfo.HardwareInfo:1.0.0"
        elements = [
            "serial",
            "packageClass",
            "model",
            "minDowngradeVersion",
            "revision",
        ]

        def __init__(self, serial, packageClass, model, minDowngradeVersion, revision):
            typecheck.is_string(serial, AssertionError)
            typecheck.is_string(packageClass, AssertionError)
            typecheck.is_string(model, AssertionError)
            typecheck.is_int(minDowngradeVersion, AssertionError)
            typecheck.is_string(revision, AssertionError)
            self.serial = serial
            self.packageClass = packageClass
            self.model = model
            self.minDowngradeVersion = minDowngradeVersion
            self.revision = revision

        @classmethod
        def decode(cls, json, agent):
            """Deserialize from the JSON-RPC wire format."""
            obj = cls(
                serial=json["serial"],
                packageClass=json["packageClass"],
                model=json["model"],
                minDowngradeVersion=json["minDowngradeVersion"],
                revision=json["revision"],
            )
            return obj

        def encode(self):
            """Serialize to the JSON-RPC wire format."""
            json = {}
            json["serial"] = self.serial
            json["packageClass"] = self.packageClass
            json["model"] = self.model
            json["minDowngradeVersion"] = self.minDowngradeVersion
            json["revision"] = self.revision
            return json

    # structure
    class FirmwareInfo(Structure):
        """Firmware identity of a package: compile/update timestamps and
        a nested major/minor Version struct."""

        idlType = "peripheral.PackageInfo.FirmwareInfo:1.0.0"
        elements = ["compileDate", "version", "updateDate"]

        def __init__(self, compileDate, version, updateDate):
            typecheck.is_time(compileDate, AssertionError)
            typecheck.is_struct(
                version,
                raritan.rpc.peripheral.PackageInfo.FirmwareInfo.Version,
                AssertionError,
            )
            typecheck.is_time(updateDate, AssertionError)
            self.compileDate = compileDate
            self.version = version
            self.updateDate = updateDate

        @classmethod
        def decode(cls, json, agent):
            """Deserialize from the JSON-RPC wire format."""
            obj = cls(
                compileDate=raritan.rpc.Time.decode(json["compileDate"]),
                version=raritan.rpc.peripheral.PackageInfo.FirmwareInfo.Version.decode(
                    json["version"], agent
                ),
                updateDate=raritan.rpc.Time.decode(json["updateDate"]),
            )
            return obj

        def encode(self):
            """Serialize to the JSON-RPC wire format."""
            json = {}
            json["compileDate"] = raritan.rpc.Time.encode(self.compileDate)
            json[
                "version"
            ] = raritan.rpc.peripheral.PackageInfo.FirmwareInfo.Version.encode(
                self.version
            )
            json["updateDate"] = raritan.rpc.Time.encode(self.updateDate)
            return json

        # structure
        class Version(Structure):
            """Major/minor firmware version number."""

            idlType = "peripheral.PackageInfo.FirmwareInfo.Version:1.0.0"
            elements = ["majorNumber", "minorNumber"]

            def __init__(self, majorNumber, minorNumber):
                typecheck.is_int(majorNumber, AssertionError)
                typecheck.is_int(minorNumber, AssertionError)
                self.majorNumber = majorNumber
                self.minorNumber = minorNumber

            @classmethod
            def decode(cls, json, agent):
                """Deserialize from the JSON-RPC wire format."""
                obj = cls(
                    majorNumber=json["majorNumber"],
                    minorNumber=json["minorNumber"],
                )
                return obj

            def encode(self):
                """Serialize to the JSON-RPC wire format."""
                json = {}
                json["majorNumber"] = self.majorNumber
                json["minorNumber"] = self.minorNumber
                return json
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libidl_client/topofw/peripheral/idl/PeripheralDeviceManager.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.event
import raritan.rpc.idl
import raritan.rpc.peripheral
import raritan.rpc.sensors
# interface
class DeviceManager(Interface):
idlType = "peripheral.DeviceManager:2.0.0"
ERR_INVALID_PARAMS = 1
# enumeration
class ZCoordMode(Enumeration):
idlType = "peripheral.DeviceManager.ZCoordMode:1.0.0"
values = ["RACKUNITS", "FREEFORM"]
ZCoordMode.RACKUNITS = ZCoordMode(0)
ZCoordMode.FREEFORM = ZCoordMode(1)
# structure
class Settings(Structure):
idlType = "peripheral.DeviceManager.Settings:1.0.0"
elements = [
"zCoordMode",
"autoManageNewDevices",
"deviceAltitude",
"presenceDetectionTimeout",
"defaultThresholdsMap",
]
def __init__(
self,
zCoordMode,
autoManageNewDevices,
deviceAltitude,
presenceDetectionTimeout,
defaultThresholdsMap,
):
typecheck.is_enum(
zCoordMode,
raritan.rpc.peripheral.DeviceManager.ZCoordMode,
AssertionError,
)
typecheck.is_bool(autoManageNewDevices, AssertionError)
typecheck.is_float(deviceAltitude, AssertionError)
typecheck.is_int(presenceDetectionTimeout, AssertionError)
self.zCoordMode = zCoordMode
self.autoManageNewDevices = autoManageNewDevices
self.deviceAltitude = deviceAltitude
self.presenceDetectionTimeout = presenceDetectionTimeout
self.defaultThresholdsMap = defaultThresholdsMap
@classmethod
def decode(cls, json, agent):
obj = cls(
zCoordMode=raritan.rpc.peripheral.DeviceManager.ZCoordMode.decode(
json["zCoordMode"]
),
autoManageNewDevices=json["autoManageNewDevices"],
deviceAltitude=json["deviceAltitude"],
presenceDetectionTimeout=json["presenceDetectionTimeout"],
defaultThresholdsMap=dict(
[
(
elem["key"],
raritan.rpc.sensors.NumericSensor.Thresholds.decode(
elem["value"], agent
),
)
for elem in json["defaultThresholdsMap"]
]
),
)
return obj
def encode(self):
json = {}
json["zCoordMode"] = raritan.rpc.peripheral.DeviceManager.ZCoordMode.encode(
self.zCoordMode
)
json["autoManageNewDevices"] = self.autoManageNewDevices
json["deviceAltitude"] = self.deviceAltitude
json["presenceDetectionTimeout"] = self.presenceDetectionTimeout
json["defaultThresholdsMap"] = [
dict(
key=k, value=raritan.rpc.sensors.NumericSensor.Thresholds.encode(v)
)
for k, v in self.defaultThresholdsMap.items()
]
return json
# structure
class MetaData(Structure):
idlType = "peripheral.DeviceManager.MetaData:1.0.0"
elements = ["oneWirePortCount", "onboardDeviceCount"]
def __init__(self, oneWirePortCount, onboardDeviceCount):
typecheck.is_int(oneWirePortCount, AssertionError)
typecheck.is_int(onboardDeviceCount, AssertionError)
self.oneWirePortCount = oneWirePortCount
self.onboardDeviceCount = onboardDeviceCount
@classmethod
def decode(cls, json, agent):
obj = cls(
oneWirePortCount=json["oneWirePortCount"],
onboardDeviceCount=json["onboardDeviceCount"],
)
return obj
def encode(self):
json = {}
json["oneWirePortCount"] = self.oneWirePortCount
json["onboardDeviceCount"] = self.onboardDeviceCount
return json
# structure
class DeviceTypeInfo(Structure):
idlType = "peripheral.DeviceManager.DeviceTypeInfo:1.0.0"
elements = [
"type",
"isActuator",
"identifier",
"name",
"defaultRange",
"defaultDecDigits",
]
def __init__(
self, type, isActuator, identifier, name, defaultRange, defaultDecDigits
):
typecheck.is_struct(
type, raritan.rpc.sensors.Sensor.TypeSpec, AssertionError
)
typecheck.is_bool(isActuator, AssertionError)
typecheck.is_string(identifier, AssertionError)
typecheck.is_string(name, AssertionError)
typecheck.is_struct(
defaultRange, raritan.rpc.sensors.NumericSensor.Range, AssertionError
)
typecheck.is_int(defaultDecDigits, AssertionError)
self.type = type
self.isActuator = isActuator
self.identifier = identifier
self.name = name
self.defaultRange = defaultRange
self.defaultDecDigits = defaultDecDigits
@classmethod
def decode(cls, json, agent):
obj = cls(
type=raritan.rpc.sensors.Sensor.TypeSpec.decode(json["type"], agent),
isActuator=json["isActuator"],
identifier=json["identifier"],
name=json["name"],
defaultRange=raritan.rpc.sensors.NumericSensor.Range.decode(
json["defaultRange"], agent
),
defaultDecDigits=json["defaultDecDigits"],
)
return obj
def encode(self):
json = {}
json["type"] = raritan.rpc.sensors.Sensor.TypeSpec.encode(self.type)
json["isActuator"] = self.isActuator
json["identifier"] = self.identifier
json["name"] = self.name
json["defaultRange"] = raritan.rpc.sensors.NumericSensor.Range.encode(
self.defaultRange
)
json["defaultDecDigits"] = self.defaultDecDigits
return json
# value object
class SettingsChangedEvent(raritan.rpc.event.UserEvent):
idlType = "peripheral.DeviceManager.SettingsChangedEvent:1.0.0"
def __init__(self, oldSettings, newSettings, actUserName, actIpAddr, source):
super(
raritan.rpc.peripheral.DeviceManager.SettingsChangedEvent, self
).__init__(actUserName, actIpAddr, source)
typecheck.is_struct(
oldSettings,
raritan.rpc.peripheral.DeviceManager.Settings,
AssertionError,
)
typecheck.is_struct(
newSettings,
raritan.rpc.peripheral.DeviceManager.Settings,
AssertionError,
)
self.oldSettings = oldSettings
self.newSettings = newSettings
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.SettingsChangedEvent, self
).encode()
json["oldSettings"] = raritan.rpc.peripheral.DeviceManager.Settings.encode(
self.oldSettings
)
json["newSettings"] = raritan.rpc.peripheral.DeviceManager.Settings.encode(
self.newSettings
)
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
oldSettings=raritan.rpc.peripheral.DeviceManager.Settings.decode(
json["oldSettings"], agent
),
newSettings=raritan.rpc.peripheral.DeviceManager.Settings.decode(
json["newSettings"], agent
),
# for event.UserEvent
actUserName=json["actUserName"],
actIpAddr=json["actIpAddr"],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["oldSettings", "newSettings"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.SettingsChangedEvent, self
).listElements()
)
return elements
# value object
class DeviceEvent(raritan.rpc.idl.Event):
idlType = "peripheral.DeviceManager.DeviceEvent:1.0.0"
def __init__(self, device, allDevices, source):
super(raritan.rpc.peripheral.DeviceManager.DeviceEvent, self).__init__(
source
)
typecheck.is_valobj(device, raritan.rpc.peripheral.Device, AssertionError)
for x0 in allDevices:
typecheck.is_valobj(x0, raritan.rpc.peripheral.Device, AssertionError)
self.device = device
self.allDevices = allDevices
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.DeviceEvent, self
).encode()
json["device"] = ValueObject.encode(self.device)
json["allDevices"] = [ValueObject.encode(x0) for x0 in self.allDevices]
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
device=ValueObject.decode(json["device"], agent),
allDevices=[ValueObject.decode(x0, agent) for x0 in json["allDevices"]],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["device", "allDevices"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.DeviceEvent, self
).listElements()
)
return elements
# value object
class DeviceAddedEvent(DeviceEvent):
idlType = "peripheral.DeviceManager.DeviceAddedEvent:1.0.0"
def __init__(self, device, allDevices, source):
super(raritan.rpc.peripheral.DeviceManager.DeviceAddedEvent, self).__init__(
device, allDevices, source
)
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.DeviceAddedEvent, self
).encode()
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
# for peripheral.DeviceManager_2_0_0.DeviceEvent
device=ValueObject.decode(json["device"], agent),
allDevices=[ValueObject.decode(x0, agent) for x0 in json["allDevices"]],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = []
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.DeviceAddedEvent, self
).listElements()
)
return elements
# value object
class DeviceRemovedEvent(DeviceEvent):
idlType = "peripheral.DeviceManager.DeviceRemovedEvent:1.0.0"
def __init__(self, device, allDevices, source):
super(
raritan.rpc.peripheral.DeviceManager.DeviceRemovedEvent, self
).__init__(device, allDevices, source)
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.DeviceRemovedEvent, self
).encode()
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
# for peripheral.DeviceManager_2_0_0.DeviceEvent
device=ValueObject.decode(json["device"], agent),
allDevices=[ValueObject.decode(x0, agent) for x0 in json["allDevices"]],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = []
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.DeviceRemovedEvent, self
).listElements()
)
return elements
# value object
class UnknownDeviceAttachedEvent(raritan.rpc.idl.Event):
idlType = "peripheral.DeviceManager.UnknownDeviceAttachedEvent:1.0.0"
def __init__(self, romCode, position, source):
super(
raritan.rpc.peripheral.DeviceManager.UnknownDeviceAttachedEvent, self
).__init__(source)
typecheck.is_string(romCode, AssertionError)
for x0 in position:
typecheck.is_struct(
x0, raritan.rpc.peripheral.PosElement, AssertionError
)
self.romCode = romCode
self.position = position
def encode(self):
json = super(
raritan.rpc.peripheral.DeviceManager.UnknownDeviceAttachedEvent, self
).encode()
json["romCode"] = self.romCode
json["position"] = [
raritan.rpc.peripheral.PosElement.encode(x0) for x0 in self.position
]
return json
@classmethod
def decode(cls, json, agent):
obj = cls(
romCode=json["romCode"],
position=[
raritan.rpc.peripheral.PosElement.decode(x0, agent)
for x0 in json["position"]
],
# for idl.Event
source=Interface.decode(json["source"], agent),
)
return obj
def listElements(self):
elements = ["romCode", "position"]
elements = (
elements
+ super(
raritan.rpc.peripheral.DeviceManager.UnknownDeviceAttachedEvent,
self,
).listElements()
)
return elements
# enumeration
class DeviceFirmwareUpdateState(Enumeration):
    """State of a peripheral device firmware update (IdlC-generated enum)."""
    idlType = "peripheral.DeviceManager.DeviceFirmwareUpdateState:1.0.0"
    values = ["UPDATE_STARTED", "UPDATE_SUCCESSFUL", "UPDATE_FAILED"]

# Enumeration members; the index matches the position in `values`.
DeviceFirmwareUpdateState.UPDATE_STARTED = DeviceFirmwareUpdateState(0)
DeviceFirmwareUpdateState.UPDATE_SUCCESSFUL = DeviceFirmwareUpdateState(1)
DeviceFirmwareUpdateState.UPDATE_FAILED = DeviceFirmwareUpdateState(2)
# value object
class DeviceFirmwareUpdateStateChangedEvent(raritan.rpc.idl.Event):
    """Event reporting progress/outcome of a device firmware update.

    Fields:
      oldVersion / newVersion -- firmware version strings before/after
      serial                  -- serial number of the device being updated
      state                   -- DeviceFirmwareUpdateState enum value
    IdlC-generated code -- do not hand-edit.
    """

    idlType = "peripheral.DeviceManager.DeviceFirmwareUpdateStateChangedEvent:1.0.0"

    def __init__(self, oldVersion, newVersion, serial, state, source):
        super(
            raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateStateChangedEvent,
            self,
        ).__init__(source)
        typecheck.is_string(oldVersion, AssertionError)
        typecheck.is_string(newVersion, AssertionError)
        typecheck.is_string(serial, AssertionError)
        typecheck.is_enum(
            state,
            raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateState,
            AssertionError,
        )
        self.oldVersion = oldVersion
        self.newVersion = newVersion
        self.serial = serial
        self.state = state

    def encode(self):
        # Serialize own fields on top of the base-class encoding.
        json = super(
            raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateStateChangedEvent,
            self,
        ).encode()
        json["oldVersion"] = self.oldVersion
        json["newVersion"] = self.newVersion
        json["serial"] = self.serial
        json[
            "state"
        ] = raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateState.encode(
            self.state
        )
        return json

    @classmethod
    def decode(cls, json, agent):
        """Build an instance from a decoded JSON dict, using `agent` for proxies."""
        obj = cls(
            oldVersion=json["oldVersion"],
            newVersion=json["newVersion"],
            serial=json["serial"],
            state=raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateState.decode(
                json["state"]
            ),
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj

    def listElements(self):
        # Own elements first, then the inherited ones.
        elements = ["oldVersion", "newVersion", "serial", "state"]
        elements = (
            elements
            + super(
                raritan.rpc.peripheral.DeviceManager.DeviceFirmwareUpdateStateChangedEvent,
                self,
            ).listElements()
        )
        return elements
# value object
class PackageEvent(raritan.rpc.idl.Event):
    """Base event for sensor-package changes.

    Fields:
      packageInfo -- peripheral.PackageInfo for the affected package
      allPackages -- list of peripheral.PackageInfo for all known packages
    IdlC-generated code -- do not hand-edit.
    """

    idlType = "peripheral.DeviceManager.PackageEvent:1.0.0"

    def __init__(self, packageInfo, allPackages, source):
        super(raritan.rpc.peripheral.DeviceManager.PackageEvent, self).__init__(
            source
        )
        typecheck.is_struct(
            packageInfo, raritan.rpc.peripheral.PackageInfo, AssertionError
        )
        for x0 in allPackages:
            typecheck.is_struct(
                x0, raritan.rpc.peripheral.PackageInfo, AssertionError
            )
        self.packageInfo = packageInfo
        self.allPackages = allPackages

    def encode(self):
        # Serialize own fields on top of the base-class encoding.
        json = super(
            raritan.rpc.peripheral.DeviceManager.PackageEvent, self
        ).encode()
        json["packageInfo"] = raritan.rpc.peripheral.PackageInfo.encode(
            self.packageInfo
        )
        json["allPackages"] = [
            raritan.rpc.peripheral.PackageInfo.encode(x0) for x0 in self.allPackages
        ]
        return json

    @classmethod
    def decode(cls, json, agent):
        """Build an instance from a decoded JSON dict, using `agent` for proxies."""
        obj = cls(
            packageInfo=raritan.rpc.peripheral.PackageInfo.decode(
                json["packageInfo"], agent
            ),
            allPackages=[
                raritan.rpc.peripheral.PackageInfo.decode(x0, agent)
                for x0 in json["allPackages"]
            ],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj

    def listElements(self):
        # Own elements first, then the inherited ones.
        elements = ["packageInfo", "allPackages"]
        elements = (
            elements
            + super(
                raritan.rpc.peripheral.DeviceManager.PackageEvent, self
            ).listElements()
        )
        return elements
# value object
class PackageAddedEvent(PackageEvent):
    """Event raised when a sensor package is added.

    Adds no fields of its own; all state is handled by PackageEvent.
    IdlC-generated code -- do not hand-edit.
    """

    idlType = "peripheral.DeviceManager.PackageAddedEvent:1.0.0"

    def __init__(self, packageInfo, allPackages, source):
        super(
            raritan.rpc.peripheral.DeviceManager.PackageAddedEvent, self
        ).__init__(packageInfo, allPackages, source)

    def encode(self):
        # All state lives in the PackageEvent base class.
        json = super(
            raritan.rpc.peripheral.DeviceManager.PackageAddedEvent, self
        ).encode()
        return json

    @classmethod
    def decode(cls, json, agent):
        """Build an instance from a decoded JSON dict, using `agent` for proxies."""
        obj = cls(
            # for peripheral.DeviceManager_2_0_0.PackageEvent
            packageInfo=raritan.rpc.peripheral.PackageInfo.decode(
                json["packageInfo"], agent
            ),
            allPackages=[
                raritan.rpc.peripheral.PackageInfo.decode(x0, agent)
                for x0 in json["allPackages"]
            ],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj

    def listElements(self):
        # No elements of its own; inherit the base class's element list.
        elements = []
        elements = (
            elements
            + super(
                raritan.rpc.peripheral.DeviceManager.PackageAddedEvent, self
            ).listElements()
        )
        return elements
# value object
class PackageRemovedEvent(PackageEvent):
    """Event raised when a sensor package is removed.

    Adds no fields of its own; all state is handled by PackageEvent.
    IdlC-generated code -- do not hand-edit.
    """

    idlType = "peripheral.DeviceManager.PackageRemovedEvent:1.0.0"

    def __init__(self, packageInfo, allPackages, source):
        super(
            raritan.rpc.peripheral.DeviceManager.PackageRemovedEvent, self
        ).__init__(packageInfo, allPackages, source)

    def encode(self):
        # All state lives in the PackageEvent base class.
        json = super(
            raritan.rpc.peripheral.DeviceManager.PackageRemovedEvent, self
        ).encode()
        return json

    @classmethod
    def decode(cls, json, agent):
        """Build an instance from a decoded JSON dict, using `agent` for proxies."""
        obj = cls(
            # for peripheral.DeviceManager_2_0_0.PackageEvent
            packageInfo=raritan.rpc.peripheral.PackageInfo.decode(
                json["packageInfo"], agent
            ),
            allPackages=[
                raritan.rpc.peripheral.PackageInfo.decode(x0, agent)
                for x0 in json["allPackages"]
            ],
            # for idl.Event
            source=Interface.decode(json["source"], agent),
        )
        return obj

    def listElements(self):
        # No elements of its own; inherit the base class's element list.
        elements = []
        elements = (
            elements
            + super(
                raritan.rpc.peripheral.DeviceManager.PackageRemovedEvent, self
            ).listElements()
        )
        return elements
# structure
class Statistics(Structure):
    """Peripheral bus statistics.

    cSumErrCnt -- int count of checksum errors observed on the bus.
    IdlC-generated code -- do not hand-edit.
    """

    idlType = "peripheral.DeviceManager.Statistics:1.0.0"
    elements = ["cSumErrCnt"]

    def __init__(self, cSumErrCnt):
        typecheck.is_int(cSumErrCnt, AssertionError)
        self.cSumErrCnt = cSumErrCnt

    @classmethod
    def decode(cls, json, agent):
        """Build an instance from a decoded JSON dict."""
        obj = cls(
            cSumErrCnt=json["cSumErrCnt"],
        )
        return obj

    def encode(self):
        # Serialize to a JSON-compatible dict.
        json = {}
        json["cSumErrCnt"] = self.cSumErrCnt
        return json
def getDeviceSlots(self):
    """RPC: return all device slots as peripheral.DeviceSlot interface proxies."""
    agent = self.agent
    args = {}
    rsp = agent.json_rpc(self.target, "getDeviceSlots", args)
    _ret_ = [Interface.decode(x0, agent) for x0 in rsp["_ret_"]]
    # Validate that every decoded proxy has the expected interface type.
    for x0 in _ret_:
        typecheck.is_interface(
            x0, raritan.rpc.peripheral.DeviceSlot, DecodeException
        )
    return _ret_
def getDeviceSlot(self, idx):
    """RPC: return the peripheral.DeviceSlot proxy for slot index `idx` (int)."""
    agent = self.agent
    typecheck.is_int(idx, AssertionError)
    args = {}
    args["idx"] = idx
    rsp = agent.json_rpc(self.target, "getDeviceSlot", args)
    _ret_ = Interface.decode(rsp["_ret_"], agent)
    typecheck.is_interface(
        _ret_, raritan.rpc.peripheral.DeviceSlot, DecodeException
    )
    return _ret_
def getDiscoveredDevices(self):
    """RPC: return all discovered peripheral.Device value objects."""
    agent = self.agent
    args = {}
    rsp = agent.json_rpc(self.target, "getDiscoveredDevices", args)
    _ret_ = [ValueObject.decode(x0, agent) for x0 in rsp["_ret_"]]
    for x0 in _ret_:
        typecheck.is_valobj(x0, raritan.rpc.peripheral.Device, DecodeException)
    return _ret_
def getDiscoveredPackageInfos(self):
    """RPC: return peripheral.PackageInfo structs for all discovered packages."""
    agent = self.agent
    args = {}
    rsp = agent.json_rpc(self.target, "getDiscoveredPackageInfos", args)
    _ret_ = [
        raritan.rpc.peripheral.PackageInfo.decode(x0, agent) for x0 in rsp["_ret_"]
    ]
    for x0 in _ret_:
        typecheck.is_struct(x0, raritan.rpc.peripheral.PackageInfo, DecodeException)
    return _ret_
def getSettings(self):
    """RPC: return the current DeviceManager.Settings struct."""
    agent = self.agent
    args = {}
    rsp = agent.json_rpc(self.target, "getSettings", args)
    _ret_ = raritan.rpc.peripheral.DeviceManager.Settings.decode(
        rsp["_ret_"], agent
    )
    typecheck.is_struct(
        _ret_, raritan.rpc.peripheral.DeviceManager.Settings, DecodeException
    )
    return _ret_
def setSettings(self, settings):
    """RPC: apply a DeviceManager.Settings struct; returns an int status code."""
    agent = self.agent
    typecheck.is_struct(
        settings, raritan.rpc.peripheral.DeviceManager.Settings, AssertionError
    )
    args = {}
    args["settings"] = raritan.rpc.peripheral.DeviceManager.Settings.encode(
        settings
    )
    rsp = agent.json_rpc(self.target, "setSettings", args)
    _ret_ = rsp["_ret_"]
    typecheck.is_int(_ret_, DecodeException)
    return _ret_
def getMetaData(self):
    """RPC: return the DeviceManager.MetaData struct."""
    agent = self.agent
    args = {}
    rsp = agent.json_rpc(self.target, "getMetaData", args)
    _ret_ = raritan.rpc.peripheral.DeviceManager.MetaData.decode(
        rsp["_ret_"], agent
    )
    typecheck.is_struct(
        _ret_, raritan.rpc.peripheral.DeviceManager.MetaData, DecodeException
    )
    return _ret_
def getDeviceTypeInfos(self):
    """RPC: return DeviceManager.DeviceTypeInfo structs for all device types."""
    agent = self.agent
    args = {}
    rsp = agent.json_rpc(self.target, "getDeviceTypeInfos", args)
    _ret_ = [
        raritan.rpc.peripheral.DeviceManager.DeviceTypeInfo.decode(x0, agent)
        for x0 in rsp["_ret_"]
    ]
    for x0 in _ret_:
        typecheck.is_struct(
            x0, raritan.rpc.peripheral.DeviceManager.DeviceTypeInfo, DecodeException
        )
    return _ret_
def getStatistics(self):
    """RPC: return the DeviceManager.Statistics struct (checksum error count)."""
    agent = self.agent
    args = {}
    rsp = agent.json_rpc(self.target, "getStatistics", args)
    _ret_ = raritan.rpc.peripheral.DeviceManager.Statistics.decode(
        rsp["_ret_"], agent
    )
    typecheck.is_struct(
        _ret_, raritan.rpc.peripheral.DeviceManager.Statistics, DecodeException
    )
    return _ret_
# Do NOT edit this file!
# It was generated by IdlC class idl.json.python.ProxyAsnVisitor.
#
# Section generated from "/home/nb/builds/MEGA/px2-3.0.0-3.0.9-branch-20140613-none-release-none-pdu-raritan/fwcomponents/mkdist/tmp/px2_final/libidl_client/topofw/peripheral/idl/PeripheralG2Production.idl"
#
import raritan.rpc
from raritan.rpc import (
Interface,
Structure,
ValueObject,
Enumeration,
typecheck,
DecodeException,
)
import raritan.rpc.peripheral
# interface
class G2Production(Interface):
    """Generated RPC proxy for the peripheral.G2Production IDL interface.

    Production/service access to G2 peripheral devices: firmware update,
    configuration-space access and raw register access. Every method issues
    a JSON-RPC call via self.agent against self.target and returns an int
    status code (one of the ERR_* constants, presumably 0 on success --
    TODO confirm against the IDL). Devices are addressed either by 1-wire
    ROM code (string) or, in the ...Pos variants, by a position given as a
    list of peripheral.PosElement.
    IdlC-generated code -- do not hand-edit.
    """

    idlType = "peripheral.G2Production:2.0.0"

    # Error codes returned as the integer _ret_ of the methods below.
    ERR_INVALID_PARAMS = 1
    ERR_NO_CONFIG_MODE = 2
    ERR_NO_DEVICE = 3
    ERR_NO_FIRMWARE_FILE = 4
    ERR_FIRMWARE_INVALID = 5
    ERR_PROTECTED = 6
    ERR_UPDATE_IN_PROGRESS = 7

    # structure
    class FirmwareInfo(Structure):
        """Metadata describing the firmware image on a device."""

        idlType = "peripheral.G2Production.FirmwareInfo:1.0.0"
        elements = [
            "crc",
            "compiler",
            "compilerVersion",
            "compileDate",
            "version",
            "subVersion",
            "configurationId",
            "updateDate",
        ]

        def __init__(
            self,
            crc,
            compiler,
            compilerVersion,
            compileDate,
            version,
            subVersion,
            configurationId,
            updateDate,
        ):
            typecheck.is_int(crc, AssertionError)
            typecheck.is_string(compiler, AssertionError)
            typecheck.is_int(compilerVersion, AssertionError)
            typecheck.is_string(compileDate, AssertionError)
            typecheck.is_int(version, AssertionError)
            typecheck.is_int(subVersion, AssertionError)
            typecheck.is_int(configurationId, AssertionError)
            typecheck.is_string(updateDate, AssertionError)
            self.crc = crc
            self.compiler = compiler
            self.compilerVersion = compilerVersion
            self.compileDate = compileDate
            self.version = version
            self.subVersion = subVersion
            self.configurationId = configurationId
            self.updateDate = updateDate

        @classmethod
        def decode(cls, json, agent):
            """Build an instance from a decoded JSON dict."""
            obj = cls(
                crc=json["crc"],
                compiler=json["compiler"],
                compilerVersion=json["compilerVersion"],
                compileDate=json["compileDate"],
                version=json["version"],
                subVersion=json["subVersion"],
                configurationId=json["configurationId"],
                updateDate=json["updateDate"],
            )
            return obj

        def encode(self):
            # Serialize to a JSON-compatible dict.
            json = {}
            json["crc"] = self.crc
            json["compiler"] = self.compiler
            json["compilerVersion"] = self.compilerVersion
            json["compileDate"] = self.compileDate
            json["version"] = self.version
            json["subVersion"] = self.subVersion
            json["configurationId"] = self.configurationId
            json["updateDate"] = self.updateDate
            return json

    def updateFirmware(self, romcode):
        """RPC: start a firmware update on the device with this ROM code."""
        agent = self.agent
        typecheck.is_string(romcode, AssertionError)
        args = {}
        args["romcode"] = romcode
        rsp = agent.json_rpc(self.target, "updateFirmware", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def updateFirmwarePos(self, position):
        """RPC: start a firmware update on the device at `position`."""
        agent = self.agent
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        args = {}
        args["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in position
        ]
        rsp = agent.json_rpc(self.target, "updateFirmwarePos", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def getFirmwareInfo(self, romcode):
        """RPC: return (status, FirmwareInfo) for the device with this ROM code."""
        agent = self.agent
        typecheck.is_string(romcode, AssertionError)
        args = {}
        args["romcode"] = romcode
        rsp = agent.json_rpc(self.target, "getFirmwareInfo", args)
        _ret_ = rsp["_ret_"]
        info = raritan.rpc.peripheral.G2Production.FirmwareInfo.decode(
            rsp["info"], agent
        )
        typecheck.is_int(_ret_, DecodeException)
        typecheck.is_struct(
            info, raritan.rpc.peripheral.G2Production.FirmwareInfo, DecodeException
        )
        return (_ret_, info)

    def getFirmwareInfoPos(self, position):
        """RPC: return (status, FirmwareInfo) for the device at `position`."""
        agent = self.agent
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        args = {}
        args["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in position
        ]
        rsp = agent.json_rpc(self.target, "getFirmwareInfoPos", args)
        _ret_ = rsp["_ret_"]
        info = raritan.rpc.peripheral.G2Production.FirmwareInfo.decode(
            rsp["info"], agent
        )
        typecheck.is_int(_ret_, DecodeException)
        typecheck.is_struct(
            info, raritan.rpc.peripheral.G2Production.FirmwareInfo, DecodeException
        )
        return (_ret_, info)

    # enumeration
    class ConfigurationSpace(Enumeration):
        """Selects which on-device configuration space to access."""
        idlType = "peripheral.G2Production.ConfigurationSpace:1.0.0"
        values = ["HARDWARE", "FUNCTION", "FIRMWARE", "RESERVED"]

    # Enumeration members; the index matches the position in `values`.
    ConfigurationSpace.HARDWARE = ConfigurationSpace(0)
    ConfigurationSpace.FUNCTION = ConfigurationSpace(1)
    ConfigurationSpace.FIRMWARE = ConfigurationSpace(2)
    ConfigurationSpace.RESERVED = ConfigurationSpace(3)

    def readConfigurationSpace(self, romcode, cs):
        """RPC: read configuration space `cs`; returns (status, list of bytes)."""
        agent = self.agent
        typecheck.is_string(romcode, AssertionError)
        typecheck.is_enum(
            cs, raritan.rpc.peripheral.G2Production.ConfigurationSpace, AssertionError
        )
        args = {}
        args["romcode"] = romcode
        args["cs"] = raritan.rpc.peripheral.G2Production.ConfigurationSpace.encode(cs)
        rsp = agent.json_rpc(self.target, "readConfigurationSpace", args)
        _ret_ = rsp["_ret_"]
        cfg = [x0 for x0 in rsp["cfg"]]
        typecheck.is_int(_ret_, DecodeException)
        for x0 in cfg:
            typecheck.is_byte(x0, DecodeException)
        return (_ret_, cfg)

    def readConfigurationSpacePos(self, position, cs):
        """RPC: as readConfigurationSpace, addressing the device by position."""
        agent = self.agent
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_enum(
            cs, raritan.rpc.peripheral.G2Production.ConfigurationSpace, AssertionError
        )
        args = {}
        args["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in position
        ]
        args["cs"] = raritan.rpc.peripheral.G2Production.ConfigurationSpace.encode(cs)
        rsp = agent.json_rpc(self.target, "readConfigurationSpacePos", args)
        _ret_ = rsp["_ret_"]
        cfg = [x0 for x0 in rsp["cfg"]]
        typecheck.is_int(_ret_, DecodeException)
        for x0 in cfg:
            typecheck.is_byte(x0, DecodeException)
        return (_ret_, cfg)

    def eraseConfigurationSpace(self, romcode, cs):
        """RPC: erase configuration space `cs`; returns an int status code."""
        agent = self.agent
        typecheck.is_string(romcode, AssertionError)
        typecheck.is_enum(
            cs, raritan.rpc.peripheral.G2Production.ConfigurationSpace, AssertionError
        )
        args = {}
        args["romcode"] = romcode
        args["cs"] = raritan.rpc.peripheral.G2Production.ConfigurationSpace.encode(cs)
        rsp = agent.json_rpc(self.target, "eraseConfigurationSpace", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def eraseConfigurationSpacePos(self, position, cs):
        """RPC: as eraseConfigurationSpace, addressing the device by position."""
        agent = self.agent
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_enum(
            cs, raritan.rpc.peripheral.G2Production.ConfigurationSpace, AssertionError
        )
        args = {}
        args["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in position
        ]
        args["cs"] = raritan.rpc.peripheral.G2Production.ConfigurationSpace.encode(cs)
        rsp = agent.json_rpc(self.target, "eraseConfigurationSpacePos", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def writeConfigurationSpace(self, romcode, cs, cfg):
        """RPC: write byte list `cfg` to configuration space `cs`; returns status."""
        agent = self.agent
        typecheck.is_string(romcode, AssertionError)
        typecheck.is_enum(
            cs, raritan.rpc.peripheral.G2Production.ConfigurationSpace, AssertionError
        )
        for x0 in cfg:
            typecheck.is_byte(x0, AssertionError)
        args = {}
        args["romcode"] = romcode
        args["cs"] = raritan.rpc.peripheral.G2Production.ConfigurationSpace.encode(cs)
        args["cfg"] = [x0 for x0 in cfg]
        rsp = agent.json_rpc(self.target, "writeConfigurationSpace", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def writeConfigurationSpacePos(self, position, cs, cfg):
        """RPC: as writeConfigurationSpace, addressing the device by position."""
        agent = self.agent
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_enum(
            cs, raritan.rpc.peripheral.G2Production.ConfigurationSpace, AssertionError
        )
        for x0 in cfg:
            typecheck.is_byte(x0, AssertionError)
        args = {}
        args["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in position
        ]
        args["cs"] = raritan.rpc.peripheral.G2Production.ConfigurationSpace.encode(cs)
        args["cfg"] = [x0 for x0 in cfg]
        rsp = agent.json_rpc(self.target, "writeConfigurationSpacePos", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def readRegisters(self, romcode, address, count):
        """RPC: read `count` bytes from register `address`; returns (status, data)."""
        agent = self.agent
        typecheck.is_string(romcode, AssertionError)
        typecheck.is_int(address, AssertionError)
        typecheck.is_int(count, AssertionError)
        args = {}
        args["romcode"] = romcode
        args["address"] = address
        args["count"] = count
        rsp = agent.json_rpc(self.target, "readRegisters", args)
        _ret_ = rsp["_ret_"]
        data = [x0 for x0 in rsp["data"]]
        typecheck.is_int(_ret_, DecodeException)
        for x0 in data:
            typecheck.is_byte(x0, DecodeException)
        return (_ret_, data)

    def readRegistersPos(self, position, address, count):
        """RPC: as readRegisters, addressing the device by position."""
        agent = self.agent
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_int(address, AssertionError)
        typecheck.is_int(count, AssertionError)
        args = {}
        args["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in position
        ]
        args["address"] = address
        args["count"] = count
        rsp = agent.json_rpc(self.target, "readRegistersPos", args)
        _ret_ = rsp["_ret_"]
        data = [x0 for x0 in rsp["data"]]
        typecheck.is_int(_ret_, DecodeException)
        for x0 in data:
            typecheck.is_byte(x0, DecodeException)
        return (_ret_, data)

    def writeRegisters(self, romcode, address, data):
        """RPC: write byte list `data` starting at register `address`; returns status."""
        agent = self.agent
        typecheck.is_string(romcode, AssertionError)
        typecheck.is_int(address, AssertionError)
        for x0 in data:
            typecheck.is_byte(x0, AssertionError)
        args = {}
        args["romcode"] = romcode
        args["address"] = address
        args["data"] = [x0 for x0 in data]
        rsp = agent.json_rpc(self.target, "writeRegisters", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def writeRegistersPos(self, position, address, data):
        """RPC: as writeRegisters, addressing the device by position."""
        agent = self.agent
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_int(address, AssertionError)
        for x0 in data:
            typecheck.is_byte(x0, AssertionError)
        args = {}
        args["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in position
        ]
        args["address"] = address
        args["data"] = [x0 for x0 in data]
        rsp = agent.json_rpc(self.target, "writeRegistersPos", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def writeRegisterBits(self, romcode, address, mask, bits):
        """RPC: read-modify-write bits under `mask` at register `address`; returns status."""
        agent = self.agent
        typecheck.is_string(romcode, AssertionError)
        typecheck.is_int(address, AssertionError)
        typecheck.is_byte(mask, AssertionError)
        typecheck.is_byte(bits, AssertionError)
        args = {}
        args["romcode"] = romcode
        args["address"] = address
        args["mask"] = mask
        args["bits"] = bits
        rsp = agent.json_rpc(self.target, "writeRegisterBits", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def writeRegisterBitsPos(self, position, address, mask, bits):
        """RPC: as writeRegisterBits, addressing the device by position."""
        agent = self.agent
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_int(address, AssertionError)
        typecheck.is_byte(mask, AssertionError)
        typecheck.is_byte(bits, AssertionError)
        args = {}
        args["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in position
        ]
        args["address"] = address
        args["mask"] = mask
        args["bits"] = bits
        rsp = agent.json_rpc(self.target, "writeRegisterBitsPos", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    # enumeration
    class ResetMethod(Enumeration):
        """How to reset a device: simulated brownout or watchdog expiry."""
        idlType = "peripheral.G2Production.ResetMethod:1.0.0"
        values = ["BROWNOUT", "WATCHDOG"]

    # Enumeration members; the index matches the position in `values`.
    ResetMethod.BROWNOUT = ResetMethod(0)
    ResetMethod.WATCHDOG = ResetMethod(1)

    def reset(self, romcode, method):
        """RPC: reset the device with this ROM code using `method`; returns status."""
        agent = self.agent
        typecheck.is_string(romcode, AssertionError)
        typecheck.is_enum(
            method, raritan.rpc.peripheral.G2Production.ResetMethod, AssertionError
        )
        args = {}
        args["romcode"] = romcode
        args["method"] = raritan.rpc.peripheral.G2Production.ResetMethod.encode(method)
        rsp = agent.json_rpc(self.target, "reset", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_

    def resetPos(self, position, method):
        """RPC: as reset, addressing the device by position."""
        agent = self.agent
        for x0 in position:
            typecheck.is_struct(x0, raritan.rpc.peripheral.PosElement, AssertionError)
        typecheck.is_enum(
            method, raritan.rpc.peripheral.G2Production.ResetMethod, AssertionError
        )
        args = {}
        args["position"] = [
            raritan.rpc.peripheral.PosElement.encode(x0) for x0 in position
        ]
        args["method"] = raritan.rpc.peripheral.G2Production.ResetMethod.encode(method)
        rsp = agent.json_rpc(self.target, "resetPos", args)
        _ret_ = rsp["_ret_"]
        typecheck.is_int(_ret_, DecodeException)
        return _ret_
| StarcoderdataPython |
72424 | <filename>autofit/graphical/factor_graphs/factor.py<gh_stars>0
from abc import ABC
from functools import lru_cache
from inspect import getfullargspec
from itertools import chain, repeat
from typing import \
(
Tuple, Dict, Union, Set, Callable, List
)
import numpy as np
from autofit.graphical.factor_graphs.abstract import \
(
AbstractNode, FactorValue
)
from autofit.graphical.utils import \
(
aggregate, Axis, cached_property
)
from autofit.mapper.variable import Variable
class AbstractFactor(AbstractNode, ABC):
    """Base class for factor nodes in a factor graph.

    The mapping from the wrapped function's keyword-argument names to
    Variable objects is stored in self._kwargs (set up by AbstractNode).
    """

    def __init__(
        self,
        name="",
        **kwargs: Variable,
    ):
        super().__init__(**kwargs)
        # Fall back to a unique name derived from the node id.
        self._name = name or f"factor_{self.id}"
        self._deterministic_variables = set()

    @property
    def deterministic_variables(self) -> Set[Variable]:
        # Variables whose value is fully determined by this factor.
        return self._deterministic_variables

    @property
    def name(self) -> str:
        return self._name

    def __mul__(self, other):
        """
        When two factors are multiplied together this creates a graph
        """
        # Imported here to avoid a circular import with the graph module.
        from autofit.graphical.factor_graphs.graph import FactorGraph
        return FactorGraph([self]) * other

    @property
    def variables(self) -> Set[Variable]:
        """
        The set of variables attached to this factor's keyword arguments
        """
        return set(self._kwargs.values())

    @property
    def _kwargs_dims(self) -> Dict[str, int]:
        """
        The number of plates for each keyword argument variable
        """
        return {
            key: len(value)
            for key, value
            in self._kwargs.items()
        }

    @cached_property
    def _variable_plates(self) -> Dict[str, np.ndarray]:
        """
        Maps the name of each variable to the indices of its plates
        within this node
        """
        return {
            variable: self._match_plates(
                variable.plates
            )
            for variable
            in self.all_variables
        }

    @property
    def n_deterministic(self) -> int:
        """
        How many deterministic variables are there associated with this node?
        """
        return len(self._deterministic_variables)

    def __hash__(self):
        # Hash by (type, id) so factors of different concrete types differ.
        return hash((type(self), self.id))

    def _resolve_args(
        self,
        **kwargs: np.ndarray
    ) -> dict:
        """
        Re-key the input from Variable names to the factor's own
        keyword-argument names.

        Parameters
        ----------
        kwargs
            Values keyed by variable name.

        Returns
        -------
        The same values keyed by this factor's argument names.
        """
        return {n: kwargs[v.name] for n, v in self._kwargs.items()}
class Factor(AbstractFactor):
"""
A node in a graph representing a factor with analytic evaluation
of its Jacobian
Parameters
----------
factor
the function being wrapped, must accept calls through keyword
argument
name: optional, str
the name of the factor, if not passed then uses the name
of the function passed
vectorised: optional, bool
if true the factor will call the function directly over multiple
inputs. If false the factor will call the function iteratively
over each argument.
is_scalar: optional, bool
if true the factor returns a scalar value. Note if multiple arguments
are passed then a vector will still be returned
kwargs: Variables
Variables for each keyword argument for the function
Methods
-------
__call__({x: x0}, axis=axis)
calls the factor, the passed input must be a dictionary with
where the keys are the Variable objects that the function takes
as input. The Variable keys only have to match the _names_
of the variables of the function.
`axis` controls the shape of the output if the variables and factor have
plates associated with them, when axis=False then no reduction is performed,
otherwise it is equivalent to calling np.sum(log_val, axis=axis) on the
returned value
returns a FactorValue object which behaves like an np.ndarray
func_jacobian({x: x0}, variables=(x,), axis=axis)
calls the factor and returns it value and the jacobian of its value
with respect to the `variables` passed. if variables is None then
it returns the jacobian with respect to all variables.
returns fval, {x: d fval / dx}
"""
def __init__(
self,
factor: Callable,
name="",
vectorised=False,
is_scalar=False,
**kwargs: Variable
):
"""
A node in a graph representing a factor
Parameters
----------
factor
A wrapper around some callable
args
Variables representing positional arguments for the function
kwargs
Variables representing keyword arguments for the function
"""
self.vectorised = vectorised
self.is_scalar = is_scalar
self._set_factor(factor)
args = getfullargspec(self._factor).args
kwargs = {
**kwargs,
**{
arg: Variable(arg)
for arg
in args
if arg not in kwargs and arg != "self"
}
}
super().__init__(
**kwargs,
name=name or factor.__name__
)
def _set_factor(self, factor):
self._factor = factor
self._has_exact_projection = getattr(
factor, 'has_exact_projection', None)
self._calc_exact_projection = getattr(
factor, 'calc_exact_projection', None)
self._calc_exact_update = getattr(
factor, 'calc_exact_update', None)
def has_exact_projection(self, mean_field) -> bool:
if self._has_exact_projection:
return self._has_exact_projection(
**self.resolve_variable_dict(mean_field))
else:
return False
def calc_exact_projection(self, mean_field) -> 'MeanField':
if self._calc_exact_projection:
from autofit.graphical.mean_field import MeanField
projection = self._calc_exact_projection(
**self.resolve_variable_dict(mean_field))
return MeanField({
self._kwargs[v]: dist for v, dist in projection.items()
})
else:
return NotImplementedError
def calc_exact_update(self, mean_field) -> 'MeanField':
if self._calc_exact_update:
from autofit.graphical.mean_field import MeanField
projection = self._calc_exact_update(
**self.resolve_variable_dict(mean_field))
return MeanField({
self._kwargs[v]: dist for v, dist in projection.items()
})
else:
return NotImplementedError
def safe_exact_update(self, mean_field) -> Tuple[bool, 'MeanField']:
if self._has_exact_projection:
from autofit.graphical.mean_field import MeanField
_mean_field = self.resolve_variable_dict(mean_field)
if self._has_exact_projection(**_mean_field):
projection = self._calc_exact_update(**_mean_field)
return True, MeanField({
self._kwargs[v]: dist for v, dist in projection.items()
})
return False, mean_field
def __hash__(self) -> int:
# TODO: might this break factor repetition somewhere?
return hash(self._factor)
def _reshape_factor(
self, factor_val, values
):
shift, shape = self._function_shape(**values)
if self.is_scalar:
if shift:
return np.sum(
factor_val, axis=np.arange(1, np.ndim(factor_val)))
return np.sum(factor_val)
return np.reshape(factor_val, shape)
def _function_shape(
self,
**kwargs: np.ndarray) -> Tuple[int, ...]:
"""
Calculates the expected function shape based on the variables
"""
var_shapes = {
k: np.shape(x) for k, x in kwargs.items()}
return self._var_shape(**var_shapes)
@lru_cache()
def _var_shape(self, **kwargs: Tuple[int, ...]) -> Tuple[int, ...]:
"""This is called by _function_shape
caches result so that does not have to be recalculated each call
lru_cache caches f(x=1, y=2) to f(y=2, x=1), but in this case
it should be find as the order of kwargs is set by self._kwargs
which should be stable
"""
var_shapes = {self._kwargs[k]: v for k, v in kwargs.items()}
var_dims_diffs = {
v: len(s) - v.ndim
for v, s in var_shapes.items()
}
"""
If all the passed variables have an extra dimension then
we assume we're evaluating multiple instances of the function at the
same time
otherwise an error is raised
"""
if set(var_dims_diffs.values()) == {1}:
# Check if we're passing multiple values e.g. for sampling
shift = 1
elif set(var_dims_diffs.values()) == {0}:
shift = 0
else:
raise ValueError("dimensions of passed inputs do not match")
"""
Updating shape of output array to match input arrays
singleton dimensions are always assumed to match as in
standard array broadcasting
e.g. (1, 2, 3) == (3, 2, 1)
"""
shape = np.ones(self.ndim + shift, dtype=int)
for v, vs in var_shapes.items():
ind = self._variable_plates[v] + shift
vshape = vs[shift:]
if shift:
ind = np.r_[0, ind]
vshape = (vs[0],) + vshape
if shape.size:
if not (
np.equal(shape[ind], 1) |
np.equal(shape[ind], vshape) |
np.equal(vshape, 1)).all():
raise AssertionError(
"Shapes do not match"
)
shape[ind] = np.maximum(shape[ind], vshape)
return shift, tuple(shape)
def _call_factor(
self,
**kwargs: np.ndarray
) -> np.ndarray:
"""
Call the underlying function
Parameters
----------
args
Positional arguments for the function
kwargs
Keyword arguments for the function
Returns
-------
Value returned by the factor
"""
# kws = self._resolve_args(
# **kwargs
# )
if self.vectorised:
return self._factor(**kwargs)
"""Some factors may not be vectorised to broadcast over
multiple inputs
this method checks whether multiple input values have been
passed, and if so automatically loops over the inputs.
If any of the inputs have initial dimension one, it repeats
that value to match the length of the other inputs
If the other inputs do not match then it raises ValueError
"""
kwargs_dims = {k: np.ndim(a) for k, a in kwargs.items()}
# Check dimensions of inputs directly match plates
direct_call = (
all(dim == kwargs_dims[k] for k, dim in self._kwargs_dims.items()))
if direct_call:
return self._factor(**kwargs)
# Check dimensions of inputs match plates + 1
vectorised = (
all(dim + 1 == kwargs_dims[k]
for k, dim in self._kwargs_dims.items()))
if not vectorised:
raise ValueError(
"input dimensions do not match required dims"
f"input: **kwargs={kwargs_dims}"
f"required: "
f"**kwargs={self._kwargs_dims}")
kw_lens = {k: len(a) for k, a in kwargs.items()}
# checking 1st dimensions match
sizes = set(kw_lens.values())
dim0 = max(sizes)
if sizes.difference({1, dim0}):
raise ValueError(
f"size mismatch first dimensions passed: {sizes}")
iter_kws = {
k: iter(a) if kw_lens[k] == dim0 else iter(repeat(a[0]))
for k, a in kwargs.items()}
# iterator to generate keyword arguments
def gen_kwargs():
for _ in range(dim0):
yield {
k: next(a) for k, a in iter_kws.items()}
# TODO this loop can also be parallelised for increased performance
res = np.array([
self._factor(**kws)
for kws in gen_kwargs()])
return res
def __call__(
self,
variable_dict: Dict[
Variable,
Union[
np.ndarray,
float,
List[float]
]
],
axis: Axis = False,
) -> FactorValue:
"""
Call the underlying factor
Parameters
----------
args
Positional arguments for the factor
kwargs
Keyword arguments for the factor
Returns
-------
Object encapsulating the result of the function call
"""
kwargs = self.resolve_variable_dict(variable_dict)
val = self._call_factor(**kwargs)
val = aggregate(self._reshape_factor(val, kwargs), axis)
return FactorValue(val, {})
def broadcast_variable(
self,
variable: str,
value: np.ndarray
) -> np.ndarray:
"""
broadcasts the value of a variable to match the specific shape
of the factor
if the number of dimensions passed of the variable is 1
greater than the dimensions of the variable then it's assumed
that that dimension corresponds to multiple samples of that variable
"""
return self._broadcast(
self._variable_plates[variable],
value
)
def collapse(
self,
variable: str,
value: np.ndarray,
agg_func=np.sum
) -> np.ndarray:
"""
broadcasts `value` to match the specific shape of the factor,
where `value` has the shape of the factor
if the number of dimensions passed of the variable is 1
greater than the dimensions of the variable then it's assumed
that that dimension corresponds to multiple samples of that variable
"""
ndim = np.ndim(value)
shift = ndim - self.ndim
assert shift in {0, 1}
inds = self._variable_plates[variable] + shift
dropaxes = tuple(np.setdiff1d(
np.arange(shift, ndim), inds))
# to ensured axes of returned array is in the correct order
moved = np.moveaxis(value, inds, np.sort(inds))
return agg_func(moved, axis=dropaxes)
def __eq__(self, other: Union["Factor", Variable]):
    """
    If set equal to a variable that variable is taken to be deterministic and
    so a DeterministicFactorNode is generated.

    Parameters
    ----------
    other
        Another Factor (compared for equality) or a Variable that this
        factor is declared to deterministically define

    Returns
    -------
    bool when compared with a Factor, otherwise a DeterministicFactor
    """
    if isinstance(other, Factor):
        if isinstance(other, type(self)):
            return (
                (self._factor == other._factor)
                and (frozenset(self._kwargs.items())
                     == frozenset(other._kwargs.items()))
                and (frozenset(self.variables)
                     == frozenset(other.variables))
                # BUG FIX: previously compared self's deterministic
                # variables with themselves (always True); compare
                # against `other`'s instead.
                and (frozenset(self.deterministic_variables)
                     == frozenset(other.deterministic_variables)))
        else:
            return False
    return DeterministicFactor(
        self._factor,
        other,
        name=self.name,
        **self._kwargs
    )
def __repr__(self) -> str:
    """Render as ``Factor(name, kw1=v1, ...)``."""
    arg_str = ", ".join(f"{key}={value}" for key, value in self._kwargs.items())
    return f"Factor({self.name}, {arg_str})"
class DeterministicFactor(Factor):
    """
    A deterministic factor is used to convert a function f(g(x)) to f(y)g(x)
    (integrating over y with a delta function) so that it can be represented
    in a factor graph.

    Parameters
    ----------
    factor
        The original factor to which the deterministic factor is associated
    variable
        The deterministic variable that is returned by the factor, so
        to represent the case f(g(x)), we would define,

        ```
        >>> x = Variable('x')
        >>> y = Variable('y')
        >>> g_ = Factor(g, x) == y
        >>> f_ = Factor(f, y)
        ```

        Alternatively g could be directly defined,

        ```
        >>> g_ = DeterministicFactor(g, y, x=x)
        ```
    kwargs
        Variables for the original factor
    """

    def __init__(
            self,
            factor: Callable,
            variable: Variable,
            *args: Variable,
            name: str = '',
            **kwargs: Variable
    ):
        """
        A deterministic factor is used to convert a function f(g(x)) to
        f(y)g(x) (integrating over y with a delta function) so that it can
        be represented in a factor graph.

        Parameters
        ----------
        factor
            The original factor to which the deterministic factor is associated
        variable
            The deterministic variable defined by the factor
        args
            Variables for the original factor
        kwargs
            Variables for the original factor
        """
        super().__init__(
            factor,
            *args,
            name=name or factor.__name__,
            **kwargs
        )
        # The single variable this factor deterministically defines.
        self._deterministic_variables = {
            variable
        }

    def __call__(
            self,
            variable_dict: Dict[Variable, np.ndarray],
            axis: Axis = False,
            # **kwargs: np.ndarray
    ) -> FactorValue:
        """
        Call this factor with a set of arguments

        Parameters
        ----------
        variable_dict
            Mapping from the factor's variables to their values
        axis
            Axis over which the (zero) log-value placeholder is aggregated

        Returns
        -------
        An object encapsulating the value for the factor
        """
        kwargs = self.resolve_variable_dict(variable_dict)
        res = self._call_factor(**kwargs)
        # Split the output shape into a leading (sample) part and the
        # plate part, then work out the target shape of each
        # deterministic variable from its plates.
        shift, shape = self._function_shape(**kwargs)
        plate_dim = dict(zip(self.plates, shape[shift:]))
        det_shapes = {
            v: shape[:shift] + tuple(
                plate_dim[p] for p in v.plates)
            for v in self.deterministic_variables
        }
        # Normalise a single return value to a 1-tuple so it zips with
        # the deterministic variables below.
        if not (isinstance(res, tuple) or self.n_deterministic > 1):
            res = res,
        # A deterministic factor contributes 0 to the log value; keep a
        # broadcastable-zero aggregate when a shape/axis is involved.
        log_val = (
            0. if (shape == () or axis is None) else
            aggregate(np.zeros(tuple(1 for _ in shape)), axis))
        det_vals = {
            k: np.reshape(val, det_shapes[k])
            if det_shapes[k]
            else val
            for k, val
            in zip(self._deterministic_variables, res)
        }
        return FactorValue(log_val, det_vals)

    def __repr__(self) -> str:
        # Render as "(Factor(...) == (var, ...))"
        factor_str = super().__repr__()
        var_str = ", ".join(sorted(variable.name for variable in self._deterministic_variables))
        return f"({factor_str} == ({var_str}))"
| StarcoderdataPython |
248400 | # coding: utf-8
from gae_mini_profiler import profiler
from gae_mini_profiler import templatetags
import flask
import flask_debugtoolbar
import config
import util
# Flask application setup: configuration, Jinja helpers, debug toolbar,
# and the gae-mini-profiler middleware wrapped around the WSGI app.
app = flask.Flask(__name__)
app.config.from_object(config)
# Jinja2: enable "# ..." line statements and "## ..." line comments.
app.jinja_env.line_statement_prefix = '#'
app.jinja_env.line_comment_prefix = '##'
# Utility helpers made available to every template.
app.jinja_env.globals.update(
    check_form_fields=util.check_form_fields,
    is_iterable=util.is_iterable,
    slugify=util.slugify,
    update_query_argument=util.update_query_argument,
)

toolbar = flask_debugtoolbar.DebugToolbarExtension(app)

# NOTE(review): these modules are imported for their side effects
# (presumably registering routes/models on `app`) — confirm before
# reordering or removing.
import auth
import control
import model
import task

from api import helpers

api_v1 = helpers.Api(app, prefix='/api/v1')

import api.v1

if config.DEVELOPMENT:
    # Werkzeug interactive debugger; the `pin_security` keyword is not
    # accepted by all Werkzeug versions, hence the TypeError fallback.
    from werkzeug import debug
    try:
        app.wsgi_app = debug.DebuggedApplication(
            app.wsgi_app, evalex=True, pin_security=False,
        )
    except TypeError:
        app.wsgi_app = debug.DebuggedApplication(app.wsgi_app, evalex=True)
    app.testing = False


###############################################################################
# gae mini profiler
###############################################################################
@app.context_processor
def inject_profiler():
    # Expose the profiler's script/style includes to every template.
    return dict(profiler_includes=templatetags.profiler_includes())


# Wrap the whole app so requests can be profiled.
app = profiler.ProfilerWSGIMiddleware(app)
| StarcoderdataPython |
107401 | <filename>src/resources/base_resource.py
from typing import Type
from flask_restful import Resource
from flask import request
from src.dao.base_dao import BaseDao
class BaseResource(Resource):
    """Generic REST resource mapping CRUD HTTP verbs onto a DAO.

    Subclasses supply the DAO instance and the model class; this base
    implements GET/POST/PUT/DELETE in terms of DAO calls.
    """

    def __init__(self, dao: BaseDao, type_model: Type):
        self.__dao = dao
        self.__model_type = type_model

    def get(self, id=None):
        """Return the item with the given id, or all items when id is None."""
        if id:
            return self.__dao.read_by_id(id)
        return self.__dao.read_all()

    def post(self):
        """Create a new item from the JSON request body; returns 201."""
        data = request.json
        item = self.__model_type(**data)
        self.__dao.save(item)
        return item, 201

    def put(self, id):
        """Update the item identified by id using the JSON request body.

        Returns 404 when the body id does not match the URL id or when
        no such item exists.
        """
        data = request.json
        # BUG FIX: use .get() so a body without an 'id' key yields a 404
        # instead of an unhandled KeyError (HTTP 500).
        if data.get('id') != id:
            return None, 404
        item = self.__dao.read_by_id(id)
        # BUG FIX: guard against a missing item — setattr(None, ...)
        # previously raised AttributeError (HTTP 500).
        if item is None:
            return None, 404
        for key, value in data.items():
            setattr(item, key, value)
        return self.__dao.save(item)

    def delete(self, id):
        """Delete the item identified by id; always returns 204."""
        item = self.__dao.read_by_id(id)
        self.__dao.delete(item)
        return None, 204
| StarcoderdataPython |
4992808 | <gh_stars>10-100
"""Removed unique constraint from topics
Revision ID: db25f23bffc8
Revises: <PASSWORD>
Create Date: 2021-08-23 17:16:20.389142
"""
from alembic import op
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
op.create_index(op.f('ix_repos_topics_repo_id_title'), 'repos_topics', ['repo_id', 'title'])
op.drop_constraint('uc_repos_topics_repo_id_title', 'repos_topics', type_='unique')
def downgrade():
op.create_unique_constraint(
'uc_repos_topics_repo_id_title', 'repos_topics', ['repo_id', 'title']
)
op.drop_index(op.f('ix_repos_topics_repo_id_title'), table_name='repos_topics')
| StarcoderdataPython |
132941 | <reponame>pakit/test_recipes
""" Formula that requires cyclea recipe. """
from pakit import Dummy, Recipe
class Cycleb(Recipe):
    """
    Dummy recipe that does nothing except declare a dependency
    on the `cyclea` recipe.
    """

    def __init__(self):
        super(Cycleb, self).__init__()
        self.homepage = 'dummy'
        self.repos = dict(stable=Dummy())
        self.requires = ['cyclea']

    def build(self):
        """No build step for this dummy recipe."""
        pass

    def verify(self):
        """No verification step for this dummy recipe."""
        pass
| StarcoderdataPython |
3474946 | <filename>src/train.py
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import os
from others.logging import init_logger
from train_abstractive import validate, train, test_text, baseline
# Model hyper-parameter flags that must stay consistent between runs
# (e.g. when restoring from a checkpoint).
model_flags = [
    'hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers',
    'enc_hidden_size', 'enc_ff_size', 'dec_layers', 'dec_hidden_size',
    'dec_ff_size', 'encoder', 'ff_actv', 'use_interval',
]


def str2bool(v):
    """Parse a command-line string into a bool (for argparse ``type=``)."""
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # Basic args
    parser.add_argument("-mode", default='train', type=str, choices=['train', 'validate', 'test', 'lead', 'oracle'])
    parser.add_argument("-test_mode", default='abs', type=str, choices=['ext', 'abs'])
    parser.add_argument("-src_data_mode", default='utt', type=str, choices=['utt', 'word'])
    parser.add_argument("-data_path", default='bert_data/ali')
    parser.add_argument("-model_path", default='models')
    parser.add_argument("-result_path", default='results/ali')
    parser.add_argument("-bert_dir", default='bert/chinese_bert')
    parser.add_argument('-log_file', default='logs/temp.log')
    parser.add_argument('-visible_gpus', default='0', type=str)
    parser.add_argument('-gpu_ranks', default='0', type=str)
    parser.add_argument('-seed', default=666, type=int)

    # Batch sizes
    parser.add_argument("-batch_size", default=2000, type=int)
    parser.add_argument("-batch_ex_size", default=4, type=int)
    parser.add_argument("-test_batch_size", default=20000, type=int)
    parser.add_argument("-test_batch_ex_size", default=50, type=int)

    # Model args
    parser.add_argument("-encoder", default='bert', type=str, choices=['bert', 'transformer', 'rnn'])
    parser.add_argument("-decoder", default='transformer', type=str, choices=['transformer', 'rnn'])
    parser.add_argument("-share_emb", type=str2bool, nargs='?', const=True, default=True)
    parser.add_argument("-max_pos", default=512, type=int)
    parser.add_argument("-finetune_bert", type=str2bool, nargs='?', const=True, default=True)
    parser.add_argument("-dec_dropout", default=0.2, type=float)
    parser.add_argument("-dec_layers", default=3, type=int)
    parser.add_argument("-dec_hidden_size", default=768, type=int)
    parser.add_argument("-dec_heads", default=8, type=int)
    parser.add_argument("-dec_ff_size", default=2048, type=int)
    parser.add_argument("-enc_hidden_size", default=768, type=int)
    parser.add_argument("-enc_ff_size", default=2048, type=int)
    parser.add_argument("-enc_heads", default=8, type=int)
    parser.add_argument("-enc_dropout", default=0.2, type=float)
    parser.add_argument("-enc_layers", default=3, type=int)

    # args for copy mechanism and coverage
    parser.add_argument("-coverage", type=str2bool, nargs='?', const=True, default=False)
    parser.add_argument("-copy_attn", type=str2bool, nargs='?', const=True, default=False)
    parser.add_argument("-copy_attn_force", type=str2bool, nargs='?', const=True, default=False)
    parser.add_argument("-copy_loss_by_seqlength", type=str2bool, nargs='?', const=True, default=False)

    # args for sent-level encoder
    parser.add_argument("-hier_dropout", default=0.2, type=float)
    parser.add_argument("-hier_layers", default=2, type=int)
    parser.add_argument("-hier_hidden_size", default=768, type=int)
    parser.add_argument("-hier_heads", default=8, type=int)
    parser.add_argument("-hier_ff_size", default=2048, type=int)

    # args for topic model
    parser.add_argument("-topic_model", type=str2bool, nargs='?', const=True, default=False)
    parser.add_argument("-loss_lambda", default=0.001, type=float)
    parser.add_argument("-tokenize", type=str2bool, nargs='?', const=True, default=True)
    parser.add_argument("-idf_info_path", default="bert_data/idf_info.pt")
    parser.add_argument("-topic_num", default=50, type=int)
    parser.add_argument("-word_emb_size", default=100, type=int)
    parser.add_argument("-word_emb_mode", default="word2vec", type=str, choices=["glove", "word2vec"])
    parser.add_argument("-word_emb_path", default="pretrain_emb/word2vec", type=str)
    parser.add_argument("-use_idf", type=str2bool, nargs='?', const=True, default=False)
    parser.add_argument("-split_noise", type=str2bool, nargs='?', const=True, default=False)
    parser.add_argument("-max_word_count", default=6000, type=int)
    parser.add_argument("-min_word_count", default=5, type=int)
    parser.add_argument("-agent", type=str2bool, nargs='?', const=True, default=True)
    parser.add_argument("-cust", type=str2bool, nargs='?', const=True, default=True)
    parser.add_argument("-noise_rate", type=float, default=0.5)

    # Training process args
    parser.add_argument("-save_checkpoint_steps", default=2000, type=int)
    parser.add_argument("-accum_count", default=2, type=int)
    parser.add_argument("-report_every", default=5, type=int)
    parser.add_argument("-train_steps", default=80000, type=int)
    parser.add_argument("-label_smoothing", default=0.1, type=float)
    parser.add_argument("-generator_shard_size", default=32, type=int)
    parser.add_argument("-max_tgt_len", default=100, type=int)

    # Beam search decoding args
    parser.add_argument("-alpha", default=0.6, type=float)
    parser.add_argument("-beam_size", default=3, type=int)
    parser.add_argument("-min_length", default=10, type=int)
    parser.add_argument("-max_length", default=100, type=int)
    parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)

    # Optim args
    parser.add_argument("-optim", default='adam', type=str)
    parser.add_argument("-sep_optim", type=str2bool, nargs='?', const=True, default=False)
    parser.add_argument("-lr_bert", default=0.001, type=float)
    parser.add_argument("-lr_other", default=0.01, type=float)
    parser.add_argument("-lr_topic", default=0.0001, type=float)
    parser.add_argument("-lr", default=0.001, type=float)
    parser.add_argument("-beta1", default=0.9, type=float)
    parser.add_argument("-beta2", default=0.999, type=float)
    parser.add_argument("-warmup", type=str2bool, nargs='?', const=True, default=True)
    parser.add_argument("-warmup_steps", default=5000, type=int)
    parser.add_argument("-warmup_steps_bert", default=5000, type=int)
    parser.add_argument("-warmup_steps_other", default=5000, type=int)
    parser.add_argument("-max_grad_norm", default=0, type=float)

    # Pretrain args
    parser.add_argument("-pretrain", type=str2bool, nargs='?', const=True, default=False)

    # Baseline model pretrain args
    parser.add_argument("-pretrain_steps", default=80000, type=int)

    # Utility args
    parser.add_argument("-test_all", type=str2bool, nargs='?', const=True, default=False)
    parser.add_argument("-test_from", default='')
    parser.add_argument("-test_start_from", default=-1, type=int)
    parser.add_argument("-train_from", default='')
    parser.add_argument("-train_from_ignore_optim", type=str2bool, nargs='?', const=True, default=False)

    # args for RL
    parser.add_argument("-freeze_step", default=500, type=int)
    parser.add_argument("-ex_max_token_num", default=500, type=int)
    parser.add_argument("-sent_hidden_size", default=768, type=int)
    parser.add_argument("-sent_ff_size", default=2048, type=int)
    parser.add_argument("-sent_heads", default=8, type=int)
    parser.add_argument("-sent_dropout", default=0.2, type=float)
    parser.add_argument("-sent_layers", default=3, type=int)
    parser.add_argument("-pn_hidden_size", default=768, type=int)
    parser.add_argument("-pn_ff_size", default=2048, type=int)
    parser.add_argument("-pn_heads", default=8, type=int)
    parser.add_argument("-pn_dropout", default=0.2, type=float)
    parser.add_argument("-pn_layers", default=2, type=int)
    parser.add_argument("-mask_token_prob", default=0.15, type=float)
    parser.add_argument("-select_sent_prob", default=0.90, type=float)

    args = parser.parse_args()
    # gpu_ranks is rewritten to one rank per visible GPU.
    args.gpu_ranks = list(range(len(args.visible_gpus.split(','))))
    args.world_size = len(args.gpu_ranks)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus

    init_logger(args.log_file)
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    device_id = 0 if device == "cuda" else -1

    if (args.mode == 'train'):
        train(args, device_id)
    elif (args.mode == 'lead'):
        baseline(args, cal_lead=True)
    elif (args.mode == 'oracle'):
        baseline(args, cal_oracle=True)
    elif (args.mode == 'validate'):
        validate(args, device_id)
    elif (args.mode == 'test'):
        # Checkpoint name is expected to look like ".../model_<step>.pt";
        # the step number is parsed out of it.
        cp = args.test_from
        try:
            step = int(cp.split('.')[-2].split('_')[-1])
        except (ValueError, IndexError):
            # BUG FIX: the original caught RuntimeWarning, which int()
            # and indexing never raise, so the fallback below was
            # unreachable and malformed names crashed the script.
            print("Unrecognized cp step.")
            step = 0
        test_text(args, device_id, cp, step)
    else:
        print("Undefined mode! Please check input.")
| StarcoderdataPython |
# Reference .wav clip per target speaker id; each comment describes the
# voice (translated from the original Korean notes).
TARGETS = {
    'a001': "a001.wav",  # deep/thick female voice
    'p112': "p112_003.wav",  # "Gyeon Ja-hee" (character name)
    'p226': "p226_049-22k.wav",  # middle-aged man
    'p234': "p234_009-22k.wav",  # young woman
    'p237': "p237_007-22k.wav",  # deep, dignified male voice
    'p345': "p345_009-22k.wav",  # husky male voice
    "p277": "p277_034-22k.wav",  # young girl
    "p286": "p286_010-22k.wav",  # thin male voice
    "p300": "p300_068-22k.wav",  # stern/particular female voice
    "p306": "p306_012-22k.wav",  # gentle, polite female voice
    "p329": "p329_041-22k.wav",  # quiet female voice
    "p330": "p330_048-22k.wav",  # ordinary female voice
    "p361": "p361_026-22k.wav",  # confident female voice
}
379787 | <gh_stars>10-100
# coding=utf-8
from flask import url_for
from psi.app import const
from psi.app.utils import db_util
from tests import fixture
from tests.base_test_case import BaseTestCase
from tests.object_faker import object_faker
from tests.views.organization.base_organization_test import BaseOrganizationTestCase
class TestCreateOrganization(BaseOrganizationTestCase):
    """End-to-end tests for creating (and failing to create) organizations."""

    def test_create(self):
        from psi.app.models import EnumValues
        # Organization type id for a "direct selling store".
        type_id = EnumValues.get(const.DIRECT_SELLING_STORE_ORG_TYPE_KEY).id
        with self.test_client:
            # As admin: the create form renders and a new organization
            # can be created and then deleted.
            fixture.login_as_admin(self.test_client)
            org_name = object_faker.faker.name()
            org_desc = object_faker.faker.text(max_nb_chars=20)
            create_url = self.create_endpoint(view='organization')
            self.assertPageRendered(
                endpoint=create_url,
                method=self.test_client.get,
                expect_contents=['betterlife', '直营店'])
            self.assertPageRendered(
                endpoint=create_url,
                method=self.test_client.post,
                expect_contents=[org_name, org_desc],
                data={
                    "type": type_id,
                    "name": org_name,
                    "description": org_desc,
                    "parent": 1
                })
            self.assertDeleteSuccessful(
                endpoint=url_for(
                    'organization.delete_view',
                    id=2,
                    url=url_for('organization.index_view')),
                deleted_data=[org_name, org_desc])
            # As a non-admin user with organization permissions only:
            # creating the same organization is expected to fail.
            from psi.app.models import Organization
            user, pwd = object_faker.user(
                role_names=[
                    'organization_create', 'organization_view',
                    'organization_delete', 'organization_edit'
                ],
                organization=Organization.query.get(1))
            db_util.save_objects_commit(user)
            fixture.login_user(self.test_client, user.email, pwd)
            from psi.app.models import EnumValues
            org_type = EnumValues.get(const.DIRECT_SELLING_STORE_ORG_TYPE_KEY)
            self.assertCreateFail(
                endpoint=create_url,
                create_data=[org_name, org_desc],
                data={
                    "type": org_type.id,
                    "name": org_name,
                    "description": org_desc,
                    "parent": 1
                })
| StarcoderdataPython |
1655757 | <filename>tests/@proxyTest/makeOpMethods.py
#!/bin/env python
# 2007-03-27 generate boilerplate matlab methods
#
# Writes one <op>.m file per operator; each generated MATLAB method
# forwards the operation to the wrapped object's .data field.
from awmstools import spitOut

# MATLAB binary operators to generate forwarding methods for.
BINARY_ARITH = '''plus
minus
mtimes
times
mpower
power
mldivide
mrdivide
ldivide
rdivide
eq
ne
lt
gt
le
ge
horzcat
vertcat
'''.split()

# MATLAB unary operators (leading newline in the literal is harmless:
# str.split() discards empty fields).
UNARY_ARITH='''
uminus
uplus
transpose
ctranspose
'''.split() # XXX: size needs to be handcoded

# Binary methods: forward both operands' .data fields.
for op in BINARY_ARITH:
    spitOut(file=op + ".m", s=
'''function res=%(op)s(X,Y)
res=%(op)s(X.data, Y.data);
end
''' % dict(op=op))

# Unary methods: forward the single operand's .data field.
for op in UNARY_ARITH:
    spitOut(file=op + ".m", s=
'''function res=%(op)s(X)
res=%(op)s(X.data);
end
'''% dict(op=op))
8150917 | <reponame>adwaita1/ALGORITHMMS<gh_stars>1-10
# Memoized recursive edit (Levenshtein) distance.
memo = {}  # kept for backward compatibility; DP now memoizes per call


def DP(x, y, i, j, memo=None):
    """Return the edit distance between x[i:] and y[j:].

    Parameters
    ----------
    x, y : str
        The two strings being compared.
    i, j : int
        Current positions in x and y.
    memo : dict, optional
        Per-call memoization table keyed by (i, j). A fresh dict is
        created per top-level call so repeated calls with different
        strings no longer read stale entries from a shared global
        cache (a bug in the original).
    """
    if memo is None:
        memo = {}
    # Base cases: one string exhausted -> insert/delete the rest.
    if i >= len(x):
        return len(y) - j
    if j >= len(y):
        return len(x) - i
    if (i, j) in memo:
        return memo[(i, j)]
    if x[i] == y[j]:
        # Match: the diagonal move is free, but the skip moves still
        # cost 1 each. (BUG FIX: the original took min() without the
        # +1 on the skip moves, under-counting — e.g. it returned 0
        # for DP('aa', 'a', 0, 0) instead of 1.)
        res = min(DP(x, y, i + 1, j + 1, memo),
                  1 + DP(x, y, i, j + 1, memo),
                  1 + DP(x, y, i + 1, j, memo))
    else:
        # Mismatch: substitute, insert, or delete — each costs 1.
        res = 1 + min(DP(x, y, i, j + 1, memo),
                      DP(x, y, i + 1, j, memo),
                      DP(x, y, i + 1, j + 1, memo))
    memo[(i, j)] = res
    return res


'''Driver Program'''
x = 'MICHAELANGELO'
y = 'HIEROGLYPHOLOGY'
s1 = 'a'
s2 = 'ab'
r = DP(x, y, 0, 0)
print(r)
| StarcoderdataPython |
3428045 | <reponame>nasa/fmdtools<gh_stars>1-10
"""
Description: Translates simulation outputs to pandas tables for display, export, etc.
Uses methods:
- :meth:`hist`: Returns formatted pandas dataframe of model history
- :meth:`objtab`: Make table of function OR flow value attributes - objtype = 'function' or 'flow'
- :meth:`stats`: Makes a table of #of degraded flows, # of degraded functions, and # of total faults over time given a single result history
- :meth:`degflows`: Makes a table of flows over time, where 0 is degraded and 1 is nominal
- :meth:`degflowvals`: Makes a table of individual flow state values over time, where 0 is degraded and 1 is nominal
- :meth:`degfxns`: Makes a table showing which functions are degraded over time (0 for degraded, 1 for nominal)
- :meth:`deghist`: Makes a table of all functions and flows that are degraded over time. If withstats=True, the total # of each type degraded is provided in the last columns
- :meth:`heatmaps`: Makes a table of a heatmap dictionary
- :meth:`costovertime`: Makes a table of the total cost, rate, and expected cost of all faults over time
- :meth:`samptime`: Makes a table of the times sampled for each phase given a dict (i.e. app.sampletimes)
- :meth:`summary:` Makes a table of a summary dictionary from a given model run
- :meth:`result`: Makes a table of results (degraded functions/flows, cost, rate, expected cost) of a single run
- :meth:`dicttab`: Makes table of a generic dictionary
- :meth:`maptab`: Makes table of a generic map
- :meth:`nominal_stats`: Makes a table of quantities of interest from endclasses from a nominal approach.
- :meth:`nested_stats`: Makes a table of quantities of interest from endclasses from a nested approach.
- :meth:`nominal_factor_comparison`: Compares a metric for a given set of model parameters/factors over a set of nominal scenarios.
- :meth:`nested_factor_comparison`: Compares a metric for a given set of model parameters/factors over a nested set of nominal and fault scenarios.
Also used for FMEA-like tables:
- :meth:`simplefmea`: Makes a simple fmea (rate, cost, expected cost) of the endclasses of a list of fault scenarios run
- :meth:`phasefmea`: Makes a simple fmea of the endclasses of a set of fault scenarios run grouped by phase.
- :meth:`summfmea`: Makes a simple fmea of the endclasses of a set of fault scenarios run grouped by fault.
- :meth:`fullfmea`: Makes full fmea table (degraded functions/flows, cost, rate, expected cost) of scenarios given endclasses dict (cost, rate, expected cost) and summaries dict (degraded functions, degraded flows)
"""
#File Name: resultdisp/tabulate.py
#Author: <NAME>
#Created: November 2019 (Refactored April 2020)
import pandas as pd
import numpy as np
from fmdtools.resultdisp.process import expected, average, percent, rate, overall_diff, nan_to_x, bootstrap_confidence_interval
#makehisttable
# put history in a tabular format
def hist(mdlhist):
    """Return the model history formatted as a pandas DataFrame."""
    if "nominal" in mdlhist:
        mdlhist = mdlhist['faulty']
    # Histories with nested per-flow attribute dicts use the 'flows'
    # layout; flat histories use 'flowvals'.
    flows_nested = any(isinstance(v, dict) for v in mdlhist['flows'].values())
    flowtable = objtab(mdlhist, 'flows' if flows_nested else 'flowvals')
    fxntable = objtab(mdlhist, 'functions')
    timetable = pd.DataFrame()
    timetable['time', 't'] = mdlhist['time']
    timetable.reindex([('time', 't')], axis="columns")
    combined = pd.concat([timetable, fxntable, flowtable], axis=1)
    # Rebuild the columns as a MultiIndex of (object, attribute) tuples.
    return combined.reindex(
        pd.MultiIndex.from_tuples(combined.columns), axis='columns')
def objtab(hist, objtype):
    """make table of function OR flow value attributes - objtype = 'function' or 'flow'

    Columns are (object, attribute) tuples collected into a MultiIndex.
    """
    df = pd.DataFrame()
    labels = []
    for fxn, atts in hist[objtype].items():
        for att, val in atts.items():
            if att != 'faults':
                if type(val)==dict:
                    # Nested attribute dict: one column per sub-attribute.
                    for subatt, subval in val.items():
                        if subatt!= 'faults':
                            label=(fxn, att+'_'+subatt)
                            labels=labels+[label]
                            df[label]=subval
                        else:
                            # NOTE(review): nesting reconstructed from a
                            # whitespace-mangled source; the label uses
                            # subatt ('faults') rather than att — confirm
                            # against the upstream fmdtools source.
                            label_faults(hist[objtype][fxn][att].get('faults', {}), df, fxn+'_'+subatt, labels)
                else:
                    label=(fxn, att)
                    labels=labels+[label]
                    df[label]=val
        if objtype =='functions':
            # Append the function's own fault columns, if any.
            label_faults(hist[objtype][fxn].get('faults', {}), df, fxn, labels)
    index = pd.MultiIndex.from_tuples(labels)
    df = df.reindex(index, axis="columns")
    return df
def label_faults(faulthist, df, fxnlab, labels):
    """Append fault-history columns for one function to ``df`` and ``labels``.

    ``faulthist`` is either a dict of per-fault histories (one column per
    fault mode) or a single-element history (one 'faults' column).
    Both ``df`` and ``labels`` are mutated in place.
    """
    if type(faulthist) == dict:
        # One "<fault> fault" column per named fault mode.
        for fault in faulthist:
            col = (fxnlab, fault + ' fault')
            labels.append(col)
            df[col] = faulthist[fault]
    elif len(faulthist) == 1:
        # Single unnamed fault history.
        col = (fxnlab, 'faults')
        labels.append(col)
        df[col] = faulthist
def stats(reshist):
    """Tabulate the degradation/fault statistics of a result history,
    with a leading 'time' column."""
    frame = pd.DataFrame(reshist['stats'])
    frame.insert(0, 'time', reshist['time'])
    return frame
def degflows(reshist):
    """Tabulate flow status over time (0=degraded, 1=nominal),
    with a leading 'time' column."""
    frame = pd.DataFrame(reshist['flows'])
    frame.insert(0, 'time', reshist['time'])
    return frame
def degflowvals(reshist):
    """Tabulate individual flow state values over time
    (0=degraded, 1=nominal), with a leading 'time' column."""
    frame = objtab(reshist, 'flowvals')
    frame.insert(0, 'time', reshist['time'])
    return frame
def degfxns(reshist):
    """Tabulate function status over time (0=degraded, 1=nominal),
    with a leading 'time' column."""
    frame = pd.DataFrame(
        {name: fhist['status'] for name, fhist in reshist['functions'].items()})
    frame.insert(0, 'time', reshist['time'])
    return frame
def deghist(reshist, withstats=False):
    """Tabulate all degraded functions and flows over time.

    When ``withstats`` is True the per-type totals are appended as the
    last columns.
    """
    parts = [degfxns(reshist), pd.DataFrame(reshist['flows'])]
    if withstats:
        parts.append(pd.DataFrame(reshist['stats']))
    return pd.concat(parts, axis=1)
def heatmaps(heatmaps):
    """Tabulate a dict of heatmaps: one row per heatmap, one column per cell."""
    return pd.DataFrame(heatmaps).transpose()
def costovertime(endclasses, app):
    """
    Tabulate the total cost, rate, and expected cost of all faults over time.

    Parameters
    ----------
    endclasses : dict
        dict with rate, cost, and expected cost for each injected scenario
    app : sampleapproach
        sample approach used to generate the list of scenarios

    Returns
    -------
    pandas.DataFrame
        total cost, rate, and expected cost columns indexed by fault time
    """
    metrics = ('cost', 'rate', 'expected cost')
    # Accumulate each metric per injection time across all scenarios.
    totals = {metric: {t: 0.0 for t in app.times} for metric in metrics}
    for scen in app.scenlist:
        name = scen['properties']['name']
        time = scen['properties']['time']
        for metric in metrics:
            totals[metric][time] += endclasses[name][metric]
    return pd.DataFrame.from_dict(totals)
def samptime(sampletimes):
    """Tabulate the times sampled in each phase (one row per phase,
    the sampled times rendered as a string)."""
    return pd.DataFrame(
        {phase: [str(list(times.keys()))]
         for phase, times in sampletimes.items()}).transpose()
def summary(summary):
    """Makes a table of a summary dictionary from a given model run.

    Each key of ``summary`` becomes a row of the returned DataFrame.
    """
    return pd.DataFrame.from_dict(summary, orient = 'index')
def result(endresults, summary):
    """Tabulate a single run: its classification metrics plus the lists
    of degraded functions and flows."""
    frame = pd.DataFrame(endresults['classification'], index=[0])
    frame['degraded functions'] = [summary['degraded functions']]
    frame['degraded flows'] = [summary['degraded flows']]
    return frame
def dicttab(dictionary):
    """Makes table of a generic dictionary.

    Keys become columns of a single-row DataFrame.
    """
    return pd.DataFrame(dictionary, index=[0])
def maptab(mapping):
    """Tabulate a generic mapping: one row per top-level key."""
    return pd.DataFrame(mapping).transpose()
def nominal_stats(nomapp, nomapp_endclasses, metrics='all', inputparams='from_range', scenarios='all'):
    """
    Makes a table of quantities of interest from endclasses.

    Parameters
    ----------
    nomapp : NominalApproach
        NominalApproach used to generate the simulation.
    nomapp_endclasses: dict
        End-state classifcations for the set of simulations from propagate.nominalapproach()
    metrics : 'all'/list, optional
        Metrics to show on the plot. The default is 'all'.
    inputparams : 'from_range'/'all',list, optional
        Parameters to show on the plot. The default is 'from_range'.
    scenarios : 'all','range'/list, optional
        Scenarios to include in the plot. 'range' is a given range_id in the nominalapproach.

    Returns
    -------
    table : pandas DataFrame
        Table with the metrics of interest layed out over the input parameters for the set of scenarios in endclasses
    """
    # Default metrics: every key of the first endclass entry.
    if metrics=='all': metrics = [*nomapp_endclasses[[*nomapp_endclasses][0]]]
    # Resolve the scenario selection ('all', a range id, or an explicit list).
    if scenarios=='all': scens = [*nomapp_endclasses]
    elif type(scenarios)==str: scens = nomapp.ranges[scenarios]['scenarios']
    elif not type(scenarios)==list: raise Exception("Invalid option for scenarios. Provide 'all'/'rangeid' or list")
    else: scens = scenarios
    # Resolve which input parameters label the rows.
    if inputparams=='from_range':
        ranges=[*nomapp.ranges]
        # Use the range implied by `scenarios`, or the only range available.
        if not(scenarios=='all') and not(type(scenarios)==list): app_range= scenarios
        elif len(ranges)==1: app_range=ranges[0]
        else: raise Exception("Multiple approach ranges "+str(ranges)+" in approach. Use inputparams=`all` or inputparams=[param1, param2,...]")
        inputparams= [*nomapp.ranges[app_range]['inputranges']]
    elif inputparams=='all': inputparams=[*nomapp.scenarios.values()][0]['properties']['inputparams']
    elif inputparams=='none': inputparams=[]
    # One row per input parameter, then one row per metric.
    table_values=[]
    for inputparam in inputparams:
        table_values.append([nomapp.scenarios[e]['properties']['inputparams'][inputparam] for e in scens])
    for metric in metrics:
        table_values.append([nomapp_endclasses[e][metric] for e in scens])
    # NOTE(review): columns use *all* endclass keys while values follow the
    # filtered `scens` — these differ in length when scenarios != 'all';
    # confirm intended behavior upstream.
    table = pd.DataFrame(table_values, columns=[*nomapp_endclasses], index=inputparams+metrics)
    return table
def nominal_factor_comparison(nomapp, endclasses, params, metrics='all', rangeid='default', nan_as=np.nan, percent=True, difference=True, give_ci=False, **kwargs):
    """
    Compares a metric for a given set of model parameters/factors over set of nominal scenarios.

    Parameters
    ----------
    nomapp : NominalApproach
        Nominal Approach used to generate the simulations
    endclasses : dict
        dict of endclasses from propagate.nominal_approach or nested_approach with structure:
        {scen_x:{metric1:x, metric2:x...}} or {scen_x:{fault:{metric1:x, metric2:x...}}}
    params : list/str
        List of parameters (or parameter) to use for the factor levels in the comparison
    metrics : 'all'/list, optional
        Metrics to show in the table. The default is 'all'.
    rangeid : str, optional
        Nominal Approach range to use for the test, if run over a single range.
        The default is 'default', which either:
        - picks the only range (if there is only one), or
        - compares between ranges (if more than one)
    nan_as : float, optional
        Number to parse NaNs as (if present). The default is np.nan.
    percent : bool, optional
        Whether to compare metrics as bools (True - results in a comparison of percentages of indicator variables)
        or as averages (False - results in a comparison of average values of real valued variables). The default is True.
    difference : bool, optional
        Whether to tabulate the difference of the metric from the nominal over each scenario (True),
        or the value of the metric over all (False). The default is True.
        NOTE(review): this parameter is not referenced in the body below —
        confirm whether difference handling was dropped intentionally.
    give_ci = bool:
        gives the bootstrap confidence interval for the given statistic using the given kwargs
        'combined' combines the values as a strings in the table (for display)
    kwargs : keyword arguments for bootstrap_confidence_interval (sample_size, num_samples, interval, seed)

    Returns
    -------
    table : pandas table
        Table with the metric statistic (percent or average) over the nominal scenario and each listed function/mode (as differences or averages)
    """
    # Resolve factor level -> scenario-name groupings.
    if rangeid=='default':
        if len(nomapp.ranges.keys())==1:
            rangeid=[*nomapp.ranges.keys()][0]
            factors = nomapp.get_param_scens(rangeid, *params)
        else:
            # Multiple ranges: compare between the ranges themselves.
            factors = {rangeid:nomapp.ranges[rangeid]['scenarios'] for rangeid in nomapp.ranges}
    else: factors = nomapp.get_param_scens(rangeid, *params)
    # Nested endclasses: unwrap to the nominal entry of each scenario.
    if [*endclasses.values()][0].get('nominal', False): endclasses ={scen:ec['nominal'] for scen, ec in endclasses.items()}
    if metrics=='all': metrics = [ec for ec,val in [*endclasses.values()][0].items() if type(val) in [float, int]]
    if type(params)==str: params=[params]
    full_stats=[]
    for metric in metrics:
        factor_stats = []
        for factor, scens in factors.items():
            # Restrict to the scenarios at this factor level.
            endclass_fact = {scen:endclass for scen, endclass in endclasses.items() if scen in scens}
            # percent=True turns values into 0/1 indicators via np.sign.
            if not percent: nominal_metrics = [nan_to_x(scen[metric], nan_as) for scen in endclass_fact.values()]
            else: nominal_metrics = [np.sign(nan_to_x(scen[metric], nan_as)) for scen in endclass_fact.values()]
            factor_stats= factor_stats + [sum(nominal_metrics)/len(nominal_metrics)]
            if give_ci:
                # Append bootstrap lower/upper bounds after the statistic.
                factor_boot, factor_lb, factor_ub = bootstrap_confidence_interval(nominal_metrics, **kwargs)
                factor_stats = factor_stats + [factor_lb, factor_ub]
        full_stats.append(factor_stats)
    # 'combined' renders "stat (lb,ub)" strings; values sit at every 3rd slot.
    if give_ci=='combined': full_stats = [[str(round(v,3))+' ('+str(round(f[i+1],3))+','+str(round(f[i+2],3))+')' for i,v in enumerate(f) if not i%3] for f in full_stats]
    if give_ci !=True:
        table = pd.DataFrame(full_stats, columns = factors, index=metrics)
        table.columns.name=tuple(params)
    else:
        # give_ci=True: three columns ("", "LB", "UB") per factor level.
        columns = [(f, stat) for f in factors for stat in ["", "LB", "UB"]]
        table = pd.DataFrame(full_stats, columns=columns, index=metrics)
        table.columns = pd.MultiIndex.from_tuples(table.columns, names=['metric', ''])
        table.columns.name=tuple(params)
    return table
def resilience_factor_comparison(nomapp, nested_endclasses, params, value, faults='functions', rangeid='default', nan_as=np.nan, percent=True, difference=True, give_ci=False, **kwargs):
    """
    Compares a metric for a given set of model parameters/factors over a nested set of nominal and fault scenarios.
    Parameters
    ----------
    nomapp : NominalApproach
        Nominal Approach used to generate the simulations
    nested_endclasses : dict
        dict of endclasses from propagate.nested_approach with structure: {scen_x:{fault:{metric1:x, metric2:x...}}}
    params : list/str
        List of parameters (or parameter) to use for the factor levels in the comparison
    value : string
        metric of the endclass (returned by mdl.find_classification) to use for the comparison.
    faults : str/list, optional
        Set of faults to run the comparison over
            --'modes' (all fault modes),
            --'functions' (modes for each function are grouped)
            --'mode type' (modes with the same name are grouped)
            -- or a set of specific modes/functions. The default is 'functions'.
    rangeid : str, optional
        Nominal Approach range to use for the test, if run over a single range.
        The default is 'default', which either:
            - picks the only range (if there is only one), or
            - compares between ranges (if more than one)
    nan_as : float, optional
        Number to parse NaNs as (if present). The default is np.nan.
    percent : bool, optional
        Whether to compare metrics as bools (True - results in a comparison of percentages of indicator variables)
        or as averages (False - results in a comparison of average values of real valued variables). The default is True.
    difference : bool, optional
        Whether to tabulate the difference of the metric from the nominal over each scenario (True),
        or the value of the metric over all (False). The default is True.
    give_ci = bool:
        gives the bootstrap confidence interval for the given statistic using the given kwargs
        'combined' combines the values as a strings in the table (for display)
    kwargs : keyword arguments for bootstrap_confidence_interval (sample_size, num_samples, interval, seed)
    Returns
    -------
    table : pandas table
        Table with the metric statistic (percent or average) over the nominal scenario and each listed function/mode (as differences or averages)
    """
    # Normalize params first: a bare string would otherwise be unpacked
    # character-by-character by `*params` in get_param_scens below.
    if type(params)==str: params=[params]
    # Resolve factor levels: either from the given/only range, or across ranges.
    if rangeid=='default':
        if len(nomapp.ranges.keys())==1:
            rangeid=[*nomapp.ranges.keys()][0]
            factors = nomapp.get_param_scens(rangeid, *params)
        else:
            factors = {rangeid:nomapp.ranges[rangeid]['scenarios'] for rangeid in nomapp.ranges}
    else: factors = nomapp.get_param_scens(rangeid, *params)
    # Build the set of fault columns to compare over, per the `faults` option.
    if faults=='functions': faultlist = set([e.partition(' ')[0] for scen in nested_endclasses for e in nested_endclasses[scen]])
    elif faults=='modes': faultlist = set([e.partition(',')[0] for scen in nested_endclasses for e in nested_endclasses[scen]])
    elif faults=='mode type': faultlist = set([e.partition(',')[0].partition(' ')[2] for scen in nested_endclasses for e in nested_endclasses[scen]])
    elif type(faults) ==str: raise Exception("Invalid faults option: "+faults)
    elif type(faults)==list: faultlist =set(faults)
    else: faultlist=faults
    faultlist.discard('nominal'); faultlist.discard(' '); faultlist.discard('')
    full_stats=[]
    for factor, scens in factors.items():
        endclass_fact = {scen:endclass for scen, endclass in nested_endclasses.items() if scen in scens}
        ec_metrics = overall_diff(endclass_fact, value, nan_as=nan_as, as_ind=percent, no_diff=not difference)
        if not percent: nominal_metrics = [nan_to_x(res_scens['nominal'][value], nan_as) for res_scens in endclass_fact.values()]
        # BUG FIX: previously np.sign(float(nan_to_x(...), nan_as)) -- float()
        # takes a single argument, so the default percent=True path always
        # raised TypeError. Mirrors the working form used in the
        # nominal-only comparison function above.
        else: nominal_metrics = [np.sign(nan_to_x(res_scens['nominal'][value], nan_as)) for res_scens in endclass_fact.values()]
        factor_stats=[sum(nominal_metrics)/len(nominal_metrics)]
        if give_ci:
            factor_boot, factor_lb, factor_ub = bootstrap_confidence_interval(nominal_metrics, **kwargs)
            factor_stats = factor_stats + [factor_lb, factor_ub]
        for fault in faultlist:
            if faults=='functions': fault_metrics = [metric for res_scens in ec_metrics.values() for res_scen,metric in res_scens.items() if fault in res_scen.partition(' ')[0]]
            else: fault_metrics = [metric for res_scens in ec_metrics.values() for res_scen,metric in res_scens.items() if fault in res_scen.partition(',')[0]]
            if len(fault_metrics)>0:
                factor_stats.append(sum(fault_metrics)/len(fault_metrics))
                if give_ci:
                    factor_boot, factor_lb, factor_ub = bootstrap_confidence_interval(fault_metrics, **kwargs)
                    factor_stats= factor_stats+[factor_lb, factor_ub]
            else:
                # No scenarios for this fault at this factor level: pad with NaNs
                # so the table stays rectangular. (np.nan: the np.NaN alias was
                # removed in NumPy 2.0.)
                if not give_ci: factor_stats.append(np.nan)
                else: factor_stats= factor_stats + [np.nan,np.nan,np.nan]
        full_stats.append(factor_stats)
    if give_ci=='combined': full_stats = [[str(round(v,3))+' ('+str(round(f[i+1],3))+','+str(round(f[i+2],3))+')' for i,v in enumerate(f) if not i%3] for f in full_stats]
    if give_ci !=True:
        table = pd.DataFrame(full_stats, columns = ['nominal']+list(faultlist), index=factors)
        table.columns.name=tuple(params)
    else:
        # give_ci=True: three sub-columns (value, lower bound, upper bound) per fault.
        columns = [(f, stat) for f in ['nominal']+list(faultlist) for stat in ["", "LB", "UB"]]
        table = pd.DataFrame(full_stats, columns=columns, index=factors)
        table.columns = pd.MultiIndex.from_tuples(table.columns, names=['fault', ''])
        table.columns.name=tuple(params)
    return table
def nested_stats(nomapp, nested_endclasses, percent_metrics=[], rate_metrics=[], average_metrics=[], expected_metrics=[], inputparams='from_range', scenarios='all'):
    """
    Makes a table of quantities of interest from endclasses.
    Parameters
    ----------
    nomapp : NominalApproach
        NominalApproach used to generate the simulation.
    nested_endclasses : dict
        End-state classifcations for the set of simulations from propagate.nested_approach()
    percent_metrics : list
        List of metrics to calculate a percent of (e.g. use with an indicator variable like failure=1/0 or True/False)
    rate_metrics : list
        List of metrics to calculate the probability of using the rate variable in endclasses
    average_metrics : list
        List of metrics to calculate an average of (e.g., use for float values like speed=25)
    expected_metrics : list
        List of metrics to calculate the expected value of using the rate variable in endclasses
    inputparams : 'from_range'/'all',list, optional
        Parameters to show on the table. The default is 'from_range'.
    scenarios : 'all','range'/list, optional
        Scenarios to include in the table. 'range' is a given range_id in the nominalapproach.
    Returns
    -------
    table : pandas DataFrame
        Table with the averages/percentages of interest layed out over the input parameters for the set of scenarios in endclasses
    """
    if scenarios=='all': scens = [*nested_endclasses]
    elif type(scenarios)==str: scens = nomapp.ranges[scenarios]['scenarios']
    elif not type(scenarios)==list: raise Exception("Invalid option for scenarios. Provide 'all'/'rangeid' or list")
    else: scens = scenarios
    if inputparams=='from_range':
        ranges=[*nomapp.ranges]
        if not(scenarios=='all') and not(type(scenarios)==list): app_range= scenarios
        elif len(ranges)==1: app_range=ranges[0]
        else: raise Exception("Multiple approach ranges "+str(ranges)+" in approach. Use inputparams=`all` or inputparams=[param1, param2,...]")
        inputparams= [*nomapp.ranges[app_range]['inputranges']]
    elif inputparams=='all':
        inputparams=[*nomapp.scenarios.values()][0]['properties']['inputparams']
    # BUG FIX: copy inputparams before appending row labels. The old code
    # aliased it (table_rows = inputparams), so the metric-label appends below
    # mutated the caller's list -- and, for inputparams='all', polluted the
    # approach's stored inputparams on every call.
    table_values=[]; table_rows = [*inputparams]
    for inputparam in inputparams:
        table_values.append([nomapp.scenarios[e]['properties']['inputparams'][inputparam] for e in scens])
    for metric in percent_metrics:
        table_values.append([percent(nested_endclasses[e], metric) for e in scens])
        table_rows.append('perc_'+metric)
    for metric in rate_metrics:
        table_values.append([rate(nested_endclasses[e], metric) for e in scens])
        table_rows.append('rate_'+metric)
    for metric in average_metrics:
        table_values.append([average(nested_endclasses[e], metric) for e in scens])
        table_rows.append('ave_'+metric)
    for metric in expected_metrics:
        table_values.append([expected(nested_endclasses[e], metric) for e in scens])
        table_rows.append('exp_'+metric)
    table = pd.DataFrame(table_values, columns=[*nested_endclasses], index=table_rows)
    return table
## FMEA-like tables
def simplefmea(endclasses):
    """Tabulate a simple FMEA (e.g. rate, cost, expected cost) with one row
    per fault scenario, straight from the endclasses dict."""
    return pd.DataFrame(endclasses).transpose()
def phasefmea(endclasses, app):
    """
    Makes a simple fmea of the endclasses of a set of fault scenarios run grouped by phase.
    Parameters
    ----------
    endclasses : dict
        dict of endclasses of the simulation runs
    app : sampleapproach
        sample approach used for the underlying probability model of the set of scenarios run
    Returns
    -------
    table: dataframe
        table with cost, rate, and expected cost of each fault in each phase
    """
    rows = {}
    for modephase, ids in app.scenids.items():
        # Weight each scenario's cost by the approach's sampling weights.
        weights = np.array(list(app.weights[modephase[0]][modephase[1]].values()))
        costs = np.array([endclasses[scenid]['cost'] for scenid in ids])
        rows[modephase] = {
            'rate': sum(endclasses[scenid]['rate'] for scenid in ids),
            'cost': sum(costs * weights),
            'expected cost': sum(endclasses[scenid]['expected cost'] for scenid in ids),
        }
    return pd.DataFrame(rows).transpose()
def summfmea(endclasses, app):
    """
    Makes a simple fmea of the endclasses of a set of fault scenarios run grouped by fault.
    Parameters
    ----------
    endclasses : dict
        dict of endclasses of the simulation runs
    app : sampleapproach
        sample approach used for the underlying probability model of the set of scenarios run
    Returns
    -------
    table: dataframe
        table with cost, rate, and expected cost of each fault (over all phases)
    """
    fmeadict = dict()
    for modephase, ids in app.scenids.items():
        rate= sum([endclasses[scenid]['rate'] for scenid in ids])
        cost= sum(np.array([endclasses[scenid]['cost'] for scenid in ids])*np.array(list(app.weights[modephase[0]][modephase[1]].values())))
        expcost= sum([endclasses[scenid]['expected cost'] for scenid in ids])
        # Joint modes are keyed by str(tuple) so they index/display cleanly.
        if getattr(app, 'jointmodes', []): index = str(modephase[0])
        else: index = modephase[0]
        # BUG FIX: previously checked fmeadict.get(modephase[0]) even when the
        # key actually used was str(modephase[0]); for joint modes the lookup
        # always missed, re-initializing the row on every phase and discarding
        # the accumulated totals.
        if index not in fmeadict: fmeadict[index] = {'rate': 0.0, 'cost': 0.0, 'expected cost': 0.0}
        fmeadict[index]['rate'] += rate
        # Average the phase costs over the number of phases for this fxnmode.
        fmeadict[index]['cost'] += cost/len([1.0 for (fxnmode,phase) in app.scenids if fxnmode==modephase[0]])
        fmeadict[index]['expected cost'] += expcost
    table=pd.DataFrame(fmeadict)
    return table.transpose()
def fullfmea(endclasses, summaries):
    """Build the full FMEA table: degraded functions/flows (from summaries)
    alongside cost, rate, and expected cost (from endclasses), one row per
    scenario."""
    combined = pd.concat([pd.DataFrame(summaries), pd.DataFrame(endclasses)])
    return combined.transpose()
| StarcoderdataPython |
3346309 | <gh_stars>0
from django.shortcuts import render
from django.views.generic import TemplateView, CreateView, UpdateView, DetailView, ListView
from utils.decorators import has_dashboard_permission_required
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.urls import reverse
from django.contrib import messages
from users.models import User
from utils.helpers import (
validate_normal_form, get_simple_context_data, get_simple_object, delete_simple_object, user_has_permission
)
# App Imports
from .forms import CompanyCreateForm, CompanyManageForm
from .models import Company
dashboard_decorators = [login_required, has_dashboard_permission_required]
"""
-------------------------------------------------------------------
** Company ***
-------------------------------------------------------------------
"""
def get_company_common_contexts(request):
    """Build the shared dashboard context (list table, etc.) for Company views."""
    hidden_columns = ["id", "slug", "description", "updated_at"]
    return get_simple_context_data(
        request=request,
        app_namespace='company',
        model_namespace="company",
        model=Company,
        list_template=None,
        fields_to_hide_in_table=hidden_columns,
    )
@method_decorator(dashboard_decorators, name='dispatch')
class CompanyCreateView(CreateView):
    """Dashboard view that creates a Company together with its backing login User."""
    template_name = "admin_panel/snippets/manage.html"
    form_class = CompanyCreateForm
    def form_valid(self, form, **kwargs):
        # Get form values
        name = form.instance.name
        email = form.cleaned_data.get('email')
        password = form.cleaned_data.get('password')
        # NOTE(review): form.is_valid() is always True inside form_valid, and the
        # User is created *before* the company-name uniqueness check below -- a
        # duplicate name leaves an orphaned User. Confirm and consider reordering.
        if form.is_valid():
            try:
                user_obj = User(
                    name=name,
                    email=email,
                    is_company=True
                )
                user_obj.set_password(password)
                user_obj.save()
                # save company user
                form.instance.user = user_obj
            except Exception as E:
                messages.error(
                    self.request, 'Failed to create user!'
                )
                return super().form_invalid(form)
        # Case-insensitive uniqueness check on the company name.
        field_qs = Company.objects.filter(
            name__iexact=name
        )
        result = validate_normal_form(
            field='name', field_qs=field_qs,
            form=form, request=self.request
        )
        if result == 1:
            return super().form_valid(form)
        else:
            return super().form_invalid(form)
    def get_success_url(self):
        return reverse("company:create_company")
    def get_context_data(self, **kwargs):
        context = super(
            CompanyCreateView, self
        ).get_context_data(**kwargs)
        context['page_title'] = 'Create Company'
        context['page_short_title'] = 'Create Company'
        # Merge in the shared company dashboard context (table data, etc.).
        for key, value in get_company_common_contexts(request=self.request).items():
            context[key] = value
        return context
@method_decorator(dashboard_decorators, name='dispatch')
class CompanyDetailView(DetailView):
    """Dashboard detail page for a single Company, resolved by slug."""
    template_name = "admin_panel/snippets/detail-common.html"
    def get_object(self):
        # Shared helper resolves the Company instance from the 'slug' URL kwarg.
        return get_simple_object(key='slug', model=Company, self=self)
    def get_context_data(self, **kwargs):
        context = super(
            CompanyDetailView, self
        ).get_context_data(**kwargs)
        # Fixed typo in the user-facing page titles: 'Comapny' -> 'Company'.
        context['page_title'] = f'Company - {self.get_object().name} Detail'
        context['page_short_title'] = f'Company - {self.get_object().name} Detail'
        # Merge in the shared company dashboard context (table data, etc.).
        for key, value in get_company_common_contexts(request=self.request).items():
            context[key] = value
        return context
@method_decorator(dashboard_decorators, name='dispatch')
class ComapnyUpdateView(UpdateView):
    """Dashboard view updating a Company; keeps the linked User's name in sync."""
    # NOTE(review): the class name misspells "Company"; kept as-is since URLconf
    # or other modules may reference it by this name.
    template_name = 'admin_panel/snippets/manage.html'
    form_class = CompanyManageForm
    def get_object(self):
        return get_simple_object(key="slug", model=Company, self=self)
    def get_success_url(self):
        return reverse("company:create_company")
    def form_valid(self, form):
        self.object = self.get_object()
        name = form.instance.name
        # Case-insensitive uniqueness check, excluding this record's own name.
        field_qs = Company.objects.filter(
            name__iexact=name
        ).exclude(name__iexact=self.object.name)
        result = validate_normal_form(
            field='name', field_qs=field_qs,
            form=form, request=self.request
        )
        if result == 1:
            # update user name to company name
            user_qs = User.objects.filter(email__iexact=self.object.user.email)
            if user_qs and not name == self.object.user.name:
                user_qs.update(name=name)
            return super().form_valid(form)
        else:
            return super().form_invalid(form)
    def get_context_data(self, **kwargs):
        context = super(
            ComapnyUpdateView, self
        ).get_context_data(**kwargs)
        context['page_title'] = f'Update Company "{self.get_object().name}"'
        context['page_short_title'] = f'Update Company "{self.get_object().name}"'
        # Merge in the shared company dashboard context (table data, etc.).
        for key, value in get_company_common_contexts(request=self.request).items():
            context[key] = value
        return context
@csrf_exempt
@has_dashboard_permission_required
@login_required
def delete_company(request):
    """Delete the Company resolved by slug via the shared helper, then redirect
    back to the company management page."""
    return delete_simple_object(request=request, key='slug', model=Company, redirect_url="company:create_company")
| StarcoderdataPython |
6622771 | <reponame>TomohikoK/PyCat<gh_stars>0
def double(arg: int) -> int:
    """Return twice the given integer."""
    return arg + arg
# NOTE(review): relies on names not defined in this chunk -- List,
# length_of_str, and a custom '@' composition operator on functions (PyCat) --
# confirm the required imports/runtime are in scope.
x: List[str] = ['a', 'bb']
# Composed form: apply length_of_str, then double, to each element.
x1 = list(map(double @ length_of_str, x))
# Equivalent two-pass form for comparison.
x2 = list(map(double, list(map(length_of_str, x))))
assert x1 == x2  # both values are [2, 4]
| StarcoderdataPython |
5116047 | <gh_stars>0
#!/bin/python3
# Echo the input line back if it parses as a base-10 integer; otherwise
# report a bad string.
S = input().strip()
try:
    int(S)
    print(S)
except ValueError:
    # Catch only a failed parse; the previous bare `except:` also swallowed
    # SystemExit/KeyboardInterrupt.
    print('Bad String')
| StarcoderdataPython |
3245094 | <gh_stars>1-10
import logging
import coreapi
from django.http import QueryDict
from django.conf import settings
from rest_framework.viewsets import ReadOnlyModelViewSet, GenericViewSet
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, mixins
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from talentmap_api.available_tandem.models import AvailableFavoriteTandem
from talentmap_api.user_profile.models import UserProfile
from talentmap_api.projected_tandem.models import ProjectedFavoriteTandem
import talentmap_api.fsbid.services.available_positions as services
import talentmap_api.fsbid.services.projected_vacancies as pvservices
import talentmap_api.fsbid.services.common as comservices
logger = logging.getLogger(__name__)
FAVORITES_LIMIT = settings.FAVORITES_LIMIT
class AvailableFilter():
    """Declarative filter description for the tandem favorites CSV endpoint."""
    # These query parameters are handled manually in FavoritesTandemCSVView.get.
    declared_filters = [
        "exclude_available",
        "exclude_projected",
    ]
    use_api = True
    class Meta:
        fields = "__all__"
class AvailableFavoriteTandemListView(APIView):
    """Paginated list of the user's tandem-favorited available positions."""
    permission_classes = (IsAuthenticated,)
    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter('page', openapi.IN_QUERY, type=openapi.TYPE_INTEGER, description='A page number within the paginated result set.'),
            openapi.Parameter('limit', openapi.IN_QUERY, type=openapi.TYPE_INTEGER, description='Number of results to return per page.'),
            openapi.Parameter('ordering', openapi.IN_QUERY, type=openapi.TYPE_STRING, description='Ordering')
        ])
    def get(self, request, *args, **kwargs):
        """
        get:
        Return a list of all of the user's tandem favorite available positions.
        """
        user = UserProfile.objects.get(user=self.request.user)
        aps = AvailableFavoriteTandem.objects.filter(user=user, archived=False).values_list("cp_id", flat=True)
        limit = request.query_params.get('limit', 15)
        page = request.query_params.get('page', 1)
        ordering = request.query_params.get('ordering', None)
        if aps:
            # Presumably archives favorites that are no longer available
            # upstream before querying -- confirm in comservices.
            comservices.archive_favorites(aps, request)
            pos_nums = ','.join(aps)
            # Delegate the actual position lookup to the FSBid service.
            return Response(services.get_available_positions(
                QueryDict(f"id={pos_nums}&limit={limit}&page={page}&ordering={ordering}"),
                request.META['HTTP_JWT'],
                f"{request.scheme}://{request.get_host()}"))
        # No favorites: return an empty paginated envelope.
        return Response({"count": 0, "next": None, "previous": None, "results": []})
class AvailableFavoriteTandemIdsListView(APIView):
    """Lightweight endpoint returning only the cp_ids of the user's
    non-archived tandem favorite available positions."""
    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        profile = UserProfile.objects.get(user=self.request.user)
        favorite_ids = (
            AvailableFavoriteTandem.objects
            .filter(user=profile, archived=False)
            .values_list("cp_id", flat=True)
        )
        return Response(favorite_ids)
class FavoritesTandemCSVView(APIView):
    """Exports the user's tandem favorites (available positions and/or
    projected vacancies) as a CSV download."""
    permission_classes = (IsAuthenticated,)
    filter_class = AvailableFilter
    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter('exclude_available', openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN, description='Whether to exclude available positions'),
            openapi.Parameter('exclude_projected', openapi.IN_QUERY, type=openapi.TYPE_BOOLEAN, description='Whether to exclude projected vacancies'),
        ])
    def get(self, request, *args, **kwargs):
        """
        Return a list of all of the user's favorite positions.
        """
        user = UserProfile.objects.get(user=self.request.user)
        data = []
        # AP Tandem
        aps = AvailableFavoriteTandem.objects.filter(user=user, archived=False).values_list("cp_id", flat=True)
        if request.query_params.get('exclude_available') != 'true' and aps:
            pos_nums = ','.join(aps)
            # limit=len(aps): fetch every favorited AP in a single page so the
            # export is complete.
            apdata = services.get_available_positions(
                QueryDict(f"id={pos_nums}&limit={len(aps)}&page=1"),
                request.META['HTTP_JWT'],
                f"{request.scheme}://{request.get_host()}")
            data = data + apdata.get('results')
        # PV Tandem
        pvs = ProjectedFavoriteTandem.objects.filter(user=user, archived=False).values_list("fv_seq_num", flat=True)
        if request.query_params.get('exclude_projected') != 'true' and pvs:
            pv_pos_nums = ','.join(pvs)
            pvdata = pvservices.get_projected_vacancies(
                QueryDict(f"id={pv_pos_nums}&limit={len(pvs)}&page=1"),
                request.META['HTTP_JWT'],
                f"{request.scheme}://{request.get_host()}")
            data = data + pvdata.get('results')
        return comservices.get_ap_and_pv_csv(data, "tandem-favorites", True)
class AvailableFavoriteTandemActionView(APIView):
    '''
    Controls the favorite status of a available position
    Responses adapted from Github gist 'stars' https://developer.github.com/v3/gists/#star-a-gist
    '''
    permission_classes = (IsAuthenticated,)
    def get(self, request, pk, format=None):
        '''
        Indicates if the available position is a favorite
        Returns 204 if the available position is a favorite, otherwise, 404
        '''
        user = UserProfile.objects.get(user=self.request.user)
        if AvailableFavoriteTandem.objects.filter(user=user, cp_id=pk, archived=False).exists():
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            return Response(status=status.HTTP_404_NOT_FOUND)
    def put(self, request, pk, format=None):
        '''
        Marks the available position as a favorite
        Returns 507 (with the limit) when the post-archive favorite count is
        already at FAVORITES_LIMIT.
        '''
        user = UserProfile.objects.get(user=self.request.user)
        aps = AvailableFavoriteTandem.objects.filter(user=user, archived=False).values_list("cp_id", flat=True)
        comservices.archive_favorites(aps, request)
        # Re-read after archiving so the limit check reflects current favorites.
        aps_after_archive = AvailableFavoriteTandem.objects.filter(user=user, archived=False).values_list("cp_id", flat=True)
        if len(aps_after_archive) >= FAVORITES_LIMIT:
            return Response({"limit": FAVORITES_LIMIT}, status=status.HTTP_507_INSUFFICIENT_STORAGE)
        else:
            # get_or_create keeps the operation idempotent.
            AvailableFavoriteTandem.objects.get_or_create(user=user, cp_id=pk)
            return Response(status=status.HTTP_204_NO_CONTENT)
    def delete(self, request, pk, format=None):
        '''
        Removes the available position from favorites
        '''
        user = UserProfile.objects.get(user=self.request.user)
        AvailableFavoriteTandem.objects.filter(user=user, cp_id=pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
| StarcoderdataPython |
9713713 | <gh_stars>0
from athanor.commands.command import AthanorCommand
class CmdStaff(AthanorCommand):
    """
    Administers the game's staff roster: with no switch, shows the staff list;
    admin switches manage staff entries and staff categories.
    """
    key = '@staff'
    # NOTE(review): 'deletecat' is listed here but no switch_deletecat method
    # exists, while switch_remove exists but 'remove' is not listed -- confirm
    # the intended switch set.
    admin_switches = ('add', 'order', 'duty', 'vacation', 'notes', 'createcat', 'deletecat', 'renamecat',
                      'ordercat')

    def _require(self, lhs_msg, rhs_msg=None):
        """Raise ValueError with the given message when lhs (and, if rhs_msg is
        given, rhs) is missing. Deduplicates the per-switch argument checks."""
        if not self.lhs:
            raise ValueError(lhs_msg)
        if rhs_msg is not None and not self.rhs:
            raise ValueError(rhs_msg)

    def switch_createcat(self):
        self._require("Must enter a Category Name!", "Must enter a Category Order!")
        self.systems['staff'].category_create(self.session, self.lhs, self.rhs)

    def switch_renamecat(self):
        self._require("Must enter a Category Name!", "Must enter a new Category name!")
        self.systems['staff'].category_rename(self.session, self.lhs, self.rhs)

    def switch_ordercat(self):
        self._require("Must enter a Category Name!", "Must enter a new Category order!")
        self.systems['staff'].category_reorder(self.session, self.lhs, self.rhs)

    def switch_add(self):
        self._require("Must enter an Account Name!", "Must enter a new Category name!")
        self.systems['staff'].staff_add(self.session, self.lhs, self.rhs)

    def switch_remove(self):
        self._require("Must enter an Account Name!")
        self.systems['staff'].staff_remove(self.session, self.lhs)

    def switch_order(self):
        self._require("Must enter an Account Name!", "Must enter a new Staff order!")
        self.systems['staff'].staff_order(self.session, self.lhs, self.rhs)

    def switch_notes(self):
        self._require("Must enter an Account Name!", "Must enter a new Staff notes!")
        self.systems['staff'].staff_notes(self.session, self.lhs, self.rhs)

    def switch_duty(self):
        self._require("Must enter an Account Name!", "Must enter a new Staff duty!")
        self.systems['staff'].staff_duty(self.session, self.lhs, self.rhs)

    def switch_vacation(self):
        self._require("Must enter an Account Name!", "Must enter a new Staff vacation!")
        self.systems['staff'].staff_vacation(self.session, self.lhs, self.rhs)

    def _main(self):
        # Default action (no switch): show the formatted staff list.
        self.msg(self.systems['staff'].render_stafflist(self.session))
| StarcoderdataPython |
12814791 | #!/usr/bin/env python3
from typing import Any, Generic, List, Optional, TypeVar, cast
import rospy
from std_msgs.msg import Bool, Float32, Int16
import time
class Watchdog:
    """Simple software watchdog: isMad() reports True when pet() has not been
    called within the timeout window.

    Uses a monotonic clock: time.clock_gettime(0) was CLOCK_REALTIME, so a
    system clock step (NTP sync, manual set) could spuriously trip or
    indefinitely suppress the timeout.
    """
    def __init__(self, timeout: float):
        self._timeout = timeout
        self.pet()

    def pet(self):
        """Reset the timer; call on every heartbeat."""
        self._lastTime = time.monotonic()

    def isMad(self):
        """Return True if no pet() occurred within the timeout window."""
        return time.monotonic() - self._lastTime >= self._timeout
T = TypeVar('T')
# Module-wide watchdog: petted by every SubBuf message callback; main() stops
# all motors when no controller message has arrived for 1 second.
w = Watchdog(1)
class SubBuf(Generic[T]):
    """Subscribes to a ROS topic and buffers the latest message payload (of type T)."""
    def __init__(self, topic_name: str, topic_type: type):
        # Last received payload; None until the first message arrives.
        self.data: Optional[T] = None
        self.sub = rospy.Subscriber(topic_name, topic_type, self._msg_cb)
    def _msg_cb(self, msg: Any) -> None:
        # Store the payload and feed the global watchdog so main() knows the
        # controller link is alive.
        self.data = cast(T, msg.data)
        w.pet()
def float_to_int16_msg(value: float) -> Int16:
    """Map a normalized float (nominally in [-1.0, 1.0]) onto a full-scale
    Int16 message."""
    scaled = round(value * 32767)
    return Int16(scaled)
def main():
    """Teleop driver loop: maps controller topics to WRoboclaw motor commands
    for the arm; zeroes every motor when the controller link goes stale
    (global watchdog timeout)."""
    rospy.init_node('bad_arm_driver')
    # retrieve params
    controller_ns = cast(str, rospy.get_param('~controller_ns'))
    claw_ns_0 = cast(str, rospy.get_param('~claw_ns_0'))
    claw_ns_1 = cast(str, rospy.get_param('~claw_ns_1'))
    claw_ns_2 = cast(str, rospy.get_param('~claw_ns_2'))
    claw_ns_3 = cast(str, rospy.get_param('~claw_ns_3'))
    spin_rate = rospy.get_param('~spin_rate', 50)
    # create controller subs
    trigger_l: SubBuf[float] = SubBuf(f'{controller_ns}/axis/trigger_left', Float32)
    trigger_r: SubBuf[float] = SubBuf(f'{controller_ns}/axis/trigger_right', Float32)
    stick_l: SubBuf[float] = SubBuf(f'{controller_ns}/axis/stick_left_y', Float32)
    stick_r: SubBuf[float] = SubBuf(f'{controller_ns}/axis/stick_right_y', Float32)
    bumper_l: SubBuf[bool] = SubBuf(f'{controller_ns}/button/shoulder_l', Bool)
    bumper_r: SubBuf[bool] = SubBuf(f'{controller_ns}/button/shoulder_r', Bool)
    pov_x: SubBuf[float] = SubBuf(f'{controller_ns}/axis/pov_x', Float32)
    pov_y: SubBuf[float] = SubBuf(f'{controller_ns}/axis/pov_y', Float32)
    btn_a: SubBuf[bool] = SubBuf(f'{controller_ns}/button/a', Bool)
    btn_b: SubBuf[bool] = SubBuf(f'{controller_ns}/button/b', Bool)
    btn_y: SubBuf[bool] = SubBuf(f'{controller_ns}/button/y', Bool)
    # create wroboclaw pubs
    pub_turntable = rospy.Publisher(f'{claw_ns_0}/cmd/left', Int16, queue_size=4)
    pub_shoulder = rospy.Publisher(f'{claw_ns_0}/cmd/right', Int16, queue_size=4)
    pub_elbow = rospy.Publisher(f'{claw_ns_1}/cmd/left', Int16, queue_size=4)
    pub_forearm = rospy.Publisher(f'{claw_ns_1}/cmd/right', Int16, queue_size=4)
    pub_wrist_a = rospy.Publisher(f'{claw_ns_2}/cmd/left', Int16, queue_size=4)
    pub_wrist_b = rospy.Publisher(f'{claw_ns_2}/cmd/right', Int16, queue_size=4)
    pub_eef = rospy.Publisher(f'{claw_ns_3}/cmd/left', Int16, queue_size=4)
    # main loop
    sleeper = rospy.Rate(spin_rate)
    while not rospy.is_shutdown():
        if not w.isMad():
            # Controller link alive: translate buffered inputs to motor commands.
            # None checks skip axes/buttons that have not published yet.
            if trigger_l.data is not None and trigger_r.data is not None:
                # Turntable: differential of the two triggers.
                pub_turntable.publish(float_to_int16_msg(trigger_r.data - trigger_l.data))
            if stick_l.data is not None:
                pub_shoulder.publish(float_to_int16_msg(stick_l.data))
            if stick_r.data is not None:
                pub_elbow.publish(float_to_int16_msg(stick_r.data))
            if bumper_l.data is not None and bumper_r.data is not None:
                # Forearm roll from the bumpers.
                # NOTE(review): when BOTH bumpers are held this publishes 0 and
                # then immediately 16384 (no else before the second publish) --
                # confirm whether an else/return is missing.
                if bumper_l.data:
                    if bumper_r.data:
                        pub_forearm.publish(Int16(0))
                    pub_forearm.publish(Int16(16384))
                elif bumper_r.data:
                    pub_forearm.publish(Int16(-16384))
                else:
                    pub_forearm.publish(Int16(0))
            if pov_x.data is not None and pov_y.data is not None:
                # Wrist: D-pad left/right spins both motors the same way,
                # up/down spins them in opposition.
                wrist_spd_a = 0
                wrist_spd_b = 0
                if pov_x.data > 0:
                    wrist_spd_a = 1
                    wrist_spd_b = 1
                elif pov_x.data < 0:
                    wrist_spd_a = -1
                    wrist_spd_b = -1
                elif pov_y.data > 0:
                    wrist_spd_a = 1
                    wrist_spd_b = -1
                elif pov_y.data < 0:
                    wrist_spd_a = -1
                    wrist_spd_b = 1
                pub_wrist_a.publish(Int16(24576 * wrist_spd_a))
                pub_wrist_b.publish(Int16(-24576 * wrist_spd_b))
            if btn_a.data is not None and btn_b.data is not None:
                # End effector from A/B buttons.
                # NOTE(review): same both-pressed pattern as the forearm above
                # (publishes 0 then 24576) -- confirm intended.
                if btn_a.data:
                    if btn_b.data:
                        pub_eef.publish(Int16(0))
                    pub_eef.publish(Int16(24576))
                elif btn_b.data:
                    pub_eef.publish(Int16(-24576))
                else:
                    pub_eef.publish(Int16(0))
        else:
            # Watchdog expired (no controller message within timeout): stop
            # every motor as a safety measure.
            pub_turntable.publish(Int16(0))
            pub_shoulder.publish(Int16(0))
            pub_elbow.publish(Int16(0))
            pub_forearm.publish(Int16(0))
            pub_wrist_a.publish(Int16(0))
            pub_wrist_b.publish(Int16(0))
            pub_eef.publish(Int16(0))
        sleeper.sleep()
# Script entry point (standard guard so the module can be imported without
# starting the node).
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3345828 | <filename>bhpm/jax_code/models/kernels.py
# File: matrix.py
# Created Date: 2020-04-18
# Author: <NAME> (<EMAIL>)
"""
Kernels that are meant to produce matrices of values.
We use the vectorized squared-distance algorithm for speed.
As a trade-off, We avoid doing kernel grads on these.
This is a reasonable place for "modules" to live; kernel grads can be comptued
by building functions and grabbing the module's parameters
"""
from functools import partial
import jax
import jax.numpy as np
from .transforms import transform_params
def _squared_distance(x1, x2, scales=None):
z1, z2 = (x1, x2) if scales is None else (x1 / scales, x2 / scales)
return ( # clip_up( FIXME
np.sum(z1 * z1, axis=1, keepdims=True)
- 2.0 * z1 @ z2.T
+ np.sum(z2 * z2, axis=1, keepdims=True).T
)
_remat_squared_distance = jax.remat(_squared_distance)
def vmap(k, diag=False):
    """
    Vectorize a scalar kernel k(params, x1, x2) over batched inputs.

    diag=True:  returns f(params, x): (N, DX) -> (N,), f[i] = k(params, x[i], x[i])
    diag=False: returns f(params, x1, x2): (N, DX), (M, DX) -> (N, M)
    """
    if diag:
        return jax.vmap(lambda params, x: k(params, x, x), in_axes=(None, 0))
    # Map k directly: inner vmap sweeps x2 (columns), outer sweeps x1 (rows).
    row_fun = jax.vmap(k, in_axes=(None, None, 0))
    return jax.vmap(row_fun, in_axes=(None, 0, None))
def periodic():
    """
    From Duvenaud, "Automatic model construction with Gaussian processes" (2014)
    Fig. 2.1

    Returns a dict of init/apply functions (stax-style) for a periodic kernel.
    Parameters are stored unconstrained as "raw_*" and exponentiated on use
    (via the transform_params wrapper) so variance/scale/periods stay positive.
    """
    t_wrapper = partial(transform_params, transform=np.exp)
    def init_fun(rng, input_shape):
        # input_shape is (N, DX); output shape is the (N, N) kernel matrix.
        params = {
            "raw_variance": np.array(0.0),
            "raw_scale": np.array(0.0),
            "raw_periods": np.zeros((input_shape[1],)),
        }
        return (input_shape[0], input_shape[0]), params
    @t_wrapper
    def apply_fun(params, x1, x2):
        # Full (N, M) kernel matrix; r is the period-scaled Euclidean distance.
        r = np.sqrt(_squared_distance(x1, x2, scales=params["periods"]))
        return params["variance"] * np.exp(
            -params["scale"] * np.power(np.sin(np.pi * r), 2)
        )
    @t_wrapper
    def apply_diag_fun(params, x):
        # k(x, x) is constant for this stationary kernel: just the variance.
        return params["variance"] * np.ones(x.shape[0])
    @t_wrapper
    def apply_single_fun(params, x1, x2):
        """
        Maps a pair of 1D vectors to a scalar (use this for grads)
        """
        dr = (x1 - x2) / params["periods"]
        r = np.sqrt(np.dot(dr, dr))
        return params["variance"] * np.exp(
            -params["scale"] * np.power(np.sin(np.pi * r), 2)
        )
    return {
        "init": init_fun,
        "apply": apply_fun,
        "apply_diag": apply_diag_fun,
        "apply_single": apply_single_fun,
    }
def rbf():
    """
    Returns a dict of init/apply functions (stax-style) for an RBF
    (squared-exponential) kernel with per-dimension length scales. Parameters
    are stored unconstrained as "raw_*" and exponentiated on use so
    variance/scales stay positive.
    """
    t_wrapper = partial(transform_params, transform=np.exp)
    def init_fun(rng, input_shape, scales=None, variance=None):
        # Optional initial values; defaults correspond to variance=1, scales=1.
        params = {
            "raw_variance": np.log(variance) if variance is not None else np.array(0.0),
            "raw_scales": np.log(scales)
            if scales is not None
            else np.zeros((input_shape[1],)),
        }
        return (input_shape[0], input_shape[0]), params
    @t_wrapper
    def apply_fun(params, x1, x2, remat=False):
        """
        :param remat: if True, slam the squared distance calculaation with a remat to
            prevent XLA fusion bug w/ x64.
        """
        sd = _squared_distance if not remat else _remat_squared_distance
        return params["variance"] * np.exp(-sd(x1, x2, scales=params["scales"]))
    @t_wrapper
    def safe_apply_func(params, x1, x2):
        # "Safe" version that doesn't cause https://github.com/google/jax/issues/3122
        return vmap(apply_single_fun)(params, x1, x2)
    @t_wrapper
    def apply_diag_fun(params, x):
        # k(x, x) is constant for this stationary kernel: just the variance.
        return params["variance"] * np.ones(x.shape[0])
    @t_wrapper
    def apply_single_fun(params, x1, x2):
        """
        Maps a pair of 1D vectors to a scalar (use this for grads)
        """
        dr = (x1 - x2) / params["scales"]
        r2 = np.dot(dr, dr)
        return params["variance"] * np.exp(-r2)
    return {
        "init": init_fun,
        "apply": apply_fun,
        "apply_diag": apply_diag_fun,
        "apply_single": apply_single_fun,
        "safe_apply": safe_apply_func,
    }
| StarcoderdataPython |
11279675 | <reponame>relikd/lektor-groupby-plugin<gh_stars>0
from lektor.db import Page # isinstance
from lektor.pluginsystem import Plugin # subclass
from typing import TYPE_CHECKING, Iterator, Any
from .backref import GroupByRef, VGroups
from .groupby import GroupBy
from .pruner import prune
from .resolver import Resolver
from .vobj import VPATH, GroupBySource, GroupByBuildProgram
if TYPE_CHECKING:
from lektor.builder import Builder, BuildState
from lektor.sourceobj import SourceObject
from .watcher import GroupByCallbackArgs
class GroupByPlugin(Plugin):
    """Lektor plugin entry point: clusters arbitrary records by a field
    attribute keyword and builds one virtual source per group."""

    name = 'GroupBy Plugin'
    description = 'Cluster arbitrary records with field attribute keyword.'

    def on_setup_env(self, **extra: Any) -> None:
        # Tracks whether the last build actually produced artifacts
        # (used to skip idle rebuilds in on_after_build_all).
        self.has_changes = False
        self.resolver = Resolver(self.env)
        self.env.add_build_program(GroupBySource, GroupByBuildProgram)
        self.env.jinja_env.filters.update(vgroups=VGroups.iter)

    def on_before_build(
        self, builder: 'Builder', source: 'SourceObject', **extra: Any
    ) -> None:
        # before-build may be called before before-build-all (issue #1017)
        # make sure it is always evaluated first
        if isinstance(source, Page):
            self._init_once(builder)

    def on_after_build(self, build_state: 'BuildState', **extra: Any) -> None:
        if build_state.updated_artifacts:
            self.has_changes = True

    def on_after_build_all(self, builder: 'Builder', **extra: Any) -> None:
        # only rebuild if has changes (bypass idle builds)
        # or the very first time after startup (url resolver & pruning)
        if self.has_changes or not self.resolver.has_any:
            self._init_once(builder).build_all(builder)  # updates resolver
        self.has_changes = False

    def on_after_prune(self, builder: 'Builder', **extra: Any) -> None:
        # TODO: find a better way to prune unreferenced elements
        prune(builder, VPATH, self.resolver.files)

    # ------------
    #   internal
    # ------------

    def _init_once(self, builder: 'Builder') -> GroupBy:
        """Create (once per builder) and return the shared GroupBy object."""
        try:
            return GroupByRef.of(builder)
        except AttributeError:
            groupby = GroupBy(self.resolver)
            GroupByRef.set(builder, groupby)
            self._load_quick_config(groupby)
            # let other plugins register their @groupby.watch functions
            self.emit('before-build-all', groupby=groupby, builder=builder)
            groupby.queue_all(builder)
            return groupby

    def _load_quick_config(self, groupby: GroupBy) -> None:
        ''' Load config file quick listeners. '''
        config = self.get_config()
        for key in config.sections():
            if '.' in key:  # e.g., key.fields and key.key_map
                continue
            watcher = groupby.add_watcher(key, config)
            split = config.get(key + '.split')  # type: str

            # BUGFIX: bind `split` as a default argument. The loop rebinds
            # `split` on every iteration, and a plain closure would make
            # every registered callback see only the value from the *last*
            # config section (classic late-binding closure bug).
            @watcher.grouping()
            def _fn(
                args: 'GroupByCallbackArgs', split: str = split
            ) -> Iterator[str]:
                val = args.field
                if isinstance(val, str):
                    val = map(str.strip, val.split(split)) if split else [val]
                if isinstance(val, (list, map)):
                    yield from val
| StarcoderdataPython |
4847261 | <reponame>zhengkai15/pytorch-examples
import torch
import matplotlib.pyplot as plt
# Fit noisy points scattered around y = 2x + 1 with a tiny MLP.
w = 2
b = 1
noise = torch.rand(100, 1)
# Reshape (100,) -> (100, 1) because the input Linear layer expects 2-D input.
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = w * x + b + noise

# Small network: two fully-connected layers with a tanh in between.
model = torch.nn.Sequential(
    torch.nn.Linear(1, 16),
    torch.nn.Tanh(),
    torch.nn.Linear(16, 1),
)

# Mean-squared-error loss.
loss_fun = torch.nn.MSELoss()
# Adam optimizer updates the model weights, learning rate 0.001.
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

plt.ion()  # interactive plotting

for step in range(10000):
    # NOTE: the original created a fresh `plt.axes()` every iteration;
    # that leaked thousands of unused axes objects and has been removed.
    output = model(x)            # forward pass
    loss = loss_fun(output, y)   # compute loss

    # Clear gradients before backprop; otherwise gradients accumulate
    # across steps and corrupt the current update.
    model.zero_grad()
    # Backward pass: compute gradients for the optimizer to apply.
    loss.backward()
    # Apply the gradient update; without this the weights never change.
    optimizer.step()

    if step % 100 == 0:
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), output.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
4871778 | from oauth2_provider.signals import app_authorized
"""
Why are we doing this? The reason is because ``django-oauth-toolkit`` does
not attach the user object to the authorized token when user specific
"client credentials" application gets authorized.
"""
def handle_app_authorized(sender, request, token, **kwargs):
    """
    Fired whenever a new token is authorized.

    Extra behaviour on top of the stock signal:
    (1) If the token's application has an associated user and the token
        itself has none, attach the application's user to the token and
        save it.
    (2) This runs on every oAuth 2.0 authorization request.
    """
    should_attach = (
        token
        and token.application
        and token.application.user
        and token.user is None
    )
    if should_attach:
        token.user = token.application.user
        token.save()
app_authorized.connect(handle_app_authorized)
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from oauth2_provider.models import (
Application,
AbstractApplication,
# AbstractAccessToken,
# AccessToken,
# RefreshToken
)
from foundation.models import UserApplication, Device
@receiver(post_save, sender=Device)
def create_oauth_for_device(sender, instance, created, **kwargs):
    """
    Fired whenever a `Device` is saved. For newly created devices we
    automatically generate a "client credentials" oAuth 2.0 application,
    making device setup easier.
    """
    if created:
        # Do not unpack into `application, created` here: that would
        # shadow the signal's own `created` argument. The return values
        # are unused anyway.
        Application.objects.update_or_create(
            name=str(instance.uuid),
            defaults={
                "user": instance.user,
                "name": str(instance.uuid),
                "skip_authorization": True,
                "authorization_grant_type": AbstractApplication.GRANT_CLIENT_CREDENTIALS,
                "client_type": AbstractApplication.CLIENT_CONFIDENTIAL
            }
        )
@receiver(post_delete, sender=Device)
def delete_oauth_for_device(sender, instance, **kwargs):
    """
    Fired when a `Device` is deleted: remove the oAuth 2.0 application
    that was auto-created for it, revoking its authorization.
    """
    if not instance:
        return
    Application.objects.filter(name=instance.uuid).delete()
@receiver(post_delete, sender=UserApplication)
def delete_oauth_for_user_application(sender, instance, **kwargs):
    """
    Fired when a `UserApplication` has been deleted. We want to
    automatically unauthorize the existing oAuth 2.0 authorization we
    have for that application.
    """
    # NOTE(review): assumes UserApplication exposes a `uuid` that was used
    # as the Application name (mirrors the Device handler above) -- confirm.
    if instance:
        Application.objects.filter(name=instance.uuid).delete()
| StarcoderdataPython |
3528184 | <gh_stars>1-10
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
import factory
from cart.models import CartItem, Cart
class UserFactory(factory.DjangoModelFactory):
    # Factory producing active superuser accounts for tests.
    # NOTE(review): the username/email/password values look like anonymised
    # placeholders ('<EMAIL>', '<PASSWORD>'); confirm the intended fixtures.
    class Meta:
        model = get_user_model()
        # Reuse an existing row with the same username instead of duplicating.
        django_get_or_create = ('username',)

    username = '<EMAIL>'
    email = '<EMAIL>'
    first_name = 'Iyanuoluwa'
    last_name = 'Ajao'
    # Hash the password via set_password() after instance generation.
    password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>')
    is_superuser = True
    is_staff = True
    is_active = True
class CartFactory(factory.DjangoModelFactory):
    # Factory for checked-out Cart objects, each owned by a fresh user.
    # 'cart=None' is passed through to UserFactory -- presumably to break a
    # circular user<->cart relationship; confirm against the models.
    user = factory.SubFactory(UserFactory, cart=None)
    checked_out = True

    class Meta:
        model = Cart
class CartItemFactory(factory.DjangoModelFactory):
    # Factory for CartItem rows whose product is a generic foreign key.
    cart = factory.SubFactory(CartFactory)
    quantity = 10
    price = 100.00
    # Generic FK parts are derived from the excluded helper attribute
    # 'product' below (its id and its ContentType).
    product_object_id = factory.SelfAttribute('product.id')
    product_content_type = factory.LazyAttribute(
        lambda o: ContentType.objects.get_for_model(o.product))
    # NOTE(review): a User instance is used as the stand-in product -- any
    # model works for a generic FK, but confirm this is deliberate.
    product = factory.SubFactory(UserFactory)

    class Meta:
        # 'product' is only a helper attribute, not a CartItem field.
        exclude = ['product']
        model = CartItem
| StarcoderdataPython |
5194493 | """Utility functions used throughout the package.
Attributes:
use_colorlog (bool): Whether the logging should use colorlog or not.
"""
import os
import sys
import logging
import logging.handlers
import logging.config
import matplotlib.markers
import matplotlib.lines
import numpy as np
import pandas as pd
import scipy.ndimage as ndi
import gc
from astropy import units as u
from astropy.coordinates import SkyCoord, Angle
from astroquery.simbad import Simbad
from astropy.time import Time
from astropy.table import Table
from astropy.coordinates import solar_system_ephemeris
from astropy.coordinates import get_body
from astropy.io import fits
from astropy.wcs import WCS
from multiprocessing_logging import install_mp_handler
from typing import Optional, Union, Tuple, List
from mocpy import MOC
# crosshair imports
from matplotlib.transforms import Affine2D
import matplotlib.path as path
try:
import colorlog
use_colorlog = True
except ImportError:
use_colorlog = False
from vasttools.survey import get_askap_observing_location
def get_logger(
    debug: bool,
    quiet: bool,
    logfile: str = None
) -> logging.RootLogger:
    """
    Set up and return the root logger.

    Args:
        debug: Set the stream handler level to DEBUG.
        quiet: Suppress all non-essential (sub-WARNING) stream output.
        logfile: Optional path of a file to also log to (always at DEBUG).

    Returns:
        The configured root logger.
    """
    logger = logging.getLogger()
    stream_handler = logging.StreamHandler()

    file_handler = None
    if logfile is not None:
        file_handler = logging.FileHandler(logfile)
        file_handler.setLevel(logging.DEBUG)

    plain_format = '[%(asctime)s] - %(levelname)s - %(message)s'
    if use_colorlog:
        formatter = colorlog.ColoredFormatter(
            "%(log_color)s[%(asctime)s] - %(levelname)s - %(blue)s%(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
            reset=True,
            log_colors={
                'DEBUG': 'cyan',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'red,bg_white', },
            secondary_log_colors={},
            style='%'
        )
    else:
        formatter = logging.Formatter(
            plain_format, datefmt="%Y-%m-%d %H:%M:%S")

    stream_handler.setFormatter(formatter)

    # Precedence: debug beats quiet, quiet beats the default INFO.
    if debug:
        stream_handler.setLevel(logging.DEBUG)
    elif quiet:
        stream_handler.setLevel(logging.WARNING)
    else:
        stream_handler.setLevel(logging.INFO)
    logger.addHandler(stream_handler)

    if file_handler is not None:
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    # Root logger passes everything; handlers do the filtering.
    logger.setLevel(logging.DEBUG)

    install_mp_handler(logger=logger)

    return logger
def _set_crosshair(self) -> None:
    """This function adds a true crosshair marker to matplotlib.

    ============================== ===========================================
    marker                         description
    ============================== ===========================================
    `"c"`                          crosshair

    Usage:
        ```python
        import matplotlib.pyplot as plt
        import crosshair
        plt.scatter(0,0, marker='c', s=100)
        plt.show()
        ```

    Notes:
        I tried to stay as close to the style of `matplotlib/lib/markers.py`,
        so it can easily be implemented in mpl after further testing.
        How to implement this in matplotlib via a module was inspired by:
        https://stackoverflow.com/a/16655800/5064815
        Be aware that for small sizes the crosshair looks like four dots or
        even a circle. This is due to the fact that in this case the linewidth
        is larger than the length of the 'hairs' of the crosshair. This is
        known and similar behaviour is seen for other markers at small sizes.

    Author:
        <NAME> (13/07/2017)

    Returns:
        None
    """
    # Four separate strokes (MOVETO/LINETO pairs), one per 'hair',
    # leaving the centre of the marker empty.
    _crosshair_path = path.Path([(0.0, -0.5),   # center, bottom
                                 (0.0, -0.25),  # center, q_bot
                                 (-0.5, 0.0),   # left, center
                                 (-0.25, 0.0),  # q_left, center
                                 (0.0, 0.25),   # center, q_top
                                 (0.0, 0.5),    # center, top
                                 (0.25, 0.0),   # q_right, center
                                 (0.5, 0.0)],   # right, center
                                [path.Path.MOVETO,
                                 path.Path.LINETO,
                                 path.Path.MOVETO,
                                 path.Path.LINETO,
                                 path.Path.MOVETO,
                                 path.Path.LINETO,
                                 path.Path.MOVETO,
                                 path.Path.LINETO])
    # Attributes expected by MarkerStyle: unit scale, unfilled stroke path.
    self._transform = Affine2D().scale(1.0)
    self._snap_threshold = 1.0
    self._filled = False
    self._path = _crosshair_path
def crosshair() -> None:
    """
    A wrapper function to set the crosshair marker in
    matplotlib using the function written by <NAME>.

    Returns:
        None
    """
    # Monkey-patch MarkerStyle so marker string 'c' resolves to the
    # crosshair path built in _set_crosshair above.
    matplotlib.markers.MarkerStyle._set_crosshair = _set_crosshair
    matplotlib.markers.MarkerStyle.markers['c'] = 'crosshair'
    # Line2D keeps its own reference to the marker table; refresh it too.
    matplotlib.lines.Line2D.markers = matplotlib.markers.MarkerStyle.markers
def check_file(path: str) -> bool:
    """
    Check that a file exists, logging a critical message when it does not.

    Args:
        path: Filepath to check.

    Returns:
        True if the file is present, otherwise False.
    """
    if os.path.isfile(path):
        return True
    logging.getLogger().critical(
        "Cannot find file '%s'!", path
    )
    return False
def build_catalog(coords: str, source_names: str) -> pd.DataFrame:
    """
    Build the catalogue of target sources.

    Args:
        coords: Either comma-separated "RA Dec" pairs, or (when the string
            contains no space) the path of an input CSV file.
        source_names: Comma-separated source names ("" for automatic names).

    Returns:
        Catalogue of target sources with 'name', 'ra' and 'dec' columns.
    """
    logger = logging.getLogger()

    if " " in coords:
        # Coordinates were supplied directly as "RA Dec" pairs.
        ra_values = []
        dec_values = []
        for pair in coords.split(","):
            ra_str, dec_str = pair.split(" ")
            ra_values.append(ra_str)
            dec_values.append(dec_str)

        if source_names == "":
            names = [
                "{}_{}".format(r, d) for r, d in zip(ra_values, dec_values)
            ]
        else:
            names = source_names.split(",")
            if len(names) != len(ra_values):
                logger.critical(
                    ("All sources must be named "
                     "when using '--source-names'."))
                logger.critical("Please check inputs.")
                sys.exit()

        catalog = pd.DataFrame(
            {"name": names, "ra": ra_values, "dec": dec_values}
        )[["name", "ra", "dec"]]
        catalog["name"] = catalog["name"].astype(str)
        return catalog

    # No space in the input string: treat it as a file path.
    logger.info("Loading file {}".format(coords))
    user_file = os.path.abspath(coords)
    if not os.path.isfile(user_file):
        logger.critical("{} not found!".format(user_file))
        logger.critical("Exiting.")
        sys.exit()

    try:
        catalog = pd.read_csv(user_file, comment="#")
        catalog.dropna(how="all", inplace=True)
        logger.debug(catalog)
        catalog.columns = map(str.lower, catalog.columns)
        logger.debug(catalog.columns)
        if "ra" not in catalog.columns or "dec" not in catalog.columns:
            logger.critical(
                "Cannot find one of 'ra' or 'dec' in input file.")
            logger.critical("Please check column headers!")
            sys.exit()
        if "name" not in catalog.columns:
            catalog["name"] = [
                "{}_{}".format(r, d)
                for r, d in zip(catalog['ra'], catalog['dec'])
            ]
        else:
            catalog['name'] = catalog['name'].astype(str)
    except Exception:
        logger.critical(
            "Pandas reading of {} failed!".format(coords))
        logger.critical("Check format!")
        sys.exit()

    return catalog
def build_SkyCoord(catalog: pd.DataFrame) -> SkyCoord:
    """
    Create a SkyCoord array for the target sources in the catalogue.

    Args:
        catalog: Catalogue with 'ra' and 'dec' columns.

    Returns:
        SkyCoord covering all target sources.
    """
    logger = logging.getLogger()

    first_ra = catalog['ra'].iloc[0]
    # Decide between sexagesimal (hours) and decimal degrees for RA:
    # float columns are always degrees; strings containing ':' or ' '
    # are assumed to be hour-angle sexagesimal.
    if catalog['ra'].dtype == np.float64:
        hms = False
    elif ":" in first_ra or " " in first_ra:
        hms = True
    else:
        hms = False

    ra_unit = u.hourangle if hms else u.deg
    return SkyCoord(catalog['ra'], catalog['dec'], unit=(ra_unit, u.deg))
def read_selavy(
    selavy_path: str,
    cols: Optional[List[str]] = None
) -> pd.DataFrame:
    """
    Load a selavy catalogue from file (VOTable, CSV or fixed-width text).

    Args:
        selavy_path: Path to the file; the extension selects the parser
            (.xml/.vot -> VOTable, .csv -> CSV, anything else fixed-width).
        cols: Columns to keep. Defaults to None, which returns all columns.

    Returns:
        Dataframe containing the catalogue.
    """
    if selavy_path.endswith((".xml", ".vot")):
        table = Table.read(
            selavy_path, format="votable", use_names_over_ids=True
        )
        df = table.to_pandas()
        if cols is not None:
            df = df[df.columns.intersection(cols)]
        return df
    if selavy_path.endswith(".csv"):
        # CSVs from CASDA have all lowercase column names
        return pd.read_csv(selavy_path, usecols=cols).rename(
            columns={"spectral_index_from_tt": "spectral_index_from_TT"}
        )
    return pd.read_fwf(selavy_path, skiprows=[1], usecols=cols)
def filter_selavy_components(
    selavy_df: pd.DataFrame,
    selavy_sc: SkyCoord,
    imsize: Union[Angle, Tuple[Angle, Angle]],
    target: SkyCoord
) -> pd.DataFrame:
    """
    Shorten a selavy catalogue by dropping components outside the image.

    Args:
        selavy_df: Dataframe of selavy components.
        selavy_sc: SkyCoord positions of the same components.
        imsize: Size of the image along each axis (single Angle or a
            tuple of two Angles).
        target: SkyCoord of the image centre.

    Returns:
        Shortened catalogue with its index reset.
    """
    # imsize / 1.4 is roughly half the image diagonal (size / sqrt(2)),
    # i.e. components out to the image corners are kept -- presumably
    # intentional; confirm against callers before tightening.
    keep = target.separation(selavy_sc) <= imsize / 1.4
    return selavy_df[keep].reset_index(drop=True)
def simbad_search(
    objects: List[str],
    logger: Optional[logging.RootLogger] = None
) -> Union[Tuple[SkyCoord, List[str]], Tuple[None, None]]:
    """
    Query SIMBAD for object coordinates by name.

    Args:
        objects: Object names to query.
        logger: Logger to use; defaults to the root logger.

    Returns:
        (coordinates, queried names); both are None when the search
        fails or returns no result.
    """
    if logger is None:
        logger = logging.getLogger()

    # Request decimal-degree coordinates and the original query string.
    Simbad.add_votable_fields('ra(d)', 'dec(d)', 'typed_id')

    try:
        result_table = Simbad.query_objects(objects)
        if result_table is None:
            return None, None

        coordinates = SkyCoord(
            result_table['RA_d'], result_table['DEC_d'], unit=(u.deg, u.deg)
        )
        simbad_names = np.array(result_table['TYPED_ID'])
        return coordinates, simbad_names
    # TODO: This needs better handling below.
    except Exception as e:
        logger.debug(
            "Error in performing the SIMBAD object search!\nError: %s",
            e, exc_info=True
        )
        return None, None
def match_planet_to_field(
    group: pd.DataFrame, sep_thresh: float = 4.0
) -> pd.DataFrame:
    """
    Processes a dataframe that contains observational info
    and calculates whether a planet is within 'sep_thresh' degrees of the
    observation.

    Used as part of groupby functions hence the argument
    is a group.

    Args:
        group: Required columns are planet, DATEOBS, centre-ra and centre-dec.
        sep_thresh: The separation threshold for the planet position to the
            field centre. If the planet is lower than this value then the
            planet is considered to be in the field. Unit is degrees.

    Returns:
        The group with planet location information added and
        filtered for only those which are within 'sep_thresh' degrees. Hence
        an empty dataframe could be returned.
    """
    # All rows in the group share one planet (it is the groupby key).
    planet = group.iloc[0]['planet']
    dates = Time(group['DATEOBS'].tolist())
    fields_skycoord = SkyCoord(
        group['centre-ra'].values,
        group['centre-dec'].values,
        unit=(u.deg, u.deg)
    )

    # Planet positions are computed for the ASKAP site at each epoch.
    ol = get_askap_observing_location()
    with solar_system_ephemeris.set('builtin'):
        planet_coords = get_body(planet, dates, ol)

    seps = planet_coords.separation(
        fields_skycoord
    )

    group['ra'] = planet_coords.ra.deg
    group['dec'] = planet_coords.dec.deg
    group['sep'] = seps.deg

    # Keep only observations where the planet falls inside the threshold.
    group = group.loc[
        group['sep'] < sep_thresh
    ]

    return group
def check_racs_exists(base_dir: str) -> bool:
    """
    Check whether the RACS epoch directory ('EPOCH00') is present.

    Args:
        base_dir: Path to the base data directory.

    Returns:
        True if it exists, False otherwise.
    """
    racs_dir = os.path.join(base_dir, "EPOCH00")
    return os.path.isdir(racs_dir)
def create_source_directories(outdir: str, sources: List[str]) -> None:
    """
    Create an output directory for every source in the list.

    Spaces and slashes in source names are replaced by underscores.

    Args:
        outdir: Base directory.
        sources: List of source names.

    Returns:
        None
    """
    logger = logging.getLogger()
    for source in sources:
        safe_name = source.replace(" ", "_").replace("/", "_")
        os.makedirs(os.path.join(outdir, safe_name))
def gen_skycoord_from_df(
    df: pd.DataFrame,
    ra_col: str = 'ra',
    dec_col: str = 'dec',
    ra_unit: u.Unit = u.degree,
    dec_unit: u.Unit = u.degree
) -> SkyCoord:
    """
    Build a SkyCoord object from two columns of a dataframe.

    Args:
        df: Dataframe containing the RA and Dec columns.
        ra_col: Right ascension column name, defaults to 'ra'.
        dec_col: Declination column name, defaults to 'dec'.
        ra_unit: Astropy unit of the RA column, defaults to degrees.
        dec_unit: Astropy unit of the Dec column, defaults to degrees.

    Returns:
        SkyCoord object.
    """
    return SkyCoord(
        df[ra_col].values,
        df[dec_col].values,
        unit=(ra_unit, dec_unit),
    )
def pipeline_get_eta_metric(df: pd.DataFrame, peak: bool = False) -> float:
    """
    Calculate the eta variability metric of a source from its grouped
    measurements.

    Args:
        df: Grouped measurements of a single source; requires the
            flux_int/peak and flux_int/peak_err columns.
        peak: Use peak flux instead of integrated flux, defaults to False.

    Returns:
        The eta variability metric (0.0 for a single measurement).
    """
    n_meas = df.shape[0]
    if n_meas == 1:
        return 0.

    flux_col = 'flux_peak' if peak else 'flux_int'
    # Inverse-variance weights from the per-measurement errors.
    weights = 1. / df[f'{flux_col}_err'].values**2
    fluxes = df[flux_col].values

    return (n_meas / (n_meas - 1)) * (
        (weights * fluxes**2).mean() - (
            (weights * fluxes).mean()**2 / weights.mean()
        )
    )
def pipeline_get_variable_metrics(df: pd.DataFrame) -> pd.Series:
    """
    Calculate the variability metrics of a source from its grouped
    measurements.

    Args:
        df: Grouped measurements of a single source; requires the
            flux_int/peak and flux_int/peak_err columns.

    Returns:
        Pandas series holding v_int, v_peak, eta_int and eta_peak.
    """
    if df.shape[0] == 1:
        # A single measurement cannot show variability.
        metrics = {
            'v_int': 0.,
            'v_peak': 0.,
            'eta_int': 0.,
            'eta_peak': 0.,
        }
    else:
        metrics = {
            'v_int': df['flux_int'].std() / df['flux_int'].mean(),
            'v_peak': df['flux_peak'].std() / df['flux_peak'].mean(),
            'eta_int': pipeline_get_eta_metric(df),
            'eta_peak': pipeline_get_eta_metric(df, peak=True),
        }
    return pd.Series(metrics)
def calculate_vs_metric(
    flux_a: float, flux_b: float, flux_err_a: float, flux_err_b: float
) -> float:
    """
    Calculate the Vs variability metric: the t-statistic that the two
    provided fluxes are variable. See Section 5 of Mooley et al. (2016)
    for details, DOI: 10.3847/0004-637X/818/2/105.

    Args:
        flux_a: flux value "A".
        flux_b: flux value "B".
        flux_err_a: error of `flux_a`.
        flux_err_b: error of `flux_b`.

    Returns:
        The Vs metric for flux values "A" and "B".
    """
    flux_diff = flux_a - flux_b
    combined_error = np.hypot(flux_err_a, flux_err_b)
    return flux_diff / combined_error
def calculate_m_metric(flux_a: float, flux_b: float) -> float:
    """
    Calculate the m variability metric: the modulation index between two
    fluxes, proportional to the fractional variability.
    See Section 5 of Mooley et al. (2016) for details,
    DOI: 10.3847/0004-637X/818/2/105.

    Args:
        flux_a: flux value "A".
        flux_b: flux value "B".

    Returns:
        The m metric for flux values "A" and "B".
    """
    numerator = flux_a - flux_b
    denominator = flux_a + flux_b
    return 2 * (numerator / denominator)
def _distance_from_edge(x: np.ndarray) -> np.ndarray:
"""
Analyses the binary array x and determines the distance from
the edge (0).
Args:
x: The binary array to analyse.
Returns:
Array each cell containing distance from the edge.
"""
x = np.pad(x, 1, mode='constant')
dist = ndi.distance_transform_cdt(x, metric='taxicab')
return dist[1:-1, 1:-1]
def create_moc_from_fits(fits_file: str, max_depth: int = 9) -> MOC:
    """
    Creates a MOC from (assuming) an ASKAP fits image
    using the cheat method of analysing the edge pixels of the image.

    Args:
        fits_file: The path of the ASKAP FITS image to generate the MOC from.
        max_depth: Max depth parameter passed to the
            MOC.from_polygon_skycoord() function, defaults to 9.

    Returns:
        The MOC generated from the FITS file.

    Raises:
        Exception: When the FITS file cannot be found.
    """
    if not os.path.isfile(fits_file):
        raise Exception("{} does not exist".format(fits_file))

    with fits.open(fits_file) as vast_fits:
        data = vast_fits[0].data
        if data.ndim == 4:
            # Drop the degenerate leading axes of 4-D ASKAP cubes.
            data = data[0, 0, :, :]
        header = vast_fits[0].header
        wcs = WCS(header, naxis=2)

    # 1 where the image has data, 0 over the NaN border.
    binary = (~np.isnan(data)).astype(int)
    # Cells exactly one step from the border trace the image outline.
    mask = _distance_from_edge(binary)
    x, y = np.where(mask == 1)

    # need to know when to reverse by checking axis sizes.
    pixels = np.column_stack((y, x))

    coords = SkyCoord(wcs.wcs_pix2world(
        pixels, 0), unit="deg", frame="icrs")

    moc = MOC.from_polygon_skycoord(coords, max_depth=max_depth)

    # Free the large intermediate array promptly.
    del binary
    gc.collect()

    return moc
| StarcoderdataPython |
1873138 | from pygen.cgen import *
from arithgen import ArithGen, gen_max_int_gen
import iterables
from utils import eval_branches, FunctionGenerator
import pgen
import random
class ClassGenerator(FunctionGenerator):
    """Generates classes plus call sites that exercise different call-site
    shapes: monomorphic, polymorphic and duck-typed."""

    def __init__(self, module, stats, opts, rng=None):
        self.opts = opts
        self.module = module
        self.rng = rng
        if not rng:
            self.rng = random.Random()
        self.stats = stats
        # Equal-weight choice between the three call-site generators.
        self.branches = [
            (1.0, self.generate_monomorphic),
            (1.0, self.generate_polymorphic),
            (1.0, self.generate_duck),
        ]

    def get_iterable(self, literals):
        """Return a random iterable expression built from *literals*."""
        iter_gen = iterables.IterableGenerator(
            self.module, self.stats, self.opts, self.rng)
        return iter_gen.get_iterable(literals)

    def make_class_function(self):
        """Create a fresh class with one two-argument method.

        Returns the (class, method) pair; the class is prepended to the
        module content.
        """
        c = self.create_class()
        self.module.content.insert(0, c)
        args = self.generate_arguments(2)
        m = self.create_method(args)
        c.content.append(m)
        return c, m

    def make_loop(self, literals, class_var, m):
        """Build a loop that repeatedly calls method *m* on *class_var*."""
        loop_var = self.next_variable()
        # Renamed from `iter`, which shadowed the builtin.
        iterable = self.get_iterable(literals)
        l = ForLoop(loop_var, iterable)
        loop_literals = list(literals) + [loop_var]
        args = [self.rng.choice(loop_literals) for _ in m.args]
        if self.rng.random() < 0.5:
            func = class_var + '.' + m.name
        else:  # Sometimes copy the bound method into a variable first.
            func = self.next_variable()
            l.content.append(Assignment(func, '=', [class_var + '.' + m.name]))
        l.content.append(CallStatement(func, args))
        return l

    def make_fill(self, m):
        """Fill method *m* with a randomly chosen body."""
        filled = [(1.0, self.fill_zero),
                  (1.0, self.fill_some_arith)]
        branch = eval_branches(self.rng, filled)
        branch(m)

    def fill_zero(self, m):
        """Trivial body: always return 0."""
        m.content.append('return 0')

    def fill_some_arith(self, m):
        """Body computing a random arithmetic expression of the arguments."""
        def low_numbers():
            return str(self.rng.randint(-1, 1))

        numbers = [gen_max_int_gen().set_rng(self.rng), low_numbers]
        exp = ArithGen(5, self.rng).generate(m.args + numbers)
        m.content.extend([
            Assignment('result', '=', [exp]),
            'return result',
        ])

    def generate_inline(self, literals):
        """Generate one randomly chosen call site."""
        branch = eval_branches(self.rng, self.branches)
        return branch(literals)

    def generate_monomorphic(self, literals):
        """Generates a monomorphic callsite"""
        c, m = self.make_class_function()
        self.make_fill(m)

        result = []
        class_var = self.next_variable()
        result.append(Assignment(class_var, '=', [CallStatement(c, [])]))

        l = self.make_loop(literals, class_var, m)
        result.append(l)
        return result

    def _generate_class_pair_site(self, literals, subclass):
        """Shared body for the polymorphic/duck call sites.

        Creates two classes with a same-named method, instantiates one of
        them behind a runtime condition, then calls the method in a loop.
        When *subclass* is True the first class inherits from the second
        (true polymorphism); otherwise the classes are unrelated (duck
        typing). RNG call order matches the original implementations.
        """
        c, m = self.make_class_function()
        c_super, m_super = self.make_class_function()
        m_super.name = m.name
        if subclass:
            c.super = [c_super.name]

        self.make_fill(m)
        self.make_fill(m_super)

        class_var = self.next_variable()
        clause = self.rng.choice(list(literals)) + \
            " < " + self.rng.choice(list(literals))
        i = IfStatement(
            clause,
            [Assignment(class_var, '=', [CallStatement(c, [])])],
            [Assignment(class_var, '=', [CallStatement(c_super, [])])],
        )
        result = [i]

        l = self.make_loop(literals, class_var, m)
        result.append(l)
        return result

    def generate_polymorphic(self, literals):
        """Generate a polymorphic callsite"""
        return self._generate_class_pair_site(literals, subclass=True)

    def generate_duck(self, literals):
        """Generate a duck typing callsite"""
        return self._generate_class_pair_site(literals, subclass=False)
| StarcoderdataPython |
1872895 | <reponame>marktiu7/Web<filename>myweb/pub_form/form_login.py
from django import forms
from django.forms import ModelForm
from login.models import *
#login
class UserForm(ModelForm):
    # Login form bound to the ``login`` model (username + password fields).
    class Meta:
        model = login
        fields = '__all__'
        # NOTE(review): label values look like pinyin placeholders
        # ('yonghuming' = username) -- verify the intended display text.
        labels = {
            'username': 'yonghuming',
            'password': '<PASSWORD>',
        }
        # Render the password field masked.
        widgets = {
            'password': forms.PasswordInput,
        }
        error_messages = {
            'username': {
                'invalid': 'haha'
            }
        }
5194631 | <gh_stars>0
from prettytable import PrettyTable
from prettytable import PLAIN_COLUMNS
"""
SAÍDA PARA CALCULOS SIMPLES:
print("bin| 1100 + 0011 = 1111")
print("dec| 12 + 3 = 15")
print("hex| C + 3 = F")
print("oct| 1100 + 0011 = 1111")
print("asc| - + - = -")
"""
table = PrettyTable(["Bases", "Operador 1", "op",
"Operador 2", "=", "Resultado"])
caracteres = ["+", "-", "*", "/", "|", "&"]
def calculation(op1, op2, operation):
    """Evaluate ``op1 <operation> op2`` on two decimal integer strings.

    Returns the numeric result ('/' yields a float), or an error message
    string when the operands are not integers or on division by zero.
    Returns None for an unrecognised operator.
    """
    try:
        left = int(op1)
        right = int(op2)
    except ValueError:
        return "Apenas operacoes com dois numeros inteiros positivos decimais"

    if operation == "+":
        return left + right
    if operation == "-":
        return left - right
    if operation == "*":
        return left * right
    if operation == "/":
        if right == 0:
            return "Não é possível dividir por zero"
        return left / right
    if operation == "|":
        return left | right
    if operation == "&":
        return left & right
    return None
def findOperation(expression):
    """Return the first supported operator (from the module-level
    ``caracteres`` list) found in *expression*, or False when none is."""
    return next(
        (symbol for symbol in caracteres if symbol in expression),
        False,
    )
def formataBases(decimal):
    """Format *decimal* in several number bases.

    Returns a list [hex, dec, oct, 8-bit binary, ascii-char]; the ASCII
    slot is '-' when the value is not a printable character.
    """
    list_bases = [
        "{:01x}".format(decimal),
        "{:01d}".format(decimal),
        "{:01o}".format(decimal),
        "{:08b}".format(decimal),
    ]
    # Printable ASCII is 32..126; the previous `< 126` off-by-one wrongly
    # excluded '~' (126). Space (32) is still rendered as '-' since it
    # would be invisible in the table.
    if 32 < decimal <= 126:
        list_bases.append("{:01c}".format(decimal))
    else:
        list_bases.append("-")
    return list_bases
# Interactive loop: read an expression, evaluate it and print the result
# in several number bases using the module-level PrettyTable.
while True:
    expression = input("Digite a expressão: \n")
    operation = findOperation(expression)
    if operation != False:
        # One row per base; the leading element is the row label.
        hexadecimal = ['Hex']
        decimal = ['Dec']
        octal = ['Oct']
        binary = ['Bin']
        asc = ['Asc']
        list_rows = [hexadecimal, decimal, octal, binary, asc]
        operators = expression.split(operation)
        result = calculation(operators[0], operators[1], operation)
        # calculation() returns a string on error (bad operands or divide
        # by zero): report it and prompt for a new expression.
        if type(result) == str:
            print(result)
            continue
        bases_op0 = formataBases(int(operators[0]))
        bases_op1 = formataBases(int(operators[1]))
        bases_res = formataBases(int(result))
        # Fill each row as "<op1> <operator> <op2> = <result>" in its base.
        for row in range(len(list_rows)):
            list_rows[row].append(bases_op0[row])
            list_rows[row].append(operation)
            list_rows[row].append(bases_op1[row])
            list_rows[row].append('=')
            list_rows[row].append(bases_res[row])
            table.add_row(list_rows[row])
        print(table)
        # Reset the table for the next expression.
        table.clear_rows()
    else:
        print("Não há operação a se fazer. tente uma das operações: [+ - * / | &]")
| StarcoderdataPython |
3221049 | <reponame>gabrielcervante/live-sharer<filename>venv/Lib/site-packages/PySide6/examples/widgets/layouts/dynamiclayouts/dynamiclayouts.py
############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
"""PySide6 port of the widgets/layouts/dynamiclayouts example from Qt v5.x"""
from PySide6.QtCore import Qt, QSize
from PySide6.QtWidgets import (QApplication, QDialog, QLayout, QGridLayout,
QMessageBox, QGroupBox, QSpinBox, QSlider,
QProgressBar, QDial, QDialogButtonBox,
QComboBox, QLabel)
class Dialog(QDialog):
    """Dialog demonstrating dynamic re-layout of widgets at runtime.

    A group of "rotable" widgets can be shuffled around a two-row grid, and
    the dialog's button box can be re-oriented horizontally or vertically.
    """

    def __init__(self):
        super().__init__()  # modern zero-argument super (PySide6 is Python 3 only)

        self.rotableWidgets = []

        self.createRotableGroupBox()
        self.createOptionsGroupBox()
        self.createButtonBox()

        mainLayout = QGridLayout()
        mainLayout.addWidget(self.rotableGroupBox, 0, 0)
        mainLayout.addWidget(self.optionsGroupBox, 1, 0)
        mainLayout.addWidget(self.buttonBox, 2, 0)
        # Let the dialog shrink down to the minimum size its contents need.
        mainLayout.setSizeConstraint(QLayout.SetMinimumSize)

        self.mainLayout = mainLayout
        self.setLayout(self.mainLayout)

        self.setWindowTitle("Dynamic Layouts")

    def rotateWidgets(self):
        """Shift every rotable widget one slot around the two-row grid.

        Raises:
            AssertionError: if the number of widgets is odd (the two-row
                arrangement requires an even count).
        """
        count = len(self.rotableWidgets)
        if count % 2 == 1:
            raise AssertionError("Number of widgets must be even")

        for widget in self.rotableWidgets:
            self.rotableLayout.removeWidget(widget)

        # Move the first widget to the back, then re-add everything so each
        # widget effectively advances one position.
        self.rotableWidgets.append(self.rotableWidgets.pop(0))

        for i in range(count // 2):
            self.rotableLayout.addWidget(self.rotableWidgets[count - i - 1], 0, i)
            self.rotableLayout.addWidget(self.rotableWidgets[i], 1, i)

    def buttonsOrientationChanged(self, index):
        """Re-place and resize the button box when the orientation combo changes."""
        # Temporarily lift the size constraint so the dialog can be resized freely.
        self.mainLayout.setSizeConstraint(QLayout.SetNoConstraint)
        self.setMinimumSize(0, 0)

        orientation = Qt.Orientation(int(self.buttonsOrientationComboBox.itemData(index)))
        if orientation == self.buttonBox.orientation():
            return

        self.mainLayout.removeWidget(self.buttonBox)

        spacing = self.mainLayout.spacing()
        oldSizeHint = self.buttonBox.sizeHint() + QSize(spacing, spacing)
        self.buttonBox.setOrientation(orientation)
        newSizeHint = self.buttonBox.sizeHint() + QSize(spacing, spacing)

        if orientation == Qt.Horizontal:
            # Bottom row: reclaim the horizontal space, grow vertically.
            self.mainLayout.addWidget(self.buttonBox, 2, 0)
            self.resize(self.size() + QSize(-oldSizeHint.width(), newSizeHint.height()))
        else:
            # Right-hand column spanning both group boxes.
            self.mainLayout.addWidget(self.buttonBox, 0, 3, 2, 1)
            self.resize(self.size() + QSize(newSizeHint.width(), -oldSizeHint.height()))

        self.mainLayout.setSizeConstraint(QLayout.SetDefaultConstraint)

    def show_help(self):
        """Show a short message box describing the example."""
        QMessageBox.information(self, "Dynamic Layouts Help",
                                "This example shows how to change layouts "
                                "dynamically.")

    def createRotableGroupBox(self):
        """Create the group box holding the widgets that can be rotated."""
        self.rotableGroupBox = QGroupBox("Rotable Widgets")

        self.rotableWidgets.append(QSpinBox())
        self.rotableWidgets.append(QSlider())
        self.rotableWidgets.append(QDial())
        self.rotableWidgets.append(QProgressBar())

        count = len(self.rotableWidgets)
        # Chain the widgets so changing one value propagates to the next one.
        for i in range(count):
            self.rotableWidgets[i].valueChanged[int].connect(
                self.rotableWidgets[(i + 1) % count].setValue)

        self.rotableLayout = QGridLayout()
        self.rotableGroupBox.setLayout(self.rotableLayout)

        self.rotateWidgets()

    def createOptionsGroupBox(self):
        """Create the options group box with the button-orientation selector."""
        self.optionsGroupBox = QGroupBox("Options")

        buttonsOrientationLabel = QLabel("Orientation of buttons:")
        buttonsOrientationComboBox = QComboBox()
        buttonsOrientationComboBox.addItem("Horizontal", Qt.Horizontal)
        buttonsOrientationComboBox.addItem("Vertical", Qt.Vertical)
        buttonsOrientationComboBox.currentIndexChanged[int].connect(self.buttonsOrientationChanged)

        self.buttonsOrientationComboBox = buttonsOrientationComboBox

        optionsLayout = QGridLayout()
        optionsLayout.addWidget(buttonsOrientationLabel, 0, 0)
        optionsLayout.addWidget(self.buttonsOrientationComboBox, 0, 1)
        # Absorb extra horizontal space to the right of the combo box.
        optionsLayout.setColumnStretch(2, 1)
        self.optionsGroupBox.setLayout(optionsLayout)

    def createButtonBox(self):
        """Create the dialog button box with Close, Help and Rotate buttons."""
        self.buttonBox = QDialogButtonBox()

        closeButton = self.buttonBox.addButton(QDialogButtonBox.Close)
        helpButton = self.buttonBox.addButton(QDialogButtonBox.Help)
        rotateWidgetsButton = self.buttonBox.addButton("Rotate &Widgets", QDialogButtonBox.ActionRole)

        rotateWidgetsButton.clicked.connect(self.rotateWidgets)
        closeButton.clicked.connect(self.close)
        helpButton.clicked.connect(self.show_help)
if __name__ == '__main__':
    import sys

    # Standard Qt bootstrap: create the application and run the dialog modally.
    app = QApplication(sys.argv)
    dialog = Dialog()
    # NOTE(review): exec_() is the legacy name kept for compatibility; current
    # PySide6 prefers exec() — confirm the targeted PySide6 version.
    dialog.exec_()
| StarcoderdataPython |
1654013 | # Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(tbachman) Figure out a better/common place for this
# Agent type string reported for the DVS agent.
AGENT_TYPE_DVS = 'DVS agent'
# Reserved names for internal-use networking objects created for APIC sync
# and host SNAT (source NAT) support; the "-for-internal-use" suffixes mark
# them as not user-manageable.
APIC_SYNC_NETWORK = 'apic-sync-network'
HOST_SNAT_NETWORK_PREFIX = 'host-snat-network-for-internal-use-'
HOST_SNAT_POOL = 'host-snat-pool-for-internal-use'
HOST_SNAT_POOL_PORT = 'host-snat-pool-port-for-internal-use'
DEVICE_OWNER_SNAT_PORT = 'host-snat-pool-port-device-owner-internal-use'
# TODO(tbachman) figure out a better/common place for this
VIF_TYPE_DVS = "dvs"
| StarcoderdataPython |
4998597 | # MIT License
# Copyright (c) 2022 Muhammed
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Telegram Link : https://telegram.dog/Mo_Tech_Group
# Repo Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot
# License Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot/blob/LuciferMoringstar-Robot/LICENSE
import logging, asyncio, re
from pyrogram import Client, filters, enums
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram.errors import FloodWait
from pyrogram.errors.exceptions.bad_request_400 import ChannelInvalid, ChatAdminRequired, UsernameInvalid, UsernameNotModified
from LuciferMoringstar_Robot import ADMINS, LOG_CHANNEL, temp
from database.autofilter_mdb import save_file
# Module-level logger for the indexing handlers.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Serializes indexing jobs: only one channel may be indexed at a time.
lock = asyncio.Lock()
@Client.on_callback_query(filters.regex(r'^index'))
async def index_files(bot, update):
    """Handle moderator callbacks for an indexing request.

    Callback data format: ``index#<action>#<chat>#<last_msg_id>#<from_user>``
    (or the special ``index_cancel`` to abort a running job). ``accept``
    starts indexing; ``reject`` notifies the submitter and deletes the prompt.
    """
    if update.data.startswith('index_cancel'):
        # Signal the running index_files_to_db loop to stop.
        temp.CANCEL = True
        return await update.answer("𝙲𝙰𝙽𝙲𝙴𝙻𝙻𝙸𝙽𝙶 𝙸𝙽𝙳𝙴𝚇𝙸𝙽𝙶..")
    _, action, chat, lst_msg_id, from_user = update.data.split("#")
    if action == 'reject':
        await update.message.delete()
        await bot.send_message(chat_id = int(from_user), text = """𝚈𝙾𝚄𝚁 𝚂𝚄𝙱𝙼𝙸𝚂𝚂𝙸𝙾𝙽 𝙵𝙾𝚁 𝙸𝙽𝙳𝙴𝚇𝙸𝙽𝙶 **{}** 𝙷𝙰𝚂 𝙱𝙴𝙴𝙽 𝙳𝙴𝙲𝙻𝙸𝙴𝙽𝙴𝙳 𝙱𝚈 𝙾𝚄𝚁 𝙼𝙾𝙳𝙴𝚁𝙰𝚃𝙾𝚁𝚂""".format(chat), reply_to_message_id = int(lst_msg_id))
        return
    # Only one indexing job may run at a time.
    if lock.locked():
        return await update.answer("𝚆𝙰𝙸𝚃 𝚄𝙽𝚃𝙸𝙻 𝙿𝚁𝙴𝚅𝙸𝙾𝚄𝚂 𝙿𝚁𝙾𝙲𝙴𝚂𝚂 𝙲𝙾𝙼𝙿𝙻𝙴𝚃𝙴", show_alert=True)
    msg = update.message
    await update.answer("𝙿𝚁𝙾𝙲𝙴𝚂𝚂𝙸𝙽𝙶...⏳", show_alert=True)
    if int(from_user) not in ADMINS:
        # Non-admin submitter: acknowledge that a moderator accepted the request.
        await bot.send_message(int(from_user),
                               "𝚈𝙾𝚄𝚁 𝚂𝚄𝙱𝙼𝙸𝚂𝚂𝙸𝙾𝙽 𝙵𝙾𝚁 𝙸𝙽𝙳𝙴𝚇𝙸𝙽𝙶 {} 𝙷𝙰𝚂 𝙱𝙴𝙴𝙽 𝙰𝙲𝙲𝙴𝙿𝚃𝙴𝙳 𝙱𝚈 𝙾𝚄𝚁 𝙼𝙾𝙳𝙴𝚁𝙰𝚃𝙾𝚁𝚂 𝙰𝙽𝙳 𝚆𝙸𝙻𝙻 𝙱𝙴 𝙰𝙳𝙳𝙴𝙳 𝚂𝙾𝙾𝙽".format(chat),
                               reply_to_message_id=int(lst_msg_id))
        buttons = [[ InlineKeyboardButton('𝚂𝚃𝙾𝙿', callback_data='close') ]]
        await update.message.edit(text = "𝚂𝚃𝙰𝚁𝚃𝙸𝙽𝙶 𝙸𝙽𝙳𝙴𝚇𝙸𝙽𝙶..", reply_markup=InlineKeyboardMarkup(buttons))
    try:
        # Numeric chat ids arrive as strings; usernames stay as strings.
        chat = int(chat)
    except ValueError:  # was a bare except with a no-op `chat = chat`
        pass
    await index_files_to_db(int(lst_msg_id), chat, msg, bot)
@Client.on_message((filters.forwarded | (filters.regex(r"(https://)?(t\.me/|telegram\.me/|telegram\.dog/)(c/)?(\d+|[a-zA-Z_0-9]+)/(\d+)$")) & filters.text ) & filters.private & filters.incoming)
async def send_for_index(bot, message):
    """Validate a forwarded channel message / t.me link and queue it for indexing.

    Admins get an inline accept/close prompt directly; requests from other
    users are forwarded to LOG_CHANNEL for moderator approval.
    """
    if message.text:
        # Raw string prevents invalid-escape-sequence warnings for \. and \d.
        regex = re.compile(r"(https://)?(t\.me/|telegram\.me/|telegram\.dog/)(c/)?(\d+|[a-zA-Z_0-9]+)/(\d+)$")
        match = regex.match(message.text)
        if not match:
            return await message.reply('Invalid link')
        chat_id = match.group(4)
        last_msg_id = int(match.group(5))
        if chat_id.isnumeric():
            # Private channel links use the bare internal id; Bot API ids are
            # prefixed with -100.
            chat_id = int(("-100" + chat_id))
    elif message.forward_from_chat.type == enums.ChatType.CHANNEL:
        last_msg_id = message.forward_from_message_id
        chat_id = message.forward_from_chat.username or message.forward_from_chat.id
    else:
        return
    try:
        await bot.get_chat(chat_id)
    except ChannelInvalid:
        return await message.reply('This may be a private channel / group. Make me an admin over there to index the files.')
    except (UsernameInvalid, UsernameNotModified):
        return await message.reply('Invalid Link specified.')
    except Exception as e:
        logger.exception(e)
        return await message.reply(f'Errors - {e}')
    try:
        k = await bot.get_messages(chat_id, last_msg_id)
    except Exception:  # was a bare except: — keep the broad catch but make it explicit
        return await message.reply('Make Sure That Iam An Admin In The Channel, if channel is private')
    if k.empty:
        return await message.reply('This may be group and iam not a admin of the group.')
    if message.from_user.id in ADMINS:
        buttons = [[
            InlineKeyboardButton('𝚈𝙴𝚂', callback_data=f'index#accept#{chat_id}#{last_msg_id}#{message.from_user.id}'),
            InlineKeyboardButton('𝙲𝙻𝙾𝚂𝙴', callback_data='close_data')
        ]]
        reply_markup = InlineKeyboardMarkup(buttons)
        return await message.reply(
            f'Do you Want To Index This Channel/ Group ?\n\nChat ID/ Username: <code>{chat_id}</code>\nLast Message ID: <code>{last_msg_id}</code>',
            reply_markup=reply_markup)
    # Non-admin request: include an invite link so moderators can inspect the chat.
    if type(chat_id) is int:
        try:
            link = (await bot.create_chat_invite_link(chat_id)).invite_link
        except ChatAdminRequired:
            return await message.reply('Make sure iam an admin in the chat and have permission to invite users.')
    else:
        link = f"@{message.forward_from_chat.username}"
    buttons = [[
        InlineKeyboardButton('Accept Index', callback_data=f'index#accept#{chat_id}#{last_msg_id}#{message.from_user.id}')
    ],[
        InlineKeyboardButton('Reject Index', callback_data=f'index#reject#{chat_id}#{message.id}#{message.from_user.id}')
    ]]
    reply_markup = InlineKeyboardMarkup(buttons)
    await bot.send_message(LOG_CHANNEL,
                           f'#IndexRequest\n\nBy : {message.from_user.mention} (<code>{message.from_user.id}</code>)\nChat ID/ Username - <code> {chat_id}</code>\nLast Message ID - <code>{last_msg_id}</code>\nInviteLink - {link}',
                           reply_markup=reply_markup)
    await message.reply('ThankYou For the Contribution, Wait For My Moderators to verify the files.')
@Client.on_message(filters.command('setskip') & filters.user(ADMINS))
async def set_skip_number(bot, update):
    """Admin command: set the message id the indexer resumes from (/setskip N)."""
    if ' ' in update.text:
        # maxsplit=1 avoids a ValueError crash when extra words are supplied;
        # the int() conversion below still rejects non-numeric arguments.
        _, skip = update.text.split(" ", 1)
        try:
            skip = int(skip)
        except ValueError:  # was a bare except:
            return await update.reply("Skip number should be an integer.")
        # Apply the new starting point before confirming to the admin.
        temp.CURRENT = int(skip)
        await update.reply(f"Successfully set SKIP number as {skip}")
    else:
        await update.reply("Give me a skip number")
async def index_files_to_db(lst_msg_id, chat, msg, bot):
    """Walk messages 
    1..lst_msg_id of *chat* and save their media to the database.

    Progress is reported by editing *msg* every 20 messages; the job can be
    aborted by setting ``temp.CANCEL`` (via the 'index_cancel' callback).
    Resumes from ``temp.CURRENT`` so /setskip can skip ahead.
    """
    total_files = 0
    duplicate = 0
    errors = 0
    deleted = 0
    no_media = 0
    async with lock:
        try:
            total = lst_msg_id + 1
            current = temp.CURRENT
            temp.CANCEL = False
            while current < total:
                if temp.CANCEL:
                    await msg.edit("Succesfully Cancelled")
                    break
                try:
                    message = await bot.get_messages(chat_id=chat, message_ids=current, replies=0)
                except FloodWait as e:
                    # NOTE(review): FloodWait exposes `.x` in old pyrogram;
                    # newer releases use `.value` — confirm pinned version.
                    await asyncio.sleep(e.x)
                    message = await bot.get_messages(chat, current, replies=0)
                except Exception as e:
                    logger.exception(e)
                try:
                    # Pick the first indexable media attribute on the message.
                    for file_type in ("document", "video", "audio"):
                        media = getattr(message, file_type, None)
                        if media is not None:
                            break
                    else:
                        # No indexable media on this message. Bug fix: advance
                        # past it — the original `continue` left `current`
                        # unchanged and refetched the same message forever.
                        current += 1
                        continue
                    media.file_type = file_type
                    media.caption = message.caption
                    aynav, vnay = await save_file(media)
                    if aynav:
                        total_files += 1
                    elif vnay == 0:
                        duplicate += 1
                    elif vnay == 2:
                        errors += 1
                except Exception as e:
                    if "NoneType" in str(e):
                        if message.empty:
                            deleted += 1
                        elif not media:
                            no_media += 1
                        logger.warning("Skipping deleted / Non-Media messages (if this continues for long, use /setskip to set a skip number)")
                    else:
                        logger.exception(e)
                current += 1
                if current % 20 == 0:
                    can = [[InlineKeyboardButton('𝙲𝙰𝙽𝙲𝙴𝙻', callback_data='index_cancel')]]
                    reply = InlineKeyboardMarkup(can)
                    await msg.edit_text(text=f"• 𝚃𝙾𝚃𝙰𝙻 𝙼𝙴𝚂𝚂𝙰𝙶𝙴𝚂 𝙵𝙴𝚃𝙲𝙷𝙴𝙳 : <code>{current}</code>\n• 𝚃𝙾𝚃𝙰𝙻 𝙼𝙴𝚂𝚂𝙰𝙶𝙴𝚂 𝚂𝙰𝚅𝙴𝙳 : <code>{total_files}</code>\n• 𝙳𝚄𝙿𝙻𝙸𝙲𝙰𝚃𝙴 𝙵𝙸𝙻𝙴𝚂 𝚂𝙺𝙸𝙿𝙴𝙳 : <code>{duplicate}</code>\n• 𝙳𝙴𝙻𝙴𝚃𝙴𝙳 𝙼𝙴𝚂𝚂𝙰𝙶𝙴𝚂 𝚂𝙺𝙸𝙿𝙿𝙴𝙳 : <code>{deleted}</code>\n 𝙽𝙾𝙽-𝙼𝙴𝙳𝙸𝙰 𝙼𝙴𝚂𝚂𝙰𝙶𝙴𝚂 𝚂𝙺𝙸𝙿𝙿𝙴𝙳 : <code>{no_media}</code>\n• 𝙴𝚁𝚁𝙾𝚁 𝙾𝙲𝙲𝚄𝚁𝙴𝙳 : <code>{errors}</code>", reply_markup=reply)
        except Exception as e:
            logger.exception(e)
            await msg.edit(f'Error: {e}')
        else:
            await msg.edit(f'• 𝚂𝚄𝙲𝙲𝙴𝚂𝙵𝚄𝙻𝙻𝚈 𝚂𝙰𝚅𝙴𝙳 <code>{total_files}</code> 𝚃𝙾 𝙳𝙰𝚃𝙰𝙱𝙰𝚂𝙴.!\n• 𝙳𝚄𝙿𝙻𝙸𝙲𝙰𝚃𝙴 𝙵𝙸𝙻𝙴𝚂 𝚂𝙺𝙸𝙿𝙿𝙴𝙳 : <code>{duplicate}</code>\n• 𝙳𝙴𝙻𝙴𝚃𝙴𝙳 𝙼𝙴𝚂𝚂𝙰𝙶𝙴𝚂 𝚂𝙺𝙸𝙿𝙿𝙴𝙳 : <code>{deleted}</code>\n• 𝙽𝙾𝙽-𝙼𝙴𝙳𝙸𝙰 𝙼𝙴𝚂𝚂𝙰𝙶𝙴𝚂 𝚂𝙺𝙸𝙿𝙿𝙴𝙳 : <code>{no_media}</code>\n• 𝙴𝚁𝚁𝙾𝚁𝚂 𝙾𝙲𝙲𝚄𝚁𝙴𝙳 : <code>{errors}</code>')
| StarcoderdataPython |
11325133 | #!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
from basic_modules.workflow import Workflow
from utils import logger
from mg_process_macs2.tool.macs2 import Macs2
# ------------------------------------------------------------------------------
class process_macs2(Workflow):  # pylint: disable=invalid-name,too-few-public-methods
    """Workflow wrapper that runs the MACS2 peak-calling tool."""

    configuration = {}

    def __init__(self, configuration=None):
        """Initialise the workflow with an optional configuration mapping.

        Parameters
        ----------
        configuration : dict
            Parameters defining how the operation should be carried out;
            these are specific to each Tool.
        """
        logger.info("Processing Test")
        if configuration is None:
            configuration = {}
        self.configuration.update(configuration)

    def run(self, input_files, metadata, output_files):
        """Run MACS2 over the supplied input files.

        Parameters
        ----------
        input_files : dict
            Dictionary of input file locations.
        metadata : list
            Required metadata for the run.
        output_files : dict
            Locations of the output files returned by the pipeline.

        Returns
        -------
        tuple of (dict, dict)
            Output file locations and the matching metadata for each file.
        """
        handler = Macs2(self.configuration)
        return handler.run(input_files, metadata, output_files)
# ------------------------------------------------------------------------------
def main_json(config, in_metadata, out_metadata):
    """Launch the app from JSON configuration files.

    Uses ``config.json`` / ``input_metadata.json`` style inputs and writes the
    run's output metadata to *out_metadata*. Returns the launcher's result.
    """
    logger.info("1. Instantiate and launch the App")
    # Imported lazily so the JSON launcher is only required in this code path.
    from apps.jsonapp import JSONApp

    launcher = JSONApp()
    result = launcher.launch(process_macs2, config, in_metadata, out_metadata)

    logger.info("2. Execution finished; see " + out_metadata)
    return result
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    # Set up the command line parameters
    PARSER = argparse.ArgumentParser(description="Index the genome file")
    PARSER.add_argument("--config", help="Configuration file")
    PARSER.add_argument("--in_metadata", help="Location of input metadata file")
    PARSER.add_argument("--out_metadata", help="Location of output metadata file")
    PARSER.add_argument("--local", action="store_const", const=True, default=False)

    # Get the matching parameters from the command line
    ARGS = PARSER.parse_args()

    CONFIG = ARGS.config
    IN_METADATA = ARGS.in_metadata
    OUT_METADATA = ARGS.out_metadata
    LOCAL = ARGS.local

    if LOCAL:
        import sys
        # Flag read by the pipeline framework to detect command-line execution.
        sys._run_from_cmdl = True  # pylint: disable=protected-access

    RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)
    print(RESULTS)
| StarcoderdataPython |
11269706 | <reponame>scottwittenburg/vcs<gh_stars>10-100
import basevcstest
class TestVCSPatterns(basevcstest.VCSBaseTest):
    """Regression test for VCS fill-area pattern/hatch rendering."""

    def testLargePatterns(self):
        # Three side-by-side unit-height rectangles, one per fill style
        # (solid, pattern, hatch), all using the same color index.
        fillarea = self.x.createfillarea()
        fillarea.x = [[0, .33, .33, 0], [.33, .67, .67, .33], [.67, 1, 1, .67]]
        fillarea.y = [[0, 0, 1, 1]] * 3
        fillarea.style = ["solid", "pattern", "hatch"]
        fillarea.index = [1, 5, 5]
        fillarea.color = [50, 50, 50]
        self.x.plot(fillarea, bg=self.bg)
        fnm = "test_vcs_large_pattern_hatch.png"
        # Compare the rendered canvas against the stored baseline image.
        self.checkImage(fnm)
| StarcoderdataPython |
12846923 | from django.views import View
class PostOnlyView(View):
    """View that only handles POST requests through a configurable form.

    Subclasses set ``form_class`` and override ``form_valid`` /
    ``form_invalid`` to define what happens after validation.
    """

    form_class = None

    def post(self, request, *args, **kwargs):
        """Bind the form to the POST data and dispatch on its validity."""
        form = self.form_class(request.POST)
        handler = self.form_valid if form.is_valid() else self.form_invalid
        return handler(form)

    def form_valid(self, form):
        """Hook called with a valid form; default does nothing."""
        pass

    def form_invalid(self, form):
        """Hook called with an invalid form; default does nothing."""
        pass
# Enter your code here. Read input from STDIN. Print output to STDOUT
import cmath

# Parse the input (e.g. "1+2j") as a complex number and print its polar
# representation: modulus r first, then phase angle phi in radians.
# Compute cmath.polar() once instead of twice and unpack both components.
modulus, phase = cmath.polar(complex(input()))
print(modulus)
print(phase)
| StarcoderdataPython |
6694458 | <reponame>caomingpei/smart-contract-vulnerability-detector<gh_stars>1-10
"""
Module detecting deprecated standards.
"""
from slither.core.cfg.node import NodeType
from slither.core.declarations.solidity_variables import (
SolidityVariableComposed,
SolidityFunction,
)
from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
from slither.slithir.operations import LowLevelCall
from slither.visitors.expression.export_values import ExportValues
# Reference: https://smartcontractsecurity.github.io/SWC-registry/docs/SWC-111
class DeprecatedStandards(AbstractDetector):
    """
    Use of Deprecated Standards

    Slither detector (SWC-111) that flags deprecated Solidity constructs:
    deprecated global variables, functions, node types (throw) and
    low-level calls (callcode), reporting the recommended replacement.
    """

    # Detector registration metadata consumed by the Slither framework.
    ARGUMENT = "deprecated-standards"
    HELP = "Deprecated Solidity Standards"
    IMPACT = DetectorClassification.INFORMATIONAL
    CONFIDENCE = DetectorClassification.HIGH

    WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#deprecated-standards"

    WIKI_TITLE = "Deprecated standards"
    WIKI_DESCRIPTION = "Detect the usage of deprecated standards."

    # region wiki_exploit_scenario
    WIKI_EXPLOIT_SCENARIO = """
```solidity
contract ContractWithDeprecatedReferences {
    // Deprecated: Change block.blockhash() -> blockhash()
    bytes32 globalBlockHash = block.blockhash(0);
    // Deprecated: Change constant -> view
    function functionWithDeprecatedThrow() public constant {
        // Deprecated: Change msg.gas -> gasleft()
        if(msg.gas == msg.value) {
            // Deprecated: Change throw -> revert()
            throw;
        }
    }
    // Deprecated: Change constant -> view
    function functionWithDeprecatedReferences() public constant {
        // Deprecated: Change sha3() -> keccak256()
        bytes32 sha3Result = sha3("test deprecated sha3 usage");
        // Deprecated: Change callcode() -> delegatecall()
        address(this).callcode();
        // Deprecated: Change suicide() -> selfdestruct()
        suicide(address(0));
    }
}
```"""
    # endregion wiki_exploit_scenario

    WIKI_RECOMMENDATION = "Replace all uses of deprecated symbols."

    # The format for the following deprecated lists is [(detecting_signature, original_text, recommended_text)]
    DEPRECATED_SOLIDITY_VARIABLE = [
        ("block.blockhash", "block.blockhash()", "blockhash()"),
        ("msg.gas", "msg.gas", "gasleft()"),
    ]
    DEPRECATED_SOLIDITY_FUNCTIONS = [
        ("suicide(address)", "suicide()", "selfdestruct()"),
        ("sha3()", "sha3()", "keccak256()"),
    ]
    DEPRECATED_NODE_TYPES = [(NodeType.THROW, "throw", "revert()")]
    DEPRECATED_LOW_LEVEL_CALLS = [("callcode", "callcode", "delegatecall")]

    def detect_deprecation_in_expression(self, expression):
        """Detects if an expression makes use of any deprecated standards.
        Returns:
            list of tuple: (detecting_signature, original_text, recommended_text)"""
        # Perform analysis on this expression
        export = ExportValues(expression)
        export_values = export.result()

        # Define our results list
        results = []

        # Check if there is usage of any deprecated solidity variables or functions
        for dep_var in self.DEPRECATED_SOLIDITY_VARIABLE:
            if SolidityVariableComposed(dep_var[0]) in export_values:
                results.append(dep_var)
        for dep_func in self.DEPRECATED_SOLIDITY_FUNCTIONS:
            if SolidityFunction(dep_func[0]) in export_values:
                results.append(dep_func)

        return results

    def detect_deprecated_references_in_node(self, node):
        """Detects if a node makes use of any deprecated standards.
        Returns:
            list of tuple: (detecting_signature, original_text, recommended_text)"""
        # Define our results list
        results = []

        # If this node has an expression, we check the underlying expression.
        if node.expression:
            results += self.detect_deprecation_in_expression(node.expression)

        # Check if there is usage of any deprecated solidity variables or functions
        # (here: deprecated node types such as THROW).
        for dep_node in self.DEPRECATED_NODE_TYPES:
            if node.type == dep_node[0]:
                results.append(dep_node)

        return results

    def detect_deprecated_references_in_contract(self, contract):
        """Detects the usage of any deprecated built-in symbols.
        Returns:
            list of tuple: (state_variable | node, (detecting_signature, original_text, recommended_text))"""
        results = []

        # State variable initializers can also reference deprecated symbols.
        for state_variable in contract.state_variables_declared:
            if state_variable.expression:
                deprecated_results = self.detect_deprecation_in_expression(
                    state_variable.expression
                )
                if deprecated_results:
                    results.append((state_variable, deprecated_results))

        # Loop through all functions + modifiers in this contract.
        # pylint: disable=too-many-nested-blocks
        for function in contract.functions_and_modifiers_declared:
            # Loop through each node in this function.
            for node in function.nodes:
                # Detect deprecated references in the node.
                deprecated_results = self.detect_deprecated_references_in_node(node)

                # Detect additional deprecated low-level-calls.
                for ir in node.irs:
                    if isinstance(ir, LowLevelCall):
                        for dep_llc in self.DEPRECATED_LOW_LEVEL_CALLS:
                            if ir.function_name == dep_llc[0]:
                                deprecated_results.append(dep_llc)

                # If we have any results from this iteration, add them to our results list.
                if deprecated_results:
                    results.append((node, deprecated_results))

        return results

    def _detect(self):
        """Detects if an expression makes use of any deprecated standards.
        Recursively visit the calls
        Returns:
            list: {'vuln', 'filename,'contract','func', 'deprecated_references'}
        """
        results = []
        for contract in self.contracts:
            deprecated_references = self.detect_deprecated_references_in_contract(contract)
            if deprecated_references:
                for deprecated_reference in deprecated_references:
                    source_object = deprecated_reference[0]
                    deprecated_entries = deprecated_reference[1]
                    info = ["Deprecated standard detected ", source_object, ":\n"]

                    for (_dep_id, original_desc, recommended_disc) in deprecated_entries:
                        info += [
                            f'\t- Usage of "{original_desc}" should be replaced with "{recommended_disc}"\n'
                        ]

                    res = self.generate_result(info)
                    results.append(res)

        return results
| StarcoderdataPython |
8036578 | <reponame>parkun-by/broadcaster
import logging
import aio_pika
import config
import asyncio
from typing import Callable
from asyncio.events import AbstractEventLoop
logger = logging.getLogger(__name__)
class Rabbit:
    """Consumes violation messages from RabbitMQ, reconnecting on failure."""

    async def start(self,
                    loop: AbstractEventLoop,
                    callback: Callable,
                    queue: str) -> None:
        """Keep (re)connecting to RabbitMQ until a connection succeeds.

        Retries with an exponentially growing pause (doubling up to ~30s);
        below the cap failures are logged at INFO, above it with a traceback.
        """
        connected = False
        pause = 1
        while not connected:
            try:
                await self.connect(loop, callback, queue)
                connected = True
                pause = 1  # reset backoff after a successful connection
            except Exception:
                connected = False
                await asyncio.sleep(pause)
                if pause < 30:
                    logger.info('Fail. Trying reconnect Rabbit.')
                    pause *= 2
                else:
                    logger.exception('Fail. Trying reconnect Rabbit.')

    async def connect(self,
                      loop: AbstractEventLoop,
                      callback: Callable,
                      queue_name: str) -> None:
        """Connect, declare exchange/queue, then consume messages forever.

        Each message body is decoded to str and passed to *callback*;
        messages are acked once the callback returns without raising.
        """
        self.connection = await aio_pika.connect_robust(
            config.RABBIT_AMQP_ADDRESS,
            loop=loop
        )

        async with self.connection:
            # Creating channel
            channel: aio_pika.Channel = await self.connection.channel()

            await channel.declare_exchange(
                name=config.RABBIT_EXCHANGE,
                type=aio_pika.exchange.ExchangeType.DIRECT,
                durable=True,
                auto_delete=False,
                internal=False,
                passive=False
            )

            # Declaring queue ("lazy" mode keeps messages on disk to save RAM).
            queue = await channel.declare_queue(
                name=queue_name,
                durable=True,
                passive=False,
                auto_delete=False,
                arguments={
                    "x-queue-mode": "lazy"
                }
            )

            await queue.bind(
                exchange=config.RABBIT_EXCHANGE,
                routing_key=config.ROUTING_KEY_VIOLATION
            )

            logger.info("Подключились к раббиту")

            # Consume indefinitely; any exception propagates to start() which
            # will reconnect with backoff.
            while True:
                async with queue.iterator(no_ack=False) as queue_iter:
                    async for message in queue_iter:
                        async with message.process():
                            await callback(message.body.decode())
| StarcoderdataPython |
6552317 | <reponame>HoeYeon/Image_Captioning
# All paths are relative to train_val.py file
# Image-captioning training/evaluation configuration (Flickr8k dataset).
config = {
    'images_path': 'train_val_data/Flicker8k_Dataset/', #Make sure you put that last slash(/)
    'train_data_path': 'train_val_data/Flickr_8k.trainImages.txt',
    'val_data_path': 'train_val_data/Flickr_8k.devImages.txt',
    'captions_path': 'train_val_data/Flickr8k.token.txt',
    'tokenizer_path': 'model_data/tokenizer.pkl',
    'model_data_path': 'model_data/', #Make sure you put that last slash(/)
    'model_load_path': 'model_data/BLEU_1-0.60-merge.hdf5',
    'num_of_epochs': 20,
    'max_length': 40, #This is set manually after training of model and required for test.py
    'batch_size': 128,
    'beam_search_k':3,
    'test_data_path': 'test_data/', #Make sure you put that last slash(/)
    'model_type': 'inceptionv3', # inceptionv3 or vgg16
    'random_seed': 1035
}

# Hyperparameters for the RNN (decoder) part of the captioning model.
rnnConfig = {
    'embedding_size': 200,
    'LSTM_units': 256,
    'dense_units': 256,
    'dropout': 0.5
}
| StarcoderdataPython |
11351735 | <reponame>anju24/liftover_helper<gh_stars>0
import setuptools
# Package metadata for the LiftoverHelper distribution.
setuptools.setup(
    name='LiftoverHelper',
    version='0.1.0',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/anju24/liftover_helper',
    license='LICENSE.txt',
    description='Pre and post processing of VCF files for liftover.',
    packages=['scripts'],
    install_requires=[
        "pyvcf == 0.6.8",
    ],
    python_requires='>=3.6',
)
| StarcoderdataPython |
1822028 | <reponame>USF-GT-Molecular-Modeling/hoomd-blue
# Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
"""Test the C++ internal wall data structures."""
import numpy as np
import pytest
from hoomd.md import _md
class _TestCounter:
def __init__(self):
self.previous_cls = None
self.count = 0
def __call__(self, arg):
# Return empty string for non class arguments
if isinstance(arg, dict):
return ""
if arg == self.previous_cls:
self.count += 1
self.count = 1
self.previous_cls = arg
return f"{arg.__name__}-{self.count}"
# Parameter sets for each wall geometry: (wall class, tuple of constructor
# keyword-argument dicts). The kwargs double as the expected attribute values
# for the C++/Python round-trip checks below.
_test_args = (
    (_md.SphereWall, ({
        "radius": 4.0,
        "origin": (1.0, 0, 0),
        "inside": True,
        "open": False
    }, {
        "radius": 1.0,
        "origin": (0.0, 0.0, 0.0),
        "inside": False,
        "open": False
    }, {
        "radius": 3.1415,
        "origin": (-1.0, -2.0, 2.0),
        "inside": False,
        "open": True
    })),
    (_md.CylinderWall, ({
        "radius": 4.0,
        "origin": (1.0, 0, 0),
        "axis": (1.0, 0.0, 0),
        "inside": True,
        "open": True
    }, {
        "radius": 1.0,
        "origin": (0.0, 0.0, 0.0),
        "axis": (0.0, 1.0, 0.0),
        "inside": False,
        "open": False
    }, {
        "radius": 3.1415,
        "origin": (-1.0, -2.0, 1.0),
        "axis": (0.0, 0.0, 1.0),
        "inside": False,
        "open": True
    })),
    (
        _md.PlaneWall,
        (
            # The normals have to be unit vectors for the equality checks to
            # hold. The C++ class currently normalizes any input vector.
            {
                "origin": (1.0, 0, 0),
                "normal": (1.0, 0.0, 0),
                "open": False
            },
            {
                "origin": (0.0, 0.0, 0.0),
                "normal": (0.0, 1.0, 0.0),
                "open": True
            },
            {
                "origin": (-1.0, -2.0, 1.0),
                "normal": (0.0, 0.0, 1.0),
                "open": False
            })))
@pytest.mark.parametrize("cls, constructor_kwargs",
                         ((cls, constructor_kwargs)
                          for cls, arg_list in _test_args
                          for constructor_kwargs in arg_list),
                         ids=_TestCounter())
def test_valid_construction(cls, constructor_kwargs):
    """Construct each wall type and check attributes round-trip unchanged."""
    obj = cls(**constructor_kwargs)
    for key, value in constructor_kwargs.items():
        # allclose handles the float tuples (origin/axis/normal) uniformly.
        assert np.allclose(getattr(obj, key), value)
@pytest.mark.parametrize("cls, constructor_kwargs",
                         ((cls, arg_list[0]) for cls, arg_list in _test_args),
                         ids=_TestCounter())
def test_immutability(cls, constructor_kwargs):
    """Wall data objects are read-only: every attribute set must raise."""
    obj = cls(**constructor_kwargs)
    for key, value in constructor_kwargs.items():
        with pytest.raises(AttributeError):
            setattr(obj, key, value)
| StarcoderdataPython |
1710056 | <reponame>An00bRektn/CTF
#!/usr/bin/python3
from pwn import *
from hashlib import sha256
BLOCK_SIZE = 32  # cipher block length in bytes


def decrypt_block(block, secret):
    """Decrypt one BLOCK_SIZE-byte block by byte-wise modular subtraction.

    Each plaintext byte is ``(block[i] - secret[i]) % 256``. Built in a
    single pass instead of the original quadratic ``bytes +=`` accumulation.
    """
    return bytes((block[i] - secret[i]) % 256 for i in range(BLOCK_SIZE))
# Known-plaintext attack: the first ciphertext block encrypts the fixed
# banner "Command executed: cat secret.txt", which lets us recover the
# initial key byte-by-byte; subsequent block keys are chained as
# sha256(ciphertext_block || plaintext_block).
r = remote('172.16.17.32',30855)
message = b'Command executed: cat secret.txt'
r.sendlineafter('>', 'cat secret.txt')
ct = r.recvlineS().strip()
# Split the hex ciphertext into 64-hex-char (32-byte) blocks.
blocks = [ct[i:i+64] for i in range(0, len(ct), 64)]
ref = bytes.fromhex(blocks[0])
init_key = b''
p = log.progress('brute forcing initial key...')
for i in range(BLOCK_SIZE):
    # Exactly one guess satisfies (message[i]+guess) % 256 == ref[i], so the
    # inner loop appends a single key byte per position (no break needed).
    for guess in range(256):
        val = (message[i]+guess) % 256
        p.status(f'val: {val}\nref: {ref[i]}\nrec: {init_key}')
        if val == ref[i]:
            init_key += bytes([guess])
info(f'init_key: {init_key.hex()}')
# Decrypt all blocks, rolling the key forward with the sha256 chain.
h = init_key
plaintext = b''
for block in blocks:
    block = bytes.fromhex(block)
    dec_block = decrypt_block(block, h)
    h = sha256(block + dec_block).digest()
    plaintext += dec_block
success(plaintext.decode('utf-8'))
| StarcoderdataPython |
3331814 | import unittest
import numpy as np
from toptim.optimizer import ParametersSet, create_engine
class ParametersSetTest(unittest.TestCase):
def test_Create_DataProvided_StoreValuesAsArray(self):
data = [1., 2., 3.]
_set = self._create_set(data)
self.assertTrue(isinstance(_set.values, np.ndarray))
np.testing.assert_array_equal(np.array(data), _set.values)
def test_Change_Always_ReturnCloneWithChangedData(self):
_set = self._create_set([1.0])
result = _set.change([2.2])
self._check_if_clone(_set, result)
self._check_result([2.2], result)
def test_Change_MaxCorrection_ReturnCloneClippedData(self):
max_correction = 0.1
old_values = [1., 2., 0., 0.1, 0.2]
new_values = [-1, 0.5, 2., 0.1, 0.25]
_set = self._create_set(old_values)
result = _set.change(new_values, max_correction)
self._check_if_clone(_set, result)
self._check_result([1. - max_correction, 2. - max_correction, 0. + max_correction, 0.1, 0.25], result)
def test_Clip_ScalarLimit_ReturnCloneDataWithinGivenLimits(self):
_set = self._create_set([0., -1., 2., 3.])
result = _set.clip(lower=0.5, upper=2.5)
self._check_if_clone(_set, result)
self._check_result([0.5, 0.5, 2., 2.5], result)
def test_Clip_ArrayLimit_ReturnCloneDataWithinIndividualLimits(self):
_set = self._create_set([0., -1., 2., 3.])
lower = np.full((4, ), 0.5)
upper = np.full((4, ), 2.5)
result = _set.clip(lower, upper)
self._check_if_clone(_set, result)
self._check_result([0.5, 0.5, 2., 2.5], result)
def test_Clip_Softly_ReturnCloneDataSlightlyExceedingLimits(self):
softening = 1e-6
_set = self._create_set([0., -1., 2., 3.])
result = _set.clip_softly(0.5, 2.5, softening)
self._check_if_clone(_set, result)
self._check_result([0.5 - softening*0.5, 0.5 - softening*1.5, 2., 2.5 + softening*0.5], result)
def test_Clip_SoftlyAndTwoEqualExceedLimit_ReturnCloneDataSlightlyExceedingLimits(self):
softening_ratio = 1e-2
_set = self._create_set([-1., -1.])
result = _set.clip_softly(0.0, 2.0, softening_ratio)
self._check_if_clone(_set, result)
self._check_result([0.0 - 1.0*softening_ratio, 0.0 - 1.0*softening_ratio], result)
def test_Clip_SoftlyAndParametersWithinLimits_ReturnCloneData(self):
tolerance = 0.1
_set = self._create_set([1., 2.])
result = _set.clip_softly(0.5, 2.5, tolerance)
self._check_if_clone(_set, result)
self._check_result([1., 2.], result)
def _check_result(self, expected, result):
np.testing.assert_allclose(
np.array(expected),
result.values
)
def test_Equal_Always_CompareData(self):
set_1 = self._create_set([1., 2.])
set_2 = self._create_set([3., 4.])
set_3 = self._create_set([1., 2.])
self.assertTrue(set_1 == set_3)
self.assertFalse(set_1 == set_2)
    def _check_if_clone(self, original, output):
        """Assert output is a distinct object (operations must not mutate in place)."""
        self.assertNotEqual(id(original), id(output))
    def _create_set(self, data, **kwargs):
        """Factory for the ParametersSet under test; kwargs are forwarded as-is."""
        return ParametersSet(data, **kwargs)
class FullyStressDesignEngineTest(unittest.TestCase):
    """Tests for the 'fully_stress_design' update engine.

    The engine multiplies parameters by a strain field and then enforces
    per-step correction limits, global bounds and an optional volume
    constraint (behavior inferred from the assertions below).
    """

    def test_Update_ParametersWithinLimits_ReturnValuesMultipliedByAbsStrains(self):
        """Without active limits the update is element-wise parameters*field."""
        parameters = ParametersSet([0.1, 0.2])
        field = np.array([0.3, 0.4])
        def constraint_calculator(*args):
            # Constraint always satisfied -> no constraint-driven scaling.
            return 0.
        engine = self._create_engine(constraint_calculator)
        updated = engine.update_parameters(parameters, field)
        np.testing.assert_allclose(
            np.multiply(parameters.values, field),
            updated.values)

    def test_Update_ParametersViolateGlobalLimits_ReturnUpdatedParametersWithinGlobalBoundsWithTolerance(self):
        """Values pushed past the global bounds are soft-clipped within a tolerance."""
        _min, _max = 1e-6, 1. - 1e-6
        parameters = ParametersSet([0.1, 0.2, 0.2])
        field = np.array([30., 0.2, 0.0])
        def constraint_calculator(*args):
            return 0.
        engine = self._create_engine(constraint_calculator, bounds=(_min, _max))
        updated = engine.update_parameters(parameters, field)
        _tol = engine._bounds_softening
        # Overshooting value lands in [max, max + tol] ...
        self.assertTrue(updated.values[0] >= _max)
        self.assertTrue(updated.values[0] <= _max + _tol)
        # ... an in-bounds value is just parameters*field ...
        self.assertAlmostEqual(0.04, updated.values[1])
        # ... and an undershooting value lands in [min - tol, min].
        self.assertTrue(updated.values[-1] <= _min)
        self.assertTrue(updated.values[-1] >= _min - _tol)

    def test_Update_ParametersViolateCorrectionLimit_ReturnUpdatedParametersWithinCorrectionLimit(self):
        """A per-step correction cap limits how far each value may move."""
        max_correction = 0.1
        parameters = ParametersSet([0.1, 0.1, 0.2])
        field = np.array([30., 0.5, 0.0])
        def constraint_calculator(*args):
            return 0.
        engine = self._create_engine(constraint_calculator, max_correction=max_correction)
        updated = engine.update_parameters(parameters, field)
        np.testing.assert_allclose(
            np.array([0.1 + max_correction, 0.05, 0.1]),
            updated.values)

    def test_Update_NoMaxCorrection_ReturnParametersSatisfyingConstraintAndGlobalBounds(self):
        """Without a correction cap the engine drives the constraint to ~0
        while staying inside the global bounds."""
        parameters = ParametersSet([0.1, 0.1, 0.2])
        field = np.array([30., 0.5, 0.0])
        min_bound = 0.1
        max_bound = 0.9
        constraint_calculator = self.create_constraint_calculator(1.)
        engine = self._create_engine(calculate_exceeded_volume=constraint_calculator, bounds=(min_bound, max_bound))
        updated = engine.update_parameters(parameters, field)
        self.assertAlmostEqual(constraint_calculator(updated.values), 0., places=6)
        self.assertTrue(np.all(updated.values >= min_bound))
        self.assertTrue(np.all(updated.values <= max_bound))

    def _create_engine(self, calculate_exceeded_volume, **kwargs):
        """Build the engine under test via the project factory."""
        return create_engine('fully_stress_design', calculate_exceeded_volume, **kwargs)

    def create_constraint_calculator(self, expected_parameters_sum):
        """Constraint = sum(parameters) - target; zero when satisfied."""
        def calc(parameters):
            return np.sum(parameters) - expected_parameters_sum
        return calc
if __name__ == '__main__':
    # Run all tests in this module with the standard unittest runner.
    unittest.main()
| StarcoderdataPython |
6440066 | <reponame>cglewis/snakebite<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import argparse
import sys
import os
import pwd
import json
import xml.etree.ElementTree as ET
from urlparse import urlparse
from snakebite.client import Client
from snakebite.errors import FileNotFoundException
from snakebite.errors import DirectoryException
from snakebite.errors import FileException
from snakebite.errors import RequestError
from snakebite.formatter import format_listing
from snakebite.formatter import format_results
from snakebite.formatter import format_counts
from snakebite.formatter import format_fs_stats
from snakebite.formatter import format_stat
from snakebite.formatter import format_du
def exitError(error):
    """Print a friendly message for known snakebite errors, then exit(-1).

    Unknown exception types are re-raised so programming errors keep their
    traceback instead of being swallowed.
    """
    if isinstance(error, FileNotFoundException) or \
       isinstance(error, DirectoryException) or \
       isinstance(error, FileException):
        print str(error)
    elif isinstance(error, RequestError):
        print "Request error: %s" % str(error)
    else:
        raise error
    sys.exit(-1)
def command(args="", descr="", allowed_opts="", visible=True):
    """Decorator that registers the decorated function in Commands.methods.

    Dispatch happens through the registry, but the function is now also
    returned unchanged: previously `wrap` returned None implicitly, which
    rebound every decorated module-level name to None.
    `f.__name__` is used instead of the legacy alias `f.func_name`
    (identical on Python 2, forward-compatible with Python 3).
    """
    def wrap(f):
        Commands.methods[f.__name__] = {"method": f,
                                        "args": args,
                                        "descr": descr,
                                        "allowed_opts": allowed_opts,
                                        "visible": visible}
        return f  # bug fix: keep the decorated name bound to the function
    return wrap
class Commands(object):
    # Registry of CLI sub-commands, populated by the @command decorator.
    # Maps command name -> dict with keys "method", "args", "descr",
    # "allowed_opts", "visible" (and "req_args" for the later decorator).
    methods = {}
class ArgumentParserError(Exception):
    """Raised by Parser.error() instead of exiting the process.

    CommandLineParser.parse() catches it to print command-specific usage.
    All constructor arguments are now kept as attributes; previously
    stdout, stderr and error_code were accepted but silently dropped.
    """
    def __init__(self, message, error_message, prog, stdout=None, stderr=None, error_code=None):
        Exception.__init__(self, message, stdout, stderr)
        self.message = message            # generic tag, e.g. "SystemExit"
        self.error_message = error_message  # argparse's human-readable error
        self.prog = prog                  # failing (sub-)parser program name
        # Bug fix: retain the remaining arguments for callers/diagnostics.
        self.stdout = stdout
        self.stderr = stderr
        self.error_code = error_code
class Parser(argparse.ArgumentParser):
    """ArgumentParser that never exits: errors raise ArgumentParserError
    and help is rendered from the custom usage/epilog strings."""
    def print_help(self):
        print ''.join([self.usage, self.epilog])

    def error(self, message):  # Override error message to show custom help.
        raise ArgumentParserError("SystemExit", message, self.prog)
class CommandLineParser(object):
    """snakebite CLI front-end: builds the argparse tree, resolves the
    namenode configuration and dispatches registered commands."""

    # Options valid before the sub-command (snakebite -D -n host ls ...).
    GENERIC_OPTS = {'D': {"short": '-D',
                          "long": '--debug',
                          "help": 'Show debug information',
                          "action": 'store_true'},
                    'j': {"short": '-j',
                          "long": '--json',
                          "help": 'JSON output',
                          "action": 'store_true'},
                    'n': {"short": '-n',
                          "long": '--namenode',
                          "help": 'namenode host',
                          "type": str},
                    'V': {"short": '-V',
                          "long": '--version',
                          # Bug fix: the help text claimed "(default:8)"
                          # while the actual default (here and in
                          # read_config's config.get('version', 7)) is 7.
                          "help": 'Hadoop protocol version (default:7)',
                          "default": 7,
                          "type": float},
                    'p': {"short": '-p',
                          "long": '--port',
                          "help": 'namenode RPC port',
                          "type": int},
                    'H': {"short": '-h',
                          "long": '--human',
                          "help": 'human readable output',
                          "action": 'store_true'}
                    }
    # Options attached per sub-command according to its 'allowed_opts'.
    SUB_OPTS = {'R': {"short": '-R',
                      "long": '--recurse',
                      "help": 'recurse into subdirectories',
                      "action": 'store_true'},
                'd': {"short": '-d',
                      "long": '--directory',
                      "help": 'show only the path and no children / check if path is a dir',
                      "action": 'store_true'},
                's': {"short": '-s',
                      "long": '--summary',
                      "help": 'print summarized output',
                      "action": 'store_true'},
                'z': {"short": '-z',
                      "long": '--zero',
                      "help": 'check for zero length',
                      "action": 'store_true'},
                'e': {"short": '-e',
                      "long": '--exists',
                      "help": 'check if file exists',
                      "action": 'store_true'},
                'checkcrc': {"short": '-checkcrc',
                             "long": "--checkcrc",
                             "help": 'check Crc',
                             "action": 'store_true'},
                'f': {"short": '-f',
                      "long": "--append",
                      "help": 'show appended data as the file grows',
                      "action": 'store_true'},
                'nl': {"short": '-nl',
                       "long": "--newline",
                       "help": 'add a newline character at the end of each file.',
                       "action": 'store_true'}
                }
    def __init__(self):
        """Build the top-level Parser: usage line, epilog listing general
        options plus all visible registered commands, then attach the
        generic options and one sub-parser per command."""
        usage = "snakebite [general options] cmd [arguments]"
        epilog = "\ngeneral options:\n"
        epilog += "\n".join(sorted([" %-30s %s" % ("%s %s" % (v['short'], v['long']), v['help']) for k, v in self.GENERIC_OPTS.iteritems()]))
        epilog += "\n\ncommands:\n"
        epilog += "\n".join(sorted([" %-30s %s" % ("%s %s" % (k, v['args']), v['descr']) for k, v in Commands.methods.iteritems() if v['visible']]))
        epilog += "\n\nto see command-specific options use: snakebite [cmd] --help"
        # add_help=False because '-h' is reused for '--human'.
        self.parser = Parser(usage=usage, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter, add_help=False)
        self._build_parent_parser()
        self._add_subparsers()
    def _build_parent_parser(self):
        """Register every GENERIC_OPTS entry on the top-level parser.

        Flag-style entries carry an 'action'; value-style entries carry a
        'type' and optionally a 'default'.
        """
        #general options
        for opt_name, opt_data in self.GENERIC_OPTS.iteritems():
            if 'action' in opt_data:
                self.parser.add_argument(opt_data['short'], opt_data['long'], help=opt_data['help'], action=opt_data['action'])
            else:
                if 'default' in opt_data:
                    self.parser.add_argument(opt_data['short'], opt_data['long'], help=opt_data['help'], type=opt_data['type'], default=opt_data['default'])
                else:
                    self.parser.add_argument(opt_data['short'], opt_data['long'], help=opt_data['help'], type=opt_data['type'])
    def _add_subparsers(self):
        """Create one argparse sub-parser per registered command.

        Reusable parent parsers are built first: one per SUB_OPTS entry,
        one for the per-command '-H/--help' flag, and one per positional
        signature. Each command parser then inherits the parents named in
        its 'allowed_opts' / 'req_args' registration data.
        """
        # Default HDFS working directory: /user/<current unix user>.
        default_dir = os.path.join("/user", pwd.getpwuid(os.getuid())[0])
        #sub-options
        arg_parsers = {}
        for opt_name, opt_data in self.SUB_OPTS.iteritems():
            arg_parsers[opt_name] = argparse.ArgumentParser(add_help=False)
            arg_parsers[opt_name].add_argument(opt_data['short'], opt_data['long'], help=opt_data['help'],
                                               action=opt_data['action'])
        subcommand_help_parser = argparse.ArgumentParser(add_help=False)
        subcommand_help_parser.add_argument('-H', '--help', action='store_true')
        # NOTE: args and dirs are logically equivalent except for default val.
        # Difference in naming gives more valuable error/help output.
        # 0 or more dirs
        positional_arg_parsers = {}
        positional_arg_parsers['[dirs]'] = argparse.ArgumentParser(add_help=False)
        positional_arg_parsers['[dirs]'].add_argument('dir', nargs='*', default=[default_dir], help="[dirs]")
        # 1 or more dirs
        positional_arg_parsers['dir [dirs]'] = argparse.ArgumentParser(add_help=False)
        positional_arg_parsers['dir [dirs]'].add_argument('dir', nargs='+', default=[default_dir], help="dir [dirs]")
        # 2 dirs
        positional_arg_parsers['src dst'] = argparse.ArgumentParser(add_help=False)
        positional_arg_parsers['src dst'].add_argument('src_dst', nargs=2, default=[default_dir], help="src dst")
        # 1 or more args
        positional_arg_parsers['[args]'] = argparse.ArgumentParser(add_help=False)
        positional_arg_parsers['[args]'].add_argument('arg', nargs='*', help="[args]")
        # 1 arg
        positional_arg_parsers['arg'] = argparse.ArgumentParser(add_help=False)
        positional_arg_parsers['arg'].add_argument('single_arg', default=default_dir, help="arg")
        # 1 (integer) arg
        positional_arg_parsers['(int) arg'] = argparse.ArgumentParser(add_help=False)
        positional_arg_parsers['(int) arg'].add_argument('single_int_arg', default='0', help="(integer) arg",
                                                         type=int)
        subparsers = self.parser.add_subparsers()
        for cmd_name, cmd_info in Commands.methods.iteritems():
            parents = [arg_parsers[opt] for opt in cmd_info['allowed_opts'] if opt in arg_parsers]
            parents += [subcommand_help_parser]
            if 'req_args' in cmd_info and not cmd_info['req_args'] is None:
                parents += [positional_arg_parsers[arg] for arg in cmd_info['req_args']]
            command_parser = subparsers.add_parser(cmd_name, add_help=False, parents=parents)
            # Remember which command matched for later dispatch.
            command_parser.set_defaults(command=cmd_name)
def read_config(self):
''' Check if any directory arguments contain hdfs://'''
if self.args and 'dir' in self.args:
dirs_to_check = self.args.dir
if self.args.command == 'mv':
dirs_to_check.append(self.args.single_arg)
for directory in dirs_to_check:
if 'hdfs://' in directory:
parse_result = urlparse(directory)
if not self.args.namenode is None and not self.args.port is None and (self.args.port != parse_result.port or self.args.namenode != parse_result.hostname):
print "error: conflicting nodenames or ports"
sys.exit(-1)
else:
self.args.namenode = parse_result.hostname
self.args.port = parse_result.port
directory = parse_result.path
if self.args.namenode and self.args.port:
return
''' Try to read the config from ~/.snakebiterc and if that doesn't exist, check $HADOOP_HOME/core-site.xml
and create a ~/.snakebiterc from that.
'''
config_file = os.path.join(os.path.expanduser('~'), '.snakebiterc')
try_paths = ['/etc/hadoop/conf/core-site.xml',
'/usr/local/etc/hadoop/conf/core-site.xml',
'/usr/local/hadoop/conf/core-site.xml']
if os.path.exists(config_file):
config = json.loads(open(os.path.join(os.path.expanduser('~'), '.snakebiterc')).read())
self.args.namenode = config['namenode']
self.args.port = config['port']
self.args.version = config.get('version', 7)
elif os.environ.get('HADOOP_HOME'):
hdfs_conf = os.path.join(os.environ['HADOOP_HOME'], 'conf', 'core-site.xml')
self._read_hadoop_config(hdfs_conf, config_file)
else:
# Try to find other paths
for hdfs_conf in try_paths:
self._read_hadoop_config(hdfs_conf, config_file)
# Bail out on the first find
if self.args.namenode and self.args.port:
continue
if self.args.namenode and self.args.port:
return
else:
print "No ~/.snakebiterc found, no HADOOP_HOME set and no -n and -p provided"
print "Tried to find core-site.xml in:"
for hdfs_conf in try_paths:
print " - %s" % hdfs_conf
print "\nYou can manually create ~/.snakebiterc with the following content:"
print '{"namenode": "ip/hostname", "port": 54310, "version": 7}'
sys.exit(1)
def _read_hadoop_config(self, hdfs_conf, config_file):
if os.path.exists(hdfs_conf):
tree = ET.parse(hdfs_conf)
root = tree.getroot()
for p in root.findall("./property"):
if p.findall('name')[0].text == 'fs.defaultFS':
parse_result = urlparse(p.findall('value')[0].text)
# Set config
self.args.namenode = parse_result.hostname
self.args.port = parse_result.port
# Write config to file
f = open(config_file, "w")
f.write(json.dumps({"namenode": self.args.namenode, "port": self.args.port, "version": self.args.version}))
f.close()
    def parse(self, non_cli_input=None): # Allow input for testing purposes
        """Parse argv (or *non_cli_input*) and store cmd/args on self.

        Parser errors are caught: with -H/--help the command-specific usage
        is printed, otherwise a short usage + error message, then exit(2).
        """
        if not sys.argv[1:] and not non_cli_input:
            self.parser.print_help()
            sys.exit(-1)
        try:
            args = self.parser.parse_args(non_cli_input)
        except ArgumentParserError, error:
            if "-H" in sys.argv or "--help" in sys.argv: # non cli input?
                commands = [cmd for (cmd, description) in Commands.methods.iteritems() if description['visible'] is True]
                command = error.prog.split()[-1]
                if command in commands:
                    self.usage_helper(command)
                else:
                    self.parser.print_help()
                self.parser.exit(2)
            else:
                self.parser.print_usage(sys.stderr)
                self.parser.exit(2, 'error: %s\n' % (error.error_message))
        self.cmd = args.command
        self.args = args
        return self.args
    def setup_client(self, host, port, hadoop_version):
        """Create the snakebite RPC client used by every command handler."""
        self.client = Client(host, port, hadoop_version)
    def execute(self):
        """Dispatch self.cmd to its registered handler.

        '-H/--help' on a sub-command is rewritten into 'usage <cmd>'.
        Known snakebite exceptions are converted into clean exits by
        exitError(); unknown commands print the global help.
        """
        if self.args.help:
            #if 'ls -H' is called, execute 'usage ls'
            self.args.arg = [self.cmd]
            return Commands.methods['usage']['method'](self)
        if not Commands.methods.get(self.cmd):
            self.parser.print_help()
            sys.exit(-1)
        try:
            return Commands.methods[self.cmd]['method'](self)
        except Exception, e:
            exitError(e)
def command(args="", descr="", allowed_opts="", visible=True, req_args=None):
def wrap(f):
Commands.methods[f.func_name] = {"method": f,
"args": args,
"descr": descr,
"allowed_opts": allowed_opts,
"visible": visible,
"req_args": req_args}
return wrap
    @command(visible=False)
    def commands(self):
        """Print the sorted names of all visible commands (completion helper)."""
        print "\n".join(sorted([k for k, v in Commands.methods.iteritems() if v['visible']]))
    @command(args="[path]", descr="create directories and their parents", visible=False, req_args=['[dirs]'])
    def complete(self):
        """Shell-completion helper: list child paths with spaces escaped.

        NOTE(review): the registered descr looks copy-pasted from mkdirp --
        this command lists paths, it does not create directories; confirm
        and fix the registration string.
        """
        self.args.summary = True
        self.args.directory = False
        self.args.recurse = False
        try:
            for line in self._listing():
                print line.replace(" ", "\\\\ ")
        except FileNotFoundException:
            pass
    @command(args="[paths]", descr="list a path", allowed_opts=["d", "R", "s"], req_args=['[dirs]'])
    def ls(self):
        """List the given paths (formatting handled by _listing)."""
        for line in self._listing():
            print line
    def _listing(self):
        """Yield formatted listing lines for self.args.dir.

        With -d only the path itself is shown (no children, no recursion),
        mirroring the hadoop CLI; otherwise children are listed and -R
        controls recursion.
        """
        # Mimicking hadoop client behaviour
        if self.args.directory:
            include_children = False
            recurse = False
            include_toplevel = True
        else:
            include_children = True
            include_toplevel = False
            recurse = self.args.recurse
        listing = self.client.ls(self.args.dir, recurse=recurse,
                                 include_toplevel=include_toplevel,
                                 include_children=include_children)
        for line in format_listing(listing, json_output=self.args.json,
                                   human_readable=self.args.human,
                                   recursive=recurse,
                                   summary=self.args.summary):
            yield line
    @command(args="[paths]", descr="create directories", req_args=['dir [dirs]'])
    def mkdir(self):
        """Create the given directories (parents must already exist)."""
        creations = self.client.mkdir(self.args.dir)
        for line in format_results(creations, json_output=self.args.json):
            print line
    @command(args="[paths]", descr="create directories and their parents", req_args=['dir [dirs]'])
    def mkdirp(self):
        """Create the given directories, creating missing parents (mkdir -p)."""
        creations = self.client.mkdir(self.args.dir, create_parent=True)
        for line in format_results(creations, json_output=self.args.json):
            print line
    @command(args="<owner:grp> [paths]", descr="change owner", allowed_opts=["R"], req_args=['arg', 'dir [dirs]'])
    def chown(self):
        """Change owner (and optionally group, as owner:grp) of the paths."""
        owner = self.args.single_arg
        try:
            mods = self.client.chown(self.args.dir, owner, recurse=self.args.recurse)
            for line in format_results(mods, json_output=self.args.json):
                print line
        except FileNotFoundException, e:
            exitError(e)
    @command(args="<mode> [paths]", descr="change file mode (octal)", allowed_opts=["R"], req_args=['(int) arg', 'dir [dirs]'])
    def chmod(self):
        """Change the permission bits; the mode argument is read as octal."""
        mode = int(str(self.args.single_int_arg), 8)
        mods = self.client.chmod(self.args.dir, mode, recurse=self.args.recurse)
        for line in format_results(mods, json_output=self.args.json):
            print line
    @command(args="<grp> [paths]", descr="change group", allowed_opts=["R"], req_args=['arg', 'dir [dirs]'])
    def chgrp(self):
        """Change the group of the given paths."""
        grp = self.args.single_arg
        mods = self.client.chgrp(self.args.dir, grp, recurse=self.args.recurse)
        for line in format_results(mods, json_output=self.args.json):
            print line
    @command(args="[paths]", descr="display stats for paths", req_args=['[dirs]'])
    def count(self):
        """Print directory/file/byte counts for the given paths."""
        counts = self.client.count(self.args.dir)
        for line in format_counts(counts, json_output=self.args.json,
                                  human_readable=self.args.human):
            print line
    @command(args="", descr="display fs stats")
    def df(self):
        """Print filesystem capacity/usage statistics (no path arguments)."""
        result = self.client.df()
        for line in format_fs_stats(result, json_output=self.args.json,
                                    human_readable=self.args.human):
            print line
    @command(args="[paths]", descr="display disk usage statistics", allowed_opts=["s"], req_args=['[dirs]'])
    def du(self):
        """Print per-path disk usage; -s aggregates to the top level only."""
        if self.args.summary:
            include_children = False
            include_toplevel = True
        else:
            include_children = True
            include_toplevel = False
        result = self.client.du(self.args.dir, include_toplevel=include_toplevel, include_children=include_children)
        for line in format_du(result, json_output=self.args.json, human_readable=self.args.human):
            print line
    @command(args="[paths] dst", descr="move paths to destination", req_args=['dir [dirs]', 'arg'])
    def mv(self):
        """Rename/move the source paths to the destination path."""
        paths = self.args.dir
        dst = self.args.single_arg
        result = self.client.rename(paths, dst)
        for line in format_results(result, json_output=self.args.json):
            print line
    @command(args="[paths]", descr="remove paths", allowed_opts=["R"], req_args=['dir [dirs]'])
    def rm(self):
        """Delete the given paths; -R removes directories recursively."""
        result = self.client.delete(self.args.dir, recurse=self.args.recurse)
        for line in format_results(result, json_output=self.args.json):
            print line
    @command(args="[paths]", descr="creates a file of zero length", req_args=['dir [dirs]'])
    def touchz(self):
        """Create zero-length files at the given paths."""
        result = self.client.touchz(self.args.dir)
        for line in format_results(result, json_output=self.args.json):
            print line
    @command(args="", descr="show server information")
    def serverdefaults(self):
        """Print the namenode's server default configuration."""
        print self.client.serverdefaults()
    @command(args="[dirs]", descr="delete a directory", req_args=['dir [dirs]'])
    def rmdir(self):
        """Delete the given directories (non-recursive)."""
        result = self.client.rmdir(self.args.dir)
        for line in format_results(result, json_output=self.args.json):
            print line
    @command(args="<rep> [paths]", descr="set replication factor", allowed_opts=['R'], req_args=['(int) arg', 'dir [dirs]'])
    def setrep(self):
        """Set the HDFS replication factor for the given paths."""
        rep_factor = int(self.args.single_int_arg)
        result = self.client.setrep(self.args.dir, rep_factor, recurse=self.args.recurse)
        for line in format_results(result, json_output=self.args.json):
            print line
    @command(args="<cmd>", descr="show cmd usage", req_args=['[args]'])
    def usage(self):
        """Print usage for each named command, or global help when none given."""
        if not 'arg' in self.args or self.args.arg == []:
            self.parser.print_help()
            sys.exit(-1)
        for sub_cmd in self.args.arg:
            self.usage_helper(sub_cmd)
    def usage_helper(self, command):
        """Print the usage line, general options and command-specific
        options for a single registered *command*; unknown names fall back
        to the global help and exit(-1)."""
        cmd_entry = Commands.methods.get(command)
        if not cmd_entry:
            self.parser.print_help()
            sys.exit(-1)
        cmd_args = []
        cmd_descriptions = "\ncommand options: \n"
        allowed_opts = cmd_entry.get('allowed_opts')
        if allowed_opts:
            cmd_args += ["[-%s]" % o for o in allowed_opts]
            cmd_descriptions += "\n".join(sorted([" %-30s %s" % ("%s %s" % (self.SUB_OPTS[o]['short'], self.SUB_OPTS[o]['long']), self.SUB_OPTS[o]['help']) for o in allowed_opts]))
        args = cmd_entry.get('args')
        if args:
            cmd_args.append(args)
        print "usage: snakebite [general options] %s %s" % (command, " ".join(cmd_args))
        general_opts = "\ngeneral options:\n"
        general_opts += "\n".join(sorted([" %-30s %s" % ("%s %s" % (v['short'], v['long']), v['help']) for k, v in self.GENERIC_OPTS.iteritems()]))
        print general_opts
        if allowed_opts:
            print cmd_descriptions
    @command(args="[paths]", descr="stat information", req_args=['dir [dirs]'])
    def stat(self):
        """Print stat metadata for the given paths."""
        print format_stat(self.client.stat(self.args.dir))
    @command(args="path", descr="test a path", allowed_opts=['d', 'z', 'e'], req_args=['arg'])
    def test(self):
        """Exit 0 when the path satisfies the -d/-z/-e checks, else exit 1."""
        path = self.args.single_arg
        if self.client.test(path, exists=self.args.exists, directory=self.args.directory, zero_length=self.args.zero):
            sys.exit(0)
        else:
            sys.exit(1)
    @command(args="[paths]", descr="copy source paths to stdout", allowed_opts=['checkcrc'], req_args=['dir [dirs]'])
    def cat(self):
        """Stream the contents of each path to stdout."""
        for file_to_read in self.client.cat(self.args.dir, check_crc=self.args.checkcrc):
            for load in file_to_read:
                print load
    @command(args="path dst", descr="copy local file reference to destination", req_args=['dir [dirs]', 'arg'])
    def copyFromLocal(self):
        """Upload local files to an HDFS destination."""
        src = self.args.dir
        dst = self.args.single_arg
        result = self.client.copyFromLocal(src, dst)
        for line in format_results(result, json_output=self.args.json):
            print line
    @command(args="[paths] dst", descr="copy paths to local file system destination", allowed_opts=['checkcrc'], req_args=['dir [dirs]', 'arg'])
    def copyToLocal(self):
        """Download HDFS paths to a local destination."""
        paths = self.args.dir
        dst = self.args.single_arg
        result = self.client.copyToLocal(paths, dst, check_crc=self.args.checkcrc)
        for line in format_results(result, json_output=self.args.json):
            print line
    @command(args="[paths] dst", descr="copy files from source to destination", allowed_opts=['checkcrc'], req_args=['dir [dirs]', 'arg'])
    def cp(self):
        """Copy HDFS paths to an HDFS destination."""
        paths = self.args.dir
        dst = self.args.single_arg
        result = self.client.cp(paths, dst, checkcrc=self.args.checkcrc)
        for line in format_results(result, json_output=self.args.json):
            print line
    @command(args="file dst", descr="copy files to local file system destination", allowed_opts=['checkcrc'], req_args=['dir [dirs]', 'arg'])
    def get(self):
        """Alias for copyToLocal: download HDFS paths to a local destination."""
        paths = self.args.dir
        dst = self.args.single_arg
        result = self.client.copyToLocal(paths, dst, check_crc=self.args.checkcrc)
        for line in format_results(result, json_output=self.args.json):
            print line
    @command(args="dir dst", descr="concatenates files in source dir into destination local file", allowed_opts=['nl'], req_args=['src dst'])
    def getmerge(self):
        """Concatenate the files in a source dir into one local file; -nl
        appends a newline after each input file."""
        source = self.args.src_dst[0]
        dst = self.args.src_dst[1]
        result = self.client.getmerge(source, dst, newline=self.args.newline)
        for line in format_results(result, json_output=self.args.json):
            print line
# @command(args="[paths] dst", descr="copy sources from local file system to destination", req_args=['dir [dirs]', 'arg'])
# def put(self):
# paths = self.args.dir
# dst = self.args.single_arg
# result = self.client.put(paths, dst)
# for line in format_results(result, json_output=self.args.json):
# print line
    @command(args="path", descr="display last kilobyte of the file to stdout", allowed_opts=['f'], req_args=['arg'])
    def tail(self):
        """Print the last kilobyte of a file; -f keeps following appends."""
        path = self.args.single_arg
        result = self.client.tail(path, append=self.args.append)
        for line in result:
            print line
    @command(args="path [paths]", descr="output file in text format", allowed_opts=['checkcrc'], req_args=['dir [dirs]'])
    def text(self):
        """Print files decoded to text (e.g. decompressed) to stdout."""
        paths = self.args.dir
        result = self.client.text(paths)
        for line in result:
            print line
| StarcoderdataPython |
4932470 | #
# @lc app=leetcode id=229 lang=python3
#
# [229] Majority Element II
#
import collections
# @lc code=start
class Solution:
def majorityElement(self, nums):
if not nums:
return []
mi = len(nums)/3
d = collections.defaultdict(int)
ans = []
for item in nums:
if d[item] <= mi:
d[item] += 1
if item not in ans and d[item] > mi:
ans.append(item)
return ans
# @lc code=end
if __name__ == '__main__':
    # Bug fix: the original bound the class itself (a = Solution, no
    # parentheses), so a.majorityElement([3, 2, 3]) passed the list as
    # `self` and raised TypeError for the missing `nums` argument.
    solver = Solution()
    print(solver.majorityElement([3, 2, 3]))
| StarcoderdataPython |
# Read three integers, one per line, from stdin.
n = int(input())
m = int(input())
k = int(input())
# Answer "YES" iff k is strictly less than n*m and divisible by n or by m.
# NOTE(review): the original problem statement is unknown -- presumably a
# grid/cutting puzzle where k cells must form whole rows or columns; the
# condition is kept exactly as written.
if k < n * m and ((k % n == 0) or (k % m == 0)):
    print("YES")
else:
    print("NO")
| StarcoderdataPython |
1622934 | <filename>sac/sac_wrapper.py
import os
import random
from collections import deque
from itertools import count
from typing import Tuple
import gym
import numpy as np
import torch
from torch import nn
from torch.distributions.independent import Independent
from torch.utils.tensorboard import SummaryWriter
import sac_constants as const
from networks.sac_networks import PolicyNet, Critic, Curl
from q_learning.replay_buffers import PrioritizedExperienceReplay
from utils import utils
from utils.utils import (
explained_variance,
Transition,
TransitionCurl,
clip_gradients,
polyak_averaging,
compute_grad_norm,
log_network_params,
ValueStats,
normalize_values,
)
class SACWrapper:
    def __init__(
        self, width: int, height: int, num_actions: int, action_limits: torch.tensor, writer: SummaryWriter = None
    ):
        """Build the full SAC setup from sac_constants: prioritized replay
        buffer, twin critic + frozen target critic, squashed-Gaussian
        policy, learnable entropy temperature, and (optionally) the CURL
        contrastive encoder with its own optimizers.

        width/height: input frame size; num_actions: action dimensionality;
        action_limits: per-dimension action bounds passed to the policy.
        """
        self.value_stats = ValueStats()
        self.num_actions = num_actions
        self.writer = writer
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.replay_buffer = PrioritizedExperienceReplay(
            const.REPLAY_BUFFER_LEN,
            const.BATCH_SIZE,
            const.N_STEP_RETURNS,
            const.ALPHA,
            const.BETA0,
            const.REPLAY_DELAY,
            const.GAMMA,
            const.NUM_ENVS,
        )
        self.critic = Critic(
            width,
            height,
            const.FRAMES_STACKED,
            num_actions,
            num_fc_hidden_units=const.NUM_FC_HIDDEN_UNITS,
            num_channels=const.NUM_CHANNELS,
            num_latent_dims=const.NUM_LATENT_DIMS,
        ).to(self.device)
        # Target critic starts as an exact copy and is never trained directly
        # (updated via update_target_networks).
        self.target_critic = Critic(
            width,
            height,
            const.FRAMES_STACKED,
            num_actions,
            num_fc_hidden_units=const.NUM_FC_HIDDEN_UNITS,
            num_channels=const.NUM_CHANNELS,
            num_latent_dims=const.NUM_LATENT_DIMS,
        ).to(self.device)
        self.target_critic.load_state_dict(self.critic.state_dict())
        self.target_critic.eval()
        self.policy_net = PolicyNet(
            width,
            height,
            const.FRAMES_STACKED,
            num_actions,
            action_limits,
            num_fc_hidden_units=const.NUM_FC_HIDDEN_UNITS,
            num_channels=const.NUM_CHANNELS,
            num_latent_dims=const.NUM_LATENT_DIMS,
        ).to(self.device)
        if const.USE_CURL:
            # CURL shares the critic's convolutional encoder with the actor.
            self.policy_net.encoder.load_state_dict(self.critic.encoder.state_dict())
        self.policy_net.share_memory()
        self.policy_optimizer = torch.optim.Adam(self.policy_net.parameters(), lr=const.POLICY_LEARNING_RATE)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=const.Q_VALUE_LEARNING_RATE)
        # Rolling window of episode returns for model-checkpoint selection.
        self.episode_returns = deque([], maxlen=const.EPISODES_PATIENCE)
        self.max_mean_episode_return = -np.inf
        # Entropy temperature is optimized in log-space for positivity.
        self.log_entropy_coeff = torch.log(torch.ones(1, device=self.device) * const.INIT_ENTROPY_COEFF).requires_grad_(
            True
        )
        self.entropy_coeff_optimizer = torch.optim.Adam([self.log_entropy_coeff], lr=const.ENTROPY_LEARNING_RATE)
        # Standard SAC heuristic: target entropy = -|A|.
        self.target_entropy = -float(num_actions)
        if const.USE_CURL:
            self.curl = Curl(const.NUM_LATENT_DIMS, self.critic, self.target_critic)
            self.encoder_optimizer = torch.optim.Adam(self.critic.encoder.parameters(), lr=const.ENCODER_LEARNING_RATE)
            self.curl_optimizer = torch.optim.Adam([self.curl.W], lr=const.ENCODER_LEARNING_RATE)
    def episode_terminated(self, episode_return: float, steps_done: int):
        """Log the finished episode's return and checkpoint the policy
        whenever the running mean over the last EPISODES_PATIENCE episodes
        reaches a new maximum."""
        self.writer.add_scalar("EpisodeReturn/Training", episode_return, steps_done)
        self.episode_returns.append(episode_return)
        running_mean_return = sum(self.episode_returns) / len(self.episode_returns)
        # Only checkpoint once the window is full, to avoid noisy early means.
        if len(self.episode_returns) >= const.EPISODES_PATIENCE and running_mean_return > self.max_mean_episode_return:
            self.max_mean_episode_return = running_mean_return
            best_model_file_path = os.path.join(const.LOG_DIR, "best_policy_net.pth")
            torch.save(self.policy_net, best_model_file_path)
    def update_target_networks(self):
        """Refresh the target critic from the online critic.

        TARGET_UPDATE > 1 means periodic hard copies; otherwise a soft
        Polyak update with interpolation factor TAU is applied to both
        Q-heads and the encoder.
        """
        with torch.no_grad():
            if const.TARGET_UPDATE > 1:
                self.target_critic.q_net1.load_state_dict(self.critic.q_net1.state_dict())
                self.target_critic.q_net2.load_state_dict(self.critic.q_net2.state_dict())
                self.target_critic.encoder.load_state_dict(self.critic.encoder.state_dict())
            else:
                polyak_averaging(self.target_critic.q_net1, self.critic.q_net1, const.TAU)
                polyak_averaging(self.target_critic.q_net2, self.critic.q_net2, const.TAU)
                polyak_averaging(self.target_critic.encoder, self.critic.encoder, const.TAU)
@staticmethod
def get_policy_loss(log_probs: torch.tensor, q_values: torch.tensor, ent_coeff: torch.tensor) -> torch.tensor:
return (ent_coeff.detach() * log_probs - q_values).mean()
@staticmethod
def get_q_value_loss(estimated_q_values: torch.tensor, q_value_targets: torch.tensor) -> torch.Tensor:
q_value_loss_fn = nn.MSELoss(reduction="none")
return q_value_loss_fn(estimated_q_values, q_value_targets.detach())
    def prepare_batch_data(
        self,
    ) -> Tuple[torch.tensor, torch.tensor, torch.tensor, torch.tensor, torch.tensor, torch.tensor, np.array]:
        """Sample a prioritized batch and move all tensors to self.device.

        Returns (state, action, reward, next_state, done, IS-weights,
        buffer indices); the indices are needed later for priority updates.
        """
        transitions, indices, weights = self.replay_buffer.sample()
        weights = torch.FloatTensor(weights).to(self.device)
        # Transpose the list of Transition tuples into one Transition of lists.
        batch = Transition(*zip(*transitions))
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = (
            torch.cat(batch.state),
            torch.cat(batch.action),
            torch.cat(batch.reward),
            torch.cat(batch.next_state),
            torch.cat(batch.done),
        )
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = (
            state_batch.to(self.device),
            action_batch.to(self.device),
            reward_batch.to(self.device),
            next_state_batch.to(self.device),
            done_batch.to(self.device),
        )
        return state_batch, action_batch, reward_batch, next_state_batch, done_batch, weights, indices
    def prepare_batch_data_curl(
        self,
    ) -> Tuple[
        torch.tensor,
        torch.tensor,
        torch.tensor,
        torch.tensor,
        torch.tensor,
        torch.tensor,
        torch.tensor,
        torch.Tensor,
        np.array,
    ]:
        """Sample a prioritized batch for CURL training and move it to
        self.device.

        Like prepare_batch_data, but each transition additionally carries
        two augmented views of the state (anchor and target crops, sized by
        const.INPUT_SIZE) for the contrastive loss. Returns (state, anchor,
        target, action, reward, next_state, done, IS-weights, indices).
        """
        transitions, indices, weights = self.replay_buffer.sample_curl(const.INPUT_SIZE)
        weights = torch.FloatTensor(weights).to(self.device)
        # Transpose list-of-tuples into one TransitionCurl of lists.
        batch = TransitionCurl(*zip(*transitions))
        (
            state_batch,
            state_anchor_batch,
            state_target_batch,
            action_batch,
            reward_batch,
            next_state_batch,
            done_batch,
        ) = (
            torch.cat(batch.state),
            torch.cat(batch.state_anchor),
            torch.cat(batch.state_target),
            torch.cat(batch.action),
            torch.cat(batch.reward),
            torch.cat(batch.next_state),
            torch.cat(batch.done),
        )
        (
            state_batch,
            state_anchor_batch,
            state_target_batch,
            action_batch,
            reward_batch,
            next_state_batch,
            done_batch,
        ) = (
            state_batch.to(self.device),
            state_anchor_batch.to(self.device),
            state_target_batch.to(self.device),
            action_batch.to(self.device),
            reward_batch.to(self.device),
            next_state_batch.to(self.device),
            done_batch.to(self.device),
        )
        return (
            state_batch,
            state_anchor_batch,
            state_target_batch,
            action_batch,
            reward_batch,
            next_state_batch,
            done_batch,
            weights,
            indices,
        )
    def optimize_entropy_coeff(self, log_prob_batch: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """One gradient step on the entropy temperature alpha.

        Drives the policy entropy toward self.target_entropy; alpha is
        parameterized in log-space to stay positive. Returns the (detached)
        current alpha and the temperature loss.
        """
        entropy_coeff = torch.exp(self.log_entropy_coeff).detach()
        entropy_coeff_loss = -(self.log_entropy_coeff * (log_prob_batch + self.target_entropy).detach()).mean()
        self.entropy_coeff_optimizer.zero_grad()
        entropy_coeff_loss.backward()
        self.entropy_coeff_optimizer.step()
        return entropy_coeff, entropy_coeff_loss
    def optimize_critic(
        self,
        state_batch: torch.Tensor,
        action_batch: torch.Tensor,
        next_state_batch: torch.Tensor,
        reward_batch: torch.Tensor,
        done_batch: torch.Tensor,
        entropy_coeff: torch.Tensor,
        weights: torch.Tensor,
        indices: np.array,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """One gradient step on both Q-heads toward the soft Bellman target.

        The target uses the frozen target critic's min-Q on a freshly
        sampled next action minus the entropy bonus; replay priorities are
        refreshed from the summed per-sample TD errors. Returns
        (weighted loss, Q1 estimates, targets) for logging.
        """
        with torch.no_grad():
            # Next action sampled from the current policy (no reparam needed,
            # the target is not differentiated).
            new_next_action_batch, new_next_log_prob_batch, _ = self.policy_net.evaluate(
                next_state_batch, reparameterize=False
            )
            target_q1_value, target_q2_value = self.target_critic(next_state_batch, new_next_action_batch)
            # Clipped double-Q: take the pessimistic estimate.
            target_next_q_values = torch.min(target_q1_value, target_q2_value)
            target_q_values = reward_batch.squeeze() + (const.GAMMA ** const.N_STEP_RETURNS) * (
                1 - done_batch.squeeze()
            ) * (target_next_q_values.squeeze() - entropy_coeff.detach() * new_next_log_prob_batch.squeeze())
            if const.NORMALIZE_VALUES:
                target_q_values = normalize_values(target_q_values, shift_mean=False)
        q1_values, q2_values = self.critic(state_batch, action_batch.unsqueeze(1), detach_encoder=False)
        q1_values, q2_values = q1_values.squeeze(), q2_values.squeeze()
        q_value_losses1 = self.get_q_value_loss(q1_values.float(), target_q_values.float())
        q_value_losses2 = self.get_q_value_loss(q2_values.float(), target_q_values.float())
        q_value_losses = q_value_losses1 + q_value_losses2
        # Importance-sampling weights correct the prioritized-replay bias.
        weighted_q_value_losses = weights * q_value_losses
        weighted_q_value_loss = weighted_q_value_losses.mean()
        self.critic_optimizer.zero_grad()
        weighted_q_value_loss.backward()
        clip_gradients(self.critic, const.CLIP_GRAD)
        self.critic_optimizer.step()
        self.replay_buffer.step()
        # New priorities = TD error magnitude (+epsilon to keep them > 0).
        self.replay_buffer.update(indices, (q_value_losses + 1e-8).view(-1).data.cpu().numpy())
        return weighted_q_value_loss, q1_values, target_q_values
def optimize_actor(
    self,
    state_batch: torch.Tensor,
    new_action_batch: torch.Tensor,
    log_prob_batch: torch.Tensor,
    entropy_coeff: torch.Tensor,
) -> torch.Tensor:
    """One policy-gradient step on the freshly sampled actions."""
    # Score the new actions with both critic heads and keep the element-wise
    # minimum of the two estimates.
    q_head_1, q_head_2 = self.critic(state_batch, new_action_batch, detach_encoder=const.USE_CURL)
    min_q_estimate = torch.min(q_head_1.squeeze(), q_head_2.squeeze())

    actor_loss = self.get_policy_loss(log_prob_batch.float(), min_q_estimate.float(), entropy_coeff)

    self.policy_optimizer.zero_grad()
    actor_loss.backward()
    clip_gradients(self.policy_net, const.CLIP_GRAD)
    self.policy_optimizer.step()
    return actor_loss
def optimize_encoder(self, state_anchor_batch: torch.Tensor, state_target_batch: torch.Tensor):
    """One contrastive (CURL) step; then mirror the critic encoder into the policy."""
    # Embed both augmented views; the target view goes through the target encoder.
    anchor_embedding = self.curl.encode(state_anchor_batch)
    target_embedding = self.curl.encode(state_target_batch, target=True)

    # Contrastive objective: label of row i is i, i.e. each anchor's positive
    # pair sits on the diagonal of the logits matrix.
    similarity_logits = self.curl.compute_logits(anchor_embedding, target_embedding)
    diagonal_labels = torch.arange(similarity_logits.shape[0]).long().to(self.device)
    cross_entropy = nn.CrossEntropyLoss(reduction="mean")
    contrastive_loss = cross_entropy(similarity_logits, diagonal_labels)

    self.encoder_optimizer.zero_grad()
    self.curl_optimizer.zero_grad()
    contrastive_loss.backward()
    clip_gradients(self.curl, const.CLIP_GRAD)
    self.encoder_optimizer.step()
    self.curl_optimizer.step()

    # Keep the policy's encoder weights in lockstep with the critic's.
    self.policy_net.encoder.load_state_dict(self.critic.encoder.state_dict())
def optimize_model(self, steps_done: int):
    """Run ``const.NUM_GRADIENT_STEPS`` full SAC updates.

    Each iteration: sample a batch (plus two augmented views for CURL), update
    the entropy coefficient, both critics and the policy, periodically sync
    the target networks, optionally train the contrastive encoder, and log
    diagnostics when a TensorBoard writer is attached.
    """
    for _ in range(const.NUM_GRADIENT_STEPS):
        (
            state_batch,
            state_anchor_batch,
            state_target_batch,
            action_batch,
            reward_batch,
            next_state_batch,
            done_batch,
            weights,
            indices,
        ) = self.prepare_batch_data_curl()

        # Fresh reparameterized actions for the sampled states; the encoder is
        # detached here when CURL trains it separately.
        new_action_batch, log_prob_batch, new_policy = self.policy_net.evaluate(
            state_batch, reparameterize=True, detach_encoder=const.USE_CURL
        )

        entropy_coeff, entropy_coeff_loss = self.optimize_entropy_coeff(log_prob_batch)
        q_value_loss, q1_values, target_q_values = self.optimize_critic(
            state_batch,
            action_batch,
            next_state_batch,
            reward_batch,
            done_batch,
            entropy_coeff,
            weights,
            indices,
        )
        policy_loss = self.optimize_actor(state_batch, new_action_batch, log_prob_batch, entropy_coeff)

        # Hard/soft sync of the target critics every TARGET_UPDATE steps.
        if steps_done % const.TARGET_UPDATE == 0:
            self.update_target_networks()

        if const.USE_CURL:
            self.optimize_encoder(state_anchor_batch, state_target_batch)

        if self.writer:
            self.track_tensorboard_metrics(
                steps_done,
                policy_loss,
                q_value_loss,
                q1_values,
                target_q_values,
                explained_variance(target_q_values, q1_values),
                new_policy,
                entropy_coeff.item(),
                entropy_coeff_loss,
            )
def track_tensorboard_metrics(
    self,
    steps_done: int,
    policy_loss: float,
    q_value_loss: float,
    estimated_q_values: torch.Tensor,
    target_q_values: torch.Tensor,
    explained_var: float,
    policy: Independent,
    entropy_coeff: float,
    entropy_coeff_loss: float,
):
    """Log per-update SAC diagnostics to TensorBoard.

    Args:
        steps_done: global step used as x-axis for every scalar.
        policy_loss: last actor loss.
        q_value_loss: last weighted critic loss.
        estimated_q_values: Q1 estimates for the sampled batch.
        target_q_values: TD targets for the same batch.
        explained_var: explained variance of estimates vs. targets.
        policy: action distribution produced for the batch.
        entropy_coeff: current temperature value.
        entropy_coeff_loss: loss of the temperature update.
    """
    # Fixed: annotations previously used ``torch.tensor`` (a factory function)
    # instead of the ``torch.Tensor`` type; f-prefixes on constant tags removed.
    self.writer.add_scalar("Policy/Entropy", policy.entropy().mean(), steps_done)
    self.writer.add_scalar("Policy/Entropy-Coefficient", entropy_coeff, steps_done)
    self.writer.add_scalar("Policy/Entropy-Coefficient-Loss", entropy_coeff_loss, steps_done)
    self.writer.add_scalar("Policy/Mean", policy.base_dist.loc.mean(), steps_done)
    self.writer.add_scalar("Policy/Std", policy.base_dist.scale.mean(), steps_done)
    self.writer.add_scalar("Policy/Loss", policy_loss, steps_done)
    self.writer.add_scalar("Q-Value/Loss", q_value_loss, steps_done)
    self.writer.add_scalar("Q-Value/1-Estimation", estimated_q_values.mean(), steps_done)
    self.writer.add_scalar("Q-Value/Target", target_q_values.mean(), steps_done)
    self.writer.add_scalar("Q-Value/Explained-Variance", explained_var, steps_done)
    self.writer.add_scalar("Policy/GradNorm", compute_grad_norm(self.policy_net), steps_done)
    self.writer.add_scalar("Q-Value/GradNorm", compute_grad_norm(self.critic), steps_done)

    # Parameter histograms are expensive; dump them only occasionally.
    if steps_done % 500 == 0:
        log_network_params(self.policy_net, self.writer, steps_done, "Policy-Net")
        log_network_params(self.critic, self.writer, steps_done, "Critic-Net")
def eval_policy(self, steps_done: int):
    """Run ``const.EVAL_EPISODE_COUNT`` evaluation episodes and log the mean return.

    Fix: ``u`` (the last selected action) is now initialized to ``None`` and the
    action-repetition branch is guarded on it. Previously, if the random no-op
    phase ended on a step where ``t % const.ACTION_REPETITIONS != 0`` (e.g.
    ``no_op_steps=1`` with ``ACTION_REPETITIONS=2``), ``u`` was referenced
    before assignment, raising ``UnboundLocalError``.
    """
    print(f"EVALUATE SAC POLICY AFTER {steps_done} STEPS")
    env = gym.make(const.ENV_NAME)
    env.reset()
    episode_returns = np.zeros(const.EVAL_EPISODE_COUNT)
    for episode in range(const.EVAL_EPISODE_COUNT):
        env.reset()
        observation = utils.get_pendulum_screen(env, const.IMAGE_SIZE)
        # Frame stack starts as all-zero frames with the first real observation appended.
        state = deque(
            [torch.zeros(observation.size()) for _ in range(const.FRAMES_STACKED)], maxlen=const.FRAMES_STACKED
        )
        state.append(observation)
        state_tensor = torch.stack(tuple(state), dim=1)
        episode_return = 0
        no_op_steps = random.randint(0, const.NO_OP_MAX_STEPS)
        u = None  # last selected action; None until one has been chosen
        for t in count():
            # Random no-op warm-up phase at the start of the episode.
            if t < no_op_steps:
                _, _, done, _ = env.step(env.action_space.sample())
                if done:
                    break
                continue
            # Repeat the previous action unless it is time to select a new one.
            # The ``u is not None`` guard is the bug fix described above.
            if u is not None and t % const.ACTION_REPETITIONS != 0:
                _, reward, done, _ = env.step(u.cpu().numpy())
                episode_return += reward
                if done:
                    episode_returns[episode] = episode_return
                    break
                continue
            # Select a new action: random before MIN_START_STEPS, greedy policy after.
            if steps_done < const.MIN_START_STEPS:
                u = torch.from_numpy(env.action_space.sample()).to(self.device)
            else:
                _, u = self.policy_net.get_action(
                    utils.center_crop(state_tensor.to(self.device), const.INPUT_SIZE), eval_mode=True
                )
                u = u.squeeze(1)
            _, reward, done, _ = env.step(u.cpu().numpy())
            episode_return += reward
            next_observation = utils.get_pendulum_screen(env, const.IMAGE_SIZE)
            next_state = state.copy()
            next_state.append(next_observation)
            next_state_tensor = torch.stack(tuple(next_state), dim=1)
            if done:
                episode_returns[episode] = episode_return
                break
            state_tensor = next_state_tensor
            state = next_state.copy()
    # NOTE(review): self.writer is used unguarded here while optimize_model
    # checks ``if self.writer`` — confirm a writer is always attached in eval.
    self.writer.add_scalar("EpisodeReturn/Eval", np.mean(episode_returns), steps_done)
    env.render()
    env.close()
| StarcoderdataPython |
11383617 | # -*- coding: utf-8 -*
# Author: <NAME>
# File: li.py
# Create Date: 2017-01-18
class Solution(object):
    def convert(self, s, numRows):
        """Re-order *s* into its zigzag-by-rows reading (LeetCode 6).

        :type s: str
        :type numRows: int
        :rtype: str

        Fixes: ``xrange`` (Python-2-only) replaced with ``range``, and the
        quadratic ``ret += ...`` string build replaced with list + join.
        """
        n = numRows
        if n == 1:
            return s
        jumpSum = 2 * n - 2  # length of one full zigzag cycle
        parts = []
        for i in range(n):
            # Row i alternates between two jump sizes that sum to jumpSum.
            jump = 2 * n - 2 - 2 * i
            j = i
            while j < len(s):
                parts.append(s[j])
                if jump == 0:
                    jump = jumpSum
                j += jump
                jump = jumpSum - jump
        return "".join(parts)
########## test case ##########
s = Solution()
print s.convert("PAYPALISHIRING", 1) # numRows == 1 short-circuits: prints "PAYPALISHIRING" unchanged
# Runtime: 115 ms, O(n)
# (translated) Ouch — the runtime did not actually improve; in this O(n)
# algorithm the constant-factor overhead still dominates.
# Tags: String
| StarcoderdataPython |
9632127 | <gh_stars>0
import numpy as np
import torch
import torch.utils.data as data
import data.util as util
import random
class LDRs_dataset(data.Dataset):
    '''Read LQ images only in the test phase.

    Loads bracketed LDR exposure triplets (``*_short.png``, ``*_medium.png``,
    ``*_long.png``) together with their exposure metadata (``*_exposures.npy``).

    Fix: ``__getitem__`` previously built ``sample`` but never returned it
    (the ``return`` was commented out), so a DataLoader received ``None``.
    '''

    def __init__(self, opt):
        super(LDRs_dataset, self).__init__()
        self.opt = opt
        self.paths_LDRs = None
        self.LDRs_env = None  # environment for lmdb
        self.data_type = opt['data_type']
        # read image list from lmdb or image files
        print(opt['dataroot_LDRs'])
        self.sizes_ldr, self.paths_ldr = util.get_image_paths(self.data_type, opt['dataroot_LDRs'])
        self.paths_short_ldr = util.get_paths(opt['dataroot_LDRs'], '*_short.png')
        self.paths_medium_ldr = util.get_paths(opt['dataroot_LDRs'], '*_medium.png')
        self.paths_long_ldr = util.get_paths(opt['dataroot_LDRs'], '*_long.png')
        self.paths_exposures = util.get_paths(opt['dataroot_LDRs'], '*_exposures.npy')
        assert self.paths_short_ldr, 'Error: LDRs paths are empty.'

    def __getitem__(self, index):
        """Return one sample: stacked LDR triplet, relative exposures, short path."""
        # Exposures expressed relative to the medium exposure (index 1).
        exposures = np.load(self.paths_exposures[index])
        floating_exposures = exposures - exposures[1]
        # Load the three LDR images, normalized to [0, 1].
        ldr_images = []
        short_ldr_paths = self.paths_short_ldr[index]
        short_ldr_images = util.read_imgdata(short_ldr_paths, ratio=255.0)
        medium_ldr_paths = self.paths_medium_ldr[index]
        medium_ldr_images = util.read_imgdata(medium_ldr_paths, ratio=255.0)
        long_ldr_paths = self.paths_long_ldr[index]
        long_ldr_images = util.read_imgdata(long_ldr_paths, ratio=255.0)
        ldr_images.append(short_ldr_images)
        ldr_images.append(medium_ldr_images)
        ldr_images.append(long_ldr_images)
        ldr_images = np.array(ldr_images)
        # HWC -> CHW, then stack the three exposures along a new leading dim.
        img0 = torch.from_numpy(ldr_images[0].astype(np.float32).transpose(2, 0, 1)).unsqueeze(0)
        img1 = torch.from_numpy(ldr_images[1].astype(np.float32).transpose(2, 0, 1)).unsqueeze(0)
        img2 = torch.from_numpy(ldr_images[2].astype(np.float32).transpose(2, 0, 1)).unsqueeze(0)
        img_ldrs = torch.cat((img0, img1, img2))
        sample = {'img_LDRs': img_ldrs, 'float_exp': floating_exposures, 'short_path': short_ldr_paths}
        return sample

    def __len__(self):
        return len(self.paths_exposures)
| StarcoderdataPython |
242474 | <gh_stars>0
# -*- coding: utf-8 -*-
import numpy as np
import math
from scipy.stats.qmc import Sobol, Halton, LatinHypercube
from src.functions.particle import Particle
from src.functions.moment_matching import shift_samples
class Samples:
    """
    Class for generating list of Particles given various initial conditions.
    This will need to become more complex to return samples for different
    sources and sampling distributions.
    """

    def __init__(self, init_data, geometry, mesh):
        self.generator = init_data.generator  # name of the (Q)MC generator to use
        self.RQMC = False  # randomized QMC (scrambling) off by default
        self.geometry = geometry
        self.mesh = mesh
        self.G = init_data.G
        self.N = init_data.N  # particles per source family
        self.Nx = init_data.Nx
        self.totalDim = init_data.totalDim  # number of random dimensions per sample
        self.RB = init_data.RB  # right boundary coordinate
        self.LB = init_data.LB  # left boundary coordinate
        self.left = init_data.left    # emit particles from the left boundary?
        self.right = init_data.right  # emit particles from the right boundary?
        self.moment_match = init_data.moment_match
        if (self.left):
            self.phi_left = init_data.phi_left
        if (self.right):
            self.phi_right = init_data.phi_right

    def GenerateParticles(self, q):
        """Fill ``self.particles`` for source strengths *q*.

        ``self.counter`` selects which column(s) of the random-number matrix
        each particle family consumes, so boundary and volumetric particles
        use disjoint dimensions of the (quasi-)random sequence.
        """
        self.q = q
        self.counter = 0
        self.GetRnMatrix()
        self.particles = []
        if (self.left):
            self.LeftBoundaryParticles()
            self.counter += 1
        if (self.right):
            self.RightBoundaryParticles()
            self.counter += 1
        self.VolumetricParticles()
        self.counter += 2  # used to index the random number matrix
        if (self.moment_match):
            self.moment_matching()

    def VolumetricParticles(self):
        """Create N particles distributed over the volume (two rng columns: pos, mu)."""
        for i in range(self.N):
            randPos = self.rng[i, self.counter]
            randMu = self.rng[i, self.counter + 1]
            pos = self.GetPos(randPos)
            mu = self.GetDir(randMu)
            zone = self.mesh.GetZone(pos, mu)
            weight = self.VolumetricWeight(zone)
            particle = Particle(pos, mu, weight)
            self.particles.append(particle)

    def RightBoundaryParticles(self):
        """Create N inward-directed (mu < 0) particles on the right boundary."""
        for i in range(self.N):
            randMu = self.rng[i, self.counter]
            # Small offsets nudge position/direction strictly inside the domain.
            pos = np.array((self.RB - 1e-9,))
            mu = -np.sqrt(randMu) - 1e-9
            weight = self.BoundaryWeight(self.phi_right)
            particle = Particle(pos, mu, weight)
            self.particles.append(particle)

    def LeftBoundaryParticles(self):
        """Create N inward-directed (mu > 0) particles on the left boundary."""
        for i in range(self.N):
            randMu = self.rng[i, self.counter]
            pos = np.array((self.LB + 1e-9,))
            mu = np.sqrt(randMu) + 1e-9
            weight = self.BoundaryWeight(self.phi_left)
            particle = Particle(pos, mu, weight)
            self.particles.append(particle)

    def RandomMatrix(self):
        """Pseudo-random N x totalDim matrix in [0, 1) with a fixed seed."""
        np.random.seed(12345)
        return np.random.uniform(0, 1, [self.N, self.totalDim])

    def SobolMatrix(self):
        """Sobol sequence matrix.

        NOTE(review): ``random_base2(m=round(log2(N)))`` yields 2**m samples,
        which differs from N when N is not a power of two — confirm intended.
        """
        sampler = Sobol(d=self.totalDim, scramble=self.RQMC)
        m = round(math.log(self.N, 2))
        return sampler.random_base2(m=m)

    def HaltonMatrix(self):
        """Halton sequence matrix of shape (N, totalDim)."""
        sampler = Halton(d=self.totalDim, scramble=self.RQMC)
        return sampler.random(n=self.N)

    def LatinHypercube(self):
        """Latin hypercube sample matrix of shape (N, totalDim)."""
        sampler = LatinHypercube(d=self.totalDim)
        return sampler.random(n=self.N)

    def GetRnMatrix(self):
        """Select and store the sample matrix according to ``self.generator``.

        NOTE(review): an unrecognized generator name leaves ``self.rng`` unset,
        which surfaces later as an AttributeError — consider raising here.
        """
        if (self.generator == "random"):
            self.rng = self.RandomMatrix()
        elif (self.generator == "sobol"):
            self.rng = self.SobolMatrix()
        elif (self.generator == "halton"):
            self.rng = self.HaltonMatrix()
        elif (self.generator == "latin_hypercube"):
            self.rng = self.LatinHypercube()

    def GetPos(self, randPos):
        """Map a uniform [0,1) draw to a position in [LB, RB]."""
        return ((self.RB - self.LB) * randPos + self.LB)

    def GetDir(self, randMu):
        """Map a uniform [0,1) draw to a direction cosine in [-1, 1]."""
        return (2 * randMu - 1)

    def GetR(self, pos):
        """Radial coordinate: Euclidean norm for vectors, |pos| for scalars."""
        if (pos.size > 1):
            return np.sqrt(sum(pos**2))
        else:
            return np.abs(pos)

    def VolumetricWeight(self, zone):
        """Per-particle weight for a volumetric particle born in *zone*."""
        weight = self.q[zone, :] * self.geometry.CellVolume(zone) / self.N * self.Nx
        return weight

    def BoundaryWeight(self, BV):
        # BV: boundary value, i.e. phi_left or phi_right
        return (self.geometry.SurfaceArea() * BV / self.N)

    def moment_matching(self):
        """Shift the volumetric particles so sample moments match the targets."""
        ## Currently only shifting volumetric particles
        ## could shift boundary particle angle in the future
        x = np.zeros(self.N)
        mu = np.zeros(self.N)
        # we only want to shift the volumetric particles not the boundary
        start = 0
        end = self.N
        if (self.left):
            start += self.N
            end += self.N
        if (self.right):
            start += self.N
            end += self.N
        # take angle and position from volumetric particles into new arrays
        count = 0
        for i in range(start, end):
            x[count] = self.particles[i].pos
            mu[count] = self.particles[i].dir
            count += 1
        # shift new arrays
        shifted_x = shift_samples(self.LB, self.RB, x)
        shifted_mu = shift_samples(-1.0, 1.0, mu)
        # put shifted values back into particles
        count = 0
        for j in range(start, end):
            self.particles[j].pos = shifted_x[count]
            self.particles[j].dir = shifted_mu[count]
            count += 1
| StarcoderdataPython |
8131192 | <reponame>Mateus224/pytransform3d-1
"""
==========================================
Construct Rotation Matrix from Two Vectors
==========================================
We compute rotation matrix from two vectors that form a plane. The x-axis will
point in the same direction as the first vector, the y-axis corresponds to the
normalized vector rejection of b on a, and the z-axis is the cross product of
the other basis vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from pytransform3d.rotations import matrix_from_two_vectors, plot_basis, random_vector
from pytransform3d.plot_utils import plot_vector
random_state = np.random.RandomState(1)
a = random_vector(random_state, 3) * 0.3
b = random_vector(random_state, 3) * 0.3
R = matrix_from_two_vectors(a, b)
ax = plot_vector(direction=a, color="r")
plot_vector(ax=ax, direction=b, color="g")
plot_basis(ax=ax, R=R)
plt.show() | StarcoderdataPython |
# Write your code below this row 👇

# Sum of the even numbers 2..100 using an even-stepped range.
# (Renamed from ``sum`` so the builtin of the same name is not shadowed,
# and the manual accumulation loop is replaced by the builtin sum().)
even_total = sum(range(2, 101, 2))
print(even_total)

# The same sum computed by filtering the full 1..100 range.
total = sum(j for j in range(1, 101) if j % 2 == 0)
print(total)
1860149 | '''
Created on 05.04.2021
@author: michael
'''
from os.path import basename
from reportlab.pdfgen.canvas import Canvas
from Asb.ScanConverter.ImageOperations import ImageFileOperations
from Asb.ScanConverter.Services import JobDefinition, FormatConversionService
from Asb.ScanConverter.Ocr.OCR import OcrRunner
from Asb.ScanConverter.Ocr.Alto import AltoPageLayout
import re
from lxml import etree, html
from injector import singleton, inject
from reportlab.lib.utils import ImageReader
import io
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import inch
INVISIBLE = 3
@singleton
class PdfService:
    """Builds PDF files from scanned images, optionally with an invisible OCR text layer."""

    @inject
    def __init__(self,
                 format_conversion_service: FormatConversionService,
                 image_operations: ImageFileOperations,
                 ocr_runner: OcrRunner):
        self.format_conversion_service = format_conversion_service
        self.image_ops = image_operations
        self.ocr_runner = ocr_runner

    def create_pdf_file(self, job: JobDefinition):
        """Render all images of *job* into a single PDF at ``job.output_path``.

        Pages are sized to the image dimensions (using the image's embedded
        dpi when available, 300 otherwise); OCR text is layered in when
        ``job.ocr`` is set.
        """
        image_infos = job.fileinfos
        pdf = Canvas(job.output_path, pageCompression=1)
        pdf.setCreator('Scan-Convert')
        pdf.setTitle(basename(job.output_path))
        dpi = 300  # fallback when the image carries no dpi metadata
        images = self.collect_and_convert_images(image_infos, job)
        images = self.sort_images(images, job.sort)
        for image in images:
            if image is None:
                continue
            width_in_dots, height_in_dots = image.size
            try:
                dpi = image.info['dpi'][0]
            except KeyError:
                pass
            # Convert pixels to PDF points (72 points per inch).
            page_width = width_in_dots * 72 / dpi
            page_height = height_in_dots * 72 / dpi
            pdf.setPageSize((width_in_dots * inch / dpi, height_in_dots * inch / dpi))
            img_stream = io.BytesIO()
            image.save(img_stream, format='png')
            img_stream.seek(0)
            img_reader = ImageReader(img_stream)
            pdf.drawImage(img_reader, 0, 0, width=page_width, height=page_height)
            if job.ocr:
                #alto_layout = self.ocr_runner.get_alto_layout(img)
                # TODO: Configure language
                hocr_layout = self.ocr_runner.get_hocr(image)
                self.add_text_layer_from_hocr(pdf, hocr_layout, page_height, dpi)
            pdf.showPage()
        pdf.save()
        if job.pdfa:
            self._convert_to_pdfa(job)

    def _convert_to_pdfa(self, job: JobDefinition):
        """PDF/A conversion placeholder; deliberately unsupported for now."""
        raise Exception("PDF optimizing currently not supported")

    def collect_and_convert_images(self, infos, job: JobDefinition):
        """Load, convert and (optionally) split every input image into page images."""
        images = []
        for image_info in infos:
            img = self.image_ops.load_image(image_info.filepath)
            converted_img, image_info, job = self.format_conversion_service.perform_changes(img, image_info, job)
            if job.split:
                page_images = self.image_ops.split_image(converted_img)
            else:
                page_images = (converted_img, )
            for image in page_images:
                images.append(image)
        return images

    def sort_images(self, images, sorting):
        """Dispatch to the requested page-reordering strategy (or none)."""
        if sorting is None:
            return images
        if sorting is JobDefinition.SORT_FIRST_PAGE:
            return self.sort_images_first_page(images)
        if sorting is JobDefinition.SORT_SHEETS:
            return self.sort_images_sheets(images)
        raise Exception("Unknown sorting request")

    def sort_images_first_page(self, images):
        """Move the first scanned image to the end of the sequence."""
        first = images[0]
        del(images[0])
        images.append(first)
        return images

    def sort_images_sheets(self, images):
        """Reorder pages scanned as folded sheets into reading order.

        Each physical sheet contributes four pages; the index arithmetic maps
        sheet order (outer/inner faces) back to sequential page numbers.
        """
        filenumbers = []
        for bogen in range(0, int(len(images) / 4)):
            filenumbers.append(len(images) - (bogen * 2 + 1))
            filenumbers.append(0 + bogen * 2)
            filenumbers.append(1 + bogen * 2)
            filenumbers.append(len(images) - (bogen * 2 + 2))
        sorted_images = [None] * len(images)
        for i in range(0, len(filenumbers)):
            sorted_images[filenumbers[i]] = images[i]
        return sorted_images

    def add_text_layer_from_alto(self, pdf: Canvas, alto_layout: AltoPageLayout, page_height: int, dpi: int):
        """Draw an invisible text layer for OCR data"""
        for line in alto_layout.get_all_lines():
            line_bbox = line.get_bounding_box()
            for string in line.get_strings():
                rawtext = string.get_text()
                if rawtext == '':
                    continue
                string_text_width = pdf.stringWidth(rawtext, 'Times-Roman', 8)
                if string_text_width <= 0:
                    continue
                string_bbox = string.get_bounding_box()
                text = pdf.beginText()
                text.setTextRenderMode(INVISIBLE)
                text.setFont('Times-Roman', 8)
                # Pixel coordinates -> points; PDF origin is bottom-left, ALTO's top-left.
                text.setTextOrigin(string_bbox[0] * 72 / dpi, page_height - (line_bbox[3] * 72 / dpi))
                string_bbox_width = string.get_width() * 72 / dpi
                # Stretch the rendered text horizontally to span the OCR bounding box.
                text.setHorizScale(100.0 * string_bbox_width / string_text_width)
                text.textLine(rawtext)
                pdf.drawText(text)

    def add_text_layer_from_hocr(self, pdf, hocr_layout, height, dpi):
        '''
        This terrible code is lifted from https://github.com/ocropus/hocr-tools
        The result is better, because we have baseline information in hocr, which
        is missing in alto.
        '''
        p1 = re.compile(r'bbox((\s+\d+){4})')
        p2 = re.compile(r'baseline((\s+[\d\.\-]+){2})')
        hocr = etree.fromstring(hocr_layout, html.XHTMLParser())
        for line in hocr.xpath('//*[@class="ocr_line"]'):
            bbox_line = p1.search(line.attrib['title']).group(1).split()
            try:
                baseline = p2.search(line.attrib['title']).group(1).split()
            except AttributeError:
                # No baseline info on this line: fall back to a flat baseline.
                baseline = [0, 0]
            bbox_line = [float(i) for i in bbox_line]
            baseline = [float(i) for i in baseline]
            xpath_elements = './/*[@class="ocrx_word"]'
            if (not (line.xpath('boolean(' + xpath_elements + ')'))):
                # if there are no words elements present,
                # we switch to lines as elements
                xpath_elements = '.'
            for word in line.xpath(xpath_elements):
                rawtext = word.text_content().strip()
                if rawtext == '':
                    continue
                text_width = pdf.stringWidth(rawtext, 'Times-Roman', 8)
                if text_width <= 0:
                    continue
                bbox_text = p1.search(word.attrib['title']).group(1).split()
                bbox_text = [float(i) for i in bbox_text]
                # Evaluate the line's baseline at the word's horizontal center.
                baseline_absolute = self.polyval(baseline,
                                                 (bbox_text[0] + bbox_text[2]) / 2 - bbox_line[0]) + bbox_line[3]
                text = pdf.beginText()
                text.setTextRenderMode(3)  # double invisible
                text.setFont('Times-Roman', 8)
                text.setTextOrigin(bbox_text[0] * 72 / dpi, height - baseline_absolute * 72 / dpi)
                bbox_text_width = (bbox_text[2] - bbox_text[0]) * 72 / dpi
                text.setHorizScale(100.0 * bbox_text_width / text_width)
                text.textLine(rawtext)
                pdf.drawText(text)

    def polyval(self, poly, x):
        '''
        WTF? Polyval? Really? "poly" is a tuple, decribed in the documentation
        this way: The two numbers for the baseline are the slope (1st number) and
        constant term (2nd number) of a linear equation describing the baseline
        relative to the bottom left corner of the bounding box
        '''
        return x * poly[0] + poly[1]
| StarcoderdataPython |
11305793 | <gh_stars>10-100
#!/usr/bin/env python
'''
A script to convert variant output from the ncov pipeline.
'''
import os
import os.path
import sys
import csv
import argparse
import vcf
import re
class Variant(object):
    """Wrapper around one row of the ncov pipeline's ``variants.tsv`` file."""

    def __init__(self, var):
        '''
        Store the raw row: a dict with at least POS, REF and ALT keys.
        '''
        self.var = var

    def convert_var_to_annovar(self, chr='NC_045512v2'):
        '''
        Translate this variant into an ANNOVAR compatible record (a dict with
        chr/start/end/ref/alt keys). Insertions and deletions are encoded with
        '-' on the missing side, following ANNOVAR's convention.
        '''
        pos = int(self.var['POS'])
        alt_field = self.var['ALT']
        if self.is_insertion():
            # Inserted bases follow the '+' prefix in the ALT field.
            start = str(pos + 1)
            end = str(pos + 1)
            ref = '-'
            alt = re.sub('^[+]', '', alt_field)
        elif self.is_deletion():
            # Deleted bases follow the '-' prefix in the ALT field.
            ref = re.sub('^[-]', '', alt_field)
            alt = '-'
            start = str(pos + 1)
            end = pos + len(ref)
        else:
            # Plain substitution: single-position record straight from the row.
            start = str(pos)
            end = str(pos)
            ref = self.var['REF']
            alt = alt_field
        return {'chr': chr,
                'start': start,
                'end': end,
                'ref': ref,
                'alt': alt}

    def is_insertion(self):
        '''
        check if variant is an insertion ('+'-prefixed ALT)
        '''
        return self.var['ALT'].startswith('+')

    def is_deletion(self):
        '''
        check if variant is a deletion ('-'-prefixed ALT)
        '''
        return self.var['ALT'].startswith('-')
def convert_vcf_to_annovar(var, chr='NC_045512v2'):
    '''
    Translate a PyVCF record into an ANNOVAR compatible dict
    (chr/start/end/ref/alt). Returns None for records that are neither
    indels nor SNPs, after printing a notice.
    '''
    if var.is_indel:
        if var.is_deletion:
            # Strip the retained ALT allele out of REF to isolate the deleted bases.
            ref = re.sub(str(var.ALT[0]), '', var.REF)
            alt = '-'
            start = var.POS + 1
            end = var.POS + len(str(ref))
        else:
            # Insertion: strip REF out of ALT to isolate the inserted bases.
            start = var.POS + 1
            end = var.POS + 1
            ref = '-'
            alt = re.sub(var.REF, '', str(var.ALT[0]))
    elif var.is_snp:
        # Substitution at a single position.
        start = var.POS
        end = var.POS
        ref = var.REF
        alt = var.ALT[0]
    else:
        print("invalid variant")
        return
    return {'chr': chr,
            'start': start,
            'end': end,
            'ref': ref,
            'alt': alt}
def main():
    '''
    Main routine for the script: parse CLI arguments, read variants from
    either a .variants.tsv or a .pass.vcf(.gz) file, and write them out as
    delimiter-separated ANNOVAR input rows (one per variant, sample name last).
    '''
    parser = argparse.ArgumentParser(description='convert variants to annovar input')
    parser.add_argument('-f', '--file', help='variant file to convert')
    parser.add_argument('-o', '--output', help='output filename')
    parser.add_argument('-d', '--delimiter', default='\t', help='column delimiter for output')
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()
    vars = list()
    # check for filetype
    # NOTE(review): if the input matches neither suffix, ``samplename`` is
    # never assigned; it is only referenced when ``vars`` is non-empty, but
    # consider failing fast on unknown file types.
    if args.file.endswith('.variants.tsv'):
        samplename = os.path.basename(args.file).replace('.variants.tsv', '')
        with open(args.file, 'r') as fp:
            var_reader = csv.DictReader(fp, delimiter='\t')
            for var in var_reader:
                ann_var = Variant(var=var)
                vars.append(ann_var.convert_var_to_annovar())
    if args.file.endswith('.pass.vcf') or args.file.endswith('.pass.vcf.gz'):
        if args.file.endswith('.pass.vcf'):
            samplename = os.path.basename(args.file).replace('.pass.vcf', '')
        elif args.file.endswith('.pass.vcf.gz'):
            samplename = os.path.basename(args.file).replace('.pass.vcf.gz', '')
        vcf_reader = vcf.Reader(filename=args.file)
        for var in vcf_reader:
            vars.append(convert_vcf_to_annovar(var=var))
    # One output row per variant: chr, start, end, ref, alt, samplename.
    with open(args.output, 'w') as file_o:
        for var in vars:
            file_o.write(args.delimiter.join([var['chr'],
                                              str(var['start']),
                                              str(var['end']),
                                              str(var['ref']),
                                              str(var['alt']),
                                              samplename]))
            file_o.write('\n')


if __name__ == '__main__':
    main()
8101309 | <gh_stars>1-10
import os
import time
from binary_tree import BinaryTree, HuffmanTree
from graph import DirectedGraph, UndirectedGraph, DirectedNetwork, UndirectedNetwork
def flash_message(message=None, keep_time=3):
    """Briefly flash *message* on a cleared console.

    If *message* is truthy: clear the screen, print it, then wait *keep_time*
    seconds. Otherwise: wait first, then clear the screen.

    Fix: the unused assignment ``i = os.system("cls")`` is dropped; the
    original Chinese comment is translated into this docstring.
    """
    if message:
        os.system("cls")  # Windows-only clear; NOTE(review): no-ops noisily on other OSes
        print(message)
        time.sleep(keep_time)
    else:
        time.sleep(keep_time)
        os.system("cls")
def option_catcher(option_handle_callback):
    """Prompt for a numeric menu choice and invoke *option_handle_callback* with it.

    Returns the callback's result, or ``None`` when the input was not a
    number, the option did not exist, or the callback raised; in those cases
    an error message is flashed instead of propagating the exception.
    """
    i = input("请选择:")
    no = None
    try:
        no = int(i)
        try:
            return option_handle_callback(no)
        except Exception as ex:
            # NOTE(review): this broad handler also swallows KeyError raised
            # inside the callback, so the dedicated KeyError branch below is
            # effectively unreachable for callback errors — confirm intended.
            flash_message(str(ex))
    except KeyError:
        flash_message(f"不存在的选项 {no} 请重新选择!")
    except ValueError:
        flash_message(f"无法转换为数字的字符串:{i}")
class BinaryTreeController(object):
    """Binary tree controller: console menu actions for a single BinaryTree."""

    def __init__(self):
        self.binary_tree = None  # type: BinaryTree

    def _raise_if_not_binary_tree(self):
        """Raise when no tree has been built yet (guards the query actions)."""
        if self.binary_tree is None:
            raise Exception("请先构建二叉树!")

    def create(self):
        """Build the tree from a pre-order string entered by the user."""
        os.system("cls")
        code = input(f"请输入前序遍历表示的二叉树(可使用'{BinaryTree.VOID_NODE_PLACEHOLDER}'表示空节点):")
        self.binary_tree = BinaryTree.create(code)
        flash_message("构建完成...")

    def traverse(self):
        """Sub-menu offering pre-/in-/post-order traversal of the tree."""
        if self.binary_tree is None:
            raise Exception("请先构建二叉树!")
        values = []

        def visit_callback(node):
            # Collect node values in visit order.
            values.append(node.value)

        def show_value_and_clear(prefix):
            print(prefix + "".join(values))
            values.clear()

        def pre_order_traverse():
            self.binary_tree.pre_order_traverse(visit_callback)
            os.system("cls")
            show_value_and_clear("前序遍历结果:")
            time.sleep(3)

        def in_order_traverse():
            self.binary_tree.in_order_traverse(visit_callback)
            os.system("cls")
            show_value_and_clear("中序遍历结果:")
            time.sleep(3)

        def post_order_traverse():
            self.binary_tree.post_order_traverse(visit_callback)
            os.system("cls")
            show_value_and_clear("后序遍历结果:")
            time.sleep(3)

        options = {
            1: pre_order_traverse,
            2: in_order_traverse,
            3: post_order_traverse,
            5: quit
        }
        while True:
            print("***********二叉树遍历***********")
            print("* 1 前序遍历 *")
            print("* 2 中序遍历 *")
            print("* 3 后序遍历 *")
            print("* 4 返回上一层 *")
            print("* 5 退出 *")
            print("*******************************")
            try:
                # Option 4 returns True to leave the sub-menu loop.
                if option_catcher(lambda no: True if no == 4 else options[no]()):
                    break
            except Exception:
                pass

    def cal_depth(self):
        """Flash the depth of the current tree."""
        self._raise_if_not_binary_tree()
        flash_message(f"该树的深度为:{self.binary_tree.depth}")

    def cal_end_node_count(self):
        """Flash the number of leaf nodes of the current tree."""
        self._raise_if_not_binary_tree()
        flash_message(f"该树的终端节点数为:{self.binary_tree.end_node_count}")

    def find_parent(self):
        """Look up and flash the parent of a node given by value."""
        self._raise_if_not_binary_tree()
        name = input("请输入欲查找双亲的结点名:")
        result = self.binary_tree.get_node_parent(lambda node: node.value == name)
        if result:
            flash_message(f"{name}的双亲是{result.value}")
        else:
            flash_message(f"未能找到{name}的双亲")

    def find_sibling(self):
        """Look up and flash the sibling of a node given by value."""
        self._raise_if_not_binary_tree()
        name = input("请输入欲查找兄弟的结点名:")
        result = self.binary_tree.get_node_sibling(lambda node: node.value == name)
        if result:
            flash_message(f"{name}的兄弟是{result.value}")
        else:
            flash_message(f"未能找到{name}的兄弟")

    def huffman_coding(self):
        """Read a string plus per-character weights and print its Huffman coding."""
        not_encoding_string = input("请输入一段待编码的字符串:")
        not_encoding_chars = list(not_encoding_string)
        with_weight_chars = {}
        for char in set(not_encoding_chars):
            while True:
                try:
                    weight = int(input(f"请输入{char}的权重:"))
                    with_weight_chars.update({char: weight})
                    break
                except ValueError:
                    print("请输入正确的值!")
        # Clear the prompts immediately before showing the results.
        flash_message(keep_time=0)
        t = HuffmanTree.create(with_weight_chars)
        code_dict = t.dump_code_dict()
        temp_stack = []
        # Emit codes in first-occurrence order of the original string.
        for char_encoding_info in sorted(code_dict.items(), key=lambda k: not_encoding_chars.index(k[0])):
            encoding = ''.join([str(s) for s in char_encoding_info[1]])
            temp_stack.append(encoding)
            print(f"{char_encoding_info[0]}的编码是{encoding}")
        print()  # blank line
        print(f"该字符串'{not_encoding_string}'的哈夫曼编码为:{' '.join(temp_stack)}")
        flash_message()
class GraphController(object):
    """Graph controller: console menu actions over a named collection of graphs."""

    def __init__(self):
        self.graphs = {}

    def _name_maker(self, prefix="未命名"):
        """Generate a not-yet-used graph name of the form ``<prefix>_<n>``."""
        index = 1
        while True:
            new_name = f"{prefix}_{index}"
            if new_name not in [name for name, graph in self.graphs.items()]:
                return new_name
            index += 1

    def _create_graph(self, cls, cls_name, has_order=True, with_weight=True):
        """Interactively build a graph of type *cls* (vertices, then edges).

        *has_order* / *with_weight* only change the prompts and how each edge
        line is parsed (2 vs 3 whitespace-separated fields).
        """
        name_ok = False
        name = None
        while not name_ok:
            name = input(f"请输入{cls_name}的名称(可留空):")
            if name:
                if name in (id_ for id_, graph in self.graphs.items()):
                    # Name collision: ask whether to overwrite the existing graph.
                    while True:
                        result = input("名称已存在,是否覆盖原图?(Y/n)")
                        if result.lower() == "y":
                            self.graphs[name] = cls()
                            name_ok = True
                            break
                        elif result.lower() == "n":
                            break
                else:
                    self.graphs[name] = cls()
                    name_ok = True
            else:
                # Empty input: auto-generate a unique name.
                name = self._name_maker(cls_name)
                self.graphs[name] = cls()
                name_ok = True
        g = self.graphs[name]
        vertex_names = input("请输入顶点名称(使用空白符分隔):\n").split()
        g.extend_vertexes(*vertex_names)
        while True:
            try:
                edge_count = int(input("请输入边的数量:"))
                break
            except ValueError:
                print("请输入正确的值!")
        print("=================输入边信息=====================")
        tips = []
        if has_order:
            tips.append("两个顶点从左到右为代表该边的指向")
        else:
            tips.append("两个顶点不分顺序")
        if with_weight:
            tips.append("使用空白符间隔三个参数,从左到右为 <顶点名称1> <顶点名称2> <边权值>")
        else:
            tips.append("使用空白符间隔两个参数,从左到右为 <顶点名称1> <顶点名称2>")
        # print the collected input tips
        for i, tip in enumerate(tips):
            print(f"{i} . {tip}")
        print()  # newline
        for i in range(edge_count):
            while True:
                try:
                    if with_weight:
                        v1, v2, w = input(f"请输入第{i + 1}条边信息:").split()
                        g.add_new_edge(v1, v2, int(w))
                    else:
                        v1, v2 = input(f"请输入第{i + 1}条边信息:").split()
                        g.add_new_edge(v1, v2)
                    break
                except Exception as ex:
                    print("错误:" + str(ex))
        print("================================================")
        print(f"创建的新{cls_name}的名称为:{name}")
        print(f"该{cls_name}的邻接矩阵为:")
        self._print_adjacency_matrix(g)
        flash_message()

    def _print_adjacency_matrix(self, g):
        """Pretty-print g's adjacency matrix with right-aligned columns."""
        # First scan every element of the adjacency matrix to find the maximum value.
        max_elem = 0
        for row in g.adjacency_matrix:
            for elem in row:
                if elem > max_elem:
                    max_elem = elem
        max_elem_len = len(str(max_elem))
        # Print the adjacency matrix.
        for row in g.adjacency_matrix:
            for elem in row:
                # NOTE(review): the ``<=`` comparison against the unreachable
                # sentinel looks inverted (one would expect >=) — confirm the
                # sentinel's semantics in the graph classes.
                if elem <= g.DEFAULT_UNREACHABLE_MAX_VALUE:
                    # Networks print the infinity symbol for unreachable entries.
                    if isinstance(g, (UndirectedNetwork, DirectedNetwork)):
                        print(f"{'∞':>{max_elem_len + 1}}", end='')  # right-aligned, width = max literal length + 1
                    else:
                        print(f"{g.DEFAULT_UNREACHABLE_MAX_VALUE:>{max_elem_len + 1}}", end='')  # right-aligned, width = max literal length + 1
                else:
                    print(f"{elem:>{max_elem_len + 1}}", end='')  # right-aligned, width = max literal length + 1
            print()  # newline

    def _select_graph(self, cls_s, cls_names):
        """Let the user pick one existing graph instance of the given classes."""
        available_graphs = [g_ for g_ in self.graphs.items() if isinstance(g_[1], cls_s)]
        if not available_graphs:
            raise RuntimeError(f"请先创建{', '.join(cls_names)}的实例后再操作!")
        print(f"从下列{', '.join(cls_names)}的实例中选择一个:")
        g_list = []
        for i, g in enumerate(available_graphs):
            g_list.append(g[1])
            print(f"{i}.{g[0]}")
        print()  # blank line

        def handler(no):
            # NOTE(review): bound check uses len(self.graphs) rather than
            # len(available_graphs) — confirm which is intended.
            if not (0 <= no < len(self.graphs)):
                raise KeyError
            return no

        while True:
            selected_graph_no = option_catcher(handler)
            if selected_graph_no is not None:
                break
        return g_list[selected_graph_no]

    def create_undirected_graph(self):
        """Create an undirected, unweighted graph interactively."""
        self._create_graph(UndirectedGraph, "无向图", False, False)

    def create_directed_graph(self):
        """Create a directed, unweighted graph interactively."""
        self._create_graph(DirectedGraph, "有向图", True, False)

    def create_undirected_network(self):
        """Create an undirected, weighted network interactively."""
        self._create_graph(UndirectedNetwork, "无向网", False, True)

    def create_directed_network(self):
        """Create a directed, weighted network interactively."""
        self._create_graph(DirectedNetwork, "有向网", True, True)

    def traverse(self):
        """Run and display BFS and DFS traversals from a user-chosen start vertex."""
        g = self._select_graph((UndirectedGraph, DirectedGraph, DirectedNetwork, UndirectedNetwork),
                               ("无向图", "有向图", "有向网", "无向网"))
        values = []

        def visit_callback(vertex):
            values.append(vertex.name)

        def show_value_and_clear(prefix):
            print(prefix + "".join(values))
            values.clear()

        start_vertex_name = input("请输入遍历起始点的名称:")
        os.system("cls")
        g.bfs_traverse(start_vertex_name, visit_callback)
        show_value_and_clear("BFS 遍历结果:")
        print()
        g.dfs_traverse(start_vertex_name, visit_callback)
        show_value_and_clear("DFS 遍历结果:")
        flash_message()

    def topological_sort(self):
        """Flash a topological ordering of a directed graph/network."""
        g = self._select_graph((DirectedGraph, DirectedNetwork), ("有向图", "有向网"))
        flash_message(f"拓扑排序结果:{''.join([v.name for v in g.topological_sort()])}")

    def get_minimum_spanning_tree(self):
        """Compute and display a network's minimum spanning tree."""
        g = self._select_graph((DirectedNetwork, UndirectedNetwork), ("有向网", "无向网"))
        tree = g.get_minimum_spanning_tree()
        print("该最小生成树的邻接表:")
        for v, adj_vs in tree.adjacency_dict.items():
            print(f"{v.name}: {','.join([v.name for v in adj_vs])}")
        print("该最小生成树的邻接矩阵:")
        self._print_adjacency_matrix(tree)
        flash_message()

    def find_shortest_paths(self):
        """Find and flash the shortest path between two user-chosen vertices."""
        g = self._select_graph((UndirectedGraph, DirectedGraph, DirectedNetwork, UndirectedNetwork),
                               ("无向图", "有向图", "有向网", "无向网"))
        start_vertex_name = input("请输入起始点的名称:")
        end_vertex_name = input("请输入终点的名称:")
        # Dispatch on the graph type: weighted networks use a different
        # shortest-path algorithm than unweighted graphs.
        if isinstance(g, (UndirectedGraph, DirectedGraph)):
            shortest_path = g.find_shortest_path(start_vertex_name, end_vertex_name)
            flash_message(f"最短路径为 {'->'.join([v.name for v in shortest_path])}")
        else:
            shortest_paths = g.find_shortest_paths_with_weight(start_vertex_name, end_vertex_name)
            flash_message(f"最短路径为 {'->'.join([v.name for v in shortest_paths[0][0]])}")

    def find_critical_paths(self):
        """Compute and print the critical path(s) of a directed network."""
        # NOTE(review): the accepted classes include UndirectedNetwork but the
        # displayed names list only "有向网" — confirm which set is intended.
        g = self._select_graph((DirectedNetwork, UndirectedNetwork), ("有向网",))
        paths = g.find_critical_paths()
        os.system("cls")
        print("关键路径有:")
        for path in paths:
            print(f"{'->'.join([v.name for v in path])}")
        flash_message()
def binary_tree():
    """Binary-tree sub-menu loop; returns to the caller on option 8."""
    t = BinaryTreeController()
    options = {
        1: t.create,
        2: t.traverse,
        3: t.cal_depth,
        4: t.cal_end_node_count,
        5: t.find_parent,
        6: t.find_sibling,
        7: t.huffman_coding,
        9: quit
    }
    while True:
        print("**************二叉树的基本操作及应用***************")
        print("* 1 创建二叉树 *")
        print("* 2 遍历二叉树(先/中/后) *")
        print("* 3 计算树的深度 *")
        print("* 4 计算叶子结点个数 *")
        print("* 5 查找双亲 *")
        print("* 6 查找兄弟 *")
        print("* 7 Huffman编码(应用) *")
        print("* 8 返回上一层 *")
        print("* 9 退出 *")
        print("***************************************************")
        # Option 8 returns True to break out of this sub-menu.
        if option_catcher(lambda no: True if no == 8 else options[no]()):
            break
def graph():
    """Run the graph demo menu until the user chooses 10 (back) or 11 (quit)."""
    controller = GraphController()
    actions = {
        1: controller.create_undirected_graph,
        2: controller.create_undirected_network,
        3: controller.create_directed_graph,
        4: controller.create_directed_network,
        5: controller.traverse,
        6: controller.topological_sort,
        7: controller.get_minimum_spanning_tree,
        8: controller.find_shortest_paths,
        9: controller.find_critical_paths,
        11: quit,
    }
    menu = (
        "****************图的基本操作及应用*****************",
        "* 1 创建无向图 *",
        "* 2 创建无向网 *",
        "* 3 创建有向图 *",
        "* 4 创建有向网 *",
        "* 5 遍历 *",
        "* 6 拓扑排序 *",
        "* 7 最小生成树(应用) *",
        "* 8 最短路径(应用) *",
        "* 9 关键路径(应用) *",
        "* 10 返回上一层 *",
        "* 11 退出 *",
        "***************************************************",
    )
    while True:
        for line in menu:
            print(line)
        # Option 10 means "go back": returning True from the handler exits the loop.
        if option_catcher(lambda no: True if no == 10 else actions[no]()):
            break
def quit():
    """Show a goodbye message and terminate the program with exit code 0."""
    flash_message("再见!")
    raise SystemExit(0)
def main():
    """Top-level menu loop: dispatch to the tree demo, the graph demo, or quit."""
    actions = {
        1: binary_tree,
        2: graph,
        3: quit,
    }
    banner = (
        "*******************算法与数据结构******************",
        "* 1 树的基本操作及应用 *",
        "* 2 图的基本操作及应用 *",
        "* 3 退出 *",
        "*************************************************",
    )
    while True:
        for line in banner:
            print(line)
        option_catcher(lambda no: actions[no]())


if __name__ == '__main__':
    main()
| StarcoderdataPython |
31800 | <gh_stars>1-10
import cupy as cp
import numpy as np
import pandas as pd
import itertools
import math
import networkx as nx
from gpucsl.pc.kernel_management import get_module
# Kernel instantiations to compile: "compact<6,6>" is the templated CUDA
# compact kernel specialised for a 6x6 adjacency matrix.
function_names = [
    "compact<6,6>",
]
# PYTHON_TEST is defined so the .cu file exposes its test-only entry points.
module = get_module("helpers/graph_helpers.cu", function_names, ("-D", "PYTHON_TEST"))
def test_compact_on_random_skeleton():
    """The compact kernel rewrites each adjacency row as [degree, neighbour cols...]."""
    kernel = module.get_function("compact<6,6>")
    d_skeleton = cp.array(
        [
            [0, 1, 1, 0, 0, 1],
            [1, 0, 1, 0, 1, 0],
            [0, 0, 0, 0, 0, 0],
            [1, 1, 1, 0, 1, 1],
            [1, 0, 1, 0, 0, 1],
            [0, 1, 1, 1, 0, 0],
        ],
        np.uint16,
    )
    # Each expected row: node degree first, then the column indices of the
    # row's 1-entries, zero-padded to the row width.
    expected_result = np.array(
        [
            [3, 1, 2, 5, 0, 0],
            [3, 0, 2, 4, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [5, 0, 1, 2, 4, 5],
            [3, 0, 2, 5, 0, 0],
            [3, 1, 2, 3, 0, 0],
        ],
        np.uint32,
    )
    d_compacted_skeleton = cp.zeros((6, 6), np.uint32)
    kernel((1,), (6,), (d_skeleton, d_compacted_skeleton, 0, 6))
    # Bug fix: use an exact host-side integer comparison, consistent with the
    # other tests.  cp.isclose was a float-tolerance compare and mixed a host
    # numpy array with a device cupy array.
    assert cp.array_equal(expected_result, d_compacted_skeleton.get())
def test_compact_on_fully_connected_skeleton():
    """On a fully connected 6x6 skeleton every row compacts to degree 5 plus all other columns."""
    kernel = module.get_function("compact<6,6>")
    d_skeleton = cp.ones((6, 6), np.uint16)
    # Row i: degree 5 followed by every column index except i.
    expected_result = np.array(
        [
            [5, 1, 2, 3, 4, 5],
            [5, 0, 2, 3, 4, 5],
            [5, 0, 1, 3, 4, 5],
            [5, 0, 1, 2, 4, 5],
            [5, 0, 1, 2, 3, 5],
            [5, 0, 1, 2, 3, 4],
        ],
        np.uint32,
    )
    d_compacted_skeleton = cp.zeros((6, 6), np.uint32)
    kernel((1,), (6,), (d_skeleton, d_compacted_skeleton, 0, 6))
    # .get() copies the device result to the host for an exact comparison.
    assert cp.array_equal(expected_result, d_compacted_skeleton.get())
def test_compact_on_random_big_skeleton():
    """Smoke/profiling run of the compact kernel on a large random skeleton (no assertions)."""
    kernel = module.get_function("compact<6,6>")
    size = 5000
    d_skeleton = cp.random.choice([0, 1], size=(size, size)).astype(np.uint16)
    # NOTE(review): the output buffer is only 6x6 while the skeleton is
    # size x size and the kernel is the <6,6> specialisation -- looks like a
    # leftover from the small tests; confirm the kernel bounds-checks against
    # its template parameters before trusting this run with size=5000.
    d_compacted_skeleton = cp.zeros((6, 6), np.uint32)
    print((math.ceil(size / 512),))
    print((min(512, size),))
    # Bracket the launch with profiler markers for nvprof/nsys capture.
    cp.cuda.profiler.start()
    kernel(
        (math.ceil(size / 512),),
        (min(512, size),),
        (d_skeleton, d_compacted_skeleton, 0, size),
    )
    cp.cuda.profiler.stop()
| StarcoderdataPython |
5077968 | <reponame>MattTaylorDLS/pymalcolm
import unittest
from mock import patch, ANY
from malcolm.core import call_with_params
from malcolm.modules.ca.parts import CAActionPart
class caint(int):
    """Stand-in for catools' augmented int: a caget value whose `ok` flag reports success."""
    ok = True
@patch("malcolm.modules.ca.parts.caactionpart.CaToolsHelper")
class TestCAActionPart(unittest.TestCase):
    """Unit tests for CAActionPart with the channel-access layer mocked out."""

    def create_part(self, params=None):
        """Build a CAActionPart and collect the (name, meta, writeable_func) tuples it yields."""
        if params is None:
            params = dict(
                name="mname",
                description="desc",
                pv="pv",
            )
        p = call_with_params(CAActionPart, **params)
        self.yielded = list(p.create_method_models())
        return p

    def test_init(self, catools):
        """Defaults: value=1, wait=True, and the method model is named after the part."""
        p = self.create_part()
        assert p.params.pv == "pv"
        assert p.params.value == 1
        assert p.params.wait == True
        assert p.method.description == "desc"
        assert self.yielded == [("mname", ANY, p.caput)]

    def test_reset(self, catools):
        """connect_pvs checks PV connectivity with a single caget of the action PV."""
        p = self.create_part()
        p.catools.caget.reset_mock()
        # caint(4).ok is True, so the connectivity check passes.
        p.catools.caget.return_value = [caint(4)]
        p.connect_pvs("unused context object")
        p.catools.caget.assert_called_with(["pv"])

    def test_caput(self, catools):
        """caput writes the configured value with completion wait and no timeout."""
        p = self.create_part()
        p.catools.caput.reset_mock()
        p.caput()
        p.catools.caput.assert_called_once_with(
            "pv", 1, wait=True, timeout=None)

    def test_caput_status_pv_ok(self, catools):
        """With a status PV reporting the good value, caput completes silently."""
        p = self.create_part(dict(
            name="mname", description="desc", pv="pv", statusPv="spv",
            goodStatus="All Good"))
        p.catools.caput.reset_mock()
        p.catools.caget.return_value = "All Good"
        p.caput()

    def test_caput_status_pv_no_good(self, catools):
        """A bad status value raises with the status embedded in the message."""
        p = self.create_part(dict(
            name="mname", description="desc", pv="pv", statusPv="spv",
            goodStatus="All Good"))
        p.catools.caput.reset_mock()
        p.catools.caget.return_value = "No Good"
        with self.assertRaises(AssertionError) as cm:
            p.caput()
        assert str(cm.exception) == \
            "Status No Good: while performing 'caput -c -w 1000 pv 1'"

    def test_caput_status_pv_message(self, catools):
        """With a message PV configured, its text is appended to the error."""
        p = self.create_part(dict(
            name="mname", description="desc", pv="pv", statusPv="spv",
            goodStatus="All Good", messagePv="mpv"))
        p.catools.caput.reset_mock()
        # First caget returns the status, second returns the message text.
        p.catools.caget.side_effect = ["No Good", "Bad things happened"]
        with self.assertRaises(AssertionError) as cm:
            p.caput()
        assert str(cm.exception) == "Status No Good: Bad things happened: " \
            "while performing 'caput -c -w 1000 pv 1'"
| StarcoderdataPython |
3415753 | <filename>client/src/dolbyio_rest_apis/media/models/result_error.py<gh_stars>1-10
"""
dolbyio_rest_apis.media.models.result_error
~~~~~~~~~~~~~~~
This module contains the Result Error model.
"""
from dolbyio_rest_apis.core.helpers import get_value_or_default
class ResultError(dict):
    """The :class:`ResultError` object, which represents an enhance job error."""

    def __init__(self, dictionary: dict):
        dict.__init__(self, dictionary)
        # Mirror the interesting payload fields as attributes for convenient
        # dotted access; missing keys default to None.
        for key in ('type', 'title', 'detail'):
            setattr(self, key, get_value_or_default(self, key, None))
| StarcoderdataPython |
6601519 | <gh_stars>0
# output parameterized data for a country
from covid_package.libs.aggregate_data import fetch_latest_data_date
def get_data_items(this_data, iso_list, param_list):
    """Build a newline-separated string of the latest values of *param_list* per country.

    ``this_data`` is presumably shaped like
    ``{"AFG": {"<date>": {"gdp_per_capita": 1234, ...}, ...}, ...}``
    (keyed by ISO code, then date, then parameter -- confirm against the caller).
    """
    # Most recent date for which data is available across the requested countries.
    latest_date = fetch_latest_data_date(this_data, iso_list)
    pieces = []
    for iso in iso_list:
        for param in param_list:
            pieces.append("\n {}".format(this_data[iso][latest_date][param]))
    return "".join(pieces)
def main():
    """Stand-alone smoke test: print a few indicators for the USA from the cached data."""
    import sys
    sys.path.append("c:\\Users\\Ipgnosis\\Documents\\Github\\ipgn_covid")
    from covid_package.libs.store_data import read_data
    # read the data file from the data dir
    data = read_data()
    test_country = "USA"
    items_list = ['gdp_per_capita',
                  'total_cases_per_million', 'total_deaths_per_million']
    # Bug fix: get_data_items expects an iterable of ISO codes.  Passing the
    # bare string iterated its characters ("U", "S", "A") and raised KeyError.
    print(get_data_items(data, [test_country], items_list))


# stand alone test run
if __name__ == "__main__":
    main()
| StarcoderdataPython |
4828080 | from django.apps import apps
from django.conf import settings
from django.core.validators import MinValueValidator
from django.db.models import (
Model, PositiveSmallIntegerField, TextField, FloatField, ForeignKey,
OneToOneField, ManyToManyField, BooleanField, DateTimeField, QuerySet,
Max, Sum, F, SET_NULL, CASCADE)
from django.utils.encoding import force_text
from django.utils.formats import date_format
from django.utils.translation import ugettext_lazy as _
from examens.utils import AnnotatedDiff
from libretto.models import Fichier
class Level(Model):
    """One difficulty level of the exam, with its help text and candidate sources."""
    # Levels are taken in ascending `number` order; numbering starts at 1.
    number = PositiveSmallIntegerField(
        _('numéro'), unique=True, default=1,
        validators=[MinValueValidator(1)])
    help_message = TextField(_('message d’aide'))
    # Sources a candidate may be asked to transcribe at this level.
    sources = ManyToManyField(
        'libretto.Source', through='LevelSource', related_name='+',
        verbose_name=_('sources'))

    class Meta:
        verbose_name = _('niveau')
        verbose_name_plural = _('niveaux')
        ordering = ('number',)

    def __str__(self):
        return force_text(self.number)
def limit_choices_to_possible_sources():
    """limit_choices_to filter: only sources with a transcription and at least one image file."""
    return {'pk__in': (apps.get_model('libretto.Source').objects.exclude(
        transcription='').filter(fichiers__type=Fichier.IMAGE))}
class LevelSource(Model):
    """Through-model linking a Level to one of its candidate sources."""
    level = ForeignKey(Level, related_name='level_sources', on_delete=CASCADE,
                       verbose_name=_('niveau'))
    # OneToOne: a source can belong to at most one level.
    source = OneToOneField(
        'libretto.source', limit_choices_to=limit_choices_to_possible_sources,
        related_name='+', verbose_name=_('source'), on_delete=CASCADE)

    class Meta:
        verbose_name = _('source de niveau')
        verbose_name_plural = _('sources de niveau')
class TakenExamQuerySet(QuerySet):
    def get_for_request(self, request):
        """Return (creating it if needed) the TakenExam for this request's user or session."""
        if request.user.is_authenticated():
            return self.get_or_create(user=request.user)[0]
        # Anonymous visitor: force the session row to exist, then key on it.
        if not request.session.modified:
            request.session.save()
        session = apps.get_model('sessions.Session').objects.get(
            pk=request.session.session_key)
        return self.get_or_create(session=session)[0]

    def annotate_time_spent(self):
        """Annotate each exam with `_time_spent`: summed (end - start) over its levels."""
        return self.annotate(_time_spent=Sum(F('taken_levels__end')
                                             - F('taken_levels__start')))
class TakenExam(Model):
    """One candidate's attempt at the whole exam, keyed by user or anonymous session."""
    # Exactly one of user/session is expected to be set: authenticated attempts
    # link to the user, anonymous ones to the Django session.
    user = OneToOneField(
        settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=CASCADE,
        related_name='+', verbose_name=_('utilisateur'))
    session = OneToOneField(
        'sessions.Session', null=True, blank=True, verbose_name=_('session'),
        on_delete=SET_NULL, editable=False, related_name='+')

    objects = TakenExamQuerySet.as_manager()
    objects.use_for_related_fields = True

    class Meta:
        verbose_name = _('examen passé')
        verbose_name_plural = _('examens passés')
        ordering = ('user', 'session')

    def __str__(self):
        return force_text(self.session if self.user is None else self.user)

    @property
    def last_passed_level_number(self):
        """Highest level number passed so far, or 0 when none has been passed."""
        last_passed_level_number = self.taken_levels.filter(
            passed=True).aggregate(n=Max('level__number'))['n']
        if last_passed_level_number is None:
            return 0
        return last_passed_level_number

    # TODO: Probably move this method somewhere else.
    @property
    def max_level_number(self):
        """Highest Level.number defined, i.e. the total number of levels."""
        return Level.objects.aggregate(n=Max('number'))['n']

    def is_complete(self):
        """True once every defined level has been passed."""
        return self.last_passed_level_number == self.max_level_number
    is_complete.short_description = _('est fini')
    is_complete.boolean = True

    @property
    def current_level(self):
        """Level the candidate should take next (cached on the instance)."""
        if not hasattr(self, '_current_level'):
            self._current_level = Level.objects.get(
                number=self.last_passed_level_number + 1)
        return self._current_level

    def get_time_spent(self):
        """Total time spent across levels, via the queryset annotation (cached)."""
        if not hasattr(self, '_time_spent'):
            self._time_spent = self._meta.model.objects.filter(
                pk=self.pk).annotate_time_spent()[0]._time_spent
        return self._time_spent
    get_time_spent.short_description = _('temps passé')
    get_time_spent.admin_order_field = '_time_spent'

    @property
    def last_taken_level(self):
        """Most recently started submitted attempt, or None."""
        return self.taken_levels.was_sent().order_by('-start').first()

    def take_level(self):
        """Create (or resume) a TakenLevel for the current level and return it.

        First attempt: pick a random source.  An unsent attempt already
        exists: resume it unchanged.  Previous attempts were all sent: reuse
        the same source and pre-fill the transcription from the last one.
        """
        current_level = self.current_level
        taken_level = TakenLevel(taken_exam=self, level=current_level)
        taken_for_this_level = self.taken_levels.filter(
            level=self.current_level).order_by('-start')
        last_sent_level = taken_for_this_level.was_sent().first()
        already_taken_level = taken_for_this_level.first()
        if already_taken_level is None:
            # No attempt yet at this level: pick a source at random.
            taken_level.source = current_level.sources.order_by('?')[0]
        elif not already_taken_level.was_sent:
            # An unfinished attempt exists: keep working on it.
            return already_taken_level
        else:
            taken_level.source = already_taken_level.source
        if last_sent_level is not None:
            taken_level.transcription = last_sent_level.transcription
        taken_level.save()
        return taken_level
return taken_level
class TakenLevelQuerySet(QuerySet):
    def was_sent(self):
        """Only attempts that have been submitted (their `end` timestamp is set)."""
        return self.filter(end__isnull=False)
class TakenLevel(Model):
    """One attempt at one level: the candidate's transcription of a source."""
    taken_exam = ForeignKey(TakenExam, related_name='taken_levels',
                            verbose_name=_('examen passé'), editable=False,
                            on_delete=CASCADE)
    level = ForeignKey(
        Level, verbose_name=_('niveau'), editable=False, related_name='+',
        on_delete=CASCADE)
    source = ForeignKey(
        'libretto.Source', verbose_name=_('source'), editable=False,
        related_name='+', on_delete=CASCADE)
    transcription = TextField(verbose_name=_('transcription'))
    score = FloatField(_('note'), null=True, blank=True, editable=False)
    # Score required to pass the level (scores are diff-based, 1.0 = perfect).
    MAX_SCORE = 1.0
    passed = BooleanField(_('passé'), default=False)
    start = DateTimeField(_('début'), auto_now_add=True)
    # Set when the attempt is submitted; None means still in progress.
    end = DateTimeField(_('fin'), null=True, blank=True, editable=False)

    objects = TakenLevelQuerySet.as_manager()
    objects.use_for_related_fields = True

    class Meta:
        verbose_name = _('niveau passé')
        verbose_name_plural = _('niveaux passés')
        ordering = ('start',)

    def __str__(self):
        return '%s, %s' % (self.level,
                           date_format(self.start, 'DATETIME_FORMAT'))

    def save(self, *args, **kwargs):
        """Grade the attempt automatically once it has been submitted, then save."""
        if self.was_sent:
            self.score = self.diff.get_score()
            self.passed = self.score >= self.MAX_SCORE
        super(TakenLevel, self).save(*args, **kwargs)

    @property
    def diff(self):
        """Cached diff between the candidate's transcription and the reference one."""
        if not hasattr(self, '_diff'):
            self._diff = AnnotatedDiff(self.transcription,
                                       self.source.transcription)
        return self._diff

    @property
    def diff_html(self):
        """HTML rendering of the diff for display."""
        return self.diff.get_html()

    @property
    def errors(self):
        """Errors detected by the diff."""
        return self.diff.errors

    @property
    def was_sent(self):
        """Whether the attempt has been submitted (its `end` timestamp is set)."""
        return self.end is not None
| StarcoderdataPython |
3260558 | from unittest.mock import patch
from nose.tools import eq_
from pyecharts.charts import Gauge
@patch("pyecharts.render.engine.write_utf8_html_file")
def test_gauge_base(fake_writer):
    """Rendering a minimal Gauge keeps the default theme and renderer."""
    # "完成率" = "completion rate": one named data point at 66.6.
    c = Gauge().add("", [("完成率", 66.6)])
    c.render()
    # The mocked writer receives (filename, content); content itself is unused here.
    _, content = fake_writer.call_args[0]
    eq_(c.theme, "white")
    eq_(c.renderer, "canvas")
8174558 | import os
from spanner import system
def retrieve_satellite_image_for_lat_lon_grid(df, filename, size=640):
    """Fetch one satellite image covering 1-degree cells around each (lat_int, lon_int) pair.

    Returns *filename*; the download is skipped if the file already exists.
    """
    if os.path.exists(filename):
        return filename
    regions = []
    # One region per distinct integer grid cell, expanded +/- 0.5 degrees.
    for lat, lon in set([tuple(row.tolist()) for row in df[['lat_int', 'lon_int']].values]):
        regions.append([lat - 0.5, lat + 0.5, lon - 0.5, lon + 0.5])
    return retrieve_satellite_image(regions, filename=filename, size=size)
def retrieve_satellite_image_for_region(df, filename, size=640):
    """Fetch one satellite image covering the bounding box of every field in *df*.

    Returns *filename*; the download is skipped if the file already exists.
    """
    if os.path.exists(filename):
        return filename
    regions = []
    for field_id, df_field in df.groupby('Field_id'):
        regions.append(df_to_region(df_field, field_id))
    return retrieve_satellite_image(regions, filename=filename, size=size)
def retrieve_satellite_image_for_field(df, field_id, size=640):
    """Fetch the satellite image for a single field, cached under data/satellite/<id>.png."""
    filename = os.path.join('data', 'satellite', '%s.png' % (field_id))
    if os.path.exists(filename):
        return filename
    return retrieve_satellite_image(df_to_region(df, field_id), filename, size=size)
def df_to_region(df, field_id):
    """Return (lat_min, lat_max, lon_min, lon_max) bounding the points of one field."""
    field_rows = df[df['Field_id'] == field_id]
    lats = field_rows['Latitude']
    lons = field_rows['Longitude']
    return (min(lats), max(lats), min(lons), max(lons))
def retrieve_satellite_image(regions, filename, size=640):
    """Download one Google static-map image annotated with the given regions.

    Each region is (lat_min, lat_max, lon_min, lon_max).  For many regions
    (>50) only corner markers are drawn to keep the URL short; otherwise each
    region is outlined as a red rectangle.  Returns *filename*.
    NOTE: Python 2 source (print statement below).
    """
    if not os.path.exists(filename):
        # google satellite image API
        api = ['https://maps.googleapis.com/maps/api/staticmap?&size={size}x{size}&maptype=hybrid'.format(size=size)]
        if len(regions) > 50:
            # Marker at the (lat_max, lon_max) corner of each region.
            labelarg = 'markers={lat},{lon}'
            for r in regions:
                api.append(labelarg.format(lat=round(r[1], 2), lon=round(r[3], 2)))
                # Static-map URLs are limited in length; stop adding markers
                # once the assembled URL would exceed 2048 characters.
                if len('&'.join(api)) > 2048:
                    print 'URL too long - truncating region labels!'
                    break
        else:
            # Closed red rectangle around each region's bounding box.
            patharg = 'path=color:0xff0000ff|weight:2|{latmn},{lonmn}|{latmn},{lonmx}|{latmx},'\
                      '{lonmx}|{latmx},{lonmn}|{latmn},{lonmn}'
            api.extend([patharg.format(latmn=r[0], latmx=r[1], lonmn=r[2], lonmx=r[3]) for r in regions])
        url = '&'.join(api)
        system.run_command('curl "%s" > %s' % (url, filename))
    return filename
| StarcoderdataPython |
8140891 | <gh_stars>0
# Read n from stdin and print n! computed iteratively.
fatorial = int(input("Digite o valor de n:"))
total = 1
for fator in range(2, fatorial + 1):
    total = total * fator
print(total)
316097 | <gh_stars>1-10
# Public API of this package: only `genome_fetch` is exported via `from ... import *`.
__all__ = ['genome_fetch']
11371216 | <gh_stars>0
# Start with the plain submodule names; re-exported symbols are appended below.
__all__ = ["client", "excs", "resource", "utils", "validate"]
from .__version__ import ( # noqa: F401 imported but unused
__name__,
__about__,
__url__,
__version_info__,
__version__,
__author__,
__author_email__,
__maintainer__,
__license__,
__copyright__,
)
from .client import Aiogoogle, __all__ as _client_all # noqa: F401 imported but unused
from .resource import GoogleAPI, __all__ as _resource_all # noqa: F401 imported but unused
from .excs import ( # noqa: F401 imported but unused
AiogoogleError,
AuthError,
HTTPError,
ValidationError,
__all__ as _exception_all
)
# Extend the package's public API with everything each submodule exports.
__all__.extend(_client_all)
__all__.extend(_resource_all)
__all__.extend(_exception_all)
| StarcoderdataPython |
3548410 | <gh_stars>0
from tkinter import *
HEIGHT = 500  # canvas height in pixels
WIDTH = 800   # canvas width in pixels
window = Tk()
window.title('DISTRUGATORUL DE BULE')
c = Canvas(window, width=WIDTH, height=HEIGHT, bg='darkblue')
c.pack()
# The ship is a triangle inside a circle; both are drawn at the origin and
# then moved together to the centre of the canvas.
ship_id = c.create_polygon(5, 5, 5, 25, 30, 15, fill='red')
ship_id2 = c.create_oval(0, 0, 30, 30, outline='red')
SHIP_R = 15  # ship collision radius (half the 30px circle)
MID_X = WIDTH / 2
MID_Y = HEIGHT / 2
c.move(ship_id, MID_X, MID_Y)
c.move(ship_id2, MID_X, MID_Y)
SHIP_SPD = 10  # pixels moved per arrow-key press
def move_ship(event):
    """Arrow-key handler: move both ship shapes by SHIP_SPD in the pressed direction."""
    deltas = {
        'Up': (0, -SHIP_SPD),
        'Down': (0, SHIP_SPD),
        'Left': (-SHIP_SPD, 0),
        'Right': (SHIP_SPD, 0),
    }
    dx, dy = deltas.get(event.keysym, (0, 0))
    if (dx, dy) != (0, 0):
        c.move(ship_id, dx, dy)
        c.move(ship_id2, dx, dy)
c.bind_all('<Key>', move_ship)
from random import randint
# Parallel lists describing live bubbles: canvas item id, radius, speed.
bub_id = list()
bub_r = list()
bub_speed = list()
MIN_BUB_R = 10    # smallest bubble radius
MAX_BUB_R = 30    # largest bubble radius
MAX_BUB_SPD = 10  # fastest leftward bubble speed (px per frame)
GAP = 100         # off-screen margin where bubbles spawn/despawn
# HUD: static labels plus the two dynamic text items updated each frame.
c.create_text(50, 30, text='TIME', fill='white')
c.create_text(150, 30, text='SCORE', fill='white')
time_text = c.create_text(50, 50, fill='white')
score_text = c.create_text(150, 50, fill='white')
def show_score(score):
    """Update the HUD score text."""
    c.itemconfig(score_text, text=str(score))


def show_time(time_left):
    """Update the HUD remaining-time text."""
    c.itemconfig(time_text, text=str(time_left))


def create_bubble():
    """Spawn one bubble just off the right edge with random y, radius, and speed."""
    x = WIDTH + GAP
    y = randint(0, HEIGHT)
    r = randint(MIN_BUB_R, MAX_BUB_R)
    id1 = c.create_oval(x - r, y - r, x + r, y + r, outline='white')
    bub_id.append(id1)
    bub_r.append(r)
    bub_speed.append(randint(1, MAX_BUB_SPD))


def move_bubbles():
    """Advance every bubble leftwards by its own speed."""
    for i in range(len(bub_id)):
        c.move(bub_id[i], -bub_speed[i], 0)
from time import sleep, time
BUB_CHANGE = 10     # 1-in-10 chance per frame to spawn a new bubble
TIME_LIMIT = 30     # seconds per round
BONUS_SCORE = 1000  # points needed to earn an extra round of time
def get_coords(id_num):
    """Return the centre (x, y) of a canvas item from its bounding-box coords."""
    pos = c.coords(id_num)
    x = (pos[0] + pos[2]) / 2
    # Bug fix: the y centre was computed as pos[1] + pos[3]/2 (missing
    # parentheses), which shifted every distance/cleanup check downwards.
    y = (pos[1] + pos[3]) / 2
    return x, y
def del_bubble(i):
    """Remove bubble *i* from all three parallel lists and erase it from the canvas."""
    del bub_r[i]
    del bub_speed[i]
    c.delete(bub_id[i])
    del bub_id[i]
def clean_up_bubs():
    """Delete bubbles that drifted past the left edge.

    Iterates in reverse so deletions do not shift the indices still to visit.
    """
    for i in range(len(bub_id)-1, -1, -1):
        x, y = get_coords(bub_id[i])
        if x < -GAP:
            del_bubble(i)
from math import sqrt
def distance(id1, id2):
    """Euclidean distance between the centres of two canvas items."""
    x1, y1 = get_coords(id1)
    x2, y2 = get_coords(id2)
    return sqrt((x2 - x1)**2 + (y2 - y1)**2)
def collision():
    """Pop every bubble touching the ship and return the points earned.

    Bigger and faster bubbles are worth more (radius + speed).  Iterates in
    reverse so popping a bubble does not shift the remaining indices.
    """
    points = 0
    for bub in range(len(bub_id)-1, -1, -1):
        if distance(ship_id2, bub_id[bub]) < (SHIP_R + bub_r[bub]):
            points += (bub_r[bub] + bub_speed[bub])
            del_bubble(bub)
    return points
# Main game loop: runs until the clock expires; each BONUS_SCORE points earned
# extends the deadline by another TIME_LIMIT seconds.
score = 0
bonus = 0
end = time() + TIME_LIMIT
while time() < end:
    if randint(1, BUB_CHANGE) == 1:
        create_bubble()
    move_bubbles()
    clean_up_bubs()
    score += collision()
    if (int(score / BONUS_SCORE)) > bonus:
        bonus += 1
        end += TIME_LIMIT
    show_score(score)
    show_time(int(end - time()))
    window.update()
    sleep(0.01)  # ~100 fps cap
# Game over: draw the final banner and score on the canvas.
c.create_text(MID_X, MID_Y, \
    text='GAME OVER', fill='white', font=('Helvetica',30))
c.create_text(MID_X, MID_Y + 30, \
    text='Score: '+ str(score), fill='white')
11379861 | import cv2
import time
import numpy as np
import pickle
# HSV colour range previously calibrated and saved by a companion tool.
# NOTE(review): pickle.load can execute arbitrary code -- only load
# range.pickle files you created yourself.
with open('range.pickle','rb') as f:
    t = pickle.load(f)
lower_red = np.array([t[0],t[1],t[2]])
upper_red = np.array([t[3],t[4],t[5]])
cap = cv2.VideoCapture(0)
time.sleep(3)  # let the camera warm up before capturing the background
background = 0
#initial capture
# Capture several frames and keep the last as the static background plate.
for i in range(50):
    ret,background = cap.read()
background = np.flip(background,axis = 1)  # mirror to match the live view
# "Invisibility cloak" loop: pixels matching the calibrated colour are
# replaced with the background plate, everything else stays live.
while True:
    ret,img = cap.read()
    img = np.flip(img,axis=1)
    hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    mask1 = cv2.inRange(hsv,lower_red,upper_red)
    # Open to drop speckle noise, then dilate to close small holes in the cloak.
    mask1 = cv2.morphologyEx(mask1,cv2.MORPH_OPEN,np.ones((3,3),np.uint8))
    mask1 = cv2.morphologyEx(mask1,cv2.MORPH_DILATE,np.ones((3,3),np.uint8))
    mask2 = cv2.bitwise_not(mask1)
    res1 = cv2.bitwise_and(img,img,mask=mask2)          # live frame outside the cloak
    res2 = cv2.bitwise_and(background,background,mask=mask1)  # background inside it
    final = cv2.addWeighted(res1,1,res2,1,0)
    cv2.imshow("Evanesco",final)
    cv2.waitKey(1)
| StarcoderdataPython |
1843730 | from honeybee_schema.energy.construction import OpaqueConstructionAbridged, \
WindowConstructionAbridged, WindowConstructionShadeAbridged
from copy import copy
from pydantic import ValidationError
import pytest
import os
import json
# target folder where all of the sample construction JSON payloads live
# (two levels up from this test file, under samples/construction).
root = os.path.dirname(os.path.dirname(__file__))
target_folder = os.path.join(root, 'samples', 'construction')
def test_construction_window_double():
    """Sample double-pane window construction parses through the schema."""
    file_path = os.path.join(target_folder, 'construction_window_double.json')
    WindowConstructionAbridged.parse_file(file_path)


def test_construction_window_triple():
    """Sample triple-pane window construction parses through the schema."""
    file_path = os.path.join(target_folder, 'construction_window_triple.json')
    WindowConstructionAbridged.parse_file(file_path)


def test_construction_opaque_door():
    """Sample opaque door construction parses through the schema."""
    file_path = os.path.join(target_folder, 'construction_opaque_door.json')
    OpaqueConstructionAbridged.parse_file(file_path)


def test_construction_opaque_roof():
    """Sample opaque roof construction parses through the schema."""
    file_path = os.path.join(target_folder, 'construction_opaque_roof.json')
    OpaqueConstructionAbridged.parse_file(file_path)


def test_construction_opaque_wall():
    """Sample opaque wall construction parses through the schema."""
    file_path = os.path.join(target_folder, 'construction_opaque_wall.json')
    OpaqueConstructionAbridged.parse_file(file_path)


def test_construction_window_shade():
    """Sample shaded window construction parses through the schema."""
    file_path = os.path.join(target_folder, 'construction_window_shade.json')
    WindowConstructionShadeAbridged.parse_file(file_path)


def test_construction_window_blinds():
    """Sample blinded window construction parses through the schema."""
    file_path = os.path.join(target_folder, 'construction_window_blinds.json')
    WindowConstructionShadeAbridged.parse_file(file_path)
def test_length_opaque():
    """Opaque constructions reject material lists that are too long or empty."""
    file_path = os.path.join(target_folder, 'construction_opaque_wall.json')
    with open(file_path) as json_file:
        construction_wall = json.load(json_file)
    cons_length_test = copy(construction_wall)
    # Pad the layer list past the schema's maximum -> must fail validation.
    for i in range(10):
        cons_length_test['materials'].append('material_{}'.format(i))
    with pytest.raises(ValidationError):
        OpaqueConstructionAbridged.parse_obj(cons_length_test)
    # An empty layer list must also fail validation.
    cons_length_test['materials'] = []
    with pytest.raises(ValidationError):
        OpaqueConstructionAbridged.parse_obj(cons_length_test)
def test_length_window():
    """Window constructions reject material lists that are too long or empty."""
    file_path = os.path.join(target_folder, 'construction_window_double.json')
    with open(file_path) as json_file:
        construction_window = json.load(json_file)
    cons_length_test = copy(construction_window)
    # Pad the layer list past the schema's maximum -> must fail validation.
    for i in range(8):
        cons_length_test['materials'].append('material_{}'.format(i))
    with pytest.raises(ValidationError):
        WindowConstructionAbridged.parse_obj(cons_length_test)
    # An empty layer list must also fail validation.
    cons_length_test['materials'] = []
    with pytest.raises(ValidationError):
        WindowConstructionAbridged.parse_obj(cons_length_test)
| StarcoderdataPython |
3347284 | """"""
import sys
import importlib
import traceback
from typing import Sequence, Any
from pathlib import Path
from datetime import datetime
from threading import Thread
from pandas import DataFrame
from trader.setting import SETTINGS
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.constant import Direction, Offset, OrderType, Interval
from vnpy.trader.object import (
OrderRequest,
HistoryRequest,
SubscribeRequest,
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
LogData,
BarData
)
from vnpy.trader.mddata import mddata_client
APP_NAME = "ScriptTrader"
# Event type fired for log records produced by the script engine.
EVENT_SCRIPT_LOG = "eScriptLog"
class ScriptEngine(BaseEngine):
    """Engine that runs user trading scripts in a thread and exposes a simplified trading API."""

    setting_filename = "script_trader_setting.json"

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Register with the main/event engines; no strategy is running yet."""
        super().__init__(main_engine, event_engine, APP_NAME)
        self.strategy_active = False  # flag scripts poll to know when to stop
        self.strategy_thread = None   # thread running the user script, if any

    def init(self):
        """
        Start script engine.
        """
        result = mddata_client.init()
        # NOTE(review): SETTINGS is invoked like a callable here; in stock vnpy
        # SETTINGS is a plain dict (SETTINGS["mdata.api"]) -- confirm this fork
        # provides a callable settings object.
        md_data_api = SETTINGS("mdata.api")
        if result:
            self.write_log(f"{md_data_api}数据接口初始化成功")

    def start_strategy(self, script_path: str):
        """
        Start running strategy function in strategy_thread.
        """
        if self.strategy_active:
            return
        self.strategy_active = True
        self.strategy_thread = Thread(
            target=self.run_strategy, args=(script_path,))
        self.strategy_thread.start()
        self.write_log("策略交易脚本启动")

    def run_strategy(self, script_path: str):
        """
        Load strategy script and call the run function.
        """
        path = Path(script_path)
        # Make the script's directory importable, then (re)load it by module name.
        sys.path.append(str(path.parent))
        script_name = path.parts[-1]
        module_name = script_name.replace(".py", "")
        try:
            module = importlib.import_module(module_name)
            # Reload so edits to the script are picked up on restart.
            importlib.reload(module)
            module.run(self)
        except: # noqa
            # Deliberately broad: any exception in user code is logged with its
            # traceback instead of killing the engine.
            msg = f"触发异常已停止\n{traceback.format_exc()}"
            self.write_log(msg)

    def stop_strategy(self):
        """
        Stop the running strategy.
        """
        if not self.strategy_active:
            return
        # Clearing the flag signals the script; then wait for its thread to exit.
        self.strategy_active = False
        if self.strategy_thread:
            self.strategy_thread.join()
        self.strategy_thread = None
        self.write_log("策略交易脚本停止")

    def connect_gateway(self, setting: dict, gateway_name: str):
        """Connect a trading gateway using the given settings."""
        self.main_engine.connect(setting, gateway_name)

    def send_order(
        self,
        vt_symbol: str,
        price: float,
        volume: float,
        direction: Direction,
        offset: Offset,
        order_type: OrderType
    ) -> str:
        """Send an order for *vt_symbol*; returns the vt_orderid, or "" if the contract is unknown."""
        contract = self.get_contract(vt_symbol)
        if not contract:
            return ""
        req = OrderRequest(
            symbol=contract.symbol,
            exchange=contract.exchange,
            direction=direction,
            type=order_type,
            volume=volume,
            price=price,
            offset=offset
        )
        vt_orderid = self.main_engine.send_order(req, contract.gateway_name)
        return vt_orderid

    def subscribe(self, vt_symbols):
        """Subscribe to market data for each known symbol; unknown symbols are skipped silently."""
        for vt_symbol in vt_symbols:
            contract = self.main_engine.get_contract(vt_symbol)
            if contract:
                req = SubscribeRequest(
                    symbol=contract.symbol,
                    exchange=contract.exchange
                )
                self.main_engine.subscribe(req, contract.gateway_name)

    def buy(self, vt_symbol: str, price: float, volume: float, order_type: OrderType = OrderType.LIMIT) -> str:
        """Open a long position."""
        return self.send_order(vt_symbol, price, volume, Direction.LONG, Offset.OPEN, order_type)

    def sell(self, vt_symbol: str, price: float, volume: float, order_type: OrderType = OrderType.LIMIT) -> str:
        """Close a long position."""
        return self.send_order(vt_symbol, price, volume, Direction.SHORT, Offset.CLOSE, order_type)

    def short(self, vt_symbol: str, price: float, volume: float, order_type: OrderType = OrderType.LIMIT) -> str:
        """Open a short position."""
        return self.send_order(vt_symbol, price, volume, Direction.SHORT, Offset.OPEN, order_type)

    def cover(self, vt_symbol: str, price: float, volume: float, order_type: OrderType = OrderType.LIMIT) -> str:
        """Close a short position."""
        return self.send_order(vt_symbol, price, volume, Direction.LONG, Offset.CLOSE, order_type)

    def cancel_order(self, vt_orderid: str) -> None:
        """Cancel an active order; does nothing if the order is unknown."""
        order = self.get_order(vt_orderid)
        if not order:
            return
        req = order.create_cancel_request()
        self.main_engine.cancel_order(req, order.gateway_name)

    def get_tick(self, vt_symbol: str, use_df: bool = False) -> TickData:
        """Latest tick for one symbol (DataFrame if use_df)."""
        return get_data(self.main_engine.get_tick, arg=vt_symbol, use_df=use_df)

    def get_ticks(self, vt_symbols: Sequence[str], use_df: bool = False) -> Sequence[TickData]:
        """Latest ticks for several symbols (DataFrame if use_df)."""
        ticks = []
        for vt_symbol in vt_symbols:
            tick = self.main_engine.get_tick(vt_symbol)
            ticks.append(tick)
        if not use_df:
            return ticks
        else:
            return to_df(ticks)

    def get_order(self, vt_orderid: str, use_df: bool = False) -> OrderData:
        """One order by id (DataFrame if use_df)."""
        return get_data(self.main_engine.get_order, arg=vt_orderid, use_df=use_df)

    def get_orders(self, vt_orderids: Sequence[str], use_df: bool = False) -> Sequence[OrderData]:
        """Several orders by id (DataFrame if use_df)."""
        orders = []
        for vt_orderid in vt_orderids:
            order = self.main_engine.get_order(vt_orderid)
            orders.append(order)
        if not use_df:
            return orders
        else:
            return to_df(orders)

    def get_trades(self, vt_orderid: str, use_df: bool = False) -> Sequence[TradeData]:
        """All fills belonging to one order (DataFrame if use_df)."""
        trades = []
        all_trades = self.main_engine.get_all_trades()
        for trade in all_trades:
            if trade.vt_orderid == vt_orderid:
                trades.append(trade)
        if not use_df:
            return trades
        else:
            return to_df(trades)

    def get_all_active_orders(self, use_df: bool = False) -> Sequence[OrderData]:
        """All currently active orders (DataFrame if use_df)."""
        return get_data(self.main_engine.get_all_active_orders, use_df=use_df)

    def get_contract(self, vt_symbol, use_df: bool = False) -> ContractData:
        """Contract details for one symbol (DataFrame if use_df)."""
        return get_data(self.main_engine.get_contract, arg=vt_symbol, use_df=use_df)

    def get_all_contracts(self, use_df: bool = False) -> Sequence[ContractData]:
        """All known contracts (DataFrame if use_df)."""
        return get_data(self.main_engine.get_all_contracts, use_df=use_df)

    def get_account(self, vt_accountid: str, use_df: bool = False) -> AccountData:
        """One account by id (DataFrame if use_df)."""
        return get_data(self.main_engine.get_account, arg=vt_accountid, use_df=use_df)

    def get_all_accounts(self, use_df: bool = False) -> Sequence[AccountData]:
        """All accounts (DataFrame if use_df)."""
        return get_data(self.main_engine.get_all_accounts, use_df=use_df)

    def get_position(self, vt_positionid: str, use_df: bool = False) -> PositionData:
        """One position by id (DataFrame if use_df)."""
        return get_data(self.main_engine.get_position, arg=vt_positionid, use_df=use_df)

    def get_all_positions(self, use_df: bool = False) -> Sequence[PositionData]:
        """All positions (DataFrame if use_df)."""
        return get_data(self.main_engine.get_all_positions, use_df=use_df)

    def get_bars(self, vt_symbol: str, start_date: str, interval: Interval, use_df: bool = False) -> Sequence[BarData]:
        """History bars for one symbol from *start_date* (format YYYYMMDD); [] if contract unknown."""
        contract = self.main_engine.get_contract(vt_symbol)
        if not contract:
            return []
        start = datetime.strptime(start_date, "%Y%m%d")
        req = HistoryRequest(
            symbol=contract.symbol,
            exchange=contract.exchange,
            start=start,
            interval=interval
        )
        return get_data(mddata_client.query_history, arg=req, use_df=use_df)

    def write_log(self, msg: str) -> None:
        """Print a log line and publish it as an EVENT_SCRIPT_LOG event."""
        log = LogData(msg=msg, gateway_name=APP_NAME)
        print(f"{log.time}\t{log.msg}")
        event = Event(EVENT_SCRIPT_LOG, log)
        self.event_engine.put(event)

    def send_email(self, msg: str) -> None:
        """Send a notification email via the main engine."""
        subject = "脚本策略引擎通知"
        self.main_engine.send_email(subject, msg)
def to_df(data_list: Sequence):
    """Convert a sequence of objects into a DataFrame of their attribute dicts.

    Returns None for an empty (or falsy) input so callers can distinguish
    "nothing to show" from an empty frame.
    """
    if not data_list:
        return None
    return DataFrame([item.__dict__ for item in data_list])
def get_data(func: callable, arg: Any = None, use_df: bool = False):
    """Call *func* (optionally with *arg*); optionally wrap the result in a DataFrame.

    Bug fix: the presence check is now ``arg is None`` instead of ``not arg``,
    so falsy-but-valid arguments (0, "", empty collections) are still passed
    through to *func* rather than silently dropped.
    """
    if arg is None:
        data = func()
    else:
        data = func(arg)
    # Plain result, or None (to_df would have no columns to build anyway).
    if not use_df or data is None:
        return data
    if not isinstance(data, list):
        data = [data]
    return to_df(data)
| StarcoderdataPython |
288100 | <filename>src/util.py<gh_stars>1-10
from src.structures import client
from discord.colour import Colour
import discord
class Util:
    """Small helper bundle shared by the music bot's components."""
    def __init__(self, bot: "client.MusicBot") -> None:
        self.bot = bot
    def embed(self, **kwargs) -> discord.Embed:
        """Create a Discord embed pre-tinted with the blurple brand colour."""
        return discord.Embed(colour=Colour.blurple(), **kwargs)
| StarcoderdataPython |
6697523 | <gh_stars>0
import sys
from lib.automaton import Ean
if __name__ == '__main__':
    # Usage: python <script> N  ->  print the regular expression accepted by
    # the EAN automaton of size N.
    if len(sys.argv) != 2:
        raise ValueError('Need a EAN size as argument')
    n = int(sys.argv[1])
    dfa = Ean(n)
    print(dfa.regular_expression())
3358672 | <gh_stars>1-10
import numpy as np
import pygame as pg
from numba import njit
def main():
    """Run the ray-traced maze game: set up pygame and loop until quit/escape.

    State overview: (posx, posy, posz) player position, (enx, eny) enemy
    position (0 means no enemy), (sx, sy) projectile (-1 means none),
    maph/mapr/mapt/maps maze property grids, width/height current render
    resolution (auto-adjusted to hold ~50-65 FPS when ``autores`` is on).
    """
    size = np.random.randint(20,60) # size of the map
    posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
    rot, rot_v = (np.pi/4, 0)
    lx, ly, lz = (size*20, size*30, 1000)
    mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
    enx, eny, seenx, seeny, lock = np.random.uniform(2, size-3 ), np.random.uniform(2, size-3), 0, 0, 0
    maph[int(enx)][int(eny)] = 0
    shoot, sx, sy, sdir = 1, -1, -1, rot
    res, res_o = 5, [96, 112, 160, 192, 224, 260, 300, 340, 400, 480, 540, 600, 800]
    width, height, mod, inc, rr, gg, bb = adjust_resol(24)
    running = True
    pg.init()
    font = pg.font.SysFont("Arial", 18)
    font2 = pg.font.SysFont("Impact", 48)
    screen = pg.display.set_mode((800, 600))
    # Splash/loading screen, presumably shown while the numba-JIT helpers
    # compile on their first call below.
    rr, gg, bb = np.linspace(0,0.8, width*height), np.linspace(0.5,.1, width*height), np.linspace(1,0.1, width*height)
    pixels = np.dstack((rr,gg,bb))
    pixels = np.reshape(pixels, (height,width,3))
    surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
    surf = pg.transform.scale(surf, (750, 550))
    screen.blit(surf, (25, 25))
    screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,95))
    screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,105))
    screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,100))
    screen.blit(font2.render(" Loading, please wait... ", 1, pg.Color("black"), pg.Color("grey")),(50,300))
    pg.display.update()
    clock = pg.time.Clock()
    pg.mouse.set_visible(False)
    et = 0.1
    mplayer = np.zeros([size, size])
    enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
    sstart, timer, count, autores, smooth = None, 0, -100, 1, 0
    pause = 0
    pg.mixer.set_num_channels(3)
    ambient = pg.mixer.Sound('soundfx/HauntSilentPartner.mp3')
    ambient.set_volume(0.5)
    runfx = pg.mixer.Sound('soundfx/run.mp3')
    shotfx = pg.mixer.Sound('soundfx/slap.mp3')
    killfx = pg.mixer.Sound('soundfx/shutdown.mp3')
    respawnfx = pg.mixer.Sound('soundfx/respawn.mp3')
    successfx = pg.mixer.Sound('soundfx/success.mp3')
    failfx = pg.mixer.Sound('soundfx/fail.mp3')
    pg.mixer.Channel(0).play(ambient, -1)
    pg.mixer.Channel(1).play(respawnfx)
    run = 1
    score = 0
    ticks = pg.time.get_ticks()/100000
    while running:
        count += 1
        # ---- event handling ----
        for event in pg.event.get():
            if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
                # First ESC pauses; a second one quits.
                if not pause:
                    pause = 1
                    pg.mixer.Channel(1).play(respawnfx)
                    endmsg = " Game paused. Current score: " + str(score)
                else:
                    endmsg = " Thanks for playing! Total score: " + str(score)
                    pg.mixer.Channel(1).play(killfx)
                    running = False
            if sstart == None and(event.type == pg.MOUSEBUTTONDOWN or event.type == pg.MOUSEBUTTONUP):
                shoot = 1
            if event.type == pg.KEYDOWN:
                if event.key == ord('p'): # pause
                    if not pause:
                        pause = 1
                        endmsg = " Game paused. Current score: " + str(score)
                    elif (int(posx) != exitx or int(posy) != exity):
                        pause = 0
                if pause and event.key == ord('n'): # new game
                    pause = 0
                    size = np.random.randint(20,60)
                    posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
                    rot, rot_v = (np.pi/4, 0)
                    mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
                    enx, eny, seenx, seeny, lock, run = 0, 0, 0, 0, 0, 1
                    shoot, sx, sy, sstart = 0, -1, -1, None
                    mplayer = np.zeros([size, size])
                    et = 0.1
                    enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
                    count = -100
                    if autores:
                        width, height, mod, inc, rr, gg, bb = adjust_resol(24)
                    pg.mixer.Channel(1).play(respawnfx)
                if event.key == ord('t'): # toggle auto resolution
                    autores = not(autores)
                if event.key == ord('y'): # toggle smooth scaling
                    smooth = not(smooth)
                if not autores:
                    if event.key == ord('q'): # manually change resolution
                        if res > 0 :
                            res = res-1
                            width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
                    if event.key == ord('e'):
                        if res < len(res_o)-1 :
                            res = res+1
                            width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
        if not pause:
            # ---- render one frame ----
            rr, gg, bb = super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, mplayer, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, size)
            pixels = np.dstack((rr,gg,bb))
            pixels = np.reshape(pixels, (height,width,3))
            surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
            if shoot or smooth:
                surf = pg.transform.smoothscale(surf, (800, 600))
            else:
                surf = pg.transform.scale(surf, (800, 600))
            screen.blit(surf, (0, 0))
            ## fpss = int(clock.get_fps())pg.time.get_ticks()/100000
            fpss = int(1000/(pg.time.get_ticks() - ticks*100000))
            fps = font.render(str(fpss)+' w: '+ str(width) + ' Score: '+str(score), 1, pg.Color("coral"))
            screen.blit(fps,(10,0))
            if autores and count > 10: #auto adjust render resolution
                if fpss < 50 and width > 100:
                    count = 0
                    width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
                if fpss > 65 and width < 728:
                    count = 0
                    width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*1.1))
            # player's movement
            if (int(posx) == exitx and int(posy) == exity):
                endmsg = " You escaped safely! "
                pg.mixer.Channel(1).play(successfx)
                score += 1
                pause = 1
            pressed_keys = pg.key.get_pressed()
            et = clock.tick()/500
            if et > 0.5:
                et = 0.5
            # ---- projectile lifetime (500 ms) ----
            if shoot or sstart != None:
                if sstart == None:
                    pg.mixer.Channel(2).play(shotfx)
                    if fpss < 60 and autores:
                        count = -50
                        width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
                    sstart = pg.time.get_ticks()
                elif pg.time.get_ticks() - sstart > 500:
                    shoot, sx, sy, sstart = 0, -1, -1, None
            # ---- enemy respawn / proximity checks ----
            if enx == 0:
                if not run:
                    pg.mixer.Channel(1).play(killfx)
                    run = 1
                if np.random.uniform() > 0.999:
                    cos, sin = np.cos(rot), np.sin(rot)
                    for ee in range(100):
                        enx = np.clip(np.random.normal(posx, 5), 1, size-2)
                        eny = np.clip(np.random.normal(posy, 5), 1, size-2)
                        dtp = (enx-posx)**2 + (eny-posy)**2
                        if maph[int(enx)][int(eny)] == 0 and dtp > 16 and dtp < 49:
                            break
                    if maph[int(enx)][int(eny)] != 0:
                        enx, eny = 0, 0
                    else:
                        seenx, seeny, lock = enx, eny, 0
                        screen.blit(font2.render(" Enemy Respawning! ", 1, pg.Color("red"), pg.Color("grey")),(300,50))
                        pg.mixer.Channel(1).play(respawnfx)
            else:
                dtp = (enx-posx)**2 + (eny-posy)**2
                if dtp < 1:
                    # Enemy caught the player.
                    score -= 1
                    endmsg = " You died! Current score: " + str(score)
                    pg.mixer.Channel(1).play(failfx)
                    enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
                    pause = 1
                    surf = pg.surfarray.make_surface((np.rot90(255-pixels*255)).astype('uint8'))
                    surf = pg.transform.smoothscale(surf, (800, 600))
                    screen.blit(surf, (0, 0))
                elif dtp > 300:
                    enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
                    run = 0
            ticks = pg.time.get_ticks()/100000
            # Light source slowly orbits the map.
            lx = size/2 + 1000*np.cos(ticks)
            ly = size/2 + 1000*np.sin(ticks)
            posx, posy, rot, rot_v, shoot = movement(pressed_keys,posx, posy, rot, rot_v, maph, et, shoot, sstart)
            pg.mouse.set_pos([400, 300])
            mplayer = np.zeros([size, size])
            enx, eny, mplayer, et, shoot, sx, sy, sdir,seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
            if run and (seenx == posx or seeny == posy):
                run = False
                pg.mixer.Channel(1).play(runfx)
        else:
            # ---- pause / menu screen ----
            clock.tick(30)
            screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,45))
            screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,55))
            screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,50))
            screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
            if (int(posx) == exitx and int(posy) == exity):
                screen.blit(font2.render(" Your current score is "+str(score), 1, pg.Color("grey"), (80, 34, 80)),(50,390))
            else:
                screen.blit(font2.render(" Press P to continue ", 1, pg.Color("grey"), (80, 34, 80)),(50,390))
            screen.blit(font2.render(" Press N for a new game ", 1, pg.Color("grey"), (45, 34, 100)),(50,460))
            screen.blit(font2.render(" Press ESC to leave ", 1, pg.Color("grey"), (13, 34, 139)),(50,530))
        pg.display.update()
    # ---- shutdown ----
    screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
    pg.mixer.fadeout(1000)
    pg.display.update()
    print(endmsg)
    pg.time.wait(2000)
    pg.quit()
def maze_generator(x, y, size):
    """Generate a random maze and its per-cell property grids.

    Carves a drunken-walk corridor from (x, y) until the walk reaches row
    size-2, which becomes the exit.  Returns colour grids (mr, mg, mb),
    wall heights (maph, 0 = open floor), mirror flags (mapr), the exit cell
    (exitx, exity), texture flags (mapt) and sphere flags (maps).
    """
    mr = np.random.uniform(0,1, (size,size))
    mg = np.random.uniform(0,1, (size,size))
    mb = np.random.uniform(0,1, (size,size))
    mapr = np.random.choice([0, 0, 0, 0, 1], (size,size))
    maps = np.random.choice([0, 0, 0, 0, 1], (size,size))
    mapt = np.random.choice([0, 0, 0, 1, 2], (size,size))
    maptemp = np.random.choice([0,0, 1], (size,size))
    maph = np.random.uniform(0.25, 0.99, (size,size))
    maph[np.where(maptemp == 0)] = 0
    # Solid full-height outer walls; no spheres on the border.
    maph[0,:], maph[size-1,:], maph[:,0], maph[:,size-1] = (1,1,1,1)
    maps[0,:], maps[size-1,:], maps[:,0], maps[:,size-1] = (0,0,0,0)
    maph[x][y], mapr[x][y] = (0, 0)
    count = 0
    while 1:
        testx, testy = (x, y)
        if np.random.uniform() > 0.5:
            testx = testx + np.random.choice([-1, 1])
        else:
            testy = testy + np.random.choice([-1, 1])
        if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
            # Prefer stepping into already-open cells; after 5 rejected tries
            # force a step through a wall so the walk cannot stall.
            if maph[testx][testy] == 0 or count > 5:
                count = 0
                x, y = (testx, testy)
                maph[x][y], mapr[x][y] = (0, 0)
                if x == size-2:
                    exitx, exity = (x, y)
                    break
            else:
                count = count+1
    return mr, mg, mb, maph, mapr, exitx, exity, mapt, maps
def movement(pressed_keys, posx, posy, rot, rot_v, maph, et, shoot, sstart):
    """Apply mouse look and WASD/arrow motion; arm a shot on SPACE when idle."""
    mouse_x, mouse_y = pg.mouse.get_pos()
    # Mouse deltas are measured from the screen centre (400, 300).
    rot = rot - np.clip((mouse_x - 400)/200, -0.2, .2)
    rot_v = np.clip(rot_v - (mouse_y - 300)/400, -1, 1)
    # Heading is fixed for this tick, so compute the step components once.
    step_cos, step_sin = et*np.cos(rot), et*np.sin(rot)
    x, y = posx, posy
    if pressed_keys[pg.K_UP] or pressed_keys[ord('w')]:
        x, y = x + step_cos, y + step_sin
    if pressed_keys[pg.K_DOWN] or pressed_keys[ord('s')]:
        x, y = x - step_cos, y - step_sin
    if pressed_keys[pg.K_LEFT] or pressed_keys[ord('a')]:
        x, y = x - step_sin, y + step_cos
    if pressed_keys[pg.K_RIGHT] or pressed_keys[ord('d')]:
        x, y = x + step_sin, y - step_cos
    # Only commit the move when the destination cell is open floor.
    if maph[int(x)][int(y)] == 0:
        posx, posy = x, y
    if not shoot and sstart == None and pressed_keys[pg.K_SPACE]:
        shoot = 1
    return posx, posy, rot, rot_v, shoot
@njit(fastmath=True)
def super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, maph, exitx, exity, mapr, mapt, maps, pr, pg, pb, enx, eny, sx, sy, size):
    """Ray-march one frame into the flat colour buffers pr/pg/pb and return them.

    NOTE: inside this function ``pg`` is the green channel buffer, NOT the
    module-level pygame alias.  For each pixel a ray is marched through the
    map (a Lode-Vandevenne-style DDA skips runs of empty cells), bounced off
    mirror walls/spheres, classified (ceiling / floor / wall / player / enemy
    / shot) into a base colour, and finally darkened by a shadow ray toward
    the light source (or toward the projectile while one is in flight).
    """
    texture=[[ .95, .99, .97, .8], # brick wall
             [ .97, .95, .96, .85],
             [.8, .85, .8, .8],
             [ .93, .8, .98, .96],
             [ .99, .8, .97, .95],
             [.8, .85, .8, .8]]
    idx = 0
    for j in range(height): #vertical loop
        rot_j = rot_v + np.deg2rad(24 - j/mod)
        sinzo = inc*np.sin(rot_j)
        coszo = inc*np.sqrt(abs(np.cos(rot_j)))
        for i in range(width): #horizontal vision loop
            rot_i = rot + np.deg2rad(i/mod - 30)
            x, y, z = (posx, posy, posz)
            sin, cos, sinz = coszo*np.sin(rot_i), coszo*np.cos(rot_i), sinzo
            modr = 1
            cx, cy, c1r, c2r, c3r = 1, 1, 1, 1, 1
            shot, enem, mapv = 0, 0, 0
            dtp = np.random.uniform(0.002,0.01)
            while 1:
                if (mapv == 0 or (sinz > 0 and (z > mapv or (mapv==6 and (z>0.4 or z <0.2)) or(z > 0.57 and mapv > 1)))): ## LoDev DDA for optimization
                    norm = np.sqrt(cos**2 + sin**2 + sinz**2)
                    rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
                    mapX, mapY = int(x), int(y)
                    deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
                    if (rayDirX < 0):
                        stepX, sideDistX = -1, (x - mapX) * deltaDistX
                    else:
                        stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
                    if (rayDirY < 0):
                        stepY, sideDistY = -1, (y - mapY) * deltaDistY
                    else:
                        stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
                    if (rayDirZ < 0):
                        sideDistZ = z*deltaDistZ;
                    else:
                        sideDistZ = (1-z)*deltaDistZ
                    while (1):
                        if (sideDistX < sideDistY):
                            sideDistX += deltaDistX; mapX += stepX
                            dist = sideDistX; side = 0
                            if mapX < 1 or mapX > size-2:
                                break
                        else:
                            sideDistY += deltaDistY; mapY += stepY
                            dist = sideDistY; side = 1
                            if mapY < 1 or mapY > size-2:
                                break
                        if (maph[mapX][mapY] != 0):
                            break
                    if (side):
                        dist = dist - deltaDistY
                    else:
                        dist = dist - deltaDistX
                    if (dist > sideDistZ):
                        dist = sideDistZ
                    x = x + rayDirX*dist - cos/2
                    y = y + rayDirY*dist - sin/2
                    z = z + rayDirZ*dist - sinz/2
                    ## end of LoDev DDA
                x += cos; y += sin; z += sinz
                if (z > 1 or z < 0): # check ceiling and floor
                    break
                mapv = maph[int(x)][int(y)]
                # Map codes > 1 mark agents: 2/8 player, 3/9 enemy, +6 projectile.
                if mapv > 1 and z < 0.57:
                    if mapv == 2 or mapv == 8:
                        if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
                            break
                        if z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
                            break
                        if z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
                            break
                    if mapv == 3 or mapv == 9:
                        enem = 1
                        if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
                            break
                        if z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
                            break
                        if z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
                            break
                    if mapv > 5 and z < 0.4 and z > 0.2:
                        if ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2 < dtp):#0.01):
                            shot = 1
                            break
                if mapv > z and mapv < 2: # check walls
                    if maps[int(x)][int(y)]: # check spheres
                        if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
                            if (mapr[int(x)][int(y)]): # spherical mirror
                                if (modr == 1):
                                    cx, cy = int(x), int(y)
                                modr = modr*0.7
                                if (modr < 0.2):
                                    break
                                if (mapv - z <= abs(sinz) ): ## horizontal surface
                                    sinz = -sinz
                                else:
                                    nx = (x-int(x)-0.5)/0.5; ny = (y-int(y)-0.5)/0.5; nz =(z-int(z)-0.5)/0.5
                                    dot = 2*(cos*nx + sin*ny + sinz*nz)
                                    cos = (cos - nx*dot); sin = (sin - ny*dot); sinz = (sinz - nz*dot)
                                x += cos; y += sin; z += sinz
                            else:
                                break
                    elif mapr[int(x)][int(y)]: # check reflections
                        if modr == 1:
                            cx, cy = int(x), int(y)
                        modr = modr*0.7
                        if modr < 0.2:
                            break
                        if abs(z-maph[int(x)][int(y)]) < abs(sinz):
                            sinz = -sinz
                        elif maph[int(x+cos)][int(y-sin)] == maph[int(x)][int(y)]:
                            cos = -cos
                        else:
                            sin = -sin
                    else:
                        break
            # Ray stopped: classify the hit and pick a base colour (c1, c2, c3).
            if z > 1: # ceiling
                deltaDistZ = (lz-z)*deltaDistZ
                x += deltaDistZ*rayDirX; y += deltaDistZ*rayDirY; z = lz
                dtol = np.sqrt((x-lx)**2+(y-ly)**2)
                if dtol < 50: #light source
                    shot = 1
                    c1, c2, c3 = 1, 1, 0.5
                else:
                    angle = np.rad2deg(np.arctan((y-ly)/(x-lx)))/np.random.uniform(12,15)
                    sh = (0.8+ abs(angle - int(angle))/5)/(dtol/1000)
                    if sh > 1:
                        sh = 1
                    if int(angle)%2 == 1:
                        c1, c2, c3 = 0.8*(1-sh), 0.86*(1-sh/4), (1-sh/10)
                    else:
                        c1, c2, c3 = 0.8*(1-sh), 0.9*(1-sh/4), (1-sh/10)
                    if sx != -1:
                        c1, c2, c3 = 0.7*c1, 0.7*c2, 0.7*c3
            elif z < 0: # floor
                z = 0
                if int(x*2)%2 == int(y*2)%2:
                    c1, c2, c3 = .8,.8,.8
                else:
                    if int(x) == exitx and int(y) == exity: #exit
                        c1, c2, c3 = 0,0,.6
                    else:
                        c1, c2, c3 = .1,.1,.1
            elif mapv < 2: # walls
                c1, c2, c3 = mr[int(x)][int(y)], mg[int(x)][int(y)], mg[int(x)][int(y)]
                if mapt[int(x)][int(y)]: # textured walls
                    if y%1 < 0.05 or y%1 > 0.95:
                        ww = int((x*3)%1*4)
                    else:
                        ww = int((y*3)%1*4)
                    if x%1 < 0.95 and x%1 > 0.05 and y%1 < 0.95 and y%1 > 0.05:
                        zz = int(x*5%1*6)
                    else:
                        zz = int(z*5%1*6)
                    text = texture[zz][ww]
                    c1, c2, c3 = c1*text, c2*text, c3*text
                if mapv - z <= abs(sinz):
                    z = mapv
                elif not maps[int(x)][int(y)]:
                    if int(x-cos) != int(x):
                        x = max(int(x-cos), int(x))
                        modr = modr*0.80
                    else:
                        y = max(int(y-sin), int(y))
                        modr = modr*0.9
            else:
                if shot:
                    sh = ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2)/0.012
                    c1, c2, c3 = 1, 0.6*sh+0.2 , 0.2*sh+0.1 # shot
                elif z> 0.45:
                    c1, c2, c3 = 0.6, 0.3, 0.3 # Head
                elif z > 0.3:
                    c1, c2, c3 = 0.3, 0.5, 0.5 # Chest
                else:
                    if enem:
                        c1, c2, c3 = 1, 0.2, 0.2 # Roller red
                    else:
                        c1, c2, c3 = 0.2, 0.2, 1 # Roller blue
            if modr <= 0.7 and not shot:
                # Ray was reflected at least once: tint by the first mirror's colour.
                c1r, c2r, c3r = mr[cx][cy], mg[cx][cy], mg[cx][cy]
            if not shot and z < 1:
                # Distance fog.
                dtp = np.sqrt((x-posx)**2+(y-posy)**2+(z-posz)**2)
                if dtp > 7:
                    modr = modr/np.log((dtp-6)/4+np.e)
            if z < 1: # shadows
                # March a secondary ray toward the projectile (if lit) or the light.
                if sx != -1 and maph[int(sx)][int(sy)] > 1:
                    shot, c3 = 1, c3 * 0.9
                    dtol = np.sqrt((x-sx)**2+(y-sy)**2+(z-0.35)**2)
                    cos, sin, sinz = .01*(sx-x)/dtol, .01*(sy-y)/dtol, .01*(0.35-z)/dtol
                else:
                    dtol = np.sqrt((x-lx)**2+(y-ly)**2+(z-lz)**2)
                    cos, sin, sinz = .01*(lx-x)/dtol, .01*(ly-y)/dtol, .01*(lz-z)/dtol
                x += cos; y += sin; z += sinz
                mapv = maph[int(x)][int(y)]
                if z < mapv and mapv < 1 and not maps[int(x)][int(y)]:
                    modr = modr*0.39
                while modr > 0.45:
                    if (mapv == 0) or not shot and ((z > mapv) or (z > 0.57 and mapv > 1)): ## LoDev DDA for optimization
                        norm = np.sqrt(cos**2 + sin**2 + sinz**2)
                        rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
                        mapX, mapY = int(x), int(y)
                        deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
                        if (rayDirX < 0):
                            stepX, sideDistX = -1, (x - mapX) * deltaDistX
                        else:
                            stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
                        if (rayDirY < 0):
                            stepY, sideDistY = -1, (y - mapY) * deltaDistY
                        else:
                            stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
                        if (rayDirZ < 0):
                            sideDistZ = z*deltaDistZ;
                        else:
                            sideDistZ = (1-z)*deltaDistZ
                        while (1):
                            if (sideDistX < sideDistY):
                                sideDistX += deltaDistX; mapX += stepX
                                dist = sideDistX; side = 0
                                if mapX < 1 or mapX > size-2:
                                    break
                            else:
                                sideDistY += deltaDistY; mapY += stepY
                                dist = sideDistY; side = 1
                                if mapY < 1 or mapY > size-2:
                                    break
                            if (maph[mapX][mapY] != 0):
                                break
                        if (side):
                            dist = dist - deltaDistY
                        else:
                            dist = dist - deltaDistX
                        if (dist > sideDistZ):
                            dist = sideDistZ
                        x = x + rayDirX*dist - cos/2
                        y = y + rayDirY*dist - sin/2
                        z = z + rayDirZ*dist - sinz/2
                        ## end of LoDev DDA
                    x += cos; y += sin; z += sinz
                    mapv = maph[int(x)][int(y)]
                    if shot:
                        if mapv > 5 or (sinz > 0 and z > 0.35) or (sinz < 0 and z < 0.35):
                            break
                    elif z >1:
                        break
                    if z < 0.57 and mapv > 1:
                        # Soft shadows cast by the enemy (3/9) or player (2/8) body parts.
                        if mapv == 3 or mapv == 9:
                            if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
                                modr = modr*0.67
                            elif z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
                                modr = modr*0.67
                            elif z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
                                modr = modr*0.67
                        elif mapv == 2 or mapv == 8:
                            if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
                                modr = modr*0.67
                            elif z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
                                modr = modr*0.67
                            elif z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
                                modr = modr*0.67
                    if mapv > 0 and z <= mapv and mapv < 2:
                        if maps[int(x)][int(y)]: # check spheres
                            if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
                                modr = modr*0.9
                        else:
                            modr = modr*0.9
            pr[idx] = modr*np.sqrt(c1*c1r)
            pg[idx] = modr*np.sqrt(c2*c2r)
            pb[idx] = modr*np.sqrt(c3*c3r)
            idx += 1
    return pr, pg, pb
def adjust_resol(width):
    """Derive render-buffer parameters and fresh flat colour buffers for *width*.

    Returns (width, height, mod, inc, rr, gg, bb) where height keeps a 4:3
    aspect ratio and rr/gg/bb are uniform-random buffers of width*height pixels.
    """
    height = int(0.75*width)
    mod = width/64          # angular scale factor used by the ray caster
    inc = 0.02/mod          # ray step increment shrinks as resolution grows
    n_pixels = width*height
    rr = np.random.uniform(0, 1, n_pixels)
    gg = np.random.uniform(0, 1, n_pixels)
    bb = np.random.uniform(0, 1, n_pixels)
    ## print('Resolution: ', width, height)
    return width, height, mod, inc, rr, gg, bb
@njit(fastmath=True)
def agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock):
    """Advance the enemy AI and the player's projectile by one tick.

    enx/eny: enemy position (0 means "no enemy alive"); seenx/seeny: the
    enemy's current target point; lock: 1 while the enemy has direct line of
    sight to the player; sx/sy/sdir: projectile position and heading (-1 =
    no projectile).  Marks enemy (3), player (2) and projectile (+6) into
    *mplayer*, adds the wall heights, and returns all updated state.
    """
    if enx != 0:
        # Occasionally (or whenever unlocked) re-cast a sight ray to the player.
        if not lock or np.random.uniform(0,1) > 0.99:
            dtp = np.sqrt((enx-posx)**2 + (eny-posy)**2)
            cos, sin = (posx-enx)/dtp, (posy-eny)/dtp
            x, y = enx, eny
            for i in range(300):
                x += 0.04*cos; y += 0.04*sin
                if maph[int(x)][int(y)] != 0:
                    lock = 0
                    break
                if(int(x) == int(posx) and int(y) == int(posy)):
                    seenx, seeny = posx, posy
                    lock = 1
                    break
        # Reached the current target cell: pick the next one.
        if int(enx) == int(seenx) and int(eny) == int(seeny):
            if not lock:
                if shoot:
                    seenx, seeny = np.random.uniform(enx, posx), np.random.uniform(eny, posy)
                else:
                    seenx, seeny = np.random.normal(enx, 2), np.random.normal(eny, 2)
            else:
                seenx, seeny = np.random.normal(posx, 2), np.random.normal(posy, 2)
        # Jittered step toward the target; try a perpendicular sidestep on walls.
        dtp = np.sqrt((enx-seenx)**2 + (eny-seeny)**2)
        cos, sin = (seenx-enx)/dtp, (seeny-eny)/dtp
        x, y = enx + et*(cos+np.random.normal(0,.5)), eny + et*(sin+np.random.normal(0,.5))
        if maph[int(x)][int(y)] == 0:
            enx, eny = x, y
        else:
            if np.random.uniform(0,1) > 0.5:
                x, y = enx - et*(sin+np.random.normal(0,.5)), eny + et*(cos+np.random.normal(0,.5))
            else:
                x, y = enx + et*(sin+np.random.normal(0,.5)), eny - et*(cos+np.random.normal(0,.5))
            if maph[int(x)][int(y)] == 0:
                enx, eny = x, y
            else:
                seenx, seeny = enx+np.random.normal(0,3), eny+np.random.normal(0,3)
                lock = 0
        mplayer[int(enx)][int(eny)] = 3
    mplayer[int(posx)][int(posy)] = 2
    if shoot:
        if sx == -1:
            # Spawn the projectile just in front of the player, with slight spread.
            sdir = rot+np.random.uniform(-.1,.1)
            sx, sy = posx + .5*np.cos(sdir), posy + .5*np.sin(sdir)
        sx, sy = sx + 5*et*np.cos(sdir), sy + 5*et*np.sin(sdir)
        if enx != 0 and (sx - enx)**2 + (sy - eny)**2 < 0.02:
            # Direct hit: remove projectile and enemy.
            shoot, sx, sy, enx, eny, seenx, seeny = 0, -1, -1, 0, 0, 0, 0
        if maph[int(sx)][int(sy)] != 0:
            shoot, sx, sy = 0, -1, -1
        else:
            mplayer[int(sx)][int(sy)] += 6
    mplayer = maph + mplayer
    return(enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock)
# Start the game only when run as a script, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5016845 | <gh_stars>0
# Write a short Chinese passage to 'Jeanette_c.txt' (UTF-8), then read the
# file back and print it.  The filename suggests the text is a Chinese
# translation of a Jeanette Winterson passage -- unverified.
with open('Jeanette_c.txt','w',encoding='utf-8') as f:
    f.write('''我会给你电话,我们一起生个火,喝点小酒,在属于我们的地方辨认彼此。
别等待,别把故事留到后面讲,生命如此之短。这一片海和沙滩,这海滩上的散步,在潮水将我们所做的一切吞噬之前。
我爱你。
这是世上最难的三个字。 可除此以外。我还能说什么。''')
with open('Jeanette_c.txt','r',encoding='utf-8') as f:
    s = f.read()
print(s)
6425261 | """
Module with class for cell types in vtkUnstructuredGrid
"""
__copyright__ = "Copyright © 2018-2021 <NAME>"
__license__ = "SPDX-License-Identifier: MIT"
from enum import Enum, unique
import numpy as np
from .data_array import DTYPE_TO_VTK
@unique
class CellType(Enum):
    """
    Enumerates the VTK cell types.
    See https://lorensen.github.io/VTKExamples/site/VTKFileFormats/
    """
    VERTEX = 1
    POLY_VERTEX = 2
    LINE = 3
    POLY_LINE = 4
    TRIANGLE = 5
    TRIANGLE_STRIP = 6
    POLYGON = 7
    PIXEL = 8
    QUAD = 9
    TETRA = 10
    VOXEL = 11
    HEXAHEDRON = 12
    WEDGE = 13
    PYRAMID = 14
    PENTAGONAL_PRISM = 15
    HEXAGONAL_PRISM = 16
    QUADRATIC_EDGE = 21
    QUADRATIC_TRIANGLE = 22
    QUADRATIC_QUAD = 23
    QUADRATIC_TETRA = 24
    QUADRATIC_HEXAHEDRON = 25
    QUADRATIC_WEDGE = 26
    QUADRATIC_PYRAMID = 27
    BIQUADRATIC_QUAD = 28
    TRIQUADRATIC_HEXAHEDRON = 29
    QUADRATIC_LINEAR_QUAD = 30
    QUADRATIC_LINEAR_WEDGE = 31
    BIQUADRATIC_QUADRATIC_WEDGE = 32
    BIQUADRATIC_QUADRATIC_HEXAHEDRON = 33
    BIQUADRATIC_TRIANGLE = 34
    CUBIC_LINE = 35
    QUADRATIC_POLYGON = 36


# Number of points per cell for each VTK cell type; -1 marks variable-size
# cells (poly-vertex, poly-line, triangle strip, polygon, quadratic polygon).
NODES_PER_CELL = {
    CellType.VERTEX: 1,
    CellType.POLY_VERTEX: -1,
    CellType.LINE: 2,
    CellType.POLY_LINE: -1,
    CellType.TRIANGLE: 3,
    CellType.TRIANGLE_STRIP: -1,
    CellType.POLYGON: -1,
    CellType.PIXEL: 4,
    CellType.QUAD: 4,
    CellType.TETRA: 4,
    CellType.VOXEL: 8,
    # Bug fix: a linear hexahedron has 8 corner points (was wrongly 9).
    CellType.HEXAHEDRON: 8,
    CellType.WEDGE: 6,
    CellType.PYRAMID: 5,
    CellType.PENTAGONAL_PRISM: 10,
    CellType.HEXAGONAL_PRISM: 12,
    CellType.QUADRATIC_EDGE: 3,
    CellType.QUADRATIC_TRIANGLE: 6,
    CellType.QUADRATIC_QUAD: 8,
    CellType.QUADRATIC_TETRA: 10,
    CellType.QUADRATIC_HEXAHEDRON: 20,
    CellType.QUADRATIC_WEDGE: 15,
    CellType.QUADRATIC_PYRAMID: 13,
    CellType.BIQUADRATIC_QUAD: 9,
    CellType.TRIQUADRATIC_HEXAHEDRON: 27,
    CellType.QUADRATIC_LINEAR_QUAD: 6,
    CellType.QUADRATIC_LINEAR_WEDGE: 12,
    CellType.BIQUADRATIC_QUADRATIC_WEDGE: 18,
    CellType.BIQUADRATIC_QUADRATIC_HEXAHEDRON: 24,
    CellType.BIQUADRATIC_TRIANGLE: 7,
    CellType.CUBIC_LINE: 4,
    CellType.QUADRATIC_POLYGON: -1,
}
def check_connectivity(connectivity):
    """Sanity-check a {cell type: connectivity array} mapping.

    Raises TypeError when a value is not an ndarray or has a non-integer,
    non-object dtype; returns False when a fixed-size cell type does not have
    the expected number of nodes per row, True otherwise.
    """
    # The allowed dtypes do not depend on the loop variable, so build them once.
    allowed_dtypes = {
        dtype for dtype, label in DTYPE_TO_VTK.items() if 'Int' in label
    } | {np.dtype(object)}
    for cell_type, conn in connectivity.items():
        kind = cell_type if isinstance(cell_type, CellType) else CellType(cell_type)
        if not isinstance(conn, np.ndarray):
            raise TypeError("Connectivity needs to be of type numpy.ndarray")
        if conn.dtype not in allowed_dtypes:
            raise TypeError("Connectivity dtype needs to be an integer type or"
                            "an object type for variable size cells")
        expected = NODES_PER_CELL[kind]
        if expected != -1 and expected != conn.shape[1]:
            return False
    return True
| StarcoderdataPython |
223725 | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: <EMAIL>
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This file contains a dialog and widgets related to the module documentation
dialog, which displays the available documentation for a given VisTrails module.
QModuleDocumentation
"""
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core.modules.module_registry import ModuleRegistryException
from vistrails.gui.vistrails_palette import QVistrailsPaletteInterface
################################################################################
class QModuleDocumentation(QtGui.QDialog, QVistrailsPaletteInterface):
    """
    Dialog that displays the documentation of a VisTrails module: its name,
    its package, and the documentation text from its module descriptor.
    """
    def __init__(self, parent=None):
        """
        QModuleDocumentation(parent) -> None

        Build the (non-modal) dialog: name/package labels plus a read-only
        text area for the documentation body.
        """
        QtGui.QDialog.__init__(self, parent)
        # self.setModal(True)
        self.setWindowTitle("Module Documentation")
        self.setLayout(QtGui.QVBoxLayout())
        # self.layout().addStrut()
        self.name_label = QtGui.QLabel("")
        self.layout().addWidget(self.name_label)
        self.package_label = QtGui.QLabel("")
        self.layout().addWidget(self.package_label)
        # self.closeButton = QtGui.QPushButton('Ok', self)
        self.textEdit = QtGui.QTextEdit(self)
        self.layout().addWidget(self.textEdit, 1)
        self.textEdit.setReadOnly(True)
        self.textEdit.setTextCursor(QtGui.QTextCursor(self.textEdit.document()))
        # self.layout().addWidget(self.closeButton)
        # self.connect(self.closeButton, QtCore.SIGNAL('clicked(bool)'),
        # self.close)
        # self.closeButton.setShortcut('Enter')
        self.update_descriptor()

    def set_controller(self, controller):
        """Refresh the dialog from *controller*'s current pipeline selection.

        Documentation is shown only when exactly one module is selected.
        """
        if controller is not None:
            scene = controller.current_pipeline_scene
            selected_ids = scene.get_selected_module_ids()
            modules = [controller.current_pipeline.modules[i]
                       for i in selected_ids]
            if len(modules) == 1:
                self.update_module(modules[0])
            else:
                self.update_module(None)
        else:
            self.update_descriptor()

    def update_module(self, module=None):
        """Show documentation for *module* (clears the dialog when None)."""
        descriptor = None
        try:
            if module and module.module_descriptor:
                descriptor = module.module_descriptor
        except ModuleRegistryException:
            # Module's package is missing/unloaded; fall through with no descriptor.
            pass
        self.update_descriptor(descriptor, module)

    def update_descriptor(self, descriptor=None, module=None):
        """Fill the name/package labels and documentation text from *descriptor*."""
        if descriptor is None:
            # self.setWindowTitle("Module Documentation")
            self.name_label.setText("Module name:")
            self.package_label.setText("Module package:")
            self.textEdit.setText("")
        else:
            # self.setWindowTitle('%s Documentation' % descriptor.name)
            self.name_label.setText("Module name: %s" % descriptor.name)
            self.package_label.setText("Module package: %s" % \
                descriptor.module_package())
            self.textEdit.setText(descriptor.module_documentation(module))

    def activate(self):
        """Show the palette window if hidden, then raise it to the front."""
        if self.isVisible() == False:
            self.show()
        self.activateWindow()
5094099 | import elegy
import jax
import jax.numpy as jnp
import typing as tp
import einops
import optax
class MixerBlock(elegy.Module):
    """One MLP-Mixer block: token mixing followed by channel mixing.

    Each sub-mix is LayerNorm -> MLP; a residual connection is added only
    when the input size already matches the mix output size.
    """
    def __init__(self, elements: int, channels: int):
        # elements: token-axis size mixed by the first MLP.
        # channels: channel-axis size mixed by the second MLP.
        self.elements = elements
        self.channels = channels
        super().__init__()

    def call(self, x):
        transpose = lambda x: einops.rearrange(x, "... n d -> ... d n")
        normalize = lambda x: elegy.nn.LayerNormalization()(x)
        mpl_1 = lambda x: MLP(self.elements)(x)
        mpl_2 = lambda x: MLP(self.channels)(x)
        # Token mixing: transpose so the MLP acts across the sequence axis.
        # The residual is skipped (x0 = 0.0) when shapes would not align.
        x0 = x if x.shape[-2] == self.elements else 0.0
        x = x0 + transpose(mpl_1(transpose(normalize(x))))
        # Channel mixing over the last axis.
        x0 = x if x.shape[-1] == self.channels else 0.0
        x = x0 + normalize(mpl_2(x))
        return x
class MLP(elegy.Module):
    """Two-layer perceptron with a GELU nonlinearity between the layers."""

    def __init__(self, units: int):
        self.units = units
        super().__init__()

    def call(self, x):
        hidden = jax.nn.gelu(elegy.nn.Linear(self.units)(x))
        return elegy.nn.Linear(self.units)(hidden)
class Mixer(elegy.Module):
    """MLP-Mixer-style binary classifier over tabular features.

    Continuous features pass through a small MLP and categorical features
    through an embedding table, producing one "token" per feature.  A learned
    CLS token is prepended; after the mixer blocks its final state is
    projected to a single logit.
    """
    def __init__(
        self,
        metadata: tp.Dict[str, tp.Dict[str, tp.Any]],
        labels: tp.List[str],
        embedding_channels: int = 32,
        num_layers: int = 2,
    ):
        super().__init__()
        # Split metadata into input features vs. label columns.
        self.features = {k: v for k, v in metadata.items() if k not in labels}
        self.labels = {k: v for k, v in metadata.items() if k in labels}
        self.embedding_channels = embedding_channels
        self.num_layers = num_layers

    def call(self, x):
        x = self.embed_features(x)
        # add CLS token
        token = self.add_parameter(
            "token",
            lambda: elegy.initializers.TruncatedNormal()(
                [1, x.shape[-1]],
                jnp.float32,
            ),
        )
        token = einops.repeat(token, "... -> batch ...", batch=x.shape[0])
        x = jnp.concatenate([token, x], axis=1)
        for i in range(self.num_layers):
            x = MixerBlock(
                elements=x.shape[1],
                channels=self.embedding_channels,
            )(x)
        # reduce channels: keep only the CLS token's state.
        x = x[:, 0]
        logits = elegy.nn.Linear(1)(x)
        return logits

    def embed_features(self, x):
        """Map the dict of per-feature arrays to a (batch, n_features, channels) tensor."""
        assert set(x.keys()) == set(self.features.keys())
        xs = []
        for feature, feature_metadata in self.features.items():
            kind = feature_metadata["kind"]
            values = x[feature]
            if kind == "continuous":
                values = elegy.nn.Linear(self.embedding_channels)(values)
                values = jax.nn.relu(values)
                values = elegy.nn.Linear(self.embedding_channels)(values)
            elif kind == "categorical":
                values = values[:, 0]
                vocab_size = feature_metadata["size"]
                values = elegy.nn.Embedding(
                    vocab_size=vocab_size,
                    embed_dim=self.embedding_channels,
                )(values)
            else:
                raise ValueError(f"unknown kind '{kind}'")
            xs.append(values)
        x = jnp.stack(xs, axis=1)
        return x
batch_size: int = 16  # leading-batch size used for model.init/summary in get_model
epochs: int = 1000  # NOTE(review): unused in this chunk -- presumably consumed by training code elsewhere
def get_model(
    feature_metadata,
    labels,
    X_train,
    y_train,
    X_valid,
    y_valid,
):
    """Build, initialise and summarise an elegy Model wrapping the Mixer.

    X_valid/y_valid are accepted for interface symmetry but are not used here.
    """
    model = elegy.Model(
        module=Mixer(
            metadata=feature_metadata,
            labels=labels,
            embedding_channels=96,
            num_layers=2,
        ),
        loss=[
            elegy.losses.BinaryCrossentropy(from_logits=True),
            # elegy.regularizers.GlobalL2(0.0001),
        ],
        metrics=elegy.metrics.BinaryAccuracy(),
        optimizer=optax.adamw(3e-5),
    )
    # Initialise parameters using one leading batch from the training data.
    model.init(
        jax.tree_map(lambda x: x[:batch_size], X_train),
        jax.tree_map(lambda x: x[:batch_size], y_train),
    )
    model.summary(jax.tree_map(lambda x: x[:batch_size], X_train), depth=1)
    return model
| StarcoderdataPython |
5025646 | <gh_stars>10-100
import pymongo
# Connect to a local MongoDB instance and dump every document in the
# ``hoomans`` collection of the ``mi-db`` database.
client = pymongo.MongoClient("mongodb://localhost:27017/")
print("##### RECORDS #####")
for hooman in client["mi-db"].hoomans.find():
    print(hooman)
5137439 | <reponame>deepakrana47/RNN-attention-network
from bs4 import BeautifulSoup
import re, numpy as np
import pandas as pd
from _text import Tokenizer
# from keras.preprocessing.sequence import pad_sequences
stopwrds = open('stopword.txt').read().split('\n')
def html2text(review):
    """Return extracted text string from provided HTML string."""
    review_text = BeautifulSoup(review, "lxml").get_text()
    # BeautifulSoup may yield an empty string for some inputs; fall back to raw.
    if len(review_text) == 0:
        review_text = review
    # Strip any remaining angle-bracketed markup.
    review_text = re.sub(r"\<.*\>", "", review_text)
    try:
        review_text = review_text.encode('ascii', 'ignore').decode('ascii')#ignore \xc3 etc.
    except UnicodeDecodeError:
        # NOTE(review): Python 2 legacy path -- on Python 3 str.encode(..., 'ignore')
        # cannot raise UnicodeDecodeError and str has no .decode(), so this branch
        # would itself fail; confirm the intended interpreter before relying on it.
        review_text = review_text.decode("ascii", "ignore")
    return review_text
def letters_only(text):
    """Return *text* reduced to letters only, single-spaced, stop words removed.

    Non-letter characters become spaces, runs of 2+ spaces collapse to one,
    and any word present in the module-level ``stopwrds`` list is dropped.
    """
    # It is probably worth experimenting with milder preprocessing (eg just removing punctuation)
    text = re.sub("[^a-zA-Z]", " ", text)
    text = re.sub(r"[ ]{2,}", ' ', text)
    # Set membership is O(1) per word vs an O(len(stopwrds)) list scan,
    # and the comprehension replaces the manual append loop.
    stopword_set = set(stopwrds)
    return ' '.join(w for w in text.split(' ') if w not in stopword_set)
def review_preprocess(review):
    """Preprocessing used before fitting/transforming RNN tokenizer - Html->text, remove punctuation/#s, lowercase."""
    # str() guards against BeautifulSoup returning a non-str text type.
    return letters_only(str(html2text(review)).lower())
def get_train_data(df, reviews_to_features_fn=None):
    """Extracts features (using reviews_to_features_fn), splits into train/test data, and returns
    x_train, y_train, x_test, y_test. If no feature extraction function is provided, x_train/x_test will
    simply consist of a Series of all the reviews.
    """
    SEED = 1000
    # Deterministically shuffle the rows so the 80/20 split is unbiased
    # but reproducible across runs.
    np.random.seed(SEED)
    shuffled = df.iloc[np.random.permutation(len(df))]
    if reviews_to_features_fn:
        feature_rows = shuffled["review"].map(reviews_to_features_fn)
        if type(feature_rows[0]) == np.ndarray:
            # Stack the per-review feature vectors into a 2-D matrix.
            num_instances = len(feature_rows)
            num_features = len(feature_rows[0])
            x = np.concatenate(feature_rows.values).reshape((num_instances, num_features))
        else:
            x = feature_rows
    else:
        x = shuffled["review"]
    y = shuffled["sentiment"]
    # First 80% of the shuffled rows are training data, the rest validation.
    split_at = int(shuffled.shape[0] * .8)
    return x[0:split_at], y[0:split_at], x[split_at:], y[split_at:]
def preprocess(train, test, min_word_count=0, num_most_freq_words_to_include = None):
    """Turn the raw train/test DataFrames into tokenized integer sequences.

    Returns (x_train, y_train, x_val, y_val, x_test, y_test, word2idx, idx2word).
    NOTE(review): ``num_most_freq_words_to_include`` is accepted but never
    used in this body — confirm whether it was meant for the Tokenizer.
    """
    x_train, y_train, x_val, y_val = get_train_data(train, review_preprocess)
    x_test = test["review"].map(review_preprocess)
    # Derive test labels from the id column; ids look like '"123_7"' where
    # the suffix after '_' is the star rating, >= 5 meaning positive —
    # TODO confirm against the dataset docs.
    test["sentiment"] = test["id"].map(lambda x: 1 if int(x.strip('"').split("_")[1]) >= 5 else 0)
    y_test = test["sentiment"]
    y_train, y_val, y_test = np.array(y_train), np.array(y_val), np.array(y_test)
    train_review_list = x_train.tolist()
    val_review_list = x_val.tolist()
    test_review_list = x_test.tolist()
    # The vocabulary is fit on train + validation text only (not test).
    all_review_list = x_train.tolist() + x_val.tolist()
    np.random.seed(1000)
    tokenizer = Tokenizer(min_word_count=min_word_count)
    # print all_review_list[0:2]
    tokenizer.fit_on_texts(all_review_list)
    train_reviews_tokenized = tokenizer.texts_to_sequences(train_review_list)
    x_train = train_reviews_tokenized
    # x_train = pad_sequences(train_reviews_tokenized, maxlen=MAX_REVIEW_LENGTH_FOR_KERAS_RNN)
    val_review_tokenized = tokenizer.texts_to_sequences(val_review_list)
    x_val = val_review_tokenized
    # x_val = pad_sequences(val_review_tokenized, maxlen=MAX_REVIEW_LENGTH_FOR_KERAS_RNN)
    test_review_tokenized = tokenizer.texts_to_sequences(test_review_list)
    x_test = test_review_tokenized
    # x_test = pad_sequences(test_review_tokenized, maxlen=MAX_REVIEW_LENGTH_FOR_KERAS_RNN)
    # Reserve index 0 for unknown words (key spelling kept as-is: 'UNKOWN').
    word2idx = tokenizer.word_index
    word2idx.update({'UNKOWN':0})
    idx2word = {v:k for k,v in word2idx.items()}
    return x_train, y_train, x_val, y_val, x_test, y_test, word2idx, idx2word
def init_weight(Mi, Mo=0):
    """Sample scaled-Gaussian initial weights.

    With Mo == 0 (the default) return a vector of length Mi scaled by
    1/sqrt(Mi); otherwise return an Mi x Mo matrix scaled by 1/sqrt(Mi + Mo).
    """
    if Mo != 0:
        return np.random.randn(Mi, Mo) / np.sqrt(Mi + Mo)
    return np.random.randn(Mi) / np.sqrt(Mi)
if __name__ == "__main__":
    # Hard-coded local paths to the Kaggle word2vec-nlp (IMDB) TSV files.
    train = pd.read_csv("/media/zero/41FF48D81730BD9B/kaggle/word2vec-nlp/input/labeledTrainData.tsv", header=0,delimiter='\t')
    test = pd.read_csv("/media/zero/41FF48D81730BD9B/kaggle/word2vec-nlp/input/testData.tsv", header=0, delimiter='\t')
    x_train, y_train, x_val, y_val, x_test, y_test, word2idx, idx2word = preprocess(train, test)
386935 | <filename>dask_mpi/tests/test_cli.py
from __future__ import print_function, division, absolute_import
import os
from time import sleep
import pytest
pytest.importorskip("mpi4py")
import requests
from distributed import Client
from distributed.comm.addressing import get_address_host_port
from distributed.metrics import time
from distributed.utils import tmpfile
from distributed.utils_test import popen
from distributed.utils_test import loop # noqa: F401
FNULL = open(os.devnull, "w") # hide output of subprocess
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
def test_basic(loop, nanny, mpirun):
    """Launch dask-mpi across 4 ranks (scheduler + 3 workers) and run a task
    pinned to a specific MPI rank, with and without the nanny process."""
    with tmpfile(extension="json") as fn:
        cmd = mpirun + ["-np", "4", "dask-mpi", "--scheduler-file", fn, nanny]
        with popen(cmd):
            with Client(scheduler_file=fn) as c:
                # Poll until all 3 workers have registered (10s budget).
                start = time()
                while len(c.scheduler_info()["workers"]) != 3:
                    assert time() < start + 10
                    sleep(0.2)
                assert c.submit(lambda x: x + 1, 10, workers="mpi-rank-1").result() == 11
def test_no_scheduler(loop, mpirun):
    """Start a 2-rank cluster, then attach one extra worker-only rank via
    --no-scheduler and verify it joins the existing scheduler."""
    with tmpfile(extension="json") as fn:
        cmd = mpirun + ["-np", "2", "dask-mpi", "--scheduler-file", fn]
        with popen(cmd, stdin=FNULL):
            with Client(scheduler_file=fn) as c:
                # Wait for the single worker of the first launch.
                start = time()
                while len(c.scheduler_info()["workers"]) != 1:
                    assert time() < start + 10
                    sleep(0.2)
                assert c.submit(lambda x: x + 1, 10).result() == 11
                # Second launch contributes a worker only, no scheduler.
                cmd = mpirun + ["-np", "1", "dask-mpi", "--scheduler-file", fn, "--no-scheduler"]
                with popen(cmd):
                    start = time()
                    while len(c.scheduler_info()["workers"]) != 2:
                        assert time() < start + 10
                        sleep(0.2)
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
def test_non_default_ports(loop, nanny, mpirun):
    """Verify scheduler/worker/nanny honor explicitly requested ports."""
    with tmpfile(extension="json") as fn:
        cmd = mpirun + ["-np", "2", "dask-mpi", "--scheduler-file", fn, nanny,
                        "--scheduler-port", "56723",
                        "--worker-port", "58464",
                        "--nanny-port", "50164"]
        with popen(cmd):
            with Client(scheduler_file=fn) as c:
                start = time()
                while len(c.scheduler_info()["workers"]) != 1:
                    assert time() < start + 10
                    sleep(0.2)
                sched_info = c.scheduler_info()
                # Scheduler must listen on the requested port.
                sched_host, sched_port = get_address_host_port(
                    sched_info['address'])
                assert sched_port == 56723
                # Each worker (and nanny, when enabled) likewise.
                for worker_addr, worker_info in sched_info['workers'].items():
                    worker_host, worker_port = get_address_host_port(
                        worker_addr)
                    assert worker_port == 58464
                    if nanny == "--nanny":
                        nanny_port = worker_info['services']['nanny']
                        assert nanny_port == 50164
                assert c.submit(lambda x: x + 1, 10).result() == 11
def check_port_okay(port):
    """Poll http://localhost:<port>/status/ until it answers OK.

    Retries every 0.1s; fails the test after ~20 seconds without success.
    """
    deadline = time() + 20
    while True:
        try:
            # A non-OK response raises AssertionError, which is caught
            # below and retried just like a connection error.
            assert requests.get("http://localhost:%d/status/" % port).ok
            return
        except Exception:
            sleep(0.1)
        assert time() < deadline
def test_bokeh_scheduler(loop, mpirun):
    """The scheduler dashboard serves on the requested --bokeh-port while
    the cluster is up, and disappears once it is torn down."""
    with tmpfile(extension="json") as fn:
        cmd = mpirun + ["-np", "2", "dask-mpi", "--scheduler-file", fn, "--bokeh-port", "59583"]
        with popen(cmd, stdin=FNULL):
            check_port_okay(59583)
        # After the processes exit the port must no longer answer.
        with pytest.raises(Exception):
            requests.get("http://localhost:59583/status/")
@pytest.mark.skip
def test_bokeh_worker(loop, mpirun):
    """Worker dashboard on --bokeh-worker-port (currently skipped)."""
    with tmpfile(extension="json") as fn:
        cmd = mpirun + ["-np", "2", "dask-mpi", "--scheduler-file", fn, "--bokeh-worker-port", "59584"]
        with popen(cmd, stdin=FNULL):
            check_port_okay(59584)
| StarcoderdataPython |
6571861 | from flask import current_app
from flask_mail import Message
from . import celery, mail
from .inception import execute_final
@celery.task()
def execute_task(id):
    """Celery task: run ``execute_final`` for *id* with the real app object.

    ``current_app`` is a proxy; ``_get_current_object()`` unwraps it so the
    concrete Flask app can be handed to code running in the worker.
    """
    app = current_app._get_current_object()
    return execute_final(app, id)
@celery.task()
def send_mail(subject, body, receiver):
    """Celery task: send an HTML e-mail *body* to a single *receiver*."""
    msg = Message(subject, recipients=[receiver])
    msg.html = body
    mail.send(msg)
| StarcoderdataPython |
def polybius(text):
    """Encode *text* with the classic 5x5 Polybius square.

    Each letter becomes a two-digit "row column" pair (1-based); I and J
    share a cell, so both encode as "24". Non-alphabetic characters are
    passed through unchanged.

    Fix: lowercase letters previously passed ``isalpha()`` but produced
    out-of-range coordinates (e.g. 'a' -> index 32); they are now
    upper-cased first, so polybius("hello") == polybius("HELLO").
    NOTE: non-ASCII alphabetic characters still yield unspecified output,
    as in the original.
    """
    res = ""
    for ch in text:
        if ch.isalpha():
            ch = ch.upper()
            idx = ord(ch) - ord("A")
            # Collapse J onto I so the 26 letters fit a 5x5 grid.
            if ch >= "J":
                idx -= 1
            res += f"{idx // 5 + 1}{idx % 5 + 1}"
        else:
            res += ch
    return res
359725 | <reponame>justin-sayah/Friendable
from firebase_admin import credentials, firestore, initialize_app
from generate_suggestion import gen_result
# Service-account credentials for Firestore; the app initialization is
# commented out, so this module assumes initialize_app() ran elsewhere.
cred = credentials.Certificate("google_auth_creds.json")
# initialize_app(cred)
db = firestore.client()
def get_group_object(user_number):
    """Return (as a dict) the first ``group`` document whose ``people``
    array contains *user_number*; returns None implicitly if no match."""
    print('getting the group for ' + str(user_number))
    groups = db.collection('group')
    docs = groups.where(u'people', u'array_contains', str(user_number)).stream()
    #should only be one doc
    for doc in docs:
        print(doc.to_dict())
        return doc.to_dict()
def make_groups():
    """Partition the users of each class (0..9) into groups of roughly
    ``g`` = 5 members and write each group to Firestore via create_group()."""
    num_groups = 10
    users = db.collection('users')
    g = 5
    for i in range(0, num_groups):
        print('group number ' + str(i))
        n = 0
        docs = users.where(u'Classes', u'==', i).stream()
        members = []
        for doc in docs:
            n += 1
            doc = doc.to_dict()
            # if doc['Phone'] != None:
            #     doc['number'] = doc['Phone']
            # print(doc)
            # print(type(doc))
            members.append(doc)
        # print('this group has ' + str(n))
        # Target size of each of the n//g groups; the formula spreads the
        # n members as evenly as possible — presumably intentional, but
        # note it raises ZeroDivisionError when n < g (n//g == 0): verify.
        group_nums = [1+max(0,n-(j+1))//(n//g) for j in range(n//g)]
        # print(group_nums)
        # create the groups
        j = 0
        current_group = []
        group_index = 0
        # Walk the member list, flushing a group each time its quota
        # (group_nums[group_index]) is exhausted.
        while(j < len(members)):
            if group_nums[group_index] == 0:
                #write group to database
                create_group(current_group, group_index, i)
                group_index += 1
                current_group = []
            current_group.append(members[j])
            j += 1
            group_nums[group_index] -= 1
            # print('the group index is')
            # print(group_index)
            # print(group_nums)
        # Flush the trailing partial group, if any.
        if current_group != []:
            create_group(current_group, group_index, i)
            # print(current_group)
def create_group(current_group, group_index, class_num):
    """Write one group document (id = str(group_index) + str(class_num))
    containing the members' phone numbers and a generated activity."""
    groups = db.collection('group')
    print('creating groups')
    print(group_index)
    print(class_num)
    #need to get an activity
    activity = gen_result()
    # current_group = ['users/' + str(member['number']) for member in current_group]
    current_group = [str(member['number']) for member in current_group]
    print(current_group)
    # NOTE(review): `dict` shadows the builtin name inside this function.
    dict = {}
    dict['people'] = current_group #need to be references to the collection
    dict['confirmed'] = []
    dict['not_going'] = []
    dict['group_id'] = str(group_index) + str(class_num)
    dict['activity'] = activity #need to be an activity
    # print(dict)
    groups.document(str(group_index) + str(class_num)).set(dict)
# make_groups()
# get_group_object('9788065553') | StarcoderdataPython |
1940337 | <reponame>pponnuvel/hotsos<filename>plugins/storage/pyparts/ceph_daemon_checks.py
from core.checks import DPKGVersionCompare
from core.issues import (
issue_types,
issue_utils,
)
from core.utils import sorted_dict
from core.plugins.storage import (
bcache,
ceph,
)
from core.plugins.kernel import KernelChecksBase
YAML_PRIORITY = 1
# cache_available_percent threshold used by the LP#1936136 bcache check.
LP1936136_BCACHE_CACHE_LIMIT = 70
# Hard ceiling of PGs per OSD before OSDs stop creating PGs.
OSD_PG_MAX_LIMIT = 500
# Recommended PG count per OSD; deviations > 30% are flagged.
OSD_PG_OPTIMAL_NUM = 200
OSD_META_LIMIT_KB = (10 * 1024 * 1024)  # 10 GiB expressed in KiB
OSD_MAPS_LIMIT = 500 # mon_min_osdmap_epochs default
class CephOSDChecks(ceph.CephChecksBase):
    """Collection of Ceph OSD health checks.

    Each ``check_*``/``get_*`` method inspects cluster state gathered by
    the hotsos CLI helpers and registers issues via ``issue_utils`` when
    something looks wrong; ``__call__`` runs them all.
    """

    def check_osdmaps_size(self):
        """
        Check if there are too many osdmaps

        By default mon_min_osdmaps_epochs (=500) osdmaps are stored by the
        monitors. However, if the cluster isn't healthy for a long time,
        the number of osdmaps stored will keep increasing which can result
        in more disk utilization, possibly slower mons, etc.

        Doc: https://docs.ceph.com/en/latest/dev/mon-osdmap-prune/
        """
        report = self.cli.ceph_report_json_decoded()
        if not report:
            return

        try:
            osdmaps_count = len(report['osdmap_manifest']['pinned_maps'])
            # mon_min_osdmap_epochs (= 500) maps are held by default. Anything
            # over the limit, we need to look at and decide whether this could
            # be temporary or needs further investigation.
            if osdmaps_count > OSD_MAPS_LIMIT:
                msg = ("Found {} pinned osdmaps. This can affect mon's "
                       "performance and also indicate bugs such as "
                       "https://tracker.ceph.com/issues/44184 and "
                       "https://tracker.ceph.com/issues/47290"
                       .format(osdmaps_count))
                issue_utils.add_issue(issue_types.CephMapsWarning(msg))
        except (ValueError, KeyError):
            # Report present but without an osdmap_manifest section.
            return

    def check_require_osd_release(self):
        """Flag OSDs whose reported release name differs from the cluster's
        require_osd_release setting."""
        cluster = ceph.CephCluster()
        expected_rname = cluster.daemon_dump('osd').get('require_osd_release')
        if not expected_rname:
            return

        for rname in cluster.daemon_release_names('osd'):
            if expected_rname != rname:
                msg = ("require_osd_release is {} but one or more osds is on "
                       "release {} - needs fixing".format(expected_rname,
                                                          rname))
                issue_utils.add_issue(issue_types.CephOSDError(msg))

    def check_osd_msgr_protocol_versions(self):
        """Check if any OSDs are not using the messenger v2 protocol

        The msgr v2 is the second major revision of Ceph’s on-wire protocol
        and should be the default Nautilus onward.
        """
        if self.release_name <= 'mimic':
            """ v2 only available for >= Nautilus. """
            return

        v1_osds = []
        cluster = ceph.CephCluster()
        osd_dump = cluster.daemon_dump('osd')
        if not osd_dump:
            return

        osd_count = int(cluster.daemon_dump('osd').get('max_osd', 0))
        if osd_count < 1:
            return

        # Walk osd.0 .. osd.N-1 and record any whose dump entry has no
        # "v2:" address binding.
        counter = 0
        while counter < osd_count:
            key = "osd.{}".format(counter)
            version_info = cluster.daemon_dump('osd').get(key)
            if version_info and version_info.find("v2:") == -1:
                v1_osds.append(counter+1)

            counter = counter + 1

        if v1_osds:
            msg = ("{} OSDs do not bind to v2 address".format(len(v1_osds)))
            issue_utils.add_issue(issue_types.CephOSDWarning(msg))

    def check_ceph_bluefs_size(self):
        """
        Check if the BlueFS metadata size is too large
        """
        bad_meta_osds = []
        ceph_osd_df_tree = self.cli.ceph_osd_df_tree_json_decoded()
        if not ceph_osd_df_tree:
            return

        for device in ceph_osd_df_tree['nodes']:
            # Negative ids are crush buckets (hosts/racks); >= 0 are OSDs.
            if device['id'] >= 0:
                meta_kb = device['kb_used_meta']
                # Usually the meta data is expected to be in 0-4G range
                # and we check if it's over 10G
                if meta_kb > OSD_META_LIMIT_KB:
                    bad_meta_osds.append(device['name'])

        if bad_meta_osds:
            msg = ("{} OSDs have metadata size larger than 10G. This "
                   "indicates compaction failure/bug. Possibly affected by "
                   "https://tracker.ceph.com/issues/45903. "
                   "A workaround (>= Nautilus) is to manually compact using "
                   "'ceph-bluestore-tool'"
                   .format(bad_meta_osds))
            issue_utils.add_issue(issue_types.CephOSDWarning(msg))

    def get_ceph_pg_imbalance(self):
        """ Validate PG counts on OSDs

        Upstream recommends 50-200 OSDs ideally. Higher than 200 is also valid
        if the OSD disks are of different sizes but that tends to be the
        exception rather than the norm.

        We also check for OSDs with excessive numbers of PGs that can cause
        them to fail.
        """
        suboptimal_pgs = {}
        error_pgs = {}
        ceph_osd_df_tree = self.cli.ceph_osd_df_tree_json_decoded()
        if not ceph_osd_df_tree:
            return

        for device in ceph_osd_df_tree['nodes']:
            if device['id'] >= 0:
                osd_id = device['name']
                pgs = device['pgs']
                if pgs > OSD_PG_MAX_LIMIT:
                    error_pgs[osd_id] = pgs

                # Percentage deviation from the optimal PG count.
                margin = abs(100 - (100.0 / OSD_PG_OPTIMAL_NUM * pgs))
                # allow 30% margin from optimal OSD_PG_OPTIMAL_NUM value
                if margin > 30:
                    suboptimal_pgs[osd_id] = pgs

        if error_pgs:
            info = sorted_dict(error_pgs, key=lambda e: e[1], reverse=True)
            self._output['osd-pgs-near-limit'] = info
            msg = ("{} osds found with > {} pgs - this is close to the hard "
                   "limit at which point OSDs will stop creating pgs and fail "
                   "- please investigate".
                   format(len(error_pgs), OSD_PG_MAX_LIMIT))
            issue = issue_types.CephCrushError(msg)
            issue_utils.add_issue(issue)

        if suboptimal_pgs:
            info = sorted_dict(suboptimal_pgs, key=lambda e: e[1],
                               reverse=True)
            self._output['osd-pgs-suboptimal'] = info
            msg = ("{} osds found with > 10% margin from optimal {} pgs.".
                   format(len(suboptimal_pgs), OSD_PG_OPTIMAL_NUM))
            issue = issue_types.CephCrushWarning(msg)
            issue_utils.add_issue(issue)

    @staticmethod
    def version_as_a_tuple(ver):
        """
        Return a version string as a tuple for easy comparison
        """
        return tuple(map(int, (ver.split("."))))

    def get_ceph_versions_mismatch(self):
        """
        Get versions of all Ceph daemons.
        """
        versions = ceph.CephCluster().daemon_versions()
        if not versions:
            return

        global_vers = set()
        daemon_version_info = {}
        # these store highest ver and daemon name with highest ver
        h_version = "0.0.0"
        h_daemon = ""
        for daemon_type in versions:
            # skip the catchall
            if daemon_type == 'overall':
                continue

            vers = []
            for version in versions[daemon_type]:
                vers.append(version)
                global_vers.add(version)
                # store the highest version any component has
                if self.version_as_a_tuple(version) > \
                        self.version_as_a_tuple(h_version):
                    h_version = version
                    h_daemon = daemon_type

            if vers:
                daemon_version_info[daemon_type] = vers

        if daemon_version_info:
            self._output['versions'] = daemon_version_info

        if len(global_vers) > 1:
            msg = ('ceph daemon versions not aligned possibly because '
                   'cluster upgrade is incomplete/incorrectly done. '
                   'All daemons, except the clients, should be on the '
                   'same version for proper functioning.')
            issue = issue_types.CephDaemonWarning(msg)
            issue_utils.add_issue(issue)

        # check if mon is lower than highest version we stored earlier
        for version in versions.get('mon', []):
            if self.version_as_a_tuple(version) < \
                    self.version_as_a_tuple(h_version):
                msg = ("mon version {} is lower than {} version {}"
                       .format(version, h_daemon, h_version))
                issue = issue_types.CephDaemonVersionsError(msg)
                issue_utils.add_issue(issue)

    def _build_buckets_from_crushdump(self, crushdump):
        """Index the crush dump's buckets by id, keeping name, type and the
        ids of the items each bucket contains."""
        buckets = {}
        # iterate jp for each bucket
        for bucket in crushdump["buckets"]:
            bid = bucket["id"]
            items = []
            for item in bucket["items"]:
                items.append(item["id"])

            buckets[bid] = {"name": bucket["name"],
                            "type_id": bucket["type_id"],
                            "type_name": bucket["type_name"],
                            "items": items}

        return buckets

    def get_crushmap_mixed_buckets(self):
        """
        Report buckets that have mixed type of items,
        as they will cause crush map unable to compute
        the expected up set
        """
        osd_crush_dump = self.cli.ceph_osd_crush_dump_json_decoded()
        if not osd_crush_dump:
            return

        bad_buckets = []
        buckets = self._build_buckets_from_crushdump(osd_crush_dump)
        # check all bucket
        for bid in buckets:
            items = buckets[bid]["items"]
            type_ids = []
            for item in items:
                if item >= 0:
                    # Non-negative item ids are OSDs (type id 0).
                    type_ids.append(0)
                else:
                    type_ids.append(buckets[item]["type_id"])

            if not type_ids:
                continue

            # verify if the type_id list contain mixed type id
            if type_ids.count(type_ids[0]) != len(type_ids):
                bad_buckets.append(buckets[bid]["name"])

        if bad_buckets:
            msg = ("mixed crush bucket types identified in buckets '{}'. "
                   "This can cause data distribution to become skewed - "
                   "please check crush map".format(bad_buckets))
            issue = issue_types.CephCrushWarning(msg)
            issue_utils.add_issue(issue)

    def check_bcache_vulnerabilities(self):
        """Warn when local OSDs sit on bcache devices and the installed
        ceph-osd / kernel / config combination looks susceptible to
        LP#1936136; all the early returns below exclude known-safe cases."""
        has_bcache = False
        for osd in self.local_osds:
            dev = osd.device
            if self.is_bcache_device(dev):
                has_bcache = True

        if not has_bcache:
            return

        # NOTE(review): this skips the warning as soon as *one* cacheset
        # reports cache_available_percent >= 70 — confirm that is the
        # intended semantics on hosts with multiple cachesets.
        for cset in bcache.BcacheChecksBase().get_sysfs_cachesets():
            if (cset.get("cache_available_percent") >=
                    LP1936136_BCACHE_CACHE_LIMIT):
                return

        # Get version of osd based on package installed. This is prone to
        # inaccuracy since the daemon many not have been restarted after
        # package update.
        current = self.apt_check.get_version('ceph-osd')
        if current <= DPKGVersionCompare("13.0.1"):
            return

        if current >= DPKGVersionCompare("14.2.10") and \
                current <= DPKGVersionCompare("14.2.21"):
            return

        if current >= DPKGVersionCompare("15.2.2") and \
                current <= DPKGVersionCompare("15.2.12"):
            return

        if current == DPKGVersionCompare("16.1.0") or \
                current == DPKGVersionCompare("17.0.0"):
            return

        # NOTE(review): lexicographic string comparison of kernel versions,
        # e.g. "5.10" < "5.4" as strings — verify this behaves as intended.
        if KernelChecksBase().version >= "5.4":
            return

        bluefs_buffered_io = self.ceph_config.get('bluefs_buffered_io')
        if bluefs_buffered_io is False:
            return

        # NOTE: we need a way to check that actual osd config
        # then bluefs_buffered_io is True by default
        msg = ("host may be vulnerable to bcache bug 1936136 - please ensure "
               "bluefs_buffered_io is set to False or upgrade to kernel "
               ">= 5.4")
        issue = issue_types.CephCrushWarning(msg)
        issue_utils.add_issue(issue)

    def __call__(self):
        """Entry point: publish local OSD info then run every check."""
        if self.local_osds:
            osds = {}
            for osd in self.local_osds:
                osds.update(osd.to_dict())

            self._output["local-osds"] = sorted_dict(osds)

            self.check_bcache_vulnerabilities()

        self.check_require_osd_release()
        self.check_osd_msgr_protocol_versions()
        self.check_ceph_bluefs_size()
        self.get_ceph_pg_imbalance()
        self.get_ceph_versions_mismatch()
        self.get_crushmap_mixed_buckets()
        self.check_osdmaps_size()
| StarcoderdataPython |
1777668 | """
testing API from game.py
"""
from data.game import Game
def main():
    """Exercise the Game API: make a few moves, undo one, print all views."""
    gm = Game(api_use=True)
    gm.new_game()
    # Presumably encodes a "flip the stock pile" move ('S0' source/target,
    # 0 cards) — TODO confirm against game.Game's move format.
    flip_stock = ['S0', 0, 'S0']
    gm.api_make_move(flip_stock)
    gm.api_make_move(flip_stock)
    gm.api_make_move(flip_stock)
    gm.api_make_move(flip_stock)
    gm.api_make_move(flip_stock)
    gm.api_undo_move()
    print(gm.board)
    print(gm.api_get_moves())
    print()
    print(gm.api_read_stock())
    print()
    print(gm.api_read_waste_pile())
    print()
    print(gm.api_read_foundations())
    print()
    print(gm.api_read_tableaus())
    input("Press enter to exit\n")
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3257296 | <filename>ldap2sql.py
#!/usr/bin/python
import inspect
import os
import sys
import urllib
import urllib2
import hashlib
import logging
from sqlalchemy import create_engine
# Python 2 idiom: force the process default string encoding to UTF-8 so
# implicit str/unicode conversions of LDAP attribute values do not raise.
reload(sys)
sys.setdefaultencoding('UTF8')
# Make the bundled "contrib" directory importable; it provides the
# activedirectory module used below.
cmd_folder = os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "contrib"))
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)
from activedirectory import ActiveDirectory
# SQL executed against the JIRA database by CustomUpdater.update_stats():
# one (metric, value) row per JIRA entity count, used to fill the matching
# columns of custom.stats.
jira_stats = """
select 'issues' as metric, count(*) as value from jiraissue
UNION
select 'projects', count(*) from project
UNION
select 'customfields', count(*) from customfield
UNION
select 'workflows', count(distinct name) from os_wfentry
UNION
select 'users', count(*) from cwd_user
UNION
SELECT 'users_active', count(*)
FROM cwd_user, cwd_user_attributes
WHERE cwd_user_attributes.user_id = cwd_user.id
AND cwd_user_attributes.attribute_name = 'login.previousLoginMillis'
UNION
select 'roles', count(*) as roles from projectrole
UNION
select 'dashboards', count(*) as dashboards from portalpage
UNION
select 'plugins', count(*) as plugins from pluginstate where pluginenabled = 'true'
UNION
select 'actions', count(*) as actions from jiraaction
UNION
select 'issuetypes', count(*) as issuetype from issuetype
UNION
select 'statuses', count(*) as issuestatus from issuestatus
UNION
select 'issuetypescreenschemes', count(*) from issuetypescreenscheme
UNION
select 'issuelinktypes', count(*) from issuelinktype
UNION
select 'fieldscreenschemes', count(*) from fieldscreenscheme
UNION
select 'fieldscreens', count(*) from fieldscreen
UNION
select 'fieldlayouts', count(*) from fieldlayout
UNION
select 'fieldlayoutschemes', count(*) from fieldlayoutscheme
UNION
select 'fieldconfigscheme', count(*) from fieldconfigscheme
UNION
select 'changegroup', count(*) from changegroup
UNION
select 'changeitem', count(*) from changeitem
UNION
select 'agileboards', count(*) from "AO_60DB71_RAPIDVIEW"
UNION
select 'attachments', count(*) as attachments from fileattachment
UNION
select 'attachments_gb', round(sum(filesize)/1024/1024/1024) as attachments_gb from fileattachment
order by metric
;
"""
class CustomUpdater(object):
    """Sync Active Directory users and JIRA statistics into the ``custom``
    schema of a SQL database (Python 2 code: ``iteritems``/``urllib2``).

    The update methods both update and insert elements in the table as follows:
    UPDATE table SET some_column='something' WHERE another_column='something else';
    INSERT INTO table (some_column) 'something' WHERE NOT EXISTS (SELECT 1 FROM table WHERE another_column='something else')
    """
    def __init__(self, stats_uri=None, activedirectory_uri=None):
        if stats_uri is not None:
            self.engine = create_engine(stats_uri, convert_unicode=True)
        if activedirectory_uri is not None:
            self.ad = ActiveDirectory(activedirectory_uri, paged_size=1000, size_limit=50000)
        # LDAP attribute names and the custom.activedirectory columns they
        # map to — the two lists are parallel (same index = same field).
        self.fields = ['mail', 'title', 'manager', 'distinguishedName', 'postalCode', 'telephoneNumber', 'givenName', 'name', 'facsimileTelephoneNumber',
                       'department', 'company', 'streetAddress', 'sAMAccountType', 'mobile', 'c', 'l', 'st', 'extensionAttribute14',
                       'extensionAttribute15', 'extensionAttribute3', 'sAMAccountName', 'userAccountControl']
        self.sql_names = ['mail', 'title', 'managerdn', 'distinguishedname', 'postalcode', 'phone', 'givenname', 'name', 'fax',
                          'department', 'company', 'streetaddress', 'samaccounttype', 'mobile', 'country', 'locale', 'state', 'vp',
                          'region', 'office', 'username', 'useraccountcontrol']
        # Timestamp columns and the LDAP attributes they come from (parallel).
        self.sql_times = ['created', 'changed']
        self.time_fields = ['whenCreated', 'whenChanged']
        self.exists = None
        self.elem_dict = {}
        self.users = []

    def update_stats(self):
        """Updates all the fields in custom.stats"""
        # Ensure today's row exists; a duplicate-key error means it already
        # does, which is fine.
        try:
            self.engine.execute('INSERT INTO custom.stats (date) (SELECT CURRENT_DATE);')
        except Exception:
            pass
        for row in self.engine.execute(jira_stats):
            self.elem_dict[str(row[0])] = row[1]
        for key, value in self.elem_dict.iteritems():
            update_query = 'UPDATE custom.stats SET %s=%s WHERE date=CURRENT_DATE;' % (key, value)
            self.engine.execute(update_query)

    def update_activedirectory(self, full=False):
        """Updates most of the fields in custom.activedirectory

        The method gets all the attributes for each user whose account was modified since the day of the last update
        and parses those attributes to meet the fields in the table"""
        if full:
            newf = None
        else:
            # Incremental run: only users changed since the last sync.
            newf = "(whenChanged>=" + self.get_max_date_ad() + ")"
        self.users = self.ad.get_users(new_filter=newf, attrlist=self.fields)
        logging.info('Found %s users in AD using filter = %s' % (len(self.users), newf))
        if not self.users:
            # NOTE(review): NotImplemented is not an exception class; this
            # raise will itself fail with a TypeError — likely meant
            # NotImplementedError (or a plain RuntimeError).
            raise NotImplemented("WTH")

        for count, user in enumerate(self.users):
            if count % 100 == 0:
                logging.info("%s..." % count)
            #print count, user
            try:
                atr = self.users[user]
            except NotImplementedError as e:
                logging.error("Skipped user %s because %s" % (user, e))
                continue
            # Build the UPDATE half of the upsert, bumping the sync counter.
            update_query = 'UPDATE custom.activedirectory SET counter = counter+1 '
            for i in range(len(self.fields)):
                update_query = self.update_fields(update_query, atr, self.fields[i], self.sql_names[i])
            update_query = self.update_times(update_query, atr)
            # Bit 0x02 of userAccountControl marks a disabled account.
            if int(atr['userAccountControl']) & 0x02:
                update_query += ', is_active=\'false\''
            else:
                update_query += ', is_active=\'true\''
            update_query += ' WHERE username=\'' + user + '\';'
            # Build the INSERT half, listing only the columns whose LDAP
            # attribute is actually present for this user.
            insert_query = 'INSERT INTO custom.activedirectory ('
            first = True
            for i in range(len(self.sql_names)):
                try:
                    atr[self.fields[i]]
                    if not first:
                        insert_query += ','
                    insert_query += self.sql_names[i]
                    first = False
                except (IndexError, KeyError):
                    pass
            for i in range(len(self.sql_times)):
                try:
                    atr[self.time_fields[i]]
                    insert_query += ', ' + self.sql_times[i]
                except (IndexError, KeyError):
                    pass
            # UPSERT implementation based on http://stackoverflow.com/a/6527838/99834
            insert_query += ',is_active) SELECT '
            insert_query = self.insert_fields(insert_query, atr)
            insert_query = self.insert_times(insert_query, atr)
            if int(atr['userAccountControl']) & 0x02:
                insert_query += ',\'false\''
            else:
                insert_query += ',\'true\''
            insert_query += ' WHERE NOT EXISTS (SELECT 1 FROM custom.activedirectory WHERE username= \''\
                            + self.escape_quote(user) + '\');'
            self.engine.execute(update_query)
            self.engine.execute(insert_query)

    # updating managers, LDAP returns DN instead of username for managers
    # we look for all mana
    def update_deleted(self):
        """Checks the deleted users from ldap by comparing the users from ldap with those from the database"""
        sql_user = []
        for row in self.engine.execute("SELECT samaccountname FROM custom.activedirectory WHERE is_deleted = 'false' ORDER BY samaccountname"):
            if row[0]:
                sql_user.append(row[0].encode('utf-8'))
        self.users = self.ad.get_users()
        for i in sql_user:
            if not i in self.users:
                logging.info("User %s was deleted from LDAP" % i)
                # NOTE(review): "SET is_deleted = 'true' and deleted = now()"
                # uses `and` where a comma is expected — as written, most SQL
                # dialects parse this as a boolean expression; verify.
                self.engine.execute("UPDATE custom.activedirectory SET is_deleted = 'true' and deleted = now() where username = '%s'" % i)

    def check_gravatar(self):
        """Creates the url that should exist if the user has a gravatar picture conected with his email.
        Then it checks if the url exists"""
        # Everything below this return is currently dead code.
        return # TODO: re-enable gravator check
        self.users = self.ad.get_users()
        for count, user in enumerate (self.users):
            atr = self.ad.get_attributes(user = user)
            try:
                email = atr['mail']
                default = 'http://www.gravatar.com/avatar/'
                size = 40
                gravatar_url = "http://www.gravatar.com/avatar/" + hashlib.md5(email.lower()).hexdigest() + "?"
                gravatar_url += urllib.urlencode({'d':default, 's':str(size)})
                try:
                    # No content overlap with the known default image is
                    # taken to mean the user has a custom avatar.
                    u = self.find_matches(gravatar_url)
                    if len(u) == 0:
                        has_avatar = 'true'
                    else:
                        has_avatar = 'false'
                except (urllib2.HTTPError, urllib2.URLError):
                    has_avatar = 'false'
            except (IndexError, KeyError, TypeError):
                has_avatar = 'false'
            self.engine.execute('UPDATE custom.activedirectory SET has_gravatar=\'%s\' WHERE username=\'%s\';' % (has_avatar, user))

    def find_matches(self, newu):
        """Download a reference default-avatar URL and *newu* in 4 KiB chunks
        and return the list of chunks (by md5) common to both."""
        urls = []
        urls.append('http://www.gravatar.com/avatar/64908bc7260a8be06b142d34f83b9781?s=40&d=http%3A%2F%2Fwww.gravatar.com%2Favatar%2F')
        urls.append(newu)
        d = {}
        url_contents = {}
        matches = []
        for url in urls:
            c = urllib2.urlopen(url)
            url_contents[url] = []
            while 1:
                r = c.read(4096)
                if not r: break
                md5 = hashlib.md5(r).hexdigest()
                url_contents[url].append(md5)
                if md5 in d:
                    url2 = d[md5]
                    matches.append((md5, url, url2))
                else:
                    d[md5] = []
                d[md5].append(url)
        return matches

    def update_all(self, full=False):
        """Updates all the fields in all the custom tables"""
        logging.info("Updating changes from AD...")
        self.update_activedirectory(full=full)
        for row in self.engine.execute('SELECT CURRENT_DATE'):
            current_date = str(row[0])
            current_date = current_date[:10]
            break
        for row in self.engine.execute('SELECT MAX(gravatar_check_date) FROM custom.activedirectory;'):
            check_date = str(row[0])
            check_date = check_date[:10]
            break
        # Only re-check gravatars at most once per day.
        if check_date == current_date:
            self.check_gravatar()
        self.update_stats()
        logging.info("Updating deleted accounts...")
        self.update_deleted() # must be before managers!
        logging.info("Updating managers...")
        self.update_managers()

    def update_managers(self):
        """
        This will populate the manager field with the username of the manager, based on the managerdn (the field returned by ldap)
        :return:
        """
        for row in self.engine.execute("""select ad.username, ad.manager as oldmanager, ad2.username as newmanager
            from custom.activedirectory ad
            left join custom.activedirectory ad2 on ad.managerdn = ad2.distinguishedname and NOT ad2.is_deleted
            where ad.managerdn is not NULL AND ad.manager != ad2.username
            --and ad.manager != ad2.username
            --limit 100;"""):
            (username, oldmanager, newmanager) = row
            self.engine.execute("UPDATE custom.activedirectory SET manager='%s' where username='%s'" % (newmanager, username))

    def update_fields(self, update_query, atr, varname, sql_name):
        """Updates the update_query string with the fields that don't require special parsing"""
        try:
            atr[varname]
            update_query += ', ' + sql_name + "='" + self.escape_quote(atr[varname]).encode('utf-8') + "'"
        except (IndexError, KeyError):
            # Attribute absent for this user: leave the column untouched.
            pass
        return update_query

    def insert_fields(self, insert_query, atr):
        """Updates the insert_query string with the same fields as the ones above"""
        first = True
        for i in range(len(self.sql_names)):
            try:
                atr[self.fields[i]]
                if not first:
                    insert_query += ','
                insert_query += '\'' + self.escape_quote(atr[self.fields[i]]).encode('utf-8') + '\''
                first = False
            except (IndexError, KeyError):
                pass
        return insert_query

    def update_times(self, update_query, atr):
        """Updates the update_query string with the fields that require special parsing (date variables)"""
        for i in range(len(self.time_fields)):
            try:
                update_query += ', ' + self.sql_times[i] + '=\'' + self.convert_date(atr[self.time_fields[i]]).encode('utf-8') + '\''
            except (IndexError, KeyError):
                pass
        return update_query

    def insert_times(self, insert_query, atr):
        """Same as the above just for insert_query"""
        for i in range(len(self.sql_times)):
            try:
                atr[self.time_fields[i]]
                insert_query += ', \'' + self.convert_date(atr[self.time_fields[i]]).encode('utf-8') + '\''
            except (IndexError, KeyError):
                pass
        return insert_query

    def escape_quote(self, string):
        """Escapes the quotes in a string with double quote:
        someone's string => someone''s string"""
        # Equivalent to string.replace("'", "''"), done by splicing an
        # extra quote before each one found; `count` tracks the insertion
        # offset in the growing result.
        new_str = string
        count = 0
        for i in range(len(string)):
            if string[i] == '\'':
                new_str = new_str[:count] + '\'' + string[i:]
                count += 1
            count += 1
        return new_str

    def get_max_date_ad(self):
        """Determines the last date at which the table was updated.

        Finds the last date at which an account from the table was updated
        and returns that date"""
        for row in self.engine.execute("SELECT MAX(changed) FROM custom.activedirectory"):
            date = row[0]
            break
        date = (str(date)).split('-')
        if len(date) != 3 or len(date[0]) != 4 or len(date[1]) != 2 or len(date[2]) != 2:
            logging.fatal("Couldn't get maximum date from custom.activedirectory")
            sys.exit(1)
        # Render as an LDAP generalized-time filter value (midnight UTC).
        max_date = date[0] + date[1] + date[2] + "000000.0Z"
        return max_date

    def convert_date(self, string):
        """Converts date from the ldap timestamp to the sql timestamp
        20010101121212.0Z => 2001-01-01 """
        string = string[:8]
        if len(string) != 8:
            return None
        try:
            int(string)
            res = string[:4] + '-' + string[4:6] + '-' + string[6:]
            return res
        except ValueError:
            # Non-numeric prefix: not a valid LDAP timestamp.
            return None
def main():
    """Configure logging, read connection URIs from the environment and run
    a (non-full) sync of all custom tables."""
    logging_format = "%(asctime).19s %(levelname)8s %(message)s"
    logging.basicConfig(
        level=logging.INFO,
        format=logging_format,
        #filename='%s.log' % JIRA_PROJECT,
        #mode="w"
    )

    if 'LDAP2DB_DB_URI' not in os.environ or 'LDAP2DB_AD_URI' not in os.environ:
        logging.fatal("""You need to set configuration using environment variables.

LDAP2DB_DB_URI='postgresql+pg8000://dbuser:dbpass@db.example.com/dbname'
LDAP2DB_AD_URI='ldaps://pdc.example.com:3269/dc=example,dc=com'
""")
        sys.exit(1)
    db_uri = os.environ['LDAP2DB_DB_URI']
    ad_uri = os.environ['LDAP2DB_AD_URI']

    custom = CustomUpdater(
        stats_uri=db_uri,
        activedirectory_uri=ad_uri)
    custom.update_all(full=False)
# Run the sync only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4864093 | <reponame>tyranus-project/steganography-telegram-bot
from aiogram import Dispatcher
from loguru import logger
from .common import register_common_handlers
from .decryption import register_decryption_handlers
from .encryption import register_encryption_handlers
from .exception import register_exception_handlers
def register_handlers(dp: Dispatcher) -> None:
    """Sets all bot handlers.

    NOTE: aiogram dispatches updates to handlers in registration order,
    so reordering these calls can change which handler matches first.
    """
    logger.info("Configuring handlers...")
    register_common_handlers(dp)
    register_encryption_handlers(dp)
    register_decryption_handlers(dp)
    register_exception_handlers(dp)
| StarcoderdataPython |
8190569 | <reponame>SpeagleYao/2048-api<filename>2048_api_final/evaluate.py<gh_stars>0
from game2048.game import Game
from game2048.displays import Display
from game2048.agents import Agent
from keras.models import load_model
import keras
import tensorflow as tf
import numpy as np
def single_run(size, score_to_win, AgentClass, **kwargs):
    """Play one full game with the given agent class and return the score."""
    game = Game(size, score_to_win)
    player = AgentClass(game, display=Display(), **kwargs)
    player.play(verbose=True)
    return game.score
#NUM_X_CLASSES = 14
class TestAgent(Agent):
    """2048 agent that chooses a move by majority vote over seven CNNs."""

    def __init__(self, game, display=None):
        self.game = game
        self.display = display
        # Load the pre-trained ensemble members once at construction time
        # (model files must exist in the working directory).
        self.model1 = load_model('my_best_DL_2048_model1.h5')
        self.model2 = load_model('my_best_DL_2048_model2.h5')
        self.model3 = load_model('my_best_DL_2048_model3.h5')
        self.model4 = load_model('my_best_DL_2048_model4.h5')
        self.model5 = load_model('my_best_DL_2048_model5.h5')
        self.model6 = load_model('my_best_DL_2048_model6.h5')
        self.model7 = load_model('my_best_DL_2048_model7.h5')

    def step(self):
        """Return the ensemble's majority-vote move index for the current board."""
        # log2 of each tile (empty tiles mapped to 1 -> 0), shaped for the CNNs,
        # then one-hot encoded into 14 channels (tiles up to 2**13).
        board = np.log2(np.maximum(np.array(self.game.board), 1)).reshape(1, 4, 4, 1)
        board = keras.utils.np_utils.to_categorical(board, 14)
        direction = []
        direction.extend(np.argmax(self.model1.predict(board), axis=1))
        direction.extend(np.argmax(self.model2.predict(board), axis=1))
        direction.extend(np.argmax(self.model3.predict(board), axis=1))
        direction.extend(np.argmax(self.model4.predict(board), axis=1))
        direction.extend(np.argmax(self.model5.predict(board), axis=1))
        direction.extend(np.argmax(self.model6.predict(board), axis=1))
        direction.extend(np.argmax(self.model7.predict(board), axis=1))
        # Majority vote across the seven per-model predictions.
        direction = np.argmax(np.bincount(direction))
        return direction
if __name__ == '__main__':
    # Run N_TESTS independent games and report the mean final score.
    GAME_SIZE = 4
    SCORE_TO_WIN = 2048
    N_TESTS = 10
    '''====================
Use your own agent here.'''
    '''===================='''
    scores = []
    for _ in range(N_TESTS):
        score = single_run(GAME_SIZE, SCORE_TO_WIN,
                           AgentClass=TestAgent)
        scores.append(score)
    print("Average scores: @%s times" % N_TESTS, sum(scores) / len(scores))
| StarcoderdataPython |
6695799 | #!/usr/bin/env python3
import sys
def make_crc_table(polynomial):
    """Build the 256-entry lookup table for a reflected (LSB-first) CRC-32.

    Equivalent to the classic per-byte construction: the remaining input
    bits and the partial remainder can be kept in a single register.
    """
    table = []
    for index in range(256):
        value = index
        for _ in range(8):
            if value & 1:
                value = (value >> 1) ^ polynomial
            else:
                value >>= 1
        table.append(value)
    return table
def calc_crc32(table, crc, data):
    """Fold *data* into *crc* using the precomputed table.

    The working state is bit-inverted on entry and exit, as in the
    standard reflected CRC-32 scheme, so successive calls can chain.
    """
    state = ~crc & 0xFFFFFFFF
    for byte in data:
        state = table[(byte ^ state) & 0xFF] ^ (state >> 8)
    return ~state & 0xFFFFFFFF
# Usage: script.py <input-file> <output-file>
if len(sys.argv) < 3:
    print("Not enough arguments")
    exit()

# Lookup table for the device-specific CRC polynomial.
table = make_crc_table(0xC385B254)

# CRC A: CRC of the whole input file, streamed in 32 KiB chunks.
crc_a = 0
input_file = open(sys.argv[1], "rb")
try:
    while True:
        chunk = input_file.read(32768)
        if len(chunk) == 0:
            break
        crc_a = calc_crc32(table, crc_a, chunk)
finally:
    input_file.close()

# CRC B: continue from CRC A over the model-name bytes.
model = "DR590W1"
model_bytes = []
for c in model:
    model_bytes.append(ord(c))
crc_b = calc_crc32(table, crc_a, model_bytes)

# CRC C: continue from CRC A over CRC B serialized as 4 little-endian bytes.
byte_swapped_crc_b = [crc_b & 0xFF, (crc_b >> 8) & 0xFF, (crc_b >> 16) & 0xFF, (crc_b >> 24) & 0xFF]
crc_c = calc_crc32(table, crc_a, byte_swapped_crc_b)

# Output the three CRCs, little-endian, to the target file.
output_file = open(sys.argv[2], "wb")
try:
    output_file.write(crc_a.to_bytes(4, byteorder="little"))
    output_file.write(crc_b.to_bytes(4, byteorder="little"))
    output_file.write(crc_c.to_bytes(4, byteorder="little"))
finally:
    output_file.close()
| StarcoderdataPython |
1936940 | <reponame>BodhiTechnology/PythonLib
class SimpleFlyWeight():
    """Flyweight via a precomputed, shared table of grade strings.

    principle: build the letter-grade values once and reuse the same
    objects on every lookup.
    """

    def __init__(self):
        # 'A+', 'A', 'A-', ..., 'D-' followed by 'F' (13 entries).
        self.grades = [
            letter + suffix
            for letter in 'ABCD'
            for suffix in ('+', '', '-')
        ] + ['F']

    def compute_grade(self, percent):
        """Map a percentage (clamped to 59..99) onto a shared grade string."""
        clamped = min(99, max(59, percent))
        return self.grades[(99 - clamped) * 3 // 10]
class DynamicFlyWeight():
    """Flyweight that lazily creates one shared instance per letter grade."""

    _instances = {}  # letter -> the single shared instance for that grade

    def __new__(cls, percent):
        clamped = min(99, max(50, percent))
        letter = 'FDCBA'[(clamped - 50) // 10]
        shared = cls._instances.get(letter)
        if shared is None:
            shared = cls._instances[letter] = object.__new__(DynamicFlyWeight)
        shared.letter = letter
        return shared

    def __repr__(self):
        return 'Grade {!r}'.format(self.letter)
if __name__ == '__main__':
    # Demo: the simple flyweight always answers from its shared table.
    simple_example = SimpleFlyWeight()
    print('equivalent grade for 0:', simple_example.compute_grade(0))
    print('equivalent grade for 99:', simple_example.compute_grade(99))
    print('equivalent grade for 77:', simple_example.compute_grade(75))
    # Demo: the dynamic flyweight shares one instance per distinct letter,
    # so the instance count stays bounded by the number of letters.
    print(
        DynamicFlyWeight(55),
        DynamicFlyWeight(85),
        DynamicFlyWeight(95),
        DynamicFlyWeight(100))
    print(len(DynamicFlyWeight._instances))
    print(DynamicFlyWeight(99), DynamicFlyWeight(100))
    print(len(DynamicFlyWeight._instances))
    print(DynamicFlyWeight(75))
    print(len(DynamicFlyWeight._instances))
| StarcoderdataPython |
6550334 | <gh_stars>10-100
#!/usr/bin/env python
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from subprocess import Popen
# Message body shown in the post-installation dialog.
lyrics = """Installation is complete. You need to restart the
computer in order to use the new installation.
You can continue to use this live media, although
any changes you make or documents you save will
not be preserved on reboot."""
class PyApp():
    """GTK dialog shown after installation: offers Restart or Continue."""

    def on_reboot(self, widget):
        # Reboot immediately, then tear down the GTK main loop.
        Popen('shutdown -r now', shell=True)
        Gtk.main_quit()

    def on_close(self, widget):
        Gtk.main_quit()

    def __init__(self):
        window = Gtk.Window()
        window.set_border_width(8)
        window.connect("destroy", Gtk.main_quit)
        window.set_title("Installation Completed")
        window.set_icon_from_file("/usr/local/lib/gbi/image/logo.png")
        # Outer vertical box: message on top, button row at the bottom.
        box1 = Gtk.VBox(False, 0)
        window.add(box1)
        box1.show()
        box2 = Gtk.VBox(False, 10)
        box2.set_border_width(10)
        box1.pack_start(box2, True, True, 0)
        box2.show()
        label = Gtk.Label(lyrics)
        box2.pack_start(label, True, True, 0)
        # Button row: Continue on the left, Restart on the right.
        box2 = Gtk.HBox(False, 10)
        box2.set_border_width(5)
        box1.pack_start(box2, False, True, 0)
        box2.show()
        table = Gtk.Table(1, 2, True)
        restart = Gtk.Button("Restart")
        restart.connect("clicked", self.on_reboot)
        Continue = Gtk.Button("Continue")
        Continue.connect("clicked", self.on_close)
        table.attach(Continue, 0, 1, 0, 1)
        table.attach(restart, 1, 2, 0, 1)
        box2.pack_start(table, True, True, 0)
        window.show_all()
# Build the dialog and hand control to the GTK main loop.
PyApp()
Gtk.main()
| StarcoderdataPython |
4940380 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from setuptools import setup, find_packages
import os
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Fixed: the original opened the file without ever closing it; the
    context manager guarantees the handle is released even on error.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
# Package metadata for gitflow_linter; the long description is the README.
setup(
    name='gitflow_linter',
    version='0.0.5',
    description='Checks if GitFlow is respected in a given repository, considering provided rules',
    long_description=read("README.md"),
    long_description_content_type='text/markdown',
    url='https://github.com/fighterpoul/gitflow_linter',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=find_packages(exclude=["tests"]),
    entry_points={
        # Console commands installed with the package.
        'console_scripts': [
            'gitflow-linter = gitflow_linter:main',
            'gitflow-linter-plugins = gitflow_linter:available_plugins',
        ],
    },
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.8",
    ],
    include_package_data=True,
    python_requires='>3.8',
    license="MIT",
    install_requires=[
        'PyYAML>=5.4.1',
        'GitPython>=3.1.17',
        'click>=7',
    ]
)
| StarcoderdataPython |
class ExternalTaskFinished:
    """Holds a finished external task's id and result, and can report
    the completion through an external task API client."""

    def __init__(self, external_task_id, result):
        self.__external_task_id = external_task_id
        self.__result = result

    async def send_to_external_task_api(self, external_task_api, identity, worker_id):
        """Mark the task finished on the API, forwarding the stored result."""
        await external_task_api.finish_external_task(
            identity,
            worker_id,
            self.__external_task_id,
            self.__result,
        )
| StarcoderdataPython |
5060272 | <reponame>vijaykumawat256/Prompt-Summarization<gh_stars>0
def interpret(code):
| StarcoderdataPython |
1973470 | from enum import Enum, auto
from state import State
from constants.upgrades import upgrades as u_data
class StateModifier():
    """Applies a hypothetical purchase to a copy of a game State and
    records the cost bookkeeping for that purchase."""

    class Action(Enum):
        PURCHASE_BUILDING = auto()
        PURCHASE_UPGRADE = auto()
        # NOTE(review): PURCHASE_COOKIE has no branch in modify(); passing
        # it would leave item_name/item_cost unbound and raise NameError.
        PURCHASE_COOKIE = auto()

    def __init__(self, state: State):
        # Copy so later modifications never mutate the caller's state.
        self.base_state = State(state)

    def modify(self, action, item):
        """Apply *action* for *item* to a fresh copy of the base state.

        Populates self.modified_state and self.modification (item name,
        total cost, and the cost still outstanding after spending the
        currently held cookies).
        """
        self.modified_state = State(self.base_state)
        if action is self.Action.PURCHASE_BUILDING:
            self.modified_state.data['buildings'][item]['owned'] += 1
            item_name = item
            item_cost = self.base_state.get_building_cost(item)
        elif action is self.Action.PURCHASE_UPGRADE:
            self.modified_state.data['upgrades'][item]['purchased'] = True
            upgrade_data = u_data[item]
            item_name = upgrade_data['name']
            item_cost = upgrade_data['cost']
        self.modification = {
            'action': action,
            'item': item_name,
            'total_cost': item_cost,
            # Cost not covered by the cookies currently on hand.
            'remaining_cost': max(0, item_cost - self.modified_state.data['misc']['cookies_current'])
        }
        # Spend cookies towards the purchase, never going below zero.
        self.modified_state.data['misc']['cookies_current'] = max(
            0, self.modified_state.data['misc']['cookies_current'] - item_cost)
| StarcoderdataPython |
11286305 | #! /usr/bin/python
import math
import sys
def set_correct_normal(possible_internal_points, plane):
    """Orient plane.normal so it points away from the hull interior.

    Uses the first witness point with a non-zero signed distance: if that
    point lies on the positive side, the normal is flipped in place.
    """
    for witness in possible_internal_points:
        side = dotProduct(plane.normal, witness - plane.pointA)
        if side == 0:
            continue  # witness lies on the plane; try the next one
        if side > 10 ** -10:
            plane.normal.x = -plane.normal.x
            plane.normal.y = -plane.normal.y
            plane.normal.z = -plane.normal.z
        return
def printV(vec):  # Print points
    # Print a point's coordinates (Python 2 print statement, space-separated).
    print vec.x, vec.y, vec.z
def cross(pointA, pointB):
    """Return the cross product pointA x pointB as a new Point."""
    cx = pointA.y * pointB.z - pointA.z * pointB.y
    cy = pointA.z * pointB.x - pointA.x * pointB.z
    cz = pointA.x * pointB.y - pointA.y * pointB.x
    return Point(cx, cy, cz)
def dotProduct(pointA, pointB):
    """Return the scalar (dot) product of two points treated as vectors."""
    return sum(
        getattr(pointA, axis) * getattr(pointB, axis)
        for axis in ('x', 'y', 'z')
    )
def checker_plane(a, b):
    """Return True if planes a and b have the same three vertices, in any order.

    Fixed: the original hand-enumerated permutations but, in the branch
    where a.pointA == b.pointC, tested a.pointB against b.pointC twice and
    never against b.pointB, so the permutation (A->C, B->B, C->A) was
    wrongly reported as unequal.  Comparing the sorted coordinate triples
    covers every permutation uniformly.
    """
    def key(p):
        return (p.x, p.y, p.z)

    verts_a = sorted(key(p) for p in (a.pointA, a.pointB, a.pointC))
    verts_b = sorted(key(p) for p in (b.pointA, b.pointB, b.pointC))
    return verts_a == verts_b
def checker_edge(a, b):
    """True if edges a and b join the same two vertices, in either direction."""
    same_direction = a.pointA == b.pointA and a.pointB == b.pointB
    flipped = a.pointB == b.pointA and a.pointA == b.pointB
    return True if (same_direction or flipped) else False
class Edge:
    """An undirected hull edge joining two vertex Points."""

    def __init__(self, pointA, pointB):
        self.pointA = pointA
        self.pointB = pointB

    def __str__(self):
        return ("Edge"
                "\n\tA: {0},{1},{2}"
                "\n\tB: {3},{4},{5}").format(
            self.pointA.x, self.pointA.y, self.pointA.z,
            self.pointB.x, self.pointB.y, self.pointB.z)

    def __hash__(self):
        return hash((self.pointA, self.pointB))

    def __eq__(self, other):
        # Direction-insensitive comparison, delegated to checker_edge.
        return checker_edge(self, other)
class Point:
    """A point in 3-space with vector arithmetic and value semantics."""

    def __init__(self, x=None, y=None, z=None):
        self.x = x
        self.y = y
        self.z = z

    def __sub__(self, other):
        return Point(self.x - other.x, self.y - other.y, self.z - other.z)

    def __add__(self, other):
        return Point(self.x + other.x, self.y + other.y, self.z + other.z)

    def length(self):
        """Euclidean norm of the vector from the origin."""
        return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)

    def __str__(self):
        return "{0},{1},{2}".format(self.x, self.y, self.z)

    def __hash__(self):
        return hash((self.x, self.y, self.z))

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y and self.z == other.z
class Plane:  # Plane class having 3 points for a triangle
    """A triangular hull face defined by three vertices.

    Caches its unit normal, offset from the origin, three edges, and a
    to_do set of candidate points on its positive side still to process.
    """

    def __init__(self, pointA, pointB, pointC):
        self.pointA = pointA
        self.pointB = pointB
        self.pointC = pointC
        self.normal = None
        self.distance = None
        self.calcNorm()
        self.to_do = set()
        self.edge1 = Edge(pointA, pointB)
        self.edge2 = Edge(pointB, pointC)
        self.edge3 = Edge(pointC, pointA)

    def calcNorm(self):
        """Compute and store the unit normal and the plane's scalar offset."""
        point1 = self.pointA - self.pointB
        point2 = self.pointB - self.pointC
        normVector = cross(point1, point2)
        # Normalise in place; degenerate (collinear) triangles would
        # divide by zero here.
        length = normVector.length()
        normVector.x = normVector.x / length
        normVector.y = normVector.y / length
        normVector.z = normVector.z / length
        self.normal = normVector
        self.distance = dotProduct(self.normal, self.pointA)

    def dist(self, pointX):
        """Signed distance of pointX from this plane along the normal."""
        return (dotProduct(self.normal, pointX - self.pointA))

    def get_edges(self):
        return [self.edge1, self.edge2, self.edge3]

    def calculate_to_do(self, temp=None):
        """Collect points with dist > 1e-10 into self.to_do.

        Scans *temp* when given, otherwise the module-global *points* list.
        """
        if (temp != None):
            for p in temp:
                dist = self.dist(p)
                if dist > 10**(-10):
                    self.to_do.add(p)
        else:
            for p in points:
                dist = self.dist(p)
                if dist > 10**(-10):
                    self.to_do.add(p)

    def __eq__(self, other):
        # Order-insensitive vertex comparison via checker_plane.
        return checker_plane(self, other)

    def __str__(self):
        string = "Plane : "
        string += "\n\tX: " + str(self.pointA.x) + "," + str(self.pointA.y) + "," + str(self.pointA.z)
        string += "\n\tY: " + str(self.pointB.x) + "," + str(self.pointB.y) + "," + str(self.pointB.z)
        string += "\n\tZ: " + str(self.pointC.x) + "," + str(self.pointC.y) + "," + str(self.pointC.z)
        string += "\n\tNormal: " + str(self.normal.x) + "," + str(self.normal.y) + "," + str(self.normal.z)
        return string

    def __hash__(self):
        return hash((self.pointA, self.pointB, self.pointC))
def calc_horizon(visited_planes, plane, eye_point, edge_list):  # Calculating the horizon for an eye to make new faces
    """Depth-first walk over the faces visible from eye_point.

    Visible faces accumulate in visited_planes; each edge shared between a
    visible face and a hidden neighbour (the horizon) is added to edge_list.
    Returns 1 if *plane* is visible from the eye, 0 otherwise.
    """
    if (plane.dist(eye_point) > 10**-10):
        visited_planes.append(plane)
        edges = plane.get_edges()
        for edge in edges:
            neighbour = adjacent_plane(plane, edge)
            if (neighbour not in visited_planes):
                result = calc_horizon(visited_planes, neighbour, eye_point, edge_list)
                if (result == 0):
                    # Neighbour is hidden: the shared edge lies on the horizon.
                    edge_list.add(edge)
        return 1
    else:
        return 0
def adjacent_plane(main_plane, edge):  # Finding adjacent planes to an edge
    """Return the other face in the global list_of_planes sharing *edge*.

    Implicitly returns None when no neighbouring face is found.
    """
    for plane in list_of_planes:
        edges = plane.get_edges()
        if (plane != main_plane) and (edge in edges):
            return plane
def distLine(pointA, pointB, pointX):
    """Distance from pointX to the line through pointA and pointB.

    Returns None when the line is degenerate (A == B).
    """
    direction = pointB - pointA
    if direction.length() == 0:
        return None
    parallelogram = cross(pointX - pointA, pointX - pointB)
    return parallelogram.length() / direction.length()
def max_dist_line_point(pointA, pointB):  # Calculate the maximum distant point from a line for initial simplex
    """Return the point from the global *points* list farthest from line AB.

    NOTE(review): if every candidate lies on the line (all distances 0),
    maxDistPoint is never assigned and this raises UnboundLocalError.
    """
    maxDist = 0;
    for point in points:
        if (pointA != point) and (pointB != point):
            dist = abs(distLine(pointA, pointB, point))
            if dist > maxDist:
                maxDistPoint = point
                maxDist = dist
    return maxDistPoint
def max_dist_plane_point(plane):  # Calculate the maximum distance from the plane
    """Return the point from the global *points* list farthest from *plane*
    (by absolute distance, i.e. on either side).

    NOTE(review): raises UnboundLocalError if all points lie on the plane.
    """
    maxDist = 0
    for point in points:
        dist = abs(plane.dist(point))
        if (dist > maxDist):
            maxDist = dist
            maxDistPoint = point
    return maxDistPoint
def find_eye_point(plane, to_do_list):
    """Return the candidate with the greatest positive distance from *plane*."""
    best_dist = 0
    for candidate in to_do_list:
        candidate_dist = plane.dist(candidate)
        if candidate_dist > best_dist:
            best_dist = candidate_dist
            best_point = candidate
    return best_point
def initial_dis(p, q):
    """Return the Euclidean distance between points p and q."""
    dx, dy, dz = p.x - q.x, p.y - q.y, p.z - q.z
    return math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
def initial_max(now):
    """Return the pair of the six extreme points with the greatest separation.

    Fixed: the original never updated ``maxi`` inside the loop, so every
    pair passed the ``dist > maxi`` test and the function effectively
    returned the *last* pair compared rather than the farthest one.
    (``range`` is used instead of ``xrange`` for identical behaviour on
    both Python 2 and 3.)
    """
    maxi = -1
    found = [[], []]
    for i in range(6):
        for j in range(i + 1, 6):
            dist = initial_dis(now[i], now[j])
            if dist > maxi:
                maxi = dist
                found = [now[i], now[j]]
    return found
def initial():  # To calculate the extreme points to make the initial simplex
    """Return the six axis-extreme points (x/y/z max and min) from the
    global *points* list, scanning the first *num* entries."""
    x_min_temp = 10**9
    x_max_temp = -10**9
    y_min_temp = 10**9
    y_max_temp = -10**9
    z_min_temp = 10**9
    z_max_temp = -10**9
    for i in xrange(num):
        if points[i].x > x_max_temp:
            x_max_temp = points[i].x
            x_max = points[i]
        if points[i].x < x_min_temp:
            x_min_temp = points[i].x
            x_min = points[i]
        if points[i].y > y_max_temp:
            y_max_temp = points[i].y
            y_max = points[i]
        if points[i].y < y_min_temp:
            y_min_temp = points[i].y
            y_min = points[i]
        if points[i].z > z_max_temp:
            z_max_temp = points[i].z
            z_max = points[i]
        if points[i].z < z_min_temp:
            z_min_temp = points[i].z
            z_min = points[i]
    return (x_max, x_min, y_max, y_min, z_max, z_min)
points = []  # List to store the points

# Usage: script.py <input-file>; the first line of the file is the point
# count, each following line is "x y z".
if len(sys.argv) < 2:
    print "Few arguments. See README for help"
    sys.exit()

try:  # Open the input and store it in points list
    data = open(sys.argv[1], "r")
    num = int(data.readline())
    for line in data:
        a = map(float, line.split())
        points.append(Point(a[0], a[1], a[2]))
    # A 3D hull needs at least four non-coplanar points.
    if num < 4:
        print "Less than 4 points so 1D or 2D"
        sys.exit()
finally:
    data.close()
try:
    # Build the initial tetrahedron: the two farthest extreme points form a
    # line, the point farthest from that line completes a plane, and the
    # point farthest from that plane completes the simplex.
    extremes = initial()  # calculate the extreme points for every axis.
    initial_line = initial_max(extremes)  # Make the initial line by joining farthest 2 points
    third_point = max_dist_line_point(initial_line[0], initial_line[1])  # Calculate the 3rd point to make a plane
    first_plane = Plane(initial_line[0], initial_line[1], third_point)  # Make the initial plane by joining 3rd point to the line
    fourth_point = max_dist_plane_point(first_plane)  # Make the fourth plane to make a tetrahedron
except:
    # Degenerate input (all collinear/coplanar) fails one of the steps above.
    print "Figure either in 2D or 3D"
    sys.exit()

possible_internal_points = [initial_line[0], initial_line[1], third_point, fourth_point]  # List that helps in calculating orientation of point

second_plane = Plane(initial_line[0], initial_line[1], fourth_point)  # The other planes of the tetrahedron
third_plane = Plane(initial_line[0], fourth_point, third_point)
fourth_plane = Plane(initial_line[1], third_point, fourth_point)

set_correct_normal(possible_internal_points, first_plane)  # Setting the orientation of normal correct
set_correct_normal(possible_internal_points, second_plane)
set_correct_normal(possible_internal_points, third_plane)
set_correct_normal(possible_internal_points, fourth_plane)

first_plane.calculate_to_do()  # Calculating the to_do list which stores the point for which eye_point have to be found
second_plane.calculate_to_do()
third_plane.calculate_to_do()
fourth_plane.calculate_to_do()
list_of_planes = []  # List containing all the planes
list_of_planes.append(first_plane)
list_of_planes.append(second_plane)
list_of_planes.append(third_plane)
list_of_planes.append(fourth_plane)

# Quickhull main loop: while any face still has unprocessed outside points,
# pick its farthest (eye) point, find the horizon, drop the visible faces
# and stitch new faces from the horizon edges to the eye point.
any_left = True  # Checking if planes with to do list is over

while any_left:
    any_left = False
    # NOTE(review): list_of_planes is mutated inside this for-loop
    # (remove/append); correctness relies on restarting via the while-loop.
    for working_plane in list_of_planes:
        if len(working_plane.to_do) > 0:
            any_left = True
            eye_point = find_eye_point(working_plane, working_plane.to_do)  # Calculate the eye point of the face
            edge_list = set()
            visited_planes = []
            calc_horizon(visited_planes, working_plane, eye_point, edge_list)  # Calculate the horizon
            for internal_plane in visited_planes:  # Remove the internal planes
                list_of_planes.remove(internal_plane)
            for edge in edge_list:  # Make new planes
                new_plane = Plane(edge.pointA, edge.pointB, eye_point)
                set_correct_normal(possible_internal_points, new_plane)
                # New faces only need to re-check points that were outside
                # one of the removed faces.
                temp_to_do = set()
                for internal_plane in visited_planes:
                    temp_to_do = temp_to_do.union(internal_plane.to_do)
                new_plane.calculate_to_do(temp_to_do)
                list_of_planes.append(new_plane)
# Collect the distinct vertices of the remaining hull faces.
final_vertices = set()
for plane in list_of_planes:
    final_vertices.add(plane.pointA)
    final_vertices.add(plane.pointB)
    final_vertices.add(plane.pointC)

try:  # Write the hull vertices to the output file
    data1 = open("data/" + sys.argv[1].split('.')[0] + ".out", "w")
    data1.write(str(len(final_vertices)) + '\n')
    for point in final_vertices:
        # Fixed: the original wrote point.x three times, discarding the
        # y and z coordinates of every vertex.
        data1.write(str(point.x) + ' ' + str(point.y) + ' ' + str(point.z) + '\n')
finally:
    data1.close()
| StarcoderdataPython |
3527610 | import json
import os
import multiprocessing
import time
import scipy.io as scio
from utils import *
# NOTE(review): '~' is not expanded by os.path.join; consider os.path.expanduser.
base_dir = '~/'
name_path = os.path.join(base_dir, 'trainval.txt')
thread_num = 8  # number of worker processes

# Read the image-name list (one name per line) at import time.
name_list = []
with open(name_path, 'r') as f:
    for line in f:
        line = line.strip('\n')
        name_list.append(line)

data_path = os.path.join(base_dir, 'MCG-Pascal-Main_trainvaltest_2012-proposals')
json_path = os.path.join(base_dir, 'proposals')
global_limit = 100  # maximum number of proposals kept per image
# global_limit = 200
execution_interval = []  # filled by preparation(): per-process index boundaries
def work(start, end):
    """Convert MCG .mat proposals for name_list[start:end] into RLE JSON files."""
    for name in name_list[start:end]:
        print(name)
        mat_path = os.path.join(data_path, name + '.mat')
        data = scio.loadmat(mat_path)
        superpixels = data['superpixels']
        labels = data['labels']
        proposals = []
        # Keep at most global_limit proposals per image.
        limit = min(global_limit, len(labels))
        for item in labels[:limit]:
            label = item[0][0]
            # The union of the listed superpixel ids forms one proposal mask.
            img = np.zeros(shape=superpixels.shape)
            for index in label:
                mask = superpixels == index
                img[mask] = 1
            img = img.astype(bool)
            proposals.append(img)
        # rle_encode and np come from the star-imported utils module.
        proposals_encode = list(map(rle_encode, proposals))
        json_file_name = os.path.join(json_path, name + '.json')
        with open(json_file_name, 'w') as f:
            json.dump(proposals_encode, f)
def preparation():
    """Split name_list into thread_num contiguous index intervals.

    Stores the interval boundaries in the module-global execution_interval
    (thread_num start offsets followed by the total length).
    """
    global execution_interval
    total = len(name_list)
    execution_interval = [
        int(i * total / thread_num) for i in range(thread_num - 1)
    ]
    execution_interval.append(total)
def multiprocessing_function():
    """Spawn one worker process per interval in execution_interval and join all."""
    processes = []
    for index in range(len(execution_interval) - 1):
        processes.append(multiprocessing.Process(target=work, args=(execution_interval[index],
                                                                    execution_interval[index + 1])))
    for p in processes:
        p.start()
    for p in processes:
        p.join()
def main():
    """Time the full conversion: compute intervals, run the workers, report."""
    start = time.time()
    preparation()
    multiprocessing_function()
    print("over")
    end = time.time()
    print(str(round(end - start, 3)) + 's')


if __name__ == '__main__':
    main()
11202699 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the convection.DiagnoseConvectivePrecipitation plugin."""
import datetime
import unittest
import iris
import numpy as np
from cf_units import Unit
from iris.coords import AuxCoord, DimCoord
from iris.cube import Cube
from iris.tests import IrisTest
from improver.convection import DiagnoseConvectivePrecipitation
from improver.synthetic_data.set_up_test_cubes import (
add_coordinate,
set_up_variable_cube,
)
# Fraction to convert from mm/hr to m/s.
# m/s are SI units, however, mm/hr values are easier to handle.
mm_hr_to_m_s = 2.7778e-7
def set_up_precipitation_rate_cube():
    """Create a cube with metadata and values suitable for
    precipitation rate."""
    # 1x4x4 field: one constant rate per row (2, 4, 8, 16 mm/h) with three
    # cells zeroed to create within-row variation.
    data = np.zeros((1, 4, 4))
    data[0, 0, :] = 2.0
    data[0, 1, :] = 4.0
    data[0, 2, :] = 8.0
    data[0, 3, :] = 16.0
    data[0, 0, 2] = 0.0
    data[0, 2, 1] = 0.0
    data[0, 3, 0] = 0.0
    precip_cube = set_up_variable_cube(
        data.astype(np.float32), "lwe_precipitation_rate", "mm h-1", "equalarea",
    )
    precip_cube.convert_units("m s-1")
    # 2 km spacing on both equal-area axes.
    coord_points = np.array([0.0, 2000.0, 4000.0, 6000.0])
    precip_cube.coord("projection_y_coordinate").points = coord_points
    precip_cube.coord("projection_x_coordinate").points = coord_points
    return precip_cube
def apply_threshold(cube, threshold):
    """Binarise cube.data in place: 1 where data > threshold, else 0.

    Returns the same cube for convenient chaining.
    """
    exceeded = cube.data > threshold
    cube.data = exceeded.astype(int)
    return cube
def lower_higher_threshold_cubelist(cube, lower_threshold, higher_threshold):
    """Binarise copies of *cube* at the low and high thresholds and return
    both (low first) in a CubeList."""
    binarised = [
        apply_threshold(cube.copy(), threshold)
        for threshold in (lower_threshold, higher_threshold)
    ]
    return iris.cube.CubeList(binarised)
class Test__repr__(IrisTest):
    """Test the repr method."""

    def test_basic(self):
        """Test that the __repr__ returns the expected string."""
        lower_threshold = 0.001 * mm_hr_to_m_s
        higher_threshold = 5 * mm_hr_to_m_s
        neighbourhood_method = "square"
        radii = 2000.0
        result = str(
            DiagnoseConvectivePrecipitation(
                lower_threshold, higher_threshold, neighbourhood_method, radii
            )
        )
        # Thresholds appear converted to m/s; remaining options are defaults.
        msg = (
            "<DiagnoseConvectivePrecipitation: lower_threshold 2.7778e-10; "
            "higher_threshold 1.3889e-06; neighbourhood_method: square; "
            "radii: 2000.0; fuzzy_factor None; comparison_operator: >; "
            "lead_times: None; weighted_mode: True;"
            "use_adjacent_grid_square_differences: True>"
        )
        self.assertEqual(str(result), msg)
class Test__calculate_convective_ratio(IrisTest):
    """Test the _calculate_convective_ratio method."""

    def setUp(self):
        """Set up the cube, thresholded cubelist and threshold list shared
        by the tests."""
        self.lower_threshold = 0.001 * mm_hr_to_m_s
        self.higher_threshold = 5 * mm_hr_to_m_s
        self.neighbourhood_method = "square"
        self.radii = 2000.0
        self.cube = set_up_precipitation_rate_cube()
        self.lower_cube = self.cube.copy()
        self.higher_cube = self.cube.copy()
        self.cubelist = lower_higher_threshold_cubelist(
            self.cube, self.lower_threshold, self.higher_threshold,
        )
        self.threshold_list = [self.lower_threshold, self.higher_threshold]

    def test_basic(self):
        """Test a basic example using the default values for the keyword
        arguments. Make sure that the output is a cube with the expected
        data."""
        expected = np.array(
            [
                [
                    [0.0, 0.0, 0.0, 0.0],
                    [0.2, 0.28571429, 0.28571429, 0.4],
                    [0.5, 0.57142857, 0.625, 0.66666667],
                    [1.0, 1.0, 1.0, 1.0],
                ]
            ]
        )
        result = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            self.neighbourhood_method,
            self.radii,
        )._calculate_convective_ratio(self.cubelist, self.threshold_list)
        self.assertArrayAlmostEqual(result, expected)

    def test_no_precipitation(self):
        """If there is no precipitation, then the convective ratio will try
        to do a 0/0 division, which will result in NaN values. Check that
        the output array works as intended."""
        cube = set_up_precipitation_rate_cube()
        cube.data = np.zeros(cube.shape)
        expected = np.full(cube.shape, np.nan)
        cubelist = lower_higher_threshold_cubelist(
            cube, self.lower_threshold, self.higher_threshold
        )
        result = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            self.neighbourhood_method,
            self.radii,
        )._calculate_convective_ratio(cubelist, self.threshold_list)
        self.assertArrayAlmostEqual(result, expected)

    def test_catch_infinity_values(self):
        """Test an example where the infinity values are generated.
        Ensure these are caught as intended."""
        # Swapping the thresholds makes the denominator zero where the
        # numerator is not, producing infinities.
        lower_threshold = 5 * mm_hr_to_m_s
        higher_threshold = 0.001 * mm_hr_to_m_s
        cubelist = lower_higher_threshold_cubelist(
            self.cube, lower_threshold, higher_threshold
        )
        msg = "A value of infinity was found"
        with self.assertRaisesRegex(ValueError, msg):
            DiagnoseConvectivePrecipitation(
                self.lower_threshold,
                self.higher_threshold,
                self.neighbourhood_method,
                self.radii,
            )._calculate_convective_ratio(cubelist, self.threshold_list)

    def test_catch_greater_than_1_values(self):
        """Test an example where the greater than 1 values are generated.
        Ensure these are caught as intended."""
        lower_threshold = 5 * mm_hr_to_m_s
        higher_threshold = 0.001 * mm_hr_to_m_s
        cubelist = lower_higher_threshold_cubelist(
            self.cube, lower_threshold, higher_threshold
        )
        # A larger radius yields ratios above 1 instead of infinities.
        radii = 4000.0
        msg = "A value of greater than 1.0 was found"
        with self.assertRaisesRegex(ValueError, msg):
            DiagnoseConvectivePrecipitation(
                self.lower_threshold,
                self.higher_threshold,
                self.neighbourhood_method,
                radii,
            )._calculate_convective_ratio(cubelist, self.threshold_list)

    def test_multiple_lead_times_neighbourhooding(self):
        """Test where neighbourhood is applied for multiple lead times, where
        different radii are applied at each lead time."""
        expected = np.array(
            [
                [
                    [
                        [0.25, 0.166667, 0.166667, 0.0],
                        [0.166667, 0.11111111, 0.11111111, 0.0],
                        [0.166667, 0.11111111, 0.11111111, 0.0],
                        [0.0, 0.0, 0.0, 0.0],
                    ],
                    [
                        [0.1111111, 0.0833333, 0.0833333, 0.1111111],
                        [0.0833333, 0.0625, 0.0625, 0.0833333],
                        [0.0833333, 0.0625, 0.0625, 0.0833333],
                        [0.1111111, 0.0833333, 0.0833333, 0.1111111],
                    ],
                ]
            ]
        )
        # Set up a cube with 3 and 6 hour forecast periods
        precip = set_up_variable_cube(
            np.ones((1, 4, 4), dtype=np.float32),
            "lwe_precipitation_rate",
            "mm h-1",
            "equalarea",
            time=datetime.datetime(2015, 11, 19, 3),
            frt=datetime.datetime(2015, 11, 19, 0),
        )
        precip = add_coordinate(
            precip,
            [datetime.datetime(2015, 11, 19, 3), datetime.datetime(2015, 11, 19, 6)],
            "time",
            order=[1, 0, 2, 3],
            is_datetime=True,
        )
        coord_points = np.array([0.0, 2000.0, 4000.0, 6000.0])
        precip.coord("projection_y_coordinate").points = coord_points
        precip.coord("projection_x_coordinate").points = coord_points
        # A single heavy-rain cell at (1, 1) in both lead times.
        data = np.full((1, 2, 4, 4), 1.0)
        data[0, 0, 1, 1] = 20.0
        data[0, 1, 1, 1] = 20.0
        precip.data = data.astype(np.float32)
        precip.convert_units("m s-1")
        cubelist = lower_higher_threshold_cubelist(
            precip, self.lower_threshold, self.higher_threshold
        )
        # Radii are matched to lead times: 2 km at T+3, 4 km at T+6.
        lead_times = [3, 6]
        radii = [2000.0, 4000.0]
        result = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            self.neighbourhood_method,
            radii=radii,
            lead_times=lead_times,
        )._calculate_convective_ratio(cubelist, self.threshold_list)
        self.assertArrayAlmostEqual(result, expected)

    def test_circular_neighbourhood(self):
        """Test a circular neighbourhood."""
        expected = np.array(
            [
                [
                    [0.0, 0.0, np.nan, 0.0],
                    [0.0, 0.0, 0.0, 0.0],
                    [1.0, np.nan, 1.0, 1.0],
                    [np.nan, 1.0, 1.0, 1.0],
                ]
            ]
        )
        neighbourhood_method = "circular"
        result = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            neighbourhood_method,
            self.radii,
        )._calculate_convective_ratio(self.cubelist, self.threshold_list)
        self.assertArrayAlmostEqual(result, expected)

    def test_circular_neighbourhood_weighted_mode(self):
        """Test a circular neighbourhood with the weighted_mode
        set to True."""
        expected = np.array(
            [
                [
                    [0.0, 0.0, 0.0, 0.0],
                    [0.2, 0.0, 0.25, 0.2],
                    [0.666667, 0.75, 0.75, 0.8],
                    [1.0, 1.0, 1.0, 1.0],
                ]
            ]
        )
        neighbourhood_method = "circular"
        weighted_mode = False
        result = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            neighbourhood_method,
            self.radii,
            weighted_mode=weighted_mode,
        )._calculate_convective_ratio(self.cubelist, self.threshold_list)
        self.assertArrayAlmostEqual(result, expected)
class Test_absolute_differences_between_adjacent_grid_squares(IrisTest):
    """Test the absolute_differences_between_adjacent_grid_squares method."""

    def setUp(self):
        """Set up the cube."""
        self.lower_threshold = 0.001 * mm_hr_to_m_s
        self.higher_threshold = 5 * mm_hr_to_m_s
        self.neighbourhood_method = "square"
        self.radii = 2000.0
        self.cube = set_up_precipitation_rate_cube()

    def test_basic(self):
        """Test that differences are calculated correctly between adjacent
        grid squares along x and y. Check that absolute values are returned."""
        # Expected diffs are in m/s (the cube is converted on creation);
        # the x-diff cube loses one column, the y-diff cube one row.
        expected_x = np.array(
            [
                [
                    [0.000000e00, 5.555600e-07, 5.555600e-07],
                    [0.000000e00, 0.000000e00, 0.000000e00],
                    [2.222240e-06, 2.222240e-06, 0.000000e00],
                    [4.444480e-06, 0.000000e00, 0.000000e00],
                ]
            ]
        )
        expected_y = np.array(
            [
                [
                    [5.555600e-07, 5.555600e-07, 1.111120e-06, 5.555600e-07],
                    [1.111120e-06, 1.111120e-06, 1.111120e-06, 1.111120e-06],
                    [2.222240e-06, 4.444480e-06, 2.222240e-06, 2.222240e-06],
                ]
            ]
        )
        result = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            self.neighbourhood_method,
            self.radii,
        ).absolute_differences_between_adjacent_grid_squares(self.cube)
        self.assertIsInstance(result, iris.cube.CubeList)
        self.assertArrayAlmostEqual(result[0].data, expected_x)
        self.assertArrayAlmostEqual(result[1].data, expected_y)
class Test_iterate_over_threshold(IrisTest):
    """Tests for the iterate_over_threshold method of
    DiagnoseConvectivePrecipitation."""

    def setUp(self):
        """Create the input precipitation cube and plugin configuration."""
        self.lower_threshold = 0.001 * mm_hr_to_m_s
        self.higher_threshold = 5 * mm_hr_to_m_s
        self.neighbourhood_method = "square"
        self.radii = 2000.0
        self.cube = set_up_precipitation_rate_cube()

    def test_basic(self):
        """Thresholding a list of cubes should yield the expected
        binary field for each cube."""
        cubes = iris.cube.CubeList([self.cube, self.cube])
        plugin = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            self.neighbourhood_method,
            self.radii,
        )
        result = plugin.iterate_over_threshold(cubes, self.higher_threshold)
        expected = np.array(
            [
                [
                    [0.0, 0.0, 0.0, 0.0],
                    [0.0, 0.0, 0.0, 0.0],
                    [1.0, 0.0, 1.0, 1.0],
                    [0.0, 1.0, 1.0, 1.0],
                ]
            ]
        )
        self.assertIsInstance(result, iris.cube.CubeList)
        self.assertArrayAlmostEqual(result[0].data, expected)

    def test_fuzzy_factor(self):
        """Specifying a fuzzy_factor should produce graduated values
        around the threshold rather than a hard 0/1 split."""
        cubes = iris.cube.CubeList([self.cube, self.cube])
        plugin = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            self.neighbourhood_method,
            self.radii,
            fuzzy_factor=0.7,
        )
        result = plugin.iterate_over_threshold(cubes, self.higher_threshold)
        expected = np.array(
            [
                [
                    [0.0, 0.0, 0.0, 0.0],
                    [0.166667, 0.166667, 0.166667, 0.166667],
                    [1.0, 0.0, 1.0, 1.0],
                    [0.0, 1.0, 1.0, 1.0],
                ]
            ]
        )
        self.assertIsInstance(result, iris.cube.CubeList)
        self.assertArrayAlmostEqual(result[0].data, expected, decimal=4)
        self.assertArrayAlmostEqual(result[1].data, expected, decimal=4)

    def test_below_threshold(self):
        """With a '<=' comparison operator, points below the threshold
        should be flagged as significant instead."""
        lower_threshold = 5 * mm_hr_to_m_s
        higher_threshold = 0.001 * mm_hr_to_m_s
        cubes = iris.cube.CubeList([self.cube, self.cube])
        plugin = DiagnoseConvectivePrecipitation(
            lower_threshold,
            higher_threshold,
            self.neighbourhood_method,
            self.radii,
            comparison_operator="<=",
        )
        result = plugin.iterate_over_threshold(cubes, self.higher_threshold)
        expected = np.array(
            [
                [
                    [1.0, 1.0, 1.0, 1.0],
                    [1.0, 1.0, 1.0, 1.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0, 0.0],
                ]
            ]
        )
        self.assertIsInstance(result, iris.cube.CubeList)
        self.assertArrayAlmostEqual(result[0].data, expected)
        self.assertArrayAlmostEqual(result[1].data, expected)
class Test_sum_differences_between_adjacent_grid_squares(IrisTest):
    """Tests for the sum_differences_between_adjacent_grid_squares
    method of DiagnoseConvectivePrecipitation."""

    def setUp(self):
        """Create the input precipitation cube and plugin configuration."""
        self.lower_threshold = 0.001 * mm_hr_to_m_s
        self.higher_threshold = 5 * mm_hr_to_m_s
        self.neighbourhood_method = "square"
        self.radii = 2000.0
        self.cube = set_up_precipitation_rate_cube()

    def _plugin(self):
        # Build the plugin with the default configuration from setUp.
        return DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            self.neighbourhood_method,
            self.radii,
        )

    def test_basic(self):
        """The sum of differences between adjacent grid squares, after
        correcting for the offset between the difference grid and the
        original grid, should match the expected field (3d input)."""
        expected = np.array(
            [
                [
                    [0.0, 2.0, 1.0, 0.0],
                    [2.0, 2.0, 0.0, 1.0],
                    [1.0, 2.0, 3.0, 3.0],
                    [1.0, 2.0, 1.0, 1.0],
                ]
            ]
        )
        # Thresholded differences along x (one column shorter).
        threshold_x = self.cube.copy()[:, :, :-1]
        threshold_x.data = np.array(
            [[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 1.0], [1.0, 0.0, 0.0]]]
        )
        # Thresholded differences along y (one row shorter).
        threshold_y = self.cube.copy()[:, :-1, :]
        threshold_y.data = np.array(
            [[[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 1.0]]]
        )
        thresholded = iris.cube.CubeList([threshold_x, threshold_y])
        result = self._plugin().sum_differences_between_adjacent_grid_squares(
            self.cube, thresholded
        )
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertArrayAlmostEqual(result.data, expected)

    def test_2d_input_cube(self):
        """The offset-corrected sum of adjacent-grid-square differences
        should also be correct when the input cube is 2d."""
        expected = np.array(
            [
                [0.0, 2.0, 1.0, 0.0],
                [2.0, 2.0, 0.0, 1.0],
                [1.0, 2.0, 3.0, 3.0],
                [1.0, 2.0, 1.0, 1.0],
            ]
        )
        cube = self.cube[0, :, :]
        # Thresholded differences along x (one column shorter).
        threshold_x = cube.copy()[:, :-1]
        threshold_x.data = np.array(
            [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 1.0], [1.0, 0.0, 0.0]]
        )
        # Thresholded differences along y (one row shorter).
        threshold_y = cube.copy()[:-1, :]
        threshold_y.data = np.array(
            [[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 1.0]]
        )
        thresholded = iris.cube.CubeList([threshold_x, threshold_y])
        result = self._plugin().sum_differences_between_adjacent_grid_squares(
            cube, thresholded
        )
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertArrayAlmostEqual(result.data, expected)
class Test_process(IrisTest):
    """Tests for the process method of DiagnoseConvectivePrecipitation."""

    def setUp(self):
        """Create the input precipitation cube and plugin configuration."""
        self.lower_threshold = 0.001 * mm_hr_to_m_s
        self.higher_threshold = 5 * mm_hr_to_m_s
        self.neighbourhood_method = "square"
        self.radii = 2000.0
        self.cube = set_up_precipitation_rate_cube()

    def test_use_adjacent_grid_square_differences(self):
        """Diagnosing convective precipitation from the differences
        between adjacent grid squares (the default) should match the
        expected field."""
        plugin = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            self.neighbourhood_method,
            self.radii,
        )
        result = plugin.process(self.cube)
        expected = np.array(
            [
                [
                    [0.0, 0.0, 0.0, 0.0],
                    [0.357143, 0.318182, 0.272727, 0.214286],
                    [0.6, 0.571429, 0.526316, 0.454545],
                    [0.818182, 0.8, 0.769231, 0.714286],
                ]
            ]
        )
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertArrayAlmostEqual(result.data, expected)

    def test_does_not_use_adjacent_grid_square_differences(self):
        """Diagnosing convective precipitation directly from the
        precipitation rate field, rather than from differences between
        adjacent grid squares, should match the expected field."""
        plugin = DiagnoseConvectivePrecipitation(
            self.lower_threshold,
            self.higher_threshold,
            self.neighbourhood_method,
            self.radii,
            use_adjacent_grid_square_differences=False,
        )
        result = plugin.process(self.cube)
        expected = np.array(
            [
                [
                    [0.0, 0.0, 0.0, 0.0],
                    [0.2, 0.28571429, 0.28571429, 0.4],
                    [0.5, 0.57142857, 0.625, 0.66666667],
                    [1.0, 1.0, 1.0, 1.0],
                ]
            ]
        )
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertArrayAlmostEqual(result.data, expected)
# Allow this test module to be executed directly from the command line.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
281258 |
from .table import table
from .split import split
from .dataset import dataset
from .cache import cache
| StarcoderdataPython |
8066517 | <reponame>Ascend/modelzoo<gh_stars>10-100
# Copyright (c) <NAME> 2016,
# All rights reserved
#
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://spdx.org/licenses/BSD-3-Clause.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Popular Learning Rate Schedulers"""
from __future__ import division
import math
import torch
from bisect import bisect_right
__all__ = ['IterationPolyLR']
class IterationPolyLR(torch.optim.lr_scheduler._LRScheduler):
    """Polynomial-decay learning-rate scheduler stepped once per iteration.

    Each parameter group's learning rate decays from its base LR towards
    ``target_lr`` following ``(1 - T / N) ** power``, where ``T`` is the
    current iteration (``last_epoch``) and ``N`` is ``max_iters``; the
    schedule reaches ``target_lr`` exactly when ``T == N``.

    Args:
        optimizer: Wrapped optimizer.
        target_lr: Learning rate the schedule decays towards. Default 0.
        max_iters: Total number of decay iterations. If 0 (the default),
            the schedule returns ``target_lr`` immediately instead of
            dividing by zero.
        power: Exponent of the polynomial decay. Default 0.9.
        last_epoch: Index of the last iteration; -1 means start fresh.
    """

    def __init__(self, optimizer, target_lr=0, max_iters=0, power=0.9, last_epoch=-1):
        self.target_lr = target_lr
        self.max_iters = max_iters
        self.power = power
        # The parent constructor records base_lrs and calls step() once,
        # which in turn calls get_lr() below.
        super(IterationPolyLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the scheduled learning rate for each parameter group."""
        N = self.max_iters
        T = self.last_epoch
        # Guard against max_iters == 0 (the default), which previously
        # raised ZeroDivisionError inside __init__ via the parent's step().
        if N <= 0:
            return [self.target_lr for _ in self.base_lrs]
        # Clamp T to N so that stepping past max_iters cannot raise a
        # domain error from a negative base with a fractional exponent;
        # behaviour for 0 <= T <= N is unchanged.
        factor = (1 - min(T, N) / N) ** self.power
        # https://blog.csdn.net/mieleizhi0522/article/details/83113824
        return [self.target_lr + (base_lr - self.target_lr) * factor for base_lr in self.base_lrs]
8078952 | from flask import current_app as app
class Sellproduct2:
    """A product listed by a seller, joined with its catalogue details
    from the Products table."""

    def __init__(self, seller_id, product_id, current, category, name, descrip, img_link, price, available):
        self.seller_id = seller_id
        self.product_id = product_id
        # True while the listing is active; False for past listings.
        self.current = current
        self.category = category
        self.name = name
        self.descrip = descrip
        self.img_link = img_link
        self.price = price
        self.available = available

    @staticmethod
    def _get_by_seller_and_status(sid, current):
        """Fetch listings for seller *sid* filtered by the *current* flag.

        Shared implementation for get_by_seller / get_by_seller_past,
        which previously duplicated this query verbatim.
        """
        rows = app.db.execute('''
SELECT SellProducts.seller_id, SellProducts.product_id, SellProducts.current, Products.category, Products.name, Products.descrip, Products.img_link, Products.price, Products.available
FROM SellProducts, Products
WHERE SellProducts.seller_id = :seller_id
AND SellProducts.current = :current
AND Products.product_id = SellProducts.product_id
''',
                              seller_id=sid,
                              current=current)
        return [Sellproduct2(*row) for row in rows]

    @staticmethod
    def get_by_seller(sid):
        """Return the seller's current (active) product listings."""
        return Sellproduct2._get_by_seller_and_status(sid, True)

    @staticmethod
    def get_by_seller_past(sid):
        """Return the seller's past (inactive) product listings."""
        return Sellproduct2._get_by_seller_and_status(sid, False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.